/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS

#include "dm_services_types.h"
#include "dc.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"

#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#ifdef CONFIG_DRM_AMD_DC_HDCP
#include "amdgpu_dm_hdcp.h"
#include <drm/drm_hdcp.h>
#endif
#include "amdgpu_pm.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif

#include "ivsrcid/ivsrcid_vislands30.h"

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/version.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>
#include <drm/drm_hdcp.h>

#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"

#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"

#include "soc15_common.h"
#endif

#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"

#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
#endif
#define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);

#define FIRMWARE_RAVEN_DMCU		"amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);

#define FIRMWARE_NAVI12_DMCU		"amdgpu/navi12_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);

/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100

/* Number of bytes in PSP footer for firmware. */
#define PSP_FOOTER_BYTES 0x100
/**
 * DOC: overview
 *
 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
 * requests into DC requests, and DC responses into DRM responses.
 *
 * The root control structure is &struct amdgpu_display_manager.
 */

/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);

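/**
 * get_subconnector_type() - Map a DC dongle type to a DRM subconnector type
 * @link: the DC link whose DPCD dongle capabilities are inspected
 *
 * Return: the DRM subconnector enum matching the dongle reported in the
 * link's DPCD caps, or DRM_MODE_SUBCONNECTOR_Unknown for mismatched or
 * unrecognized dongles.
 */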
static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
{
	switch (link->dpcd_caps.dongle_type) {
	case DISPLAY_DONGLE_NONE:
		return DRM_MODE_SUBCONNECTOR_Native;
	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
		return DRM_MODE_SUBCONNECTOR_VGA;
	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
	case DISPLAY_DONGLE_DP_DVI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_DVID;
	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
		return DRM_MODE_SUBCONNECTOR_HDMIA;
	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
	default:
		return DRM_MODE_SUBCONNECTOR_Unknown;
	}
}

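/**
 * update_subconnector_property() - Refresh the DP subconnector property
 * @aconnector: the DM connector to update
 *
 * Only applies to DisplayPort connectors. If no sink is attached, the
 * property is reset to DRM_MODE_SUBCONNECTOR_Unknown.
 */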
static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
{
	struct dc_link *link = aconnector->dc_link;
	struct drm_connector *connector = &aconnector->base;
	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;

	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
		return;

	if (aconnector->dc_sink)
		subconnector = get_subconnector_type(link);

	drm_object_property_set_value(&connector->base,
			connector->dev->mode_config.dp_subconnector_property,
			subconnector);
}

/*
 * Initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder and drm_mode_config.
 *
 * Returns 0 on success.
 */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
				struct drm_plane *plane,
				unsigned long possible_crtcs,
				const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			       struct drm_plane *plane,
			       uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *amdgpu_dm_connector,
				    uint32_t link_index,
				    struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
				  struct amdgpu_encoder *aencoder,
				  uint32_t link_index);

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);

static int amdgpu_dm_atomic_commit(struct drm_device *dev,
				   struct drm_atomic_state *state,
				   bool nonblock);

static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);

static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state);

static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state);

static void amdgpu_dm_set_psr_caps(struct dc_link *link);
static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);

/**
 * dm_vblank_get_counter() - Get counter for number of vertical blanks
 * @adev: desired amdgpu device
 * @crtc: which CRTC to get the counter from
 *
 * Return: counter for vertical blanks
 */
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (acrtc->dm_irq_params.stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
	}
}

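/**
 * dm_crtc_get_scanoutpos() - Read the current scanout position of a CRTC
 * @adev: amdgpu device
 * @crtc: CRTC index to query
 * @vbl: returns vblank start in the low 16 bits, vblank end in the high 16
 * @position: returns vertical position in the low 16 bits, horizontal in
 *            the high 16
 *
 * Return: 0 on success (or when the stream is gone), -EINVAL for an
 * out-of-range CRTC index.
 */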
static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
				  u32 *vbl, u32 *position)
{
	uint32_t v_blank_start, v_blank_end, h_position, v_position;

	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;
	else {
		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];

		if (acrtc->dm_irq_params.stream == NULL) {
			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
				  crtc);
			return 0;
		}

		/*
		 * TODO rework base driver to use values directly.
		 * for now parse it back into reg-format
		 */
		dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
					 &v_blank_start,
					 &v_blank_end,
					 &h_position,
					 &v_position);

		*position = v_position | (h_position << 16);
		*vbl = v_blank_start | (v_blank_end << 16);
	}

	return 0;
}

static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}

static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}

static bool dm_check_soft_reset(void *handle)
{
	return false;
}

static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}

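/**
 * get_crtc_by_otg_inst() - Look up the amdgpu_crtc driving an OTG instance
 * @adev: amdgpu device
 * @otg_inst: output timing generator instance
 *
 * Return: the matching CRTC, or NULL if none is found. An instance of -1
 * triggers a warning and falls back to the first CRTC.
 */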
static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
		     int otg_inst)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	if (otg_inst == -1) {
		WARN_ON(1);
		return adev->mode_info.crtcs[0];
	}

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->otg_inst == otg_inst)
			return amdgpu_crtc;
	}

	return NULL;
}

static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
{
	return acrtc->dm_irq_params.freesync_config.state ==
		       VRR_STATE_ACTIVE_VARIABLE ||
	       acrtc->dm_irq_params.freesync_config.state ==
		       VRR_STATE_ACTIVE_FIXED;
}

static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}

/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: ignored
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;
	struct drm_pending_vblank_event *e;
	uint32_t vpos, hpos, v_blank_start, v_blank_end;
	bool vrr_active;

	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
		return;
	}

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
				 amdgpu_crtc->pflip_status,
				 AMDGPU_FLIP_SUBMITTED,
				 amdgpu_crtc->crtc_id,
				 amdgpu_crtc);
		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
		return;
	}

	/* page flip completed. */
	e = amdgpu_crtc->event;
	amdgpu_crtc->event = NULL;

	if (!e)
		WARN_ON(1);

	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);

	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
	if (!vrr_active ||
	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
				      &v_blank_end, &hpos, &vpos) ||
	    (vpos < v_blank_start)) {
		/* Update to correct count and vblank timestamp if racing with
		 * vblank irq. This also updates to the correct vblank timestamp
		 * even in VRR mode, as scanout is past the front-porch atm.
		 */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		/* Wake up userspace by sending the pageflip event with proper
		 * count and timestamp of vblank of flip completion.
		 */
		if (e) {
			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

			/* Event sent, so done with vblank for this flip */
			drm_crtc_vblank_put(&amdgpu_crtc->base);
		}
	} else if (e) {
		/* VRR active and inside front-porch: vblank count and
		 * timestamp for pageflip event will only be up to date after
		 * drm_crtc_handle_vblank() has been executed from late vblank
		 * irq handler after start of back-porch (vline 0). We queue the
		 * pageflip event for send-out by drm_crtc_handle_vblank() with
		 * updated timestamp and count, once it runs after us.
		 *
		 * We need to open-code this instead of using the helper
		 * drm_crtc_arm_vblank_event(), as that helper would
		 * call drm_crtc_accurate_vblank_count(), which we must
		 * not call in VRR mode while we are in front-porch!
		 */

		/* sequence will be replaced by real count during send-out. */
		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
		e->pipe = amdgpu_crtc->crtc_id;

		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
		e = NULL;
	}

	/* Keep track of vblank of this flip for flip throttling. We use the
	 * cooked hw counter, as that one is incremented at the start of this
	 * vblank of pageflip completion, so last_flip_vblank is the forbidden
	 * count for queueing new pageflips if vsync + VRR is enabled.
	 */
	amdgpu_crtc->dm_irq_params.last_flip_vblank =
		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
			 amdgpu_crtc->crtc_id, amdgpu_crtc,
			 vrr_active, (int) !e);
}

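/**
 * dm_vupdate_high_irq() - Handle VUPDATE interrupt
 * @interrupt_params: used for determining the VUPDATE instance
 *
 * In VRR mode, core vblank handling is deferred to this handler, which runs
 * after the end of the front-porch, so that vblank timestamping gives valid
 * results. It also drives below-the-range (BTR) FreeSync processing on
 * pre-DCE12 ASICs.
 */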
static void dm_vupdate_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

	if (acrtc) {
		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

		DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
			      acrtc->crtc_id,
			      vrr_active);

		/* Core vblank handling is done here after end of front-porch in
		 * vrr mode, as vblank timestamping will give valid results
		 * while now done after front-porch. This will also deliver
		 * page-flip completion events that have been queued to us
		 * if a pageflip happened inside front-porch.
		 */
		if (vrr_active) {
			drm_crtc_handle_vblank(&acrtc->base);

			/* BTR processing for pre-DCE12 ASICs */
			if (acrtc->dm_irq_params.stream &&
			    adev->family < AMDGPU_FAMILY_AI) {
				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
				mod_freesync_handle_v_update(
				    adev->dm.freesync_module,
				    acrtc->dm_irq_params.stream,
				    &acrtc->dm_irq_params.vrr_params);

				dc_stream_adjust_vmin_vmax(
				    adev->dm.dc,
				    acrtc->dm_irq_params.stream,
				    &acrtc->dm_irq_params.vrr_params.adjust);
				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
			}
		}
	}
}

/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler.
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	unsigned long flags;
	int vrr_active;

	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
	if (!acrtc)
		return;

	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

	DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
		      vrr_active, acrtc->dm_irq_params.active_planes);

	/*
	 * Core vblank handling at start of front-porch is only possible
	 * in non-vrr mode, as only there vblank timestamping will give
	 * valid results while done in front-porch. Otherwise defer it
	 * to dm_vupdate_high_irq after end of front-porch.
	 */
	if (!vrr_active)
		drm_crtc_handle_vblank(&acrtc->base);

	/*
	 * Following stuff must happen at start of vblank, for crc
	 * computation and below-the-range btr support in vrr mode.
	 */
	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

	/* BTR updates need to happen before VUPDATE on Vega and above. */
	if (adev->family < AMDGPU_FAMILY_AI)
		return;

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (acrtc->dm_irq_params.stream &&
	    acrtc->dm_irq_params.vrr_params.supported &&
	    acrtc->dm_irq_params.freesync_config.state ==
		    VRR_STATE_ACTIVE_VARIABLE) {
		mod_freesync_handle_v_update(adev->dm.freesync_module,
					     acrtc->dm_irq_params.stream,
					     &acrtc->dm_irq_params.vrr_params);

		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
					   &acrtc->dm_irq_params.vrr_params.adjust);
	}

	/*
	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
	 * In that case, pageflip completion interrupts won't fire and pageflip
	 * completion events won't get delivered. Prevent this by sending
	 * pending pageflip events from here if a flip is still pending.
	 *
	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
	 * avoid race conditions between flip programming and completion,
	 * which could cause too early flip completion events.
	 */
	if (adev->family >= AMDGPU_FAMILY_RV &&
	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
	    acrtc->dm_irq_params.active_planes == 0) {
		if (acrtc->event) {
			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
			acrtc->event = NULL;
			drm_crtc_vblank_put(&acrtc->base);
		}
		acrtc->pflip_status = AMDGPU_FLIP_NONE;
	}

	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}

static int dm_set_clockgating_state(void *handle,
		  enum amd_clockgating_state state)
{
	return 0;
}

static int dm_set_powergating_state(void *handle,
		  enum amd_powergating_state state)
{
	return 0;
}

/* Prototypes of private functions */
static int dm_early_init(void *handle);

/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_compressor_info *compressor = &adev->dm.compressor;
	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
	struct drm_display_mode *mode;
	unsigned long max_size = 0;

	if (adev->dm.dc->fbc_compressor == NULL)
		return;

	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	if (compressor->bo_ptr)
		return;

	list_for_each_entry(mode, &connector->modes, head) {
		if (max_size < mode->htotal * mode->vtotal)
			max_size = mode->htotal * mode->vtotal;
	}

	if (max_size) {
		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
			    &compressor->gpu_addr, &compressor->cpu_addr);

		if (r)
			DRM_ERROR("DM: Failed to initialize FBC\n");
		else {
			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
			DRM_INFO("DM: FBC alloc %lu\n", max_size * 4);
		}
	}
}

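/**
 * amdgpu_dm_audio_component_get_eld() - Fetch the ELD for an audio pin
 * @kdev: device backing the DRM device
 * @port: audio endpoint (pin) instance to look up
 * @pipe: unused
 * @enabled: set to true when a connector using @port is found
 * @buf: destination buffer for the ELD
 * @max_bytes: size of @buf
 *
 * Return: the size of the connector's ELD, or 0 if no connector uses @port.
 */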
static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
					     int pipe, bool *enabled,
					     unsigned char *buf, int max_bytes)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct amdgpu_dm_connector *aconnector;
	int ret = 0;

	*enabled = false;

	mutex_lock(&adev->dm.audio_lock);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->audio_inst != port)
			continue;

		*enabled = true;
		ret = drm_eld_size(connector->eld);
		memcpy(buf, connector->eld, min(max_bytes, ret));

		break;
	}
	drm_connector_list_iter_end(&conn_iter);

	mutex_unlock(&adev->dm.audio_lock);

	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);

	return ret;
}

static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
	.get_eld = amdgpu_dm_audio_component_get_eld,
};

static int amdgpu_dm_audio_component_bind(struct device *kdev,
					  struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = &amdgpu_dm_audio_component_ops;
	acomp->dev = kdev;
	adev->dm.audio_component = acomp;

	return 0;
}

static void amdgpu_dm_audio_component_unbind(struct device *kdev,
					     struct device *hda_kdev, void *data)
{
	struct drm_device *dev = dev_get_drvdata(kdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_audio_component *acomp = data;

	acomp->ops = NULL;
	acomp->dev = NULL;
	adev->dm.audio_component = NULL;
}

static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
	.bind	= amdgpu_dm_audio_component_bind,
	.unbind	= amdgpu_dm_audio_component_unbind,
};

static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
	int i, ret;

	if (!amdgpu_audio)
		return 0;

	adev->mode_info.audio.enabled = true;

	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].id =
			adev->dm.dc->res_pool->audios[i]->inst;
		adev->mode_info.audio.pin[i].offset = 0;
	}

	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
	if (ret < 0)
		return ret;

	adev->dm.audio_registered = true;

	return 0;
}

static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
	if (!amdgpu_audio)
		return;

	if (!adev->mode_info.audio.enabled)
		return;

	if (adev->dm.audio_registered) {
		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
		adev->dm.audio_registered = false;
	}

	/* TODO: Disable audio? */

	adev->mode_info.audio.enabled = false;
}

static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
	struct drm_audio_component *acomp = adev->dm.audio_component;

	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);

		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
						 pin, -1);
	}
}

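/**
 * dm_dmub_hw_init() - Copy DMUB firmware into framebuffer memory and start it
 * @adev: amdgpu device
 *
 * Copies the firmware instruction/data windows and VBIOS image into the
 * regions reserved by dm_dmub_sw_init(), programs the DMUB hardware, waits
 * for the firmware auto-load, and registers the DC DMUB service.
 *
 * Return: 0 on success (including ASICs without DMUB), negative errno on
 * failure.
 */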
static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
	const struct dmcub_firmware_header_v1_0 *hdr;
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
	const struct firmware *dmub_fw = adev->dm.dmub_fw;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	struct abm *abm = adev->dm.dc->res_pool->abm;
	struct dmub_srv_hw_params hw_params;
	enum dmub_status status;
	const unsigned char *fw_inst_const, *fw_bss_data;
	uint32_t i, fw_inst_const_size, fw_bss_data_size;
	bool has_hw_support;

	if (!dmub_srv)
		/* DMUB isn't supported on the ASIC. */
		return 0;

	if (!fb_info) {
		DRM_ERROR("No framebuffer info for DMUB service.\n");
		return -EINVAL;
	}

	if (!dmub_fw) {
		/* Firmware required for DMUB support. */
		DRM_ERROR("No firmware provided for DMUB.\n");
		return -EINVAL;
	}

	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
		return -EINVAL;
	}

	if (!has_hw_support) {
		DRM_INFO("DMUB unsupported on ASIC\n");
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

	fw_inst_const = dmub_fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
			PSP_HEADER_BYTES;

	fw_bss_data = dmub_fw->data +
		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		      le32_to_cpu(hdr->inst_const_bytes);

	/* Copy firmware and bios info into FB memory. */
	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
	 * amdgpu_ucode_init_single_fw will load dmub firmware
	 * fw_inst_const part to cw0; otherwise, the firmware back door load
	 * will be done by dm_dmub_hw_init
	 */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
		       fw_inst_const_size);
	}

	if (fw_bss_data_size)
		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
		       fw_bss_data, fw_bss_data_size);

	/* Copy firmware bios info into FB memory. */
	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
	       adev->bios_size);

	/* Reset regions that need to be reset. */
	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

	/* Initialize hardware. */
	memset(&hw_params, 0, sizeof(hw_params));
	hw_params.fb_base = adev->gmc.fb_start;
	hw_params.fb_offset = adev->gmc.aper_base;

	/* backdoor load firmware and trigger dmub running */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		hw_params.load_inst_const = true;

	if (dmcu)
		hw_params.psp_version = dmcu->psp_version;

	for (i = 0; i < fb_info->num_fb; ++i)
		hw_params.fb[i] = &fb_info->fb[i];

	status = dmub_srv_hw_init(dmub_srv, &hw_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
		return -EINVAL;
	}

	/* Wait for firmware load to finish. */
	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
	if (status != DMUB_STATUS_OK)
		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

	/* Init DMCU and ABM if available. */
	if (dmcu && abm) {
		dmcu->funcs->dmcu_init(dmcu);
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
	}

	adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
	if (!adev->dm.dc->ctx->dmub_srv) {
		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
		return -ENOMEM;
	}

	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
		 adev->dm.dmcub_fw_version);

	return 0;
}

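/**
 * amdgpu_check_debugfs_connector_property_change() - Apply debugfs DSC overrides
 * @adev: amdgpu device
 * @state: atomic state being built
 *
 * If any connector carries forced DSC settings from debugfs, pull its
 * connector and CRTC states into @state and mark the mode as changed so the
 * overrides take effect on the next commit.
 */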
static void amdgpu_check_debugfs_connector_property_change(struct amdgpu_device *adev,
							   struct drm_atomic_state *state)
{
	struct drm_connector *connector;
	struct drm_crtc *crtc;
	struct amdgpu_dm_connector *amdgpu_dm_connector;
	struct drm_connector_state *conn_state;
	struct dm_crtc_state *acrtc_state;
	struct drm_crtc_state *crtc_state;
	struct dc_stream_state *stream;
	struct drm_device *dev = adev_to_drm(adev);

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
		conn_state = connector->state;

		if (!(conn_state && conn_state->crtc))
			continue;

		crtc = conn_state->crtc;
		acrtc_state = to_dm_crtc_state(crtc->state);

		if (!(acrtc_state && acrtc_state->stream))
			continue;

		stream = acrtc_state->stream;

		if (amdgpu_dm_connector->dsc_settings.dsc_force_enable ||
		    amdgpu_dm_connector->dsc_settings.dsc_num_slices_v ||
		    amdgpu_dm_connector->dsc_settings.dsc_num_slices_h ||
		    amdgpu_dm_connector->dsc_settings.dsc_bits_per_pixel) {
			conn_state = drm_atomic_get_connector_state(state, connector);
			crtc_state = drm_atomic_get_crtc_state(state, crtc);
			crtc_state->mode_changed = true;
		}
	}
}

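/**
 * amdgpu_dm_init() - Create and initialize the display manager
 * @adev: amdgpu device
 *
 * Initializes IRQ support, creates the DC instance with ASIC-specific
 * feature flags, brings up DMUB and DC hardware, and registers the DRM
 * device structures plus the freesync and HDCP modules.
 *
 * Return: 0 on success, -EINVAL on failure.
 */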
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
	struct dc_init_data init_data;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct dc_callback_init init_params;
#endif
	int r;

	adev->dm.ddev = adev_to_drm(adev);
	adev->dm.adev = adev;

	/* Zero all the fields */
	memset(&init_data, 0, sizeof(init_data));
#ifdef CONFIG_DRM_AMD_DC_HDCP
	memset(&init_params, 0, sizeof(init_params));
#endif

	mutex_init(&adev->dm.dc_lock);
	mutex_init(&adev->dm.audio_lock);

	if (amdgpu_dm_irq_init(adev)) {
		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
		goto error;
	}

	init_data.asic_id.chip_family = adev->family;

	init_data.asic_id.pci_revision_id = adev->pdev->revision;
	init_data.asic_id.hw_internal_rev = adev->external_rev_id;

	init_data.asic_id.vram_width = adev->gmc.vram_width;
	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
	init_data.asic_id.atombios_base_address =
		adev->mode_info.atom_context->bios;

	init_data.driver = adev;

	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

	if (!adev->dm.cgs_device) {
		DRM_ERROR("amdgpu: failed to create cgs device.\n");
		goto error;
	}

	init_data.cgs_device = adev->dm.cgs_device;

	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

	switch (adev->asic_type) {
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_RAVEN:
	case CHIP_RENOIR:
		init_data.flags.gpu_vm_support = true;
		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
			init_data.flags.disable_dmcu = true;
		break;
	default:
		break;
	}

	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
		init_data.flags.fbc_support = true;

	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
		init_data.flags.multi_mon_pp_mclk_switch = true;

	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
		init_data.flags.disable_fractional_pwm = true;

	init_data.flags.power_down_display_on_boot = true;

	init_data.soc_bounding_box = adev->dm.soc_bounding_box;

	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);

	if (adev->dm.dc) {
		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
	} else {
		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
		goto error;
	}

	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
		adev->dm.dc->debug.force_single_disp_pipe_split = false;
		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
	}

	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
		adev->dm.dc->debug.disable_stutter = !(amdgpu_pp_feature_mask & PP_STUTTER_MODE);

	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
		adev->dm.dc->debug.disable_stutter = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
		adev->dm.dc->debug.disable_dsc = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
		adev->dm.dc->debug.disable_clock_gate = true;

	r = dm_dmub_hw_init(adev);
	if (r) {
		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
		goto error;
	}

	dc_hardware_init(adev->dm.dc);

	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module) {
		DRM_ERROR("amdgpu: failed to initialize freesync_module.\n");
	} else
		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				 adev->dm.freesync_module);

	amdgpu_dm_init_color_mod();

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);

		if (!adev->dm.hdcp_workqueue)
			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
		else
			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);

		dc_init_callbacks(adev->dm.dc, &init_params);
	}
#endif
	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR("amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	/* create fake encoders for MST */
	dm_dp_create_fake_mst_encoders(adev);

	/* TODO: Add_display_info? */

	/* TODO use dynamic cursor width */
	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
		DRM_ERROR("amdgpu: failed to initialize vblank for display support.\n");
		goto error;
	}

	DRM_DEBUG_DRIVER("KMS initialized.\n");

	return 0;
error:
	amdgpu_dm_fini(adev);

	return -EINVAL;
}

static void amdgpu_dm_fini(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->dm.display_indexes_num; i++)
		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);

	amdgpu_dm_audio_fini(adev);

	amdgpu_dm_destroy_drm_device(&adev->dm);

#ifdef CONFIG_DRM_AMD_DC_HDCP
	if (adev->dm.hdcp_workqueue) {
		hdcp_destroy(adev->dm.hdcp_workqueue);
		adev->dm.hdcp_workqueue = NULL;
	}

	if (adev->dm.dc)
		dc_deinit_callbacks(adev->dm.dc);
#endif
	if (adev->dm.dc->ctx->dmub_srv) {
		dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
		adev->dm.dc->ctx->dmub_srv = NULL;
	}

	if (adev->dm.dmub_bo)
		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
				      &adev->dm.dmub_bo_gpu_addr,
				      &adev->dm.dmub_bo_cpu_addr);

	/* DC Destroy TODO: Replace destroy DAL */
	if (adev->dm.dc)
		dc_destroy(&adev->dm.dc);
	/*
	 * TODO: pageflip, vblank interrupt
	 *
	 * amdgpu_dm_irq_fini(adev);
	 */

	if (adev->dm.cgs_device) {
		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
		adev->dm.cgs_device = NULL;
	}
	if (adev->dm.freesync_module) {
		mod_freesync_destroy(adev->dm.freesync_module);
		adev->dm.freesync_module = NULL;
	}

	mutex_destroy(&adev->dm.audio_lock);
	mutex_destroy(&adev->dm.dc_lock);
}

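/**
 * load_dmcu_fw() - Request and register DMCU firmware when required
 * @adev: amdgpu device
 *
 * Most ASICs need no separate DMCU firmware and return early. For Raven
 * (Picasso/Raven2) and Navi12, the ERAM and interrupt-vector sections are
 * registered with the PSP loader. A missing firmware file is not an error.
 *
 * Return: 0 on success or when firmware isn't needed, negative errno
 * otherwise.
 */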
static int load_dmcu_fw(struct amdgpu_device *adev)
{
	const char *fw_name_dmcu = NULL;
	int r;
	const struct dmcu_firmware_header_v1_0 *hdr;

	switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_OLAND:
#endif
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_RENOIR:
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
#endif
		return 0;
	case CHIP_NAVI12:
		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
		break;
	case CHIP_RAVEN:
		if (ASICREV_IS_PICASSO(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
		else
			return 0;
		break;
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
		return 0;
	}

	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
	if (r == -ENOENT) {
		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
		adev->dm.fw_dmcu = NULL;
		return 0;
	}
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
			fw_name_dmcu);
		return r;
	}

	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
	if (r) {
		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
			fw_name_dmcu);
		release_firmware(adev->dm.fw_dmcu);
		adev->dm.fw_dmcu = NULL;
		return r;
	}

	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);

	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);

	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");

	return 0;
}

static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
{
	struct amdgpu_device *adev = ctx;

	return dm_read_reg(adev->dm.dc->ctx, address);
}

static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
				     uint32_t value)
{
	struct amdgpu_device *adev = ctx;

	return dm_write_reg(adev->dm.dc->ctx, address, value);
}

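/**
 * dm_dmub_sw_init() - Create the DMUB service and reserve its framebuffer
 * @adev: amdgpu device
 *
 * Requests the ASIC-specific DMUB firmware, creates the DMUB service,
 * computes the sizes of all firmware regions, and allocates a VRAM buffer
 * backing them. The hardware itself is started later by dm_dmub_hw_init().
 *
 * Return: 0 on success (including ASICs without DMUB), negative errno on
 * failure.
 */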
static int dm_dmub_sw_init(struct amdgpu_device *adev)
{
	struct dmub_srv_create_params create_params;
	struct dmub_srv_region_params region_params;
	struct dmub_srv_region_info region_info;
	struct dmub_srv_fb_params fb_params;
	struct dmub_srv_fb_info *fb_info;
	struct dmub_srv *dmub_srv;
	const struct dmcub_firmware_header_v1_0 *hdr;
	const char *fw_name_dmub;
	enum dmub_asic dmub_asic;
	enum dmub_status status;
	int r;

	switch (adev->asic_type) {
	case CHIP_RENOIR:
		dmub_asic = DMUB_ASIC_DCN21;
		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
			fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
		break;
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
	case CHIP_SIENNA_CICHLID:
		dmub_asic = DMUB_ASIC_DCN30;
		fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
		break;
	case CHIP_NAVY_FLOUNDER:
		dmub_asic = DMUB_ASIC_DCN30;
		fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
		break;
#endif
	default:
		/* ASIC doesn't support DMUB. */
		return 0;
	}

	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
	if (r) {
		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
		return 0;
	}

	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
	if (r) {
		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
			AMDGPU_UCODE_ID_DMCUB;
		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
			adev->dm.dmub_fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);

		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
			 adev->dm.dmcub_fw_version);
	}

	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
	dmub_srv = adev->dm.dmub_srv;

	if (!dmub_srv) {
		DRM_ERROR("Failed to allocate DMUB service!\n");
		return -ENOMEM;
	}

	memset(&create_params, 0, sizeof(create_params));
	create_params.user_ctx = adev;
	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
	create_params.asic = dmub_asic;

	/* Create the DMUB service. */
	status = dmub_srv_create(dmub_srv, &create_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error creating DMUB service: %d\n", status);
		return -EINVAL;
	}

	/* Calculate the size of all the regions for the DMUB service. */
	memset(&region_params, 0, sizeof(region_params));

	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
	region_params.vbios_size = adev->bios_size;
	region_params.fw_bss_data = region_params.bss_data_size ?
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		le32_to_cpu(hdr->inst_const_bytes) : NULL;
	region_params.fw_inst_const =
		adev->dm.dmub_fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		PSP_HEADER_BYTES;

	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
					   &region_info);

	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
		return -EINVAL;
	}

	/*
	 * Allocate a framebuffer based on the total size of all the regions.
	 * TODO: Move this into GART.
	 */
	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
				    &adev->dm.dmub_bo_gpu_addr,
				    &adev->dm.dmub_bo_cpu_addr);
	if (r)
		return r;

	/* Rebase the regions on the framebuffer address. */
	memset(&fb_params, 0, sizeof(fb_params));
	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
	fb_params.region_info = &region_info;

	adev->dm.dmub_fb_info =
		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
	fb_info = adev->dm.dmub_fb_info;

	if (!fb_info) {
		DRM_ERROR(
			"Failed to allocate framebuffer info for DMUB service!\n");
		return -ENOMEM;
	}

	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
		return -EINVAL;
	}

	return 0;
}

static int dm_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = dm_dmub_sw_init(adev);
	if (r)
		return r;

	return load_dmcu_fw(adev);
}

static int dm_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	kfree(adev->dm.dmub_fb_info);
	adev->dm.dmub_fb_info = NULL;

	if (adev->dm.dmub_srv) {
		dmub_srv_destroy(adev->dm.dmub_srv);
		adev->dm.dmub_srv = NULL;
	}

	release_firmware(adev->dm.dmub_fw);
	adev->dm.dmub_fw = NULL;

	release_firmware(adev->dm.fw_dmcu);
	adev->dm.fw_dmcu = NULL;

	return 0;
}

static int detect_mst_link_for_all_connectors(struct drm_device *dev)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	int ret = 0;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type == dc_connection_mst_branch &&
		    aconnector->mst_mgr.aux) {
			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
					 aconnector,
					 aconnector->base.base.id);

			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
			if (ret < 0) {
				DRM_ERROR("DM_MST: Failed to start MST\n");
				aconnector->dc_link->type =
					dc_connection_single;
				break;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	return ret;
}

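/**
 * dm_late_init() - Late display initialization
 * @handle: the amdgpu device
 *
 * Loads the ABM IRAM parameters (backlight ramping and minimum level) into
 * the DMCU, or into DMUB when ABM runs there, then kicks off MST link
 * detection for all connectors.
 *
 * Return: 0 on success, negative errno on failure.
 */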
static int dm_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct dmcu_iram_parameters params;
	unsigned int linear_lut[16];
	int i;
	struct dmcu *dmcu = NULL;
	bool ret = true;

	dmcu = adev->dm.dc->res_pool->dmcu;

	for (i = 0; i < 16; i++)
		linear_lut[i] = 0xFFFF * i / 15;

	params.set = 0;
	params.backlight_ramping_start = 0xCCCC;
	params.backlight_ramping_reduction = 0xCCCCCCCC;
	params.backlight_lut_array_size = 16;
	params.backlight_lut_array = linear_lut;

	/* Min backlight level after ABM reduction; don't allow below 1%:
	 * 0xFFFF * 0.01 = 0x28F
	 */
	params.min_abm_backlight = 0x28F;

	/* In the case where abm is implemented on dmcub,
	 * dmcu object will be null.
	 * ABM 2.4 and up are implemented on dmcub.
	 */
	if (dmcu)
		ret = dmcu_load_iram(dmcu, params);
	else if (adev->dm.dc->ctx->dmub_srv)
		ret = dmub_init_abm_config(adev->dm.dc->res_pool, params);

	if (!ret)
		return -EINVAL;

	return detect_mst_link_for_all_connectors(adev_to_drm(adev));
}

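/**
 * s3_handle_mst() - Suspend or resume all MST topology managers
 * @dev: DRM device
 * @suspend: true on suspend, false on resume
 *
 * On resume, a topology that fails to come back is torn down, and a hotplug
 * event is sent so userspace can re-probe.
 */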
static void s3_handle_mst(struct drm_device *dev, bool suspend)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct drm_dp_mst_topology_mgr *mgr;
	int ret;
	bool need_hotplug = false;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		aconnector = to_amdgpu_dm_connector(connector);
		if (aconnector->dc_link->type != dc_connection_mst_branch ||
		    aconnector->mst_port)
			continue;

		mgr = &aconnector->mst_mgr;

		if (suspend) {
			drm_dp_mst_topology_mgr_suspend(mgr);
		} else {
			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
			if (ret < 0) {
				drm_dp_mst_topology_mgr_set_mst(mgr, false);
				need_hotplug = true;
			}
		}
	}
	drm_connector_list_iter_end(&iter);

	if (need_hotplug)
		drm_kms_helper_hotplug_event(dev);
}

static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
{
	struct smu_context *smu = &adev->smu;
	int ret = 0;

	if (!is_support_sw_smu(adev))
		return 0;

	/* This interface is for dGPU Navi1x. The Linux dc-pplib interface
	 * depends on the Windows driver DC implementation.
	 * For Navi1x, the clock settings of the dcn watermarks are fixed. The
	 * settings should be passed to smu during boot up and resume from s3.
	 * Boot up: dc calculates the dcn watermark clock settings within
	 * dc_create, dcn20_resource_construct, then calls the pplib functions
	 * below to pass the settings to smu:
	 * smu_set_watermarks_for_clock_ranges
	 * smu_set_watermarks_table
	 * navi10_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Renoir, the clock settings of the dcn watermarks are also fixed
	 * values. dc has implemented a different flow for the window driver:
	 * dc_hardware_init / dc_set_power_state
	 * dcn10_init_hw
	 * notify_wm_ranges
	 * set_wm_ranges
	 * -- Linux
	 * smu_set_watermarks_for_clock_ranges
	 * renoir_set_watermarks_table
	 * smu_write_watermarks_table
	 *
	 * For Linux,
	 * dc_hardware_init -> amdgpu_dm_init
	 * dc_set_power_state --> dm_resume
	 *
	 * therefore, this function applies to navi10/12/14 but not Renoir
	 */
	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
		break;
	default:
		return 0;
	}

	ret = smu_write_watermarks_table(smu);
	if (ret) {
		DRM_ERROR("Failed to update WMTABLE!\n");
		return ret;
	}

	return 0;
}

/**
 * dm_hw_init() - Initialize DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Initialize the &struct amdgpu_display_manager device. This involves calling
 * the initializers of each DM component, then populating the struct with them.
 *
 * Although the function implies hardware initialization, both hardware and
 * software are initialized here. Splitting them out to their relevant init
 * hooks is a future TODO item.
 *
 * Some notable things that are initialized here:
 *
 * - Display Core, both software and hardware
 * - DC modules that we need (freesync and color management)
 * - DRM software states
 * - Interrupt sources and handlers
 * - Vblank support
 * - Debug FS entries, if enabled
 */
static int dm_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* Create DAL display manager */
	amdgpu_dm_init(adev);
	amdgpu_dm_hpd_init(adev);

	return 0;
}

/**
 * dm_hw_fini() - Teardown DC device
 * @handle: The base driver device containing the amdgpu_dm device.
 *
 * Teardown components within &struct amdgpu_display_manager that require
 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
 * were loaded. Also flush IRQ workqueues and disable them.
 */
static int dm_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dm_hpd_fini(adev);

	amdgpu_dm_irq_fini(adev);
	amdgpu_dm_fini(adev);
	return 0;
}

static int dm_enable_vblank(struct drm_crtc *crtc);
static void dm_disable_vblank(struct drm_crtc *crtc);

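/**
 * dm_gpureset_toggle_interrupts() - Toggle pflip/vblank IRQs around GPU reset
 * @adev: amdgpu device
 * @state: DC state whose streams determine the affected CRTCs
 * @enable: true to enable interrupts, false to disable
 */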
static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
					  struct dc_state *state, bool enable)
{
	enum dc_irq_source irq_source;
	struct amdgpu_crtc *acrtc;
	int rc = -EBUSY;
	int i = 0;

	for (i = 0; i < state->stream_count; i++) {
		acrtc = get_crtc_by_otg_inst(
				adev, state->stream_status[i].primary_otg_inst);

		if (acrtc && state->stream_status[i].plane_count != 0) {
			irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
			rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
			DRM_DEBUG("crtc %d - pflip irq %sabling: r=%d\n",
				  acrtc->crtc_id, enable ? "en" : "dis", rc);
			if (rc)
				DRM_WARN("Failed to %s pflip interrupts\n",
					 enable ? "enable" : "disable");

			if (enable) {
				rc = dm_enable_vblank(&acrtc->base);
				if (rc)
					DRM_WARN("Failed to enable vblank interrupts\n");
			} else {
				dm_disable_vblank(&acrtc->base);
			}
		}
	}
}

1680 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
1681 {
1682 	struct dc_state *context = NULL;
1683 	enum dc_status res = DC_ERROR_UNEXPECTED;
1684 	int i;
1685 	struct dc_stream_state *del_streams[MAX_PIPES];
1686 	int del_streams_count = 0;
1687 
1688 	memset(del_streams, 0, sizeof(del_streams));
1689 
1690 	context = dc_create_state(dc);
1691 	if (context == NULL)
1692 		goto context_alloc_fail;
1693 
1694 	dc_resource_state_copy_construct_current(dc, context);
1695 
1696 	/* First remove from context all streams */
1697 	for (i = 0; i < context->stream_count; i++) {
1698 		struct dc_stream_state *stream = context->streams[i];
1699 
1700 		del_streams[del_streams_count++] = stream;
1701 	}
1702 
1703 	/* Remove all planes for removed streams and then remove the streams */
1704 	for (i = 0; i < del_streams_count; i++) {
1705 		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
1706 			res = DC_FAIL_DETACH_SURFACES;
1707 			goto fail;
1708 		}
1709 
1710 		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
1711 		if (res != DC_OK)
1712 			goto fail;
1713 	}
1714 
1715 
1716 	res = dc_validate_global_state(dc, context, false);
1717 
1718 	if (res != DC_OK) {
1719 		DRM_ERROR("%s: resource validation failed, dc_status: %d\n", __func__, res);
1720 		goto fail;
1721 	}
1722 
1723 	res = dc_commit_state(dc, context);
1724 
1725 fail:
1726 	dc_release_state(context);
1727 
1728 context_alloc_fail:
1729 	return res;
1730 }
1731 
1732 static int dm_suspend(void *handle)
1733 {
1734 	struct amdgpu_device *adev = handle;
1735 	struct amdgpu_display_manager *dm = &adev->dm;
1736 	int ret = 0;
1737 
1738 	if (amdgpu_in_reset(adev)) {
1739 		mutex_lock(&dm->dc_lock);
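		/*
		 * dc_lock stays held across the GPU reset; the matching
		 * mutex_unlock() is in the amdgpu_in_reset() path of
		 * dm_resume() below.
		 */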
1740 		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
1741 
1742 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
1743 
1744 		amdgpu_dm_commit_zero_streams(dm->dc);
1745 
1746 		amdgpu_dm_irq_suspend(adev);
1747 
1748 		return ret;
1749 	}
1750 
1751 	WARN_ON(adev->dm.cached_state);
1752 	adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
1753 
1754 	s3_handle_mst(adev_to_drm(adev), true);
1755 
1756 	amdgpu_dm_irq_suspend(adev);
1757 
1758 
1759 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
1760 
1761 	return 0;
1762 }
1763 
1764 static struct amdgpu_dm_connector *
1765 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
1766 					     struct drm_crtc *crtc)
1767 {
1768 	uint32_t i;
1769 	struct drm_connector_state *new_con_state;
1770 	struct drm_connector *connector;
1771 	struct drm_crtc *crtc_from_state;
1772 
1773 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
1774 		crtc_from_state = new_con_state->crtc;
1775 
1776 		if (crtc_from_state == crtc)
1777 			return to_amdgpu_dm_connector(connector);
1778 	}
1779 
1780 	return NULL;
1781 }
1782 
1783 static void emulated_link_detect(struct dc_link *link)
1784 {
1785 	struct dc_sink_init_data sink_init_data = { 0 };
1786 	struct display_sink_capability sink_caps = { 0 };
1787 	enum dc_edid_status edid_status;
1788 	struct dc_context *dc_ctx = link->ctx;
1789 	struct dc_sink *sink = NULL;
1790 	struct dc_sink *prev_sink = NULL;
1791 
1792 	link->type = dc_connection_none;
1793 	prev_sink = link->local_sink;
1794 
1795 	if (prev_sink != NULL)
1796 		dc_sink_retain(prev_sink);
1797 
1798 	switch (link->connector_signal) {
1799 	case SIGNAL_TYPE_HDMI_TYPE_A: {
1800 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1801 		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
1802 		break;
1803 	}
1804 
1805 	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
1806 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1807 		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
1808 		break;
1809 	}
1810 
1811 	case SIGNAL_TYPE_DVI_DUAL_LINK: {
1812 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1813 		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
1814 		break;
1815 	}
1816 
1817 	case SIGNAL_TYPE_LVDS: {
1818 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1819 		sink_caps.signal = SIGNAL_TYPE_LVDS;
1820 		break;
1821 	}
1822 
1823 	case SIGNAL_TYPE_EDP: {
1824 		sink_caps.transaction_type =
1825 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1826 		sink_caps.signal = SIGNAL_TYPE_EDP;
1827 		break;
1828 	}
1829 
1830 	case SIGNAL_TYPE_DISPLAY_PORT: {
1831 		sink_caps.transaction_type =
1832 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1833 		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
1834 		break;
1835 	}
1836 
1837 	default:
1838 		DC_ERROR("Invalid connector type! signal:%d\n",
1839 			link->connector_signal);
1840 		return;
1841 	}
1842 
1843 	sink_init_data.link = link;
1844 	sink_init_data.sink_signal = sink_caps.signal;
1845 
1846 	sink = dc_sink_create(&sink_init_data);
1847 	if (!sink) {
1848 		DC_ERROR("Failed to create sink!\n");
1849 		return;
1850 	}
1851 
1852 	/* dc_sink_create returns a new reference */
1853 	link->local_sink = sink;
1854 
1855 	edid_status = dm_helpers_read_local_edid(
1856 			link->ctx,
1857 			link,
1858 			sink);
1859 
1860 	if (edid_status != EDID_OK)
1861 		DC_ERROR("Failed to read EDID\n");
1862 
1863 }
1864 
1865 static void dm_gpureset_commit_state(struct dc_state *dc_state,
1866 				     struct amdgpu_display_manager *dm)
1867 {
1868 	struct {
1869 		struct dc_surface_update surface_updates[MAX_SURFACES];
1870 		struct dc_plane_info plane_infos[MAX_SURFACES];
1871 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
1872 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
1873 		struct dc_stream_update stream_update;
1874 	} *bundle;
1875 	int k, m;
1876 
1877 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
1878 
1879 	if (!bundle) {
1880 		dm_error("Failed to allocate update bundle\n");
1881 		goto cleanup;
1882 	}
1883 
1884 	for (k = 0; k < dc_state->stream_count; k++) {
1885 		bundle->stream_update.stream = dc_state->streams[k];
1886 
1887 		for (m = 0; m < dc_state->stream_status[k].plane_count; m++) {
1888 			bundle->surface_updates[m].surface =
1889 				dc_state->stream_status[k].plane_states[m];
1890 			bundle->surface_updates[m].surface->force_full_update =
1891 				true;
1892 		}
1893 		dc_commit_updates_for_stream(
1894 			dm->dc, bundle->surface_updates,
1895 			dc_state->stream_status[k].plane_count,
1896 			dc_state->streams[k], &bundle->stream_update, dc_state);
1897 	}
1898 
1899 cleanup:
1900 	kfree(bundle);
1901 
1902 	return;
1903 }
1904 
1905 static int dm_resume(void *handle)
1906 {
1907 	struct amdgpu_device *adev = handle;
1908 	struct drm_device *ddev = adev_to_drm(adev);
1909 	struct amdgpu_display_manager *dm = &adev->dm;
1910 	struct amdgpu_dm_connector *aconnector;
1911 	struct drm_connector *connector;
1912 	struct drm_connector_list_iter iter;
1913 	struct drm_crtc *crtc;
1914 	struct drm_crtc_state *new_crtc_state;
1915 	struct dm_crtc_state *dm_new_crtc_state;
1916 	struct drm_plane *plane;
1917 	struct drm_plane_state *new_plane_state;
1918 	struct dm_plane_state *dm_new_plane_state;
1919 	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
1920 	enum dc_connection_type new_connection_type = dc_connection_none;
1921 	struct dc_state *dc_state;
1922 	int i, r, j;
1923 
1924 	if (amdgpu_in_reset(adev)) {
1925 		dc_state = dm->cached_dc_state;
1926 
1927 		r = dm_dmub_hw_init(adev);
1928 		if (r)
1929 			DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1930 
1931 		dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
1932 		dc_resume(dm->dc);
1933 
1934 		amdgpu_dm_irq_resume_early(adev);
1935 
1936 		for (i = 0; i < dc_state->stream_count; i++) {
1937 			dc_state->streams[i]->mode_changed = true;
1938 			for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
1939 				dc_state->stream_status[i].plane_states[j]->update_flags.raw
1940 					= 0xffffffff;
1941 			}
1942 		}
1943 
1944 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
1945 
1946 		dm_gpureset_commit_state(dm->cached_dc_state, dm);
1947 
1948 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
1949 
1950 		dc_release_state(dm->cached_dc_state);
1951 		dm->cached_dc_state = NULL;
1952 
1953 		amdgpu_dm_irq_resume_late(adev);
1954 
1955 		mutex_unlock(&dm->dc_lock);
1956 
1957 		return 0;
1958 	}
1959 	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
1960 	dc_release_state(dm_state->context);
1961 	dm_state->context = dc_create_state(dm->dc);
1962 	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
1963 	dc_resource_state_construct(dm->dc, dm_state->context);
1964 
1965 	/* Before powering on DC we need to re-initialize DMUB. */
1966 	r = dm_dmub_hw_init(adev);
1967 	if (r)
1968 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1969 
1970 	/* power on hardware */
1971 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
1972 
1973 	/* program HPD filter */
1974 	dc_resume(dm->dc);
1975 
1976 	/*
1977 	 * early enable HPD Rx IRQ, should be done before set mode as short
1978 	 * pulse interrupts are used for MST
1979 	 */
1980 	amdgpu_dm_irq_resume_early(adev);
1981 
1982 	/* On resume we need to rewrite the MSTM control bits to enable MST */
1983 	s3_handle_mst(ddev, false);
1984 
1985 	/* Do detection */
1986 	drm_connector_list_iter_begin(ddev, &iter);
1987 	drm_for_each_connector_iter(connector, &iter) {
1988 		aconnector = to_amdgpu_dm_connector(connector);
1989 
1990 		/*
1991 		 * this is the case when traversing through already created
1992 		 * MST connectors; they should be skipped
1993 		 */
1994 		if (aconnector->mst_port)
1995 			continue;
1996 
1997 		mutex_lock(&aconnector->hpd_lock);
1998 		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
1999 			DRM_ERROR("KMS: Failed to detect connector\n");
2000 
2001 		if (aconnector->base.force && new_connection_type == dc_connection_none)
2002 			emulated_link_detect(aconnector->dc_link);
2003 		else
2004 			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2005 
2006 		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2007 			aconnector->fake_enable = false;
2008 
2009 		if (aconnector->dc_sink)
2010 			dc_sink_release(aconnector->dc_sink);
2011 		aconnector->dc_sink = NULL;
2012 		amdgpu_dm_update_connector_after_detect(aconnector);
2013 		mutex_unlock(&aconnector->hpd_lock);
2014 	}
2015 	drm_connector_list_iter_end(&iter);
2016 
2017 	/* Force mode set in atomic commit */
2018 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2019 		new_crtc_state->active_changed = true;
2020 
2021 	/*
2022 	 * atomic_check is expected to create the dc states. We need to release
2023 	 * them here, since they were duplicated as part of the suspend
2024 	 * procedure.
2025 	 */
2026 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2027 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2028 		if (dm_new_crtc_state->stream) {
2029 			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2030 			dc_stream_release(dm_new_crtc_state->stream);
2031 			dm_new_crtc_state->stream = NULL;
2032 		}
2033 	}
2034 
2035 	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2036 		dm_new_plane_state = to_dm_plane_state(new_plane_state);
2037 		if (dm_new_plane_state->dc_state) {
2038 			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2039 			dc_plane_state_release(dm_new_plane_state->dc_state);
2040 			dm_new_plane_state->dc_state = NULL;
2041 		}
2042 	}
2043 
2044 	drm_atomic_helper_resume(ddev, dm->cached_state);
2045 
2046 	dm->cached_state = NULL;
2047 
2048 	amdgpu_dm_irq_resume_late(adev);
2049 
2050 	amdgpu_dm_smu_write_watermarks_table(adev);
2051 
2052 	return 0;
2053 }
2054 
2055 /**
2056  * DOC: DM Lifecycle
2057  *
2058  * DM (and consequently DC) is registered in the amdgpu base driver as an IP
2059  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2060  * the base driver's device list to be initialized and torn down accordingly.
2061  *
2062  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2063  */
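
/*
 * A minimal sketch of how the base driver is expected to pick this block up
 * (illustrative only; the actual call site lives in the SoC setup code, not
 * in this file):
 *
 *	if (amdgpu_device_has_dc_support(adev))
 *		amdgpu_device_ip_block_add(adev, &dm_ip_block);
 *
 * dm_ip_block is defined further below in this file.
 */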
2064 
2065 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2066 	.name = "dm",
2067 	.early_init = dm_early_init,
2068 	.late_init = dm_late_init,
2069 	.sw_init = dm_sw_init,
2070 	.sw_fini = dm_sw_fini,
2071 	.hw_init = dm_hw_init,
2072 	.hw_fini = dm_hw_fini,
2073 	.suspend = dm_suspend,
2074 	.resume = dm_resume,
2075 	.is_idle = dm_is_idle,
2076 	.wait_for_idle = dm_wait_for_idle,
2077 	.check_soft_reset = dm_check_soft_reset,
2078 	.soft_reset = dm_soft_reset,
2079 	.set_clockgating_state = dm_set_clockgating_state,
2080 	.set_powergating_state = dm_set_powergating_state,
2081 };
2082 
2083 const struct amdgpu_ip_block_version dm_ip_block =
2084 {
2085 	.type = AMD_IP_BLOCK_TYPE_DCE,
2086 	.major = 1,
2087 	.minor = 0,
2088 	.rev = 0,
2089 	.funcs = &amdgpu_dm_funcs,
2090 };
2091 
2092 
2093 /**
2094  * DOC: atomic
2095  *
2096  * *WIP*
2097  */
2098 
2099 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2100 	.fb_create = amdgpu_display_user_framebuffer_create,
2101 	.output_poll_changed = drm_fb_helper_output_poll_changed,
2102 	.atomic_check = amdgpu_dm_atomic_check,
2103 	.atomic_commit = amdgpu_dm_atomic_commit,
2104 };
2105 
2106 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2107 	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2108 };
2109 
2110 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2111 {
2112 	u32 max_cll, min_cll, max, min, q, r;
2113 	struct amdgpu_dm_backlight_caps *caps;
2114 	struct amdgpu_display_manager *dm;
2115 	struct drm_connector *conn_base;
2116 	struct amdgpu_device *adev;
2117 	struct dc_link *link = NULL;
2118 	static const u8 pre_computed_values[] = {
2119 		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2120 		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2121 
2122 	if (!aconnector || !aconnector->dc_link)
2123 		return;
2124 
2125 	link = aconnector->dc_link;
2126 	if (link->connector_signal != SIGNAL_TYPE_EDP)
2127 		return;
2128 
2129 	conn_base = &aconnector->base;
2130 	adev = drm_to_adev(conn_base->dev);
2131 	dm = &adev->dm;
2132 	caps = &dm->backlight_caps;
2133 	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2134 	caps->aux_support = false;
2135 	max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2136 	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2137 
2138 	if (caps->ext_caps->bits.oled == 1 ||
2139 	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2140 	    caps->ext_caps->bits.hdr_aux_backlight_control == 1)
2141 		caps->aux_support = true;
2142 
2143 	/* From the specification (CTA-861-G), for calculating the maximum
2144 	 * luminance we need to use:
2145 	 *	Luminance = 50*2**(CV/32)
2146 	 * Where CV is a one-byte value.
2147 	 * For calculating this expression we may need floating point precision;
2148 	 * to avoid this complexity level, we take advantage of the fact that CV
2149 	 * is divided by a constant. From Euclid's division algorithm, we know
2150 	 * that CV can be written as: CV = 32*q + r. Next, we replace CV in the
2151 	 * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
2152 	 * need to pre-compute the value of r/32. For pre-computing the values
2153 	 * we just used the following Ruby line:
2154 	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2155 	 * The results of the above expression can be verified against
2156 	 * pre_computed_values.
2157 	 */
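	/*
	 * A worked sketch of the trick above with an illustrative value (not
	 * taken from any particular panel): for max_cll = 70, q = 70 >> 5 = 2
	 * and r = 70 % 32 = 6, so max = (1 << 2) * pre_computed_values[6] =
	 * 4 * 57 = 228, matching round(50 * 2**(70/32.0)) = 228 from the
	 * exact expression.
	 */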
2158 	q = max_cll >> 5;
2159 	r = max_cll % 32;
2160 	max = (1 << q) * pre_computed_values[r];
2161 
2162 	// min luminance: maxLum * (CV/255)^2 / 100
2163 	min = DIV_ROUND_CLOSEST(max * min_cll * min_cll,
2164 				255 * 255 * 100);
2165 
2166 	caps->aux_max_input_signal = max;
2167 	caps->aux_min_input_signal = min;
2168 }
2169 
2170 void amdgpu_dm_update_connector_after_detect(
2171 		struct amdgpu_dm_connector *aconnector)
2172 {
2173 	struct drm_connector *connector = &aconnector->base;
2174 	struct drm_device *dev = connector->dev;
2175 	struct dc_sink *sink;
2176 
2177 	/* MST handled by drm_mst framework */
2178 	if (aconnector->mst_mgr.mst_state)
2179 		return;
2180 
2181 	sink = aconnector->dc_link->local_sink;
2182 	if (sink)
2183 		dc_sink_retain(sink);
2184 
2185 	/*
2186 	 * Edid mgmt connector gets first update only in mode_valid hook and then
2187 	 * the connector sink is set to either fake or physical sink depending on link status.
2188 	 * Skip if already done during boot.
2189 	 */
2190 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2191 			&& aconnector->dc_em_sink) {
2192 
2193 		/*
2194 		 * For headless S3 resume, use the emulated sink (dc_em_sink) to
2195 		 * fake a stream, because on resume connector->sink is set to NULL
2196 		 */
2197 		mutex_lock(&dev->mode_config.mutex);
2198 
2199 		if (sink) {
2200 			if (aconnector->dc_sink) {
2201 				amdgpu_dm_update_freesync_caps(connector, NULL);
2202 				/*
2203 				 * retain and release below are used to
2204 				 * bump up the refcount for the sink because the link doesn't point
2205 				 * to it anymore after disconnect, so on the next crtc-to-connector
2206 				 * reshuffle by UMD we would otherwise get an unwanted dc_sink release
2207 				 */
2208 				dc_sink_release(aconnector->dc_sink);
2209 			}
2210 			aconnector->dc_sink = sink;
2211 			dc_sink_retain(aconnector->dc_sink);
2212 			amdgpu_dm_update_freesync_caps(connector,
2213 					aconnector->edid);
2214 		} else {
2215 			amdgpu_dm_update_freesync_caps(connector, NULL);
2216 			if (!aconnector->dc_sink) {
2217 				aconnector->dc_sink = aconnector->dc_em_sink;
2218 				dc_sink_retain(aconnector->dc_sink);
2219 			}
2220 		}
2221 
2222 		mutex_unlock(&dev->mode_config.mutex);
2223 
2224 		if (sink)
2225 			dc_sink_release(sink);
2226 		return;
2227 	}
2228 
2229 	/*
2230 	 * TODO: temporary guard to look for proper fix
2231 	 * if this sink is MST sink, we should not do anything
2232 	 */
2233 	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2234 		dc_sink_release(sink);
2235 		return;
2236 	}
2237 
2238 	if (aconnector->dc_sink == sink) {
2239 		/*
2240 		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2241 		 * Do nothing!!
2242 		 */
2243 		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2244 				aconnector->connector_id);
2245 		if (sink)
2246 			dc_sink_release(sink);
2247 		return;
2248 	}
2249 
2250 	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2251 		aconnector->connector_id, aconnector->dc_sink, sink);
2252 
2253 	mutex_lock(&dev->mode_config.mutex);
2254 
2255 	/*
2256 	 * 1. Update status of the drm connector
2257 	 * 2. Send an event and let userspace tell us what to do
2258 	 */
2259 	if (sink) {
2260 		/*
2261 		 * TODO: check if we still need the S3 mode update workaround.
2262 		 * If yes, put it here.
2263 		 */
2264 		if (aconnector->dc_sink)
2265 			amdgpu_dm_update_freesync_caps(connector, NULL);
2266 
2267 		aconnector->dc_sink = sink;
2268 		dc_sink_retain(aconnector->dc_sink);
2269 		if (sink->dc_edid.length == 0) {
2270 			aconnector->edid = NULL;
2271 			if (aconnector->dc_link->aux_mode) {
2272 				drm_dp_cec_unset_edid(
2273 					&aconnector->dm_dp_aux.aux);
2274 			}
2275 		} else {
2276 			aconnector->edid =
2277 				(struct edid *)sink->dc_edid.raw_edid;
2278 
2279 			drm_connector_update_edid_property(connector,
2280 							   aconnector->edid);
2281 			drm_add_edid_modes(connector, aconnector->edid);
2282 
2283 			if (aconnector->dc_link->aux_mode)
2284 				drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2285 						    aconnector->edid);
2286 		}
2287 
2288 		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2289 		update_connector_ext_caps(aconnector);
2290 	} else {
2291 		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2292 		amdgpu_dm_update_freesync_caps(connector, NULL);
2293 		drm_connector_update_edid_property(connector, NULL);
2294 		aconnector->num_modes = 0;
2295 		dc_sink_release(aconnector->dc_sink);
2296 		aconnector->dc_sink = NULL;
2297 		aconnector->edid = NULL;
2298 #ifdef CONFIG_DRM_AMD_DC_HDCP
2299 		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2300 		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2301 			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2302 #endif
2303 	}
2304 
2305 	mutex_unlock(&dev->mode_config.mutex);
2306 
2307 	update_subconnector_property(aconnector);
2308 
2309 	if (sink)
2310 		dc_sink_release(sink);
2311 }
2312 
2313 static void handle_hpd_irq(void *param)
2314 {
2315 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2316 	struct drm_connector *connector = &aconnector->base;
2317 	struct drm_device *dev = connector->dev;
2318 	enum dc_connection_type new_connection_type = dc_connection_none;
2319 #ifdef CONFIG_DRM_AMD_DC_HDCP
2320 	struct amdgpu_device *adev = drm_to_adev(dev);
2321 #endif
2322 
2323 	/*
2324 	 * In case of failure or MST there is no need to update the connector status
2325 	 * or notify the OS, since (in the MST case) MST does this in its own context.
2326 	 */
2327 	mutex_lock(&aconnector->hpd_lock);
2328 
2329 #ifdef CONFIG_DRM_AMD_DC_HDCP
2330 	if (adev->dm.hdcp_workqueue)
2331 		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2332 #endif
2333 	if (aconnector->fake_enable)
2334 		aconnector->fake_enable = false;
2335 
2336 	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2337 		DRM_ERROR("KMS: Failed to detect connector\n");
2338 
2339 	if (aconnector->base.force && new_connection_type == dc_connection_none) {
2340 		emulated_link_detect(aconnector->dc_link);
2341 
2342 
2343 		drm_modeset_lock_all(dev);
2344 		dm_restore_drm_connector_state(dev, connector);
2345 		drm_modeset_unlock_all(dev);
2346 
2347 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2348 			drm_kms_helper_hotplug_event(dev);
2349 
2350 	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2351 		amdgpu_dm_update_connector_after_detect(aconnector);
2352 
2353 
2354 		drm_modeset_lock_all(dev);
2355 		dm_restore_drm_connector_state(dev, connector);
2356 		drm_modeset_unlock_all(dev);
2357 
2358 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2359 			drm_kms_helper_hotplug_event(dev);
2360 	}
2361 	mutex_unlock(&aconnector->hpd_lock);
2362 
2363 }
2364 
2365 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2366 {
2367 	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2368 	uint8_t dret;
2369 	bool new_irq_handled = false;
2370 	int dpcd_addr;
2371 	int dpcd_bytes_to_read;
2372 
2373 	const int max_process_count = 30;
2374 	int process_count = 0;
2375 
2376 	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2377 
2378 	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2379 		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2380 		/* DPCD 0x200 - 0x201 for downstream IRQ */
2381 		dpcd_addr = DP_SINK_COUNT;
2382 	} else {
2383 		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2384 		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
2385 		dpcd_addr = DP_SINK_COUNT_ESI;
2386 	}
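
	/*
	 * For reference, the ESI path reads four bytes starting at DPCD 0x2002:
	 * DP_SINK_COUNT_ESI, DP_DEVICE_SERVICE_IRQ_VECTOR_ESI0/ESI1 and
	 * DP_LINK_SERVICE_IRQ_VECTOR_ESI0.
	 */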
2387 
2388 	dret = drm_dp_dpcd_read(
2389 		&aconnector->dm_dp_aux.aux,
2390 		dpcd_addr,
2391 		esi,
2392 		dpcd_bytes_to_read);
2393 
2394 	while (dret == dpcd_bytes_to_read &&
2395 		process_count < max_process_count) {
2396 		uint8_t retry;
2397 		dret = 0;
2398 
2399 		process_count++;
2400 
2401 		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2402 		/* handle HPD short pulse irq */
2403 		if (aconnector->mst_mgr.mst_state)
2404 			drm_dp_mst_hpd_irq(
2405 				&aconnector->mst_mgr,
2406 				esi,
2407 				&new_irq_handled);
2408 
2409 		if (new_irq_handled) {
2410 			/* ACK at DPCD to notify downstream */
2411 			const int ack_dpcd_bytes_to_write =
2412 				dpcd_bytes_to_read - 1;
2413 
2414 			for (retry = 0; retry < 3; retry++) {
2415 				uint8_t wret;
2416 
2417 				wret = drm_dp_dpcd_write(
2418 					&aconnector->dm_dp_aux.aux,
2419 					dpcd_addr + 1,
2420 					&esi[1],
2421 					ack_dpcd_bytes_to_write);
2422 				if (wret == ack_dpcd_bytes_to_write)
2423 					break;
2424 			}
2425 
2426 			/* check if there is new irq to be handled */
2427 			dret = drm_dp_dpcd_read(
2428 				&aconnector->dm_dp_aux.aux,
2429 				dpcd_addr,
2430 				esi,
2431 				dpcd_bytes_to_read);
2432 
2433 			new_irq_handled = false;
2434 		} else {
2435 			break;
2436 		}
2437 	}
2438 
2439 	if (process_count == max_process_count)
2440 		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2441 }
2442 
2443 static void handle_hpd_rx_irq(void *param)
2444 {
2445 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2446 	struct drm_connector *connector = &aconnector->base;
2447 	struct drm_device *dev = connector->dev;
2448 	struct dc_link *dc_link = aconnector->dc_link;
2449 	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2450 	enum dc_connection_type new_connection_type = dc_connection_none;
2451 #ifdef CONFIG_DRM_AMD_DC_HDCP
2452 	union hpd_irq_data hpd_irq_data;
2453 	struct amdgpu_device *adev = drm_to_adev(dev);
2454 
2455 	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2456 #endif
2457 
2458 	/*
2459 	 * TODO: Temporarily add a mutex to protect the hpd interrupt from a gpio
2460 	 * conflict; once the i2c helper is implemented, this mutex should be
2461 	 * retired.
2462 	 */
2463 	if (dc_link->type != dc_connection_mst_branch)
2464 		mutex_lock(&aconnector->hpd_lock);
2465 
2466 
2467 #ifdef CONFIG_DRM_AMD_DC_HDCP
2468 	if (dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL) &&
2469 #else
2470 	if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
2471 #endif
2472 			!is_mst_root_connector) {
2473 		/* Downstream Port status changed. */
2474 		if (!dc_link_detect_sink(dc_link, &new_connection_type))
2475 			DRM_ERROR("KMS: Failed to detect connector\n");
2476 
2477 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
2478 			emulated_link_detect(dc_link);
2479 
2480 			if (aconnector->fake_enable)
2481 				aconnector->fake_enable = false;
2482 
2483 			amdgpu_dm_update_connector_after_detect(aconnector);
2484 
2485 
2486 			drm_modeset_lock_all(dev);
2487 			dm_restore_drm_connector_state(dev, connector);
2488 			drm_modeset_unlock_all(dev);
2489 
2490 			drm_kms_helper_hotplug_event(dev);
2491 		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2492 
2493 			if (aconnector->fake_enable)
2494 				aconnector->fake_enable = false;
2495 
2496 			amdgpu_dm_update_connector_after_detect(aconnector);
2497 
2498 
2499 			drm_modeset_lock_all(dev);
2500 			dm_restore_drm_connector_state(dev, connector);
2501 			drm_modeset_unlock_all(dev);
2502 
2503 			drm_kms_helper_hotplug_event(dev);
2504 		}
2505 	}
2506 #ifdef CONFIG_DRM_AMD_DC_HDCP
2507 	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2508 		if (adev->dm.hdcp_workqueue)
2509 			hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
2510 	}
2511 #endif
2512 	if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2513 	    (dc_link->type == dc_connection_mst_branch))
2514 		dm_handle_hpd_rx_irq(aconnector);
2515 
2516 	if (dc_link->type != dc_connection_mst_branch) {
2517 		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2518 		mutex_unlock(&aconnector->hpd_lock);
2519 	}
2520 }
2521 
2522 static void register_hpd_handlers(struct amdgpu_device *adev)
2523 {
2524 	struct drm_device *dev = adev_to_drm(adev);
2525 	struct drm_connector *connector;
2526 	struct amdgpu_dm_connector *aconnector;
2527 	const struct dc_link *dc_link;
2528 	struct dc_interrupt_params int_params = {0};
2529 
2530 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2531 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2532 
2533 	list_for_each_entry(connector,
2534 			&dev->mode_config.connector_list, head) {
2535 
2536 		aconnector = to_amdgpu_dm_connector(connector);
2537 		dc_link = aconnector->dc_link;
2538 
2539 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2540 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2541 			int_params.irq_source = dc_link->irq_source_hpd;
2542 
2543 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
2544 					handle_hpd_irq,
2545 					(void *) aconnector);
2546 		}
2547 
2548 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2549 
2550 			/* Also register for DP short pulse (hpd_rx). */
2551 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2552 			int_params.irq_source = dc_link->irq_source_hpd_rx;
2553 
2554 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
2555 					handle_hpd_rx_irq,
2556 					(void *) aconnector);
2557 		}
2558 	}
2559 }
2560 
2561 #if defined(CONFIG_DRM_AMD_DC_SI)
2562 /* Register IRQ sources and initialize IRQ callbacks */
2563 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
2564 {
2565 	struct dc *dc = adev->dm.dc;
2566 	struct common_irq_params *c_irq_params;
2567 	struct dc_interrupt_params int_params = {0};
2568 	int r;
2569 	int i;
2570 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2571 
2572 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2573 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2574 
2575 	/*
2576 	 * Actions of amdgpu_irq_add_id():
2577 	 * 1. Register a set() function with base driver.
2578 	 *    Base driver will call set() function to enable/disable an
2579 	 *    interrupt in DC hardware.
2580 	 * 2. Register amdgpu_dm_irq_handler().
2581 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2582 	 *    coming from DC hardware.
2583 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2584 	 *    for acknowledging and handling. */
2585 
2586 	/* Use VBLANK interrupt */
2587 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
2588 		r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
2589 		if (r) {
2590 			DRM_ERROR("Failed to add crtc irq id!\n");
2591 			return r;
2592 		}
2593 
2594 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2595 		int_params.irq_source =
2596 			dc_interrupt_to_irq_source(dc, i + 1, 0);
2597 
2598 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2599 
2600 		c_irq_params->adev = adev;
2601 		c_irq_params->irq_src = int_params.irq_source;
2602 
2603 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2604 				dm_crtc_high_irq, c_irq_params);
2605 	}
2606 
2607 	/* Use GRPH_PFLIP interrupt */
2608 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2609 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2610 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2611 		if (r) {
2612 			DRM_ERROR("Failed to add page flip irq id!\n");
2613 			return r;
2614 		}
2615 
2616 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2617 		int_params.irq_source =
2618 			dc_interrupt_to_irq_source(dc, i, 0);
2619 
2620 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2621 
2622 		c_irq_params->adev = adev;
2623 		c_irq_params->irq_src = int_params.irq_source;
2624 
2625 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2626 				dm_pflip_high_irq, c_irq_params);
2627 
2628 	}
2629 
2630 	/* HPD */
2631 	r = amdgpu_irq_add_id(adev, client_id,
2632 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2633 	if (r) {
2634 		DRM_ERROR("Failed to add hpd irq id!\n");
2635 		return r;
2636 	}
2637 
2638 	register_hpd_handlers(adev);
2639 
2640 	return 0;
2641 }
2642 #endif
2643 
2644 /* Register IRQ sources and initialize IRQ callbacks */
2645 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2646 {
2647 	struct dc *dc = adev->dm.dc;
2648 	struct common_irq_params *c_irq_params;
2649 	struct dc_interrupt_params int_params = {0};
2650 	int r;
2651 	int i;
2652 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2653 
2654 	if (adev->asic_type >= CHIP_VEGA10)
2655 		client_id = SOC15_IH_CLIENTID_DCE;
2656 
2657 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2658 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2659 
2660 	/*
2661 	 * Actions of amdgpu_irq_add_id():
2662 	 * 1. Register a set() function with base driver.
2663 	 *    Base driver will call set() function to enable/disable an
2664 	 *    interrupt in DC hardware.
2665 	 * 2. Register amdgpu_dm_irq_handler().
2666 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2667 	 *    coming from DC hardware.
2668 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2669 	 *    for acknowledging and handling. */
2670 
2671 	/* Use VBLANK interrupt */
2672 	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2673 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2674 		if (r) {
2675 			DRM_ERROR("Failed to add crtc irq id!\n");
2676 			return r;
2677 		}
2678 
2679 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2680 		int_params.irq_source =
2681 			dc_interrupt_to_irq_source(dc, i, 0);
2682 
2683 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2684 
2685 		c_irq_params->adev = adev;
2686 		c_irq_params->irq_src = int_params.irq_source;
2687 
2688 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2689 				dm_crtc_high_irq, c_irq_params);
2690 	}
2691 
2692 	/* Use VUPDATE interrupt */
2693 	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2694 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2695 		if (r) {
2696 			DRM_ERROR("Failed to add vupdate irq id!\n");
2697 			return r;
2698 		}
2699 
2700 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2701 		int_params.irq_source =
2702 			dc_interrupt_to_irq_source(dc, i, 0);
2703 
2704 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2705 
2706 		c_irq_params->adev = adev;
2707 		c_irq_params->irq_src = int_params.irq_source;
2708 
2709 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2710 				dm_vupdate_high_irq, c_irq_params);
2711 	}
2712 
2713 	/* Use GRPH_PFLIP interrupt */
2714 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2715 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2716 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2717 		if (r) {
2718 			DRM_ERROR("Failed to add page flip irq id!\n");
2719 			return r;
2720 		}
2721 
2722 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2723 		int_params.irq_source =
2724 			dc_interrupt_to_irq_source(dc, i, 0);
2725 
2726 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2727 
2728 		c_irq_params->adev = adev;
2729 		c_irq_params->irq_src = int_params.irq_source;
2730 
2731 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2732 				dm_pflip_high_irq, c_irq_params);
2733 
2734 	}
2735 
2736 	/* HPD */
2737 	r = amdgpu_irq_add_id(adev, client_id,
2738 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2739 	if (r) {
2740 		DRM_ERROR("Failed to add hpd irq id!\n");
2741 		return r;
2742 	}
2743 
2744 	register_hpd_handlers(adev);
2745 
2746 	return 0;
2747 }
2748 
2749 #if defined(CONFIG_DRM_AMD_DC_DCN)
2750 /* Register IRQ sources and initialize IRQ callbacks */
2751 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
2752 {
2753 	struct dc *dc = adev->dm.dc;
2754 	struct common_irq_params *c_irq_params;
2755 	struct dc_interrupt_params int_params = {0};
2756 	int r;
2757 	int i;
2758 
2759 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2760 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2761 
2762 	/*
2763 	 * Actions of amdgpu_irq_add_id():
2764 	 * 1. Register a set() function with base driver.
2765 	 *    Base driver will call set() function to enable/disable an
2766 	 *    interrupt in DC hardware.
2767 	 * 2. Register amdgpu_dm_irq_handler().
2768 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2769 	 *    coming from DC hardware.
2770 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2771 	 *    for acknowledging and handling.
2772 	 */
2773 
2774 	/* Use VSTARTUP interrupt */
2775 	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
2776 			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
2777 			i++) {
2778 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
2779 
2780 		if (r) {
2781 			DRM_ERROR("Failed to add crtc irq id!\n");
2782 			return r;
2783 		}
2784 
2785 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2786 		int_params.irq_source =
2787 			dc_interrupt_to_irq_source(dc, i, 0);
2788 
2789 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2790 
2791 		c_irq_params->adev = adev;
2792 		c_irq_params->irq_src = int_params.irq_source;
2793 
2794 		amdgpu_dm_irq_register_interrupt(
2795 			adev, &int_params, dm_crtc_high_irq, c_irq_params);
2796 	}
2797 
2798 	/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
2799 	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
2800 	 * to trigger at end of each vblank, regardless of state of the lock,
2801 	 * matching DCE behaviour.
2802 	 */
2803 	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
2804 	     i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
2805 	     i++) {
2806 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
2807 
2808 		if (r) {
2809 			DRM_ERROR("Failed to add vupdate irq id!\n");
2810 			return r;
2811 		}
2812 
2813 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2814 		int_params.irq_source =
2815 			dc_interrupt_to_irq_source(dc, i, 0);
2816 
2817 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2818 
2819 		c_irq_params->adev = adev;
2820 		c_irq_params->irq_src = int_params.irq_source;
2821 
2822 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2823 				dm_vupdate_high_irq, c_irq_params);
2824 	}
2825 
2826 	/* Use GRPH_PFLIP interrupt */
2827 	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
2828 			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
2829 			i++) {
2830 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
2831 		if (r) {
2832 			DRM_ERROR("Failed to add page flip irq id!\n");
2833 			return r;
2834 		}
2835 
2836 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2837 		int_params.irq_source =
2838 			dc_interrupt_to_irq_source(dc, i, 0);
2839 
2840 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2841 
2842 		c_irq_params->adev = adev;
2843 		c_irq_params->irq_src = int_params.irq_source;
2844 
2845 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
2846 				dm_pflip_high_irq, c_irq_params);
2847 
2848 	}
2849 
2850 	/* HPD */
2851 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
2852 			&adev->hpd_irq);
2853 	if (r) {
2854 		DRM_ERROR("Failed to add hpd irq id!\n");
2855 		return r;
2856 	}
2857 
2858 	register_hpd_handlers(adev);
2859 
2860 	return 0;
2861 }
2862 #endif
2863 
2864 /*
2865  * Acquires the lock for the atomic state object and returns
2866  * the new atomic state.
2867  *
2868  * This should only be called during atomic check.
2869  */
2870 static int dm_atomic_get_state(struct drm_atomic_state *state,
2871 			       struct dm_atomic_state **dm_state)
2872 {
2873 	struct drm_device *dev = state->dev;
2874 	struct amdgpu_device *adev = drm_to_adev(dev);
2875 	struct amdgpu_display_manager *dm = &adev->dm;
2876 	struct drm_private_state *priv_state;
2877 
2878 	if (*dm_state)
2879 		return 0;
2880 
2881 	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
2882 	if (IS_ERR(priv_state))
2883 		return PTR_ERR(priv_state);
2884 
2885 	*dm_state = to_dm_atomic_state(priv_state);
2886 
2887 	return 0;
2888 }
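
/*
 * A minimal usage sketch for the helper above (illustrative; it mirrors how
 * the atomic check path in this file is expected to call it):
 *
 *	struct dm_atomic_state *dm_state = NULL;
 *	int ret;
 *
 *	ret = dm_atomic_get_state(state, &dm_state);
 *	if (ret)
 *		return ret;
 *	// dm_state->context now holds the dc_state used for validation
 */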
2889 
2890 static struct dm_atomic_state *
2891 dm_atomic_get_new_state(struct drm_atomic_state *state)
2892 {
2893 	struct drm_device *dev = state->dev;
2894 	struct amdgpu_device *adev = drm_to_adev(dev);
2895 	struct amdgpu_display_manager *dm = &adev->dm;
2896 	struct drm_private_obj *obj;
2897 	struct drm_private_state *new_obj_state;
2898 	int i;
2899 
2900 	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
2901 		if (obj->funcs == dm->atomic_obj.funcs)
2902 			return to_dm_atomic_state(new_obj_state);
2903 	}
2904 
2905 	return NULL;
2906 }
2907 
2908 static struct drm_private_state *
2909 dm_atomic_duplicate_state(struct drm_private_obj *obj)
2910 {
2911 	struct dm_atomic_state *old_state, *new_state;
2912 
2913 	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
2914 	if (!new_state)
2915 		return NULL;
2916 
2917 	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
2918 
2919 	old_state = to_dm_atomic_state(obj->state);
2920 
2921 	if (old_state && old_state->context)
2922 		new_state->context = dc_copy_state(old_state->context);
2923 
2924 	if (!new_state->context) {
2925 		kfree(new_state);
2926 		return NULL;
2927 	}
2928 
2929 	return &new_state->base;
2930 }
2931 
2932 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
2933 				    struct drm_private_state *state)
2934 {
2935 	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
2936 
2937 	if (dm_state && dm_state->context)
2938 		dc_release_state(dm_state->context);
2939 
2940 	kfree(dm_state);
2941 }
2942 
2943 static struct drm_private_state_funcs dm_atomic_state_funcs = {
2944 	.atomic_duplicate_state = dm_atomic_duplicate_state,
2945 	.atomic_destroy_state = dm_atomic_destroy_state,
2946 };
2947 
2948 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
2949 {
2950 	struct dm_atomic_state *state;
2951 	int r;
2952 
2953 	adev->mode_info.mode_config_initialized = true;
2954 
2955 	adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
2956 	adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
2957 
2958 	adev_to_drm(adev)->mode_config.max_width = 16384;
2959 	adev_to_drm(adev)->mode_config.max_height = 16384;
2960 
2961 	adev_to_drm(adev)->mode_config.preferred_depth = 24;
2962 	adev_to_drm(adev)->mode_config.prefer_shadow = 1;
2963 	/* indicates support for immediate flip */
2964 	adev_to_drm(adev)->mode_config.async_page_flip = true;
2965 
2966 	adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
2967 
2968 	state = kzalloc(sizeof(*state), GFP_KERNEL);
2969 	if (!state)
2970 		return -ENOMEM;
2971 
2972 	state->context = dc_create_state(adev->dm.dc);
2973 	if (!state->context) {
2974 		kfree(state);
2975 		return -ENOMEM;
2976 	}
2977 
2978 	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
2979 
2980 	drm_atomic_private_obj_init(adev_to_drm(adev),
2981 				    &adev->dm.atomic_obj,
2982 				    &state->base,
2983 				    &dm_atomic_state_funcs);
2984 
2985 	r = amdgpu_display_modeset_create_props(adev);
2986 	if (r) {
2987 		dc_release_state(state->context);
2988 		kfree(state);
2989 		return r;
2990 	}
2991 
2992 	r = amdgpu_dm_audio_init(adev);
2993 	if (r) {
2994 		dc_release_state(state->context);
2995 		kfree(state);
2996 		return r;
2997 	}
2998 
2999 	return 0;
3000 }
3001 
3002 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3003 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3004 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3005 
3006 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3007 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3008 
3009 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
3010 {
3011 #if defined(CONFIG_ACPI)
3012 	struct amdgpu_dm_backlight_caps caps;
3013 
3014 	memset(&caps, 0, sizeof(caps));
3015 
3016 	if (dm->backlight_caps.caps_valid)
3017 		return;
3018 
3019 	amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
3020 	if (caps.caps_valid) {
3021 		dm->backlight_caps.caps_valid = true;
3022 		if (caps.aux_support)
3023 			return;
3024 		dm->backlight_caps.min_input_signal = caps.min_input_signal;
3025 		dm->backlight_caps.max_input_signal = caps.max_input_signal;
3026 	} else {
3027 		dm->backlight_caps.min_input_signal =
3028 				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3029 		dm->backlight_caps.max_input_signal =
3030 				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3031 	}
3032 #else
3033 	if (dm->backlight_caps.aux_support)
3034 		return;
3035 
3036 	dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3037 	dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3038 #endif
3039 }
3040 
3041 static int set_backlight_via_aux(struct dc_link *link, uint32_t brightness)
3042 {
3043 	bool rc;
3044 
3045 	if (!link)
3046 		return 1;
3047 
3048 	rc = dc_link_set_backlight_level_nits(link, true, brightness,
3049 					      AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3050 
3051 	return rc ? 0 : 1;
3052 }
3053 
3054 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3055 				unsigned *min, unsigned *max)
3056 {
3057 	if (!caps)
3058 		return 0;
3059 
3060 	if (caps->aux_support) {
3061 		// Firmware limits are in nits, DC API wants millinits.
3062 		*max = 1000 * caps->aux_max_input_signal;
3063 		*min = 1000 * caps->aux_min_input_signal;
3064 	} else {
3065 		// Firmware limits are 8-bit, PWM control is 16-bit.
3066 		*max = 0x101 * caps->max_input_signal;
3067 		*min = 0x101 * caps->min_input_signal;
3068 	}
3069 	return 1;
3070 }
3071 
3072 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3073 					uint32_t brightness)
3074 {
3075 	unsigned min, max;
3076 
3077 	if (!get_brightness_range(caps, &min, &max))
3078 		return brightness;
3079 
3080 	// Rescale 0..255 to min..max
3081 	return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3082 				       AMDGPU_MAX_BL_LEVEL);
3083 }
3084 
3085 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3086 				      uint32_t brightness)
3087 {
3088 	unsigned min, max;
3089 
3090 	if (!get_brightness_range(caps, &min, &max))
3091 		return brightness;
3092 
3093 	if (brightness < min)
3094 		return 0;
3095 	// Rescale min..max to 0..255
3096 	return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3097 				 max - min);
3098 }
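
/*
 * Worked example for the two helpers above (illustrative numbers, PWM path):
 * with firmware limits min_input_signal = 12 and max_input_signal = 255, the
 * 16-bit range becomes min = 0x101 * 12 = 3084 and max = 0x101 * 255 = 65535.
 * A user brightness of 128 then maps to
 * 3084 + DIV_ROUND_CLOSEST((65535 - 3084) * 128, 255) = 34433, and
 * convert_brightness_to_user() maps 34433 back to 128.
 */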
3099 
3100 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3101 {
3102 	struct amdgpu_display_manager *dm = bl_get_data(bd);
3103 	struct amdgpu_dm_backlight_caps caps;
3104 	struct dc_link *link = NULL;
3105 	u32 brightness;
3106 	bool rc;
3107 
3108 	amdgpu_dm_update_backlight_caps(dm);
3109 	caps = dm->backlight_caps;
3110 
3111 	link = (struct dc_link *)dm->backlight_link;
3112 
3113 	brightness = convert_brightness_from_user(&caps, bd->props.brightness);
3114 	// Change brightness based on AUX property
3115 	if (caps.aux_support)
3116 		return set_backlight_via_aux(link, brightness);
3117 
3118 	rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
3119 
3120 	return rc ? 0 : 1;
3121 }
3122 
3123 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3124 {
3125 	struct amdgpu_display_manager *dm = bl_get_data(bd);
3126 	int ret = dc_link_get_backlight_level(dm->backlight_link);
3127 
3128 	if (ret == DC_ERROR_UNEXPECTED)
3129 		return bd->props.brightness;
3130 	return convert_brightness_to_user(&dm->backlight_caps, ret);
3131 }
3132 
3133 static const struct backlight_ops amdgpu_dm_backlight_ops = {
3134 	.options = BL_CORE_SUSPENDRESUME,
3135 	.get_brightness = amdgpu_dm_backlight_get_brightness,
3136 	.update_status	= amdgpu_dm_backlight_update_status,
3137 };
3138 
3139 static void
3140 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
3141 {
3142 	char bl_name[16];
3143 	struct backlight_properties props = { 0 };
3144 
3145 	amdgpu_dm_update_backlight_caps(dm);
3146 
3147 	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
3148 	props.brightness = AMDGPU_MAX_BL_LEVEL;
3149 	props.type = BACKLIGHT_RAW;
3150 
3151 	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
3152 		 adev_to_drm(dm->adev)->primary->index);
3153 
3154 	dm->backlight_dev = backlight_device_register(bl_name,
3155 						      adev_to_drm(dm->adev)->dev,
3156 						      dm,
3157 						      &amdgpu_dm_backlight_ops,
3158 						      &props);
3159 
3160 	if (IS_ERR(dm->backlight_dev))
3161 		DRM_ERROR("DM: Backlight registration failed!\n");
3162 	else
3163 		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
3164 }
3165 
3166 #endif
3167 
3168 static int initialize_plane(struct amdgpu_display_manager *dm,
3169 			    struct amdgpu_mode_info *mode_info, int plane_id,
3170 			    enum drm_plane_type plane_type,
3171 			    const struct dc_plane_cap *plane_cap)
3172 {
3173 	struct drm_plane *plane;
3174 	unsigned long possible_crtcs;
3175 	int ret = 0;
3176 
3177 	plane = kzalloc(sizeof(*plane), GFP_KERNEL);
3178 	if (!plane) {
3179 		DRM_ERROR("KMS: Failed to allocate plane\n");
3180 		return -ENOMEM;
3181 	}
3182 	plane->type = plane_type;
3183 
3184 	/*
3185 	 * HACK: IGT tests expect that the primary plane for a CRTC
3186 	 * can only have one possible CRTC. Only expose support for
3187 	 * any CRTC on planes that are not going to be used as a primary
3188 	 * plane for a CRTC - like overlay or underlay planes.
3189 	 */
3190 	possible_crtcs = 1 << plane_id;
3191 	if (plane_id >= dm->dc->caps.max_streams)
3192 		possible_crtcs = 0xff;
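	/*
	 * Illustrative example (assuming max_streams == 4): primary plane 2
	 * gets possible_crtcs = 1 << 2 = 0b0100 (its own CRTC only), while an
	 * overlay plane with plane_id >= 4 gets possible_crtcs = 0xff.
	 */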
3193 
3194 	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3195 
3196 	if (ret) {
3197 		DRM_ERROR("KMS: Failed to initialize plane\n");
3198 		kfree(plane);
3199 		return ret;
3200 	}
3201 
3202 	if (mode_info)
3203 		mode_info->planes[plane_id] = plane;
3204 
3205 	return ret;
3206 }
3207 
3208 
3209 static void register_backlight_device(struct amdgpu_display_manager *dm,
3210 				      struct dc_link *link)
3211 {
3212 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3213 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3214 
3215 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3216 	    link->type != dc_connection_none) {
3217 		/*
3218 		 * Even if registration failed, we should continue with
3219 		 * DM initialization because not having a backlight control
3220 		 * is better than a black screen.
3221 		 */
3222 		amdgpu_dm_register_backlight_device(dm);
3223 
3224 		if (dm->backlight_dev)
3225 			dm->backlight_link = link;
3226 	}
3227 #endif
3228 }
3229 
3230 
3231 /*
3232  * In this architecture, the association
3233  * connector -> encoder -> crtc
3234  * is not really required. The crtc and connector will hold the
3235  * display_index as an abstraction to use with the DAL component
3236  *
3237  * Returns 0 on success
3238  */
3239 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3240 {
3241 	struct amdgpu_display_manager *dm = &adev->dm;
3242 	int32_t i;
3243 	struct amdgpu_dm_connector *aconnector = NULL;
3244 	struct amdgpu_encoder *aencoder = NULL;
3245 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
3246 	uint32_t link_cnt;
3247 	int32_t primary_planes;
3248 	enum dc_connection_type new_connection_type = dc_connection_none;
3249 	const struct dc_plane_cap *plane;
3250 
3251 	dm->display_indexes_num = dm->dc->caps.max_streams;
3252 	/* Update the actual number of crtcs used */
3253 	adev->mode_info.num_crtc = adev->dm.display_indexes_num;
3254 
3255 	link_cnt = dm->dc->caps.max_links;
3256 	if (amdgpu_dm_mode_config_init(dm->adev)) {
3257 		DRM_ERROR("DM: Failed to initialize mode config\n");
3258 		return -EINVAL;
3259 	}
3260 
3261 	/* There is one primary plane per CRTC */
3262 	primary_planes = dm->dc->caps.max_streams;
3263 	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3264 
3265 	/*
3266 	 * Initialize primary planes, implicit planes for legacy IOCTLS.
3267 	 * Order is reversed to match iteration order in atomic check.
3268 	 */
3269 	for (i = (primary_planes - 1); i >= 0; i--) {
3270 		plane = &dm->dc->caps.planes[i];
3271 
3272 		if (initialize_plane(dm, mode_info, i,
3273 				     DRM_PLANE_TYPE_PRIMARY, plane)) {
3274 			DRM_ERROR("KMS: Failed to initialize primary plane\n");
3275 			goto fail;
3276 		}
3277 	}
3278 
3279 	/*
3280 	 * Initialize overlay planes, index starting after primary planes.
3281 	 * These planes have a higher DRM index than the primary planes since
3282 	 * they should be considered as having a higher z-order.
3283 	 * Order is reversed to match iteration order in atomic check.
3284 	 *
3285 	 * Only support DCN for now, and only expose one so we don't encourage
3286 	 * userspace to use up all the pipes.
3287 	 */
3288 	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3289 		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3290 
3291 		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3292 			continue;
3293 
3294 		if (!plane->blends_with_above || !plane->blends_with_below)
3295 			continue;
3296 
3297 		if (!plane->pixel_format_support.argb8888)
3298 			continue;
3299 
3300 		if (initialize_plane(dm, NULL, primary_planes + i,
3301 				     DRM_PLANE_TYPE_OVERLAY, plane)) {
3302 			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3303 			goto fail;
3304 		}
3305 
3306 		/* Only create one overlay plane. */
3307 		break;
3308 	}
3309 
3310 	for (i = 0; i < dm->dc->caps.max_streams; i++)
3311 		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3312 			DRM_ERROR("KMS: Failed to initialize crtc\n");
3313 			goto fail;
3314 		}
3315 
3316 	/* loops over all connectors on the board */
3317 	for (i = 0; i < link_cnt; i++) {
3318 		struct dc_link *link = NULL;
3319 
3320 		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3321 			DRM_ERROR(
3322 				"KMS: Cannot support more than %d display indexes\n",
3323 					AMDGPU_DM_MAX_DISPLAY_INDEX);
3324 			continue;
3325 		}
3326 
3327 		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3328 		if (!aconnector)
3329 			goto fail;
3330 
3331 		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
3332 		if (!aencoder)
3333 			goto fail;
3334 
3335 		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3336 			DRM_ERROR("KMS: Failed to initialize encoder\n");
3337 			goto fail;
3338 		}
3339 
3340 		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3341 			DRM_ERROR("KMS: Failed to initialize connector\n");
3342 			goto fail;
3343 		}
3344 
3345 		link = dc_get_link_at_index(dm->dc, i);
3346 
3347 		if (!dc_link_detect_sink(link, &new_connection_type))
3348 			DRM_ERROR("KMS: Failed to detect connector\n");
3349 
3350 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
3351 			emulated_link_detect(link);
3352 			amdgpu_dm_update_connector_after_detect(aconnector);
3353 
3354 		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
3355 			amdgpu_dm_update_connector_after_detect(aconnector);
3356 			register_backlight_device(dm, link);
3357 			if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3358 				amdgpu_dm_set_psr_caps(link);
3359 		}
3360 
3361 
3362 	}
3363 
3364 	/* Software is initialized. Now we can register interrupt handlers. */
3365 	switch (adev->asic_type) {
3366 #if defined(CONFIG_DRM_AMD_DC_SI)
3367 	case CHIP_TAHITI:
3368 	case CHIP_PITCAIRN:
3369 	case CHIP_VERDE:
3370 	case CHIP_OLAND:
3371 		if (dce60_register_irq_handlers(dm->adev)) {
3372 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3373 			goto fail;
3374 		}
3375 		break;
3376 #endif
3377 	case CHIP_BONAIRE:
3378 	case CHIP_HAWAII:
3379 	case CHIP_KAVERI:
3380 	case CHIP_KABINI:
3381 	case CHIP_MULLINS:
3382 	case CHIP_TONGA:
3383 	case CHIP_FIJI:
3384 	case CHIP_CARRIZO:
3385 	case CHIP_STONEY:
3386 	case CHIP_POLARIS11:
3387 	case CHIP_POLARIS10:
3388 	case CHIP_POLARIS12:
3389 	case CHIP_VEGAM:
3390 	case CHIP_VEGA10:
3391 	case CHIP_VEGA12:
3392 	case CHIP_VEGA20:
3393 		if (dce110_register_irq_handlers(dm->adev)) {
3394 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3395 			goto fail;
3396 		}
3397 		break;
3398 #if defined(CONFIG_DRM_AMD_DC_DCN)
3399 	case CHIP_RAVEN:
3400 	case CHIP_NAVI12:
3401 	case CHIP_NAVI10:
3402 	case CHIP_NAVI14:
3403 	case CHIP_RENOIR:
3404 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
3405 	case CHIP_SIENNA_CICHLID:
3406 	case CHIP_NAVY_FLOUNDER:
3407 #endif
3408 		if (dcn10_register_irq_handlers(dm->adev)) {
3409 			DRM_ERROR("DM: Failed to initialize IRQ\n");
3410 			goto fail;
3411 		}
3412 		break;
3413 #endif
3414 	default:
3415 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3416 		goto fail;
3417 	}
3418 
3419 	return 0;
3420 fail:
3421 	kfree(aencoder);
3422 	kfree(aconnector);
3423 
3424 	return -EINVAL;
3425 }
3426 
amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager * dm)3427 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3428 {
3429 	drm_mode_config_cleanup(dm->ddev);
3430 	drm_atomic_private_obj_fini(&dm->atomic_obj);
3431 	return;
3432 }
3433 
3434 /******************************************************************************
3435  * amdgpu_display_funcs functions
3436  *****************************************************************************/
3437 
3438 /*
3439  * dm_bandwidth_update - program display watermarks
3440  *
3441  * @adev: amdgpu_device pointer
3442  *
3443  * Calculate and program the display watermarks and line buffer allocation.
3444  */
dm_bandwidth_update(struct amdgpu_device * adev)3445 static void dm_bandwidth_update(struct amdgpu_device *adev)
3446 {
3447 	/* TODO: implement later */
3448 }
3449 
3450 static const struct amdgpu_display_funcs dm_display_funcs = {
3451 	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3452 	.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
3453 	.backlight_set_level = NULL, /* never called for DC */
3454 	.backlight_get_level = NULL, /* never called for DC */
3455 	.hpd_sense = NULL,/* called unconditionally */
3456 	.hpd_set_polarity = NULL, /* called unconditionally */
3457 	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3458 	.page_flip_get_scanoutpos =
3459 		dm_crtc_get_scanoutpos,/* called unconditionally */
3460 	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3461 	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
3462 };
3463 
3464 #if defined(CONFIG_DEBUG_KERNEL_DC)
3465 
s3_debug_store(struct device * device,struct device_attribute * attr,const char * buf,size_t count)3466 static ssize_t s3_debug_store(struct device *device,
3467 			      struct device_attribute *attr,
3468 			      const char *buf,
3469 			      size_t count)
3470 {
3471 	int ret;
3472 	int s3_state;
3473 	struct drm_device *drm_dev = dev_get_drvdata(device);
3474 	struct amdgpu_device *adev = drm_to_adev(drm_dev);
3475 
3476 	ret = kstrtoint(buf, 0, &s3_state);
3477 
3478 	if (ret == 0) {
3479 		if (s3_state) {
3480 			dm_resume(adev);
3481 			drm_kms_helper_hotplug_event(adev_to_drm(adev));
3482 		} else
3483 			dm_suspend(adev);
3484 	}
3485 
3486 	return ret == 0 ? count : 0;
3487 }
3488 
3489 DEVICE_ATTR_WO(s3_debug);
3490 
3491 #endif
3492 
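/*
 * dm_early_init - set up display hardware limits for the detected ASIC
 *
 * @handle: amdgpu_device pointer, passed as an opaque IP block handle
 *
 * Fill in the number of CRTCs, HPD pins and DIG encoders for the ASIC,
 * register the DM IRQ helpers and install the display function table.
 */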
static int dm_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_OLAND:
		adev->mode_info.num_crtc = 2;
		adev->mode_info.num_hpd = 2;
		adev->mode_info.num_dig = 2;
		break;
#endif
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_KAVERI:
		adev->mode_info.num_crtc = 4;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 7;
		break;
	case CHIP_KABINI:
	case CHIP_MULLINS:
		adev->mode_info.num_crtc = 2;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_FIJI:
	case CHIP_TONGA:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 7;
		break;
	case CHIP_CARRIZO:
		adev->mode_info.num_crtc = 3;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 9;
		break;
	case CHIP_STONEY:
		adev->mode_info.num_crtc = 2;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 9;
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
		adev->mode_info.num_crtc = 5;
		adev->mode_info.num_hpd = 5;
		adev->mode_info.num_dig = 5;
		break;
	case CHIP_POLARIS10:
	case CHIP_VEGAM:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
#if defined(CONFIG_DRM_AMD_DC_DCN)
	case CHIP_RAVEN:
		adev->mode_info.num_crtc = 4;
		adev->mode_info.num_hpd = 4;
		adev->mode_info.num_dig = 4;
		break;
#endif
	case CHIP_NAVI10:
	case CHIP_NAVI12:
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
#endif
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_NAVI14:
		adev->mode_info.num_crtc = 5;
		adev->mode_info.num_hpd = 5;
		adev->mode_info.num_dig = 5;
		break;
	case CHIP_RENOIR:
		adev->mode_info.num_crtc = 4;
		adev->mode_info.num_hpd = 4;
		adev->mode_info.num_dig = 4;
		break;
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}

	amdgpu_dm_set_irq_funcs(adev);

	if (adev->mode_info.funcs == NULL)
		adev->mode_info.funcs = &dm_display_funcs;

	/*
	 * Note: Do NOT change adev->audio_endpt_rreg and
	 * adev->audio_endpt_wreg because they are initialised in
	 * amdgpu_device_init()
	 */
#if defined(CONFIG_DEBUG_KERNEL_DC)
	device_create_file(
		adev_to_drm(adev)->dev,
		&dev_attr_s3_debug);
#endif

	return 0;
}

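/*
 * Helpers translating DRM atomic CRTC state into DC stream actions.
 * Note: the stream arguments of modeset_required() are currently unused;
 * only the DRM CRTC state is consulted.
 */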
static bool modeset_required(struct drm_crtc_state *crtc_state,
			     struct dc_stream_state *new_stream,
			     struct dc_stream_state *old_stream)
{
	return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
}

static bool modereset_required(struct drm_crtc_state *crtc_state)
{
	return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
}

static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
	kfree(encoder);
}

static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
	.destroy = amdgpu_dm_encoder_destroy,
};

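/*
 * fill_dc_scaling_info - convert DRM plane rectangles into DC scaling info
 *
 * @state: DRM plane state carrying the 16.16 fixed-point source rectangle
 *         and the integer destination (CRTC) rectangle
 * @scaling_info: DC scaling info to fill out
 *
 * Rejects degenerate rectangles and scaling ratios outside of the
 * 0.25x to 16x range the driver accepts.
 */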
static int fill_dc_scaling_info(const struct drm_plane_state *state,
				struct dc_scaling_info *scaling_info)
{
	int scale_w, scale_h;

	memset(scaling_info, 0, sizeof(*scaling_info));

	/* Source is fixed 16.16 but we ignore mantissa for now... */
	scaling_info->src_rect.x = state->src_x >> 16;
	scaling_info->src_rect.y = state->src_y >> 16;

	scaling_info->src_rect.width = state->src_w >> 16;
	if (scaling_info->src_rect.width == 0)
		return -EINVAL;

	scaling_info->src_rect.height = state->src_h >> 16;
	if (scaling_info->src_rect.height == 0)
		return -EINVAL;

	scaling_info->dst_rect.x = state->crtc_x;
	scaling_info->dst_rect.y = state->crtc_y;

	if (state->crtc_w == 0)
		return -EINVAL;

	scaling_info->dst_rect.width = state->crtc_w;

	if (state->crtc_h == 0)
		return -EINVAL;

	scaling_info->dst_rect.height = state->crtc_h;

	/* DRM doesn't specify clipping on destination output. */
	scaling_info->clip_rect = scaling_info->dst_rect;

	/* TODO: Validate scaling per-format with DC plane caps */
	scale_w = scaling_info->dst_rect.width * 1000 /
		  scaling_info->src_rect.width;

	if (scale_w < 250 || scale_w > 16000)
		return -EINVAL;

	scale_h = scaling_info->dst_rect.height * 1000 /
		  scaling_info->src_rect.height;

	if (scale_h < 250 || scale_h > 16000)
		return -EINVAL;

	/*
	 * The "scaling_quality" can be ignored for now, quality = 0 has DC
	 * assume reasonable defaults based on the format.
	 */

	return 0;
}

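/*
 * get_fb_info - read tiling flags and TMZ state from a framebuffer's BO
 *
 * Reserves the backing amdgpu_bo, queries its tiling flags and whether the
 * surface is TMZ (encrypted), then unreserves it. A NULL framebuffer yields
 * zeroed outputs.
 */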
static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
		       uint64_t *tiling_flags, bool *tmz_surface)
{
	struct amdgpu_bo *rbo;
	int r;

	if (!amdgpu_fb) {
		*tiling_flags = 0;
		*tmz_surface = false;
		return 0;
	}

	rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
	r = amdgpu_bo_reserve(rbo, false);

	if (unlikely(r)) {
		/* Don't show error message when returning -ERESTARTSYS */
		if (r != -ERESTARTSYS)
			DRM_ERROR("Unable to reserve buffer: %d\n", r);
		return r;
	}

	if (tiling_flags)
		amdgpu_bo_get_tiling_flags(rbo, tiling_flags);

	if (tmz_surface)
		*tmz_surface = amdgpu_bo_encrypted(rbo);

	amdgpu_bo_unreserve(rbo);

	return r;
}

static inline uint64_t get_dcc_address(uint64_t address, uint64_t tiling_flags)
{
	uint32_t offset = AMDGPU_TILING_GET(tiling_flags, DCC_OFFSET_256B);

	return offset ? (address + offset * 256) : 0;
}

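/*
 * fill_plane_dcc_attributes - derive DC DCC parameters from BO tiling flags
 *
 * DCC stays disabled when it is forced off, when the framebuffer carries no
 * DCC metadata offset, or for video formats. Otherwise the surface is
 * validated against the DCC caps reported by DC, and the metadata address
 * and pitch are written into @dcc and @address.
 */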
static int
fill_plane_dcc_attributes(struct amdgpu_device *adev,
			  const struct amdgpu_framebuffer *afb,
			  const enum surface_pixel_format format,
			  const enum dc_rotation_angle rotation,
			  const struct plane_size *plane_size,
			  const union dc_tiling_info *tiling_info,
			  const uint64_t info,
			  struct dc_plane_dcc_param *dcc,
			  struct dc_plane_address *address,
			  bool force_disable_dcc)
{
	struct dc *dc = adev->dm.dc;
	struct dc_dcc_surface_param input;
	struct dc_surface_dcc_cap output;
	uint32_t offset = AMDGPU_TILING_GET(info, DCC_OFFSET_256B);
	uint32_t i64b = AMDGPU_TILING_GET(info, DCC_INDEPENDENT_64B) != 0;
	uint64_t dcc_address;

	memset(&input, 0, sizeof(input));
	memset(&output, 0, sizeof(output));

	if (force_disable_dcc)
		return 0;

	if (!offset)
		return 0;

	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
		return 0;

	if (!dc->cap_funcs.get_dcc_compression_cap)
		return -EINVAL;

	input.format = format;
	input.surface_size.width = plane_size->surface_size.width;
	input.surface_size.height = plane_size->surface_size.height;
	input.swizzle_mode = tiling_info->gfx9.swizzle;

	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
		input.scan = SCAN_DIRECTION_HORIZONTAL;
	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
		input.scan = SCAN_DIRECTION_VERTICAL;

	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
		return -EINVAL;

	if (!output.capable)
		return -EINVAL;

	if (i64b == 0 && output.grph.rgb.independent_64b_blks != 0)
		return -EINVAL;

	dcc->enable = 1;
	dcc->meta_pitch =
		AMDGPU_TILING_GET(info, DCC_PITCH_MAX) + 1;
	dcc->independent_64b_blks = i64b;

	dcc_address = get_dcc_address(afb->address, info);
	address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
	address->grph.meta_addr.high_part = upper_32_bits(dcc_address);

	return 0;
}

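/*
 * fill_plane_buffer_attributes - fill DC surface size, tiling and addresses
 *
 * Handles single-plane graphics formats as well as two-plane (luma plus
 * chroma) video formats, then derives GFX8 macro-tiling or GFX9+ swizzle
 * parameters from the BO tiling flags, including DCC where applicable.
 */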
static int
fill_plane_buffer_attributes(struct amdgpu_device *adev,
			     const struct amdgpu_framebuffer *afb,
			     const enum surface_pixel_format format,
			     const enum dc_rotation_angle rotation,
			     const uint64_t tiling_flags,
			     union dc_tiling_info *tiling_info,
			     struct plane_size *plane_size,
			     struct dc_plane_dcc_param *dcc,
			     struct dc_plane_address *address,
			     bool tmz_surface,
			     bool force_disable_dcc)
{
	const struct drm_framebuffer *fb = &afb->base;
	int ret;

	memset(tiling_info, 0, sizeof(*tiling_info));
	memset(plane_size, 0, sizeof(*plane_size));
	memset(dcc, 0, sizeof(*dcc));
	memset(address, 0, sizeof(*address));

	address->tmz_surface = tmz_surface;

	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
		plane_size->surface_size.x = 0;
		plane_size->surface_size.y = 0;
		plane_size->surface_size.width = fb->width;
		plane_size->surface_size.height = fb->height;
		plane_size->surface_pitch =
			fb->pitches[0] / fb->format->cpp[0];

		address->type = PLN_ADDR_TYPE_GRAPHICS;
		address->grph.addr.low_part = lower_32_bits(afb->address);
		address->grph.addr.high_part = upper_32_bits(afb->address);
	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
		uint64_t chroma_addr = afb->address + fb->offsets[1];

		plane_size->surface_size.x = 0;
		plane_size->surface_size.y = 0;
		plane_size->surface_size.width = fb->width;
		plane_size->surface_size.height = fb->height;
		plane_size->surface_pitch =
			fb->pitches[0] / fb->format->cpp[0];

		plane_size->chroma_size.x = 0;
		plane_size->chroma_size.y = 0;
		/* TODO: set these based on surface format */
		plane_size->chroma_size.width = fb->width / 2;
		plane_size->chroma_size.height = fb->height / 2;

		plane_size->chroma_pitch =
			fb->pitches[1] / fb->format->cpp[1];

		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
		address->video_progressive.luma_addr.low_part =
			lower_32_bits(afb->address);
		address->video_progressive.luma_addr.high_part =
			upper_32_bits(afb->address);
		address->video_progressive.chroma_addr.low_part =
			lower_32_bits(chroma_addr);
		address->video_progressive.chroma_addr.high_part =
			upper_32_bits(chroma_addr);
	}

	/* Fill GFX8 params */
	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;

		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);

		/* XXX fix me for VI */
		tiling_info->gfx8.num_banks = num_banks;
		tiling_info->gfx8.array_mode =
				DC_ARRAY_2D_TILED_THIN1;
		tiling_info->gfx8.tile_split = tile_split;
		tiling_info->gfx8.bank_width = bankw;
		tiling_info->gfx8.bank_height = bankh;
		tiling_info->gfx8.tile_aspect = mtaspect;
		tiling_info->gfx8.tile_mode =
				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
			== DC_ARRAY_1D_TILED_THIN1) {
		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
	}

	tiling_info->gfx8.pipe_config =
			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);

	if (adev->asic_type == CHIP_VEGA10 ||
	    adev->asic_type == CHIP_VEGA12 ||
	    adev->asic_type == CHIP_VEGA20 ||
	    adev->asic_type == CHIP_NAVI10 ||
	    adev->asic_type == CHIP_NAVI14 ||
	    adev->asic_type == CHIP_NAVI12 ||
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
	    adev->asic_type == CHIP_SIENNA_CICHLID ||
	    adev->asic_type == CHIP_NAVY_FLOUNDER ||
#endif
	    adev->asic_type == CHIP_RENOIR ||
	    adev->asic_type == CHIP_RAVEN) {
		/* Fill GFX9 params */
		tiling_info->gfx9.num_pipes =
			adev->gfx.config.gb_addr_config_fields.num_pipes;
		tiling_info->gfx9.num_banks =
			adev->gfx.config.gb_addr_config_fields.num_banks;
		tiling_info->gfx9.pipe_interleave =
			adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
		tiling_info->gfx9.num_shader_engines =
			adev->gfx.config.gb_addr_config_fields.num_se;
		tiling_info->gfx9.max_compressed_frags =
			adev->gfx.config.gb_addr_config_fields.max_compress_frags;
		tiling_info->gfx9.num_rb_per_se =
			adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
		tiling_info->gfx9.swizzle =
			AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
		tiling_info->gfx9.shaderEnable = 1;

#ifdef CONFIG_DRM_AMD_DC_DCN3_0
		if (adev->asic_type == CHIP_SIENNA_CICHLID ||
		    adev->asic_type == CHIP_NAVY_FLOUNDER)
			tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
#endif
		ret = fill_plane_dcc_attributes(adev, afb, format, rotation,
						plane_size, tiling_info,
						tiling_flags, dcc, address,
						force_disable_dcc);
		if (ret)
			return ret;
	}

	return 0;
}

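/*
 * fill_blending_from_plane_state - derive DC alpha blending state
 *
 * Per-pixel alpha is only honoured for overlay planes using a premultiplied
 * blend mode with an alpha-capable format; the 16-bit DRM plane alpha
 * property is reduced to the 8-bit global alpha value DC expects.
 */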
static void
fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
			       bool *per_pixel_alpha, bool *global_alpha,
			       int *global_alpha_value)
{
	*per_pixel_alpha = false;
	*global_alpha = false;
	*global_alpha_value = 0xff;

	if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
		return;

	if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
		static const uint32_t alpha_formats[] = {
			DRM_FORMAT_ARGB8888,
			DRM_FORMAT_RGBA8888,
			DRM_FORMAT_ABGR8888,
		};
		uint32_t format = plane_state->fb->format->format;
		unsigned int i;

		for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
			if (format == alpha_formats[i]) {
				*per_pixel_alpha = true;
				break;
			}
		}
	}

	if (plane_state->alpha < 0xffff) {
		*global_alpha = true;
		*global_alpha_value = plane_state->alpha >> 8;
	}
}

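/*
 * fill_plane_color_attributes - map DRM color encoding/range to a DC
 * color space. RGB formats always use sRGB; YCbCr formats select among the
 * BT.601/BT.709/BT.2020 variants, with limited-range BT.2020 rejected.
 */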
static int
fill_plane_color_attributes(const struct drm_plane_state *plane_state,
			    const enum surface_pixel_format format,
			    enum dc_color_space *color_space)
{
	bool full_range;

	*color_space = COLOR_SPACE_SRGB;

	/* DRM color properties only affect non-RGB formats. */
	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
		return 0;

	full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);

	switch (plane_state->color_encoding) {
	case DRM_COLOR_YCBCR_BT601:
		if (full_range)
			*color_space = COLOR_SPACE_YCBCR601;
		else
			*color_space = COLOR_SPACE_YCBCR601_LIMITED;
		break;

	case DRM_COLOR_YCBCR_BT709:
		if (full_range)
			*color_space = COLOR_SPACE_YCBCR709;
		else
			*color_space = COLOR_SPACE_YCBCR709_LIMITED;
		break;

	case DRM_COLOR_YCBCR_BT2020:
		if (full_range)
			*color_space = COLOR_SPACE_2020_YCBCR;
		else
			return -EINVAL;
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

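/*
 * fill_dc_plane_info_and_addr - build a complete dc_plane_info for a plane
 *
 * Translates the DRM fourcc format and rotation, then uses the color,
 * buffer and blending helpers above to assemble the DC plane description
 * and its surface addresses.
 */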
static int
fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
			    const struct drm_plane_state *plane_state,
			    const uint64_t tiling_flags,
			    struct dc_plane_info *plane_info,
			    struct dc_plane_address *address,
			    bool tmz_surface,
			    bool force_disable_dcc)
{
	const struct drm_framebuffer *fb = plane_state->fb;
	const struct amdgpu_framebuffer *afb =
		to_amdgpu_framebuffer(plane_state->fb);
	struct drm_format_name_buf format_name;
	int ret;

	memset(plane_info, 0, sizeof(*plane_info));

	switch (fb->format->format) {
	case DRM_FORMAT_C8:
		plane_info->format =
			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
		break;
	case DRM_FORMAT_RGB565:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
		break;
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
		break;
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_ARGB2101010:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
		break;
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_ABGR2101010:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
		break;
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
		break;
	case DRM_FORMAT_NV21:
		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
		break;
	case DRM_FORMAT_NV12:
		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
		break;
	case DRM_FORMAT_P010:
		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
		break;
	case DRM_FORMAT_XRGB16161616F:
	case DRM_FORMAT_ARGB16161616F:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
		break;
	case DRM_FORMAT_XBGR16161616F:
	case DRM_FORMAT_ABGR16161616F:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
		break;
	default:
		DRM_ERROR(
			"Unsupported screen format %s\n",
			drm_get_format_name(fb->format->format, &format_name));
		return -EINVAL;
	}

	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
	case DRM_MODE_ROTATE_0:
		plane_info->rotation = ROTATION_ANGLE_0;
		break;
	case DRM_MODE_ROTATE_90:
		plane_info->rotation = ROTATION_ANGLE_90;
		break;
	case DRM_MODE_ROTATE_180:
		plane_info->rotation = ROTATION_ANGLE_180;
		break;
	case DRM_MODE_ROTATE_270:
		plane_info->rotation = ROTATION_ANGLE_270;
		break;
	default:
		plane_info->rotation = ROTATION_ANGLE_0;
		break;
	}

	plane_info->visible = true;
	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;

	plane_info->layer_index = 0;

	ret = fill_plane_color_attributes(plane_state, plane_info->format,
					  &plane_info->color_space);
	if (ret)
		return ret;

	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
					   plane_info->rotation, tiling_flags,
					   &plane_info->tiling_info,
					   &plane_info->plane_size,
					   &plane_info->dcc, address, tmz_surface,
					   force_disable_dcc);
	if (ret)
		return ret;

	fill_blending_from_plane_state(
		plane_state, &plane_info->per_pixel_alpha,
		&plane_info->global_alpha, &plane_info->global_alpha_value);

	return 0;
}

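/*
 * fill_dc_plane_attributes - populate a dc_plane_state from DRM plane state
 *
 * Combines the scaling, format/address and color management attributes
 * computed by the helpers above. DCC is force-disabled on Raven while the
 * device is suspending.
 */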
static int fill_dc_plane_attributes(struct amdgpu_device *adev,
				    struct dc_plane_state *dc_plane_state,
				    struct drm_plane_state *plane_state,
				    struct drm_crtc_state *crtc_state)
{
	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
	struct dm_plane_state *dm_plane_state = to_dm_plane_state(plane_state);
	struct dc_scaling_info scaling_info;
	struct dc_plane_info plane_info;
	int ret;
	bool force_disable_dcc = false;

	ret = fill_dc_scaling_info(plane_state, &scaling_info);
	if (ret)
		return ret;

	dc_plane_state->src_rect = scaling_info.src_rect;
	dc_plane_state->dst_rect = scaling_info.dst_rect;
	dc_plane_state->clip_rect = scaling_info.clip_rect;
	dc_plane_state->scaling_quality = scaling_info.scaling_quality;

	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
	ret = fill_dc_plane_info_and_addr(adev, plane_state,
					  dm_plane_state->tiling_flags,
					  &plane_info,
					  &dc_plane_state->address,
					  dm_plane_state->tmz_surface,
					  force_disable_dcc);
	if (ret)
		return ret;

	dc_plane_state->format = plane_info.format;
	dc_plane_state->color_space = plane_info.color_space;
	dc_plane_state->plane_size = plane_info.plane_size;
	dc_plane_state->rotation = plane_info.rotation;
	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
	dc_plane_state->stereo_format = plane_info.stereo_format;
	dc_plane_state->tiling_info = plane_info.tiling_info;
	dc_plane_state->visible = plane_info.visible;
	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
	dc_plane_state->global_alpha = plane_info.global_alpha;
	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
	dc_plane_state->dcc = plane_info.dcc;
	dc_plane_state->layer_index = plane_info.layer_index; /* always 0 for now */

	/*
	 * Always set input transfer function, since plane state is refreshed
	 * every time.
	 */
	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
	if (ret)
		return ret;

	return 0;
}

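/*
 * update_stream_scaling_settings - compute the stream src/dst rectangles
 *
 * Applies the connector's RMX scaling mode (full screen, aspect-preserving
 * or centered) and any underscan borders to derive the viewport and the
 * addressable destination rectangle of the stream.
 */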
static void update_stream_scaling_settings(const struct drm_display_mode *mode,
					   const struct dm_connector_state *dm_state,
					   struct dc_stream_state *stream)
{
	enum amdgpu_rmx_type rmx_type;

	struct rect src = { 0 }; /* viewport in composition space */
	struct rect dst = { 0 }; /* stream addressable area */

	/* no mode. nothing to be done */
	if (!mode)
		return;

	/* Full screen scaling by default */
	src.width = mode->hdisplay;
	src.height = mode->vdisplay;
	dst.width = stream->timing.h_addressable;
	dst.height = stream->timing.v_addressable;

	if (dm_state) {
		rmx_type = dm_state->scaling;
		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
			if (src.width * dst.height <
					src.height * dst.width) {
				/* height needs less upscaling/more downscaling */
				dst.width = src.width *
						dst.height / src.height;
			} else {
				/* width needs less upscaling/more downscaling */
				dst.height = src.height *
						dst.width / src.width;
			}
		} else if (rmx_type == RMX_CENTER) {
			dst = src;
		}

		dst.x = (stream->timing.h_addressable - dst.width) / 2;
		dst.y = (stream->timing.v_addressable - dst.height) / 2;

		if (dm_state->underscan_enable) {
			dst.x += dm_state->underscan_hborder / 2;
			dst.y += dm_state->underscan_vborder / 2;
			dst.width -= dm_state->underscan_hborder;
			dst.height -= dm_state->underscan_vborder;
		}
	}

	stream->src = src;
	stream->dst = dst;

	DRM_DEBUG_DRIVER("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
			dst.x, dst.y, dst.width, dst.height);
}

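/*
 * convert_color_depth_from_display_info - pick a DC color depth for a sink
 *
 * Starts from the EDID-reported bpc (or the HDMI 2.0 HF-VSDB deep color
 * flags for YCbCr 4:2:0) and clamps it to the bpc requested via the
 * max_bpc property, rounded down to an even value.
 */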
static enum dc_color_depth
convert_color_depth_from_display_info(const struct drm_connector *connector,
				      bool is_y420, int requested_bpc)
{
	uint8_t bpc;

	if (is_y420) {
		bpc = 8;

		/* Cap display bpc based on HDMI 2.0 HF-VSDB */
		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
			bpc = 16;
		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
			bpc = 12;
		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
			bpc = 10;
	} else {
		bpc = (uint8_t)connector->display_info.bpc;
		/* Assume 8 bpc by default if no bpc is specified. */
		bpc = bpc ? bpc : 8;
	}

	if (requested_bpc > 0) {
		/*
		 * Cap display bpc based on the user requested value.
		 *
		 * The value for state->max_bpc may not be correctly updated
		 * depending on when the connector gets added to the state
		 * or if this was called outside of atomic check, so it
		 * can't be used directly.
		 */
		bpc = min_t(u8, bpc, requested_bpc);

		/* Round down to the nearest even number. */
		bpc = bpc - (bpc & 1);
	}

	switch (bpc) {
	case 0:
		/*
		 * Temporary work around: DRM doesn't parse color depth for
		 * EDID revisions before 1.4.
		 * TODO: Fix EDID parsing.
		 */
		return COLOR_DEPTH_888;
	case 6:
		return COLOR_DEPTH_666;
	case 8:
		return COLOR_DEPTH_888;
	case 10:
		return COLOR_DEPTH_101010;
	case 12:
		return COLOR_DEPTH_121212;
	case 14:
		return COLOR_DEPTH_141414;
	case 16:
		return COLOR_DEPTH_161616;
	default:
		return COLOR_DEPTH_UNDEFINED;
	}
}

static enum dc_aspect_ratio
get_aspect_ratio(const struct drm_display_mode *mode_in)
{
	/* 1-1 mapping, since both enums follow the HDMI spec. */
	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
}

static enum dc_color_space
get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
{
	enum dc_color_space color_space = COLOR_SPACE_SRGB;

	switch (dc_crtc_timing->pixel_encoding) {
	case PIXEL_ENCODING_YCBCR422:
	case PIXEL_ENCODING_YCBCR444:
	case PIXEL_ENCODING_YCBCR420:
	{
		/*
		 * 27030 kHz is the separation point between HDTV and SDTV
		 * according to the HDMI spec; use YCbCr709 above it and
		 * YCbCr601 below it.
		 */
		if (dc_crtc_timing->pix_clk_100hz > 270300) {
			if (dc_crtc_timing->flags.Y_ONLY)
				color_space =
					COLOR_SPACE_YCBCR709_LIMITED;
			else
				color_space = COLOR_SPACE_YCBCR709;
		} else {
			if (dc_crtc_timing->flags.Y_ONLY)
				color_space =
					COLOR_SPACE_YCBCR601_LIMITED;
			else
				color_space = COLOR_SPACE_YCBCR601;
		}
	}
	break;
	case PIXEL_ENCODING_RGB:
		color_space = COLOR_SPACE_SRGB;
		break;

	default:
		WARN_ON(1);
		break;
	}

	return color_space;
}

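/*
 * adjust_colour_depth_from_display_info - fit the color depth to the TMDS
 * clock. Walks down through the HDMI-valid depths until the normalized
 * pixel clock (halved for YCbCr 4:2:0, scaled up for deep color) fits
 * within the sink's maximum TMDS clock; returns false if none fits.
 */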
static bool adjust_colour_depth_from_display_info(
	struct dc_crtc_timing *timing_out,
	const struct drm_display_info *info)
{
	enum dc_color_depth depth = timing_out->display_color_depth;
	int normalized_clk;

	do {
		normalized_clk = timing_out->pix_clk_100hz / 10;
		/* YCbCr 4:2:0 requires an additional adjustment of 1/2 */
		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
			normalized_clk /= 2;
		/* Adjust the pixel clock per the HDMI spec, based on the colour depth. */
		switch (depth) {
		case COLOR_DEPTH_888:
			break;
		case COLOR_DEPTH_101010:
			normalized_clk = (normalized_clk * 30) / 24;
			break;
		case COLOR_DEPTH_121212:
			normalized_clk = (normalized_clk * 36) / 24;
			break;
		case COLOR_DEPTH_161616:
			normalized_clk = (normalized_clk * 48) / 24;
			break;
		default:
			/* The above depths are the only ones valid for HDMI. */
			return false;
		}
		if (normalized_clk <= info->max_tmds_clock) {
			timing_out->display_color_depth = depth;
			return true;
		}
	} while (--depth > COLOR_DEPTH_666);
	return false;
}

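/*
 * fill_stream_properties_from_drm_display_mode - build the DC stream timing
 *
 * Chooses the pixel encoding (forcing YCbCr 4:2:0 where required), color
 * depth, VIC codes and sync polarities from the DRM mode and connector
 * info. When @old_stream is given, its VIC and polarities are carried over
 * so a rescaled mode keeps the identity of the old timings.
 */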
static void fill_stream_properties_from_drm_display_mode(
	struct dc_stream_state *stream,
	const struct drm_display_mode *mode_in,
	const struct drm_connector *connector,
	const struct drm_connector_state *connector_state,
	const struct dc_stream_state *old_stream,
	int requested_bpc)
{
	struct dc_crtc_timing *timing_out = &stream->timing;
	const struct drm_display_info *info = &connector->display_info;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct hdmi_vendor_infoframe hv_frame;
	struct hdmi_avi_infoframe avi_frame;

	memset(&hv_frame, 0, sizeof(hv_frame));
	memset(&avi_frame, 0, sizeof(avi_frame));

	timing_out->h_border_left = 0;
	timing_out->h_border_right = 0;
	timing_out->v_border_top = 0;
	timing_out->v_border_bottom = 0;
	/* TODO: un-hardcode */
	if (drm_mode_is_420_only(info, mode_in)
			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
	else if (drm_mode_is_420_also(info, mode_in)
			&& aconnector->force_yuv420_output)
		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
	else
		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;

	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
	timing_out->display_color_depth = convert_color_depth_from_display_info(
		connector,
		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
		requested_bpc);
	timing_out->scan_type = SCANNING_TYPE_NODATA;
	timing_out->hdmi_vic = 0;

	if (old_stream) {
		timing_out->vic = old_stream->timing.vic;
		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
	} else {
		timing_out->vic = drm_match_cea_mode(mode_in);
		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
	}

	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
		timing_out->vic = avi_frame.video_code;
		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
		timing_out->hdmi_vic = hv_frame.vic;
	}

	timing_out->h_addressable = mode_in->crtc_hdisplay;
	timing_out->h_total = mode_in->crtc_htotal;
	timing_out->h_sync_width =
		mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
	timing_out->h_front_porch =
		mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
	timing_out->v_total = mode_in->crtc_vtotal;
	timing_out->v_addressable = mode_in->crtc_vdisplay;
	timing_out->v_front_porch =
		mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
	timing_out->v_sync_width =
		mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
	timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
	timing_out->aspect_ratio = get_aspect_ratio(mode_in);

	stream->output_color_space = get_output_color_space(timing_out);

	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
		    drm_mode_is_420_also(info, mode_in) &&
		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
			adjust_colour_depth_from_display_info(timing_out, info);
		}
	}
}

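/*
 * fill_audio_info - copy EDID audio capabilities into the DC audio info
 *
 * Pulls the display name, CEA audio modes, speaker allocation and the
 * progressive-mode latencies from the sink's parsed EDID caps.
 */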
static void fill_audio_info(struct audio_info *audio_info,
			    const struct drm_connector *drm_connector,
			    const struct dc_sink *dc_sink)
{
	int i = 0;
	int cea_revision = 0;
	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;

	audio_info->manufacture_id = edid_caps->manufacturer_id;
	audio_info->product_id = edid_caps->product_id;

	cea_revision = drm_connector->display_info.cea_rev;

	strscpy(audio_info->display_name,
		edid_caps->display_name,
		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);

	if (cea_revision >= 3) {
		audio_info->mode_count = edid_caps->audio_mode_count;

		for (i = 0; i < audio_info->mode_count; ++i) {
			audio_info->modes[i].format_code =
					(enum audio_format_code)
					(edid_caps->audio_modes[i].format_code);
			audio_info->modes[i].channel_count =
					edid_caps->audio_modes[i].channel_count;
			audio_info->modes[i].sample_rates.all =
					edid_caps->audio_modes[i].sample_rate;
			audio_info->modes[i].sample_size =
					edid_caps->audio_modes[i].sample_size;
		}
	}

	audio_info->flags.all = edid_caps->speaker_flags;

	/* TODO: We only check for the progressive mode, check for interlace mode too */
	if (drm_connector->latency_present[0]) {
		audio_info->video_latency = drm_connector->video_latency[0];
		audio_info->audio_latency = drm_connector->audio_latency[0];
	}

	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */
}

static void
copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
				      struct drm_display_mode *dst_mode)
{
	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
	dst_mode->crtc_clock = src_mode->crtc_clock;
	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
	dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
	dst_mode->crtc_htotal = src_mode->crtc_htotal;
	dst_mode->crtc_hskew = src_mode->crtc_hskew;
	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
}

static void
decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
					const struct drm_display_mode *native_mode,
					bool scale_enabled)
{
	if (scale_enabled) {
		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
	} else if (native_mode->clock == drm_mode->clock &&
			native_mode->htotal == drm_mode->htotal &&
			native_mode->vtotal == drm_mode->vtotal) {
		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
	} else {
		/* no scaling nor amdgpu inserted, no need to patch */
	}
}

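/*
 * create_fake_sink - create a virtual dc_sink for a link with no real sink
 *
 * Used when no EDID/sink is available so a stream can still be built; the
 * sink signal is forced to SIGNAL_TYPE_VIRTUAL.
 */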
static struct dc_sink *
create_fake_sink(struct amdgpu_dm_connector *aconnector)
{
	struct dc_sink_init_data sink_init_data = { 0 };
	struct dc_sink *sink = NULL;

	sink_init_data.link = aconnector->dc_link;
	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;

	sink = dc_sink_create(&sink_init_data);
	if (!sink) {
		DRM_ERROR("Failed to create sink!\n");
		return NULL;
	}
	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;

	return sink;
}

static void set_multisync_trigger_params(
		struct dc_stream_state *stream)
{
	if (stream->triggered_crtc_reset.enabled) {
		stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
	}
}

static void set_master_stream(struct dc_stream_state *stream_set[],
			      int stream_count)
{
	int j, highest_rfr = 0, master_stream = 0;

	for (j = 0;  j < stream_count; j++) {
		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
			int refresh_rate = 0;

			refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
				(stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
			if (refresh_rate > highest_rfr) {
				highest_rfr = refresh_rate;
				master_stream = j;
			}
		}
	}
	for (j = 0;  j < stream_count; j++) {
		if (stream_set[j])
			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
	}
}

static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
{
	int i = 0;

	if (context->stream_count < 2)
		return;
	for (i = 0; i < context->stream_count ; i++) {
		if (!context->streams[i])
			continue;
		/*
		 * TODO: add a function to read AMD VSDB bits and set
		 * crtc_sync_master.multi_sync_enabled flag
		 * For now it's set to false
		 */
		set_multisync_trigger_params(context->streams[i]);
	}
	set_master_stream(context->streams, context->stream_count);
}

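/*
 * create_stream_for_sink - build a dc_stream_state for a connector
 *
 * Creates or retains the sink, fills the stream timing from the requested
 * mode and scaling state, configures DSC on DP links where supported, and
 * attaches audio, HDMI VSIF and PSR/VSC info packets as needed.
 */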
static struct dc_stream_state *
create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
		       const struct drm_display_mode *drm_mode,
		       const struct dm_connector_state *dm_state,
		       const struct dc_stream_state *old_stream,
		       int requested_bpc)
{
	struct drm_display_mode *preferred_mode = NULL;
	struct drm_connector *drm_connector;
	const struct drm_connector_state *con_state =
		dm_state ? &dm_state->base : NULL;
	struct dc_stream_state *stream = NULL;
	struct drm_display_mode mode = *drm_mode;
	bool native_mode_found = false;
	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
	int mode_refresh;
	int preferred_refresh = 0;
#if defined(CONFIG_DRM_AMD_DC_DCN)
	struct dsc_dec_dpcd_caps dsc_caps;
#endif
	uint32_t link_bandwidth_kbps;
	struct dc_sink *sink = NULL;

	if (aconnector == NULL) {
		DRM_ERROR("aconnector is NULL!\n");
		return stream;
	}

	drm_connector = &aconnector->base;

	if (!aconnector->dc_sink) {
		sink = create_fake_sink(aconnector);
		if (!sink)
			return stream;
	} else {
		sink = aconnector->dc_sink;
		dc_sink_retain(sink);
	}

	stream = dc_create_stream_for_sink(sink);

	if (stream == NULL) {
		DRM_ERROR("Failed to create stream for sink!\n");
		goto finish;
	}

	stream->dm_stream_context = aconnector;

	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;

	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
		/* Search for preferred mode */
		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
			native_mode_found = true;
			break;
		}
	}
	if (!native_mode_found)
		preferred_mode = list_first_entry_or_null(
				&aconnector->base.modes,
				struct drm_display_mode,
				head);

	mode_refresh = drm_mode_vrefresh(&mode);

	if (preferred_mode == NULL) {
		/*
		 * This may not be an error, the use case is when we have no
		 * usermode calls to reset and set mode upon hotplug. In this
		 * case, we call set mode ourselves to restore the previous mode
		 * and the mode list may not be filled in yet.
		 */
		DRM_DEBUG_DRIVER("No preferred mode found\n");
	} else {
		decide_crtc_timing_for_drm_display_mode(
				&mode, preferred_mode,
				dm_state ? (dm_state->scaling != RMX_OFF) : false);
		preferred_refresh = drm_mode_vrefresh(preferred_mode);
	}

	if (!dm_state)
		drm_mode_set_crtcinfo(&mode, 0);

	/*
	 * If scaling is enabled and the refresh rate didn't change,
	 * we copy the vic and polarities of the old timings.
	 */
	if (!scale || mode_refresh != preferred_refresh)
		fill_stream_properties_from_drm_display_mode(stream,
			&mode, &aconnector->base, con_state, NULL, requested_bpc);
	else
		fill_stream_properties_from_drm_display_mode(stream,
			&mode, &aconnector->base, con_state, old_stream, requested_bpc);

	stream->timing.flags.DSC = 0;

	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
#if defined(CONFIG_DRM_AMD_DC_DCN)
		dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
				      &dsc_caps);
#endif
		link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
							     dc_link_get_link_cap(aconnector->dc_link));

#if defined(CONFIG_DRM_AMD_DC_DCN)
		if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) {
			/* Set DSC policy according to dsc_clock_en */
			dc_dsc_policy_set_enable_dsc_when_not_needed(
				aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);

			if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
						  &dsc_caps,
						  aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
						  link_bandwidth_kbps,
						  &stream->timing,
						  &stream->timing.dsc_cfg))
				stream->timing.flags.DSC = 1;
			/* Overwrite the stream flag if DSC is enabled through debugfs */
			if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
				stream->timing.flags.DSC = 1;

			if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
				stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;

			if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
				stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;

			if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
				stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
		}
#endif
	}

	update_stream_scaling_settings(&mode, dm_state, stream);

	fill_audio_info(
		&stream->audio_info,
		drm_connector,
		sink);

	update_stream_signal(stream, sink);

	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);

	if (stream->link->psr_settings.psr_feature_enabled) {
		/*
		 * Decide whether the stream supports VSC SDP colorimetry
		 * before building the VSC info packet.
		 */
		stream->use_vsc_sdp_for_colorimetry = false;
		if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
			stream->use_vsc_sdp_for_colorimetry =
				aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
		} else {
			if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
				stream->use_vsc_sdp_for_colorimetry = true;
		}
		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
	}
finish:
	dc_sink_release(sink);

	return stream;
}

static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
{
	drm_crtc_cleanup(crtc);
	kfree(crtc);
}

static void dm_crtc_destroy_state(struct drm_crtc *crtc,
				  struct drm_crtc_state *state)
{
	struct dm_crtc_state *cur = to_dm_crtc_state(state);

	/* TODO: Destroy dc_stream objects once the stream object is flattened */
	if (cur->stream)
		dc_stream_release(cur->stream);

	__drm_atomic_helper_crtc_destroy_state(state);

	kfree(state);
}

static void dm_crtc_reset_state(struct drm_crtc *crtc)
{
	struct dm_crtc_state *state;

	if (crtc->state)
		dm_crtc_destroy_state(crtc, crtc->state);

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (WARN_ON(!state))
		return;

	__drm_atomic_helper_crtc_reset(crtc, &state->base);
}

static struct drm_crtc_state *
dm_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct dm_crtc_state *state, *cur;

	if (WARN_ON(!crtc->state))
		return NULL;

	cur = to_dm_crtc_state(crtc->state);

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);

	if (cur->stream) {
		state->stream = cur->stream;
		dc_stream_retain(state->stream);
	}

	state->active_planes = cur->active_planes;
	state->vrr_infopacket = cur->vrr_infopacket;
	state->abm_level = cur->abm_level;
	state->vrr_supported = cur->vrr_supported;
	state->freesync_config = cur->freesync_config;
	state->crc_src = cur->crc_src;
	state->cm_has_degamma = cur->cm_has_degamma;
	state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;

	/* TODO: Duplicate the dc_stream once the stream object is flattened */

	return &state->base;
}

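/*
 * dm_set_vupdate_irq - enable or disable the VUPDATE interrupt for a CRTC
 *
 * VUPDATE is only enabled alongside VBLANK when the CRTC is in VRR mode;
 * see dm_set_vblank() below.
 */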
dm_set_vupdate_irq(struct drm_crtc * crtc,bool enable)4864 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
4865 {
4866 	enum dc_irq_source irq_source;
4867 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
4868 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
4869 	int rc;
4870 
4871 	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
4872 
4873 	rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
4874 
4875 	DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
4876 			 acrtc->crtc_id, enable ? "en" : "dis", rc);
4877 	return rc;
4878 }
4879 
static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
{
	enum dc_irq_source irq_source;
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
	int rc = 0;

	if (enable) {
		/* vblank irq on -> Only need vupdate irq in vrr mode */
		if (amdgpu_dm_vrr_active(acrtc_state))
			rc = dm_set_vupdate_irq(crtc, true);
	} else {
		/* vblank irq off -> vupdate irq off */
		rc = dm_set_vupdate_irq(crtc, false);
	}

	if (rc)
		return rc;

	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
	return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
}

static int dm_enable_vblank(struct drm_crtc *crtc)
{
	return dm_set_vblank(crtc, true);
}

static void dm_disable_vblank(struct drm_crtc *crtc)
{
	dm_set_vblank(crtc, false);
}

/* Implements only the options currently available for the driver */
static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
	.reset = dm_crtc_reset_state,
	.destroy = amdgpu_dm_crtc_destroy,
	.gamma_set = drm_atomic_helper_legacy_gamma_set,
	.set_config = drm_atomic_helper_set_config,
	.page_flip = drm_atomic_helper_page_flip,
	.atomic_duplicate_state = dm_crtc_duplicate_state,
	.atomic_destroy_state = dm_crtc_destroy_state,
	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
	.enable_vblank = dm_enable_vblank,
	.disable_vblank = dm_disable_vblank,
	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
};

static enum drm_connector_status
amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
{
	bool connected;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);

	/*
	 * Notes:
	 * 1. This interface is NOT called in context of HPD irq.
	 * 2. This interface *is called* in context of user-mode ioctl, which
	 * makes it a bad place for *any* MST-related activity.
	 */

	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
	    !aconnector->fake_enable)
		connected = (aconnector->dc_sink != NULL);
	else
		connected = (aconnector->base.force == DRM_FORCE_ON);

	update_subconnector_property(aconnector);

	return (connected ? connector_status_connected :
			connector_status_disconnected);
}

int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
					    struct drm_connector_state *connector_state,
					    struct drm_property *property,
					    uint64_t val)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_connector_state *dm_old_state =
		to_dm_connector_state(connector->state);
	struct dm_connector_state *dm_new_state =
		to_dm_connector_state(connector_state);

	int ret = -EINVAL;

	if (property == dev->mode_config.scaling_mode_property) {
		enum amdgpu_rmx_type rmx_type;

		switch (val) {
		case DRM_MODE_SCALE_CENTER:
			rmx_type = RMX_CENTER;
			break;
		case DRM_MODE_SCALE_ASPECT:
			rmx_type = RMX_ASPECT;
			break;
		case DRM_MODE_SCALE_FULLSCREEN:
			rmx_type = RMX_FULL;
			break;
		case DRM_MODE_SCALE_NONE:
		default:
			rmx_type = RMX_OFF;
			break;
		}

		if (dm_old_state->scaling == rmx_type)
			return 0;

		dm_new_state->scaling = rmx_type;
		ret = 0;
	} else if (property == adev->mode_info.underscan_hborder_property) {
		dm_new_state->underscan_hborder = val;
		ret = 0;
	} else if (property == adev->mode_info.underscan_vborder_property) {
		dm_new_state->underscan_vborder = val;
		ret = 0;
	} else if (property == adev->mode_info.underscan_property) {
		dm_new_state->underscan_enable = val;
		ret = 0;
	} else if (property == adev->mode_info.abm_level_property) {
		dm_new_state->abm_level = val;
		ret = 0;
	}

	return ret;
}

int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
					    const struct drm_connector_state *state,
					    struct drm_property *property,
					    uint64_t *val)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct dm_connector_state *dm_state =
		to_dm_connector_state(state);
	int ret = -EINVAL;

	if (property == dev->mode_config.scaling_mode_property) {
		switch (dm_state->scaling) {
		case RMX_CENTER:
			*val = DRM_MODE_SCALE_CENTER;
			break;
		case RMX_ASPECT:
			*val = DRM_MODE_SCALE_ASPECT;
			break;
		case RMX_FULL:
			*val = DRM_MODE_SCALE_FULLSCREEN;
			break;
		case RMX_OFF:
		default:
			*val = DRM_MODE_SCALE_NONE;
			break;
		}
		ret = 0;
	} else if (property == adev->mode_info.underscan_hborder_property) {
		*val = dm_state->underscan_hborder;
		ret = 0;
	} else if (property == adev->mode_info.underscan_vborder_property) {
		*val = dm_state->underscan_vborder;
		ret = 0;
	} else if (property == adev->mode_info.underscan_property) {
		*val = dm_state->underscan_enable;
		ret = 0;
	} else if (property == adev->mode_info.abm_level_property) {
		*val = dm_state->abm_level;
		ret = 0;
	}

	return ret;
}

static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);

	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
}

static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	const struct dc_link *link = aconnector->dc_link;
	struct amdgpu_device *adev = drm_to_adev(connector->dev);
	struct amdgpu_display_manager *dm = &adev->dm;

	/*
	 * Call only if mst_mgr was initialized before, since it's not done
	 * for all connector types.
	 */
	if (aconnector->mst_mgr.dev)
		drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);

#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
	    link->type != dc_connection_none &&
	    dm->backlight_dev) {
		backlight_device_unregister(dm->backlight_dev);
		dm->backlight_dev = NULL;
	}
#endif

	if (aconnector->dc_em_sink)
		dc_sink_release(aconnector->dc_em_sink);
	aconnector->dc_em_sink = NULL;
	if (aconnector->dc_sink)
		dc_sink_release(aconnector->dc_sink);
	aconnector->dc_sink = NULL;

	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
	drm_connector_unregister(connector);
	drm_connector_cleanup(connector);
	if (aconnector->i2c) {
		i2c_del_adapter(&aconnector->i2c->base);
		kfree(aconnector->i2c);
	}
	kfree(aconnector->dm_dp_aux.aux.name);

	kfree(connector);
}

void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
{
	struct dm_connector_state *state =
		to_dm_connector_state(connector->state);

	if (connector->state)
		__drm_atomic_helper_connector_destroy_state(connector->state);

	kfree(state);

	state = kzalloc(sizeof(*state), GFP_KERNEL);

	if (state) {
		state->scaling = RMX_OFF;
		state->underscan_enable = false;
		state->underscan_hborder = 0;
		state->underscan_vborder = 0;
		state->base.max_requested_bpc = 8;
		state->vcpi_slots = 0;
		state->pbn = 0;
		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
			state->abm_level = amdgpu_dm_abm_level;

		__drm_atomic_helper_connector_reset(connector, &state->base);
	}
}

struct drm_connector_state *
amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
{
	struct dm_connector_state *state =
		to_dm_connector_state(connector->state);

	struct dm_connector_state *new_state =
			kmemdup(state, sizeof(*state), GFP_KERNEL);

	if (!new_state)
		return NULL;

	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);

	new_state->freesync_capable = state->freesync_capable;
	new_state->abm_level = state->abm_level;
	new_state->scaling = state->scaling;
	new_state->underscan_enable = state->underscan_enable;
	new_state->underscan_hborder = state->underscan_hborder;
	new_state->underscan_vborder = state->underscan_vborder;
	new_state->vcpi_slots = state->vcpi_slots;
	new_state->pbn = state->pbn;
	return &new_state->base;
}

static int
amdgpu_dm_connector_late_register(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector =
		to_amdgpu_dm_connector(connector);
	int r;

	if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
	    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
		amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
		r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
		if (r)
			return r;
	}

#if defined(CONFIG_DEBUG_FS)
	connector_debugfs_init(amdgpu_dm_connector);
#endif

	return 0;
}

static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
	.reset = amdgpu_dm_connector_funcs_reset,
	.detect = amdgpu_dm_connector_detect,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = amdgpu_dm_connector_destroy,
	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
	.late_register = amdgpu_dm_connector_late_register,
	.early_unregister = amdgpu_dm_connector_unregister
};

static int get_modes(struct drm_connector *connector)
{
	return amdgpu_dm_connector_get_modes(connector);
}

static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
{
	struct dc_sink_init_data init_params = {
			.link = aconnector->dc_link,
			.sink_signal = SIGNAL_TYPE_VIRTUAL
	};
	struct edid *edid;

	if (!aconnector->base.edid_blob_ptr) {
		DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
				aconnector->base.name);

		aconnector->base.force = DRM_FORCE_OFF;
		aconnector->base.override_edid = false;
		return;
	}

	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;

	aconnector->edid = edid;

	aconnector->dc_em_sink = dc_link_add_remote_sink(
		aconnector->dc_link,
		(uint8_t *)edid,
		(edid->extensions + 1) * EDID_LENGTH,
		&init_params);

	if (aconnector->base.force == DRM_FORCE_ON) {
		aconnector->dc_sink = aconnector->dc_link->local_sink ?
		aconnector->dc_link->local_sink :
		aconnector->dc_em_sink;
		dc_sink_retain(aconnector->dc_sink);
	}
}

static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
{
	struct dc_link *link = (struct dc_link *)aconnector->dc_link;

	/*
	 * In case of a headless boot with force on for a DP managed
	 * connector, these settings have to be != 0 to get an initial
	 * modeset.
	 */
	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
	}

	aconnector->base.override_edid = true;
	create_eml_sink(aconnector);
}

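/*
 * Build a dc_stream_state for the sink and validate it against DC. If
 * validation fails at the requested color depth, retry with the depth
 * lowered in steps of 2 bpc (e.g. 10 -> 8 -> 6) until DC accepts the
 * stream or the 6 bpc floor is reached.
 */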
static struct dc_stream_state *
create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
				const struct drm_display_mode *drm_mode,
				const struct dm_connector_state *dm_state,
				const struct dc_stream_state *old_stream)
{
	struct drm_connector *connector = &aconnector->base;
	struct amdgpu_device *adev = drm_to_adev(connector->dev);
	struct dc_stream_state *stream;
	const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
	int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
	enum dc_status dc_result = DC_OK;

	do {
		stream = create_stream_for_sink(aconnector, drm_mode,
						dm_state, old_stream,
						requested_bpc);
		if (stream == NULL) {
			DRM_ERROR("Failed to create stream for sink!\n");
			break;
		}

		dc_result = dc_validate_stream(adev->dm.dc, stream);

		if (dc_result != DC_OK) {
			DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
				      drm_mode->hdisplay,
				      drm_mode->vdisplay,
				      drm_mode->clock,
				      dc_result,
				      dc_status_to_str(dc_result));

			dc_stream_release(stream);
			stream = NULL;
			requested_bpc -= 2; /* lower bpc to retry validation */
		}

	} while (stream == NULL && requested_bpc >= 6);

	return stream;
}

enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
				   struct drm_display_mode *mode)
{
	int result = MODE_ERROR;
	struct dc_sink *dc_sink;
	/* TODO: Unhardcode stream count */
	struct dc_stream_state *stream;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);

	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
		return result;

	/*
	 * Only run this the first time mode_valid is called to initialize
	 * EDID mgmt
	 */
	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
		!aconnector->dc_em_sink)
		handle_edid_mgmt(aconnector);

	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;

	if (dc_sink == NULL) {
		DRM_ERROR("dc_sink is NULL!\n");
		goto fail;
	}

	stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
	if (stream) {
		dc_stream_release(stream);
		result = MODE_OK;
	}

fail:
	/* TODO: error handling */
	return result;
}

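/*
 * Pack the connector's HDR static metadata into a DC info packet. The
 * packed DRM infoframe is a fixed 30 bytes (4 byte header + 26 byte
 * payload); HDMI carries it as a Dynamic Range and Mastering infoframe,
 * while DP and eDP wrap the same payload in an SDP with its own header
 * layout, which is why the two cases below differ only in the header
 * bytes.
 */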
static int fill_hdr_info_packet(const struct drm_connector_state *state,
				struct dc_info_packet *out)
{
	struct hdmi_drm_infoframe frame;
	unsigned char buf[30]; /* 26 + 4 */
	ssize_t len;
	int ret, i;

	memset(out, 0, sizeof(*out));

	if (!state->hdr_output_metadata)
		return 0;

	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
	if (ret)
		return ret;

	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
	if (len < 0)
		return (int)len;

	/* Static metadata is a fixed 26 bytes + 4 byte header. */
	if (len != 30)
		return -EINVAL;

	/* Prepare the infopacket for DC. */
	switch (state->connector->connector_type) {
	case DRM_MODE_CONNECTOR_HDMIA:
		out->hb0 = 0x87; /* type */
		out->hb1 = 0x01; /* version */
		out->hb2 = 0x1A; /* length */
		out->sb[0] = buf[3]; /* checksum */
		i = 1;
		break;

	case DRM_MODE_CONNECTOR_DisplayPort:
	case DRM_MODE_CONNECTOR_eDP:
		out->hb0 = 0x00; /* sdp id, zero */
		out->hb1 = 0x87; /* type */
		out->hb2 = 0x1D; /* payload len - 1 */
		out->hb3 = (0x13 << 2); /* sdp version */
		out->sb[0] = 0x01; /* version */
		out->sb[1] = 0x1A; /* length */
		i = 2;
		break;

	default:
		return -EINVAL;
	}

	memcpy(&out->sb[i], &buf[4], 26);
	out->valid = true;

	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
		       sizeof(out->sb), false);

	return 0;
}

static bool
is_hdr_metadata_different(const struct drm_connector_state *old_state,
			  const struct drm_connector_state *new_state)
{
	struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
	struct drm_property_blob *new_blob = new_state->hdr_output_metadata;

	if (old_blob != new_blob) {
		if (old_blob && new_blob &&
		    old_blob->length == new_blob->length)
			return memcmp(old_blob->data, new_blob->data,
				      old_blob->length);

		return true;
	}

	return false;
}

static int
amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
				 struct drm_atomic_state *state)
{
	struct drm_connector_state *new_con_state =
		drm_atomic_get_new_connector_state(state, conn);
	struct drm_connector_state *old_con_state =
		drm_atomic_get_old_connector_state(state, conn);
	struct drm_crtc *crtc = new_con_state->crtc;
	struct drm_crtc_state *new_crtc_state;
	int ret;

	if (!crtc)
		return 0;

	if (is_hdr_metadata_different(old_con_state, new_con_state)) {
		struct dc_info_packet hdr_infopacket;

		ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
		if (ret)
			return ret;

		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
		if (IS_ERR(new_crtc_state))
			return PTR_ERR(new_crtc_state);

		/*
		 * DC considers the stream backends changed if the
		 * static metadata changes. Forcing the modeset also
		 * gives a simple way for userspace to switch from
		 * 8bpc to 10bpc when setting the metadata to enter
		 * or exit HDR.
		 *
		 * Changing the static metadata after it's been
		 * set is permissible, however. So only force a
		 * modeset if we're entering or exiting HDR.
		 */
		new_crtc_state->mode_changed =
			!old_con_state->hdr_output_metadata ||
			!new_con_state->hdr_output_metadata;
	}

	return 0;
}

static const struct drm_connector_helper_funcs
amdgpu_dm_connector_helper_funcs = {
	/*
	 * If a second, larger display is hotplugged in FB console mode, the
	 * larger resolution modes will be filtered by drm_mode_validate_size()
	 * and will be missing after the user starts lightdm. So we need to
	 * renew the mode list in the get_modes callback, not just return the
	 * mode count.
	 */
	.get_modes = get_modes,
	.mode_valid = amdgpu_dm_connector_mode_valid,
	.atomic_check = amdgpu_dm_connector_atomic_check,
};

static void dm_crtc_helper_disable(struct drm_crtc *crtc)
{
}

static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
{
	struct drm_atomic_state *state = new_crtc_state->state;
	struct drm_plane *plane;
	int num_active = 0;

	drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
		struct drm_plane_state *new_plane_state;

		/* Cursor planes are "fake". */
		if (plane->type == DRM_PLANE_TYPE_CURSOR)
			continue;

		new_plane_state = drm_atomic_get_new_plane_state(state, plane);

		if (!new_plane_state) {
			/*
			 * The plane is enabled on the CRTC and hasn't changed
			 * state. This means that it previously passed
			 * validation and is therefore enabled.
			 */
			num_active += 1;
			continue;
		}

		/* We need a framebuffer to be considered enabled. */
		num_active += (new_plane_state->fb != NULL);
	}

	return num_active;
}

static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
					 struct drm_crtc_state *new_crtc_state)
{
	struct dm_crtc_state *dm_new_crtc_state =
		to_dm_crtc_state(new_crtc_state);

	dm_new_crtc_state->active_planes = 0;

	if (!dm_new_crtc_state->stream)
		return;

	dm_new_crtc_state->active_planes =
		count_crtc_active_planes(new_crtc_state);
}

static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
				       struct drm_crtc_state *state)
{
	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
	struct dc *dc = adev->dm.dc;
	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
	int ret = -EINVAL;

	dm_update_crtc_active_planes(crtc, state);

	if (unlikely(!dm_crtc_state->stream &&
		     modeset_required(state, NULL, dm_crtc_state->stream))) {
		WARN_ON(1);
		return ret;
	}

	/*
	 * We require the primary plane to be enabled whenever the CRTC is, otherwise
	 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
	 * planes are disabled, which is not supported by the hardware. And there is legacy
	 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
	 */
	if (state->enable &&
	    !(state->plane_mask & drm_plane_mask(crtc->primary)))
		return -EINVAL;

	/* In some use cases, like reset, no stream is attached */
	if (!dm_crtc_state->stream)
		return 0;

	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
		return 0;

	return ret;
}

static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
				      const struct drm_display_mode *mode,
				      struct drm_display_mode *adjusted_mode)
{
	return true;
}

static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
	.disable = dm_crtc_helper_disable,
	.atomic_check = dm_crtc_helper_atomic_check,
	.mode_fixup = dm_crtc_helper_mode_fixup,
	.get_scanout_position = amdgpu_crtc_get_scanout_position,
};

static void dm_encoder_helper_disable(struct drm_encoder *encoder)
{
}

static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
{
	switch (display_color_depth) {
	case COLOR_DEPTH_666:
		return 6;
	case COLOR_DEPTH_888:
		return 8;
	case COLOR_DEPTH_101010:
		return 10;
	case COLOR_DEPTH_121212:
		return 12;
	case COLOR_DEPTH_141414:
		return 14;
	case COLOR_DEPTH_161616:
		return 16;
	default:
		break;
	}
	return 0;
}

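/*
 * For MST connectors, convert the mode's bandwidth needs into PBN
 * (Payload Bandwidth Number) units and atomically reserve VCPI time
 * slots for them: bpp is derived from the negotiated color depth
 * (3 components per pixel), and PBN is computed from the pixel clock
 * so the MST manager can check the reservation against link capacity.
 */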
static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
					  struct drm_crtc_state *crtc_state,
					  struct drm_connector_state *conn_state)
{
	struct drm_atomic_state *state = crtc_state->state;
	struct drm_connector *connector = conn_state->connector;
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
	struct drm_dp_mst_topology_mgr *mst_mgr;
	struct drm_dp_mst_port *mst_port;
	enum dc_color_depth color_depth;
	int clock, bpp = 0;
	bool is_y420 = false;

	if (!aconnector->port || !aconnector->dc_sink)
		return 0;

	mst_port = aconnector->port;
	mst_mgr = &aconnector->mst_port->mst_mgr;

	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
		return 0;

	if (!state->duplicated) {
		int max_bpc = conn_state->max_requested_bpc;

		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
				aconnector->force_yuv420_output;
		color_depth = convert_color_depth_from_display_info(connector,
								    is_y420,
								    max_bpc);
		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
		clock = adjusted_mode->clock;
		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
	}
	dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
									   mst_mgr,
									   mst_port,
									   dm_new_connector_state->pbn,
									   dm_mst_get_pbn_divider(aconnector->dc_link));
	if (dm_new_connector_state->vcpi_slots < 0) {
		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
		return dm_new_connector_state->vcpi_slots;
	}
	return 0;
}

const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
	.disable = dm_encoder_helper_disable,
	.atomic_check = dm_encoder_helper_atomic_check
};

#if defined(CONFIG_DRM_AMD_DC_DCN)
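/*
 * When DSC is enabled on an MST stream the payload shrinks, so the PBN
 * and VCPI slot counts computed earlier from the uncompressed timing
 * must be redone from the compressed bits-per-pixel. Streams without
 * DSC keep their existing PBN and simply have DSC disabled in the
 * atomic MST state.
 */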
static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
					    struct dc_state *dc_state)
{
	struct dc_stream_state *stream = NULL;
	struct drm_connector *connector;
	struct drm_connector_state *new_con_state, *old_con_state;
	struct amdgpu_dm_connector *aconnector;
	struct dm_connector_state *dm_conn_state;
	int i, j, clock, bpp;
	int vcpi, pbn_div, pbn = 0;

	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {

		aconnector = to_amdgpu_dm_connector(connector);

		if (!aconnector->port)
			continue;

		if (!new_con_state || !new_con_state->crtc)
			continue;

		dm_conn_state = to_dm_connector_state(new_con_state);

		for (j = 0; j < dc_state->stream_count; j++) {
			stream = dc_state->streams[j];
			if (!stream)
				continue;

			if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
				break;

			stream = NULL;
		}

		if (!stream)
			continue;

		if (stream->timing.flags.DSC != 1) {
			drm_dp_mst_atomic_enable_dsc(state,
						     aconnector->port,
						     dm_conn_state->pbn,
						     0,
						     false);
			continue;
		}

		pbn_div = dm_mst_get_pbn_divider(stream->link);
		bpp = stream->timing.dsc_cfg.bits_per_pixel;
		clock = stream->timing.pix_clk_100hz / 10;
		pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
		vcpi = drm_dp_mst_atomic_enable_dsc(state,
						    aconnector->port,
						    pbn, pbn_div,
						    true);
		if (vcpi < 0)
			return vcpi;

		dm_conn_state->pbn = pbn;
		dm_conn_state->vcpi_slots = vcpi;
	}
	return 0;
}
#endif

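/*
 * DM plane state handling: dm_plane_state wraps drm_plane_state and
 * carries a reference-counted dc_plane_state, so the reset/duplicate/
 * destroy hooks below pair every copy of the pointer with a
 * dc_plane_state_retain()/release() to keep DC's refcounting balanced
 * across atomic commits.
 */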
static void dm_drm_plane_reset(struct drm_plane *plane)
{
	struct dm_plane_state *amdgpu_state = NULL;

	if (plane->state)
		plane->funcs->atomic_destroy_state(plane, plane->state);

	amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
	WARN_ON(amdgpu_state == NULL);

	if (amdgpu_state)
		__drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
}

static struct drm_plane_state *
dm_drm_plane_duplicate_state(struct drm_plane *plane)
{
	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;

	old_dm_plane_state = to_dm_plane_state(plane->state);
	dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
	if (!dm_plane_state)
		return NULL;

	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);

	if (old_dm_plane_state->dc_state) {
		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
		dc_plane_state_retain(dm_plane_state->dc_state);
	}

	/* Framebuffer hasn't been updated yet, so retain old flags. */
	dm_plane_state->tiling_flags = old_dm_plane_state->tiling_flags;
	dm_plane_state->tmz_surface = old_dm_plane_state->tmz_surface;

	return &dm_plane_state->base;
}

static void dm_drm_plane_destroy_state(struct drm_plane *plane,
				struct drm_plane_state *state)
{
	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);

	if (dm_plane_state->dc_state)
		dc_plane_state_release(dm_plane_state->dc_state);

	drm_atomic_helper_plane_destroy_state(plane, state);
}

static const struct drm_plane_funcs dm_plane_funcs = {
	.update_plane	= drm_atomic_helper_update_plane,
	.disable_plane	= drm_atomic_helper_disable_plane,
	.destroy	= drm_primary_helper_destroy,
	.reset = dm_drm_plane_reset,
	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
	.atomic_destroy_state = dm_drm_plane_destroy_state,
};

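/*
 * prepare_fb pins the framebuffer's buffer object so its GPU address
 * stays valid for scanout: reserve the BO, pin it into a scanout-capable
 * domain (VRAM for cursors), bind it into GART if needed, and record the
 * resulting address. Buffer attributes are filled in here only for newly
 * created planes, since those aren't referenced by DC yet.
 */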
static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
				      struct drm_plane_state *new_state)
{
	struct amdgpu_framebuffer *afb;
	struct drm_gem_object *obj;
	struct amdgpu_device *adev;
	struct amdgpu_bo *rbo;
	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
	struct list_head list;
	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	uint32_t domain;
	int r;

	if (!new_state->fb) {
		DRM_DEBUG_DRIVER("No FB bound\n");
		return 0;
	}

	afb = to_amdgpu_framebuffer(new_state->fb);
	obj = new_state->fb->obj[0];
	rbo = gem_to_amdgpu_bo(obj);
	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
	INIT_LIST_HEAD(&list);

	tv.bo = &rbo->tbo;
	tv.num_shared = 1;
	list_add(&tv.head, &list);

	r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
	if (r) {
		dev_err(adev->dev, "failed to reserve bo (%d)\n", r);
		return r;
	}

	if (plane->type != DRM_PLANE_TYPE_CURSOR)
		domain = amdgpu_display_supported_domains(adev, rbo->flags);
	else
		domain = AMDGPU_GEM_DOMAIN_VRAM;

	r = amdgpu_bo_pin(rbo, domain);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
		ttm_eu_backoff_reservation(&ticket, &list);
		return r;
	}

	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
	if (unlikely(r != 0)) {
		amdgpu_bo_unpin(rbo);
		ttm_eu_backoff_reservation(&ticket, &list);
		DRM_ERROR("%p bind failed\n", rbo);
		return r;
	}

	ttm_eu_backoff_reservation(&ticket, &list);

	afb->address = amdgpu_bo_gpu_offset(rbo);

	amdgpu_bo_ref(rbo);

	/*
	 * We don't do surface updates on planes that have been newly created,
	 * but we also don't have the afb->address during atomic check.
	 *
	 * Fill in buffer attributes depending on the address here, but only on
	 * newly created planes since they're not being used by DC yet and this
	 * won't modify global state.
	 */
	dm_plane_state_old = to_dm_plane_state(plane->state);
	dm_plane_state_new = to_dm_plane_state(new_state);

	if (dm_plane_state_new->dc_state &&
	    dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
		struct dc_plane_state *plane_state =
			dm_plane_state_new->dc_state;
		bool force_disable_dcc = !plane_state->dcc.enable;

		fill_plane_buffer_attributes(
			adev, afb, plane_state->format, plane_state->rotation,
			dm_plane_state_new->tiling_flags,
			&plane_state->tiling_info, &plane_state->plane_size,
			&plane_state->dcc, &plane_state->address,
			dm_plane_state_new->tmz_surface, force_disable_dcc);
	}

	return 0;
}

static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
				       struct drm_plane_state *old_state)
{
	struct amdgpu_bo *rbo;
	int r;

	if (!old_state->fb)
		return;

	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
	r = amdgpu_bo_reserve(rbo, false);
	if (unlikely(r)) {
		DRM_ERROR("failed to reserve rbo before unpin\n");
		return;
	}

	amdgpu_bo_unpin(rbo);
	amdgpu_bo_unreserve(rbo);
	amdgpu_bo_unref(&rbo);
}

static int dm_plane_helper_check_state(struct drm_plane_state *state,
				       struct drm_crtc_state *new_crtc_state)
{
	int max_downscale = 0;
	int max_upscale = INT_MAX;

	/* TODO: These should be checked against DC plane caps */
	return drm_atomic_helper_check_plane_state(
		state, new_crtc_state, max_downscale, max_upscale, true, true);
}

static int dm_plane_atomic_check(struct drm_plane *plane,
				 struct drm_plane_state *state)
{
	struct amdgpu_device *adev = drm_to_adev(plane->dev);
	struct dc *dc = adev->dm.dc;
	struct dm_plane_state *dm_plane_state;
	struct dc_scaling_info scaling_info;
	struct drm_crtc_state *new_crtc_state;
	int ret;

	dm_plane_state = to_dm_plane_state(state);

	if (!dm_plane_state->dc_state)
		return 0;

	new_crtc_state =
		drm_atomic_get_new_crtc_state(state->state, state->crtc);
	if (!new_crtc_state)
		return -EINVAL;

	ret = dm_plane_helper_check_state(state, new_crtc_state);
	if (ret)
		return ret;

	ret = fill_dc_scaling_info(state, &scaling_info);
	if (ret)
		return ret;

	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
		return 0;

	return -EINVAL;
}

static int dm_plane_atomic_async_check(struct drm_plane *plane,
				       struct drm_plane_state *new_plane_state)
{
	/* Only support async updates on cursor planes. */
	if (plane->type != DRM_PLANE_TYPE_CURSOR)
		return -EINVAL;

	return 0;
}

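/*
 * Async (fast-path) plane updates skip a full atomic commit, so they are
 * restricted to the cursor plane by the check above; the update below
 * simply copies the new src/crtc coordinates into the current state and
 * programs the cursor directly.
 */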
static void dm_plane_atomic_async_update(struct drm_plane *plane,
					 struct drm_plane_state *new_state)
{
	struct drm_plane_state *old_state =
		drm_atomic_get_old_plane_state(new_state->state, plane);

	swap(plane->state->fb, new_state->fb);

	plane->state->src_x = new_state->src_x;
	plane->state->src_y = new_state->src_y;
	plane->state->src_w = new_state->src_w;
	plane->state->src_h = new_state->src_h;
	plane->state->crtc_x = new_state->crtc_x;
	plane->state->crtc_y = new_state->crtc_y;
	plane->state->crtc_w = new_state->crtc_w;
	plane->state->crtc_h = new_state->crtc_h;

	handle_cursor_update(plane, old_state);
}

static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
	.prepare_fb = dm_plane_helper_prepare_fb,
	.cleanup_fb = dm_plane_helper_cleanup_fb,
	.atomic_check = dm_plane_atomic_check,
	.atomic_async_check = dm_plane_atomic_async_check,
	.atomic_async_update = dm_plane_atomic_async_update
};

/*
 * TODO: These are currently initialized to RGB formats only.
 * For future use cases we should either initialize them dynamically based on
 * plane capabilities, or initialize this array to all formats, so the internal
 * DRM check will succeed, and let DC implement the proper check.
 */
static const uint32_t rgb_formats[] = {
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_RGBA8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
	DRM_FORMAT_ARGB2101010,
	DRM_FORMAT_ABGR2101010,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_RGB565,
};

static const uint32_t overlay_formats[] = {
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_RGBA8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_RGB565
};

static const u32 cursor_formats[] = {
	DRM_FORMAT_ARGB8888
};

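/*
 * Collect the DRM formats to expose for a plane based on its type; the
 * YUV and FP16 primary-plane formats are only added when the DC plane
 * caps report support for them. max_formats guards the table-driven
 * loops against overflowing the caller's array.
 */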
static int get_plane_formats(const struct drm_plane *plane,
			     const struct dc_plane_cap *plane_cap,
			     uint32_t *formats, int max_formats)
{
	int i, num_formats = 0;

	/*
	 * TODO: Query support for each group of formats directly from
	 * DC plane caps. This will require adding more formats to the
	 * caps list.
	 */

	switch (plane->type) {
	case DRM_PLANE_TYPE_PRIMARY:
		for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
			if (num_formats >= max_formats)
				break;

			formats[num_formats++] = rgb_formats[i];
		}

		if (plane_cap && plane_cap->pixel_format_support.nv12)
			formats[num_formats++] = DRM_FORMAT_NV12;
		if (plane_cap && plane_cap->pixel_format_support.p010)
			formats[num_formats++] = DRM_FORMAT_P010;
		if (plane_cap && plane_cap->pixel_format_support.fp16) {
			formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
			formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
			formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
			formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
		}
		break;

	case DRM_PLANE_TYPE_OVERLAY:
		for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
			if (num_formats >= max_formats)
				break;

			formats[num_formats++] = overlay_formats[i];
		}
		break;

	case DRM_PLANE_TYPE_CURSOR:
		for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
			if (num_formats >= max_formats)
				break;

			formats[num_formats++] = cursor_formats[i];
		}
		break;
	}

	return num_formats;
}

static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
				struct drm_plane *plane,
				unsigned long possible_crtcs,
				const struct dc_plane_cap *plane_cap)
{
	uint32_t formats[32];
	int num_formats;
	int res = -EPERM;
	unsigned int supported_rotations;

	num_formats = get_plane_formats(plane, plane_cap, formats,
					ARRAY_SIZE(formats));

	res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
				       &dm_plane_funcs, formats, num_formats,
				       NULL, plane->type, NULL);
	if (res)
		return res;

	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
	    plane_cap && plane_cap->per_pixel_alpha) {
		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
					  BIT(DRM_MODE_BLEND_PREMULTI);

		drm_plane_create_alpha_property(plane);
		drm_plane_create_blend_mode_property(plane, blend_caps);
	}

	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
	    plane_cap &&
	    (plane_cap->pixel_format_support.nv12 ||
	     plane_cap->pixel_format_support.p010)) {
		/* This only affects YUV formats. */
		drm_plane_create_color_properties(
			plane,
			BIT(DRM_COLOR_YCBCR_BT601) |
			BIT(DRM_COLOR_YCBCR_BT709) |
			BIT(DRM_COLOR_YCBCR_BT2020),
			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
	}

	supported_rotations =
		DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
		DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;

	if (dm->adev->asic_type >= CHIP_BONAIRE)
		drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
						   supported_rotations);

	drm_plane_helper_add(plane, &dm_plane_helper_funcs);

	/* Create (reset) the plane state */
	if (plane->funcs->reset)
		plane->funcs->reset(plane);

	return 0;
}

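/*
 * Each CRTC gets its own dedicated cursor plane here (rather than one
 * shared cursor), created before the CRTC itself so it can be handed to
 * drm_crtc_init_with_planes() alongside the primary plane.
 */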
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			       struct drm_plane *plane,
			       uint32_t crtc_index)
{
	struct amdgpu_crtc *acrtc = NULL;
	struct drm_plane *cursor_plane;

	int res = -ENOMEM;

	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
	if (!cursor_plane)
		goto fail;

	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
	res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
	if (res)
		goto fail;

	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
	if (!acrtc) {
		res = -ENOMEM;
		goto fail;
	}

	res = drm_crtc_init_with_planes(
			dm->ddev,
			&acrtc->base,
			plane,
			cursor_plane,
			&amdgpu_dm_crtc_funcs, NULL);

	if (res)
		goto fail;

	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);

	/* Create (reset) the CRTC state */
	if (acrtc->base.funcs->reset)
		acrtc->base.funcs->reset(&acrtc->base);

	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;

	acrtc->crtc_id = crtc_index;
	acrtc->base.enabled = false;
	acrtc->otg_inst = -1;

	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
	drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
				   true, MAX_COLOR_LUT_ENTRIES);
	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);

	return 0;

fail:
	kfree(acrtc);
	kfree(cursor_plane);
	return res;
}

static int to_drm_connector_type(enum signal_type st)
{
	switch (st) {
	case SIGNAL_TYPE_HDMI_TYPE_A:
		return DRM_MODE_CONNECTOR_HDMIA;
	case SIGNAL_TYPE_EDP:
		return DRM_MODE_CONNECTOR_eDP;
	case SIGNAL_TYPE_LVDS:
		return DRM_MODE_CONNECTOR_LVDS;
	case SIGNAL_TYPE_RGB:
		return DRM_MODE_CONNECTOR_VGA;
	case SIGNAL_TYPE_DISPLAY_PORT:
	case SIGNAL_TYPE_DISPLAY_PORT_MST:
		return DRM_MODE_CONNECTOR_DisplayPort;
	case SIGNAL_TYPE_DVI_DUAL_LINK:
	case SIGNAL_TYPE_DVI_SINGLE_LINK:
		return DRM_MODE_CONNECTOR_DVID;
	case SIGNAL_TYPE_VIRTUAL:
		return DRM_MODE_CONNECTOR_VIRTUAL;

	default:
		return DRM_MODE_CONNECTOR_Unknown;
	}
}

static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
{
	struct drm_encoder *encoder;

	/* There is only one encoder per connector */
	drm_connector_for_each_possible_encoder(connector, encoder)
		return encoder;

	return NULL;
}

static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
{
	struct drm_encoder *encoder;
	struct amdgpu_encoder *amdgpu_encoder;

	encoder = amdgpu_dm_connector_to_encoder(connector);

	if (encoder == NULL)
		return;

	amdgpu_encoder = to_amdgpu_encoder(encoder);

	amdgpu_encoder->native_mode.clock = 0;

	if (!list_empty(&connector->probed_modes)) {
		struct drm_display_mode *preferred_mode = NULL;

		/*
		 * The unconditional break below means only the first entry of
		 * the probed list is examined; the list was sorted by
		 * drm_mode_sort() beforehand, so that entry is the
		 * highest-priority mode.
		 */
		list_for_each_entry(preferred_mode,
				    &connector->probed_modes,
				    head) {
			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
				amdgpu_encoder->native_mode = *preferred_mode;

			break;
		}
	}
}

static struct drm_display_mode *
amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
			     char *name,
			     int hdisplay, int vdisplay)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct drm_display_mode *mode = NULL;
	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;

	mode = drm_mode_duplicate(dev, native_mode);

	if (mode == NULL)
		return NULL;

	mode->hdisplay = hdisplay;
	mode->vdisplay = vdisplay;
	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);

	return mode;
}

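/*
 * Advertise a fixed table of common modes smaller than the panel's
 * native mode. These are duplicated from the native timing with only
 * hdisplay/vdisplay changed, on the assumption that the display
 * hardware scales them to the native resolution; modes already probed
 * or larger than native are skipped.
 */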
static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
						 struct drm_connector *connector)
{
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct drm_display_mode *mode = NULL;
	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
	struct amdgpu_dm_connector *amdgpu_dm_connector =
				to_amdgpu_dm_connector(connector);
	int i;
	int n;
	struct mode_size {
		char name[DRM_DISPLAY_MODE_LEN];
		int w;
		int h;
	} common_modes[] = {
		{  "640x480",  640,  480},
		{  "800x600",  800,  600},
		{ "1024x768", 1024,  768},
		{ "1280x720", 1280,  720},
		{ "1280x800", 1280,  800},
		{"1280x1024", 1280, 1024},
		{ "1440x900", 1440,  900},
		{"1680x1050", 1680, 1050},
		{"1600x1200", 1600, 1200},
		{"1920x1080", 1920, 1080},
		{"1920x1200", 1920, 1200}
	};

	n = ARRAY_SIZE(common_modes);

	for (i = 0; i < n; i++) {
		struct drm_display_mode *curmode = NULL;
		bool mode_existed = false;

		if (common_modes[i].w > native_mode->hdisplay ||
		    common_modes[i].h > native_mode->vdisplay ||
		   (common_modes[i].w == native_mode->hdisplay &&
		    common_modes[i].h == native_mode->vdisplay))
			continue;

		list_for_each_entry(curmode, &connector->probed_modes, head) {
			if (common_modes[i].w == curmode->hdisplay &&
			    common_modes[i].h == curmode->vdisplay) {
				mode_existed = true;
				break;
			}
		}

		if (mode_existed)
			continue;

		mode = amdgpu_dm_create_common_mode(encoder,
				common_modes[i].name, common_modes[i].w,
				common_modes[i].h);
		if (!mode)
			continue;

		drm_mode_probed_add(connector, mode);
		amdgpu_dm_connector->num_modes++;
	}
}

static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
					      struct edid *edid)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector =
			to_amdgpu_dm_connector(connector);

	if (edid) {
		/* empty probed_modes */
		INIT_LIST_HEAD(&connector->probed_modes);
		amdgpu_dm_connector->num_modes =
				drm_add_edid_modes(connector, edid);

		/*
		 * Sort the probed modes before calling
		 * amdgpu_dm_get_native_mode(), since an EDID can have
		 * more than one preferred mode. The modes that are
		 * later in the probed mode list could be of higher
		 * and preferred resolution. For example, a 3840x2160
		 * resolution in the base EDID preferred timing and a
		 * 4096x2160 preferred resolution in a DID extension
		 * block later.
		 */
		drm_mode_sort(&connector->probed_modes);
		amdgpu_dm_get_native_mode(connector);
	} else {
		amdgpu_dm_connector->num_modes = 0;
	}
}

static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector =
			to_amdgpu_dm_connector(connector);
	struct drm_encoder *encoder;
	struct edid *edid = amdgpu_dm_connector->edid;

	encoder = amdgpu_dm_connector_to_encoder(connector);

	if (!edid || !drm_edid_is_valid(edid)) {
		amdgpu_dm_connector->num_modes =
				drm_add_modes_noedid(connector, 640, 480);
	} else {
		amdgpu_dm_connector_ddc_get_modes(connector, edid);
		amdgpu_dm_connector_add_common_modes(encoder, connector);
	}
	amdgpu_dm_fbc_init(connector);

	return amdgpu_dm_connector->num_modes;
}

void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
				     struct amdgpu_dm_connector *aconnector,
				     int connector_type,
				     struct dc_link *link,
				     int link_index)
{
	struct amdgpu_device *adev = drm_to_adev(dm->ddev);

	/*
	 * Some of the properties below require access to state, like bpc.
	 * Allocate some default initial connector state with our reset helper.
	 */
	if (aconnector->base.funcs->reset)
		aconnector->base.funcs->reset(&aconnector->base);

	aconnector->connector_id = link_index;
	aconnector->dc_link = link;
	aconnector->base.interlace_allowed = false;
	aconnector->base.doublescan_allowed = false;
	aconnector->base.stereo_allowed = false;
	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
	aconnector->audio_inst = -1;
	mutex_init(&aconnector->hpd_lock);

	/*
	 * Configure HPD hot plug support. connector->polled defaults to 0,
	 * which means HPD hot plug is not supported.
	 */
	switch (connector_type) {
	case DRM_MODE_CONNECTOR_HDMIA:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		aconnector->base.ycbcr_420_allowed =
			link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
		break;
	case DRM_MODE_CONNECTOR_DisplayPort:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		aconnector->base.ycbcr_420_allowed =
			link->link_enc->features.dp_ycbcr420_supported ? true : false;
		break;
	case DRM_MODE_CONNECTOR_DVID:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		break;
	default:
		break;
	}

	drm_object_attach_property(&aconnector->base.base,
				dm->ddev->mode_config.scaling_mode_property,
				DRM_MODE_SCALE_NONE);

	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_property,
				UNDERSCAN_OFF);
	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_hborder_property,
				0);
	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_vborder_property,
				0);

	if (!aconnector->mst_port)
		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);

	/* This defaults to the max in the range, but we want 8bpc for non-eDP. */
	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;

	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
	    (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
		drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.abm_level_property, 0);
	}

	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
	    connector_type == DRM_MODE_CONNECTOR_eDP) {
		drm_object_attach_property(
			&aconnector->base.base,
			dm->ddev->mode_config.hdr_output_metadata_property, 0);

		if (!aconnector->mst_port)
			drm_connector_attach_vrr_capable_property(&aconnector->base);

#ifdef CONFIG_DRM_AMD_DC_HDCP
		if (adev->dm.hdcp_workqueue)
			drm_connector_attach_content_protection_property(&aconnector->base, true);
#endif
	}
}

static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
			      struct i2c_msg *msgs, int num)
{
	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
	struct ddc_service *ddc_service = i2c->ddc_service;
	struct i2c_command cmd;
	int i;
	int result = -EIO;

	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);

	if (!cmd.payloads)
		return result;

	cmd.number_of_payloads = num;
	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
	cmd.speed = 100;

	for (i = 0; i < num; i++) {
		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
		cmd.payloads[i].address = msgs[i].addr;
		cmd.payloads[i].length = msgs[i].len;
		cmd.payloads[i].data = msgs[i].buf;
	}

	if (dc_submit_i2c(
			ddc_service->ctx->dc,
			ddc_service->ddc_pin->hw_info.ddc_channel,
			&cmd))
		result = num;

	kfree(cmd.payloads);
	return result;
}

static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}

static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
	.master_xfer = amdgpu_dm_i2c_xfer,
	.functionality = amdgpu_dm_i2c_func,
};

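/*
 * Wrap a DC DDC service in a Linux i2c_adapter so standard i2c users
 * (EDID fetch, userspace dev interfaces) go through DC's i2c engine;
 * transfers are translated into dc_submit_i2c() payloads by the algo
 * above.
 */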
6502 static struct amdgpu_i2c_adapter *
create_i2c(struct ddc_service * ddc_service,int link_index,int * res)6503 create_i2c(struct ddc_service *ddc_service,
6504 	   int link_index,
6505 	   int *res)
6506 {
6507 	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
6508 	struct amdgpu_i2c_adapter *i2c;
6509 
6510 	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
6511 	if (!i2c)
6512 		return NULL;
6513 	i2c->base.owner = THIS_MODULE;
6514 	i2c->base.class = I2C_CLASS_DDC;
6515 	i2c->base.dev.parent = &adev->pdev->dev;
6516 	i2c->base.algo = &amdgpu_dm_i2c_algo;
6517 	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
6518 	i2c_set_adapdata(&i2c->base, i2c);
6519 	i2c->ddc_service = ddc_service;
6520 	i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
6521 
6522 	return i2c;
6523 }
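/*
 * The caller owns the allocation returned by create_i2c(): as
 * amdgpu_dm_connector_init() below shows, it must register the embedded
 * adapter with i2c_add_adapter() and kfree() the structure itself if
 * registration or connector initialization fails.
 */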
6524 
6525 
6526 /*
6527  * Note: this function assumes that dc_link_detect() was called for the
6528  * dc_link which will be represented by this aconnector.
6529  */
6530 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
6531 				    struct amdgpu_dm_connector *aconnector,
6532 				    uint32_t link_index,
6533 				    struct amdgpu_encoder *aencoder)
6534 {
6535 	int res = 0;
6536 	int connector_type;
6537 	struct dc *dc = dm->dc;
6538 	struct dc_link *link = dc_get_link_at_index(dc, link_index);
6539 	struct amdgpu_i2c_adapter *i2c;
6540 
6541 	link->priv = aconnector;
6542 
6543 	DRM_DEBUG_DRIVER("%s()\n", __func__);
6544 
6545 	i2c = create_i2c(link->ddc, link->link_index, &res);
6546 	if (!i2c) {
6547 		DRM_ERROR("Failed to create i2c adapter data\n");
6548 		return -ENOMEM;
6549 	}
6550 
6551 	aconnector->i2c = i2c;
6552 	res = i2c_add_adapter(&i2c->base);
6553 
6554 	if (res) {
6555 		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
6556 		goto out_free;
6557 	}
6558 
6559 	connector_type = to_drm_connector_type(link->connector_signal);
6560 
6561 	res = drm_connector_init_with_ddc(
6562 			dm->ddev,
6563 			&aconnector->base,
6564 			&amdgpu_dm_connector_funcs,
6565 			connector_type,
6566 			&i2c->base);
6567 
6568 	if (res) {
6569 		DRM_ERROR("connector_init failed\n");
6570 		aconnector->connector_id = -1;
6571 		goto out_free;
6572 	}
6573 
6574 	drm_connector_helper_add(
6575 			&aconnector->base,
6576 			&amdgpu_dm_connector_helper_funcs);
6577 
6578 	amdgpu_dm_connector_init_helper(
6579 		dm,
6580 		aconnector,
6581 		connector_type,
6582 		link,
6583 		link_index);
6584 
6585 	drm_connector_attach_encoder(
6586 		&aconnector->base, &aencoder->base);
6587 
6588 	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
6589 		|| connector_type == DRM_MODE_CONNECTOR_eDP)
6590 		amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
6591 
6592 out_free:
6593 	if (res) {
6594 		kfree(i2c);
6595 		aconnector->i2c = NULL;
6596 	}
6597 	return res;
6598 }
6599 
6600 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
6601 {
6602 	switch (adev->mode_info.num_crtc) {
6603 	case 1:
6604 		return 0x1;
6605 	case 2:
6606 		return 0x3;
6607 	case 3:
6608 		return 0x7;
6609 	case 4:
6610 		return 0xf;
6611 	case 5:
6612 		return 0x1f;
6613 	case 6:
6614 	default:
6615 		return 0x3f;
6616 	}
6617 }
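/*
 * The switch above is equivalent to ((1 << num_crtc) - 1) clamped to the
 * 6-CRTC maximum, i.e. one possible_crtcs bit per CRTC: num_crtc == 4
 * yields 0xf, while 6 or any unexpected value yields 0x3f.
 */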
6618 
6619 static int amdgpu_dm_encoder_init(struct drm_device *dev,
6620 				  struct amdgpu_encoder *aencoder,
6621 				  uint32_t link_index)
6622 {
6623 	struct amdgpu_device *adev = drm_to_adev(dev);
6624 
6625 	int res = drm_encoder_init(dev,
6626 				   &aencoder->base,
6627 				   &amdgpu_dm_encoder_funcs,
6628 				   DRM_MODE_ENCODER_TMDS,
6629 				   NULL);
6630 
6631 	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
6632 
6633 	if (!res)
6634 		aencoder->encoder_id = link_index;
6635 	else
6636 		aencoder->encoder_id = -1;
6637 
6638 	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
6639 
6640 	return res;
6641 }
6642 
6643 static void manage_dm_interrupts(struct amdgpu_device *adev,
6644 				 struct amdgpu_crtc *acrtc,
6645 				 bool enable)
6646 {
6647 	/*
6648 	 * We have no guarantee that the frontend index maps to the same
6649 	 * backend index - some even map to more than one.
6650 	 *
6651 	 * TODO: Use a different interrupt or check DC itself for the mapping.
6652 	 */
6653 	int irq_type =
6654 		amdgpu_display_crtc_idx_to_irq_type(
6655 			adev,
6656 			acrtc->crtc_id);
6657 
6658 	if (enable) {
6659 		drm_crtc_vblank_on(&acrtc->base);
6660 		amdgpu_irq_get(
6661 			adev,
6662 			&adev->pageflip_irq,
6663 			irq_type);
6664 	} else {
6665 
6666 		amdgpu_irq_put(
6667 			adev,
6668 			&adev->pageflip_irq,
6669 			irq_type);
6670 		drm_crtc_vblank_off(&acrtc->base);
6671 	}
6672 }
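/*
 * Note the ordering above: on enable, vblank handling is switched on
 * before the pageflip IRQ reference is taken; on disable, the reference
 * is dropped before drm_crtc_vblank_off(), mirroring the enable path in
 * reverse.
 */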
6673 
6674 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
6675 				      struct amdgpu_crtc *acrtc)
6676 {
6677 	int irq_type =
6678 		amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
6679 
6680 	/*
6681 	 * This reads the current state for the IRQ and force-reapplies
6682 	 * the setting to hardware.
6683 	 */
6684 	amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
6685 }
6686 
6687 static bool
6688 is_scaling_state_different(const struct dm_connector_state *dm_state,
6689 			   const struct dm_connector_state *old_dm_state)
6690 {
6691 	if (dm_state->scaling != old_dm_state->scaling)
6692 		return true;
6693 	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
6694 		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
6695 			return true;
6696 	} else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
6697 		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
6698 			return true;
6699 	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
6700 		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
6701 		return true;
6702 	return false;
6703 }
6704 
6705 #ifdef CONFIG_DRM_AMD_DC_HDCP
6706 static bool is_content_protection_different(struct drm_connector_state *state,
6707 					    const struct drm_connector_state *old_state,
6708 					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
6709 {
6710 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6711 
6712 	if (old_state->hdcp_content_type != state->hdcp_content_type &&
6713 	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
6714 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6715 		return true;
6716 	}
6717 
6718 	/* CP is being re-enabled, ignore this. */
6719 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
6720 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
6721 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
6722 		return false;
6723 	}
6724 
6725 	/* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED */
6726 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
6727 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
6728 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
6729 
6730 	/* Only enable HDCP if something is actually connected and enabled; otherwise
6731 	 * we would start HDCP with no sink. Covers hot-plug, headless S3 and DPMS.
6732 	 */
6733 	if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && connector->dpms == DRM_MODE_DPMS_ON &&
6734 	    aconnector->dc_sink != NULL)
6735 		return true;
6736 
6737 	if (old_state->content_protection == state->content_protection)
6738 		return false;
6739 
6740 	if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
6741 		return true;
6742 
6743 	return false;
6744 }
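/*
 * To summarize the checks above: this returns true when HDCP work must be
 * (re)submitted, i.e. on a content type change, on a DESIRED request with
 * a powered-on, connected sink (which the S3-resume path funnels into),
 * or on a transition to UNDESIRED; a plain ENABLED -> DESIRED re-enable
 * and an unchanged property both return false.
 */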
6745 
6746 #endif
6747 static void remove_stream(struct amdgpu_device *adev,
6748 			  struct amdgpu_crtc *acrtc,
6749 			  struct dc_stream_state *stream)
6750 {
6751 	/* this is the update mode case */
6752 
6753 	acrtc->otg_inst = -1;
6754 	acrtc->enabled = false;
6755 }
6756 
6757 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
6758 			       struct dc_cursor_position *position)
6759 {
6760 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6761 	int x, y;
6762 	int xorigin = 0, yorigin = 0;
6763 
6764 	position->enable = false;
6765 	position->x = 0;
6766 	position->y = 0;
6767 
6768 	if (!crtc || !plane->state->fb)
6769 		return 0;
6770 
6771 	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
6772 	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
6773 		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
6774 			  __func__,
6775 			  plane->state->crtc_w,
6776 			  plane->state->crtc_h);
6777 		return -EINVAL;
6778 	}
6779 
6780 	x = plane->state->crtc_x;
6781 	y = plane->state->crtc_y;
6782 
6783 	if (x <= -amdgpu_crtc->max_cursor_width ||
6784 	    y <= -amdgpu_crtc->max_cursor_height)
6785 		return 0;
6786 
6787 	if (x < 0) {
6788 		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
6789 		x = 0;
6790 	}
6791 	if (y < 0) {
6792 		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
6793 		y = 0;
6794 	}
6795 	position->enable = true;
6796 	position->translate_by_source = true;
6797 	position->x = x;
6798 	position->y = y;
6799 	position->x_hotspot = xorigin;
6800 	position->y_hotspot = yorigin;
6801 
6802 	return 0;
6803 }
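/*
 * Worked example of the clamping above: with max_cursor_width == 64 and a
 * cursor placed at crtc_x == -10, the origin is clamped to x == 0 with
 * x_hotspot == 10, so DC starts scanning the cursor surface 10 pixels in
 * and the visible portion lands exactly where userspace asked.
 */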
6804 
6805 static void handle_cursor_update(struct drm_plane *plane,
6806 				 struct drm_plane_state *old_plane_state)
6807 {
6808 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
6809 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
6810 	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
6811 	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
6812 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
6813 	uint64_t address = afb ? afb->address : 0;
6814 	struct dc_cursor_position position;
6815 	struct dc_cursor_attributes attributes;
6816 	int ret;
6817 
6818 	if (!plane->state->fb && !old_plane_state->fb)
6819 		return;
6820 
6821 	DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
6822 			 __func__,
6823 			 amdgpu_crtc->crtc_id,
6824 			 plane->state->crtc_w,
6825 			 plane->state->crtc_h);
6826 
6827 	ret = get_cursor_position(plane, crtc, &position);
6828 	if (ret)
6829 		return;
6830 
6831 	if (!position.enable) {
6832 		/* turn off cursor */
6833 		if (crtc_state && crtc_state->stream) {
6834 			mutex_lock(&adev->dm.dc_lock);
6835 			dc_stream_set_cursor_position(crtc_state->stream,
6836 						      &position);
6837 			mutex_unlock(&adev->dm.dc_lock);
6838 		}
6839 		return;
6840 	}
6841 
6842 	amdgpu_crtc->cursor_width = plane->state->crtc_w;
6843 	amdgpu_crtc->cursor_height = plane->state->crtc_h;
6844 
6845 	memset(&attributes, 0, sizeof(attributes));
6846 	attributes.address.high_part = upper_32_bits(address);
6847 	attributes.address.low_part  = lower_32_bits(address);
6848 	attributes.width             = plane->state->crtc_w;
6849 	attributes.height            = plane->state->crtc_h;
6850 	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
6851 	attributes.rotation_angle    = 0;
6852 	attributes.attribute_flags.value = 0;
6853 
6854 	attributes.pitch = attributes.width;
6855 
6856 	if (crtc_state->stream) {
6857 		mutex_lock(&adev->dm.dc_lock);
6858 		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
6859 							 &attributes))
6860 			DRM_ERROR("DC failed to set cursor attributes\n");
6861 
6862 		if (!dc_stream_set_cursor_position(crtc_state->stream,
6863 						   &position))
6864 			DRM_ERROR("DC failed to set cursor position\n");
6865 		mutex_unlock(&adev->dm.dc_lock);
6866 	}
6867 }
6868 
6869 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
6870 {
6871 
6872 	assert_spin_locked(&acrtc->base.dev->event_lock);
6873 	WARN_ON(acrtc->event);
6874 
6875 	acrtc->event = acrtc->base.state->event;
6876 
6877 	/* Set the flip status */
6878 	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
6879 
6880 	/* Mark this event as consumed */
6881 	acrtc->base.state->event = NULL;
6882 
6883 	DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
6884 						 acrtc->crtc_id);
6885 }
6886 
6887 static void update_freesync_state_on_stream(
6888 	struct amdgpu_display_manager *dm,
6889 	struct dm_crtc_state *new_crtc_state,
6890 	struct dc_stream_state *new_stream,
6891 	struct dc_plane_state *surface,
6892 	u32 flip_timestamp_in_us)
6893 {
6894 	struct mod_vrr_params vrr_params;
6895 	struct dc_info_packet vrr_infopacket = {0};
6896 	struct amdgpu_device *adev = dm->adev;
6897 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
6898 	unsigned long flags;
6899 
6900 	if (!new_stream)
6901 		return;
6902 
6903 	/*
6904 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
6905 	 * For now it's sufficient to just guard against these conditions.
6906 	 */
6907 
6908 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
6909 		return;
6910 
6911 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
6912 	vrr_params = acrtc->dm_irq_params.vrr_params;
6913 
6914 	if (surface) {
6915 		mod_freesync_handle_preflip(
6916 			dm->freesync_module,
6917 			surface,
6918 			new_stream,
6919 			flip_timestamp_in_us,
6920 			&vrr_params);
6921 
6922 		if (adev->family < AMDGPU_FAMILY_AI &&
6923 		    amdgpu_dm_vrr_active(new_crtc_state)) {
6924 			mod_freesync_handle_v_update(dm->freesync_module,
6925 						     new_stream, &vrr_params);
6926 
6927 			/* Need to call this before the frame ends. */
6928 			dc_stream_adjust_vmin_vmax(dm->dc,
6929 						   new_crtc_state->stream,
6930 						   &vrr_params.adjust);
6931 		}
6932 	}
6933 
6934 	mod_freesync_build_vrr_infopacket(
6935 		dm->freesync_module,
6936 		new_stream,
6937 		&vrr_params,
6938 		PACKET_TYPE_VRR,
6939 		TRANSFER_FUNC_UNKNOWN,
6940 		&vrr_infopacket);
6941 
6942 	new_crtc_state->freesync_timing_changed |=
6943 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
6944 			&vrr_params.adjust,
6945 			sizeof(vrr_params.adjust)) != 0);
6946 
6947 	new_crtc_state->freesync_vrr_info_changed |=
6948 		(memcmp(&new_crtc_state->vrr_infopacket,
6949 			&vrr_infopacket,
6950 			sizeof(vrr_infopacket)) != 0);
6951 
6952 	acrtc->dm_irq_params.vrr_params = vrr_params;
6953 	new_crtc_state->vrr_infopacket = vrr_infopacket;
6954 
6955 	new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
6956 	new_stream->vrr_infopacket = vrr_infopacket;
6957 
6958 	if (new_crtc_state->freesync_vrr_info_changed)
6959 		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
6960 			      new_crtc_state->base.crtc->base.id,
6961 			      (int)new_crtc_state->base.vrr_enabled,
6962 			      (int)vrr_params.state);
6963 
6964 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
6965 }
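/*
 * vrr_params lives in acrtc->dm_irq_params and is only read or modified
 * under the DRM event_lock (taken above), since the vblank and pageflip
 * interrupt handlers consume the same data.
 */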
6966 
6967 static void update_stream_irq_parameters(
6968 	struct amdgpu_display_manager *dm,
6969 	struct dm_crtc_state *new_crtc_state)
6970 {
6971 	struct dc_stream_state *new_stream = new_crtc_state->stream;
6972 	struct mod_vrr_params vrr_params;
6973 	struct mod_freesync_config config = new_crtc_state->freesync_config;
6974 	struct amdgpu_device *adev = dm->adev;
6975 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
6976 	unsigned long flags;
6977 
6978 	if (!new_stream)
6979 		return;
6980 
6981 	/*
6982 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
6983 	 * For now it's sufficient to just guard against these conditions.
6984 	 */
6985 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
6986 		return;
6987 
6988 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
6989 	vrr_params = acrtc->dm_irq_params.vrr_params;
6990 
6991 	if (new_crtc_state->vrr_supported &&
6992 	    config.min_refresh_in_uhz &&
6993 	    config.max_refresh_in_uhz) {
6994 		config.state = new_crtc_state->base.vrr_enabled ?
6995 			VRR_STATE_ACTIVE_VARIABLE :
6996 			VRR_STATE_INACTIVE;
6997 	} else {
6998 		config.state = VRR_STATE_UNSUPPORTED;
6999 	}
7000 
7001 	mod_freesync_build_vrr_params(dm->freesync_module,
7002 				      new_stream,
7003 				      &config, &vrr_params);
7004 
7005 	new_crtc_state->freesync_timing_changed |=
7006 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
7007 			&vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
7008 
7009 	new_crtc_state->freesync_config = config;
7010 	/* Copy state for access from DM IRQ handler */
7011 	acrtc->dm_irq_params.freesync_config = config;
7012 	acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
7013 	acrtc->dm_irq_params.vrr_params = vrr_params;
7014 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
7015 }
7016 
7017 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
7018 					    struct dm_crtc_state *new_state)
7019 {
7020 	bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
7021 	bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
7022 
7023 	if (!old_vrr_active && new_vrr_active) {
7024 		/* Transition VRR inactive -> active:
7025 		 * While VRR is active, we must not disable vblank irq, as a
7026 		 * reenable after disable would compute bogus vblank/pflip
7027 		 * timestamps if it likely happened inside display front-porch.
7028 		 *
7029 		 * We also need vupdate irq for the actual core vblank handling
7030 		 * at end of vblank.
7031 		 */
7032 		dm_set_vupdate_irq(new_state->base.crtc, true);
7033 		drm_crtc_vblank_get(new_state->base.crtc);
7034 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
7035 				 __func__, new_state->base.crtc->base.id);
7036 	} else if (old_vrr_active && !new_vrr_active) {
7037 		/* Transition VRR active -> inactive:
7038 		 * Allow vblank irq disable again for fixed refresh rate.
7039 		 */
7040 		dm_set_vupdate_irq(new_state->base.crtc, false);
7041 		drm_crtc_vblank_put(new_state->base.crtc);
7042 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
7043 				 __func__, new_state->base.crtc->base.id);
7044 	}
7045 }
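/*
 * The two branches above are symmetric: every inactive->active VRR
 * transition takes one vupdate IRQ and one vblank reference, and the
 * matching active->inactive transition drops exactly those references.
 */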
7046 
7047 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
7048 {
7049 	struct drm_plane *plane;
7050 	struct drm_plane_state *old_plane_state, *new_plane_state;
7051 	int i;
7052 
7053 	/*
7054 	 * TODO: Make this per-stream so we don't issue redundant updates for
7055 	 * commits with multiple streams.
7056 	 */
7057 	for_each_oldnew_plane_in_state(state, plane, old_plane_state,
7058 				       new_plane_state, i)
7059 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
7060 			handle_cursor_update(plane, old_plane_state);
7061 }
7062 
7063 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
7064 				    struct dc_state *dc_state,
7065 				    struct drm_device *dev,
7066 				    struct amdgpu_display_manager *dm,
7067 				    struct drm_crtc *pcrtc,
7068 				    bool wait_for_vblank)
7069 {
7070 	uint32_t i;
7071 	uint64_t timestamp_ns;
7072 	struct drm_plane *plane;
7073 	struct drm_plane_state *old_plane_state, *new_plane_state;
7074 	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
7075 	struct drm_crtc_state *new_pcrtc_state =
7076 			drm_atomic_get_new_crtc_state(state, pcrtc);
7077 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
7078 	struct dm_crtc_state *dm_old_crtc_state =
7079 			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
7080 	int planes_count = 0, vpos, hpos;
7081 	long r;
7082 	unsigned long flags;
7083 	struct amdgpu_bo *abo;
7084 	uint32_t target_vblank, last_flip_vblank;
7085 	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
7086 	bool pflip_present = false;
7087 	struct {
7088 		struct dc_surface_update surface_updates[MAX_SURFACES];
7089 		struct dc_plane_info plane_infos[MAX_SURFACES];
7090 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
7091 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
7092 		struct dc_stream_update stream_update;
7093 	} *bundle;
7094 
7095 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
7096 
7097 	if (!bundle) {
7098 		dm_error("Failed to allocate update bundle\n");
7099 		goto cleanup;
7100 	}
7101 
7102 	/*
7103 	 * Disable the cursor first if we're disabling all the planes.
7104 	 * It'll remain on the screen after the planes are re-enabled
7105 	 * if we don't.
7106 	 */
7107 	if (acrtc_state->active_planes == 0)
7108 		amdgpu_dm_commit_cursors(state);
7109 
7110 	/* update planes when needed */
7111 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
7112 		struct drm_crtc *crtc = new_plane_state->crtc;
7113 		struct drm_crtc_state *new_crtc_state;
7114 		struct drm_framebuffer *fb = new_plane_state->fb;
7115 		bool plane_needs_flip;
7116 		struct dc_plane_state *dc_plane;
7117 		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
7118 
7119 		/* Cursor plane is handled after stream updates */
7120 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
7121 			continue;
7122 
7123 		if (!fb || !crtc || pcrtc != crtc)
7124 			continue;
7125 
7126 		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
7127 		if (!new_crtc_state->active)
7128 			continue;
7129 
7130 		dc_plane = dm_new_plane_state->dc_state;
7131 
7132 		bundle->surface_updates[planes_count].surface = dc_plane;
7133 		if (new_pcrtc_state->color_mgmt_changed) {
7134 			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
7135 			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
7136 			bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
7137 		}
7138 
7139 		fill_dc_scaling_info(new_plane_state,
7140 				     &bundle->scaling_infos[planes_count]);
7141 
7142 		bundle->surface_updates[planes_count].scaling_info =
7143 			&bundle->scaling_infos[planes_count];
7144 
7145 		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
7146 
7147 		pflip_present = pflip_present || plane_needs_flip;
7148 
7149 		if (!plane_needs_flip) {
7150 			planes_count += 1;
7151 			continue;
7152 		}
7153 
7154 		abo = gem_to_amdgpu_bo(fb->obj[0]);
7155 
7156 		/*
7157 		 * Wait for all fences on this FB. Do limited wait to avoid
7158 		 * deadlock during GPU reset when this fence will not signal
7159 		 * but we hold reservation lock for the BO.
7160 		 */
7161 		r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
7162 							false,
7163 							msecs_to_jiffies(5000));
7164 		if (unlikely(r <= 0))
7165 			DRM_ERROR("Waiting for fences timed out!");
7166 
7167 		fill_dc_plane_info_and_addr(
7168 			dm->adev, new_plane_state,
7169 			dm_new_plane_state->tiling_flags,
7170 			&bundle->plane_infos[planes_count],
7171 			&bundle->flip_addrs[planes_count].address,
7172 			dm_new_plane_state->tmz_surface, false);
7173 
7174 		DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
7175 				 new_plane_state->plane->index,
7176 				 bundle->plane_infos[planes_count].dcc.enable);
7177 
7178 		bundle->surface_updates[planes_count].plane_info =
7179 			&bundle->plane_infos[planes_count];
7180 
7181 		/*
7182 		 * Only allow immediate flips for fast updates that don't
7183 		 * change FB pitch, DCC state, rotation or mirroring.
7184 		 */
7185 		bundle->flip_addrs[planes_count].flip_immediate =
7186 			crtc->state->async_flip &&
7187 			acrtc_state->update_type == UPDATE_TYPE_FAST;
7188 
7189 		timestamp_ns = ktime_get_ns();
7190 		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
7191 		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
7192 		bundle->surface_updates[planes_count].surface = dc_plane;
7193 
7194 		if (!bundle->surface_updates[planes_count].surface) {
7195 			DRM_ERROR("No surface for CRTC: id=%d\n",
7196 					acrtc_attach->crtc_id);
7197 			continue;
7198 		}
7199 
7200 		if (plane == pcrtc->primary)
7201 			update_freesync_state_on_stream(
7202 				dm,
7203 				acrtc_state,
7204 				acrtc_state->stream,
7205 				dc_plane,
7206 				bundle->flip_addrs[planes_count].flip_timestamp_in_us);
7207 
7208 		DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
7209 				 __func__,
7210 				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
7211 				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
7212 
7213 		planes_count += 1;
7214 
7215 	}
7216 
7217 	if (pflip_present) {
7218 		if (!vrr_active) {
7219 			/* Use old throttling in non-vrr fixed refresh rate mode
7220 			 * to keep flip scheduling based on target vblank counts
7221 			 * working in a backwards compatible way, e.g., for
7222 			 * clients using the GLX_OML_sync_control extension or
7223 			 * DRI3/Present extension with defined target_msc.
7224 			 */
7225 			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
7226 		} else {
7228 			/* For variable refresh rate mode only:
7229 			 * Get vblank of last completed flip to avoid > 1 vrr
7230 			 * flips per video frame by use of throttling, but allow
7231 			 * flip programming anywhere in the possibly large
7232 			 * variable vrr vblank interval for fine-grained flip
7233 			 * timing control and more opportunity to avoid stutter
7234 			 * on late submission of flips.
7235 			 */
7236 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7237 			last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
7238 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7239 		}
7240 
7241 		target_vblank = last_flip_vblank + wait_for_vblank;
7242 
7243 		/*
7244 		 * Wait until we're out of the vertical blank period before the one
7245 		 * targeted by the flip
7246 		 */
7247 		while ((acrtc_attach->enabled &&
7248 			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
7249 							    0, &vpos, &hpos, NULL,
7250 							    NULL, &pcrtc->hwmode)
7251 			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
7252 			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
7253 			(int)(target_vblank -
7254 			  amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
7255 			usleep_range(1000, 1100);
7256 		}
7257 
7258 		/*
7259 		 * Prepare the flip event for the pageflip interrupt to handle.
7260 		 *
7261 		 * This only works in the case where we've already turned on the
7262 		 * appropriate hardware blocks (e.g. HUBP) so in the transition case
7263 		 * from 0 -> n planes we have to skip a hardware generated event
7264 		 * and rely on sending it from software.
7265 		 */
7266 		if (acrtc_attach->base.state->event &&
7267 		    acrtc_state->active_planes > 0) {
7268 			drm_crtc_vblank_get(pcrtc);
7269 
7270 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7271 
7272 			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
7273 			prepare_flip_isr(acrtc_attach);
7274 
7275 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7276 		}
7277 
7278 		if (acrtc_state->stream) {
7279 			if (acrtc_state->freesync_vrr_info_changed)
7280 				bundle->stream_update.vrr_infopacket =
7281 					&acrtc_state->stream->vrr_infopacket;
7282 		}
7283 	}
7284 
7285 	/* Update the planes if changed or disable if we don't have any. */
7286 	if ((planes_count || acrtc_state->active_planes == 0) &&
7287 		acrtc_state->stream) {
7288 		bundle->stream_update.stream = acrtc_state->stream;
7289 		if (new_pcrtc_state->mode_changed) {
7290 			bundle->stream_update.src = acrtc_state->stream->src;
7291 			bundle->stream_update.dst = acrtc_state->stream->dst;
7292 		}
7293 
7294 		if (new_pcrtc_state->color_mgmt_changed) {
7295 			/*
7296 			 * TODO: This isn't fully correct since we've actually
7297 			 * already modified the stream in place.
7298 			 */
7299 			bundle->stream_update.gamut_remap =
7300 				&acrtc_state->stream->gamut_remap_matrix;
7301 			bundle->stream_update.output_csc_transform =
7302 				&acrtc_state->stream->csc_color_matrix;
7303 			bundle->stream_update.out_transfer_func =
7304 				acrtc_state->stream->out_transfer_func;
7305 		}
7306 
7307 		acrtc_state->stream->abm_level = acrtc_state->abm_level;
7308 		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
7309 			bundle->stream_update.abm_level = &acrtc_state->abm_level;
7310 
7311 		/*
7312 		 * If FreeSync state on the stream has changed then we need to
7313 		 * re-adjust the min/max bounds now that DC doesn't handle this
7314 		 * as part of commit.
7315 		 */
7316 		if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
7317 		    amdgpu_dm_vrr_active(acrtc_state)) {
7318 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7319 			dc_stream_adjust_vmin_vmax(
7320 				dm->dc, acrtc_state->stream,
7321 				&acrtc_attach->dm_irq_params.vrr_params.adjust);
7322 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7323 		}
7324 		mutex_lock(&dm->dc_lock);
7325 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7326 				acrtc_state->stream->link->psr_settings.psr_allow_active)
7327 			amdgpu_dm_psr_disable(acrtc_state->stream);
7328 
7329 		dc_commit_updates_for_stream(dm->dc,
7330 						     bundle->surface_updates,
7331 						     planes_count,
7332 						     acrtc_state->stream,
7333 						     &bundle->stream_update,
7334 						     dc_state);
7335 
7336 		/*
7337 		 * Enable or disable the interrupts on the backend.
7338 		 *
7339 		 * Most pipes are put into power gating when unused.
7340 		 *
7341 		 * When a pipe is power gated we lose its interrupt
7342 		 * enablement state, and it is not restored when the pipe is ungated.
7343 		 *
7344 		 * So we need to update the IRQ control state in hardware
7345 		 * whenever the pipe turns on (since it could be previously
7346 		 * power gated) or off (since some pipes can't be power gated
7347 		 * on some ASICs).
7348 		 */
7349 		if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
7350 			dm_update_pflip_irq_state(drm_to_adev(dev),
7351 						  acrtc_attach);
7352 
7353 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7354 				acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
7355 				!acrtc_state->stream->link->psr_settings.psr_feature_enabled)
7356 			amdgpu_dm_link_setup_psr(acrtc_state->stream);
7357 		else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
7358 				acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
7359 				!acrtc_state->stream->link->psr_settings.psr_allow_active) {
7360 			amdgpu_dm_psr_enable(acrtc_state->stream);
7361 		}
7362 
7363 		mutex_unlock(&dm->dc_lock);
7364 	}
7365 
7366 	/*
7367 	 * Update cursor state *after* programming all the planes.
7368 	 * This avoids redundant programming in the case where we're going
7369 	 * to be disabling a single plane - those pipes are being disabled.
7370 	 */
7371 	if (acrtc_state->active_planes)
7372 		amdgpu_dm_commit_cursors(state);
7373 
7374 cleanup:
7375 	kfree(bundle);
7376 }
7377 
7378 static void amdgpu_dm_commit_audio(struct drm_device *dev,
7379 				   struct drm_atomic_state *state)
7380 {
7381 	struct amdgpu_device *adev = drm_to_adev(dev);
7382 	struct amdgpu_dm_connector *aconnector;
7383 	struct drm_connector *connector;
7384 	struct drm_connector_state *old_con_state, *new_con_state;
7385 	struct drm_crtc_state *new_crtc_state;
7386 	struct dm_crtc_state *new_dm_crtc_state;
7387 	const struct dc_stream_status *status;
7388 	int i, inst;
7389 
7390 	/* Notify audio device removals. */
7391 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7392 		if (old_con_state->crtc != new_con_state->crtc) {
7393 			/* CRTC changes require notification. */
7394 			goto notify;
7395 		}
7396 
7397 		if (!new_con_state->crtc)
7398 			continue;
7399 
7400 		new_crtc_state = drm_atomic_get_new_crtc_state(
7401 			state, new_con_state->crtc);
7402 
7403 		if (!new_crtc_state)
7404 			continue;
7405 
7406 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7407 			continue;
7408 
7409 	notify:
7410 		aconnector = to_amdgpu_dm_connector(connector);
7411 
7412 		mutex_lock(&adev->dm.audio_lock);
7413 		inst = aconnector->audio_inst;
7414 		aconnector->audio_inst = -1;
7415 		mutex_unlock(&adev->dm.audio_lock);
7416 
7417 		amdgpu_dm_audio_eld_notify(adev, inst);
7418 	}
7419 
7420 	/* Notify audio device additions. */
7421 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
7422 		if (!new_con_state->crtc)
7423 			continue;
7424 
7425 		new_crtc_state = drm_atomic_get_new_crtc_state(
7426 			state, new_con_state->crtc);
7427 
7428 		if (!new_crtc_state)
7429 			continue;
7430 
7431 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7432 			continue;
7433 
7434 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
7435 		if (!new_dm_crtc_state->stream)
7436 			continue;
7437 
7438 		status = dc_stream_get_status(new_dm_crtc_state->stream);
7439 		if (!status)
7440 			continue;
7441 
7442 		aconnector = to_amdgpu_dm_connector(connector);
7443 
7444 		mutex_lock(&adev->dm.audio_lock);
7445 		inst = status->audio_inst;
7446 		aconnector->audio_inst = inst;
7447 		mutex_unlock(&adev->dm.audio_lock);
7448 
7449 		amdgpu_dm_audio_eld_notify(adev, inst);
7450 	}
7451 }
7452 
7453 /*
7454  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
7455  * @crtc_state: the DRM CRTC state
7456  * @stream_state: the DC stream state.
7457  *
7458  * Copy the mirrored transient state flags from DRM to DC. It is used to bring
7459  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
7460  */
7461 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
7462 						struct dc_stream_state *stream_state)
7463 {
7464 	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
7465 }
7466 
7467 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
7468 				   struct drm_atomic_state *state,
7469 				   bool nonblock)
7470 {
7471 	/*
7472 	 * Add check here for SoCs that support hardware cursor planes, to
7473 	 * unset legacy_cursor_update
7474 	 */
7475 
7476 	return drm_atomic_helper_commit(dev, state, nonblock);
7477 
7478 	/* TODO: Handle EINTR, re-enable IRQ */
7479 }
7480 
7481 /**
7482  * amdgpu_dm_atomic_commit_tail() - amdgpu DM's commit tail implementation.
7483  * @state: The atomic state to commit
7484  *
7485  * This will tell DC to commit the constructed DC state from atomic_check,
7486  * programming the hardware. Any failure here implies a hardware failure, since
7487  * atomic check should have filtered anything non-kosher.
7488  */
7489 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
7490 {
7491 	struct drm_device *dev = state->dev;
7492 	struct amdgpu_device *adev = drm_to_adev(dev);
7493 	struct amdgpu_display_manager *dm = &adev->dm;
7494 	struct dm_atomic_state *dm_state;
7495 	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
7496 	uint32_t i, j;
7497 	struct drm_crtc *crtc;
7498 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7499 	unsigned long flags;
7500 	bool wait_for_vblank = true;
7501 	struct drm_connector *connector;
7502 	struct drm_connector_state *old_con_state, *new_con_state;
7503 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
7504 	int crtc_disable_count = 0;
7505 	bool mode_set_reset_required = false;
7506 
7507 	drm_atomic_helper_update_legacy_modeset_state(dev, state);
7508 
7509 	dm_state = dm_atomic_get_new_state(state);
7510 	if (dm_state && dm_state->context) {
7511 		dc_state = dm_state->context;
7512 	} else {
7513 		/* No state changes, retain current state. */
7514 		dc_state_temp = dc_create_state(dm->dc);
7515 		ASSERT(dc_state_temp);
7516 		dc_state = dc_state_temp;
7517 		dc_resource_state_copy_construct_current(dm->dc, dc_state);
7518 	}
7519 
7520 	for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
7521 				       new_crtc_state, i) {
7522 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7523 
7524 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7525 
7526 		if (old_crtc_state->active &&
7527 		    (!new_crtc_state->active ||
7528 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
7529 			manage_dm_interrupts(adev, acrtc, false);
7530 			dc_stream_release(dm_old_crtc_state->stream);
7531 		}
7532 	}
7533 
7534 	drm_atomic_helper_calc_timestamping_constants(state);
7535 
7536 	/* update changed items */
7537 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7538 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7539 
7540 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7541 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7542 
7543 		DRM_DEBUG_DRIVER(
7544 			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
7545 			"planes_changed:%d, mode_changed:%d, active_changed:%d, "
7546 			"connectors_changed:%d\n",
7547 			acrtc->crtc_id,
7548 			new_crtc_state->enable,
7549 			new_crtc_state->active,
7550 			new_crtc_state->planes_changed,
7551 			new_crtc_state->mode_changed,
7552 			new_crtc_state->active_changed,
7553 			new_crtc_state->connectors_changed);
7554 
7555 		/* Copy all transient state flags into dc state */
7556 		if (dm_new_crtc_state->stream) {
7557 			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
7558 							    dm_new_crtc_state->stream);
7559 		}
7560 
7561 		/* handles headless hotplug case, updating new_state and
7562 		 * aconnector as needed
7563 		 */
7564 
7565 		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
7566 
7567 			DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
7568 
7569 			if (!dm_new_crtc_state->stream) {
7570 				/*
7571 				 * This could happen because of issues with
7572 				 * userspace notification delivery: userspace
7573 				 * tries to set a mode on a display that is in
7574 				 * fact disconnected, so dc_sink is NULL on the
7575 				 * aconnector. We expect a mode reset to come
7576 				 * soon.
7577 				 *
7578 				 * This can also happen when an unplug occurs
7579 				 * during the resume sequence.
7580 				 *
7581 				 * In either case, we want to pretend we still
7582 				 * have a sink to keep the pipe running so that
7583 				 * hw state stays consistent with the sw state.
7584 				 */
7585 				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
7586 						__func__, acrtc->base.base.id);
7587 				continue;
7588 			}
7589 
7590 			if (dm_old_crtc_state->stream)
7591 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7592 
7593 			pm_runtime_get_noresume(dev->dev);
7594 
7595 			acrtc->enabled = true;
7596 			acrtc->hw_mode = new_crtc_state->mode;
7597 			crtc->hwmode = new_crtc_state->mode;
7598 			mode_set_reset_required = true;
7599 		} else if (modereset_required(new_crtc_state)) {
7600 			DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
7601 			/* i.e. reset mode */
7602 			if (dm_old_crtc_state->stream)
7603 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
7604 			mode_set_reset_required = true;
7605 		}
7606 	} /* for_each_crtc_in_state() */
7607 
7608 	if (dc_state) {
7609 		/* If there was a mode set or reset, disable eDP PSR. */
7610 		if (mode_set_reset_required)
7611 			amdgpu_dm_psr_disable_all(dm);
7612 
7613 		dm_enable_per_frame_crtc_master_sync(dc_state);
7614 		mutex_lock(&dm->dc_lock);
7615 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
7616 		mutex_unlock(&dm->dc_lock);
7617 	}
7618 
7619 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7620 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7621 
7622 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7623 
7624 		if (dm_new_crtc_state->stream != NULL) {
7625 			const struct dc_stream_status *status =
7626 					dc_stream_get_status(dm_new_crtc_state->stream);
7627 
7628 			if (!status)
7629 				status = dc_stream_get_status_from_state(dc_state,
7630 									 dm_new_crtc_state->stream);
7631 			if (!status)
7632 				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
7633 			else
7634 				acrtc->otg_inst = status->primary_otg_inst;
7635 		}
7636 	}
7637 #ifdef CONFIG_DRM_AMD_DC_HDCP
7638 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7639 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7640 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7641 		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7642 
7643 		new_crtc_state = NULL;
7644 
7645 		if (acrtc)
7646 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7647 
7648 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7649 
7650 		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
7651 		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
7652 			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
7653 			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7654 			continue;
7655 		}
7656 
7657 		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
7658 			hdcp_update_display(
7659 				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
7660 				new_con_state->hdcp_content_type,
7661 				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED ? true
7662 													 : false);
7663 	}
7664 #endif
7665 
7666 	/* Handle connector state changes */
7667 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7668 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
7669 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
7670 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
7671 		struct dc_surface_update dummy_updates[MAX_SURFACES];
7672 		struct dc_stream_update stream_update;
7673 		struct dc_info_packet hdr_packet;
7674 		struct dc_stream_status *status = NULL;
7675 		bool abm_changed, hdr_changed, scaling_changed;
7676 
7677 		memset(&dummy_updates, 0, sizeof(dummy_updates));
7678 		memset(&stream_update, 0, sizeof(stream_update));
7679 
7680 		if (acrtc) {
7681 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
7682 			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
7683 		}
7684 
7685 		/* Skip any modesets/resets */
7686 		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
7687 			continue;
7688 
7689 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7690 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7691 
7692 		scaling_changed = is_scaling_state_different(dm_new_con_state,
7693 							     dm_old_con_state);
7694 
7695 		abm_changed = dm_new_crtc_state->abm_level !=
7696 			      dm_old_crtc_state->abm_level;
7697 
7698 		hdr_changed =
7699 			is_hdr_metadata_different(old_con_state, new_con_state);
7700 
7701 		if (!scaling_changed && !abm_changed && !hdr_changed)
7702 			continue;
7703 
7704 		stream_update.stream = dm_new_crtc_state->stream;
7705 		if (scaling_changed) {
7706 			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
7707 					dm_new_con_state, dm_new_crtc_state->stream);
7708 
7709 			stream_update.src = dm_new_crtc_state->stream->src;
7710 			stream_update.dst = dm_new_crtc_state->stream->dst;
7711 		}
7712 
7713 		if (abm_changed) {
7714 			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
7715 
7716 			stream_update.abm_level = &dm_new_crtc_state->abm_level;
7717 		}
7718 
7719 		if (hdr_changed) {
7720 			fill_hdr_info_packet(new_con_state, &hdr_packet);
7721 			stream_update.hdr_static_metadata = &hdr_packet;
7722 		}
7723 
7724 		status = dc_stream_get_status(dm_new_crtc_state->stream);
7725 		WARN_ON(!status);
7726 		WARN_ON(!status->plane_count);
7727 
7728 		/*
7729 		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
7730 		 * Here we create an empty update on each plane.
7731 		 * To fix this, DC should permit updating only stream properties.
7732 		 */
7733 		for (j = 0; j < status->plane_count; j++)
7734 			dummy_updates[j].surface = status->plane_states[0];
7735 
7736 
7737 		mutex_lock(&dm->dc_lock);
7738 		dc_commit_updates_for_stream(dm->dc,
7739 						     dummy_updates,
7740 						     status->plane_count,
7741 						     dm_new_crtc_state->stream,
7742 						     &stream_update,
7743 						     dc_state);
7744 		mutex_unlock(&dm->dc_lock);
7745 	}
7746 
7747 	/* Count number of newly disabled CRTCs for dropping PM refs later. */
7748 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
7749 				      new_crtc_state, i) {
7750 		if (old_crtc_state->active && !new_crtc_state->active)
7751 			crtc_disable_count++;
7752 
7753 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7754 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7755 
7756 		/* For freesync config update on crtc state and params for irq */
7757 		update_stream_irq_parameters(dm, dm_new_crtc_state);
7758 
7759 		/* Handle vrr on->off / off->on transitions */
7760 		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
7761 						dm_new_crtc_state);
7762 	}
7763 
7764 	/*
7765 	 * Enable interrupts for CRTCs that are newly enabled or went through
7766 	 * a modeset. This is intentionally deferred until after the front end
7767 	 * state has been modified so that the OTG is already on and the IRQ
7768 	 * handlers never see stale or invalid state.
7769 	 */
7770 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
7771 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7772 
7773 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7774 
7775 		if (new_crtc_state->active &&
7776 		    (!old_crtc_state->active ||
7777 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
7778 			dc_stream_retain(dm_new_crtc_state->stream);
7779 			acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
7780 			manage_dm_interrupts(adev, acrtc, true);
7781 
7782 #ifdef CONFIG_DEBUG_FS
7783 			/*
7784 			 * Frontend may have changed so reapply the CRC capture
7785 			 * settings for the stream.
7786 			 */
7787 			dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7788 
7789 			if (amdgpu_dm_is_valid_crc_source(dm_new_crtc_state->crc_src)) {
7790 				amdgpu_dm_crtc_configure_crc_source(
7791 					crtc, dm_new_crtc_state,
7792 					dm_new_crtc_state->crc_src);
7793 			}
7794 #endif
7795 		}
7796 	}
7797 
7798 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
7799 		if (new_crtc_state->async_flip)
7800 			wait_for_vblank = false;
7801 
7802 	/* update planes when needed per crtc*/
7803 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
7804 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
7805 
7806 		if (dm_new_crtc_state->stream)
7807 			amdgpu_dm_commit_planes(state, dc_state, dev,
7808 						dm, crtc, wait_for_vblank);
7809 	}
7810 
7811 	/* Update audio instances for each connector. */
7812 	amdgpu_dm_commit_audio(dev, state);
7813 
7814 	/*
7815 	 * Send vblank events for all CRTC events not handled in the flip path and
7816 	 * mark them consumed for drm_atomic_helper_commit_hw_done().
7817 	 */
7818 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7819 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
7820 
7821 		if (new_crtc_state->event)
7822 			drm_send_event_locked(dev, &new_crtc_state->event->base);
7823 
7824 		new_crtc_state->event = NULL;
7825 	}
7826 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
7827 
7828 	/* Signal HW programming completion */
7829 	drm_atomic_helper_commit_hw_done(state);
7830 
7831 	if (wait_for_vblank)
7832 		drm_atomic_helper_wait_for_flip_done(dev, state);
7833 
7834 	drm_atomic_helper_cleanup_planes(dev, state);
7835 
7836 	/*
7837 	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
7838 	 * so we can put the GPU into runtime suspend if we're not driving any
7839 	 * displays anymore
7840 	 */
7841 	for (i = 0; i < crtc_disable_count; i++)
7842 		pm_runtime_put_autosuspend(dev->dev);
7843 	pm_runtime_mark_last_busy(dev->dev);
7844 
7845 	if (dc_state_temp)
7846 		dc_release_state(dc_state_temp);
7847 }
7848 
7849 
7850 static int dm_force_atomic_commit(struct drm_connector *connector)
7851 {
7852 	int ret = 0;
7853 	struct drm_device *ddev = connector->dev;
7854 	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
7855 	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7856 	struct drm_plane *plane = disconnected_acrtc->base.primary;
7857 	struct drm_connector_state *conn_state;
7858 	struct drm_crtc_state *crtc_state;
7859 	struct drm_plane_state *plane_state;
7860 
7861 	if (!state)
7862 		return -ENOMEM;
7863 
7864 	state->acquire_ctx = ddev->mode_config.acquire_ctx;
7865 
7866 	/* Construct an atomic state to restore previous display setting */
7867 
7868 	/*
7869 	 * Attach connectors to drm_atomic_state
7870 	 */
7871 	conn_state = drm_atomic_get_connector_state(state, connector);
7872 
7873 	ret = PTR_ERR_OR_ZERO(conn_state);
7874 	if (ret)
7875 		goto err;
7876 
7877 	/* Attach CRTC to drm_atomic_state */
7878 	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
7879 
7880 	ret = PTR_ERR_OR_ZERO(crtc_state);
7881 	if (ret)
7882 		goto err;
7883 
7884 	/* force a restore */
7885 	crtc_state->mode_changed = true;
7886 
7887 	/* Attach plane to drm_atomic_state */
7888 	plane_state = drm_atomic_get_plane_state(state, plane);
7889 
7890 	ret = PTR_ERR_OR_ZERO(plane_state);
7891 	if (ret)
7892 		goto err;
7893 
7894 
7895 	/* Call commit internally with the state we just constructed */
7896 	ret = drm_atomic_commit(state);
7897 	if (!ret)
7898 		return 0;
7899 
7900 err:
7901 	DRM_ERROR("Restoring old state failed with %i\n", ret);
7902 	drm_atomic_state_put(state);
7903 
7904 	return ret;
7905 }
7906 
7907 /*
7908  * This function handles all cases when set mode does not come upon hotplug.
7909  * This includes when a display is unplugged then plugged back into the
7910  * same port and when running without usermode desktop manager support.
7911  */
7912 void dm_restore_drm_connector_state(struct drm_device *dev,
7913 				    struct drm_connector *connector)
7914 {
7915 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7916 	struct amdgpu_crtc *disconnected_acrtc;
7917 	struct dm_crtc_state *acrtc_state;
7918 
7919 	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
7920 		return;
7921 
7922 	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
7923 	if (!disconnected_acrtc)
7924 		return;
7925 
7926 	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
7927 	if (!acrtc_state->stream)
7928 		return;
7929 
7930 	/*
7931 	 * If the previous sink is not released and different from the current,
7932 	 * we deduce we are in a state where we cannot rely on a usermode call
7933 	 * to turn on the display, so we do it here.
7934 	 */
7935 	if (acrtc_state->stream->sink != aconnector->dc_sink)
7936 		dm_force_atomic_commit(&aconnector->base);
7937 }
7938 
7939 /*
7940  * Grabs all modesetting locks to serialize against any blocking commits,
7941  * and waits for completion of all nonblocking commits.
7942  */
7943 static int do_aquire_global_lock(struct drm_device *dev,
7944 				 struct drm_atomic_state *state)
7945 {
7946 	struct drm_crtc *crtc;
7947 	struct drm_crtc_commit *commit;
7948 	long ret;
7949 
7950 	/*
7951 	 * Adding all modeset locks to acquire_ctx will
7952 	 * ensure that when the framework releases it, the
7953 	 * extra locks we are locking here will get released too.
7954 	 */
7955 	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
7956 	if (ret)
7957 		return ret;
7958 
7959 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
7960 		spin_lock(&crtc->commit_lock);
7961 		commit = list_first_entry_or_null(&crtc->commit_list,
7962 				struct drm_crtc_commit, commit_entry);
7963 		if (commit)
7964 			drm_crtc_commit_get(commit);
7965 		spin_unlock(&crtc->commit_lock);
7966 
7967 		if (!commit)
7968 			continue;
7969 
7970 		/*
7971 		 * Make sure all pending HW programming completed and
7972 		 * page flips done
7973 		 */
7974 		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
7975 
7976 		if (ret > 0)
7977 			ret = wait_for_completion_interruptible_timeout(
7978 					&commit->flip_done, 10*HZ);
7979 
7980 		if (ret == 0)
7981 			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
7982 				  "timed out\n", crtc->base.id, crtc->name);
7983 
7984 		drm_crtc_commit_put(commit);
7985 	}
7986 
7987 	return ret < 0 ? ret : 0;
7988 }
7989 
7990 static void get_freesync_config_for_crtc(
7991 	struct dm_crtc_state *new_crtc_state,
7992 	struct dm_connector_state *new_con_state)
7993 {
7994 	struct mod_freesync_config config = {0};
7995 	struct amdgpu_dm_connector *aconnector =
7996 			to_amdgpu_dm_connector(new_con_state->base.connector);
7997 	struct drm_display_mode *mode = &new_crtc_state->base.mode;
7998 	int vrefresh = drm_mode_vrefresh(mode);
7999 
8000 	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
8001 					vrefresh >= aconnector->min_vfreq &&
8002 					vrefresh <= aconnector->max_vfreq;
8003 
8004 	if (new_crtc_state->vrr_supported) {
8005 		new_crtc_state->stream->ignore_msa_timing_param = true;
8006 		config.state = new_crtc_state->base.vrr_enabled ?
8007 				VRR_STATE_ACTIVE_VARIABLE :
8008 				VRR_STATE_INACTIVE;
8009 		config.min_refresh_in_uhz =
8010 				aconnector->min_vfreq * 1000000;
8011 		config.max_refresh_in_uhz =
8012 				aconnector->max_vfreq * 1000000;
8013 		config.vsif_supported = true;
8014 		config.btr = true;
8015 	}
8016 
8017 	new_crtc_state->freesync_config = config;
8018 }
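/*
 * Worked example, assuming a hypothetical panel advertising a 40-60 Hz
 * FreeSync range: a 60 Hz mode yields config.min_refresh_in_uhz ==
 * 40,000,000 and config.max_refresh_in_uhz == 60,000,000, whereas a 75 Hz
 * mode falls outside [min_vfreq, max_vfreq], so vrr_supported stays false
 * and the config remains zeroed.
 */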
8019 
8020 static void reset_freesync_config_for_crtc(
8021 	struct dm_crtc_state *new_crtc_state)
8022 {
8023 	new_crtc_state->vrr_supported = false;
8024 
8025 	memset(&new_crtc_state->vrr_infopacket, 0,
8026 	       sizeof(new_crtc_state->vrr_infopacket));
8027 }
8028 
8029 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
8030 				struct drm_atomic_state *state,
8031 				struct drm_crtc *crtc,
8032 				struct drm_crtc_state *old_crtc_state,
8033 				struct drm_crtc_state *new_crtc_state,
8034 				bool enable,
8035 				bool *lock_and_validation_needed)
8036 {
8037 	struct dm_atomic_state *dm_state = NULL;
8038 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8039 	struct dc_stream_state *new_stream;
8040 	int ret = 0;
8041 
8042 	/*
8043 	 * TODO Move this code into dm_crtc_atomic_check once we get rid of
8044 	 * dc_validation_set and update only the changed items.
8045 	 */
8046 	struct amdgpu_crtc *acrtc = NULL;
8047 	struct amdgpu_dm_connector *aconnector = NULL;
8048 	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
8049 	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
8050 
8051 	new_stream = NULL;
8052 
8053 	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8054 	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8055 	acrtc = to_amdgpu_crtc(crtc);
8056 	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
8057 
8058 	/* TODO This hack should go away */
8059 	if (aconnector && enable) {
8060 		/* Make sure fake sink is created in plug-in scenario */
8061 		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
8062 							    &aconnector->base);
8063 		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
8064 							    &aconnector->base);
8065 
8066 		if (IS_ERR(drm_new_conn_state)) {
8067 			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
8068 			goto fail;
8069 		}
8070 
8071 		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
8072 		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
8073 
8074 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8075 			goto skip_modeset;
8076 
8077 		new_stream = create_validate_stream_for_sink(aconnector,
8078 							     &new_crtc_state->mode,
8079 							     dm_new_conn_state,
8080 							     dm_old_crtc_state->stream);
8081 
8082 		/*
8083 		 * We can have no stream on ACTION_SET if a display
8084 		 * was disconnected during S3. In this case it is not an
8085 		 * error: the OS will be updated after detection and
8086 		 * will do the right thing on the next atomic commit.
8087 		 */
8088 
8089 		if (!new_stream) {
8090 			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8091 					__func__, acrtc->base.base.id);
8092 			ret = -ENOMEM;
8093 			goto fail;
8094 		}
8095 
8096 		/*
8097 		 * TODO: Check VSDB bits to decide whether this should
8098 		 * be enabled or not.
8099 		 */
8100 		new_stream->triggered_crtc_reset.enabled =
8101 			dm->force_timing_sync;
8102 
8103 		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8104 
8105 		ret = fill_hdr_info_packet(drm_new_conn_state,
8106 					   &new_stream->hdr_static_metadata);
8107 		if (ret)
8108 			goto fail;
8109 
8110 		/*
8111 		 * If we already removed the old stream from the context
8112 		 * (and set the new stream to NULL) then we can't reuse
8113 		 * the old stream even if the stream and scaling are unchanged.
8114 		 * We'll hit the BUG_ON and black screen.
8115 		 *
8116 		 * TODO: Refactor this function to allow this check to work
8117 		 * in all conditions.
8118 		 */
8119 		if (dm_new_crtc_state->stream &&
8120 		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
8121 		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
8122 			new_crtc_state->mode_changed = false;
8123 			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
8124 					 new_crtc_state->mode_changed);
8125 		}
8126 	}
8127 
8128 	/* mode_changed flag may get updated above, need to check again */
8129 	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8130 		goto skip_modeset;
8131 
8132 	DRM_DEBUG_DRIVER(
8133 		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8134 		"planes_changed:%d, mode_changed:%d, active_changed:%d, "
8135 		"connectors_changed:%d\n",
8136 		acrtc->crtc_id,
8137 		new_crtc_state->enable,
8138 		new_crtc_state->active,
8139 		new_crtc_state->planes_changed,
8140 		new_crtc_state->mode_changed,
8141 		new_crtc_state->active_changed,
8142 		new_crtc_state->connectors_changed);
8143 
8144 	/* Remove stream for any changed/disabled CRTC */
8145 	if (!enable) {
8146 
8147 		if (!dm_old_crtc_state->stream)
8148 			goto skip_modeset;
8149 
8150 		ret = dm_atomic_get_state(state, &dm_state);
8151 		if (ret)
8152 			goto fail;
8153 
8154 		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
8155 				crtc->base.id);
8156 
8157 		/* i.e. reset mode */
8158 		if (dc_remove_stream_from_ctx(
8159 				dm->dc,
8160 				dm_state->context,
8161 				dm_old_crtc_state->stream) != DC_OK) {
8162 			ret = -EINVAL;
8163 			goto fail;
8164 		}
8165 
8166 		dc_stream_release(dm_old_crtc_state->stream);
8167 		dm_new_crtc_state->stream = NULL;
8168 
8169 		reset_freesync_config_for_crtc(dm_new_crtc_state);
8170 
8171 		*lock_and_validation_needed = true;
8172 
8173 	} else {/* Add stream for any updated/enabled CRTC */
8174 		/*
8175 		 * Quick fix to prevent a NULL pointer dereference on new_stream when
8176 		 * newly added MST connectors are not found in the existing crtc_state
8177 		 * in daisy-chained mode. TODO: dig out the root cause of this.
8178 		 */
8179 		if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
8180 			goto skip_modeset;
8181 
8182 		if (modereset_required(new_crtc_state))
8183 			goto skip_modeset;
8184 
8185 		if (modeset_required(new_crtc_state, new_stream,
8186 				     dm_old_crtc_state->stream)) {
8187 
8188 			WARN_ON(dm_new_crtc_state->stream);
8189 
8190 			ret = dm_atomic_get_state(state, &dm_state);
8191 			if (ret)
8192 				goto fail;
8193 
8194 			dm_new_crtc_state->stream = new_stream;
8195 
8196 			dc_stream_retain(new_stream);
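			/*
			 * new_stream already carries a reference from
			 * create_validate_stream_for_sink(); the retain above
			 * is for the CRTC state, while the creation reference
			 * is dropped at skip_modeset below.
			 */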
8197 
8198 			DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
8199 						crtc->base.id);
8200 
8201 			if (dc_add_stream_to_ctx(
8202 					dm->dc,
8203 					dm_state->context,
8204 					dm_new_crtc_state->stream) != DC_OK) {
8205 				ret = -EINVAL;
8206 				goto fail;
8207 			}
8208 
8209 			*lock_and_validation_needed = true;
8210 		}
8211 	}
8212 
8213 skip_modeset:
8214 	/* Release extra reference */
8215 	if (new_stream)
8216 		dc_stream_release(new_stream);
8217 
8218 	/*
8219 	 * We want to do dc stream updates that do not require a
8220 	 * full modeset below.
8221 	 */
8222 	if (!(enable && aconnector && new_crtc_state->active))
8223 		return 0;
8224 	/*
8225 	 * Given above conditions, the dc state cannot be NULL because:
8226 	 * 1. We're in the process of enabling CRTCs (the stream has just
8227 	 *    been added to the dc context, or is already in it),
8228 	 * 2. Has a valid connector attached, and
8229 	 * 3. Is currently active and enabled.
8230 	 * => The dc stream state currently exists.
8231 	 */
8232 	BUG_ON(dm_new_crtc_state->stream == NULL);
8233 
8234 	/* Scaling or underscan settings */
8235 	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
8236 		update_stream_scaling_settings(
8237 			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
8238 
8239 	/* ABM settings */
8240 	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8241 
8242 	/*
8243 	 * Color management settings. We also update color properties
8244 	 * when a modeset is needed, to ensure it gets reprogrammed.
8245 	 */
8246 	if (dm_new_crtc_state->base.color_mgmt_changed ||
8247 	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8248 		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
8249 		if (ret)
8250 			goto fail;
8251 	}
8252 
8253 	/* Update Freesync settings. */
8254 	get_freesync_config_for_crtc(dm_new_crtc_state,
8255 				     dm_new_conn_state);
8256 
8257 	return ret;
8258 
8259 fail:
8260 	if (new_stream)
8261 		dc_stream_release(new_stream);
8262 	return ret;
8263 }
8264 
8265 static bool should_reset_plane(struct drm_atomic_state *state,
8266 			       struct drm_plane *plane,
8267 			       struct drm_plane_state *old_plane_state,
8268 			       struct drm_plane_state *new_plane_state)
8269 {
8270 	struct drm_plane *other;
8271 	struct drm_plane_state *old_other_state, *new_other_state;
8272 	struct drm_crtc_state *new_crtc_state;
8273 	int i;
8274 
8275 	/*
8276 	 * TODO: Remove this hack once the checks below are sufficient
8277 	 * to determine when we need to reset all the planes on
8278 	 * the stream.
8279 	 */
8280 	if (state->allow_modeset)
8281 		return true;
8282 
8283 	/* Exit early if we know that we're adding or removing the plane. */
8284 	if (old_plane_state->crtc != new_plane_state->crtc)
8285 		return true;
8286 
8287 	/* old crtc == new_crtc == NULL, plane not in context. */
8288 	if (!new_plane_state->crtc)
8289 		return false;
8290 
8291 	new_crtc_state =
8292 		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
8293 
8294 	if (!new_crtc_state)
8295 		return true;
8296 
8297 	/* CRTC Degamma changes currently require us to recreate planes. */
8298 	if (new_crtc_state->color_mgmt_changed)
8299 		return true;
8300 
8301 	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
8302 		return true;
8303 
8304 	/*
8305 	 * If there are any new primary or overlay planes being added or
8306 	 * removed then the z-order can potentially change. To ensure
8307 	 * correct z-order and pipe acquisition the current DC architecture
8308 	 * requires us to remove and recreate all existing planes.
8309 	 *
8310 	 * TODO: Come up with a more elegant solution for this.
8311 	 */
8312 	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
8313 		struct dm_plane_state *old_dm_plane_state, *new_dm_plane_state;
8314 
8315 		if (other->type == DRM_PLANE_TYPE_CURSOR)
8316 			continue;
8317 
8318 		if (old_other_state->crtc != new_plane_state->crtc &&
8319 		    new_other_state->crtc != new_plane_state->crtc)
8320 			continue;
8321 
8322 		if (old_other_state->crtc != new_other_state->crtc)
8323 			return true;
8324 
8325 		/* Src/dst size and scaling updates. */
8326 		if (old_other_state->src_w != new_other_state->src_w ||
8327 		    old_other_state->src_h != new_other_state->src_h ||
8328 		    old_other_state->crtc_w != new_other_state->crtc_w ||
8329 		    old_other_state->crtc_h != new_other_state->crtc_h)
8330 			return true;
8331 
8332 		/* Rotation / mirroring updates. */
8333 		if (old_other_state->rotation != new_other_state->rotation)
8334 			return true;
8335 
8336 		/* Blending updates. */
8337 		if (old_other_state->pixel_blend_mode !=
8338 		    new_other_state->pixel_blend_mode)
8339 			return true;
8340 
8341 		/* Alpha updates. */
8342 		if (old_other_state->alpha != new_other_state->alpha)
8343 			return true;
8344 
8345 		/* Colorspace changes. */
8346 		if (old_other_state->color_range != new_other_state->color_range ||
8347 		    old_other_state->color_encoding != new_other_state->color_encoding)
8348 			return true;
8349 
8350 		/* Framebuffer checks fall at the end. */
8351 		if (!old_other_state->fb || !new_other_state->fb)
8352 			continue;
8353 
8354 		/* Pixel format changes can require bandwidth updates. */
8355 		if (old_other_state->fb->format != new_other_state->fb->format)
8356 			return true;
8357 
8358 		old_dm_plane_state = to_dm_plane_state(old_other_state);
8359 		new_dm_plane_state = to_dm_plane_state(new_other_state);
8360 
8361 		/* Tiling and DCC changes also require bandwidth updates. */
8362 		if (old_dm_plane_state->tiling_flags !=
8363 		    new_dm_plane_state->tiling_flags)
8364 			return true;
8365 	}
8366 
8367 	return false;
8368 }
8369 
8370 static int dm_update_plane_state(struct dc *dc,
8371 				 struct drm_atomic_state *state,
8372 				 struct drm_plane *plane,
8373 				 struct drm_plane_state *old_plane_state,
8374 				 struct drm_plane_state *new_plane_state,
8375 				 bool enable,
8376 				 bool *lock_and_validation_needed)
8377 {
8378 
8379 	struct dm_atomic_state *dm_state = NULL;
8380 	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
8381 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8382 	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
8383 	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
8384 	struct amdgpu_crtc *new_acrtc;
8385 	bool needs_reset;
8386 	int ret = 0;
8387 
8388 
8389 	new_plane_crtc = new_plane_state->crtc;
8390 	old_plane_crtc = old_plane_state->crtc;
8391 	dm_new_plane_state = to_dm_plane_state(new_plane_state);
8392 	dm_old_plane_state = to_dm_plane_state(old_plane_state);
8393 
8394 	/* TODO: Implement a better atomic check for the cursor plane */
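	/*
	 * Cursor updates bypass DC's plane/stream machinery and are applied
	 * via dc_stream_set_cursor_attributes()/_position() at commit time
	 * (see handle_cursor_update()), so only basic bounds checking is
	 * done here.
	 */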
8395 	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
8396 		if (!enable || !new_plane_crtc ||
8397 			drm_atomic_plane_disabling(plane->state, new_plane_state))
8398 			return 0;
8399 
8400 		new_acrtc = to_amdgpu_crtc(new_plane_crtc);
8401 
8402 		if ((new_plane_state->crtc_w > new_acrtc->max_cursor_width) ||
8403 			(new_plane_state->crtc_h > new_acrtc->max_cursor_height)) {
8404 			DRM_DEBUG_ATOMIC("Bad cursor size %d x %d\n",
8405 							 new_plane_state->crtc_w, new_plane_state->crtc_h);
8406 			return -EINVAL;
8407 		}
8408 
8409 		return 0;
8410 	}
8411 
8412 	needs_reset = should_reset_plane(state, plane, old_plane_state,
8413 					 new_plane_state);
8414 
8415 	/* Remove any changed/removed planes */
8416 	if (!enable) {
8417 		if (!needs_reset)
8418 			return 0;
8419 
8420 		if (!old_plane_crtc)
8421 			return 0;
8422 
8423 		old_crtc_state = drm_atomic_get_old_crtc_state(
8424 				state, old_plane_crtc);
8425 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8426 
8427 		if (!dm_old_crtc_state->stream)
8428 			return 0;
8429 
8430 		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
8431 				plane->base.id, old_plane_crtc->base.id);
8432 
8433 		ret = dm_atomic_get_state(state, &dm_state);
8434 		if (ret)
8435 			return ret;
8436 
8437 		if (!dc_remove_plane_from_context(
8438 				dc,
8439 				dm_old_crtc_state->stream,
8440 				dm_old_plane_state->dc_state,
8441 				dm_state->context)) {
8442 
8443 			return -EINVAL;
8444 		}
8445 
8446 
8447 		dc_plane_state_release(dm_old_plane_state->dc_state);
8448 		dm_new_plane_state->dc_state = NULL;
8449 
8450 		*lock_and_validation_needed = true;
8451 
8452 	} else { /* Add new planes */
8453 		struct dc_plane_state *dc_new_plane_state;
8454 
8455 		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
8456 			return 0;
8457 
8458 		if (!new_plane_crtc)
8459 			return 0;
8460 
8461 		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
8462 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8463 
8464 		if (!dm_new_crtc_state->stream)
8465 			return 0;
8466 
8467 		if (!needs_reset)
8468 			return 0;
8469 
8470 		ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
8471 		if (ret)
8472 			return ret;
8473 
8474 		WARN_ON(dm_new_plane_state->dc_state);
8475 
8476 		dc_new_plane_state = dc_create_plane_state(dc);
8477 		if (!dc_new_plane_state)
8478 			return -ENOMEM;
8479 
8480 		DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
8481 				plane->base.id, new_plane_crtc->base.id);
8482 
8483 		ret = fill_dc_plane_attributes(
8484 			drm_to_adev(new_plane_crtc->dev),
8485 			dc_new_plane_state,
8486 			new_plane_state,
8487 			new_crtc_state);
8488 		if (ret) {
8489 			dc_plane_state_release(dc_new_plane_state);
8490 			return ret;
8491 		}
8492 
8493 		ret = dm_atomic_get_state(state, &dm_state);
8494 		if (ret) {
8495 			dc_plane_state_release(dc_new_plane_state);
8496 			return ret;
8497 		}
8498 
8499 		/*
8500 		 * Any atomic check errors that occur after this will
8501 		 * not need a release. The plane state will be attached
8502 		 * to the stream, and therefore part of the atomic
8503 		 * state. It'll be released when the atomic state is
8504 		 * cleaned.
8505 		 */
8506 		if (!dc_add_plane_to_context(
8507 				dc,
8508 				dm_new_crtc_state->stream,
8509 				dc_new_plane_state,
8510 				dm_state->context)) {
8511 
8512 			dc_plane_state_release(dc_new_plane_state);
8513 			return -EINVAL;
8514 		}
8515 
8516 		dm_new_plane_state->dc_state = dc_new_plane_state;
8517 
8518 		/* Tell DC to do a full surface update every time there
8519 		 * is a plane change. Inefficient, but works for now.
8520 		 */
8521 		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
8522 
8523 		*lock_and_validation_needed = true;
8524 	}
8525 
8526 
8527 	return ret;
8528 }
8529 
8530 #if defined(CONFIG_DRM_AMD_DC_DCN)
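/*
 * When a modeset touches a CRTC driving a DSC-capable MST display, DSC
 * bandwidth may need to be redistributed across every stream sharing the
 * same MST topology, so all CRTCs on that topology are pulled into the
 * atomic state via drm_dp_mst_add_affected_dsc_crtcs().
 */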
8531 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
8532 {
8533 	struct drm_connector *connector;
8534 	struct drm_connector_state *conn_state;
8535 	struct amdgpu_dm_connector *aconnector = NULL;
8536 	int i;
8537 	for_each_new_connector_in_state(state, connector, conn_state, i) {
8538 		if (conn_state->crtc != crtc)
8539 			continue;
8540 
8541 		aconnector = to_amdgpu_dm_connector(connector);
8542 		if (!aconnector->port || !aconnector->mst_port)
8543 			aconnector = NULL;
8544 		else
8545 			break;
8546 	}
8547 
8548 	if (!aconnector)
8549 		return 0;
8550 
8551 	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
8552 }
8553 #endif
8554 
8555 /**
8556  * amdgpu_dm_atomic_check() - Atomic check implementation for amdgpu DM.
8557  * @dev: The DRM device
8558  * @state: The atomic state to commit
8559  *
8560  * Validate that the given atomic state is programmable by DC into hardware.
8561  * This involves constructing a &struct dc_state reflecting the new hardware
8562  * state we wish to commit, then querying DC to see if it is programmable. It's
8563  * important not to modify the existing DC state. Otherwise, atomic_check
8564  * may unexpectedly commit hardware changes.
8565  *
8566  * When validating the DC state, it's important that the right locks are
8567  * acquired. For the full-update case, which removes/adds/updates streams on
8568  * one CRTC while flipping on another, acquiring the global lock guarantees
8569  * that any such full-update commit will wait for completion of any
8570  * outstanding flip using DRM's synchronization events.
8571  *
8572  * Note that DM adds the affected connectors for all CRTCs in state, even when that
8573  * might not seem necessary. This is because DC stream creation requires the
8574  * DC sink, which is tied to the DRM connector state. Cleaning this up should
8575  * be possible but non-trivial - a possible TODO item.
8576  *
8577  * Return: 0 on success, or a negative error code if validation failed.
8578  */
8579 static int amdgpu_dm_atomic_check(struct drm_device *dev,
8580 				  struct drm_atomic_state *state)
8581 {
8582 	struct amdgpu_device *adev = drm_to_adev(dev);
8583 	struct dm_atomic_state *dm_state = NULL;
8584 	struct dc *dc = adev->dm.dc;
8585 	struct drm_connector *connector;
8586 	struct drm_connector_state *old_con_state, *new_con_state;
8587 	struct drm_crtc *crtc;
8588 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8589 	struct drm_plane *plane;
8590 	struct drm_plane_state *old_plane_state, *new_plane_state;
8591 	enum dc_status status;
8592 	int ret, i;
8593 	bool lock_and_validation_needed = false;
8594 
8595 	amdgpu_check_debugfs_connector_property_change(adev, state);
8596 
8597 	ret = drm_atomic_helper_check_modeset(dev, state);
8598 	if (ret)
8599 		goto fail;
8600 
8601 	/* Check connector changes */
8602 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8603 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8604 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8605 
8606 		/* Skip connectors that are disabled or part of modeset already. */
8607 		if (!old_con_state->crtc && !new_con_state->crtc)
8608 			continue;
8609 
8610 		if (!new_con_state->crtc)
8611 			continue;
8612 
8613 		new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
8614 		if (IS_ERR(new_crtc_state)) {
8615 			ret = PTR_ERR(new_crtc_state);
8616 			goto fail;
8617 		}
8618 
8619 		if (dm_old_con_state->abm_level !=
8620 		    dm_new_con_state->abm_level)
8621 			new_crtc_state->connectors_changed = true;
8622 	}
8623 
8624 #if defined(CONFIG_DRM_AMD_DC_DCN)
8625 	if (adev->asic_type >= CHIP_NAVI10) {
8626 		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8627 			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8628 				ret = add_affected_mst_dsc_crtcs(state, crtc);
8629 				if (ret)
8630 					goto fail;
8631 			}
8632 		}
8633 	}
8634 #endif
8635 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8636 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
8637 		    !new_crtc_state->color_mgmt_changed &&
8638 		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled)
8639 			continue;
8640 
8641 		if (!new_crtc_state->enable)
8642 			continue;
8643 
8644 		ret = drm_atomic_add_affected_connectors(state, crtc);
8645 		if (ret)
8646 			goto fail;
8647 
8648 		ret = drm_atomic_add_affected_planes(state, crtc);
8649 		if (ret)
8650 			goto fail;
8651 	}
8652 
8653 	/*
8654 	 * Add all primary and overlay planes on the CRTC to the state
8655 	 * whenever a plane is enabled to maintain correct z-ordering
8656 	 * and to enable fast surface updates.
8657 	 */
8658 	drm_for_each_crtc(crtc, dev) {
8659 		bool modified = false;
8660 
8661 		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8662 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
8663 				continue;
8664 
8665 			if (new_plane_state->crtc == crtc ||
8666 			    old_plane_state->crtc == crtc) {
8667 				modified = true;
8668 				break;
8669 			}
8670 		}
8671 
8672 		if (!modified)
8673 			continue;
8674 
8675 		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
8676 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
8677 				continue;
8678 
8679 			new_plane_state =
8680 				drm_atomic_get_plane_state(state, plane);
8681 
8682 			if (IS_ERR(new_plane_state)) {
8683 				ret = PTR_ERR(new_plane_state);
8684 				goto fail;
8685 			}
8686 		}
8687 	}
8688 
8689 	/* Prepass for updating tiling flags on new planes. */
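	/*
	 * The flags read here feed the tiling/DCC comparison in
	 * should_reset_plane(), since such changes alter bandwidth
	 * requirements.
	 */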
8690 	for_each_new_plane_in_state(state, plane, new_plane_state, i) {
8691 		struct dm_plane_state *new_dm_plane_state = to_dm_plane_state(new_plane_state);
8692 		struct amdgpu_framebuffer *new_afb = to_amdgpu_framebuffer(new_plane_state->fb);
8693 
8694 		ret = get_fb_info(new_afb, &new_dm_plane_state->tiling_flags,
8695 				  &new_dm_plane_state->tmz_surface);
8696 		if (ret)
8697 			goto fail;
8698 	}
8699 
8700 	/* Remove existing planes if they are modified */
8701 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8702 		ret = dm_update_plane_state(dc, state, plane,
8703 					    old_plane_state,
8704 					    new_plane_state,
8705 					    false,
8706 					    &lock_and_validation_needed);
8707 		if (ret)
8708 			goto fail;
8709 	}
8710 
8711 	/* Disable all crtcs which require disable */
8712 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8713 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
8714 					   old_crtc_state,
8715 					   new_crtc_state,
8716 					   false,
8717 					   &lock_and_validation_needed);
8718 		if (ret)
8719 			goto fail;
8720 	}
8721 
8722 	/* Enable all crtcs which require enable */
8723 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8724 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
8725 					   old_crtc_state,
8726 					   new_crtc_state,
8727 					   true,
8728 					   &lock_and_validation_needed);
8729 		if (ret)
8730 			goto fail;
8731 	}
8732 
8733 	/* Add new/modified planes */
8734 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
8735 		ret = dm_update_plane_state(dc, state, plane,
8736 					    old_plane_state,
8737 					    new_plane_state,
8738 					    true,
8739 					    &lock_and_validation_needed);
8740 		if (ret)
8741 			goto fail;
8742 	}
8743 
8744 	/* Run this here since we want to validate the streams we created */
8745 	ret = drm_atomic_helper_check_planes(dev, state);
8746 	if (ret)
8747 		goto fail;
8748 
8749 	if (state->legacy_cursor_update) {
8750 		/*
8751 		 * This is a fast cursor update coming from the plane update
8752 		 * helper, check if it can be done asynchronously for better
8753 		 * performance.
8754 		 */
8755 		state->async_update =
8756 			!drm_atomic_helper_async_check(dev, state);
8757 
8758 		/*
8759 		 * Skip the remaining global validation if this is an async
8760 		 * update. Cursor updates can be done without affecting
8761 		 * state or bandwidth calcs and this avoids the performance
8762 		 * penalty of locking the private state object and
8763 		 * allocating a new dc_state.
8764 		 */
8765 		if (state->async_update)
8766 			return 0;
8767 	}
8768 
8769 	/* Check scaling and underscan changes */
8770 	/* TODO: Scaling-change validation was removed due to the inability to
8771 	 * commit a new stream into the context without causing a full reset.
8772 	 * Need to decide how to handle this.
8773 	 */
8774 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8775 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8776 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8777 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8778 
8779 		/* Skip any modesets/resets */
8780 		if (!acrtc || drm_atomic_crtc_needs_modeset(
8781 				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
8782 			continue;
8783 
8784 		/* Skip anything that is not a scaling or underscan change */
8785 		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
8786 			continue;
8787 
8788 		lock_and_validation_needed = true;
8789 	}
8790 
8791 	/*
8792 	 * Streams and planes are reset when there are changes that affect
8793 	 * bandwidth. Anything that affects bandwidth needs to go through
8794 	 * DC global validation to ensure that the configuration can be applied
8795 	 * to hardware.
8796 	 *
8797 	 * We have to currently stall out here in atomic_check for outstanding
8798 	 * commits to finish in this case because our IRQ handlers reference
8799 	 * DRM state directly - we can end up disabling interrupts too early
8800 	 * if we don't.
8801 	 *
8802 	 * TODO: Remove this stall and drop DM state private objects.
8803 	 */
8804 	if (lock_and_validation_needed) {
8805 		ret = dm_atomic_get_state(state, &dm_state);
8806 		if (ret)
8807 			goto fail;
8808 
8809 		ret = do_aquire_global_lock(dev, state);
8810 		if (ret)
8811 			goto fail;
8812 
8813 #if defined(CONFIG_DRM_AMD_DC_DCN)
8814 		if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
8815 			goto fail;
8816 
8817 		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
8818 		if (ret)
8819 			goto fail;
8820 #endif
8821 
8822 		/*
8823 		 * Perform validation of MST topology in the state:
8824 		 * We need to perform MST atomic check before calling
8825 		 * dc_validate_global_state(), or there is a chance
8826 		 * to get stuck in an infinite loop and hang eventually.
8827 		 */
8828 		ret = drm_dp_mst_atomic_check(state);
8829 		if (ret)
8830 			goto fail;
8831 		status = dc_validate_global_state(dc, dm_state->context, false);
8832 		if (status != DC_OK) {
8833 			DC_LOG_WARNING("DC global validation failure: %s (%d)",
8834 				       dc_status_to_str(status), status);
8835 			ret = -EINVAL;
8836 			goto fail;
8837 		}
8838 	} else {
8839 		/*
8840 		 * The commit is a fast update. Fast updates shouldn't change
8841 		 * the DC context, affect global validation, and can have their
8842 		 * commit work done in parallel with other commits not touching
8843 		 * the same resource. If we have a new DC context as part of
8844 		 * the DM atomic state from validation we need to free it and
8845 		 * retain the existing one instead.
8846 		 *
8847 		 * Furthermore, since the DM atomic state only contains the DC
8848 		 * context and can safely be annulled, we can free the state
8849 		 * and clear the associated private object now to free
8850 		 * some memory and avoid a possible use-after-free later.
8851 		 */
8852 
8853 		for (i = 0; i < state->num_private_objs; i++) {
8854 			struct drm_private_obj *obj = state->private_objs[i].ptr;
8855 
8856 			if (obj->funcs == adev->dm.atomic_obj.funcs) {
8857 				int j = state->num_private_objs-1;
8858 
8859 				dm_atomic_destroy_state(obj,
8860 						state->private_objs[i].state);
8861 
8862 				/* If i is not at the end of the array then the
8863 				 * last element needs to be moved to where i was
8864 				 * before the array can safely be truncated.
8865 				 */
8866 				if (i != j)
8867 					state->private_objs[i] =
8868 						state->private_objs[j];
8869 
8870 				state->private_objs[j].ptr = NULL;
8871 				state->private_objs[j].state = NULL;
8872 				state->private_objs[j].old_state = NULL;
8873 				state->private_objs[j].new_state = NULL;
8874 
8875 				state->num_private_objs = j;
8876 				break;
8877 			}
8878 		}
8879 	}
8880 
8881 	/* Store the overall update type for use later in atomic check. */
8882 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8883 		struct dm_crtc_state *dm_new_crtc_state =
8884 			to_dm_crtc_state(new_crtc_state);
8885 
8886 		dm_new_crtc_state->update_type = lock_and_validation_needed ?
8887 							 UPDATE_TYPE_FULL :
8888 							 UPDATE_TYPE_FAST;
8889 	}
8890 
8891 	/* Must be success */
8892 	WARN_ON(ret);
8893 	return ret;
8894 
8895 fail:
8896 	if (ret == -EDEADLK)
8897 		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
8898 	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
8899 		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
8900 	else
8901 		DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
8902 
8903 	return ret;
8904 }
8905 
8906 static bool is_dp_capable_without_timing_msa(struct dc *dc,
8907 					     struct amdgpu_dm_connector *amdgpu_dm_connector)
8908 {
8909 	uint8_t dpcd_data;
8910 	bool capable = false;
8911 
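	/*
	 * DPCD register DP_DOWN_STREAM_PORT_COUNT (0x007) carries the
	 * MSA_TIMING_PAR_IGNORED bit: when set, the sink can ignore the
	 * MSA timing parameters, a prerequisite for variable refresh.
	 */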
8912 	if (amdgpu_dm_connector->dc_link &&
8913 		dm_helpers_dp_read_dpcd(
8914 				NULL,
8915 				amdgpu_dm_connector->dc_link,
8916 				DP_DOWN_STREAM_PORT_COUNT,
8917 				&dpcd_data,
8918 				sizeof(dpcd_data))) {
8919 		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
8920 	}
8921 
8922 	return capable;
8923 }
8924 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
8925 					struct edid *edid)
8926 {
8927 	int i;
8928 	bool edid_check_required;
8929 	struct detailed_timing *timing;
8930 	struct detailed_non_pixel *data;
8931 	struct detailed_data_monitor_range *range;
8932 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8933 			to_amdgpu_dm_connector(connector);
8934 	struct dm_connector_state *dm_con_state = NULL;
8935 
8936 	struct drm_device *dev = connector->dev;
8937 	struct amdgpu_device *adev = drm_to_adev(dev);
8938 	bool freesync_capable = false;
8939 
8940 	if (!connector->state) {
8941 		DRM_ERROR("%s - Connector has no state\n", __func__);
8942 		goto update;
8943 	}
8944 
8945 	if (!edid) {
8946 		dm_con_state = to_dm_connector_state(connector->state);
8947 
8948 		amdgpu_dm_connector->min_vfreq = 0;
8949 		amdgpu_dm_connector->max_vfreq = 0;
8950 		amdgpu_dm_connector->pixel_clock_mhz = 0;
8951 
8952 		goto update;
8953 	}
8954 
8955 	dm_con_state = to_dm_connector_state(connector->state);
8956 
8957 	edid_check_required = false;
8958 	if (!amdgpu_dm_connector->dc_sink) {
8959 		DRM_ERROR("dc_sink NULL, could not add FreeSync module.\n");
8960 		goto update;
8961 	}
8962 	if (!adev->dm.freesync_module)
8963 		goto update;
8964 	/*
8965 	 * If the EDID is non-NULL, restrict FreeSync to DP and eDP sinks.
8966 	 */
8967 	if (edid) {
8968 		if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
8969 			|| amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
8970 			edid_check_required = is_dp_capable_without_timing_msa(
8971 						adev->dm.dc,
8972 						amdgpu_dm_connector);
8973 		}
8974 	}
8975 	if (edid_check_required && (edid->version > 1 ||
8976 	   (edid->version == 1 && edid->revision > 1))) {
8977 		for (i = 0; i < 4; i++) {
8978 
8979 			timing	= &edid->detailed_timings[i];
8980 			data	= &timing->data.other_data;
8981 			range	= &data->data.range;
8982 			/*
8983 			 * Check if monitor has continuous frequency mode
8984 			 */
8985 			if (data->type != EDID_DETAIL_MONITOR_RANGE)
8986 				continue;
8987 			/*
8988 			 * Check for range limits only: if flags == 1 then
8989 			 * no additional timing information is provided.
8990 			 * Default GTF, GTF secondary curve and CVT are not
8991 			 * supported.
8992 			 */
8993 			if (range->flags != 1)
8994 				continue;
8995 
8996 			amdgpu_dm_connector->min_vfreq = range->min_vfreq;
8997 			amdgpu_dm_connector->max_vfreq = range->max_vfreq;
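			/*
			 * The EDID range descriptor stores the maximum pixel
			 * clock in units of 10 MHz, hence the * 10 below.
			 */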
8998 			amdgpu_dm_connector->pixel_clock_mhz =
8999 				range->pixel_clock_mhz * 10;
9000 			break;
9001 		}
9002 
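		/*
		 * Only report FreeSync capability when the panel offers a
		 * usable VRR window, i.e. more than 10 Hz between its
		 * minimum and maximum refresh rates.
		 */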
9003 		if (amdgpu_dm_connector->max_vfreq -
9004 		    amdgpu_dm_connector->min_vfreq > 10) {
9005 
9006 			freesync_capable = true;
9007 		}
9008 	}
9009 
9010 update:
9011 	if (dm_con_state)
9012 		dm_con_state->freesync_capable = freesync_capable;
9013 
9014 	if (connector->vrr_capable_property)
9015 		drm_connector_set_vrr_capable_property(connector,
9016 						       freesync_capable);
9017 }
9018 
9019 static void amdgpu_dm_set_psr_caps(struct dc_link *link)
9020 {
9021 	uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
9022 
9023 	if (!(link->connector_signal & SIGNAL_TYPE_EDP))
9024 		return;
9025 	if (link->type == dc_connection_none)
9026 		return;
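	/*
	 * Byte 0 of the DPCD PSR capability block (DP_PSR_SUPPORT, 0x070)
	 * reports the PSR version supported by the sink; 0 means PSR is
	 * not supported.
	 */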
9027 	if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
9028 					dpcd_data, sizeof(dpcd_data))) {
9029 		link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
9030 
9031 		if (dpcd_data[0] == 0) {
9032 			link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
9033 			link->psr_settings.psr_feature_enabled = false;
9034 		} else {
9035 			link->psr_settings.psr_version = DC_PSR_VERSION_1;
9036 			link->psr_settings.psr_feature_enabled = true;
9037 		}
9038 
9039 		DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
9040 	}
9041 }
9042 
9043 /*
9044  * amdgpu_dm_link_setup_psr() - configure psr link
9045  * @stream: stream state
9046  *
9047  * Return: true on success
9048  */
9049 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
9050 {
9051 	struct dc_link *link = NULL;
9052 	struct psr_config psr_config = {0};
9053 	struct psr_context psr_context = {0};
9054 	bool ret = false;
9055 
9056 	if (stream == NULL)
9057 		return false;
9058 
9059 	link = stream->link;
9060 
9061 	psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
9062 
9063 	if (psr_config.psr_version > 0) {
9064 		psr_config.psr_exit_link_training_required = 0x1;
9065 		psr_config.psr_frame_capture_indication_req = 0;
9066 		psr_config.psr_rfb_setup_time = 0x37;
9067 		psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
9068 		psr_config.allow_smu_optimizations = 0x0;
9069 
9070 		ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
9071 
9072 	}
9073 	DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled);
9074 
9075 	return ret;
9076 }
9077 
9078 /*
9079  * amdgpu_dm_psr_enable() - enable psr f/w
9080  * @stream: stream state
9081  *
9082  * Return: true on success
9083  */
9084 bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
9085 {
9086 	struct dc_link *link = stream->link;
9087 	unsigned int vsync_rate_hz = 0;
9088 	struct dc_static_screen_params params = {0};
9089 	/* Calculate number of static frames before generating interrupt to
9090 	 * enter PSR.
9091 	 */
9092 	/* Init fail-safe of 2 static frames */
9093 	unsigned int num_frames_static = 2;
9094 
9095 	DRM_DEBUG_DRIVER("Enabling psr...\n");
9096 
9097 	vsync_rate_hz = div64_u64(div64_u64((
9098 			stream->timing.pix_clk_100hz * 100),
9099 			stream->timing.v_total),
9100 			stream->timing.h_total);
9101 
9102 	/* Round up
9103 	 * Calculate number of frames such that at least 30 ms of time has
9104 	 * passed.
9105 	 */
9106 	if (vsync_rate_hz != 0) {
9107 		unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
9108 		num_frames_static = (30000 / frame_time_microsec) + 1;
9109 	}
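	/*
	 * For example, a 60 Hz stream has a frame time of ~16.7 ms, so
	 * 30000 / 16666 + 1 = 2 static frames (~33 ms) must elapse before
	 * PSR entry.
	 */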
9110 
9111 	params.triggers.cursor_update = true;
9112 	params.triggers.overlay_update = true;
9113 	params.triggers.surface_update = true;
9114 	params.num_frames = num_frames_static;
9115 
9116 	dc_stream_set_static_screen_params(link->ctx->dc,
9117 					   &stream, 1,
9118 					   &params);
9119 
9120 	return dc_link_set_psr_allow_active(link, true, false);
9121 }
9122 
9123 /*
9124  * amdgpu_dm_psr_disable() - disable psr f/w
9125  * @stream:  stream state
9126  *
9127  * Return: true on success
9128  */
9129 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
9130 {
9131 
9132 	DRM_DEBUG_DRIVER("Disabling psr...\n");
9133 
9134 	return dc_link_set_psr_allow_active(stream->link, false, true);
9135 }
9136 
9137 /*
9138  * amdgpu_dm_psr_disable_all() - disable psr f/w
9139  * if psr is enabled on any stream
9140  *
9141  * Return: true if success
9142  */
9143 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm)
9144 {
9145 	DRM_DEBUG_DRIVER("Disabling psr if psr is enabled on any stream\n");
9146 	return dc_set_psr_allow_active(dm->dc, false);
9147 }
9148 
9149 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
9150 {
9151 	struct amdgpu_device *adev = drm_to_adev(dev);
9152 	struct dc *dc = adev->dm.dc;
9153 	int i;
9154 
9155 	mutex_lock(&adev->dm.dc_lock);
9156 	if (dc->current_state) {
9157 		for (i = 0; i < dc->current_state->stream_count; ++i)
9158 			dc->current_state->streams[i]
9159 				->triggered_crtc_reset.enabled =
9160 				adev->dm.force_timing_sync;
9161 
9162 		dm_enable_per_frame_crtc_master_sync(dc->current_state);
9163 		dc_trigger_sync(dc, dc->current_state);
9164 	}
9165 	mutex_unlock(&adev->dm.dc_lock);
9166 }
9167