1  /*
2   * Copyright 2015 Advanced Micro Devices, Inc.
3   *
4   * Permission is hereby granted, free of charge, to any person obtaining a
5   * copy of this software and associated documentation files (the "Software"),
6   * to deal in the Software without restriction, including without limitation
7   * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8   * and/or sell copies of the Software, and to permit persons to whom the
9   * Software is furnished to do so, subject to the following conditions:
10   *
11   * The above copyright notice and this permission notice shall be included in
12   * all copies or substantial portions of the Software.
13   *
14   * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15   * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16   * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17   * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18   * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19   * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20   * OTHER DEALINGS IN THE SOFTWARE.
21   *
22   * Authors: AMD
23   *
24   */
25  
26  /* The caprices of the preprocessor require that this be declared right here */
27  #define CREATE_TRACE_POINTS
28  
29  #include "dm_services_types.h"
30  #include "dc.h"
31  #include "link_enc_cfg.h"
32  #include "dc/inc/core_types.h"
33  #include "dal_asic_id.h"
34  #include "dmub/dmub_srv.h"
35  #include "dc/inc/hw/dmcu.h"
36  #include "dc/inc/hw/abm.h"
37  #include "dc/dc_dmub_srv.h"
38  #include "dc/dc_edid_parser.h"
39  #include "dc/dc_stat.h"
40  #include "amdgpu_dm_trace.h"
41  #include "dpcd_defs.h"
42  #include "link/protocols/link_dpcd.h"
43  #include "link_service_types.h"
44  #include "link/protocols/link_dp_capability.h"
45  #include "link/protocols/link_ddc.h"
46  
47  #include "vid.h"
48  #include "amdgpu.h"
49  #include "amdgpu_display.h"
50  #include "amdgpu_ucode.h"
51  #include "atom.h"
52  #include "amdgpu_dm.h"
53  #include "amdgpu_dm_plane.h"
54  #include "amdgpu_dm_crtc.h"
55  #include "amdgpu_dm_hdcp.h"
56  #include <drm/display/drm_hdcp_helper.h>
57  #include "amdgpu_pm.h"
58  #include "amdgpu_atombios.h"
59  
60  #include "amd_shared.h"
61  #include "amdgpu_dm_irq.h"
62  #include "dm_helpers.h"
63  #include "amdgpu_dm_mst_types.h"
64  #if defined(CONFIG_DEBUG_FS)
65  #include "amdgpu_dm_debugfs.h"
66  #endif
67  #include "amdgpu_dm_psr.h"
68  #include "amdgpu_dm_replay.h"
69  
70  #include "ivsrcid/ivsrcid_vislands30.h"
71  
72  #include <linux/backlight.h>
73  #include <linux/module.h>
74  #include <linux/moduleparam.h>
75  #include <linux/types.h>
76  #include <linux/pm_runtime.h>
77  #include <linux/pci.h>
78  #include <linux/firmware.h>
79  #include <linux/component.h>
80  #include <linux/dmi.h>
81  
82  #include <drm/display/drm_dp_mst_helper.h>
83  #include <drm/display/drm_hdmi_helper.h>
84  #include <drm/drm_atomic.h>
85  #include <drm/drm_atomic_uapi.h>
86  #include <drm/drm_atomic_helper.h>
87  #include <drm/drm_blend.h>
88  #include <drm/drm_fourcc.h>
89  #include <drm/drm_edid.h>
90  #include <drm/drm_vblank.h>
91  #include <drm/drm_audio_component.h>
92  #include <drm/drm_gem_atomic_helper.h>
93  #include <drm/drm_plane_helper.h>
94  
95  #include <acpi/video.h>
96  
97  #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
98  
99  #include "dcn/dcn_1_0_offset.h"
100  #include "dcn/dcn_1_0_sh_mask.h"
101  #include "soc15_hw_ip.h"
102  #include "soc15_common.h"
103  #include "vega10_ip_offset.h"
104  
105  #include "gc/gc_11_0_0_offset.h"
106  #include "gc/gc_11_0_0_sh_mask.h"
107  
108  #include "modules/inc/mod_freesync.h"
109  #include "modules/power/power_helpers.h"
110  
111  #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
112  MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
113  #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
114  MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
115  #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
116  MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
117  #define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
118  MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
119  #define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
120  MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
121  #define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
122  MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
123  #define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
124  MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
125  #define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
126  MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);
127  #define FIRMWARE_DCN_314_DMUB "amdgpu/dcn_3_1_4_dmcub.bin"
128  MODULE_FIRMWARE(FIRMWARE_DCN_314_DMUB);
129  #define FIRMWARE_DCN_315_DMUB "amdgpu/dcn_3_1_5_dmcub.bin"
130  MODULE_FIRMWARE(FIRMWARE_DCN_315_DMUB);
131  #define FIRMWARE_DCN316_DMUB "amdgpu/dcn_3_1_6_dmcub.bin"
132  MODULE_FIRMWARE(FIRMWARE_DCN316_DMUB);
133  
134  #define FIRMWARE_DCN_V3_2_0_DMCUB "amdgpu/dcn_3_2_0_dmcub.bin"
135  MODULE_FIRMWARE(FIRMWARE_DCN_V3_2_0_DMCUB);
136  #define FIRMWARE_DCN_V3_2_1_DMCUB "amdgpu/dcn_3_2_1_dmcub.bin"
137  MODULE_FIRMWARE(FIRMWARE_DCN_V3_2_1_DMCUB);
138  
139  #define FIRMWARE_RAVEN_DMCU		"amdgpu/raven_dmcu.bin"
140  MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
141  
142  #define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
143  MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
144  
145  /* Number of bytes in PSP header for firmware. */
146  #define PSP_HEADER_BYTES 0x100
147  
148  /* Number of bytes in PSP footer for firmware. */
149  #define PSP_FOOTER_BYTES 0x100
150  
151  /**
152   * DOC: overview
153   *
154   * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
155   * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
156   * requests into DC requests, and DC responses into DRM responses.
157   *
158   * The root control structure is &struct amdgpu_display_manager.
159   */
160  
161  /* basic init/fini API */
162  static int amdgpu_dm_init(struct amdgpu_device *adev);
163  static void amdgpu_dm_fini(struct amdgpu_device *adev);
164  static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);
165  
166  static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
167  {
168  	switch (link->dpcd_caps.dongle_type) {
169  	case DISPLAY_DONGLE_NONE:
170  		return DRM_MODE_SUBCONNECTOR_Native;
171  	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
172  		return DRM_MODE_SUBCONNECTOR_VGA;
173  	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
174  	case DISPLAY_DONGLE_DP_DVI_DONGLE:
175  		return DRM_MODE_SUBCONNECTOR_DVID;
176  	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
177  	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
178  		return DRM_MODE_SUBCONNECTOR_HDMIA;
179  	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
180  	default:
181  		return DRM_MODE_SUBCONNECTOR_Unknown;
182  	}
183  }
184  
185  static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
186  {
187  	struct dc_link *link = aconnector->dc_link;
188  	struct drm_connector *connector = &aconnector->base;
189  	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
190  
191  	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
192  		return;
193  
194  	if (aconnector->dc_sink)
195  		subconnector = get_subconnector_type(link);
196  
197  	drm_object_property_set_value(&connector->base,
198  			connector->dev->mode_config.dp_subconnector_property,
199  			subconnector);
200  }
201  
202  /*
203   * Initializes drm_device display related structures, based on the information
204   * provided by DAL. The drm structures are: drm_crtc, drm_connector,
205   * drm_encoder, drm_mode_config
206   *
207   * Returns 0 on success
208   */
209  static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
210  /* removes and deallocates the drm structures, created by the above function */
211  static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
212  
213  static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
214  				    struct amdgpu_dm_connector *amdgpu_dm_connector,
215  				    u32 link_index,
216  				    struct amdgpu_encoder *amdgpu_encoder);
217  static int amdgpu_dm_encoder_init(struct drm_device *dev,
218  				  struct amdgpu_encoder *aencoder,
219  				  uint32_t link_index);
220  
221  static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
222  
223  static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
224  
225  static int amdgpu_dm_atomic_check(struct drm_device *dev,
226  				  struct drm_atomic_state *state);
227  
228  static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
229  static void handle_hpd_rx_irq(void *param);
230  
231  static bool
232  is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
233  				 struct drm_crtc_state *new_crtc_state);
234  /*
235   * dm_vblank_get_counter
236   *
237   * @brief
238   * Get counter for number of vertical blanks
239   *
240   * @param
241   * struct amdgpu_device *adev - [in] desired amdgpu device
242   * int crtc - [in] which CRTC to get the counter from
243   *
244   * @return
245   * Counter for vertical blanks
246   */
247  static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
248  {
249  	struct amdgpu_crtc *acrtc = NULL;
250  
251  	if (crtc >= adev->mode_info.num_crtc)
252  		return 0;
253  
254  	acrtc = adev->mode_info.crtcs[crtc];
255  
256  	if (!acrtc->dm_irq_params.stream) {
257  		DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
258  			  crtc);
259  		return 0;
260  	}
261  
262  	return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
263  }
264  
265  static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
266  				  u32 *vbl, u32 *position)
267  {
268  	u32 v_blank_start, v_blank_end, h_position, v_position;
269  	struct amdgpu_crtc *acrtc = NULL;
270  
271  	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
272  		return -EINVAL;
273  
274  	acrtc = adev->mode_info.crtcs[crtc];
275  
276  	if (!acrtc->dm_irq_params.stream) {
277  		DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
278  			  crtc);
279  		return 0;
280  	}
281  
282  	/*
283  	 * TODO rework base driver to use values directly.
284  	 * for now parse it back into reg-format
285  	 */
286  	dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
287  				 &v_blank_start,
288  				 &v_blank_end,
289  				 &h_position,
290  				 &v_position);
291  
292  	*position = v_position | (h_position << 16);
293  	*vbl = v_blank_start | (v_blank_end << 16);
294  
295  	return 0;
296  }
297  
298  static bool dm_is_idle(void *handle)
299  {
300  	/* XXX todo */
301  	return true;
302  }
303  
304  static int dm_wait_for_idle(void *handle)
305  {
306  	/* XXX todo */
307  	return 0;
308  }
309  
310  static bool dm_check_soft_reset(void *handle)
311  {
312  	return false;
313  }
314  
315  static int dm_soft_reset(void *handle)
316  {
317  	/* XXX todo */
318  	return 0;
319  }
320  
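/*
 * Look up the amdgpu_crtc driven by the given OTG (output timing generator)
 * instance. If the instance is unknown (-1), warn and fall back to the first
 * CRTC rather than returning NULL.
 */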
321  static struct amdgpu_crtc *
322  get_crtc_by_otg_inst(struct amdgpu_device *adev,
323  		     int otg_inst)
324  {
325  	struct drm_device *dev = adev_to_drm(adev);
326  	struct drm_crtc *crtc;
327  	struct amdgpu_crtc *amdgpu_crtc;
328  
329  	if (WARN_ON(otg_inst == -1))
330  		return adev->mode_info.crtcs[0];
331  
332  	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
333  		amdgpu_crtc = to_amdgpu_crtc(crtc);
334  
335  		if (amdgpu_crtc->otg_inst == otg_inst)
336  			return amdgpu_crtc;
337  	}
338  
339  	return NULL;
340  }
341  
342  static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
343  					      struct dm_crtc_state *new_state)
344  {
345  	if (new_state->freesync_config.state ==  VRR_STATE_ACTIVE_FIXED)
346  		return true;
347  	else if (amdgpu_dm_crtc_vrr_active(old_state) != amdgpu_dm_crtc_vrr_active(new_state))
348  		return true;
349  	else
350  		return false;
351  }
352  
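/*
 * Reverse the surface update array in place by swapping entries from both
 * ends toward the middle, so the planes are handed to DC in the opposite
 * order from how DM assembled them.
 */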
353  static inline void reverse_planes_order(struct dc_surface_update *array_of_surface_update,
354  					int planes_count)
355  {
356  	int i, j;
357  
358  	for (i = 0, j = planes_count - 1; i < j; i++, j--)
359  		swap(array_of_surface_update[i], array_of_surface_update[j]);
360  }
361  
362  /**
363   * update_planes_and_stream_adapter() - Send planes to be updated in DC
364   *
365   * DC has a generic way to update planes and stream via
366   * dc_update_planes_and_stream function; however, DM might need some
367   * adjustments and preparation before calling it. This function is a wrapper
368   * for the dc_update_planes_and_stream that does any required configuration
369   * before passing control to DC.
370   *
371   * @dc: Display Core control structure
372   * @update_type: specify whether it is FULL/MEDIUM/FAST update
373   * @planes_count: planes count to update
374   * @stream: stream state
375   * @stream_update: stream update
376   * @array_of_surface_update: dc surface update pointer
377   *
378   */
379  static inline bool update_planes_and_stream_adapter(struct dc *dc,
380  						    int update_type,
381  						    int planes_count,
382  						    struct dc_stream_state *stream,
383  						    struct dc_stream_update *stream_update,
384  						    struct dc_surface_update *array_of_surface_update)
385  {
386  	reverse_planes_order(array_of_surface_update, planes_count);
387  
388  	/*
389  	 * Previous frame finished and HW is ready for optimization.
390  	 */
391  	if (update_type == UPDATE_TYPE_FAST)
392  		dc_post_update_surfaces_to_stream(dc);
393  
394  	return dc_update_planes_and_stream(dc,
395  					   array_of_surface_update,
396  					   planes_count,
397  					   stream,
398  					   stream_update);
399  }
400  
401  /**
402   * dm_pflip_high_irq() - Handle pageflip interrupt
403   * @interrupt_params: used for determining the CRTC instance
404   *
405   * Handles the pageflip interrupt by notifying all interested parties
406   * that the pageflip has been completed.
407   */
408  static void dm_pflip_high_irq(void *interrupt_params)
409  {
410  	struct amdgpu_crtc *amdgpu_crtc;
411  	struct common_irq_params *irq_params = interrupt_params;
412  	struct amdgpu_device *adev = irq_params->adev;
413  	unsigned long flags;
414  	struct drm_pending_vblank_event *e;
415  	u32 vpos, hpos, v_blank_start, v_blank_end;
416  	bool vrr_active;
417  
418  	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
419  
420  	/* IRQ could occur when in initial stage */
421  	/* TODO work and BO cleanup */
422  	if (amdgpu_crtc == NULL) {
423  		DC_LOG_PFLIP("CRTC is null, returning.\n");
424  		return;
425  	}
426  
427  	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
428  
429  	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
430  		DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
431  			     amdgpu_crtc->pflip_status,
432  			     AMDGPU_FLIP_SUBMITTED,
433  			     amdgpu_crtc->crtc_id,
434  			     amdgpu_crtc);
435  		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
436  		return;
437  	}
438  
439  	/* page flip completed. */
440  	e = amdgpu_crtc->event;
441  	amdgpu_crtc->event = NULL;
442  
443  	WARN_ON(!e);
444  
445  	vrr_active = amdgpu_dm_crtc_vrr_active_irq(amdgpu_crtc);
446  
447  	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
448  	if (!vrr_active ||
449  	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
450  				      &v_blank_end, &hpos, &vpos) ||
451  	    (vpos < v_blank_start)) {
452  		/* Update to correct count and vblank timestamp if racing with
453  		 * vblank irq. This also updates to the correct vblank timestamp
454  		 * even in VRR mode, as scanout is past the front-porch atm.
455  		 */
456  		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
457  
458  		/* Wake up userspace by sending the pageflip event with proper
459  		 * count and timestamp of vblank of flip completion.
460  		 */
461  		if (e) {
462  			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
463  
464  			/* Event sent, so done with vblank for this flip */
465  			drm_crtc_vblank_put(&amdgpu_crtc->base);
466  		}
467  	} else if (e) {
468  		/* VRR active and inside front-porch: vblank count and
469  		 * timestamp for pageflip event will only be up to date after
470  		 * drm_crtc_handle_vblank() has been executed from late vblank
471  		 * irq handler after start of back-porch (vline 0). We queue the
472  		 * pageflip event for send-out by drm_crtc_handle_vblank() with
473  		 * updated timestamp and count, once it runs after us.
474  		 *
475  		 * We need to open-code this instead of using the helper
476  		 * drm_crtc_arm_vblank_event(), as that helper would
477  		 * call drm_crtc_accurate_vblank_count(), which we must
478  		 * not call in VRR mode while we are in front-porch!
479  		 */
480  
481  		/* sequence will be replaced by real count during send-out. */
482  		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
483  		e->pipe = amdgpu_crtc->crtc_id;
484  
485  		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
486  		e = NULL;
487  	}
488  
489  	/* Keep track of vblank of this flip for flip throttling. We use the
490  	 * cooked hw counter, as that one is incremented at the start of this vblank
491  	 * of pageflip completion, so last_flip_vblank is the forbidden count
492  	 * for queueing new pageflips if vsync + VRR is enabled.
493  	 */
494  	amdgpu_crtc->dm_irq_params.last_flip_vblank =
495  		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
496  
497  	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
498  	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
499  
500  	DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
501  		     amdgpu_crtc->crtc_id, amdgpu_crtc,
502  		     vrr_active, (int) !e);
503  }
504  
505  static void dm_vupdate_high_irq(void *interrupt_params)
506  {
507  	struct common_irq_params *irq_params = interrupt_params;
508  	struct amdgpu_device *adev = irq_params->adev;
509  	struct amdgpu_crtc *acrtc;
510  	struct drm_device *drm_dev;
511  	struct drm_vblank_crtc *vblank;
512  	ktime_t frame_duration_ns, previous_timestamp;
513  	unsigned long flags;
514  	int vrr_active;
515  
516  	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
517  
518  	if (acrtc) {
519  		vrr_active = amdgpu_dm_crtc_vrr_active_irq(acrtc);
520  		drm_dev = acrtc->base.dev;
521  		vblank = &drm_dev->vblank[acrtc->base.index];
522  		previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
523  		frame_duration_ns = vblank->time - previous_timestamp;
524  
525  		if (frame_duration_ns > 0) {
526  			trace_amdgpu_refresh_rate_track(acrtc->base.index,
527  						frame_duration_ns,
528  						ktime_divns(NSEC_PER_SEC, frame_duration_ns));
529  			atomic64_set(&irq_params->previous_timestamp, vblank->time);
530  		}
531  
532  		DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
533  			      acrtc->crtc_id,
534  			      vrr_active);
535  
536  		/* Core vblank handling is done here after end of front-porch in
537  		 * vrr mode, as vblank timestamping only gives valid results
538  		 * now that we are past the front-porch. This will also deliver
539  		 * page-flip completion events that have been queued to us
540  		 * if a pageflip happened inside front-porch.
541  		 */
542  		if (vrr_active) {
543  			amdgpu_dm_crtc_handle_vblank(acrtc);
544  
545  			/* BTR processing for pre-DCE12 ASICs */
546  			if (acrtc->dm_irq_params.stream &&
547  			    adev->family < AMDGPU_FAMILY_AI) {
548  				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
549  				mod_freesync_handle_v_update(
550  				    adev->dm.freesync_module,
551  				    acrtc->dm_irq_params.stream,
552  				    &acrtc->dm_irq_params.vrr_params);
553  
554  				dc_stream_adjust_vmin_vmax(
555  				    adev->dm.dc,
556  				    acrtc->dm_irq_params.stream,
557  				    &acrtc->dm_irq_params.vrr_params.adjust);
558  				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
559  			}
560  		}
561  	}
562  }
563  
564  /**
565   * dm_crtc_high_irq() - Handles CRTC interrupt
566   * @interrupt_params: used for determining the CRTC instance
567   *
568   * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
569   * event handler.
570   */
571  static void dm_crtc_high_irq(void *interrupt_params)
572  {
573  	struct common_irq_params *irq_params = interrupt_params;
574  	struct amdgpu_device *adev = irq_params->adev;
575  	struct amdgpu_crtc *acrtc;
576  	unsigned long flags;
577  	int vrr_active;
578  
579  	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
580  	if (!acrtc)
581  		return;
582  
583  	vrr_active = amdgpu_dm_crtc_vrr_active_irq(acrtc);
584  
585  	DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
586  		      vrr_active, acrtc->dm_irq_params.active_planes);
587  
588  	/**
589  	 * Core vblank handling at start of front-porch is only possible
590  	 * in non-vrr mode, as only then does vblank timestamping give
591  	 * valid results while in the front-porch. Otherwise defer it
592  	 * to dm_vupdate_high_irq after end of front-porch.
593  	 */
594  	if (!vrr_active)
595  		amdgpu_dm_crtc_handle_vblank(acrtc);
596  
597  	/**
598  	 * Following stuff must happen at start of vblank, for crc
599  	 * computation and below-the-range btr support in vrr mode.
600  	 */
601  	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
602  
603  	/* BTR updates need to happen before VUPDATE on Vega and above. */
604  	if (adev->family < AMDGPU_FAMILY_AI)
605  		return;
606  
607  	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
608  
609  	if (acrtc->dm_irq_params.stream &&
610  	    acrtc->dm_irq_params.vrr_params.supported &&
611  	    acrtc->dm_irq_params.freesync_config.state ==
612  		    VRR_STATE_ACTIVE_VARIABLE) {
613  		mod_freesync_handle_v_update(adev->dm.freesync_module,
614  					     acrtc->dm_irq_params.stream,
615  					     &acrtc->dm_irq_params.vrr_params);
616  
617  		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
618  					   &acrtc->dm_irq_params.vrr_params.adjust);
619  	}
620  
621  	/*
622  	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
623  	 * In that case, pageflip completion interrupts won't fire and pageflip
624  	 * completion events won't get delivered. Prevent this by sending
625  	 * pending pageflip events from here if a flip is still pending.
626  	 *
627  	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
628  	 * avoid race conditions between flip programming and completion,
629  	 * which could cause too early flip completion events.
630  	 */
631  	if (adev->family >= AMDGPU_FAMILY_RV &&
632  	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
633  	    acrtc->dm_irq_params.active_planes == 0) {
634  		if (acrtc->event) {
635  			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
636  			acrtc->event = NULL;
637  			drm_crtc_vblank_put(&acrtc->base);
638  		}
639  		acrtc->pflip_status = AMDGPU_FLIP_NONE;
640  	}
641  
642  	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
643  }
644  
645  #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
646  /**
647   * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
648   * DCN generation ASICs
649   * @interrupt_params: interrupt parameters
650   *
651   * Used to set crc window/read out crc value at vertical line 0 position
652   */
653  static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
654  {
655  	struct common_irq_params *irq_params = interrupt_params;
656  	struct amdgpu_device *adev = irq_params->adev;
657  	struct amdgpu_crtc *acrtc;
658  
659  	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);
660  
661  	if (!acrtc)
662  		return;
663  
664  	amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
665  }
666  #endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */
667  
668  /**
669   * dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command.
670   * @adev: amdgpu_device pointer
671   * @notify: dmub notification structure
672   *
673   * Dmub AUX or SET_CONFIG command completion processing callback
674   * Copies the dmub notification to DM, so it can be read by the AUX command
675   * issuing thread, and signals the event to wake up that thread.
676   */
677  static void dmub_aux_setconfig_callback(struct amdgpu_device *adev,
678  					struct dmub_notification *notify)
679  {
680  	if (adev->dm.dmub_notify)
681  		memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification));
682  	if (notify->type == DMUB_NOTIFICATION_AUX_REPLY)
683  		complete(&adev->dm.dmub_aux_transfer_done);
684  }
685  
686  /**
687   * dmub_hpd_callback - DMUB HPD interrupt processing callback.
688   * @adev: amdgpu_device pointer
689   * @notify: dmub notification structure
690   *
691   * Dmub Hpd interrupt processing callback. Gets display index through the
692   * link index and calls helper to do the processing.
693   */
694  static void dmub_hpd_callback(struct amdgpu_device *adev,
695  			      struct dmub_notification *notify)
696  {
697  	struct amdgpu_dm_connector *aconnector;
698  	struct amdgpu_dm_connector *hpd_aconnector = NULL;
699  	struct drm_connector *connector;
700  	struct drm_connector_list_iter iter;
701  	struct dc_link *link;
702  	u8 link_index = 0;
703  	struct drm_device *dev;
704  
705  	if (adev == NULL)
706  		return;
707  
708  	if (notify == NULL) {
709  		DRM_ERROR("DMUB HPD callback notification was NULL");
710  		return;
711  	}
712  
713  	if (notify->link_index > adev->dm.dc->link_count) {
714  		DRM_ERROR("DMUB HPD index (%u)is abnormal", notify->link_index);
715  		return;
716  	}
717  
718  	link_index = notify->link_index;
719  	link = adev->dm.dc->links[link_index];
720  	dev = adev->dm.ddev;
721  
722  	drm_connector_list_iter_begin(dev, &iter);
723  	drm_for_each_connector_iter(connector, &iter) {
724  		aconnector = to_amdgpu_dm_connector(connector);
725  		if (link && aconnector->dc_link == link) {
726  			if (notify->type == DMUB_NOTIFICATION_HPD)
727  				DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
728  			else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
729  				DRM_INFO("DMUB HPD IRQ callback: link_index=%u\n", link_index);
730  			else
731  				DRM_WARN("DMUB Unknown HPD callback type %d, link_index=%u\n",
732  						notify->type, link_index);
733  
734  			hpd_aconnector = aconnector;
735  			break;
736  		}
737  	}
738  	drm_connector_list_iter_end(&iter);
739  
740  	if (hpd_aconnector) {
741  		if (notify->type == DMUB_NOTIFICATION_HPD)
742  			handle_hpd_irq_helper(hpd_aconnector);
743  		else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
744  			handle_hpd_rx_irq(hpd_aconnector);
745  	}
746  }
747  
748  /**
749   * register_dmub_notify_callback - Sets callback for DMUB notify
750   * @adev: amdgpu_device pointer
751   * @type: Type of dmub notification
752   * @callback: Dmub interrupt callback function
753   * @dmub_int_thread_offload: offload indicator
754   *
755   * API to register a dmub callback handler for a dmub notification
756   * Also sets the indicator whether callback processing is to be offloaded
757   * to the dmub interrupt handling thread.
758   * Return: true if successfully registered, false on an invalid type or NULL callback
759   */
760  static bool register_dmub_notify_callback(struct amdgpu_device *adev,
761  					  enum dmub_notification_type type,
762  					  dmub_notify_interrupt_callback_t callback,
763  					  bool dmub_int_thread_offload)
764  {
765  	if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) {
766  		adev->dm.dmub_callback[type] = callback;
767  		adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload;
768  	} else
769  		return false;
770  
771  	return true;
772  }
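/*
 * Example usage (as done later during driver init; shown here only as an
 * illustration):
 *
 *	register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD,
 *				      dmub_hpd_callback, true);
 *
 * Passing true for dmub_int_thread_offload makes dm_dmub_outbox1_low_irq()
 * queue the notification onto the delayed HPD work queue instead of invoking
 * the callback directly from interrupt context.
 */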
773  
774  static void dm_handle_hpd_work(struct work_struct *work)
775  {
776  	struct dmub_hpd_work *dmub_hpd_wrk;
777  
778  	dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);
779  
780  	if (!dmub_hpd_wrk->dmub_notify) {
781  		DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL");
782  		return;
783  	}
784  
785  	if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) {
786  		dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev,
787  		dmub_hpd_wrk->dmub_notify);
788  	}
789  
790  	kfree(dmub_hpd_wrk->dmub_notify);
791  	kfree(dmub_hpd_wrk);
792  
793  }
794  
795  #define DMUB_TRACE_MAX_READ 64
796  /**
797   * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
798   * @interrupt_params: used for determining the Outbox instance
799   *
800   * Handles the Outbox interrupt by processing pending DMUB notifications
801   * and trace buffer entries.
802   */
803  static void dm_dmub_outbox1_low_irq(void *interrupt_params)
804  {
805  	struct dmub_notification notify;
806  	struct common_irq_params *irq_params = interrupt_params;
807  	struct amdgpu_device *adev = irq_params->adev;
808  	struct amdgpu_display_manager *dm = &adev->dm;
809  	struct dmcub_trace_buf_entry entry = { 0 };
810  	u32 count = 0;
811  	struct dmub_hpd_work *dmub_hpd_wrk;
812  	struct dc_link *plink = NULL;
813  
814  	if (dc_enable_dmub_notifications(adev->dm.dc) &&
815  		irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
816  
817  		do {
818  			dc_stat_get_dmub_notification(adev->dm.dc, &notify);
819  			if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) {
820  				DRM_ERROR("DM: notify type %d invalid!", notify.type);
821  				continue;
822  			}
823  			if (!dm->dmub_callback[notify.type]) {
824  				DRM_DEBUG_DRIVER("DMUB notification skipped, no handler: type=%d\n", notify.type);
825  				continue;
826  			}
827  			if (dm->dmub_thread_offload[notify.type] == true) {
828  				dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
829  				if (!dmub_hpd_wrk) {
830  					DRM_ERROR("Failed to allocate dmub_hpd_wrk");
831  					return;
832  				}
833  				dmub_hpd_wrk->dmub_notify = kmemdup(&notify, sizeof(struct dmub_notification),
834  								    GFP_ATOMIC);
835  				if (!dmub_hpd_wrk->dmub_notify) {
836  					kfree(dmub_hpd_wrk);
837  					DRM_ERROR("Failed to allocate dmub_hpd_wrk->dmub_notify");
838  					return;
839  				}
840  				INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
841  				dmub_hpd_wrk->adev = adev;
842  				if (notify.type == DMUB_NOTIFICATION_HPD) {
843  					plink = adev->dm.dc->links[notify.link_index];
844  					if (plink) {
845  						plink->hpd_status =
846  							notify.hpd_status == DP_HPD_PLUG;
847  					}
848  				}
849  				queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
850  			} else {
851  				dm->dmub_callback[notify.type](adev, &notify);
852  			}
853  		} while (notify.pending_notification);
854  	}
855  
856  
857  	do {
858  		if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
859  			trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
860  							entry.param0, entry.param1);
861  
862  			DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
863  				 entry.trace_code, entry.tick_count, entry.param0, entry.param1);
864  		} else
865  			break;
866  
867  		count++;
868  
869  	} while (count <= DMUB_TRACE_MAX_READ);
870  
871  	if (count > DMUB_TRACE_MAX_READ)
872  		DRM_DEBUG_DRIVER("Warning : count > DMUB_TRACE_MAX_READ");
873  }
874  
875  static int dm_set_clockgating_state(void *handle,
876  		  enum amd_clockgating_state state)
877  {
878  	return 0;
879  }
880  
881  static int dm_set_powergating_state(void *handle,
882  		  enum amd_powergating_state state)
883  {
884  	return 0;
885  }
886  
887  /* Prototypes of private functions */
888  static int dm_early_init(void *handle);
889  
890  /* Allocate memory for FBC compressed data  */
891  static void amdgpu_dm_fbc_init(struct drm_connector *connector)
892  {
893  	struct drm_device *dev = connector->dev;
894  	struct amdgpu_device *adev = drm_to_adev(dev);
895  	struct dm_compressor_info *compressor = &adev->dm.compressor;
896  	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
897  	struct drm_display_mode *mode;
898  	unsigned long max_size = 0;
899  
900  	if (adev->dm.dc->fbc_compressor == NULL)
901  		return;
902  
903  	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
904  		return;
905  
906  	if (compressor->bo_ptr)
907  		return;
908  
909  
910  	list_for_each_entry(mode, &connector->modes, head) {
911  		if (max_size < mode->htotal * mode->vtotal)
912  			max_size = mode->htotal * mode->vtotal;
913  	}
914  
915  	if (max_size) {
916  		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
917  			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
918  			    &compressor->gpu_addr, &compressor->cpu_addr);
919  
920  		if (r)
921  			DRM_ERROR("DM: Failed to initialize FBC\n");
922  		else {
923  			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
924  			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
925  		}
926  
927  	}
928  
929  }
930  
931  static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
932  					  int pipe, bool *enabled,
933  					  unsigned char *buf, int max_bytes)
934  {
935  	struct drm_device *dev = dev_get_drvdata(kdev);
936  	struct amdgpu_device *adev = drm_to_adev(dev);
937  	struct drm_connector *connector;
938  	struct drm_connector_list_iter conn_iter;
939  	struct amdgpu_dm_connector *aconnector;
940  	int ret = 0;
941  
942  	*enabled = false;
943  
944  	mutex_lock(&adev->dm.audio_lock);
945  
946  	drm_connector_list_iter_begin(dev, &conn_iter);
947  	drm_for_each_connector_iter(connector, &conn_iter) {
948  		aconnector = to_amdgpu_dm_connector(connector);
949  		if (aconnector->audio_inst != port)
950  			continue;
951  
952  		*enabled = true;
953  		ret = drm_eld_size(connector->eld);
954  		memcpy(buf, connector->eld, min(max_bytes, ret));
955  
956  		break;
957  	}
958  	drm_connector_list_iter_end(&conn_iter);
959  
960  	mutex_unlock(&adev->dm.audio_lock);
961  
962  	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
963  
964  	return ret;
965  }
966  
967  static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
968  	.get_eld = amdgpu_dm_audio_component_get_eld,
969  };
970  
971  static int amdgpu_dm_audio_component_bind(struct device *kdev,
972  				       struct device *hda_kdev, void *data)
973  {
974  	struct drm_device *dev = dev_get_drvdata(kdev);
975  	struct amdgpu_device *adev = drm_to_adev(dev);
976  	struct drm_audio_component *acomp = data;
977  
978  	acomp->ops = &amdgpu_dm_audio_component_ops;
979  	acomp->dev = kdev;
980  	adev->dm.audio_component = acomp;
981  
982  	return 0;
983  }
984  
985  static void amdgpu_dm_audio_component_unbind(struct device *kdev,
986  					  struct device *hda_kdev, void *data)
987  {
988  	struct drm_device *dev = dev_get_drvdata(kdev);
989  	struct amdgpu_device *adev = drm_to_adev(dev);
990  	struct drm_audio_component *acomp = data;
991  
992  	acomp->ops = NULL;
993  	acomp->dev = NULL;
994  	adev->dm.audio_component = NULL;
995  }
996  
997  static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
998  	.bind	= amdgpu_dm_audio_component_bind,
999  	.unbind	= amdgpu_dm_audio_component_unbind,
1000  };
1001  
1002  static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
1003  {
1004  	int i, ret;
1005  
1006  	if (!amdgpu_audio)
1007  		return 0;
1008  
1009  	adev->mode_info.audio.enabled = true;
1010  
1011  	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
1012  
1013  	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
1014  		adev->mode_info.audio.pin[i].channels = -1;
1015  		adev->mode_info.audio.pin[i].rate = -1;
1016  		adev->mode_info.audio.pin[i].bits_per_sample = -1;
1017  		adev->mode_info.audio.pin[i].status_bits = 0;
1018  		adev->mode_info.audio.pin[i].category_code = 0;
1019  		adev->mode_info.audio.pin[i].connected = false;
1020  		adev->mode_info.audio.pin[i].id =
1021  			adev->dm.dc->res_pool->audios[i]->inst;
1022  		adev->mode_info.audio.pin[i].offset = 0;
1023  	}
1024  
1025  	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
1026  	if (ret < 0)
1027  		return ret;
1028  
1029  	adev->dm.audio_registered = true;
1030  
1031  	return 0;
1032  }
1033  
1034  static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
1035  {
1036  	if (!amdgpu_audio)
1037  		return;
1038  
1039  	if (!adev->mode_info.audio.enabled)
1040  		return;
1041  
1042  	if (adev->dm.audio_registered) {
1043  		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
1044  		adev->dm.audio_registered = false;
1045  	}
1046  
1047  	/* TODO: Disable audio? */
1048  
1049  	adev->mode_info.audio.enabled = false;
1050  }
1051  
1052  static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
1053  {
1054  	struct drm_audio_component *acomp = adev->dm.audio_component;
1055  
1056  	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
1057  		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
1058  
1059  		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
1060  						 pin, -1);
1061  	}
1062  }
1063  
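/*
 * Copy the DMUB firmware and VBIOS into the framebuffer windows reserved for
 * them, program the DMUB hardware parameters and bring the DMUB service up,
 * including the DC-side dc_dmub_srv wrapper.
 */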
1064  static int dm_dmub_hw_init(struct amdgpu_device *adev)
1065  {
1066  	const struct dmcub_firmware_header_v1_0 *hdr;
1067  	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
1068  	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
1069  	const struct firmware *dmub_fw = adev->dm.dmub_fw;
1070  	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
1071  	struct abm *abm = adev->dm.dc->res_pool->abm;
1072  	struct dmub_srv_hw_params hw_params;
1073  	enum dmub_status status;
1074  	const unsigned char *fw_inst_const, *fw_bss_data;
1075  	u32 i, fw_inst_const_size, fw_bss_data_size;
1076  	bool has_hw_support;
1077  
1078  	if (!dmub_srv)
1079  		/* DMUB isn't supported on the ASIC. */
1080  		return 0;
1081  
1082  	if (!fb_info) {
1083  		DRM_ERROR("No framebuffer info for DMUB service.\n");
1084  		return -EINVAL;
1085  	}
1086  
1087  	if (!dmub_fw) {
1088  		/* Firmware required for DMUB support. */
1089  		DRM_ERROR("No firmware provided for DMUB.\n");
1090  		return -EINVAL;
1091  	}
1092  
1093  	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
1094  	if (status != DMUB_STATUS_OK) {
1095  		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
1096  		return -EINVAL;
1097  	}
1098  
1099  	if (!has_hw_support) {
1100  		DRM_INFO("DMUB unsupported on ASIC\n");
1101  		return 0;
1102  	}
1103  
1104  	/* Reset DMCUB if it was previously running - before we overwrite its memory. */
1105  	status = dmub_srv_hw_reset(dmub_srv);
1106  	if (status != DMUB_STATUS_OK)
1107  		DRM_WARN("Error resetting DMUB HW: %d\n", status);
1108  
1109  	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
1110  
1111  	fw_inst_const = dmub_fw->data +
1112  			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1113  			PSP_HEADER_BYTES;
1114  
1115  	fw_bss_data = dmub_fw->data +
1116  		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1117  		      le32_to_cpu(hdr->inst_const_bytes);
1118  
1119  	/* Copy firmware and bios info into FB memory. */
1120  	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1121  			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1122  
1123  	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1124  
1125  	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
1126  	 * amdgpu_ucode_init_single_fw will load dmub firmware
1127  	 * fw_inst_const part to cw0; otherwise, the firmware back door load
1128  	 * will be done by dm_dmub_hw_init
1129  	 */
1130  	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1131  		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
1132  				fw_inst_const_size);
1133  	}
1134  
1135  	if (fw_bss_data_size)
1136  		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
1137  		       fw_bss_data, fw_bss_data_size);
1138  
1139  	/* Copy firmware bios info into FB memory. */
1140  	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
1141  	       adev->bios_size);
1142  
1143  	/* Reset regions that need to be reset. */
1144  	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
1145  	fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
1146  
1147  	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
1148  	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
1149  
1150  	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
1151  	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
1152  
1153  	/* Initialize hardware. */
1154  	memset(&hw_params, 0, sizeof(hw_params));
1155  	hw_params.fb_base = adev->gmc.fb_start;
1156  	hw_params.fb_offset = adev->vm_manager.vram_base_offset;
1157  
1158  	/* backdoor load firmware and trigger dmub running */
1159  	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
1160  		hw_params.load_inst_const = true;
1161  
1162  	if (dmcu)
1163  		hw_params.psp_version = dmcu->psp_version;
1164  
1165  	for (i = 0; i < fb_info->num_fb; ++i)
1166  		hw_params.fb[i] = &fb_info->fb[i];
1167  
1168  	switch (adev->ip_versions[DCE_HWIP][0]) {
1169  	case IP_VERSION(3, 1, 3):
1170  	case IP_VERSION(3, 1, 4):
1171  		hw_params.dpia_supported = true;
1172  		hw_params.disable_dpia = adev->dm.dc->debug.dpia_debug.bits.disable_dpia;
1173  		break;
1174  	default:
1175  		break;
1176  	}
1177  
1178  	status = dmub_srv_hw_init(dmub_srv, &hw_params);
1179  	if (status != DMUB_STATUS_OK) {
1180  		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
1181  		return -EINVAL;
1182  	}
1183  
1184  	/* Wait for firmware load to finish. */
1185  	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
1186  	if (status != DMUB_STATUS_OK)
1187  		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
1188  
1189  	/* Init DMCU and ABM if available. */
1190  	if (dmcu && abm) {
1191  		dmcu->funcs->dmcu_init(dmcu);
1192  		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
1193  	}
1194  
1195  	if (!adev->dm.dc->ctx->dmub_srv)
1196  		adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
1197  	if (!adev->dm.dc->ctx->dmub_srv) {
1198  		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
1199  		return -ENOMEM;
1200  	}
1201  
1202  	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
1203  		 adev->dm.dmcub_fw_version);
1204  
1205  	return 0;
1206  }
1207  
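/*
 * On resume, reuse the DMUB hardware state if it is still initialized and
 * just wait for the firmware auto-load; otherwise fall back to a full
 * dm_dmub_hw_init().
 */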
1208  static void dm_dmub_hw_resume(struct amdgpu_device *adev)
1209  {
1210  	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
1211  	enum dmub_status status;
1212  	bool init;
1213  
1214  	if (!dmub_srv) {
1215  		/* DMUB isn't supported on the ASIC. */
1216  		return;
1217  	}
1218  
1219  	status = dmub_srv_is_hw_init(dmub_srv, &init);
1220  	if (status != DMUB_STATUS_OK)
1221  		DRM_WARN("DMUB hardware init check failed: %d\n", status);
1222  
1223  	if (status == DMUB_STATUS_OK && init) {
1224  		/* Wait for firmware load to finish. */
1225  		status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
1226  		if (status != DMUB_STATUS_OK)
1227  			DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
1228  	} else {
1229  		/* Perform the full hardware initialization. */
1230  		dm_dmub_hw_init(adev);
1231  	}
1232  }
1233  
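/*
 * Fill @pa_config with the physical address space layout DC needs to program
 * the MMHUB system context: the system aperture (frame buffer and, when
 * enabled, the AGP aperture) plus the GART page table start/end/base
 * addresses.
 */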
1234  static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
1235  {
1236  	u64 pt_base;
1237  	u32 logical_addr_low;
1238  	u32 logical_addr_high;
1239  	u32 agp_base, agp_bot, agp_top;
1240  	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
1241  
1242  	memset(pa_config, 0, sizeof(*pa_config));
1243  
1244  	agp_base = 0;
1245  	agp_bot = adev->gmc.agp_start >> 24;
1246  	agp_top = adev->gmc.agp_end >> 24;
1247  
1248  	/* AGP aperture is disabled */
1249  	if (agp_bot == agp_top) {
1250  		logical_addr_low = adev->gmc.fb_start >> 18;
1251  		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1252  			/*
1253  			 * Raven2 has a HW issue that it is unable to use the vram which
1254  			 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
1255  			 * workaround that increases the system aperture high address (add 1)
1256  			 * to get rid of the VM fault and hardware hang.
1257  			 */
1258  			logical_addr_high = (adev->gmc.fb_end >> 18) + 0x1;
1259  		else
1260  			logical_addr_high = adev->gmc.fb_end >> 18;
1261  	} else {
1262  		logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
1263  		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1264  			/*
1265  			 * Raven2 has a HW issue that it is unable to use the vram which
1266  			 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
1267  			 * workaround that increases the system aperture high address (add 1)
1268  			 * to get rid of the VM fault and hardware hang.
1269  			 */
1270  			logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
1271  		else
1272  			logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
1273  	}
1274  
1275  	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
1276  
1277  	page_table_start.high_part = upper_32_bits(adev->gmc.gart_start >>
1278  						   AMDGPU_GPU_PAGE_SHIFT);
1279  	page_table_start.low_part = lower_32_bits(adev->gmc.gart_start >>
1280  						  AMDGPU_GPU_PAGE_SHIFT);
1281  	page_table_end.high_part = upper_32_bits(adev->gmc.gart_end >>
1282  						 AMDGPU_GPU_PAGE_SHIFT);
1283  	page_table_end.low_part = lower_32_bits(adev->gmc.gart_end >>
1284  						AMDGPU_GPU_PAGE_SHIFT);
1285  	page_table_base.high_part = upper_32_bits(pt_base);
1286  	page_table_base.low_part = lower_32_bits(pt_base);
1287  
1288  	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
1289  	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
1290  
1291  	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
1292  	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
1293  	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
1294  
1295  	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
1296  	pa_config->system_aperture.fb_offset = adev->vm_manager.vram_base_offset;
1297  	pa_config->system_aperture.fb_top = adev->gmc.fb_end;
1298  
1299  	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
1300  	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
1301  	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;
1302  
1303  	pa_config->is_hvm_enabled = adev->mode_info.gpu_vm_support;
1304  
1305  }
1306  
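/*
 * Override the connector's DRM force state under the mode_config mutex, then
 * generate a hotplug event (under the HPD lock) so userspace re-probes the
 * connector.
 */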
1307  static void force_connector_state(
1308  	struct amdgpu_dm_connector *aconnector,
1309  	enum drm_connector_force force_state)
1310  {
1311  	struct drm_connector *connector = &aconnector->base;
1312  
1313  	mutex_lock(&connector->dev->mode_config.mutex);
1314  	aconnector->base.force = force_state;
1315  	mutex_unlock(&connector->dev->mode_config.mutex);
1316  
1317  	mutex_lock(&aconnector->hpd_lock);
1318  	drm_kms_helper_connector_hotplug_event(connector);
1319  	mutex_unlock(&aconnector->hpd_lock);
1320  }
1321  
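/*
 * Deferred handler for HPD RX (short pulse) interrupts: re-detects the
 * connection and services MST sideband messages, DP automated test requests
 * and link-loss events outside of interrupt context.
 */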
1322  static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
1323  {
1324  	struct hpd_rx_irq_offload_work *offload_work;
1325  	struct amdgpu_dm_connector *aconnector;
1326  	struct dc_link *dc_link;
1327  	struct amdgpu_device *adev;
1328  	enum dc_connection_type new_connection_type = dc_connection_none;
1329  	unsigned long flags;
1330  	union test_response test_response;
1331  
1332  	memset(&test_response, 0, sizeof(test_response));
1333  
1334  	offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
1335  	aconnector = offload_work->offload_wq->aconnector;
1336  
1337  	if (!aconnector) {
1338  		DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work");
1339  		goto skip;
1340  	}
1341  
1342  	adev = drm_to_adev(aconnector->base.dev);
1343  	dc_link = aconnector->dc_link;
1344  
1345  	mutex_lock(&aconnector->hpd_lock);
1346  	if (!dc_link_detect_connection_type(dc_link, &new_connection_type))
1347  		DRM_ERROR("KMS: Failed to detect connector\n");
1348  	mutex_unlock(&aconnector->hpd_lock);
1349  
1350  	if (new_connection_type == dc_connection_none)
1351  		goto skip;
1352  
1353  	if (amdgpu_in_reset(adev))
1354  		goto skip;
1355  
1356  	if (offload_work->data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
1357  		offload_work->data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
1358  		dm_handle_mst_sideband_msg_ready_event(&aconnector->mst_mgr, DOWN_OR_UP_MSG_RDY_EVENT);
1359  		spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
1360  		offload_work->offload_wq->is_handling_mst_msg_rdy_event = false;
1361  		spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
1362  		goto skip;
1363  	}
1364  
1365  	mutex_lock(&adev->dm.dc_lock);
1366  	if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
1367  		dc_link_dp_handle_automated_test(dc_link);
1368  
1369  		if (aconnector->timing_changed) {
1370  			/* force connector disconnect and reconnect */
1371  			force_connector_state(aconnector, DRM_FORCE_OFF);
1372  			msleep(100);
1373  			force_connector_state(aconnector, DRM_FORCE_UNSPECIFIED);
1374  		}
1375  
1376  		test_response.bits.ACK = 1;
1377  
1378  		core_link_write_dpcd(
1379  		dc_link,
1380  		DP_TEST_RESPONSE,
1381  		&test_response.raw,
1382  		sizeof(test_response));
1383  	} else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
1384  			dc_link_check_link_loss_status(dc_link, &offload_work->data) &&
1385  			dc_link_dp_allow_hpd_rx_irq(dc_link)) {
1386  		/* offload_work->data comes from handle_hpd_rx_irq->
1387  		 * schedule_hpd_rx_offload_work; this is the deferred handler
1388  		 * for an hpd short pulse. By now the link status may have
1389  		 * changed, so read the latest link status from the dpcd
1390  		 * registers. If the link status is good, skip re-running
1391  		 * link training.
1392  		 */
1393  		union hpd_irq_data irq_data;
1394  
1395  		memset(&irq_data, 0, sizeof(irq_data));
1396  
1397  		/* Before dc_link_dp_handle_link_loss runs, allow a new link-loss
1398  		 * handling request to be added to the work queue in case the link
1399  		 * is lost again at the end of dc_link_dp_handle_link_loss.
1400  		 */
1401  		spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
1402  		offload_work->offload_wq->is_handling_link_loss = false;
1403  		spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
1404  
1405  		if ((dc_link_dp_read_hpd_rx_irq_data(dc_link, &irq_data) == DC_OK) &&
1406  			dc_link_check_link_loss_status(dc_link, &irq_data))
1407  			dc_link_dp_handle_link_loss(dc_link);
1408  	}
1409  	mutex_unlock(&adev->dm.dc_lock);
1410  
1411  skip:
1412  	kfree(offload_work);
1413  
1414  }
1415  
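/*
 * Allocate one single-threaded HPD RX offload workqueue per link (up to
 * dc->caps.max_links). On any allocation failure, destroy whatever was
 * created and return NULL.
 */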
1416  static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc)
1417  {
1418  	int max_caps = dc->caps.max_links;
1419  	int i = 0;
1420  	struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;
1421  
1422  	hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL);
1423  
1424  	if (!hpd_rx_offload_wq)
1425  		return NULL;
1426  
1427  
1428  	for (i = 0; i < max_caps; i++) {
1429  		hpd_rx_offload_wq[i].wq =
1430  				    create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");
1431  
1432  		if (hpd_rx_offload_wq[i].wq == NULL) {
1433  			DRM_ERROR("create amdgpu_dm_hpd_rx_offload_wq fail!");
1434  			goto out_err;
1435  		}
1436  
1437  		spin_lock_init(&hpd_rx_offload_wq[i].offload_lock);
1438  	}
1439  
1440  	return hpd_rx_offload_wq;
1441  
1442  out_err:
1443  	for (i = 0; i < max_caps; i++) {
1444  		if (hpd_rx_offload_wq[i].wq)
1445  			destroy_workqueue(hpd_rx_offload_wq[i].wq);
1446  	}
1447  	kfree(hpd_rx_offload_wq);
1448  	return NULL;
1449  }
1450  
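/*
 * PCI vendor/device/subsystem/revision tuple identifying boards on which
 * memory stutter needs to be disabled; matched by dm_should_disable_stutter().
 */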
1451  struct amdgpu_stutter_quirk {
1452  	u16 chip_vendor;
1453  	u16 chip_device;
1454  	u16 subsys_vendor;
1455  	u16 subsys_device;
1456  	u8 revision;
1457  };
1458  
1459  static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = {
1460  	/* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */
1461  	{ 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
1462  	{ 0, 0, 0, 0, 0 },
1463  };
1464  
1465  static bool dm_should_disable_stutter(struct pci_dev *pdev)
1466  {
1467  	const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list;
1468  
1469  	while (p && p->chip_device != 0) {
1470  		if (pdev->vendor == p->chip_vendor &&
1471  		    pdev->device == p->chip_device &&
1472  		    pdev->subsystem_vendor == p->subsys_vendor &&
1473  		    pdev->subsystem_device == p->subsys_device &&
1474  		    pdev->revision == p->revision) {
1475  			return true;
1476  		}
1477  		++p;
1478  	}
1479  	return false;
1480  }
1481  
1482  static const struct dmi_system_id hpd_disconnect_quirk_table[] = {
1483  	{
1484  		.matches = {
1485  			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1486  			DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3660"),
1487  		},
1488  	},
1489  	{
1490  		.matches = {
1491  			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1492  			DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3260"),
1493  		},
1494  	},
1495  	{
1496  		.matches = {
1497  			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1498  			DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3460"),
1499  		},
1500  	},
1501  	{
1502  		.matches = {
1503  			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1504  			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower Plus 7010"),
1505  		},
1506  	},
1507  	{
1508  		.matches = {
1509  			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1510  			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower 7010"),
1511  		},
1512  	},
1513  	{
1514  		.matches = {
1515  			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1516  			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF Plus 7010"),
1517  		},
1518  	},
1519  	{
1520  		.matches = {
1521  			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1522  			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF 7010"),
1523  		},
1524  	},
1525  	{
1526  		.matches = {
1527  			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1528  			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro Plus 7010"),
1529  		},
1530  	},
1531  	{
1532  		.matches = {
1533  			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1534  			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro 7010"),
1535  		},
1536  	},
1537  	{}
1538  	/* TODO: refactor this from a fixed table to a dynamic option */
1539  };
1540  
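/* Enable the AUX HPD-disconnect quirk when the system matches the DMI table above. */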
1541  static void retrieve_dmi_info(struct amdgpu_display_manager *dm)
1542  {
1543  	const struct dmi_system_id *dmi_id;
1544  
1545  	dm->aux_hpd_discon_quirk = false;
1546  
1547  	dmi_id = dmi_first_match(hpd_disconnect_quirk_table);
1548  	if (dmi_id) {
1549  		dm->aux_hpd_discon_quirk = true;
1550  		DRM_INFO("aux_hpd_discon_quirk attached\n");
1551  	}
1552  }
1553  
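/*
 * Create and initialize the display manager: build dc_init_data from the
 * ASIC/firmware information, create the DC instance, bring up DMUB and the
 * HPD RX offload workqueues, register DMUB notification callbacks, and
 * finally initialize the DRM device state (connectors, vblank, etc.).
 */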
1554  static int amdgpu_dm_init(struct amdgpu_device *adev)
1555  {
1556  	struct dc_init_data init_data;
1557  	struct dc_callback_init init_params;
1558  	int r;
1559  
1560  	adev->dm.ddev = adev_to_drm(adev);
1561  	adev->dm.adev = adev;
1562  
1563  	/* Zero all the fields */
1564  	memset(&init_data, 0, sizeof(init_data));
1565  	memset(&init_params, 0, sizeof(init_params));
1566  
1567  	mutex_init(&adev->dm.dpia_aux_lock);
1568  	mutex_init(&adev->dm.dc_lock);
1569  	mutex_init(&adev->dm.audio_lock);
1570  
1571  	if (amdgpu_dm_irq_init(adev)) {
1572  		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
1573  		goto error;
1574  	}
1575  
1576  	init_data.asic_id.chip_family = adev->family;
1577  
1578  	init_data.asic_id.pci_revision_id = adev->pdev->revision;
1579  	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
1580  	init_data.asic_id.chip_id = adev->pdev->device;
1581  
1582  	init_data.asic_id.vram_width = adev->gmc.vram_width;
1583  	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
1584  	init_data.asic_id.atombios_base_address =
1585  		adev->mode_info.atom_context->bios;
1586  
1587  	init_data.driver = adev;
1588  
1589  	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1590  
1591  	if (!adev->dm.cgs_device) {
1592  		DRM_ERROR("amdgpu: failed to create cgs device.\n");
1593  		goto error;
1594  	}
1595  
1596  	init_data.cgs_device = adev->dm.cgs_device;
1597  
1598  	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
1599  
1600  	switch (adev->ip_versions[DCE_HWIP][0]) {
1601  	case IP_VERSION(2, 1, 0):
1602  		switch (adev->dm.dmcub_fw_version) {
1603  		case 0: /* development */
1604  		case 0x1: /* linux-firmware.git hash 6d9f399 */
1605  		case 0x01000000: /* linux-firmware.git hash 9a0b0f4 */
1606  			init_data.flags.disable_dmcu = false;
1607  			break;
1608  		default:
1609  			init_data.flags.disable_dmcu = true;
1610  		}
1611  		break;
1612  	case IP_VERSION(2, 0, 3):
1613  		init_data.flags.disable_dmcu = true;
1614  		break;
1615  	default:
1616  		break;
1617  	}
1618  
1619  	switch (adev->asic_type) {
1620  	case CHIP_CARRIZO:
1621  	case CHIP_STONEY:
1622  		init_data.flags.gpu_vm_support = true;
1623  		break;
1624  	default:
1625  		switch (adev->ip_versions[DCE_HWIP][0]) {
1626  		case IP_VERSION(1, 0, 0):
1627  		case IP_VERSION(1, 0, 1):
1628  			/* enable S/G on PCO and RV2 */
1629  			if ((adev->apu_flags & AMD_APU_IS_RAVEN2) ||
1630  			    (adev->apu_flags & AMD_APU_IS_PICASSO))
1631  				init_data.flags.gpu_vm_support = true;
1632  			break;
1633  		case IP_VERSION(2, 1, 0):
1634  		case IP_VERSION(3, 0, 1):
1635  		case IP_VERSION(3, 1, 2):
1636  		case IP_VERSION(3, 1, 3):
1637  		case IP_VERSION(3, 1, 4):
1638  		case IP_VERSION(3, 1, 5):
1639  		case IP_VERSION(3, 1, 6):
1640  			init_data.flags.gpu_vm_support = true;
1641  			break;
1642  		default:
1643  			break;
1644  		}
1645  		break;
1646  	}
1647  	if (init_data.flags.gpu_vm_support &&
1648  	    (amdgpu_sg_display == 0))
1649  		init_data.flags.gpu_vm_support = false;
1650  
1651  	if (init_data.flags.gpu_vm_support)
1652  		adev->mode_info.gpu_vm_support = true;
1653  
1654  	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1655  		init_data.flags.fbc_support = true;
1656  
1657  	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1658  		init_data.flags.multi_mon_pp_mclk_switch = true;
1659  
1660  	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1661  		init_data.flags.disable_fractional_pwm = true;
1662  
1663  	if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
1664  		init_data.flags.edp_no_power_sequencing = true;
1665  
1666  	if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP1_4A)
1667  		init_data.flags.allow_lttpr_non_transparent_mode.bits.DP1_4A = true;
1668  	if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP2_0)
1669  		init_data.flags.allow_lttpr_non_transparent_mode.bits.DP2_0 = true;
1670  
1671  	init_data.flags.seamless_boot_edp_requested = false;
1672  
1673  	if (check_seamless_boot_capability(adev)) {
1674  		init_data.flags.seamless_boot_edp_requested = true;
1675  		init_data.flags.allow_seamless_boot_optimization = true;
1676  		DRM_INFO("Seamless boot condition check passed\n");
1677  	}
1678  
1679  	init_data.flags.enable_mipi_converter_optimization = true;
1680  
1681  	init_data.dcn_reg_offsets = adev->reg_offset[DCE_HWIP][0];
1682  	init_data.nbio_reg_offsets = adev->reg_offset[NBIO_HWIP][0];
1683  
1684  	INIT_LIST_HEAD(&adev->dm.da_list);
1685  
1686  	retrieve_dmi_info(&adev->dm);
1687  
1688  	/* Display Core create. */
1689  	adev->dm.dc = dc_create(&init_data);
1690  
1691  	if (adev->dm.dc) {
1692  		DRM_INFO("Display Core v%s initialized on %s\n", DC_VER,
1693  			 dce_version_to_string(adev->dm.dc->ctx->dce_version));
1694  	} else {
1695  		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
1697  		goto error;
1698  	}
1699  
1700  	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1701  		adev->dm.dc->debug.force_single_disp_pipe_split = false;
1702  		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1703  	}
1704  
1705  	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1706  		adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
1707  	if (dm_should_disable_stutter(adev->pdev))
1708  		adev->dm.dc->debug.disable_stutter = true;
1709  
1710  	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1711  		adev->dm.dc->debug.disable_stutter = true;
1712  
1713  	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
1714  		adev->dm.dc->debug.disable_dsc = true;
1715  
1716  	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1717  		adev->dm.dc->debug.disable_clock_gate = true;
1718  
1719  	if (amdgpu_dc_debug_mask & DC_FORCE_SUBVP_MCLK_SWITCH)
1720  		adev->dm.dc->debug.force_subvp_mclk_switch = true;
1721  
1722  	adev->dm.dc->debug.visual_confirm = amdgpu_dc_visual_confirm;
1723  
1724  	/* TODO: Remove after DP2 receiver gets proper support of Cable ID feature */
1725  	adev->dm.dc->debug.ignore_cable_id = true;
1726  
1727  	/* TODO: There is a new drm mst change where the freedom of
1728  	 * vc_next_start_slot update is revoked/moved into drm, instead of in
1729  	 * driver. This forces us to make sure to get vc_next_start_slot updated
1730  	 * in drm function each time without considering if mst_state is active
1731  	 * or not. Otherwise, next time hotplug will give wrong start_slot
1732  	 * number. We are implementing a temporary solution to even notify drm
1733  	 * mst deallocation when link is no longer of MST type when uncommitting
1734  	 * the stream so we will have more time to work on a proper solution.
1735  	 * Ideally when dm_helpers_dp_mst_stop_top_mgr message is triggered, we
1736  	 * should notify drm to do a complete "reset" of its states and stop
1737  	 * calling further drm mst functions when link is no longer of an MST
1738  	 * type. This could happen when we unplug an MST hub/display. When
1739  	 * uncommit stream comes later after unplug, we should just reset
1740  	 * hardware states only.
1741  	 */
1742  	adev->dm.dc->debug.temp_mst_deallocation_sequence = true;
1743  
1744  	if (adev->dm.dc->caps.dp_hdmi21_pcon_support)
1745  		DRM_INFO("DP-HDMI FRL PCON supported\n");
1746  
1747  	r = dm_dmub_hw_init(adev);
1748  	if (r) {
1749  		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1750  		goto error;
1751  	}
1752  
1753  	dc_hardware_init(adev->dm.dc);
1754  
1755  	adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc);
1756  	if (!adev->dm.hpd_rx_offload_wq) {
1757  		DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n");
1758  		goto error;
1759  	}
1760  
1761  	if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
1762  		struct dc_phy_addr_space_config pa_config;
1763  
1764  		mmhub_read_system_context(adev, &pa_config);
1765  
1766  		// Call the DC init_memory func
1767  		dc_setup_system_context(adev->dm.dc, &pa_config);
1768  	}
1769  
1770  	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
1771  	if (!adev->dm.freesync_module) {
1772  		DRM_ERROR(
1773  		"amdgpu: failed to initialize freesync_module.\n");
1774  	} else
1775  		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
1776  				adev->dm.freesync_module);
1777  
1778  	amdgpu_dm_init_color_mod();
1779  
1780  	if (adev->dm.dc->caps.max_links > 0) {
1781  		adev->dm.vblank_control_workqueue =
1782  			create_singlethread_workqueue("dm_vblank_control_workqueue");
1783  		if (!adev->dm.vblank_control_workqueue)
1784  			DRM_ERROR("amdgpu: failed to initialize vblank_control_workqueue.\n");
1785  	}
1786  
1787  	if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) {
1788  		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1789  
1790  		if (!adev->dm.hdcp_workqueue)
1791  			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1792  		else
1793  			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1794  
1795  		dc_init_callbacks(adev->dm.dc, &init_params);
1796  	}
1797  	if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
1798  		init_completion(&adev->dm.dmub_aux_transfer_done);
1799  		adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
1800  		if (!adev->dm.dmub_notify) {
1801  			DRM_INFO("amdgpu: failed to allocate adev->dm.dmub_notify");
1802  			goto error;
1803  		}
1804  
1805  		adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
1806  		if (!adev->dm.delayed_hpd_wq) {
1807  			DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n");
1808  			goto error;
1809  		}
1810  
1811  		amdgpu_dm_outbox_init(adev);
1812  		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
1813  			dmub_aux_setconfig_callback, false)) {
1814  			DRM_ERROR("amdgpu: failed to register dmub aux callback");
1815  			goto error;
1816  		}
1817  		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) {
1818  			DRM_ERROR("amdgpu: failed to register dmub hpd callback");
1819  			goto error;
1820  		}
1821  		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true)) {
1822  			DRM_ERROR("amdgpu: failed to register dmub hpd irq callback");
1823  			goto error;
1824  		}
1825  	}
1826  
1827  	/* Enable outbox notification only after IRQ handlers are registered and DMUB is alive.
1828  	 * It is expected that DMUB will resend any pending notifications at this point, for
1829  	 * example HPD from DPIA.
1830  	 */
1831  	if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
1832  		dc_enable_dmub_outbox(adev->dm.dc);
1833  
1834  		/* DPIA trace goes to dmesg logs only if outbox is enabled */
1835  		if (amdgpu_dc_debug_mask & DC_ENABLE_DPIA_TRACE)
1836  			dc_dmub_srv_enable_dpia_trace(adev->dm.dc);
1837  	}
1838  
1839  	if (amdgpu_dm_initialize_drm_device(adev)) {
1840  		DRM_ERROR(
1841  		"amdgpu: failed to initialize sw for display support.\n");
1842  		goto error;
1843  	}
1844  
1845  	/* create fake encoders for MST */
1846  	dm_dp_create_fake_mst_encoders(adev);
1847  
1848  	/* TODO: Add_display_info? */
1849  
1850  	/* TODO use dynamic cursor width */
1851  	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1852  	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1853  
1854  	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
1855  		DRM_ERROR(
1856  		"amdgpu: failed to initialize vblank for display support.\n");
1857  		goto error;
1858  	}
1859  
1860  #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1861  	adev->dm.secure_display_ctxs = amdgpu_dm_crtc_secure_display_create_contexts(adev);
1862  	if (!adev->dm.secure_display_ctxs)
1863  		DRM_ERROR("amdgpu: failed to initialize secure display contexts.\n");
1864  #endif
1865  
1866  	DRM_DEBUG_DRIVER("KMS initialized.\n");
1867  
1868  	return 0;
1869  error:
1870  	amdgpu_dm_fini(adev);
1871  
1872  	return -EINVAL;
1873  }
1874  
1875  static int amdgpu_dm_early_fini(void *handle)
1876  {
1877  	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1878  
1879  	amdgpu_dm_audio_fini(adev);
1880  
1881  	return 0;
1882  }
1883  
1884  static void amdgpu_dm_fini(struct amdgpu_device *adev)
1885  {
1886  	int i;
1887  
1888  	if (adev->dm.vblank_control_workqueue) {
1889  		destroy_workqueue(adev->dm.vblank_control_workqueue);
1890  		adev->dm.vblank_control_workqueue = NULL;
1891  	}
1892  
1893  	amdgpu_dm_destroy_drm_device(&adev->dm);
1894  
1895  #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1896  	if (adev->dm.secure_display_ctxs) {
1897  		for (i = 0; i < adev->mode_info.num_crtc; i++) {
1898  			if (adev->dm.secure_display_ctxs[i].crtc) {
1899  				flush_work(&adev->dm.secure_display_ctxs[i].notify_ta_work);
1900  				flush_work(&adev->dm.secure_display_ctxs[i].forward_roi_work);
1901  			}
1902  		}
1903  		kfree(adev->dm.secure_display_ctxs);
1904  		adev->dm.secure_display_ctxs = NULL;
1905  	}
1906  #endif
1907  	if (adev->dm.hdcp_workqueue) {
1908  		hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
1909  		adev->dm.hdcp_workqueue = NULL;
1910  	}
1911  
1912  	if (adev->dm.dc)
1913  		dc_deinit_callbacks(adev->dm.dc);
1914  
1915  	if (adev->dm.dc)
1916  		dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1917  
1918  	if (dc_enable_dmub_notifications(adev->dm.dc)) {
1919  		kfree(adev->dm.dmub_notify);
1920  		adev->dm.dmub_notify = NULL;
1921  		destroy_workqueue(adev->dm.delayed_hpd_wq);
1922  		adev->dm.delayed_hpd_wq = NULL;
1923  	}
1924  
1925  	if (adev->dm.dmub_bo)
1926  		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1927  				      &adev->dm.dmub_bo_gpu_addr,
1928  				      &adev->dm.dmub_bo_cpu_addr);
1929  
1930  	if (adev->dm.hpd_rx_offload_wq) {
1931  		for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
1932  			if (adev->dm.hpd_rx_offload_wq[i].wq) {
1933  				destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq);
1934  				adev->dm.hpd_rx_offload_wq[i].wq = NULL;
1935  			}
1936  		}
1937  
1938  		kfree(adev->dm.hpd_rx_offload_wq);
1939  		adev->dm.hpd_rx_offload_wq = NULL;
1940  	}
1941  
1942  	/* DC Destroy TODO: Replace destroy DAL */
1943  	if (adev->dm.dc)
1944  		dc_destroy(&adev->dm.dc);
1945  	/*
1946  	 * TODO: pageflip, vblank interrupt
1947  	 *
1948  	 * amdgpu_dm_irq_fini(adev);
1949  	 */
1950  
1951  	if (adev->dm.cgs_device) {
1952  		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1953  		adev->dm.cgs_device = NULL;
1954  	}
1955  	if (adev->dm.freesync_module) {
1956  		mod_freesync_destroy(adev->dm.freesync_module);
1957  		adev->dm.freesync_module = NULL;
1958  	}
1959  
1960  	mutex_destroy(&adev->dm.audio_lock);
1961  	mutex_destroy(&adev->dm.dc_lock);
1962  	mutex_destroy(&adev->dm.dpia_aux_lock);
1963  }
1964  
1965  static int load_dmcu_fw(struct amdgpu_device *adev)
1966  {
1967  	const char *fw_name_dmcu = NULL;
1968  	int r;
1969  	const struct dmcu_firmware_header_v1_0 *hdr;
1970  
1971  	switch (adev->asic_type) {
1972  #if defined(CONFIG_DRM_AMD_DC_SI)
1973  	case CHIP_TAHITI:
1974  	case CHIP_PITCAIRN:
1975  	case CHIP_VERDE:
1976  	case CHIP_OLAND:
1977  #endif
1978  	case CHIP_BONAIRE:
1979  	case CHIP_HAWAII:
1980  	case CHIP_KAVERI:
1981  	case CHIP_KABINI:
1982  	case CHIP_MULLINS:
1983  	case CHIP_TONGA:
1984  	case CHIP_FIJI:
1985  	case CHIP_CARRIZO:
1986  	case CHIP_STONEY:
1987  	case CHIP_POLARIS11:
1988  	case CHIP_POLARIS10:
1989  	case CHIP_POLARIS12:
1990  	case CHIP_VEGAM:
1991  	case CHIP_VEGA10:
1992  	case CHIP_VEGA12:
1993  	case CHIP_VEGA20:
1994  		return 0;
1995  	case CHIP_NAVI12:
1996  		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1997  		break;
1998  	case CHIP_RAVEN:
1999  		if (ASICREV_IS_PICASSO(adev->external_rev_id))
2000  			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
2001  		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
2002  			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
2003  		else
2004  			return 0;
2005  		break;
2006  	default:
2007  		switch (adev->ip_versions[DCE_HWIP][0]) {
2008  		case IP_VERSION(2, 0, 2):
2009  		case IP_VERSION(2, 0, 3):
2010  		case IP_VERSION(2, 0, 0):
2011  		case IP_VERSION(2, 1, 0):
2012  		case IP_VERSION(3, 0, 0):
2013  		case IP_VERSION(3, 0, 2):
2014  		case IP_VERSION(3, 0, 3):
2015  		case IP_VERSION(3, 0, 1):
2016  		case IP_VERSION(3, 1, 2):
2017  		case IP_VERSION(3, 1, 3):
2018  		case IP_VERSION(3, 1, 4):
2019  		case IP_VERSION(3, 1, 5):
2020  		case IP_VERSION(3, 1, 6):
2021  		case IP_VERSION(3, 2, 0):
2022  		case IP_VERSION(3, 2, 1):
2023  			return 0;
2024  		default:
2025  			break;
2026  		}
2027  		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
2028  		return -EINVAL;
2029  	}
2030  
2031  	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
2032  		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
2033  		return 0;
2034  	}
2035  
2036  	r = amdgpu_ucode_request(adev, &adev->dm.fw_dmcu, fw_name_dmcu);
2037  	if (r == -ENODEV) {
2038  		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
2039  		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
2040  		adev->dm.fw_dmcu = NULL;
2041  		return 0;
2042  	}
2043  	if (r) {
2044  		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
2045  			fw_name_dmcu);
2046  		amdgpu_ucode_release(&adev->dm.fw_dmcu);
2047  		return r;
2048  	}
2049  
2050  	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
2051  	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
2052  	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
2053  	adev->firmware.fw_size +=
2054  		ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
2055  
2056  	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
2057  	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
2058  	adev->firmware.fw_size +=
2059  		ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
2060  
2061  	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
2062  
2063  	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
2064  
2065  	return 0;
2066  }
2067  
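/* Register read/write trampolines handed to the DMUB service; they forward to the DC context. */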
2068  static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
2069  {
2070  	struct amdgpu_device *adev = ctx;
2071  
2072  	return dm_read_reg(adev->dm.dc->ctx, address);
2073  }
2074  
2075  static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
2076  				     uint32_t value)
2077  {
2078  	struct amdgpu_device *adev = ctx;
2079  
2080  	return dm_write_reg(adev->dm.dc->ctx, address, value);
2081  }
2082  
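/*
 * Software-side DMUB setup: select the DMUB ASIC from the DCN IP version,
 * register the firmware for PSP loading when applicable, create the DMUB
 * service, compute the region layout from the firmware header, back it with
 * a VRAM/GTT buffer object, and fill in the framebuffer info consumed when
 * the DMUB hardware is brought up.
 */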
2083  static int dm_dmub_sw_init(struct amdgpu_device *adev)
2084  {
2085  	struct dmub_srv_create_params create_params;
2086  	struct dmub_srv_region_params region_params;
2087  	struct dmub_srv_region_info region_info;
2088  	struct dmub_srv_fb_params fb_params;
2089  	struct dmub_srv_fb_info *fb_info;
2090  	struct dmub_srv *dmub_srv;
2091  	const struct dmcub_firmware_header_v1_0 *hdr;
2092  	enum dmub_asic dmub_asic;
2093  	enum dmub_status status;
2094  	int r;
2095  
2096  	switch (adev->ip_versions[DCE_HWIP][0]) {
2097  	case IP_VERSION(2, 1, 0):
2098  		dmub_asic = DMUB_ASIC_DCN21;
2099  		break;
2100  	case IP_VERSION(3, 0, 0):
2101  		dmub_asic = DMUB_ASIC_DCN30;
2102  		break;
2103  	case IP_VERSION(3, 0, 1):
2104  		dmub_asic = DMUB_ASIC_DCN301;
2105  		break;
2106  	case IP_VERSION(3, 0, 2):
2107  		dmub_asic = DMUB_ASIC_DCN302;
2108  		break;
2109  	case IP_VERSION(3, 0, 3):
2110  		dmub_asic = DMUB_ASIC_DCN303;
2111  		break;
2112  	case IP_VERSION(3, 1, 2):
2113  	case IP_VERSION(3, 1, 3):
2114  		dmub_asic = (adev->external_rev_id == YELLOW_CARP_B0) ? DMUB_ASIC_DCN31B : DMUB_ASIC_DCN31;
2115  		break;
2116  	case IP_VERSION(3, 1, 4):
2117  		dmub_asic = DMUB_ASIC_DCN314;
2118  		break;
2119  	case IP_VERSION(3, 1, 5):
2120  		dmub_asic = DMUB_ASIC_DCN315;
2121  		break;
2122  	case IP_VERSION(3, 1, 6):
2123  		dmub_asic = DMUB_ASIC_DCN316;
2124  		break;
2125  	case IP_VERSION(3, 2, 0):
2126  		dmub_asic = DMUB_ASIC_DCN32;
2127  		break;
2128  	case IP_VERSION(3, 2, 1):
2129  		dmub_asic = DMUB_ASIC_DCN321;
2130  		break;
2131  	default:
2132  		/* ASIC doesn't support DMUB. */
2133  		return 0;
2134  	}
2135  
2136  	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
2137  	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
2138  
2139  	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
2140  		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
2141  			AMDGPU_UCODE_ID_DMCUB;
2142  		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
2143  			adev->dm.dmub_fw;
2144  		adev->firmware.fw_size +=
2145  			ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
2146  
2147  		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
2148  			 adev->dm.dmcub_fw_version);
2149  	}
2150  
2151  
2152  	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
2153  	dmub_srv = adev->dm.dmub_srv;
2154  
2155  	if (!dmub_srv) {
2156  		DRM_ERROR("Failed to allocate DMUB service!\n");
2157  		return -ENOMEM;
2158  	}
2159  
2160  	memset(&create_params, 0, sizeof(create_params));
2161  	create_params.user_ctx = adev;
2162  	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
2163  	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
2164  	create_params.asic = dmub_asic;
2165  
2166  	/* Create the DMUB service. */
2167  	status = dmub_srv_create(dmub_srv, &create_params);
2168  	if (status != DMUB_STATUS_OK) {
2169  		DRM_ERROR("Error creating DMUB service: %d\n", status);
2170  		return -EINVAL;
2171  	}
2172  
2173  	/* Calculate the size of all the regions for the DMUB service. */
2174  	memset(&region_params, 0, sizeof(region_params));
2175  
2176  	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
2177  					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
2178  	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
2179  	region_params.vbios_size = adev->bios_size;
2180  	region_params.fw_bss_data = region_params.bss_data_size ?
2181  		adev->dm.dmub_fw->data +
2182  		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
2183  		le32_to_cpu(hdr->inst_const_bytes) : NULL;
2184  	region_params.fw_inst_const =
2185  		adev->dm.dmub_fw->data +
2186  		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
2187  		PSP_HEADER_BYTES;
2188  
2189  	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
2190  					   &region_info);
2191  
2192  	if (status != DMUB_STATUS_OK) {
2193  		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
2194  		return -EINVAL;
2195  	}
2196  
2197  	/*
2198  	 * Allocate a framebuffer based on the total size of all the regions.
2199  	 * TODO: Move this into GART.
2200  	 */
2201  	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
2202  				    AMDGPU_GEM_DOMAIN_VRAM |
2203  				    AMDGPU_GEM_DOMAIN_GTT,
2204  				    &adev->dm.dmub_bo,
2205  				    &adev->dm.dmub_bo_gpu_addr,
2206  				    &adev->dm.dmub_bo_cpu_addr);
2207  	if (r)
2208  		return r;
2209  
2210  	/* Rebase the regions on the framebuffer address. */
2211  	memset(&fb_params, 0, sizeof(fb_params));
2212  	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
2213  	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
2214  	fb_params.region_info = &region_info;
2215  
2216  	adev->dm.dmub_fb_info =
2217  		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
2218  	fb_info = adev->dm.dmub_fb_info;
2219  
2220  	if (!fb_info) {
2221  		DRM_ERROR(
2222  			"Failed to allocate framebuffer info for DMUB service!\n");
2223  		return -ENOMEM;
2224  	}
2225  
2226  	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
2227  	if (status != DMUB_STATUS_OK) {
2228  		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
2229  		return -EINVAL;
2230  	}
2231  
2232  	return 0;
2233  }
2234  
2235  static int dm_sw_init(void *handle)
2236  {
2237  	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2238  	int r;
2239  
2240  	r = dm_dmub_sw_init(adev);
2241  	if (r)
2242  		return r;
2243  
2244  	return load_dmcu_fw(adev);
2245  }
2246  
2247  static int dm_sw_fini(void *handle)
2248  {
2249  	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2250  
2251  	kfree(adev->dm.dmub_fb_info);
2252  	adev->dm.dmub_fb_info = NULL;
2253  
2254  	if (adev->dm.dmub_srv) {
2255  		dmub_srv_destroy(adev->dm.dmub_srv);
2256  		adev->dm.dmub_srv = NULL;
2257  	}
2258  
2259  	amdgpu_ucode_release(&adev->dm.dmub_fw);
2260  	amdgpu_ucode_release(&adev->dm.fw_dmcu);
2261  
2262  	return 0;
2263  }
2264  
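/*
 * Start MST topology management on every connector whose link is an MST
 * branch. On failure, downgrade the link to a single-display connection and
 * stop the topology manager.
 */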
2265  static int detect_mst_link_for_all_connectors(struct drm_device *dev)
2266  {
2267  	struct amdgpu_dm_connector *aconnector;
2268  	struct drm_connector *connector;
2269  	struct drm_connector_list_iter iter;
2270  	int ret = 0;
2271  
2272  	drm_connector_list_iter_begin(dev, &iter);
2273  	drm_for_each_connector_iter(connector, &iter) {
2274  		aconnector = to_amdgpu_dm_connector(connector);
2275  		if (aconnector->dc_link->type == dc_connection_mst_branch &&
2276  		    aconnector->mst_mgr.aux) {
2277  			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
2278  					 aconnector,
2279  					 aconnector->base.base.id);
2280  
2281  			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
2282  			if (ret < 0) {
2283  				DRM_ERROR("DM_MST: Failed to start MST\n");
2284  				aconnector->dc_link->type =
2285  					dc_connection_single;
2286  				ret = dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx,
2287  								     aconnector->dc_link);
2288  				break;
2289  			}
2290  		}
2291  	}
2292  	drm_connector_list_iter_end(&iter);
2293  
2294  	return ret;
2295  }
2296  
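/*
 * Late init: program the ABM/backlight ramping parameters, either through the
 * DMCU IRAM or through DMUB when ABM lives there, then kick off MST detection
 * for all connectors.
 */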
2297  static int dm_late_init(void *handle)
2298  {
2299  	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2300  
2301  	struct dmcu_iram_parameters params;
2302  	unsigned int linear_lut[16];
2303  	int i;
2304  	struct dmcu *dmcu = NULL;
2305  
2306  	dmcu = adev->dm.dc->res_pool->dmcu;
2307  
2308  	for (i = 0; i < 16; i++)
2309  		linear_lut[i] = 0xFFFF * i / 15;
2310  
2311  	params.set = 0;
2312  	params.backlight_ramping_override = false;
2313  	params.backlight_ramping_start = 0xCCCC;
2314  	params.backlight_ramping_reduction = 0xCCCCCCCC;
2315  	params.backlight_lut_array_size = 16;
2316  	params.backlight_lut_array = linear_lut;
2317  
2318  	/* Min backlight level after ABM reduction; don't allow below 1%:
2319  	 * 0xFFFF x 0.01 = 0x28F
2320  	 */
2321  	params.min_abm_backlight = 0x28F;
2322  	/* In the case where abm is implemented on dmcub,
2323  	 * dmcu object will be null.
2324  	 * ABM 2.4 and up are implemented on dmcub.
2325  	 */
2326  	if (dmcu) {
2327  		if (!dmcu_load_iram(dmcu, params))
2328  			return -EINVAL;
2329  	} else if (adev->dm.dc->ctx->dmub_srv) {
2330  		struct dc_link *edp_links[MAX_NUM_EDP];
2331  		int edp_num;
2332  
2333  		dc_get_edp_links(adev->dm.dc, edp_links, &edp_num);
2334  		for (i = 0; i < edp_num; i++) {
2335  			if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
2336  				return -EINVAL;
2337  		}
2338  	}
2339  
2340  	return detect_mst_link_for_all_connectors(adev_to_drm(adev));
2341  }
2342  
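/*
 * Restore the MST branch state after suspend: re-read the DPCD caps,
 * re-enable MST on the hub via DP_MSTM_CTRL, and re-seed the branch GUID if
 * the hub lost it while powered down.
 */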
2343  static void resume_mst_branch_status(struct drm_dp_mst_topology_mgr *mgr)
2344  {
2345  	int ret;
2346  	u8 guid[16];
2347  	u64 tmp64;
2348  
2349  	mutex_lock(&mgr->lock);
2350  	if (!mgr->mst_primary)
2351  		goto out_fail;
2352  
2353  	if (drm_dp_read_dpcd_caps(mgr->aux, mgr->dpcd) < 0) {
2354  		drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n");
2355  		goto out_fail;
2356  	}
2357  
2358  	ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
2359  				 DP_MST_EN |
2360  				 DP_UP_REQ_EN |
2361  				 DP_UPSTREAM_IS_SRC);
2362  	if (ret < 0) {
2363  		drm_dbg_kms(mgr->dev, "mst write failed - undocked during suspend?\n");
2364  		goto out_fail;
2365  	}
2366  
2367  	/* Some hubs forget their guids after they resume */
2368  	ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
2369  	if (ret != 16) {
2370  		drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n");
2371  		goto out_fail;
2372  	}
2373  
2374  	if (memchr_inv(guid, 0, 16) == NULL) {
2375  		tmp64 = get_jiffies_64();
2376  		memcpy(&guid[0], &tmp64, sizeof(u64));
2377  		memcpy(&guid[8], &tmp64, sizeof(u64));
2378  
2379  		ret = drm_dp_dpcd_write(mgr->aux, DP_GUID, guid, 16);
2380  
2381  		if (ret != 16) {
2382  			drm_dbg_kms(mgr->dev, "check mstb guid failed - undocked during suspend?\n");
2383  			goto out_fail;
2384  		}
2385  	}
2386  
2387  	memcpy(mgr->mst_primary->guid, guid, 16);
2388  
2389  out_fail:
2390  	mutex_unlock(&mgr->lock);
2391  }
2392  
2393  static void s3_handle_mst(struct drm_device *dev, bool suspend)
2394  {
2395  	struct amdgpu_dm_connector *aconnector;
2396  	struct drm_connector *connector;
2397  	struct drm_connector_list_iter iter;
2398  	struct drm_dp_mst_topology_mgr *mgr;
2399  
2400  	drm_connector_list_iter_begin(dev, &iter);
2401  	drm_for_each_connector_iter(connector, &iter) {
2402  		aconnector = to_amdgpu_dm_connector(connector);
2403  		if (aconnector->dc_link->type != dc_connection_mst_branch ||
2404  		    aconnector->mst_root)
2405  			continue;
2406  
2407  		mgr = &aconnector->mst_mgr;
2408  
2409  		if (suspend) {
2410  			drm_dp_mst_topology_mgr_suspend(mgr);
2411  		} else {
2412  			/* if extended timeout is supported in hardware,
2413  			 * default to LTTPR timeout (3.2ms) first as a W/A for DP link layer
2414  			 * CTS 4.2.1.1 regression introduced by CTS specs requirement update.
2415  			 */
2416  			try_to_configure_aux_timeout(aconnector->dc_link->ddc, LINK_AUX_DEFAULT_LTTPR_TIMEOUT_PERIOD);
2417  			if (!dp_is_lttpr_present(aconnector->dc_link))
2418  				try_to_configure_aux_timeout(aconnector->dc_link->ddc, LINK_AUX_DEFAULT_TIMEOUT_PERIOD);
2419  
2420  			/* TODO: move resume_mst_branch_status() into drm mst resume again
2421  			 * once topology probing work is pulled out from mst resume into mst
2422  			 * resume 2nd step. mst resume 2nd step should be called after old
2423  			 * state getting restored (i.e. drm_atomic_helper_resume()).
2424  			 */
2425  			resume_mst_branch_status(mgr);
2426  		}
2427  	}
2428  	drm_connector_list_iter_end(&iter);
2429  }
2430  
2431  static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
2432  {
2433  	int ret = 0;
2434  
2435  	/* This interface is for dGPU Navi1x. The Linux dc-pplib interface depends
2436  	 * on the Windows driver dc implementation.
2437  	 * For Navi1x, clock settings of dcn watermarks are fixed. the settings
2438  	 * should be passed to smu during boot up and resume from s3.
2439  	 * boot up: dc calculate dcn watermark clock settings within dc_create,
2440  	 * dcn20_resource_construct
2441  	 * then call pplib functions below to pass the settings to smu:
2442  	 * smu_set_watermarks_for_clock_ranges
2443  	 * smu_set_watermarks_table
2444  	 * navi10_set_watermarks_table
2445  	 * smu_write_watermarks_table
2446  	 *
2447  	 * For Renoir, clock settings of dcn watermark are also fixed values.
2448  	 * dc has implemented different flow for window driver:
2449  	 * dc_hardware_init / dc_set_power_state
2450  	 * dcn10_init_hw
2451  	 * notify_wm_ranges
2452  	 * set_wm_ranges
2453  	 * -- Linux
2454  	 * smu_set_watermarks_for_clock_ranges
2455  	 * renoir_set_watermarks_table
2456  	 * smu_write_watermarks_table
2457  	 *
2458  	 * For Linux,
2459  	 * dc_hardware_init -> amdgpu_dm_init
2460  	 * dc_set_power_state --> dm_resume
2461  	 *
2462  	 * therefore, this function applies to navi10/12/14 but not Renoir.
2463  	 *
2464  	 */
2465  	switch (adev->ip_versions[DCE_HWIP][0]) {
2466  	case IP_VERSION(2, 0, 2):
2467  	case IP_VERSION(2, 0, 0):
2468  		break;
2469  	default:
2470  		return 0;
2471  	}
2472  
2473  	ret = amdgpu_dpm_write_watermarks_table(adev);
2474  	if (ret) {
2475  		DRM_ERROR("Failed to update WMTABLE!\n");
2476  		return ret;
2477  	}
2478  
2479  	return 0;
2480  }
2481  
2482  /**
2483   * dm_hw_init() - Initialize DC device
2484   * @handle: The base driver device containing the amdgpu_dm device.
2485   *
2486   * Initialize the &struct amdgpu_display_manager device. This involves calling
2487   * the initializers of each DM component, then populating the struct with them.
2488   *
2489   * Although the function implies hardware initialization, both hardware and
2490   * software are initialized here. Splitting them out to their relevant init
2491   * hooks is a future TODO item.
2492   *
2493   * Some notable things that are initialized here:
2494   *
2495   * - Display Core, both software and hardware
2496   * - DC modules that we need (freesync and color management)
2497   * - DRM software states
2498   * - Interrupt sources and handlers
2499   * - Vblank support
2500   * - Debug FS entries, if enabled
2501   */
2502  static int dm_hw_init(void *handle)
2503  {
2504  	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2505  	/* Create DAL display manager */
2506  	amdgpu_dm_init(adev);
2507  	amdgpu_dm_hpd_init(adev);
2508  
2509  	return 0;
2510  }
2511  
2512  /**
2513   * dm_hw_fini() - Teardown DC device
2514   * @handle: The base driver device containing the amdgpu_dm device.
2515   *
2516   * Teardown components within &struct amdgpu_display_manager that require
2517   * cleanup. This involves cleaning up the DRM device, DC, and any modules that
2518   * were loaded. Also flush IRQ workqueues and disable them.
2519   */
2520  static int dm_hw_fini(void *handle)
2521  {
2522  	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2523  
2524  	amdgpu_dm_hpd_fini(adev);
2525  
2526  	amdgpu_dm_irq_fini(adev);
2527  	amdgpu_dm_fini(adev);
2528  	return 0;
2529  }
2530  
2531  
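/*
 * During GPU reset, enable or disable the pflip, vupdate and vblank interrupt
 * sources for every stream that still has planes, without going through
 * amdgpu_irq_get/put() so the IRQ refcounts stay untouched.
 */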
2532  static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
2533  				 struct dc_state *state, bool enable)
2534  {
2535  	enum dc_irq_source irq_source;
2536  	struct amdgpu_crtc *acrtc;
2537  	int rc = -EBUSY;
2538  	int i = 0;
2539  
2540  	for (i = 0; i < state->stream_count; i++) {
2541  		acrtc = get_crtc_by_otg_inst(
2542  				adev, state->stream_status[i].primary_otg_inst);
2543  
2544  		if (acrtc && state->stream_status[i].plane_count != 0) {
2545  			irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
2546  			rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
2547  			if (rc)
2548  				DRM_WARN("Failed to %s pflip interrupts\n",
2549  					 enable ? "enable" : "disable");
2550  
2551  			if (enable) {
2552  				if (amdgpu_dm_crtc_vrr_active(to_dm_crtc_state(acrtc->base.state)))
2553  					rc = amdgpu_dm_crtc_set_vupdate_irq(&acrtc->base, true);
2554  			} else
2555  				rc = amdgpu_dm_crtc_set_vupdate_irq(&acrtc->base, false);
2556  
2557  			if (rc)
2558  				DRM_WARN("Failed to %sable vupdate interrupt\n", enable ? "en" : "dis");
2559  
2560  			irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
2561  			/* During gpu-reset we disable and then enable vblank irq, so
2562  			 * don't use amdgpu_irq_get/put() to avoid refcount change.
2563  			 */
2564  			if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
2565  				DRM_WARN("Failed to %sable vblank interrupt\n", enable ? "en" : "dis");
2566  		}
2567  	}
2568  
2569  }
2570  
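/*
 * Commit a state with zero streams: copy the current DC state, strip all
 * planes and streams from the copy and commit it, effectively blanking the
 * hardware before suspend or GPU reset.
 */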
2571  static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
2572  {
2573  	struct dc_state *context = NULL;
2574  	enum dc_status res = DC_ERROR_UNEXPECTED;
2575  	int i;
2576  	struct dc_stream_state *del_streams[MAX_PIPES];
2577  	int del_streams_count = 0;
2578  
2579  	memset(del_streams, 0, sizeof(del_streams));
2580  
2581  	context = dc_create_state(dc);
2582  	if (context == NULL)
2583  		goto context_alloc_fail;
2584  
2585  	dc_resource_state_copy_construct_current(dc, context);
2586  
2587  	/* First remove from context all streams */
2588  	for (i = 0; i < context->stream_count; i++) {
2589  		struct dc_stream_state *stream = context->streams[i];
2590  
2591  		del_streams[del_streams_count++] = stream;
2592  	}
2593  
2594  	/* Remove all planes for removed streams and then remove the streams */
2595  	for (i = 0; i < del_streams_count; i++) {
2596  		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
2597  			res = DC_FAIL_DETACH_SURFACES;
2598  			goto fail;
2599  		}
2600  
2601  		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
2602  		if (res != DC_OK)
2603  			goto fail;
2604  	}
2605  
2606  	res = dc_commit_streams(dc, context->streams, context->stream_count);
2607  
2608  fail:
2609  	dc_release_state(context);
2610  
2611  context_alloc_fail:
2612  	return res;
2613  }
2614  
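/* Flush every per-link HPD RX offload workqueue so no deferred work runs after suspend. */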
2615  static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
2616  {
2617  	int i;
2618  
2619  	if (dm->hpd_rx_offload_wq) {
2620  		for (i = 0; i < dm->dc->caps.max_links; i++)
2621  			flush_workqueue(dm->hpd_rx_offload_wq[i].wq);
2622  	}
2623  }
2624  
2625  static int dm_suspend(void *handle)
2626  {
2627  	struct amdgpu_device *adev = handle;
2628  	struct amdgpu_display_manager *dm = &adev->dm;
2629  	int ret = 0;
2630  
2631  	if (amdgpu_in_reset(adev)) {
2632  		mutex_lock(&dm->dc_lock);
2633  
2634  		dc_allow_idle_optimizations(adev->dm.dc, false);
2635  
2636  		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
2637  
2638  		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
2639  
2640  		amdgpu_dm_commit_zero_streams(dm->dc);
2641  
2642  		amdgpu_dm_irq_suspend(adev);
2643  
2644  		hpd_rx_irq_work_suspend(dm);
2645  
2646  		return ret;
2647  	}
2648  
2649  	WARN_ON(adev->dm.cached_state);
2650  	adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
2651  
2652  	s3_handle_mst(adev_to_drm(adev), true);
2653  
2654  	amdgpu_dm_irq_suspend(adev);
2655  
2656  	hpd_rx_irq_work_suspend(dm);
2657  
2658  	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
2659  
2660  	return 0;
2661  }
2662  
2663  struct amdgpu_dm_connector *
2664  amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
2665  					     struct drm_crtc *crtc)
2666  {
2667  	u32 i;
2668  	struct drm_connector_state *new_con_state;
2669  	struct drm_connector *connector;
2670  	struct drm_crtc *crtc_from_state;
2671  
2672  	for_each_new_connector_in_state(state, connector, new_con_state, i) {
2673  		crtc_from_state = new_con_state->crtc;
2674  
2675  		if (crtc_from_state == crtc)
2676  			return to_amdgpu_dm_connector(connector);
2677  	}
2678  
2679  	return NULL;
2680  }
2681  
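/*
 * Emulate a successful detection for connectors forced by the user: create a
 * sink that matches the connector signal and read the EDID locally instead of
 * relying on a physical hotplug.
 */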
2682  static void emulated_link_detect(struct dc_link *link)
2683  {
2684  	struct dc_sink_init_data sink_init_data = { 0 };
2685  	struct display_sink_capability sink_caps = { 0 };
2686  	enum dc_edid_status edid_status;
2687  	struct dc_context *dc_ctx = link->ctx;
2688  	struct dc_sink *sink = NULL;
2689  	struct dc_sink *prev_sink = NULL;
2690  
2691  	link->type = dc_connection_none;
2692  	prev_sink = link->local_sink;
2693  
2694  	if (prev_sink)
2695  		dc_sink_release(prev_sink);
2696  
2697  	switch (link->connector_signal) {
2698  	case SIGNAL_TYPE_HDMI_TYPE_A: {
2699  		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2700  		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2701  		break;
2702  	}
2703  
2704  	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2705  		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2706  		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2707  		break;
2708  	}
2709  
2710  	case SIGNAL_TYPE_DVI_DUAL_LINK: {
2711  		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2712  		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2713  		break;
2714  	}
2715  
2716  	case SIGNAL_TYPE_LVDS: {
2717  		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2718  		sink_caps.signal = SIGNAL_TYPE_LVDS;
2719  		break;
2720  	}
2721  
2722  	case SIGNAL_TYPE_EDP: {
2723  		sink_caps.transaction_type =
2724  			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2725  		sink_caps.signal = SIGNAL_TYPE_EDP;
2726  		break;
2727  	}
2728  
2729  	case SIGNAL_TYPE_DISPLAY_PORT: {
2730  		sink_caps.transaction_type =
2731  			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2732  		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2733  		break;
2734  	}
2735  
2736  	default:
2737  		DC_ERROR("Invalid connector type! signal:%d\n",
2738  			link->connector_signal);
2739  		return;
2740  	}
2741  
2742  	sink_init_data.link = link;
2743  	sink_init_data.sink_signal = sink_caps.signal;
2744  
2745  	sink = dc_sink_create(&sink_init_data);
2746  	if (!sink) {
2747  		DC_ERROR("Failed to create sink!\n");
2748  		return;
2749  	}
2750  
2751  	/* dc_sink_create returns a new reference */
2752  	link->local_sink = sink;
2753  
2754  	edid_status = dm_helpers_read_local_edid(
2755  			link->ctx,
2756  			link,
2757  			sink);
2758  
2759  	if (edid_status != EDID_OK)
2760  		DC_ERROR("Failed to read EDID");
2761  
2762  }
2763  
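/*
 * After GPU reset, replay the cached DC state: mark every surface for a full
 * update and push the streams and planes back to DC so the hardware is
 * reprogrammed from scratch.
 */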
2764  static void dm_gpureset_commit_state(struct dc_state *dc_state,
2765  				     struct amdgpu_display_manager *dm)
2766  {
2767  	struct {
2768  		struct dc_surface_update surface_updates[MAX_SURFACES];
2769  		struct dc_plane_info plane_infos[MAX_SURFACES];
2770  		struct dc_scaling_info scaling_infos[MAX_SURFACES];
2771  		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2772  		struct dc_stream_update stream_update;
2773  	} *bundle;
2774  	int k, m;
2775  
2776  	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2777  
2778  	if (!bundle) {
2779  		dm_error("Failed to allocate update bundle\n");
2780  		goto cleanup;
2781  	}
2782  
2783  	for (k = 0; k < dc_state->stream_count; k++) {
2784  		bundle->stream_update.stream = dc_state->streams[k];
2785  
2786  		for (m = 0; m < dc_state->stream_status->plane_count; m++) {
2787  			bundle->surface_updates[m].surface =
2788  				dc_state->stream_status->plane_states[m];
2789  			bundle->surface_updates[m].surface->force_full_update =
2790  				true;
2791  		}
2792  
2793  		update_planes_and_stream_adapter(dm->dc,
2794  					 UPDATE_TYPE_FULL,
2795  					 dc_state->stream_status->plane_count,
2796  					 dc_state->streams[k],
2797  					 &bundle->stream_update,
2798  					 bundle->surface_updates);
2799  	}
2800  
2801  cleanup:
2802  	kfree(bundle);
2803  }
2804  
2805  static int dm_resume(void *handle)
2806  {
2807  	struct amdgpu_device *adev = handle;
2808  	struct drm_device *ddev = adev_to_drm(adev);
2809  	struct amdgpu_display_manager *dm = &adev->dm;
2810  	struct amdgpu_dm_connector *aconnector;
2811  	struct drm_connector *connector;
2812  	struct drm_connector_list_iter iter;
2813  	struct drm_crtc *crtc;
2814  	struct drm_crtc_state *new_crtc_state;
2815  	struct dm_crtc_state *dm_new_crtc_state;
2816  	struct drm_plane *plane;
2817  	struct drm_plane_state *new_plane_state;
2818  	struct dm_plane_state *dm_new_plane_state;
2819  	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2820  	enum dc_connection_type new_connection_type = dc_connection_none;
2821  	struct dc_state *dc_state;
2822  	int i, r, j, ret;
2823  	bool need_hotplug = false;
2824  
2825  	if (amdgpu_in_reset(adev)) {
2826  		dc_state = dm->cached_dc_state;
2827  
2828  		/*
2829  		 * The dc->current_state is backed up into dm->cached_dc_state
2830  		 * before we commit 0 streams.
2831  		 *
2832  		 * DC will clear link encoder assignments on the real state
2833  		 * but the changes won't propagate over to the copy we made
2834  		 * before the 0 streams commit.
2835  		 *
2836  		 * DC expects that link encoder assignments are *not* valid
2837  		 * when committing a state, so as a workaround we can copy
2838  		 * off of the current state.
2839  		 *
2840  		 * We lose the previous assignments, but we had already
2841  		 * commit 0 streams anyway.
2842  		 */
2843  		link_enc_cfg_copy(adev->dm.dc->current_state, dc_state);
2844  
2845  		r = dm_dmub_hw_init(adev);
2846  		if (r)
2847  			DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2848  
2849  		dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2850  		dc_resume(dm->dc);
2851  
2852  		amdgpu_dm_irq_resume_early(adev);
2853  
2854  		for (i = 0; i < dc_state->stream_count; i++) {
2855  			dc_state->streams[i]->mode_changed = true;
2856  			for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
2857  				dc_state->stream_status[i].plane_states[j]->update_flags.raw
2858  					= 0xffffffff;
2859  			}
2860  		}
2861  
2862  		if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
2863  			amdgpu_dm_outbox_init(adev);
2864  			dc_enable_dmub_outbox(adev->dm.dc);
2865  		}
2866  
2867  		WARN_ON(!dc_commit_streams(dm->dc, dc_state->streams, dc_state->stream_count));
2868  
2869  		dm_gpureset_commit_state(dm->cached_dc_state, dm);
2870  
2871  		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2872  
2873  		dc_release_state(dm->cached_dc_state);
2874  		dm->cached_dc_state = NULL;
2875  
2876  		amdgpu_dm_irq_resume_late(adev);
2877  
2878  		mutex_unlock(&dm->dc_lock);
2879  
2880  		return 0;
2881  	}
2882  	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
2883  	dc_release_state(dm_state->context);
2884  	dm_state->context = dc_create_state(dm->dc);
2885  	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2886  	dc_resource_state_construct(dm->dc, dm_state->context);
2887  
2888  	/* Before powering on DC we need to re-initialize DMUB. */
2889  	dm_dmub_hw_resume(adev);
2890  
2891  	/* Re-enable outbox interrupts for DPIA. */
2892  	if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
2893  		amdgpu_dm_outbox_init(adev);
2894  		dc_enable_dmub_outbox(adev->dm.dc);
2895  	}
2896  
2897  	/* power on hardware */
2898  	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2899  
2900  	/* program HPD filter */
2901  	dc_resume(dm->dc);
2902  
2903  	/*
2904  	 * early enable HPD Rx IRQ, should be done before set mode as short
2905  	 * pulse interrupts are used for MST
2906  	 */
2907  	amdgpu_dm_irq_resume_early(adev);
2908  
2909  	/* On resume we need to rewrite the MSTM control bits to enable MST */
2910  	s3_handle_mst(ddev, false);
2911  
2912  	/* Do detection */
2913  	drm_connector_list_iter_begin(ddev, &iter);
2914  	drm_for_each_connector_iter(connector, &iter) {
2915  		aconnector = to_amdgpu_dm_connector(connector);
2916  
2917  		if (!aconnector->dc_link)
2918  			continue;
2919  
2920  		/*
2921  		 * this is the case when traversing through already created end sink
2922  		 * MST connectors, should be skipped
2923  		 */
2924  		if (aconnector && aconnector->mst_root)
2925  			continue;
2926  
2927  		mutex_lock(&aconnector->hpd_lock);
2928  		if (!dc_link_detect_connection_type(aconnector->dc_link, &new_connection_type))
2929  			DRM_ERROR("KMS: Failed to detect connector\n");
2930  
2931  		if (aconnector->base.force && new_connection_type == dc_connection_none) {
2932  			emulated_link_detect(aconnector->dc_link);
2933  		} else {
2934  			mutex_lock(&dm->dc_lock);
2935  			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2936  			mutex_unlock(&dm->dc_lock);
2937  		}
2938  
2939  		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2940  			aconnector->fake_enable = false;
2941  
2942  		if (aconnector->dc_sink)
2943  			dc_sink_release(aconnector->dc_sink);
2944  		aconnector->dc_sink = NULL;
2945  		amdgpu_dm_update_connector_after_detect(aconnector);
2946  		mutex_unlock(&aconnector->hpd_lock);
2947  	}
2948  	drm_connector_list_iter_end(&iter);
2949  
2950  	/* Force mode set in atomic commit */
2951  	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2952  		new_crtc_state->active_changed = true;
2953  
2954  	/*
2955  	 * atomic_check is expected to create the dc states. We need to release
2956  	 * them here, since they were duplicated as part of the suspend
2957  	 * procedure.
2958  	 */
2959  	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2960  		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2961  		if (dm_new_crtc_state->stream) {
2962  			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2963  			dc_stream_release(dm_new_crtc_state->stream);
2964  			dm_new_crtc_state->stream = NULL;
2965  		}
2966  	}
2967  
2968  	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2969  		dm_new_plane_state = to_dm_plane_state(new_plane_state);
2970  		if (dm_new_plane_state->dc_state) {
2971  			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2972  			dc_plane_state_release(dm_new_plane_state->dc_state);
2973  			dm_new_plane_state->dc_state = NULL;
2974  		}
2975  	}
2976  
2977  	drm_atomic_helper_resume(ddev, dm->cached_state);
2978  
2979  	dm->cached_state = NULL;
2980  
2981  	/* Do MST topology probing after resuming cached state */
2982  	drm_connector_list_iter_begin(ddev, &iter);
2983  	drm_for_each_connector_iter(connector, &iter) {
2984  		aconnector = to_amdgpu_dm_connector(connector);
2985  		if (aconnector->dc_link->type != dc_connection_mst_branch ||
2986  		    aconnector->mst_root)
2987  			continue;
2988  
2989  		ret = drm_dp_mst_topology_mgr_resume(&aconnector->mst_mgr, true);
2990  
2991  		if (ret < 0) {
2992  			dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx,
2993  					aconnector->dc_link);
2994  			need_hotplug = true;
2995  		}
2996  	}
2997  	drm_connector_list_iter_end(&iter);
2998  
2999  	if (need_hotplug)
3000  		drm_kms_helper_hotplug_event(ddev);
3001  
3002  	amdgpu_dm_irq_resume_late(adev);
3003  
3004  	amdgpu_dm_smu_write_watermarks_table(adev);
3005  
3006  	return 0;
3007  }
3008  
3009  /**
3010   * DOC: DM Lifecycle
3011   *
3012   * DM (and consequently DC) is registered in the amdgpu base driver as a IP
3013   * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
3014   * the base driver's device list to be initialized and torn down accordingly.
3015   *
3016   * The functions to do so are provided as hooks in &struct amd_ip_funcs.
3017   */
3018  
3019  static const struct amd_ip_funcs amdgpu_dm_funcs = {
3020  	.name = "dm",
3021  	.early_init = dm_early_init,
3022  	.late_init = dm_late_init,
3023  	.sw_init = dm_sw_init,
3024  	.sw_fini = dm_sw_fini,
3025  	.early_fini = amdgpu_dm_early_fini,
3026  	.hw_init = dm_hw_init,
3027  	.hw_fini = dm_hw_fini,
3028  	.suspend = dm_suspend,
3029  	.resume = dm_resume,
3030  	.is_idle = dm_is_idle,
3031  	.wait_for_idle = dm_wait_for_idle,
3032  	.check_soft_reset = dm_check_soft_reset,
3033  	.soft_reset = dm_soft_reset,
3034  	.set_clockgating_state = dm_set_clockgating_state,
3035  	.set_powergating_state = dm_set_powergating_state,
3036  };
3037  
3038  const struct amdgpu_ip_block_version dm_ip_block = {
3039  	.type = AMD_IP_BLOCK_TYPE_DCE,
3040  	.major = 1,
3041  	.minor = 0,
3042  	.rev = 0,
3043  	.funcs = &amdgpu_dm_funcs,
3044  };
3045  
3046  
3047  /**
3048   * DOC: atomic
3049   *
3050   * *WIP*
3051   */
3052  
3053  static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
3054  	.fb_create = amdgpu_display_user_framebuffer_create,
3055  	.get_format_info = amdgpu_dm_plane_get_format_info,
3056  	.atomic_check = amdgpu_dm_atomic_check,
3057  	.atomic_commit = drm_atomic_helper_commit,
3058  };
3059  
3060  static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
3061  	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail,
3062  	.atomic_commit_setup = drm_dp_mst_atomic_setup_commit,
3063  };
3064  
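/*
 * Refresh the eDP backlight capabilities for this connector: decide between
 * AUX and PWM backlight control from the sink's extended DPCD caps and the
 * amdgpu_backlight option, and record the luminance range reported by the
 * panel.
 */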
3065  static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
3066  {
3067  	struct amdgpu_dm_backlight_caps *caps;
3068  	struct drm_connector *conn_base;
3069  	struct amdgpu_device *adev;
3070  	struct drm_luminance_range_info *luminance_range;
3071  
3072  	if (aconnector->bl_idx == -1 ||
3073  	    aconnector->dc_link->connector_signal != SIGNAL_TYPE_EDP)
3074  		return;
3075  
3076  	conn_base = &aconnector->base;
3077  	adev = drm_to_adev(conn_base->dev);
3078  
3079  	caps = &adev->dm.backlight_caps[aconnector->bl_idx];
3080  	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
3081  	caps->aux_support = false;
3082  
3083  	if (caps->ext_caps->bits.oled == 1
3084  	    /*
3085  	     * ||
3086  	     * caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
3087  	     * caps->ext_caps->bits.hdr_aux_backlight_control == 1
3088  	     */)
3089  		caps->aux_support = true;
3090  
3091  	if (amdgpu_backlight == 0)
3092  		caps->aux_support = false;
3093  	else if (amdgpu_backlight == 1)
3094  		caps->aux_support = true;
3095  
3096  	luminance_range = &conn_base->display_info.luminance_range;
3097  
3098  	if (luminance_range->max_luminance) {
3099  		caps->aux_min_input_signal = luminance_range->min_luminance;
3100  		caps->aux_max_input_signal = luminance_range->max_luminance;
3101  	} else {
3102  		caps->aux_min_input_signal = 0;
3103  		caps->aux_max_input_signal = 512;
3104  	}
3105  }
3106  
3107  void amdgpu_dm_update_connector_after_detect(
3108  		struct amdgpu_dm_connector *aconnector)
3109  {
3110  	struct drm_connector *connector = &aconnector->base;
3111  	struct drm_device *dev = connector->dev;
3112  	struct dc_sink *sink;
3113  
3114  	/* MST handled by drm_mst framework */
3115  	if (aconnector->mst_mgr.mst_state == true)
3116  		return;
3117  
3118  	sink = aconnector->dc_link->local_sink;
3119  	if (sink)
3120  		dc_sink_retain(sink);
3121  
3122  	/*
3123  	 * Edid mgmt connector gets first update only in mode_valid hook and then
3124  	 * the connector sink is set to either a fake or a physical sink, depending on link status.
3125  	 * Skip if already done during boot.
3126  	 */
3127  	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
3128  			&& aconnector->dc_em_sink) {
3129  
3130  		/*
3131  		 * For headless S3 resume, use the emulated sink to fake a stream
3132  		 * because connector->dc_sink is set to NULL on resume
3133  		 */
3134  		mutex_lock(&dev->mode_config.mutex);
3135  
3136  		if (sink) {
3137  			if (aconnector->dc_sink) {
3138  				amdgpu_dm_update_freesync_caps(connector, NULL);
3139  				/*
3140  				 * retain and release below are used to
3141  				 * bump up the refcount for the sink because the link no longer
3142  				 * points to it after disconnect, so on the next crtc-to-connector
3143  				 * reshuffle by UMD we would otherwise hit an unwanted dc_sink release
3144  				 */
3145  				dc_sink_release(aconnector->dc_sink);
3146  			}
3147  			aconnector->dc_sink = sink;
3148  			dc_sink_retain(aconnector->dc_sink);
3149  			amdgpu_dm_update_freesync_caps(connector,
3150  					aconnector->edid);
3151  		} else {
3152  			amdgpu_dm_update_freesync_caps(connector, NULL);
3153  			if (!aconnector->dc_sink) {
3154  				aconnector->dc_sink = aconnector->dc_em_sink;
3155  				dc_sink_retain(aconnector->dc_sink);
3156  			}
3157  		}
3158  
3159  		mutex_unlock(&dev->mode_config.mutex);
3160  
3161  		if (sink)
3162  			dc_sink_release(sink);
3163  		return;
3164  	}
3165  
3166  	/*
3167  	 * TODO: temporary guard until a proper fix is found.
3168  	 * If this sink is an MST sink, we should not do anything.
3169  	 */
3170  	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
3171  		dc_sink_release(sink);
3172  		return;
3173  	}
3174  
3175  	if (aconnector->dc_sink == sink) {
3176  		/*
3177  		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
3178  		 * Do nothing!!
3179  		 */
3180  		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
3181  				aconnector->connector_id);
3182  		if (sink)
3183  			dc_sink_release(sink);
3184  		return;
3185  	}
3186  
3187  	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
3188  		aconnector->connector_id, aconnector->dc_sink, sink);
3189  
3190  	mutex_lock(&dev->mode_config.mutex);
3191  
3192  	/*
3193  	 * 1. Update status of the drm connector
3194  	 * 2. Send an event and let userspace tell us what to do
3195  	 */
3196  	if (sink) {
3197  		/*
3198  		 * TODO: check if we still need the S3 mode update workaround.
3199  		 * If yes, put it here.
3200  		 */
3201  		if (aconnector->dc_sink) {
3202  			amdgpu_dm_update_freesync_caps(connector, NULL);
3203  			dc_sink_release(aconnector->dc_sink);
3204  		}
3205  
3206  		aconnector->dc_sink = sink;
3207  		dc_sink_retain(aconnector->dc_sink);
3208  		if (sink->dc_edid.length == 0) {
3209  			aconnector->edid = NULL;
3210  			if (aconnector->dc_link->aux_mode) {
3211  				drm_dp_cec_unset_edid(
3212  					&aconnector->dm_dp_aux.aux);
3213  			}
3214  		} else {
3215  			aconnector->edid =
3216  				(struct edid *)sink->dc_edid.raw_edid;
3217  
3218  			if (aconnector->dc_link->aux_mode)
3219  				drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
3220  						    aconnector->edid);
3221  		}
3222  
3223  		if (!aconnector->timing_requested) {
3224  			aconnector->timing_requested =
3225  				kzalloc(sizeof(struct dc_crtc_timing), GFP_KERNEL);
3226  			if (!aconnector->timing_requested)
3227  				dm_error("failed to create aconnector->timing_requested\n");
3228  		}
3229  
3230  		drm_connector_update_edid_property(connector, aconnector->edid);
3231  		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
3232  		update_connector_ext_caps(aconnector);
3233  	} else {
3234  		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
3235  		amdgpu_dm_update_freesync_caps(connector, NULL);
3236  		drm_connector_update_edid_property(connector, NULL);
3237  		aconnector->num_modes = 0;
3238  		dc_sink_release(aconnector->dc_sink);
3239  		aconnector->dc_sink = NULL;
3240  		aconnector->edid = NULL;
3241  		kfree(aconnector->timing_requested);
3242  		aconnector->timing_requested = NULL;
3243  		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
3244  		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
3245  			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
3246  	}
3247  
3248  	mutex_unlock(&dev->mode_config.mutex);
3249  
3250  	update_subconnector_property(aconnector);
3251  
3252  	if (sink)
3253  		dc_sink_release(sink);
3254  }
3255  
3256  static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
3257  {
3258  	struct drm_connector *connector = &aconnector->base;
3259  	struct drm_device *dev = connector->dev;
3260  	enum dc_connection_type new_connection_type = dc_connection_none;
3261  	struct amdgpu_device *adev = drm_to_adev(dev);
3262  	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
3263  	bool ret = false;
3264  
3265  	if (adev->dm.disable_hpd_irq)
3266  		return;
3267  
3268  	/*
3269  	 * In case of failure or MST there is no need to update the connector status
3270  	 * or notify the OS, since (in the MST case) MST handles this in its own context.
3271  	 */
3272  	mutex_lock(&aconnector->hpd_lock);
3273  
3274  	if (adev->dm.hdcp_workqueue) {
3275  		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
3276  		dm_con_state->update_hdcp = true;
3277  	}
3278  	if (aconnector->fake_enable)
3279  		aconnector->fake_enable = false;
3280  
3281  	aconnector->timing_changed = false;
3282  
3283  	if (!dc_link_detect_connection_type(aconnector->dc_link, &new_connection_type))
3284  		DRM_ERROR("KMS: Failed to detect connector\n");
3285  
3286  	if (aconnector->base.force && new_connection_type == dc_connection_none) {
3287  		emulated_link_detect(aconnector->dc_link);
3288  
3289  		drm_modeset_lock_all(dev);
3290  		dm_restore_drm_connector_state(dev, connector);
3291  		drm_modeset_unlock_all(dev);
3292  
3293  		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3294  			drm_kms_helper_connector_hotplug_event(connector);
3295  	} else {
3296  		mutex_lock(&adev->dm.dc_lock);
3297  		ret = dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
3298  		mutex_unlock(&adev->dm.dc_lock);
3299  		if (ret) {
3300  			amdgpu_dm_update_connector_after_detect(aconnector);
3301  
3302  			drm_modeset_lock_all(dev);
3303  			dm_restore_drm_connector_state(dev, connector);
3304  			drm_modeset_unlock_all(dev);
3305  
3306  			if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3307  				drm_kms_helper_connector_hotplug_event(connector);
3308  		}
3309  	}
3310  	mutex_unlock(&aconnector->hpd_lock);
3311  
3312  }
3313  
3314  static void handle_hpd_irq(void *param)
3315  {
3316  	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3317  
3318  	handle_hpd_irq_helper(aconnector);
3319  
3320  }
3321  
3322  static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
3323  							union hpd_irq_data hpd_irq_data)
3324  {
3325  	struct hpd_rx_irq_offload_work *offload_work =
3326  				kzalloc(sizeof(*offload_work), GFP_KERNEL);
3327  
3328  	if (!offload_work) {
3329  		DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n");
3330  		return;
3331  	}
3332  
3333  	INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work);
3334  	offload_work->data = hpd_irq_data;
3335  	offload_work->offload_wq = offload_wq;
3336  
3337  	queue_work(offload_wq->wq, &offload_work->work);
3338  	DRM_DEBUG_KMS("queue work to handle hpd_rx offload work");
3339  }
3340  
3341  static void handle_hpd_rx_irq(void *param)
3342  {
3343  	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3344  	struct drm_connector *connector = &aconnector->base;
3345  	struct drm_device *dev = connector->dev;
3346  	struct dc_link *dc_link = aconnector->dc_link;
3347  	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
3348  	bool result = false;
3349  	enum dc_connection_type new_connection_type = dc_connection_none;
3350  	struct amdgpu_device *adev = drm_to_adev(dev);
3351  	union hpd_irq_data hpd_irq_data;
3352  	bool link_loss = false;
3353  	bool has_left_work = false;
3354  	int idx = dc_link->link_index;
3355  	struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];
3356  
3357  	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
3358  
3359  	if (adev->dm.disable_hpd_irq)
3360  		return;
3361  
3362  	/*
3363  	 * TODO: Temporarily add a mutex to protect the HPD interrupt from GPIO
3364  	 * conflicts; once an i2c helper is implemented, this mutex should be
3365  	 * retired.
3366  	 */
3367  	mutex_lock(&aconnector->hpd_lock);
3368  
3369  	result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data,
3370  						&link_loss, true, &has_left_work);
3371  
3372  	if (!has_left_work)
3373  		goto out;
3374  
3375  	if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
3376  		schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3377  		goto out;
3378  	}
3379  
3380  	if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
3381  		if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
3382  			hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
3383  			bool skip = false;
3384  
3385  			/*
3386  			 * DOWN_REP_MSG_RDY is also handled by polling method
3387  			 * mgr->cbs->poll_hpd_irq()
3388  			 */
3389  			spin_lock(&offload_wq->offload_lock);
3390  			skip = offload_wq->is_handling_mst_msg_rdy_event;
3391  
3392  			if (!skip)
3393  				offload_wq->is_handling_mst_msg_rdy_event = true;
3394  
3395  			spin_unlock(&offload_wq->offload_lock);
3396  
3397  			if (!skip)
3398  				schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3399  
3400  			goto out;
3401  		}
3402  
3403  		if (link_loss) {
3404  			bool skip = false;
3405  
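			/*
			 * Queue at most one link-loss work item at a time: the
			 * in-flight flag is tested and set under the offload lock
			 * so repeated hpd_rx interrupts don't schedule duplicate
			 * work while one is already being handled.
			 */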
3406  			spin_lock(&offload_wq->offload_lock);
3407  			skip = offload_wq->is_handling_link_loss;
3408  
3409  			if (!skip)
3410  				offload_wq->is_handling_link_loss = true;
3411  
3412  			spin_unlock(&offload_wq->offload_lock);
3413  
3414  			if (!skip)
3415  				schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3416  
3417  			goto out;
3418  		}
3419  	}
3420  
3421  out:
3422  	if (result && !is_mst_root_connector) {
3423  		/* Downstream Port status changed. */
3424  		if (!dc_link_detect_connection_type(dc_link, &new_connection_type))
3425  			DRM_ERROR("KMS: Failed to detect connector\n");
3426  
3427  		if (aconnector->base.force && new_connection_type == dc_connection_none) {
3428  			emulated_link_detect(dc_link);
3429  
3430  			if (aconnector->fake_enable)
3431  				aconnector->fake_enable = false;
3432  
3433  			amdgpu_dm_update_connector_after_detect(aconnector);
3434  
3435  
3436  			drm_modeset_lock_all(dev);
3437  			dm_restore_drm_connector_state(dev, connector);
3438  			drm_modeset_unlock_all(dev);
3439  
3440  			drm_kms_helper_connector_hotplug_event(connector);
3441  		} else {
3442  			bool ret = false;
3443  
3444  			mutex_lock(&adev->dm.dc_lock);
3445  			ret = dc_link_detect(dc_link, DETECT_REASON_HPDRX);
3446  			mutex_unlock(&adev->dm.dc_lock);
3447  
3448  			if (ret) {
3449  				if (aconnector->fake_enable)
3450  					aconnector->fake_enable = false;
3451  
3452  				amdgpu_dm_update_connector_after_detect(aconnector);
3453  
3454  				drm_modeset_lock_all(dev);
3455  				dm_restore_drm_connector_state(dev, connector);
3456  				drm_modeset_unlock_all(dev);
3457  
3458  				drm_kms_helper_connector_hotplug_event(connector);
3459  			}
3460  		}
3461  	}
3462  	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
3463  		if (adev->dm.hdcp_workqueue)
3464  			hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
3465  	}
3466  
3467  	if (dc_link->type != dc_connection_mst_branch)
3468  		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
3469  
3470  	mutex_unlock(&aconnector->hpd_lock);
3471  }
3472  
3473  static void register_hpd_handlers(struct amdgpu_device *adev)
3474  {
3475  	struct drm_device *dev = adev_to_drm(adev);
3476  	struct drm_connector *connector;
3477  	struct amdgpu_dm_connector *aconnector;
3478  	const struct dc_link *dc_link;
3479  	struct dc_interrupt_params int_params = {0};
3480  
3481  	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3482  	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3483  
3484  	list_for_each_entry(connector,
3485  			&dev->mode_config.connector_list, head) {
3486  
3487  		aconnector = to_amdgpu_dm_connector(connector);
3488  		dc_link = aconnector->dc_link;
3489  
3490  		if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) {
3491  			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3492  			int_params.irq_source = dc_link->irq_source_hpd;
3493  
3494  			amdgpu_dm_irq_register_interrupt(adev, &int_params,
3495  					handle_hpd_irq,
3496  					(void *) aconnector);
3497  		}
3498  
3499  		if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) {
3500  
3501  			/* Also register for DP short pulse (hpd_rx). */
3502  			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3503  			int_params.irq_source = dc_link->irq_source_hpd_rx;
3504  
3505  			amdgpu_dm_irq_register_interrupt(adev, &int_params,
3506  					handle_hpd_rx_irq,
3507  					(void *) aconnector);
3508  		}
3509  
3510  		if (adev->dm.hpd_rx_offload_wq)
3511  			adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
3512  				aconnector;
3513  	}
3514  }
3515  
3516  #if defined(CONFIG_DRM_AMD_DC_SI)
3517  /* Register IRQ sources and initialize IRQ callbacks */
3518  static int dce60_register_irq_handlers(struct amdgpu_device *adev)
3519  {
3520  	struct dc *dc = adev->dm.dc;
3521  	struct common_irq_params *c_irq_params;
3522  	struct dc_interrupt_params int_params = {0};
3523  	int r;
3524  	int i;
3525  	unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3526  
3527  	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3528  	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3529  
3530  	/*
3531  	 * Actions of amdgpu_irq_add_id():
3532  	 * 1. Register a set() function with base driver.
3533  	 *    Base driver will call set() function to enable/disable an
3534  	 *    interrupt in DC hardware.
3535  	 * 2. Register amdgpu_dm_irq_handler().
3536  	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3537  	 *    coming from DC hardware.
3538  	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3539  	 *    for acknowledging and handling.
3540  	 */
3541  
3542  	/* Use VBLANK interrupt */
3543  	for (i = 0; i < adev->mode_info.num_crtc; i++) {
3544  		r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
3545  		if (r) {
3546  			DRM_ERROR("Failed to add crtc irq id!\n");
3547  			return r;
3548  		}
3549  
3550  		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3551  		int_params.irq_source =
3552  			dc_interrupt_to_irq_source(dc, i + 1, 0);
3553  
3554  		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3555  
3556  		c_irq_params->adev = adev;
3557  		c_irq_params->irq_src = int_params.irq_source;
3558  
3559  		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3560  				dm_crtc_high_irq, c_irq_params);
3561  	}
3562  
3563  	/* Use GRPH_PFLIP interrupt */
3564  	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3565  			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3566  		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3567  		if (r) {
3568  			DRM_ERROR("Failed to add page flip irq id!\n");
3569  			return r;
3570  		}
3571  
3572  		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3573  		int_params.irq_source =
3574  			dc_interrupt_to_irq_source(dc, i, 0);
3575  
3576  		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3577  
3578  		c_irq_params->adev = adev;
3579  		c_irq_params->irq_src = int_params.irq_source;
3580  
3581  		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3582  				dm_pflip_high_irq, c_irq_params);
3583  
3584  	}
3585  
3586  	/* HPD */
3587  	r = amdgpu_irq_add_id(adev, client_id,
3588  			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3589  	if (r) {
3590  		DRM_ERROR("Failed to add hpd irq id!\n");
3591  		return r;
3592  	}
3593  
3594  	register_hpd_handlers(adev);
3595  
3596  	return 0;
3597  }
3598  #endif
3599  
3600  /* Register IRQ sources and initialize IRQ callbacks */
3601  static int dce110_register_irq_handlers(struct amdgpu_device *adev)
3602  {
3603  	struct dc *dc = adev->dm.dc;
3604  	struct common_irq_params *c_irq_params;
3605  	struct dc_interrupt_params int_params = {0};
3606  	int r;
3607  	int i;
3608  	unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3609  
3610  	if (adev->family >= AMDGPU_FAMILY_AI)
3611  		client_id = SOC15_IH_CLIENTID_DCE;
3612  
3613  	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3614  	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3615  
3616  	/*
3617  	 * Actions of amdgpu_irq_add_id():
3618  	 * 1. Register a set() function with base driver.
3619  	 *    Base driver will call set() function to enable/disable an
3620  	 *    interrupt in DC hardware.
3621  	 * 2. Register amdgpu_dm_irq_handler().
3622  	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3623  	 *    coming from DC hardware.
3624  	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3625  	 *    for acknowledging and handling.
3626  	 */
3627  
3628  	/* Use VBLANK interrupt */
3629  	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
3630  		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
3631  		if (r) {
3632  			DRM_ERROR("Failed to add crtc irq id!\n");
3633  			return r;
3634  		}
3635  
3636  		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3637  		int_params.irq_source =
3638  			dc_interrupt_to_irq_source(dc, i, 0);
3639  
3640  		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3641  
3642  		c_irq_params->adev = adev;
3643  		c_irq_params->irq_src = int_params.irq_source;
3644  
3645  		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3646  				dm_crtc_high_irq, c_irq_params);
3647  	}
3648  
3649  	/* Use VUPDATE interrupt */
3650  	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
3651  		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
3652  		if (r) {
3653  			DRM_ERROR("Failed to add vupdate irq id!\n");
3654  			return r;
3655  		}
3656  
3657  		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3658  		int_params.irq_source =
3659  			dc_interrupt_to_irq_source(dc, i, 0);
3660  
3661  		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3662  
3663  		c_irq_params->adev = adev;
3664  		c_irq_params->irq_src = int_params.irq_source;
3665  
3666  		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3667  				dm_vupdate_high_irq, c_irq_params);
3668  	}
3669  
3670  	/* Use GRPH_PFLIP interrupt */
3671  	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3672  			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3673  		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3674  		if (r) {
3675  			DRM_ERROR("Failed to add page flip irq id!\n");
3676  			return r;
3677  		}
3678  
3679  		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3680  		int_params.irq_source =
3681  			dc_interrupt_to_irq_source(dc, i, 0);
3682  
3683  		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3684  
3685  		c_irq_params->adev = adev;
3686  		c_irq_params->irq_src = int_params.irq_source;
3687  
3688  		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3689  				dm_pflip_high_irq, c_irq_params);
3690  
3691  	}
3692  
3693  	/* HPD */
3694  	r = amdgpu_irq_add_id(adev, client_id,
3695  			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3696  	if (r) {
3697  		DRM_ERROR("Failed to add hpd irq id!\n");
3698  		return r;
3699  	}
3700  
3701  	register_hpd_handlers(adev);
3702  
3703  	return 0;
3704  }
3705  
3706  /* Register IRQ sources and initialize IRQ callbacks */
3707  static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3708  {
3709  	struct dc *dc = adev->dm.dc;
3710  	struct common_irq_params *c_irq_params;
3711  	struct dc_interrupt_params int_params = {0};
3712  	int r;
3713  	int i;
3714  #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3715  	static const unsigned int vrtl_int_srcid[] = {
3716  		DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3717  		DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3718  		DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3719  		DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3720  		DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3721  		DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3722  	};
3723  #endif
3724  
3725  	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3726  	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3727  
3728  	/*
3729  	 * Actions of amdgpu_irq_add_id():
3730  	 * 1. Register a set() function with base driver.
3731  	 *    Base driver will call set() function to enable/disable an
3732  	 *    interrupt in DC hardware.
3733  	 * 2. Register amdgpu_dm_irq_handler().
3734  	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3735  	 *    coming from DC hardware.
3736  	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3737  	 *    for acknowledging and handling.
3738  	 */
3739  
3740  	/* Use VSTARTUP interrupt */
3741  	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3742  			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3743  			i++) {
3744  		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
3745  
3746  		if (r) {
3747  			DRM_ERROR("Failed to add crtc irq id!\n");
3748  			return r;
3749  		}
3750  
3751  		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3752  		int_params.irq_source =
3753  			dc_interrupt_to_irq_source(dc, i, 0);
3754  
3755  		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3756  
3757  		c_irq_params->adev = adev;
3758  		c_irq_params->irq_src = int_params.irq_source;
3759  
3760  		amdgpu_dm_irq_register_interrupt(
3761  			adev, &int_params, dm_crtc_high_irq, c_irq_params);
3762  	}
3763  
3764  	/* Use otg vertical line interrupt */
3765  #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3766  	for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3767  		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3768  				vrtl_int_srcid[i], &adev->vline0_irq);
3769  
3770  		if (r) {
3771  			DRM_ERROR("Failed to add vline0 irq id!\n");
3772  			return r;
3773  		}
3774  
3775  		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3776  		int_params.irq_source =
3777  			dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3778  
3779  		if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3780  			DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3781  			break;
3782  		}
3783  
3784  		c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3785  					- DC_IRQ_SOURCE_DC1_VLINE0];
3786  
3787  		c_irq_params->adev = adev;
3788  		c_irq_params->irq_src = int_params.irq_source;
3789  
3790  		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3791  				dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3792  	}
3793  #endif
3794  
3795  	/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3796  	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3797  	 * to trigger at the end of each vblank, regardless of the state of the
3798  	 * lock, matching DCE behaviour.
3799  	 */
3800  	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3801  	     i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3802  	     i++) {
3803  		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3804  
3805  		if (r) {
3806  			DRM_ERROR("Failed to add vupdate irq id!\n");
3807  			return r;
3808  		}
3809  
3810  		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3811  		int_params.irq_source =
3812  			dc_interrupt_to_irq_source(dc, i, 0);
3813  
3814  		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3815  
3816  		c_irq_params->adev = adev;
3817  		c_irq_params->irq_src = int_params.irq_source;
3818  
3819  		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3820  				dm_vupdate_high_irq, c_irq_params);
3821  	}
3822  
3823  	/* Use GRPH_PFLIP interrupt */
3824  	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3825  			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + dc->caps.max_otg_num - 1;
3826  			i++) {
3827  		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3828  		if (r) {
3829  			DRM_ERROR("Failed to add page flip irq id!\n");
3830  			return r;
3831  		}
3832  
3833  		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3834  		int_params.irq_source =
3835  			dc_interrupt_to_irq_source(dc, i, 0);
3836  
3837  		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3838  
3839  		c_irq_params->adev = adev;
3840  		c_irq_params->irq_src = int_params.irq_source;
3841  
3842  		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3843  				dm_pflip_high_irq, c_irq_params);
3844  
3845  	}
3846  
3847  	/* HPD */
3848  	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3849  			&adev->hpd_irq);
3850  	if (r) {
3851  		DRM_ERROR("Failed to add hpd irq id!\n");
3852  		return r;
3853  	}
3854  
3855  	register_hpd_handlers(adev);
3856  
3857  	return 0;
3858  }
3859  /* Register Outbox IRQ sources and initialize IRQ callbacks */
3860  static int register_outbox_irq_handlers(struct amdgpu_device *adev)
3861  {
3862  	struct dc *dc = adev->dm.dc;
3863  	struct common_irq_params *c_irq_params;
3864  	struct dc_interrupt_params int_params = {0};
3865  	int r, i;
3866  
3867  	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3868  	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3869  
3870  	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
3871  			&adev->dmub_outbox_irq);
3872  	if (r) {
3873  		DRM_ERROR("Failed to add outbox irq id!\n");
3874  		return r;
3875  	}
3876  
3877  	if (dc->ctx->dmub_srv) {
3878  		i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
3879  		int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3880  		int_params.irq_source =
3881  		dc_interrupt_to_irq_source(dc, i, 0);
3882  
3883  		c_irq_params = &adev->dm.dmub_outbox_params[0];
3884  
3885  		c_irq_params->adev = adev;
3886  		c_irq_params->irq_src = int_params.irq_source;
3887  
3888  		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3889  				dm_dmub_outbox1_low_irq, c_irq_params);
3890  	}
3891  
3892  	return 0;
3893  }
3894  
3895  /*
3896   * Acquires the lock for the atomic state object and returns
3897   * the new atomic state.
3898   *
3899   * This should only be called during atomic check.
3900   */
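/*
 * Illustrative call pattern (a sketch of how callers use this helper, not a
 * verbatim call site): the caller keeps a dm_state pointer initialised to
 * NULL and passes the same pointer on every call, so the private object
 * state is only looked up once per atomic check:
 *
 *	struct dm_atomic_state *dm_state = NULL;
 *	int ret = dm_atomic_get_state(state, &dm_state);
 *
 *	if (ret)
 *		return ret;
 */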
3901  int dm_atomic_get_state(struct drm_atomic_state *state,
3902  			struct dm_atomic_state **dm_state)
3903  {
3904  	struct drm_device *dev = state->dev;
3905  	struct amdgpu_device *adev = drm_to_adev(dev);
3906  	struct amdgpu_display_manager *dm = &adev->dm;
3907  	struct drm_private_state *priv_state;
3908  
3909  	if (*dm_state)
3910  		return 0;
3911  
3912  	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3913  	if (IS_ERR(priv_state))
3914  		return PTR_ERR(priv_state);
3915  
3916  	*dm_state = to_dm_atomic_state(priv_state);
3917  
3918  	return 0;
3919  }
3920  
3921  static struct dm_atomic_state *
3922  dm_atomic_get_new_state(struct drm_atomic_state *state)
3923  {
3924  	struct drm_device *dev = state->dev;
3925  	struct amdgpu_device *adev = drm_to_adev(dev);
3926  	struct amdgpu_display_manager *dm = &adev->dm;
3927  	struct drm_private_obj *obj;
3928  	struct drm_private_state *new_obj_state;
3929  	int i;
3930  
3931  	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3932  		if (obj->funcs == dm->atomic_obj.funcs)
3933  			return to_dm_atomic_state(new_obj_state);
3934  	}
3935  
3936  	return NULL;
3937  }
3938  
3939  static struct drm_private_state *
3940  dm_atomic_duplicate_state(struct drm_private_obj *obj)
3941  {
3942  	struct dm_atomic_state *old_state, *new_state;
3943  
3944  	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3945  	if (!new_state)
3946  		return NULL;
3947  
3948  	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3949  
3950  	old_state = to_dm_atomic_state(obj->state);
3951  
3952  	if (old_state && old_state->context)
3953  		new_state->context = dc_copy_state(old_state->context);
3954  
3955  	if (!new_state->context) {
3956  		kfree(new_state);
3957  		return NULL;
3958  	}
3959  
3960  	return &new_state->base;
3961  }
3962  
3963  static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3964  				    struct drm_private_state *state)
3965  {
3966  	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3967  
3968  	if (dm_state && dm_state->context)
3969  		dc_release_state(dm_state->context);
3970  
3971  	kfree(dm_state);
3972  }
3973  
3974  static struct drm_private_state_funcs dm_atomic_state_funcs = {
3975  	.atomic_duplicate_state = dm_atomic_duplicate_state,
3976  	.atomic_destroy_state = dm_atomic_destroy_state,
3977  };
3978  
3979  static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3980  {
3981  	struct dm_atomic_state *state;
3982  	int r;
3983  
3984  	adev->mode_info.mode_config_initialized = true;
3985  
3986  	adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3987  	adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3988  
3989  	adev_to_drm(adev)->mode_config.max_width = 16384;
3990  	adev_to_drm(adev)->mode_config.max_height = 16384;
3991  
3992  	adev_to_drm(adev)->mode_config.preferred_depth = 24;
3993  	if (adev->asic_type == CHIP_HAWAII)
3994  		/* disable prefer shadow for now due to hibernation issues */
3995  		adev_to_drm(adev)->mode_config.prefer_shadow = 0;
3996  	else
3997  		adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3998  	/* indicates support for immediate flip */
3999  	adev_to_drm(adev)->mode_config.async_page_flip = true;
4000  
4001  	state = kzalloc(sizeof(*state), GFP_KERNEL);
4002  	if (!state)
4003  		return -ENOMEM;
4004  
4005  	state->context = dc_create_state(adev->dm.dc);
4006  	if (!state->context) {
4007  		kfree(state);
4008  		return -ENOMEM;
4009  	}
4010  
4011  	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
4012  
4013  	drm_atomic_private_obj_init(adev_to_drm(adev),
4014  				    &adev->dm.atomic_obj,
4015  				    &state->base,
4016  				    &dm_atomic_state_funcs);
4017  
4018  	r = amdgpu_display_modeset_create_props(adev);
4019  	if (r) {
4020  		dc_release_state(state->context);
4021  		kfree(state);
4022  		return r;
4023  	}
4024  
4025  	r = amdgpu_dm_audio_init(adev);
4026  	if (r) {
4027  		dc_release_state(state->context);
4028  		kfree(state);
4029  		return r;
4030  	}
4031  
4032  	return 0;
4033  }
4034  
4035  #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
4036  #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
4037  #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
4038  
4039  static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
4040  					    int bl_idx)
4041  {
4042  #if defined(CONFIG_ACPI)
4043  	struct amdgpu_dm_backlight_caps caps;
4044  
4045  	memset(&caps, 0, sizeof(caps));
4046  
4047  	if (dm->backlight_caps[bl_idx].caps_valid)
4048  		return;
4049  
4050  	amdgpu_acpi_get_backlight_caps(&caps);
4051  	if (caps.caps_valid) {
4052  		dm->backlight_caps[bl_idx].caps_valid = true;
4053  		if (caps.aux_support)
4054  			return;
4055  		dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal;
4056  		dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal;
4057  	} else {
4058  		dm->backlight_caps[bl_idx].min_input_signal =
4059  				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
4060  		dm->backlight_caps[bl_idx].max_input_signal =
4061  				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
4062  	}
4063  #else
4064  	if (dm->backlight_caps[bl_idx].aux_support)
4065  		return;
4066  
4067  	dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
4068  	dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
4069  #endif
4070  }
4071  
4072  static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
4073  				unsigned int *min, unsigned int *max)
4074  {
4075  	if (!caps)
4076  		return 0;
4077  
4078  	if (caps->aux_support) {
4079  		// Firmware limits are in nits, DC API wants millinits.
4080  		*max = 1000 * caps->aux_max_input_signal;
4081  		*min = 1000 * caps->aux_min_input_signal;
4082  	} else {
4083  		// Firmware limits are 8-bit, PWM control is 16-bit.
4084  		*max = 0x101 * caps->max_input_signal;
4085  		*min = 0x101 * caps->min_input_signal;
4086  	}
4087  	return 1;
4088  }
4089  
4090  static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
4091  					uint32_t brightness)
4092  {
4093  	unsigned int min, max;
4094  
4095  	if (!get_brightness_range(caps, &min, &max))
4096  		return brightness;
4097  
4098  	// Rescale 0..255 to min..max
4099  	return min + DIV_ROUND_CLOSEST((max - min) * brightness,
4100  				       AMDGPU_MAX_BL_LEVEL);
4101  }
4102  
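/*
 * Worked example for the rescaling helpers (assuming the default PWM caps of
 * min_input_signal = 12 and max_input_signal = 255): get_brightness_range()
 * yields min = 0x101 * 12 = 3084 and max = 0x101 * 255 = 65535, so
 * convert_brightness_from_user() maps a user brightness of 128 to
 * 3084 + DIV_ROUND_CLOSEST(62451 * 128, 255) = 34432, and
 * convert_brightness_to_user() maps 34432 back to 128.
 */
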
4103  static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
4104  				      uint32_t brightness)
4105  {
4106  	unsigned int min, max;
4107  
4108  	if (!get_brightness_range(caps, &min, &max))
4109  		return brightness;
4110  
4111  	if (brightness < min)
4112  		return 0;
4113  	// Rescale min..max to 0..255
4114  	return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
4115  				 max - min);
4116  }
4117  
4118  static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
4119  					 int bl_idx,
4120  					 u32 user_brightness)
4121  {
4122  	struct amdgpu_dm_backlight_caps caps;
4123  	struct dc_link *link;
4124  	u32 brightness;
4125  	bool rc;
4126  
4127  	amdgpu_dm_update_backlight_caps(dm, bl_idx);
4128  	caps = dm->backlight_caps[bl_idx];
4129  
4130  	dm->brightness[bl_idx] = user_brightness;
4131  	/* update scratch register */
4132  	if (bl_idx == 0)
4133  		amdgpu_atombios_scratch_regs_set_backlight_level(dm->adev, dm->brightness[bl_idx]);
4134  	brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]);
4135  	link = (struct dc_link *)dm->backlight_link[bl_idx];
4136  
4137  	/* Change brightness based on AUX property */
4138  	if (caps.aux_support) {
4139  		rc = dc_link_set_backlight_level_nits(link, true, brightness,
4140  						      AUX_BL_DEFAULT_TRANSITION_TIME_MS);
4141  		if (!rc)
4142  			DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx);
4143  	} else {
4144  		rc = dc_link_set_backlight_level(link, brightness, 0);
4145  		if (!rc)
4146  			DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);
4147  	}
4148  
4149  	if (rc)
4150  		dm->actual_brightness[bl_idx] = user_brightness;
4151  }
4152  
4153  static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
4154  {
4155  	struct amdgpu_display_manager *dm = bl_get_data(bd);
4156  	int i;
4157  
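	/* Find which eDP backlight device this request is for; default to the first. */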
4158  	for (i = 0; i < dm->num_of_edps; i++) {
4159  		if (bd == dm->backlight_dev[i])
4160  			break;
4161  	}
4162  	if (i >= AMDGPU_DM_MAX_NUM_EDP)
4163  		i = 0;
4164  	amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness);
4165  
4166  	return 0;
4167  }
4168  
4169  static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
4170  					 int bl_idx)
4171  {
4172  	int ret;
4173  	struct amdgpu_dm_backlight_caps caps;
4174  	struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx];
4175  
4176  	amdgpu_dm_update_backlight_caps(dm, bl_idx);
4177  	caps = dm->backlight_caps[bl_idx];
4178  
4179  	if (caps.aux_support) {
4180  		u32 avg, peak;
4181  		bool rc;
4182  
4183  		rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
4184  		if (!rc)
4185  			return dm->brightness[bl_idx];
4186  		return convert_brightness_to_user(&caps, avg);
4187  	}
4188  
4189  	ret = dc_link_get_backlight_level(link);
4190  
4191  	if (ret == DC_ERROR_UNEXPECTED)
4192  		return dm->brightness[bl_idx];
4193  
4194  	return convert_brightness_to_user(&caps, ret);
4195  }
4196  
4197  static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
4198  {
4199  	struct amdgpu_display_manager *dm = bl_get_data(bd);
4200  	int i;
4201  
4202  	for (i = 0; i < dm->num_of_edps; i++) {
4203  		if (bd == dm->backlight_dev[i])
4204  			break;
4205  	}
4206  	if (i >= AMDGPU_DM_MAX_NUM_EDP)
4207  		i = 0;
4208  	return amdgpu_dm_backlight_get_level(dm, i);
4209  }
4210  
4211  static const struct backlight_ops amdgpu_dm_backlight_ops = {
4212  	.options = BL_CORE_SUSPENDRESUME,
4213  	.get_brightness = amdgpu_dm_backlight_get_brightness,
4214  	.update_status	= amdgpu_dm_backlight_update_status,
4215  };
4216  
4217  static void
4218  amdgpu_dm_register_backlight_device(struct amdgpu_dm_connector *aconnector)
4219  {
4220  	struct drm_device *drm = aconnector->base.dev;
4221  	struct amdgpu_display_manager *dm = &drm_to_adev(drm)->dm;
4222  	struct backlight_properties props = { 0 };
4223  	char bl_name[16];
4224  
4225  	if (aconnector->bl_idx == -1)
4226  		return;
4227  
4228  	if (!acpi_video_backlight_use_native()) {
4229  		drm_info(drm, "Skipping amdgpu DM backlight registration\n");
4230  		/* Try registering an ACPI video backlight device instead. */
4231  		acpi_video_register_backlight();
4232  		return;
4233  	}
4234  
4235  	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
4236  	props.brightness = AMDGPU_MAX_BL_LEVEL;
4237  	props.type = BACKLIGHT_RAW;
4238  
4239  	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
4240  		 drm->primary->index + aconnector->bl_idx);
4241  
4242  	dm->backlight_dev[aconnector->bl_idx] =
4243  		backlight_device_register(bl_name, aconnector->base.kdev, dm,
4244  					  &amdgpu_dm_backlight_ops, &props);
4245  
4246  	if (IS_ERR(dm->backlight_dev[aconnector->bl_idx])) {
4247  		DRM_ERROR("DM: Backlight registration failed!\n");
4248  		dm->backlight_dev[aconnector->bl_idx] = NULL;
4249  	} else
4250  		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
4251  }
4252  
4253  static int initialize_plane(struct amdgpu_display_manager *dm,
4254  			    struct amdgpu_mode_info *mode_info, int plane_id,
4255  			    enum drm_plane_type plane_type,
4256  			    const struct dc_plane_cap *plane_cap)
4257  {
4258  	struct drm_plane *plane;
4259  	unsigned long possible_crtcs;
4260  	int ret = 0;
4261  
4262  	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
4263  	if (!plane) {
4264  		DRM_ERROR("KMS: Failed to allocate plane\n");
4265  		return -ENOMEM;
4266  	}
4267  	plane->type = plane_type;
4268  
4269  	/*
4270  	 * HACK: IGT tests expect that the primary plane for a CRTC
4271  	 * can only have one possible CRTC. Only expose support for
4272  	 * any CRTC on planes that are not going to be used as a
4273  	 * primary plane for a CRTC - like overlay or underlay planes.
4274  	 */
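	/* e.g. plane_id 0 gets possible_crtcs 0x1; overlay/underlay planes get 0xff (any CRTC). */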
4275  	possible_crtcs = 1 << plane_id;
4276  	if (plane_id >= dm->dc->caps.max_streams)
4277  		possible_crtcs = 0xff;
4278  
4279  	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
4280  
4281  	if (ret) {
4282  		DRM_ERROR("KMS: Failed to initialize plane\n");
4283  		kfree(plane);
4284  		return ret;
4285  	}
4286  
4287  	if (mode_info)
4288  		mode_info->planes[plane_id] = plane;
4289  
4290  	return ret;
4291  }
4292  
4293  
4294  static void setup_backlight_device(struct amdgpu_display_manager *dm,
4295  				   struct amdgpu_dm_connector *aconnector)
4296  {
4297  	struct dc_link *link = aconnector->dc_link;
4298  	int bl_idx = dm->num_of_edps;
4299  
4300  	if (!(link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) ||
4301  	    link->type == dc_connection_none)
4302  		return;
4303  
4304  	if (dm->num_of_edps >= AMDGPU_DM_MAX_NUM_EDP) {
4305  		drm_warn(adev_to_drm(dm->adev), "Too many eDP connections, skipping backlight setup for additional eDPs\n");
4306  		return;
4307  	}
4308  
4309  	aconnector->bl_idx = bl_idx;
4310  
4311  	amdgpu_dm_update_backlight_caps(dm, bl_idx);
4312  	dm->brightness[bl_idx] = AMDGPU_MAX_BL_LEVEL;
4313  	dm->backlight_link[bl_idx] = link;
4314  	dm->num_of_edps++;
4315  
4316  	update_connector_ext_caps(aconnector);
4317  }
4318  
4319  static void amdgpu_set_panel_orientation(struct drm_connector *connector);
4320  
4321  /*
4322   * In this architecture, the association
4323   * connector -> encoder -> crtc
4324   * is not really required. The crtc and connector will hold the
4325   * display_index as an abstraction to use with the DAL component.
4326   *
4327   * Returns 0 on success
4328   */
4329  static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
4330  {
4331  	struct amdgpu_display_manager *dm = &adev->dm;
4332  	s32 i;
4333  	struct amdgpu_dm_connector *aconnector = NULL;
4334  	struct amdgpu_encoder *aencoder = NULL;
4335  	struct amdgpu_mode_info *mode_info = &adev->mode_info;
4336  	u32 link_cnt;
4337  	s32 primary_planes;
4338  	enum dc_connection_type new_connection_type = dc_connection_none;
4339  	const struct dc_plane_cap *plane;
4340  	bool psr_feature_enabled = false;
4341  	bool replay_feature_enabled = false;
4342  	int max_overlay = dm->dc->caps.max_slave_planes;
4343  
4344  	dm->display_indexes_num = dm->dc->caps.max_streams;
4345  	/* Update the actual used number of crtc */
4346  	adev->mode_info.num_crtc = adev->dm.display_indexes_num;
4347  
4348  	amdgpu_dm_set_irq_funcs(adev);
4349  
4350  	link_cnt = dm->dc->caps.max_links;
4351  	if (amdgpu_dm_mode_config_init(dm->adev)) {
4352  		DRM_ERROR("DM: Failed to initialize mode config\n");
4353  		return -EINVAL;
4354  	}
4355  
4356  	/* There is one primary plane per CRTC */
4357  	primary_planes = dm->dc->caps.max_streams;
4358  	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
4359  
4360  	/*
4361  	 * Initialize primary planes, implicit planes for legacy IOCTLS.
4362  	 * Order is reversed to match iteration order in atomic check.
4363  	 */
4364  	for (i = (primary_planes - 1); i >= 0; i--) {
4365  		plane = &dm->dc->caps.planes[i];
4366  
4367  		if (initialize_plane(dm, mode_info, i,
4368  				     DRM_PLANE_TYPE_PRIMARY, plane)) {
4369  			DRM_ERROR("KMS: Failed to initialize primary plane\n");
4370  			goto fail;
4371  		}
4372  	}
4373  
4374  	/*
4375  	 * Initialize overlay planes, index starting after primary planes.
4376  	 * These planes have a higher DRM index than the primary planes since
4377  	 * they should be considered as having a higher z-order.
4378  	 * Order is reversed to match iteration order in atomic check.
4379  	 *
4380  	 * Only support DCN for now, and only expose one so we don't encourage
4381  	 * userspace to use up all the pipes.
4382  	 */
4383  	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
4384  		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
4385  
4386  		/* Do not create overlay if MPO disabled */
4387  		if (amdgpu_dc_debug_mask & DC_DISABLE_MPO)
4388  			break;
4389  
4390  		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
4391  			continue;
4392  
4393  		if (!plane->pixel_format_support.argb8888)
4394  			continue;
4395  
4396  		if (max_overlay-- == 0)
4397  			break;
4398  
4399  		if (initialize_plane(dm, NULL, primary_planes + i,
4400  				     DRM_PLANE_TYPE_OVERLAY, plane)) {
4401  			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
4402  			goto fail;
4403  		}
4404  	}
4405  
4406  	for (i = 0; i < dm->dc->caps.max_streams; i++)
4407  		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
4408  			DRM_ERROR("KMS: Failed to initialize crtc\n");
4409  			goto fail;
4410  		}
4411  
4412  	/* Use Outbox interrupt */
4413  	switch (adev->ip_versions[DCE_HWIP][0]) {
4414  	case IP_VERSION(3, 0, 0):
4415  	case IP_VERSION(3, 1, 2):
4416  	case IP_VERSION(3, 1, 3):
4417  	case IP_VERSION(3, 1, 4):
4418  	case IP_VERSION(3, 1, 5):
4419  	case IP_VERSION(3, 1, 6):
4420  	case IP_VERSION(3, 2, 0):
4421  	case IP_VERSION(3, 2, 1):
4422  	case IP_VERSION(2, 1, 0):
4423  		if (register_outbox_irq_handlers(dm->adev)) {
4424  			DRM_ERROR("DM: Failed to initialize IRQ\n");
4425  			goto fail;
4426  		}
4427  		break;
4428  	default:
4429  		DRM_DEBUG_KMS("Unsupported DCN IP version for outbox: 0x%X\n",
4430  			      adev->ip_versions[DCE_HWIP][0]);
4431  	}
4432  
4433  	/* Determine whether to enable PSR support by default. */
4434  	if (!(amdgpu_dc_debug_mask & DC_DISABLE_PSR)) {
4435  		switch (adev->ip_versions[DCE_HWIP][0]) {
4436  		case IP_VERSION(3, 1, 2):
4437  		case IP_VERSION(3, 1, 3):
4438  		case IP_VERSION(3, 1, 4):
4439  		case IP_VERSION(3, 1, 5):
4440  		case IP_VERSION(3, 1, 6):
4441  		case IP_VERSION(3, 2, 0):
4442  		case IP_VERSION(3, 2, 1):
4443  			psr_feature_enabled = true;
4444  			break;
4445  		default:
4446  			psr_feature_enabled = amdgpu_dc_feature_mask & DC_PSR_MASK;
4447  			break;
4448  		}
4449  	}
4450  
4451  	if (!(amdgpu_dc_debug_mask & DC_DISABLE_REPLAY)) {
4452  		switch (adev->ip_versions[DCE_HWIP][0]) {
4453  		case IP_VERSION(3, 1, 4):
4454  		case IP_VERSION(3, 1, 5):
4455  		case IP_VERSION(3, 1, 6):
4456  		case IP_VERSION(3, 2, 0):
4457  		case IP_VERSION(3, 2, 1):
4458  			replay_feature_enabled = true;
4459  			break;
4460  		default:
4461  			replay_feature_enabled = amdgpu_dc_feature_mask & DC_REPLAY_MASK;
4462  			break;
4463  		}
4464  	}
4465  	/* loops over all connectors on the board */
4466  	for (i = 0; i < link_cnt; i++) {
4467  		struct dc_link *link = NULL;
4468  
4469  		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
4470  			DRM_ERROR(
4471  				"KMS: Cannot support more than %d display indexes\n",
4472  					AMDGPU_DM_MAX_DISPLAY_INDEX);
4473  			continue;
4474  		}
4475  
4476  		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
4477  		if (!aconnector)
4478  			goto fail;
4479  
4480  		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
4481  		if (!aencoder)
4482  			goto fail;
4483  
4484  		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
4485  			DRM_ERROR("KMS: Failed to initialize encoder\n");
4486  			goto fail;
4487  		}
4488  
4489  		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
4490  			DRM_ERROR("KMS: Failed to initialize connector\n");
4491  			goto fail;
4492  		}
4493  
4494  		link = dc_get_link_at_index(dm->dc, i);
4495  
4496  		if (!dc_link_detect_connection_type(link, &new_connection_type))
4497  			DRM_ERROR("KMS: Failed to detect connector\n");
4498  
4499  		if (aconnector->base.force && new_connection_type == dc_connection_none) {
4500  			emulated_link_detect(link);
4501  			amdgpu_dm_update_connector_after_detect(aconnector);
4502  		} else {
4503  			bool ret = false;
4504  
4505  			mutex_lock(&dm->dc_lock);
4506  			ret = dc_link_detect(link, DETECT_REASON_BOOT);
4507  			mutex_unlock(&dm->dc_lock);
4508  
4509  			if (ret) {
4510  				amdgpu_dm_update_connector_after_detect(aconnector);
4511  				setup_backlight_device(dm, aconnector);
4512  
4513  				/*
4514  				 * Disable PSR if Replay can be enabled
4515  				 */
4516  				if (replay_feature_enabled && amdgpu_dm_setup_replay(link, aconnector))
4517  					psr_feature_enabled = false;
4518  
4519  				if (psr_feature_enabled)
4520  					amdgpu_dm_set_psr_caps(link);
4521  
4522  				/* TODO: Fix vblank control helpers to delay PSR entry to allow this when
4523  				 * PSR is also supported.
4524  				 */
4525  				if (link->psr_settings.psr_feature_enabled)
4526  					adev_to_drm(adev)->vblank_disable_immediate = false;
4527  			}
4528  		}
4529  		amdgpu_set_panel_orientation(&aconnector->base);
4530  	}
4531  
4532  	/* Software is initialized. Now we can register interrupt handlers. */
4533  	switch (adev->asic_type) {
4534  #if defined(CONFIG_DRM_AMD_DC_SI)
4535  	case CHIP_TAHITI:
4536  	case CHIP_PITCAIRN:
4537  	case CHIP_VERDE:
4538  	case CHIP_OLAND:
4539  		if (dce60_register_irq_handlers(dm->adev)) {
4540  			DRM_ERROR("DM: Failed to initialize IRQ\n");
4541  			goto fail;
4542  		}
4543  		break;
4544  #endif
4545  	case CHIP_BONAIRE:
4546  	case CHIP_HAWAII:
4547  	case CHIP_KAVERI:
4548  	case CHIP_KABINI:
4549  	case CHIP_MULLINS:
4550  	case CHIP_TONGA:
4551  	case CHIP_FIJI:
4552  	case CHIP_CARRIZO:
4553  	case CHIP_STONEY:
4554  	case CHIP_POLARIS11:
4555  	case CHIP_POLARIS10:
4556  	case CHIP_POLARIS12:
4557  	case CHIP_VEGAM:
4558  	case CHIP_VEGA10:
4559  	case CHIP_VEGA12:
4560  	case CHIP_VEGA20:
4561  		if (dce110_register_irq_handlers(dm->adev)) {
4562  			DRM_ERROR("DM: Failed to initialize IRQ\n");
4563  			goto fail;
4564  		}
4565  		break;
4566  	default:
4567  		switch (adev->ip_versions[DCE_HWIP][0]) {
4568  		case IP_VERSION(1, 0, 0):
4569  		case IP_VERSION(1, 0, 1):
4570  		case IP_VERSION(2, 0, 2):
4571  		case IP_VERSION(2, 0, 3):
4572  		case IP_VERSION(2, 0, 0):
4573  		case IP_VERSION(2, 1, 0):
4574  		case IP_VERSION(3, 0, 0):
4575  		case IP_VERSION(3, 0, 2):
4576  		case IP_VERSION(3, 0, 3):
4577  		case IP_VERSION(3, 0, 1):
4578  		case IP_VERSION(3, 1, 2):
4579  		case IP_VERSION(3, 1, 3):
4580  		case IP_VERSION(3, 1, 4):
4581  		case IP_VERSION(3, 1, 5):
4582  		case IP_VERSION(3, 1, 6):
4583  		case IP_VERSION(3, 2, 0):
4584  		case IP_VERSION(3, 2, 1):
4585  			if (dcn10_register_irq_handlers(dm->adev)) {
4586  				DRM_ERROR("DM: Failed to initialize IRQ\n");
4587  				goto fail;
4588  			}
4589  			break;
4590  		default:
4591  			DRM_ERROR("Unsupported DCE IP versions: 0x%X\n",
4592  					adev->ip_versions[DCE_HWIP][0]);
4593  			goto fail;
4594  		}
4595  		break;
4596  	}
4597  
4598  	return 0;
4599  fail:
4600  	kfree(aencoder);
4601  	kfree(aconnector);
4602  
4603  	return -EINVAL;
4604  }
4605  
4606  static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
4607  {
4608  	drm_atomic_private_obj_fini(&dm->atomic_obj);
4609  }
4610  
4611  /******************************************************************************
4612   * amdgpu_display_funcs functions
4613   *****************************************************************************/
4614  
4615  /*
4616   * dm_bandwidth_update - program display watermarks
4617   *
4618   * @adev: amdgpu_device pointer
4619   *
4620   * Calculate and program the display watermarks and line buffer allocation.
4621   */
4622  static void dm_bandwidth_update(struct amdgpu_device *adev)
4623  {
4624  	/* TODO: implement later */
4625  }
4626  
4627  static const struct amdgpu_display_funcs dm_display_funcs = {
4628  	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
4629  	.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
4630  	.backlight_set_level = NULL, /* never called for DC */
4631  	.backlight_get_level = NULL, /* never called for DC */
4632  	.hpd_sense = NULL,/* called unconditionally */
4633  	.hpd_set_polarity = NULL, /* called unconditionally */
4634  	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
4635  	.page_flip_get_scanoutpos =
4636  		dm_crtc_get_scanoutpos,/* called unconditionally */
4637  	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
4638  	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
4639  };
4640  
4641  #if defined(CONFIG_DEBUG_KERNEL_DC)
4642  
4643  static ssize_t s3_debug_store(struct device *device,
4644  			      struct device_attribute *attr,
4645  			      const char *buf,
4646  			      size_t count)
4647  {
4648  	int ret;
4649  	int s3_state;
4650  	struct drm_device *drm_dev = dev_get_drvdata(device);
4651  	struct amdgpu_device *adev = drm_to_adev(drm_dev);
4652  
4653  	ret = kstrtoint(buf, 0, &s3_state);
4654  
4655  	if (ret == 0) {
4656  		if (s3_state) {
4657  			dm_resume(adev);
4658  			drm_kms_helper_hotplug_event(adev_to_drm(adev));
4659  		} else
4660  			dm_suspend(adev);
4661  	}
4662  
4663  	return ret == 0 ? count : 0;
4664  }
4665  
4666  DEVICE_ATTR_WO(s3_debug);
4667  
4668  #endif
4669  
4670  static int dm_init_microcode(struct amdgpu_device *adev)
4671  {
4672  	char *fw_name_dmub;
4673  	int r;
4674  
4675  	switch (adev->ip_versions[DCE_HWIP][0]) {
4676  	case IP_VERSION(2, 1, 0):
4677  		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
4678  		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
4679  			fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
4680  		break;
4681  	case IP_VERSION(3, 0, 0):
4682  		if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0))
4683  			fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
4684  		else
4685  			fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
4686  		break;
4687  	case IP_VERSION(3, 0, 1):
4688  		fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
4689  		break;
4690  	case IP_VERSION(3, 0, 2):
4691  		fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
4692  		break;
4693  	case IP_VERSION(3, 0, 3):
4694  		fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
4695  		break;
4696  	case IP_VERSION(3, 1, 2):
4697  	case IP_VERSION(3, 1, 3):
4698  		fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
4699  		break;
4700  	case IP_VERSION(3, 1, 4):
4701  		fw_name_dmub = FIRMWARE_DCN_314_DMUB;
4702  		break;
4703  	case IP_VERSION(3, 1, 5):
4704  		fw_name_dmub = FIRMWARE_DCN_315_DMUB;
4705  		break;
4706  	case IP_VERSION(3, 1, 6):
4707  		fw_name_dmub = FIRMWARE_DCN316_DMUB;
4708  		break;
4709  	case IP_VERSION(3, 2, 0):
4710  		fw_name_dmub = FIRMWARE_DCN_V3_2_0_DMCUB;
4711  		break;
4712  	case IP_VERSION(3, 2, 1):
4713  		fw_name_dmub = FIRMWARE_DCN_V3_2_1_DMCUB;
4714  		break;
4715  	default:
4716  		/* ASIC doesn't support DMUB. */
4717  		return 0;
4718  	}
4719  	r = amdgpu_ucode_request(adev, &adev->dm.dmub_fw, fw_name_dmub);
4720  	if (r)
4721  		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
4722  	return r;
4723  }
4724  
4725  static int dm_early_init(void *handle)
4726  {
4727  	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4728  	struct amdgpu_mode_info *mode_info = &adev->mode_info;
4729  	struct atom_context *ctx = mode_info->atom_context;
4730  	int index = GetIndexIntoMasterTable(DATA, Object_Header);
4731  	u16 data_offset;
4732  
4733  	/* if there is no object header, skip DM */
4734  	if (!amdgpu_atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) {
4735  		adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
4736  		dev_info(adev->dev, "No object header, skipping DM\n");
4737  		return -ENOENT;
4738  	}
4739  
4740  	switch (adev->asic_type) {
4741  #if defined(CONFIG_DRM_AMD_DC_SI)
4742  	case CHIP_TAHITI:
4743  	case CHIP_PITCAIRN:
4744  	case CHIP_VERDE:
4745  		adev->mode_info.num_crtc = 6;
4746  		adev->mode_info.num_hpd = 6;
4747  		adev->mode_info.num_dig = 6;
4748  		break;
4749  	case CHIP_OLAND:
4750  		adev->mode_info.num_crtc = 2;
4751  		adev->mode_info.num_hpd = 2;
4752  		adev->mode_info.num_dig = 2;
4753  		break;
4754  #endif
4755  	case CHIP_BONAIRE:
4756  	case CHIP_HAWAII:
4757  		adev->mode_info.num_crtc = 6;
4758  		adev->mode_info.num_hpd = 6;
4759  		adev->mode_info.num_dig = 6;
4760  		break;
4761  	case CHIP_KAVERI:
4762  		adev->mode_info.num_crtc = 4;
4763  		adev->mode_info.num_hpd = 6;
4764  		adev->mode_info.num_dig = 7;
4765  		break;
4766  	case CHIP_KABINI:
4767  	case CHIP_MULLINS:
4768  		adev->mode_info.num_crtc = 2;
4769  		adev->mode_info.num_hpd = 6;
4770  		adev->mode_info.num_dig = 6;
4771  		break;
4772  	case CHIP_FIJI:
4773  	case CHIP_TONGA:
4774  		adev->mode_info.num_crtc = 6;
4775  		adev->mode_info.num_hpd = 6;
4776  		adev->mode_info.num_dig = 7;
4777  		break;
4778  	case CHIP_CARRIZO:
4779  		adev->mode_info.num_crtc = 3;
4780  		adev->mode_info.num_hpd = 6;
4781  		adev->mode_info.num_dig = 9;
4782  		break;
4783  	case CHIP_STONEY:
4784  		adev->mode_info.num_crtc = 2;
4785  		adev->mode_info.num_hpd = 6;
4786  		adev->mode_info.num_dig = 9;
4787  		break;
4788  	case CHIP_POLARIS11:
4789  	case CHIP_POLARIS12:
4790  		adev->mode_info.num_crtc = 5;
4791  		adev->mode_info.num_hpd = 5;
4792  		adev->mode_info.num_dig = 5;
4793  		break;
4794  	case CHIP_POLARIS10:
4795  	case CHIP_VEGAM:
4796  		adev->mode_info.num_crtc = 6;
4797  		adev->mode_info.num_hpd = 6;
4798  		adev->mode_info.num_dig = 6;
4799  		break;
4800  	case CHIP_VEGA10:
4801  	case CHIP_VEGA12:
4802  	case CHIP_VEGA20:
4803  		adev->mode_info.num_crtc = 6;
4804  		adev->mode_info.num_hpd = 6;
4805  		adev->mode_info.num_dig = 6;
4806  		break;
4807  	default:
4808  
4809  		switch (adev->ip_versions[DCE_HWIP][0]) {
4810  		case IP_VERSION(2, 0, 2):
4811  		case IP_VERSION(3, 0, 0):
4812  			adev->mode_info.num_crtc = 6;
4813  			adev->mode_info.num_hpd = 6;
4814  			adev->mode_info.num_dig = 6;
4815  			break;
4816  		case IP_VERSION(2, 0, 0):
4817  		case IP_VERSION(3, 0, 2):
4818  			adev->mode_info.num_crtc = 5;
4819  			adev->mode_info.num_hpd = 5;
4820  			adev->mode_info.num_dig = 5;
4821  			break;
4822  		case IP_VERSION(2, 0, 3):
4823  		case IP_VERSION(3, 0, 3):
4824  			adev->mode_info.num_crtc = 2;
4825  			adev->mode_info.num_hpd = 2;
4826  			adev->mode_info.num_dig = 2;
4827  			break;
4828  		case IP_VERSION(1, 0, 0):
4829  		case IP_VERSION(1, 0, 1):
4830  		case IP_VERSION(3, 0, 1):
4831  		case IP_VERSION(2, 1, 0):
4832  		case IP_VERSION(3, 1, 2):
4833  		case IP_VERSION(3, 1, 3):
4834  		case IP_VERSION(3, 1, 4):
4835  		case IP_VERSION(3, 1, 5):
4836  		case IP_VERSION(3, 1, 6):
4837  		case IP_VERSION(3, 2, 0):
4838  		case IP_VERSION(3, 2, 1):
4839  			adev->mode_info.num_crtc = 4;
4840  			adev->mode_info.num_hpd = 4;
4841  			adev->mode_info.num_dig = 4;
4842  			break;
4843  		default:
4844  			DRM_ERROR("Unsupported DCE IP versions: 0x%x\n",
4845  					adev->ip_versions[DCE_HWIP][0]);
4846  			return -EINVAL;
4847  		}
4848  		break;
4849  	}
4850  
4851  	if (adev->mode_info.funcs == NULL)
4852  		adev->mode_info.funcs = &dm_display_funcs;
4853  
4854  	/*
4855  	 * Note: Do NOT change adev->audio_endpt_rreg and
4856  	 * adev->audio_endpt_wreg because they are initialised in
4857  	 * amdgpu_device_init()
4858  	 */
4859  #if defined(CONFIG_DEBUG_KERNEL_DC)
4860  	device_create_file(
4861  		adev_to_drm(adev)->dev,
4862  		&dev_attr_s3_debug);
4863  #endif
4864  	adev->dc_enabled = true;
4865  
4866  	return dm_init_microcode(adev);
4867  }
4868  
4869  static bool modereset_required(struct drm_crtc_state *crtc_state)
4870  {
4871  	return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4872  }
4873  
4874  static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
4875  {
4876  	drm_encoder_cleanup(encoder);
4877  	kfree(encoder);
4878  }
4879  
4880  static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
4881  	.destroy = amdgpu_dm_encoder_destroy,
4882  };
4883  
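/*
 * Derive the DC color space for a plane from its DRM color encoding and
 * range properties. Only YCbCr (video) surface formats are affected; RGB
 * formats always use sRGB. Limited-range BT.2020 is not supported.
 */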
4884  static int
4885  fill_plane_color_attributes(const struct drm_plane_state *plane_state,
4886  			    const enum surface_pixel_format format,
4887  			    enum dc_color_space *color_space)
4888  {
4889  	bool full_range;
4890  
4891  	*color_space = COLOR_SPACE_SRGB;
4892  
4893  	/* DRM color properties only affect non-RGB formats. */
4894  	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
4895  		return 0;
4896  
4897  	full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
4898  
4899  	switch (plane_state->color_encoding) {
4900  	case DRM_COLOR_YCBCR_BT601:
4901  		if (full_range)
4902  			*color_space = COLOR_SPACE_YCBCR601;
4903  		else
4904  			*color_space = COLOR_SPACE_YCBCR601_LIMITED;
4905  		break;
4906  
4907  	case DRM_COLOR_YCBCR_BT709:
4908  		if (full_range)
4909  			*color_space = COLOR_SPACE_YCBCR709;
4910  		else
4911  			*color_space = COLOR_SPACE_YCBCR709_LIMITED;
4912  		break;
4913  
4914  	case DRM_COLOR_YCBCR_BT2020:
4915  		if (full_range)
4916  			*color_space = COLOR_SPACE_2020_YCBCR;
4917  		else
4918  			return -EINVAL;
4919  		break;
4920  
4921  	default:
4922  		return -EINVAL;
4923  	}
4924  
4925  	return 0;
4926  }
4927  
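/*
 * Translate a DRM plane state into DC's plane_info and address: map the
 * fourcc to a DC surface format, convert the rotation, then fill the color
 * space, buffer/DCC and blending attributes via the plane helpers.
 */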
4928  static int
4929  fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
4930  			    const struct drm_plane_state *plane_state,
4931  			    const u64 tiling_flags,
4932  			    struct dc_plane_info *plane_info,
4933  			    struct dc_plane_address *address,
4934  			    bool tmz_surface,
4935  			    bool force_disable_dcc)
4936  {
4937  	const struct drm_framebuffer *fb = plane_state->fb;
4938  	const struct amdgpu_framebuffer *afb =
4939  		to_amdgpu_framebuffer(plane_state->fb);
4940  	int ret;
4941  
4942  	memset(plane_info, 0, sizeof(*plane_info));
4943  
4944  	switch (fb->format->format) {
4945  	case DRM_FORMAT_C8:
4946  		plane_info->format =
4947  			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
4948  		break;
4949  	case DRM_FORMAT_RGB565:
4950  		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
4951  		break;
4952  	case DRM_FORMAT_XRGB8888:
4953  	case DRM_FORMAT_ARGB8888:
4954  		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
4955  		break;
4956  	case DRM_FORMAT_XRGB2101010:
4957  	case DRM_FORMAT_ARGB2101010:
4958  		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
4959  		break;
4960  	case DRM_FORMAT_XBGR2101010:
4961  	case DRM_FORMAT_ABGR2101010:
4962  		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
4963  		break;
4964  	case DRM_FORMAT_XBGR8888:
4965  	case DRM_FORMAT_ABGR8888:
4966  		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
4967  		break;
4968  	case DRM_FORMAT_NV21:
4969  		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
4970  		break;
4971  	case DRM_FORMAT_NV12:
4972  		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
4973  		break;
4974  	case DRM_FORMAT_P010:
4975  		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
4976  		break;
4977  	case DRM_FORMAT_XRGB16161616F:
4978  	case DRM_FORMAT_ARGB16161616F:
4979  		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
4980  		break;
4981  	case DRM_FORMAT_XBGR16161616F:
4982  	case DRM_FORMAT_ABGR16161616F:
4983  		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
4984  		break;
4985  	case DRM_FORMAT_XRGB16161616:
4986  	case DRM_FORMAT_ARGB16161616:
4987  		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616;
4988  		break;
4989  	case DRM_FORMAT_XBGR16161616:
4990  	case DRM_FORMAT_ABGR16161616:
4991  		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
4992  		break;
4993  	default:
4994  		DRM_ERROR(
4995  			"Unsupported screen format %p4cc\n",
4996  			&fb->format->format);
4997  		return -EINVAL;
4998  	}
4999  
5000  	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
5001  	case DRM_MODE_ROTATE_0:
5002  		plane_info->rotation = ROTATION_ANGLE_0;
5003  		break;
5004  	case DRM_MODE_ROTATE_90:
5005  		plane_info->rotation = ROTATION_ANGLE_90;
5006  		break;
5007  	case DRM_MODE_ROTATE_180:
5008  		plane_info->rotation = ROTATION_ANGLE_180;
5009  		break;
5010  	case DRM_MODE_ROTATE_270:
5011  		plane_info->rotation = ROTATION_ANGLE_270;
5012  		break;
5013  	default:
5014  		plane_info->rotation = ROTATION_ANGLE_0;
5015  		break;
5016  	}
5017  
5018  
5019  	plane_info->visible = true;
5020  	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
5021  
5022  	plane_info->layer_index = plane_state->normalized_zpos;
5023  
5024  	ret = fill_plane_color_attributes(plane_state, plane_info->format,
5025  					  &plane_info->color_space);
5026  	if (ret)
5027  		return ret;
5028  
5029  	ret = amdgpu_dm_plane_fill_plane_buffer_attributes(adev, afb, plane_info->format,
5030  					   plane_info->rotation, tiling_flags,
5031  					   &plane_info->tiling_info,
5032  					   &plane_info->plane_size,
5033  					   &plane_info->dcc, address,
5034  					   tmz_surface, force_disable_dcc);
5035  	if (ret)
5036  		return ret;
5037  
5038  	amdgpu_dm_plane_fill_blending_from_plane_state(
5039  		plane_state, &plane_info->per_pixel_alpha, &plane_info->pre_multiplied_alpha,
5040  		&plane_info->global_alpha, &plane_info->global_alpha_value);
5041  
5042  	return 0;
5043  }
5044  
5045  static int fill_dc_plane_attributes(struct amdgpu_device *adev,
5046  				    struct dc_plane_state *dc_plane_state,
5047  				    struct drm_plane_state *plane_state,
5048  				    struct drm_crtc_state *crtc_state)
5049  {
5050  	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
5051  	struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
5052  	struct dc_scaling_info scaling_info;
5053  	struct dc_plane_info plane_info;
5054  	int ret;
5055  	bool force_disable_dcc = false;
5056  
5057  	ret = amdgpu_dm_plane_fill_dc_scaling_info(adev, plane_state, &scaling_info);
5058  	if (ret)
5059  		return ret;
5060  
5061  	dc_plane_state->src_rect = scaling_info.src_rect;
5062  	dc_plane_state->dst_rect = scaling_info.dst_rect;
5063  	dc_plane_state->clip_rect = scaling_info.clip_rect;
5064  	dc_plane_state->scaling_quality = scaling_info.scaling_quality;
5065  
5066  	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5067  	ret = fill_dc_plane_info_and_addr(adev, plane_state,
5068  					  afb->tiling_flags,
5069  					  &plane_info,
5070  					  &dc_plane_state->address,
5071  					  afb->tmz_surface,
5072  					  force_disable_dcc);
5073  	if (ret)
5074  		return ret;
5075  
5076  	dc_plane_state->format = plane_info.format;
5077  	dc_plane_state->color_space = plane_info.color_space;
5079  	dc_plane_state->plane_size = plane_info.plane_size;
5080  	dc_plane_state->rotation = plane_info.rotation;
5081  	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
5082  	dc_plane_state->stereo_format = plane_info.stereo_format;
5083  	dc_plane_state->tiling_info = plane_info.tiling_info;
5084  	dc_plane_state->visible = plane_info.visible;
5085  	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
5086  	dc_plane_state->pre_multiplied_alpha = plane_info.pre_multiplied_alpha;
5087  	dc_plane_state->global_alpha = plane_info.global_alpha;
5088  	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
5089  	dc_plane_state->dcc = plane_info.dcc;
5090  	dc_plane_state->layer_index = plane_info.layer_index;
5091  	dc_plane_state->flip_int_enabled = true;
5092  
5093  	/*
5094  	 * Always set input transfer function, since plane state is refreshed
5095  	 * every time.
5096  	 */
5097  	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
5098  	if (ret)
5099  		return ret;
5100  
5101  	return 0;
5102  }
5103  
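/*
 * Record a single dirty rectangle and advance the rect counter. @ffu marks
 * a full-frame update covering the whole CRTC rather than a selective
 * update clip.
 */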
5104  static inline void fill_dc_dirty_rect(struct drm_plane *plane,
5105  				      struct rect *dirty_rect, int32_t x,
5106  				      s32 y, s32 width, s32 height,
5107  				      int *i, bool ffu)
5108  {
5109  	WARN_ON(*i >= DC_MAX_DIRTY_RECTS);
5110  
5111  	dirty_rect->x = x;
5112  	dirty_rect->y = y;
5113  	dirty_rect->width = width;
5114  	dirty_rect->height = height;
5115  
5116  	if (ffu)
5117  		drm_dbg(plane->dev,
5118  			"[PLANE:%d] PSR FFU dirty rect size (%d, %d)\n",
5119  			plane->base.id, width, height);
5120  	else
5121  		drm_dbg(plane->dev,
5122  			"[PLANE:%d] PSR SU dirty rect at (%d, %d) size (%d, %d)",
5123  			plane->base.id, x, y, width, height);
5124  
5125  	(*i)++;
5126  }
5127  
5128  /**
5129   * fill_dc_dirty_rects() - Fill DC dirty regions for PSR selective updates
5130   *
5131   * @plane: DRM plane containing dirty regions that need to be flushed to the eDP
5132   *         remote fb
5133   * @old_plane_state: Old state of @plane
5134   * @new_plane_state: New state of @plane
5135   * @crtc_state: New state of CRTC connected to the @plane
5136   * @flip_addrs: DC flip tracking struct, which also tracks dirty rects
5137   * @dirty_regions_changed: dirty regions changed
5138   *
5139   * For PSR SU, DC informs the DMUB uController of dirty rectangle regions
5140   * (referred to as "damage clips" in DRM nomenclature) that require updating on
5141   * the eDP remote buffer. The responsibility of specifying the dirty regions is
5142   * amdgpu_dm's.
5143   *
5144   * A damage-aware DRM client should fill the FB_DAMAGE_CLIPS property on the
5145   * plane with regions that require flushing to the eDP remote buffer. In
5146   * addition, certain use cases - such as cursor and multi-plane overlay (MPO) -
5147   * implicitly provide damage clips without any client support via the plane
5148   * bounds.
5149   */
5150  static void fill_dc_dirty_rects(struct drm_plane *plane,
5151  				struct drm_plane_state *old_plane_state,
5152  				struct drm_plane_state *new_plane_state,
5153  				struct drm_crtc_state *crtc_state,
5154  				struct dc_flip_addrs *flip_addrs,
5155  				bool *dirty_regions_changed)
5156  {
5157  	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
5158  	struct rect *dirty_rects = flip_addrs->dirty_rects;
5159  	u32 num_clips;
5160  	struct drm_mode_rect *clips;
5161  	bool bb_changed;
5162  	bool fb_changed;
5163  	u32 i = 0;
5164  	*dirty_regions_changed = false;
5165  
5166  	/*
5167  	 * The cursor plane has its own dirty rect update interface. See
5168  	 * dcn10_dmub_update_cursor_data and dmub_cmd_update_cursor_info_data
5169  	 */
5170  	if (plane->type == DRM_PLANE_TYPE_CURSOR)
5171  		return;
5172  
5173  	num_clips = drm_plane_get_damage_clips_count(new_plane_state);
5174  	clips = drm_plane_get_damage_clips(new_plane_state);
5175  
5176  	if (!dm_crtc_state->mpo_requested) {
5177  		if (!num_clips || num_clips > DC_MAX_DIRTY_RECTS)
5178  			goto ffu;
5179  
5180  		for (; flip_addrs->dirty_rect_count < num_clips; clips++)
5181  			fill_dc_dirty_rect(new_plane_state->plane,
5182  					   &dirty_rects[flip_addrs->dirty_rect_count],
5183  					   clips->x1, clips->y1,
5184  					   clips->x2 - clips->x1, clips->y2 - clips->y1,
5185  					   &flip_addrs->dirty_rect_count,
5186  					   false);
5187  		return;
5188  	}
5189  
5190  	/*
5191  	 * MPO is requested. Add entire plane bounding box to dirty rects if
5192  	 * flipped to or damaged.
5193  	 *
5194  	 * If plane is moved or resized, also add old bounding box to dirty
5195  	 * rects.
5196  	 */
5197  	fb_changed = old_plane_state->fb->base.id !=
5198  		     new_plane_state->fb->base.id;
5199  	bb_changed = (old_plane_state->crtc_x != new_plane_state->crtc_x ||
5200  		      old_plane_state->crtc_y != new_plane_state->crtc_y ||
5201  		      old_plane_state->crtc_w != new_plane_state->crtc_w ||
5202  		      old_plane_state->crtc_h != new_plane_state->crtc_h);
5203  
5204  	drm_dbg(plane->dev,
5205  		"[PLANE:%d] PSR bb_changed:%d fb_changed:%d num_clips:%d\n",
5206  		new_plane_state->plane->base.id,
5207  		bb_changed, fb_changed, num_clips);
5208  
5209  	*dirty_regions_changed = bb_changed;
5210  
5211  	if ((num_clips + (bb_changed ? 2 : 0)) > DC_MAX_DIRTY_RECTS)
5212  		goto ffu;
5213  
5214  	if (bb_changed) {
5215  		fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[i],
5216  				   new_plane_state->crtc_x,
5217  				   new_plane_state->crtc_y,
5218  				   new_plane_state->crtc_w,
5219  				   new_plane_state->crtc_h, &i, false);
5220  
5221  		/* Add old plane bounding-box if plane is moved or resized */
5222  		fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[i],
5223  				   old_plane_state->crtc_x,
5224  				   old_plane_state->crtc_y,
5225  				   old_plane_state->crtc_w,
5226  				   old_plane_state->crtc_h, &i, false);
5227  	}
5228  
5229  	if (num_clips) {
5230  		for (; i < num_clips; clips++)
5231  			fill_dc_dirty_rect(new_plane_state->plane,
5232  					   &dirty_rects[i], clips->x1,
5233  					   clips->y1, clips->x2 - clips->x1,
5234  					   clips->y2 - clips->y1, &i, false);
5235  	} else if (fb_changed && !bb_changed) {
5236  		fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[i],
5237  				   new_plane_state->crtc_x,
5238  				   new_plane_state->crtc_y,
5239  				   new_plane_state->crtc_w,
5240  				   new_plane_state->crtc_h, &i, false);
5241  	}
5242  
5243  	flip_addrs->dirty_rect_count = i;
5244  	return;
5245  
5246  ffu:
5247  	fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[0], 0, 0,
5248  			   dm_crtc_state->base.mode.crtc_hdisplay,
5249  			   dm_crtc_state->base.mode.crtc_vdisplay,
5250  			   &flip_addrs->dirty_rect_count, true);
5251  }
5252  
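/*
 * Compute the stream's source viewport and destination rectangle from the
 * DRM mode and the connector's scaling (RMX) and underscan settings:
 * full-screen scaling by default, aspect-preserving boxing for
 * RMX_ASPECT/RMX_OFF, 1:1 centering for RMX_CENTER, with underscan borders
 * subtracted from the destination.
 */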
5253  static void update_stream_scaling_settings(const struct drm_display_mode *mode,
5254  					   const struct dm_connector_state *dm_state,
5255  					   struct dc_stream_state *stream)
5256  {
5257  	enum amdgpu_rmx_type rmx_type;
5258  
5259  	struct rect src = { 0 }; /* viewport in composition space */
5260  	struct rect dst = { 0 }; /* stream addressable area */
5261  
5262  	/* no mode. nothing to be done */
5263  	if (!mode)
5264  		return;
5265  
5266  	/* Full screen scaling by default */
5267  	src.width = mode->hdisplay;
5268  	src.height = mode->vdisplay;
5269  	dst.width = stream->timing.h_addressable;
5270  	dst.height = stream->timing.v_addressable;
5271  
5272  	if (dm_state) {
5273  		rmx_type = dm_state->scaling;
5274  		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
5275  			if (src.width * dst.height <
5276  					src.height * dst.width) {
5277  				/* height needs less upscaling/more downscaling */
5278  				dst.width = src.width *
5279  						dst.height / src.height;
5280  			} else {
5281  				/* width needs less upscaling/more downscaling */
5282  				dst.height = src.height *
5283  						dst.width / src.width;
5284  			}
5285  		} else if (rmx_type == RMX_CENTER) {
5286  			dst = src;
5287  		}
5288  
5289  		dst.x = (stream->timing.h_addressable - dst.width) / 2;
5290  		dst.y = (stream->timing.v_addressable - dst.height) / 2;
5291  
5292  		if (dm_state->underscan_enable) {
5293  			dst.x += dm_state->underscan_hborder / 2;
5294  			dst.y += dm_state->underscan_vborder / 2;
5295  			dst.width -= dm_state->underscan_hborder;
5296  			dst.height -= dm_state->underscan_vborder;
5297  		}
5298  	}
5299  
5300  	stream->src = src;
5301  	stream->dst = dst;
5302  
5303  	DRM_DEBUG_KMS("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
5304  		      dst.x, dst.y, dst.width, dst.height);
5305  
5306  }
5307  
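/*
 * Derive the DC color depth from the connector's EDID-reported bpc. YCbCr
 * 4:2:0 modes are capped by the HDMI HF-VSDB deep color bits, and the
 * user's requested bpc (if any) is applied and rounded down to an even
 * value.
 */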
5308  static enum dc_color_depth
5309  convert_color_depth_from_display_info(const struct drm_connector *connector,
5310  				      bool is_y420, int requested_bpc)
5311  {
5312  	u8 bpc;
5313  
5314  	if (is_y420) {
5315  		bpc = 8;
5316  
5317  		/* Cap display bpc based on HDMI 2.0 HF-VSDB */
5318  		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5319  			bpc = 16;
5320  		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5321  			bpc = 12;
5322  		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5323  			bpc = 10;
5324  	} else {
5325  		bpc = (uint8_t)connector->display_info.bpc;
5326  		/* Assume 8 bpc by default if no bpc is specified. */
5327  		bpc = bpc ? bpc : 8;
5328  	}
5329  
5330  	if (requested_bpc > 0) {
5331  		/*
5332  		 * Cap display bpc based on the user requested value.
5333  		 *
5334  		 * The value for state->max_bpc may not be correctly updated
5335  		 * depending on when the connector gets added to the state
5336  		 * or if this was called outside of atomic check, so it
5337  		 * can't be used directly.
5338  		 */
5339  		bpc = min_t(u8, bpc, requested_bpc);
5340  
5341  		/* Round down to the nearest even number. */
5342  		bpc = bpc - (bpc & 1);
5343  	}
5344  
5345  	switch (bpc) {
5346  	case 0:
5347  		/*
5348  		 * Temporary workaround: DRM doesn't parse color depth for
5349  		 * EDID revisions before 1.4.
5350  		 * TODO: Fix edid parsing
5351  		 */
5352  		return COLOR_DEPTH_888;
5353  	case 6:
5354  		return COLOR_DEPTH_666;
5355  	case 8:
5356  		return COLOR_DEPTH_888;
5357  	case 10:
5358  		return COLOR_DEPTH_101010;
5359  	case 12:
5360  		return COLOR_DEPTH_121212;
5361  	case 14:
5362  		return COLOR_DEPTH_141414;
5363  	case 16:
5364  		return COLOR_DEPTH_161616;
5365  	default:
5366  		return COLOR_DEPTH_UNDEFINED;
5367  	}
5368  }
5369  
5370  static enum dc_aspect_ratio
5371  get_aspect_ratio(const struct drm_display_mode *mode_in)
5372  {
5373  	/* 1-1 mapping, since both enums follow the HDMI spec. */
5374  	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
5375  }
5376  
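/*
 * Map the connector's Colorspace property onto a DC color space. Without an
 * explicit property the HDMI default rule applies: RGB uses sRGB, YCbCr
 * uses BT.709 above a 27.03 MHz pixel clock and BT.601 below it.
 */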
5377  static enum dc_color_space
5378  get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing,
5379  		       const struct drm_connector_state *connector_state)
5380  {
5381  	enum dc_color_space color_space = COLOR_SPACE_SRGB;
5382  
5383  	switch (connector_state->colorspace) {
5384  	case DRM_MODE_COLORIMETRY_BT601_YCC:
5385  		if (dc_crtc_timing->flags.Y_ONLY)
5386  			color_space = COLOR_SPACE_YCBCR601_LIMITED;
5387  		else
5388  			color_space = COLOR_SPACE_YCBCR601;
5389  		break;
5390  	case DRM_MODE_COLORIMETRY_BT709_YCC:
5391  		if (dc_crtc_timing->flags.Y_ONLY)
5392  			color_space = COLOR_SPACE_YCBCR709_LIMITED;
5393  		else
5394  			color_space = COLOR_SPACE_YCBCR709;
5395  		break;
5396  	case DRM_MODE_COLORIMETRY_OPRGB:
5397  		color_space = COLOR_SPACE_ADOBERGB;
5398  		break;
5399  	case DRM_MODE_COLORIMETRY_BT2020_RGB:
5400  	case DRM_MODE_COLORIMETRY_BT2020_YCC:
5401  		if (dc_crtc_timing->pixel_encoding == PIXEL_ENCODING_RGB)
5402  			color_space = COLOR_SPACE_2020_RGB_FULLRANGE;
5403  		else
5404  			color_space = COLOR_SPACE_2020_YCBCR;
5405  		break;
5406  	case DRM_MODE_COLORIMETRY_DEFAULT: // ITU601
5407  	default:
5408  		if (dc_crtc_timing->pixel_encoding == PIXEL_ENCODING_RGB) {
5409  			color_space = COLOR_SPACE_SRGB;
5410  		/*
5411  		 * 27030 kHz is the separation point between HDTV and SDTV
5412  		 * according to the HDMI spec; we use YCbCr709 and YCbCr601,
5413  		 * respectively.
5414  		 */
5415  		} else if (dc_crtc_timing->pix_clk_100hz > 270300) {
5416  			if (dc_crtc_timing->flags.Y_ONLY)
5417  				color_space =
5418  					COLOR_SPACE_YCBCR709_LIMITED;
5419  			else
5420  				color_space = COLOR_SPACE_YCBCR709;
5421  		} else {
5422  			if (dc_crtc_timing->flags.Y_ONLY)
5423  				color_space =
5424  					COLOR_SPACE_YCBCR601_LIMITED;
5425  			else
5426  				color_space = COLOR_SPACE_YCBCR601;
5427  		}
5428  		break;
5429  	}
5430  
5431  	return color_space;
5432  }
5433  
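/*
 * Walk the color depth down (to a minimum of 8 bpc) until the pixel clock,
 * scaled per the HDMI deep color rules, fits within the sink's maximum TMDS
 * clock. Returns false if no valid HDMI depth fits.
 */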
5434  static bool adjust_colour_depth_from_display_info(
5435  	struct dc_crtc_timing *timing_out,
5436  	const struct drm_display_info *info)
5437  {
5438  	enum dc_color_depth depth = timing_out->display_color_depth;
5439  	int normalized_clk;
5440  
5441  	do {
5442  		normalized_clk = timing_out->pix_clk_100hz / 10;
5443  		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
5444  		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5445  			normalized_clk /= 2;
5446  		/* Adjust the pixel clock per the HDMI spec, based on the colour depth */
5447  		switch (depth) {
5448  		case COLOR_DEPTH_888:
5449  			break;
5450  		case COLOR_DEPTH_101010:
5451  			normalized_clk = (normalized_clk * 30) / 24;
5452  			break;
5453  		case COLOR_DEPTH_121212:
5454  			normalized_clk = (normalized_clk * 36) / 24;
5455  			break;
5456  		case COLOR_DEPTH_161616:
5457  			normalized_clk = (normalized_clk * 48) / 24;
5458  			break;
5459  		default:
5460  			/* The above depths are the only ones valid for HDMI. */
5461  			return false;
5462  		}
5463  		if (normalized_clk <= info->max_tmds_clock) {
5464  			timing_out->display_color_depth = depth;
5465  			return true;
5466  		}
5467  	} while (--depth > COLOR_DEPTH_666);
5468  	return false;
5469  }
5470  
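/*
 * Populate the DC stream timing from the DRM mode: choose the pixel
 * encoding (forcing YCbCr 4:2:0 where required), color depth, VIC/HDMI VIC
 * and sync polarities, then copy the timing either from the mode itself
 * (FreeSync video modes) or from its crtc_* fields.
 */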
5471  static void fill_stream_properties_from_drm_display_mode(
5472  	struct dc_stream_state *stream,
5473  	const struct drm_display_mode *mode_in,
5474  	const struct drm_connector *connector,
5475  	const struct drm_connector_state *connector_state,
5476  	const struct dc_stream_state *old_stream,
5477  	int requested_bpc)
5478  {
5479  	struct dc_crtc_timing *timing_out = &stream->timing;
5480  	const struct drm_display_info *info = &connector->display_info;
5481  	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5482  	struct hdmi_vendor_infoframe hv_frame;
5483  	struct hdmi_avi_infoframe avi_frame;
5484  
5485  	memset(&hv_frame, 0, sizeof(hv_frame));
5486  	memset(&avi_frame, 0, sizeof(avi_frame));
5487  
5488  	timing_out->h_border_left = 0;
5489  	timing_out->h_border_right = 0;
5490  	timing_out->v_border_top = 0;
5491  	timing_out->v_border_bottom = 0;
5492  	/* TODO: un-hardcode */
5493  	if (drm_mode_is_420_only(info, mode_in)
5494  			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5495  		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5496  	else if (drm_mode_is_420_also(info, mode_in)
5497  			&& aconnector->force_yuv420_output)
5498  		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5499  	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCBCR444)
5500  			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5501  		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5502  	else
5503  		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5504  
5505  	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5506  	timing_out->display_color_depth = convert_color_depth_from_display_info(
5507  		connector,
5508  		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5509  		requested_bpc);
5510  	timing_out->scan_type = SCANNING_TYPE_NODATA;
5511  	timing_out->hdmi_vic = 0;
5512  
5513  	if (old_stream) {
5514  		timing_out->vic = old_stream->timing.vic;
5515  		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5516  		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5517  	} else {
5518  		timing_out->vic = drm_match_cea_mode(mode_in);
5519  		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5520  			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5521  		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5522  			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5523  	}
5524  
5525  	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5526  		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5527  		timing_out->vic = avi_frame.video_code;
5528  		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5529  		timing_out->hdmi_vic = hv_frame.vic;
5530  	}
5531  
5532  	if (is_freesync_video_mode(mode_in, aconnector)) {
5533  		timing_out->h_addressable = mode_in->hdisplay;
5534  		timing_out->h_total = mode_in->htotal;
5535  		timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
5536  		timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
5537  		timing_out->v_total = mode_in->vtotal;
5538  		timing_out->v_addressable = mode_in->vdisplay;
5539  		timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
5540  		timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
5541  		timing_out->pix_clk_100hz = mode_in->clock * 10;
5542  	} else {
5543  		timing_out->h_addressable = mode_in->crtc_hdisplay;
5544  		timing_out->h_total = mode_in->crtc_htotal;
5545  		timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5546  		timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5547  		timing_out->v_total = mode_in->crtc_vtotal;
5548  		timing_out->v_addressable = mode_in->crtc_vdisplay;
5549  		timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5550  		timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5551  		timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
5552  	}
5553  
5554  	timing_out->aspect_ratio = get_aspect_ratio(mode_in);
5555  
5556  	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5557  	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
5558  	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5559  		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5560  		    drm_mode_is_420_also(info, mode_in) &&
5561  		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5562  			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5563  			adjust_colour_depth_from_display_info(timing_out, info);
5564  		}
5565  	}
5566  
5567  	stream->output_color_space = get_output_color_space(timing_out, connector_state);
5568  }
5569  
5570  static void fill_audio_info(struct audio_info *audio_info,
5571  			    const struct drm_connector *drm_connector,
5572  			    const struct dc_sink *dc_sink)
5573  {
5574  	int i = 0;
5575  	int cea_revision = 0;
5576  	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5577  
5578  	audio_info->manufacture_id = edid_caps->manufacturer_id;
5579  	audio_info->product_id = edid_caps->product_id;
5580  
5581  	cea_revision = drm_connector->display_info.cea_rev;
5582  
5583  	strscpy(audio_info->display_name,
5584  		edid_caps->display_name,
5585  		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
5586  
5587  	if (cea_revision >= 3) {
5588  		audio_info->mode_count = edid_caps->audio_mode_count;
5589  
5590  		for (i = 0; i < audio_info->mode_count; ++i) {
5591  			audio_info->modes[i].format_code =
5592  					(enum audio_format_code)
5593  					(edid_caps->audio_modes[i].format_code);
5594  			audio_info->modes[i].channel_count =
5595  					edid_caps->audio_modes[i].channel_count;
5596  			audio_info->modes[i].sample_rates.all =
5597  					edid_caps->audio_modes[i].sample_rate;
5598  			audio_info->modes[i].sample_size =
5599  					edid_caps->audio_modes[i].sample_size;
5600  		}
5601  	}
5602  
5603  	audio_info->flags.all = edid_caps->speaker_flags;
5604  
5605  	/* TODO: We only check for the progressive mode, check for interlace mode too */
5606  	if (drm_connector->latency_present[0]) {
5607  		audio_info->video_latency = drm_connector->video_latency[0];
5608  		audio_info->audio_latency = drm_connector->audio_latency[0];
5609  	}
5610  
5611  	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5612  
5613  }
5614  
5615  static void
5616  copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5617  				      struct drm_display_mode *dst_mode)
5618  {
5619  	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5620  	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5621  	dst_mode->crtc_clock = src_mode->crtc_clock;
5622  	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5623  	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5624  	dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
5625  	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5626  	dst_mode->crtc_htotal = src_mode->crtc_htotal;
5627  	dst_mode->crtc_hskew = src_mode->crtc_hskew;
5628  	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5629  	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5630  	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5631  	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5632  	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5633  }
5634  
5635  static void
5636  decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5637  					const struct drm_display_mode *native_mode,
5638  					bool scale_enabled)
5639  {
5640  	if (scale_enabled) {
5641  		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5642  	} else if (native_mode->clock == drm_mode->clock &&
5643  			native_mode->htotal == drm_mode->htotal &&
5644  			native_mode->vtotal == drm_mode->vtotal) {
5645  		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5646  	} else {
5647  		/* neither scaling nor an amdgpu-inserted mode; no need to patch */
5648  	}
5649  }
5650  
5651  static struct dc_sink *
5652  create_fake_sink(struct amdgpu_dm_connector *aconnector)
5653  {
5654  	struct dc_sink_init_data sink_init_data = { 0 };
5655  	struct dc_sink *sink = NULL;
5656  
5657  	sink_init_data.link = aconnector->dc_link;
5658  	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5659  
5660  	sink = dc_sink_create(&sink_init_data);
5661  	if (!sink) {
5662  		DRM_ERROR("Failed to create sink!\n");
5663  		return NULL;
5664  	}
5665  	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
5666  
5667  	return sink;
5668  }
5669  
5670  static void set_multisync_trigger_params(
5671  		struct dc_stream_state *stream)
5672  {
5673  	struct dc_stream_state *master = NULL;
5674  
5675  	if (stream->triggered_crtc_reset.enabled) {
5676  		master = stream->triggered_crtc_reset.event_source;
5677  		stream->triggered_crtc_reset.event =
5678  			master->timing.flags.VSYNC_POSITIVE_POLARITY ?
5679  			CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
5680  		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
5681  	}
5682  }
5683  
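/*
 * Among the streams with a triggered CRTC reset, pick the one with the
 * highest refresh rate and make it the reset event source for every stream
 * in the set.
 */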
5684  static void set_master_stream(struct dc_stream_state *stream_set[],
5685  			      int stream_count)
5686  {
5687  	int j, highest_rfr = 0, master_stream = 0;
5688  
5689  	for (j = 0;  j < stream_count; j++) {
5690  		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
5691  			int refresh_rate = 0;
5692  
5693  			refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
5694  				(stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
5695  			if (refresh_rate > highest_rfr) {
5696  				highest_rfr = refresh_rate;
5697  				master_stream = j;
5698  			}
5699  		}
5700  	}
5701  	for (j = 0;  j < stream_count; j++) {
5702  		if (stream_set[j])
5703  			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
5704  	}
5705  }
5706  
5707  static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
5708  {
5709  	int i = 0;
5710  	struct dc_stream_state *stream;
5711  
5712  	if (context->stream_count < 2)
5713  		return;
5714  	for (i = 0; i < context->stream_count ; i++) {
5715  		if (!context->streams[i])
5716  			continue;
5717  		/*
5718  		 * TODO: add a function to read AMD VSDB bits and set
5719  		 * crtc_sync_master.multi_sync_enabled flag.
5720  		 * For now it's set to false.
5721  		 */
5722  	}
5723  
5724  	set_master_stream(context->streams, context->stream_count);
5725  
5726  	for (i = 0; i < context->stream_count ; i++) {
5727  		stream = context->streams[i];
5728  
5729  		if (!stream)
5730  			continue;
5731  
5732  		set_multisync_trigger_params(stream);
5733  	}
5734  }
5735  
5736  /**
5737   * DOC: FreeSync Video
5738   *
5739   * When a userspace application wants to play a video, the content follows a
5740   * standard format definition that usually specifies the FPS for that format.
5741   * The list below illustrates some video formats and their expected FPS,
5742   * respectively:
5743   *
5744   * - TV/NTSC (23.976 FPS)
5745   * - Cinema (24 FPS)
5746   * - TV/PAL (25 FPS)
5747   * - TV/NTSC (29.97 FPS)
5748   * - TV/NTSC (30 FPS)
5749   * - Cinema HFR (48 FPS)
5750   * - TV/PAL (50 FPS)
5751   * - Commonly used (60 FPS)
5752   * - Multiples of 24 (48,72,96 FPS)
5753   *
5754   * The list of standard video formats is not huge and can be added to the
5755   * connector's modeset list beforehand. With that, userspace can leverage
5756   * FreeSync to extend the front porch in order to attain the target refresh
5757   * rate. Such a switch will happen seamlessly, without screen blanking or
5758   * reprogramming of the output in any other way. If the userspace requests a
5759   * modesetting change compatible with FreeSync modes that only differ in the
5760   * refresh rate, DC will skip the full update and avoid blink during the
5761   * transition. For example, the video player can change the modesetting from
5762   * 60Hz to 30Hz for playing TV/NTSC content when it goes full screen without
5763   * causing any display blink. This same concept can be applied to a mode
5764   * setting change.
5765   */
5766  static struct drm_display_mode *
5767  get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
5768  		bool use_probed_modes)
5769  {
5770  	struct drm_display_mode *m, *m_pref = NULL;
5771  	u16 current_refresh, highest_refresh;
5772  	struct list_head *list_head = use_probed_modes ?
5773  		&aconnector->base.probed_modes :
5774  		&aconnector->base.modes;
5775  
5776  	if (aconnector->freesync_vid_base.clock != 0)
5777  		return &aconnector->freesync_vid_base;
5778  
5779  	/* Find the preferred mode */
5780  	list_for_each_entry(m, list_head, head) {
5781  		if (m->type & DRM_MODE_TYPE_PREFERRED) {
5782  			m_pref = m;
5783  			break;
5784  		}
5785  	}
5786  
5787  	if (!m_pref) {
5788  		/* Probably an EDID with no preferred mode. Fall back to the first entry */
5789  		m_pref = list_first_entry_or_null(
5790  				&aconnector->base.modes, struct drm_display_mode, head);
5791  		if (!m_pref) {
5792  			DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
5793  			return NULL;
5794  		}
5795  	}
5796  
5797  	highest_refresh = drm_mode_vrefresh(m_pref);
5798  
5799  	/*
5800  	 * Find the mode with highest refresh rate with same resolution.
5801  	 * For some monitors, preferred mode is not the mode with highest
5802  	 * supported refresh rate.
5803  	 */
5804  	list_for_each_entry(m, list_head, head) {
5805  		current_refresh  = drm_mode_vrefresh(m);
5806  
5807  		if (m->hdisplay == m_pref->hdisplay &&
5808  		    m->vdisplay == m_pref->vdisplay &&
5809  		    highest_refresh < current_refresh) {
5810  			highest_refresh = current_refresh;
5811  			m_pref = m;
5812  		}
5813  	}
5814  
5815  	drm_mode_copy(&aconnector->freesync_vid_base, m_pref);
5816  	return m_pref;
5817  }
5818  
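/*
 * A mode qualifies as a FreeSync video mode when it matches the
 * highest-refresh base mode in every timing parameter except the vertical
 * blanking: vtotal, vsync_start and vsync_end may all shift by the same
 * amount (i.e. only the front porch differs).
 */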
5819  static bool is_freesync_video_mode(const struct drm_display_mode *mode,
5820  		struct amdgpu_dm_connector *aconnector)
5821  {
5822  	struct drm_display_mode *high_mode;
5823  	int timing_diff;
5824  
5825  	high_mode = get_highest_refresh_rate_mode(aconnector, false);
5826  	if (!high_mode || !mode)
5827  		return false;
5828  
5829  	timing_diff = high_mode->vtotal - mode->vtotal;
5830  
5831  	if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
5832  	    high_mode->hdisplay != mode->hdisplay ||
5833  	    high_mode->vdisplay != mode->vdisplay ||
5834  	    high_mode->hsync_start != mode->hsync_start ||
5835  	    high_mode->hsync_end != mode->hsync_end ||
5836  	    high_mode->htotal != mode->htotal ||
5837  	    high_mode->hskew != mode->hskew ||
5838  	    high_mode->vscan != mode->vscan ||
5839  	    high_mode->vsync_start - mode->vsync_start != timing_diff ||
5840  	    high_mode->vsync_end - mode->vsync_end != timing_diff)
5841  		return false;
5842  	else
5843  		return true;
5844  }
5845  
5846  static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
5847  			    struct dc_sink *sink, struct dc_stream_state *stream,
5848  			    struct dsc_dec_dpcd_caps *dsc_caps)
5849  {
5850  	stream->timing.flags.DSC = 0;
5851  	dsc_caps->is_dsc_supported = false;
5852  
5853  	if (aconnector->dc_link && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
5854  	    sink->sink_signal == SIGNAL_TYPE_EDP)) {
5855  		if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE ||
5856  			sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
5857  			dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
5858  				aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
5859  				aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
5860  				dsc_caps);
5861  	}
5862  }
5863  
5864  
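/*
 * DSC policy for eDP: if the stream fits the link even at the maximum
 * allowed bits per pixel, program DSC at that fixed bpp; otherwise let DC
 * size the DSC configuration against the available link bandwidth.
 */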
5865  static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
5866  				    struct dc_sink *sink, struct dc_stream_state *stream,
5867  				    struct dsc_dec_dpcd_caps *dsc_caps,
5868  				    uint32_t max_dsc_target_bpp_limit_override)
5869  {
5870  	const struct dc_link_settings *verified_link_cap = NULL;
5871  	u32 link_bw_in_kbps;
5872  	u32 edp_min_bpp_x16, edp_max_bpp_x16;
5873  	struct dc *dc = sink->ctx->dc;
5874  	struct dc_dsc_bw_range bw_range = {0};
5875  	struct dc_dsc_config dsc_cfg = {0};
5876  	struct dc_dsc_config_options dsc_options = {0};
5877  
5878  	dc_dsc_get_default_config_option(dc, &dsc_options);
5879  	dsc_options.max_target_bpp_limit_override_x16 = max_dsc_target_bpp_limit_override * 16;
5880  
5881  	verified_link_cap = dc_link_get_link_cap(stream->link);
5882  	link_bw_in_kbps = dc_link_bandwidth_kbps(stream->link, verified_link_cap);
5883  	edp_min_bpp_x16 = 8 * 16;
5884  	edp_max_bpp_x16 = 8 * 16;
5885  
5886  	if (edp_max_bpp_x16 > dsc_caps->edp_max_bits_per_pixel)
5887  		edp_max_bpp_x16 = dsc_caps->edp_max_bits_per_pixel;
5888  
5889  	if (edp_max_bpp_x16 < edp_min_bpp_x16)
5890  		edp_min_bpp_x16 = edp_max_bpp_x16;
5891  
5892  	if (dc_dsc_compute_bandwidth_range(dc->res_pool->dscs[0],
5893  				dc->debug.dsc_min_slice_height_override,
5894  				edp_min_bpp_x16, edp_max_bpp_x16,
5895  				dsc_caps,
5896  				&stream->timing,
5897  				dc_link_get_highest_encoding_format(aconnector->dc_link),
5898  				&bw_range)) {
5899  
5900  		if (bw_range.max_kbps < link_bw_in_kbps) {
5901  			if (dc_dsc_compute_config(dc->res_pool->dscs[0],
5902  					dsc_caps,
5903  					&dsc_options,
5904  					0,
5905  					&stream->timing,
5906  					dc_link_get_highest_encoding_format(aconnector->dc_link),
5907  					&dsc_cfg)) {
5908  				stream->timing.dsc_cfg = dsc_cfg;
5909  				stream->timing.flags.DSC = 1;
5910  				stream->timing.dsc_cfg.bits_per_pixel = edp_max_bpp_x16;
5911  			}
5912  			return;
5913  		}
5914  	}
5915  
5916  	if (dc_dsc_compute_config(dc->res_pool->dscs[0],
5917  				dsc_caps,
5918  				&dsc_options,
5919  				link_bw_in_kbps,
5920  				&stream->timing,
5921  				dc_link_get_highest_encoding_format(aconnector->dc_link),
5922  				&dsc_cfg)) {
5923  		stream->timing.dsc_cfg = dsc_cfg;
5924  		stream->timing.flags.DSC = 1;
5925  	}
5926  }
5927  
5928  
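/*
 * SST DSC policy: enable DSC on eDP via apply_dsc_policy_for_edp(), on
 * native DP SST when the sink decodes DSC itself, or behind a DP-to-HDMI
 * PCON when the timing exceeds the link bandwidth. debugfs overrides for
 * force enable, slice counts and bpp are applied last.
 */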
5929  static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
5930  					struct dc_sink *sink, struct dc_stream_state *stream,
5931  					struct dsc_dec_dpcd_caps *dsc_caps)
5932  {
5933  	struct drm_connector *drm_connector = &aconnector->base;
5934  	u32 link_bandwidth_kbps;
5935  	struct dc *dc = sink->ctx->dc;
5936  	u32 max_supported_bw_in_kbps, timing_bw_in_kbps;
5937  	u32 dsc_max_supported_bw_in_kbps;
5938  	u32 max_dsc_target_bpp_limit_override =
5939  		drm_connector->display_info.max_dsc_bpp;
5940  	struct dc_dsc_config_options dsc_options = {0};
5941  
5942  	dc_dsc_get_default_config_option(dc, &dsc_options);
5943  	dsc_options.max_target_bpp_limit_override_x16 = max_dsc_target_bpp_limit_override * 16;
5944  
5945  	link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
5946  							dc_link_get_link_cap(aconnector->dc_link));
5947  
5948  	/* Set DSC policy according to dsc_clock_en */
5949  	dc_dsc_policy_set_enable_dsc_when_not_needed(
5950  		aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
5951  
5952  	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_EDP &&
5953  	    !aconnector->dc_link->panel_config.dsc.disable_dsc_edp &&
5954  	    dc->caps.edp_dsc_support && aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE) {
5955  
5956  		apply_dsc_policy_for_edp(aconnector, sink, stream, dsc_caps, max_dsc_target_bpp_limit_override);
5957  
5958  	} else if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5959  		if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) {
5960  			if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
5961  						dsc_caps,
5962  						&dsc_options,
5963  						link_bandwidth_kbps,
5964  						&stream->timing,
5965  						dc_link_get_highest_encoding_format(aconnector->dc_link),
5966  						&stream->timing.dsc_cfg)) {
5967  				stream->timing.flags.DSC = 1;
5968  				DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n", __func__, drm_connector->name);
5969  			}
5970  		} else if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
5971  			timing_bw_in_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing,
5972  					dc_link_get_highest_encoding_format(aconnector->dc_link));
5973  			max_supported_bw_in_kbps = link_bandwidth_kbps;
5974  			dsc_max_supported_bw_in_kbps = link_bandwidth_kbps;
5975  
5976  			if (timing_bw_in_kbps > max_supported_bw_in_kbps &&
5977  					max_supported_bw_in_kbps > 0 &&
5978  					dsc_max_supported_bw_in_kbps > 0)
5979  				if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
5980  						dsc_caps,
5981  						&dsc_options,
5982  						dsc_max_supported_bw_in_kbps,
5983  						&stream->timing,
5984  						dc_link_get_highest_encoding_format(aconnector->dc_link),
5985  						&stream->timing.dsc_cfg)) {
5986  					stream->timing.flags.DSC = 1;
5987  					DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from DP-HDMI PCON\n",
5988  									 __func__, drm_connector->name);
5989  				}
5990  		}
5991  	}
5992  
5993  	/* Overwrite the stream flag if DSC is enabled through debugfs */
5994  	if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
5995  		stream->timing.flags.DSC = 1;
5996  
5997  	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
5998  		stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
5999  
6000  	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
6001  		stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
6002  
6003  	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
6004  		stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
6005  }
6006  
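/*
 * Build a dc_stream_state for the connector: create (or retain) the sink,
 * pick the timing (preferred, FreeSync video or scaled), apply the DSC
 * policy, scaling and audio info, and set up the VSC SDP used for
 * PSR/Replay colorimetry.
 */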
6007  static struct dc_stream_state *
6008  create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6009  		       const struct drm_display_mode *drm_mode,
6010  		       const struct dm_connector_state *dm_state,
6011  		       const struct dc_stream_state *old_stream,
6012  		       int requested_bpc)
6013  {
6014  	struct drm_display_mode *preferred_mode = NULL;
6015  	struct drm_connector *drm_connector;
6016  	const struct drm_connector_state *con_state = &dm_state->base;
6017  	struct dc_stream_state *stream = NULL;
6018  	struct drm_display_mode mode;
6019  	struct drm_display_mode saved_mode;
6020  	struct drm_display_mode *freesync_mode = NULL;
6021  	bool native_mode_found = false;
6022  	bool recalculate_timing = false;
6023  	bool scale = dm_state->scaling != RMX_OFF;
6024  	int mode_refresh;
6025  	int preferred_refresh = 0;
6026  	enum color_transfer_func tf = TRANSFER_FUNC_UNKNOWN;
6027  	struct dsc_dec_dpcd_caps dsc_caps;
6028  
6029  	struct dc_sink *sink = NULL;
6030  
6031  	drm_mode_init(&mode, drm_mode);
6032  	memset(&saved_mode, 0, sizeof(saved_mode));
6033  
6034  	if (aconnector == NULL) {
6035  		DRM_ERROR("aconnector is NULL!\n");
6036  		return stream;
6037  	}
6038  
6039  	drm_connector = &aconnector->base;
6040  
6041  	if (!aconnector->dc_sink) {
6042  		sink = create_fake_sink(aconnector);
6043  		if (!sink)
6044  			return stream;
6045  	} else {
6046  		sink = aconnector->dc_sink;
6047  		dc_sink_retain(sink);
6048  	}
6049  
6050  	stream = dc_create_stream_for_sink(sink);
6051  
6052  	if (stream == NULL) {
6053  		DRM_ERROR("Failed to create stream for sink!\n");
6054  		goto finish;
6055  	}
6056  
6057  	stream->dm_stream_context = aconnector;
6058  
6059  	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
6060  		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
6061  
6062  	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
6063  		/* Search for preferred mode */
6064  		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
6065  			native_mode_found = true;
6066  			break;
6067  		}
6068  	}
6069  	if (!native_mode_found)
6070  		preferred_mode = list_first_entry_or_null(
6071  				&aconnector->base.modes,
6072  				struct drm_display_mode,
6073  				head);
6074  
6075  	mode_refresh = drm_mode_vrefresh(&mode);
6076  
6077  	if (preferred_mode == NULL) {
6078  		/*
6079  		 * This may not be an error: the use case is when there are no
6080  		 * usermode calls to reset and set mode upon hotplug. In this
6081  		 * case, we call set mode ourselves to restore the previous mode
6082  		 * and the modelist may not be filled in time.
6083  		 */
6084  		DRM_DEBUG_DRIVER("No preferred mode found\n");
6085  	} else {
6086  		recalculate_timing = is_freesync_video_mode(&mode, aconnector);
6087  		if (recalculate_timing) {
6088  			freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
6089  			drm_mode_copy(&saved_mode, &mode);
6090  			drm_mode_copy(&mode, freesync_mode);
6091  		} else {
6092  			decide_crtc_timing_for_drm_display_mode(
6093  					&mode, preferred_mode, scale);
6094  
6095  			preferred_refresh = drm_mode_vrefresh(preferred_mode);
6096  		}
6097  	}
6098  
6099  	if (recalculate_timing)
6100  		drm_mode_set_crtcinfo(&saved_mode, 0);
6101  
6102  	/*
6103  	 * If scaling is enabled and the refresh rate didn't change,
6104  	 * we copy the VIC and polarities from the old timings.
6105  	 */
6106  	if (!scale || mode_refresh != preferred_refresh)
6107  		fill_stream_properties_from_drm_display_mode(
6108  			stream, &mode, &aconnector->base, con_state, NULL,
6109  			requested_bpc);
6110  	else
6111  		fill_stream_properties_from_drm_display_mode(
6112  			stream, &mode, &aconnector->base, con_state, old_stream,
6113  			requested_bpc);
6114  
6115  	if (aconnector->timing_changed) {
6116  		DC_LOG_DEBUG("%s: overriding timing for automated test, bpc %d, changing to %d\n",
6117  				__func__,
6118  				stream->timing.display_color_depth,
6119  				aconnector->timing_requested->display_color_depth);
6120  		stream->timing = *aconnector->timing_requested;
6121  	}
6122  
6123  	/* SST DSC determination policy */
6124  	update_dsc_caps(aconnector, sink, stream, &dsc_caps);
6125  	if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
6126  		apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
6127  
6128  	update_stream_scaling_settings(&mode, dm_state, stream);
6129  
6130  	fill_audio_info(
6131  		&stream->audio_info,
6132  		drm_connector,
6133  		sink);
6134  
6135  	update_stream_signal(stream, sink);
6136  
6137  	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
6138  		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
6139  
6140  	if (stream->link->psr_settings.psr_feature_enabled || stream->link->replay_settings.replay_feature_enabled) {
6141  		//
6142  		// Decide whether the stream supports VSC SDP colorimetry
6143  		// before building the VSC info packet.
6144  		//
6145  		stream->use_vsc_sdp_for_colorimetry = false;
6146  		if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
6147  			stream->use_vsc_sdp_for_colorimetry =
6148  				aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
6149  		} else {
6150  			if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
6151  				stream->use_vsc_sdp_for_colorimetry = true;
6152  		}
6153  		if (stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22)
6154  			tf = TRANSFER_FUNC_GAMMA_22;
6155  		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space, tf);
6156  		aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
6157  
6158  	}
6159  finish:
6160  	dc_sink_release(sink);
6161  
6162  	return stream;
6163  }
6164  
6165  static enum drm_connector_status
6166  amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
6167  {
6168  	bool connected;
6169  	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6170  
6171  	/*
6172  	 * Notes:
6173  	 * 1. This interface is NOT called in context of HPD irq.
6174  	 * 2. This interface *is called* in context of user-mode ioctl, which
6175  	 * makes it a bad place for *any* MST-related activity.
6176  	 */
6177  
6178  	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
6179  	    !aconnector->fake_enable)
6180  		connected = (aconnector->dc_sink != NULL);
6181  	else
6182  		connected = (aconnector->base.force == DRM_FORCE_ON ||
6183  				aconnector->base.force == DRM_FORCE_ON_DIGITAL);
6184  
6185  	update_subconnector_property(aconnector);
6186  
6187  	return (connected ? connector_status_connected :
6188  			connector_status_disconnected);
6189  }
6190  
6191  int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
6192  					    struct drm_connector_state *connector_state,
6193  					    struct drm_property *property,
6194  					    uint64_t val)
6195  {
6196  	struct drm_device *dev = connector->dev;
6197  	struct amdgpu_device *adev = drm_to_adev(dev);
6198  	struct dm_connector_state *dm_old_state =
6199  		to_dm_connector_state(connector->state);
6200  	struct dm_connector_state *dm_new_state =
6201  		to_dm_connector_state(connector_state);
6202  
6203  	int ret = -EINVAL;
6204  
6205  	if (property == dev->mode_config.scaling_mode_property) {
6206  		enum amdgpu_rmx_type rmx_type;
6207  
6208  		switch (val) {
6209  		case DRM_MODE_SCALE_CENTER:
6210  			rmx_type = RMX_CENTER;
6211  			break;
6212  		case DRM_MODE_SCALE_ASPECT:
6213  			rmx_type = RMX_ASPECT;
6214  			break;
6215  		case DRM_MODE_SCALE_FULLSCREEN:
6216  			rmx_type = RMX_FULL;
6217  			break;
6218  		case DRM_MODE_SCALE_NONE:
6219  		default:
6220  			rmx_type = RMX_OFF;
6221  			break;
6222  		}
6223  
6224  		if (dm_old_state->scaling == rmx_type)
6225  			return 0;
6226  
6227  		dm_new_state->scaling = rmx_type;
6228  		ret = 0;
6229  	} else if (property == adev->mode_info.underscan_hborder_property) {
6230  		dm_new_state->underscan_hborder = val;
6231  		ret = 0;
6232  	} else if (property == adev->mode_info.underscan_vborder_property) {
6233  		dm_new_state->underscan_vborder = val;
6234  		ret = 0;
6235  	} else if (property == adev->mode_info.underscan_property) {
6236  		dm_new_state->underscan_enable = val;
6237  		ret = 0;
6238  	} else if (property == adev->mode_info.abm_level_property) {
6239  		dm_new_state->abm_level = val;
6240  		ret = 0;
6241  	}
6242  
6243  	return ret;
6244  }
6245  
6246  int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
6247  					    const struct drm_connector_state *state,
6248  					    struct drm_property *property,
6249  					    uint64_t *val)
6250  {
6251  	struct drm_device *dev = connector->dev;
6252  	struct amdgpu_device *adev = drm_to_adev(dev);
6253  	struct dm_connector_state *dm_state =
6254  		to_dm_connector_state(state);
6255  	int ret = -EINVAL;
6256  
6257  	if (property == dev->mode_config.scaling_mode_property) {
6258  		switch (dm_state->scaling) {
6259  		case RMX_CENTER:
6260  			*val = DRM_MODE_SCALE_CENTER;
6261  			break;
6262  		case RMX_ASPECT:
6263  			*val = DRM_MODE_SCALE_ASPECT;
6264  			break;
6265  		case RMX_FULL:
6266  			*val = DRM_MODE_SCALE_FULLSCREEN;
6267  			break;
6268  		case RMX_OFF:
6269  		default:
6270  			*val = DRM_MODE_SCALE_NONE;
6271  			break;
6272  		}
6273  		ret = 0;
6274  	} else if (property == adev->mode_info.underscan_hborder_property) {
6275  		*val = dm_state->underscan_hborder;
6276  		ret = 0;
6277  	} else if (property == adev->mode_info.underscan_vborder_property) {
6278  		*val = dm_state->underscan_vborder;
6279  		ret = 0;
6280  	} else if (property == adev->mode_info.underscan_property) {
6281  		*val = dm_state->underscan_enable;
6282  		ret = 0;
6283  	} else if (property == adev->mode_info.abm_level_property) {
6284  		*val = dm_state->abm_level;
6285  		ret = 0;
6286  	}
6287  
6288  	return ret;
6289  }
6290  
6291  static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
6292  {
6293  	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
6294  
6295  	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
6296  }
6297  
6298  static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
6299  {
6300  	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6301  	struct amdgpu_device *adev = drm_to_adev(connector->dev);
6302  	struct amdgpu_display_manager *dm = &adev->dm;
6303  
6304  	/*
6305  	 * Call only if mst_mgr was initialized before since it's not done
6306  	 * for all connector types.
6307  	 */
6308  	if (aconnector->mst_mgr.dev)
6309  		drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
6310  
6311  	if (aconnector->bl_idx != -1) {
6312  		backlight_device_unregister(dm->backlight_dev[aconnector->bl_idx]);
6313  		dm->backlight_dev[aconnector->bl_idx] = NULL;
6314  	}
6315  
6316  	if (aconnector->dc_em_sink)
6317  		dc_sink_release(aconnector->dc_em_sink);
6318  	aconnector->dc_em_sink = NULL;
6319  	if (aconnector->dc_sink)
6320  		dc_sink_release(aconnector->dc_sink);
6321  	aconnector->dc_sink = NULL;
6322  
6323  	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
6324  	drm_connector_unregister(connector);
6325  	drm_connector_cleanup(connector);
6326  	if (aconnector->i2c) {
6327  		i2c_del_adapter(&aconnector->i2c->base);
6328  		kfree(aconnector->i2c);
6329  	}
6330  	kfree(aconnector->dm_dp_aux.aux.name);
6331  
6332  	kfree(connector);
6333  }
6334  
6335  void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
6336  {
6337  	struct dm_connector_state *state =
6338  		to_dm_connector_state(connector->state);
6339  
6340  	if (connector->state)
6341  		__drm_atomic_helper_connector_destroy_state(connector->state);
6342  
6343  	kfree(state);
6344  
6345  	state = kzalloc(sizeof(*state), GFP_KERNEL);
6346  
6347  	if (state) {
6348  		state->scaling = RMX_OFF;
6349  		state->underscan_enable = false;
6350  		state->underscan_hborder = 0;
6351  		state->underscan_vborder = 0;
6352  		state->base.max_requested_bpc = 8;
6353  		state->vcpi_slots = 0;
6354  		state->pbn = 0;
6355  
6356  		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
6357  			state->abm_level = amdgpu_dm_abm_level;
6358  
6359  		__drm_atomic_helper_connector_reset(connector, &state->base);
6360  	}
6361  }
6362  
6363  struct drm_connector_state *
6364  amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
6365  {
6366  	struct dm_connector_state *state =
6367  		to_dm_connector_state(connector->state);
6368  
6369  	struct dm_connector_state *new_state =
6370  			kmemdup(state, sizeof(*state), GFP_KERNEL);
6371  
6372  	if (!new_state)
6373  		return NULL;
6374  
6375  	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
6376  
6377  	new_state->freesync_capable = state->freesync_capable;
6378  	new_state->abm_level = state->abm_level;
6379  	new_state->scaling = state->scaling;
6380  	new_state->underscan_enable = state->underscan_enable;
6381  	new_state->underscan_hborder = state->underscan_hborder;
6382  	new_state->underscan_vborder = state->underscan_vborder;
6383  	new_state->vcpi_slots = state->vcpi_slots;
6384  	new_state->pbn = state->pbn;
6385  	return &new_state->base;
6386  }
6387  
6388  static int
6389  amdgpu_dm_connector_late_register(struct drm_connector *connector)
6390  {
6391  	struct amdgpu_dm_connector *amdgpu_dm_connector =
6392  		to_amdgpu_dm_connector(connector);
6393  	int r;
6394  
6395  	amdgpu_dm_register_backlight_device(amdgpu_dm_connector);
6396  
6397  	if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
6398  	    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
6399  		amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
6400  		r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
6401  		if (r)
6402  			return r;
6403  	}
6404  
6405  #if defined(CONFIG_DEBUG_FS)
6406  	connector_debugfs_init(amdgpu_dm_connector);
6407  #endif
6408  
6409  	return 0;
6410  }
6411  
6412  static void amdgpu_dm_connector_funcs_force(struct drm_connector *connector)
6413  {
6414  	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6415  	struct dc_link *dc_link = aconnector->dc_link;
6416  	struct dc_sink *dc_em_sink = aconnector->dc_em_sink;
6417  	struct edid *edid;
6418  
6419  	if (!connector->edid_override)
6420  		return;
6421  
6422  	drm_edid_override_connector_update(&aconnector->base);
6423  	edid = aconnector->base.edid_blob_ptr->data;
6424  	aconnector->edid = edid;
6425  
6426  	/* Update emulated (virtual) sink's EDID */
6427  	if (dc_em_sink && dc_link) {
6428  		memset(&dc_em_sink->edid_caps, 0, sizeof(struct dc_edid_caps));
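		/* Copy the base EDID block plus all extension blocks (EDID_LENGTH = 128 bytes each). */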
6429  		memmove(dc_em_sink->dc_edid.raw_edid, edid, (edid->extensions + 1) * EDID_LENGTH);
6430  		dm_helpers_parse_edid_caps(
6431  			dc_link,
6432  			&dc_em_sink->dc_edid,
6433  			&dc_em_sink->edid_caps);
6434  	}
6435  }
6436  
6437  static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
6438  	.reset = amdgpu_dm_connector_funcs_reset,
6439  	.detect = amdgpu_dm_connector_detect,
6440  	.fill_modes = drm_helper_probe_single_connector_modes,
6441  	.destroy = amdgpu_dm_connector_destroy,
6442  	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
6443  	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6444  	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
6445  	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
6446  	.late_register = amdgpu_dm_connector_late_register,
6447  	.early_unregister = amdgpu_dm_connector_unregister,
6448  	.force = amdgpu_dm_connector_funcs_force
6449  };
6450  
6451  static int get_modes(struct drm_connector *connector)
6452  {
6453  	return amdgpu_dm_connector_get_modes(connector);
6454  }
6455  
6456  static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
6457  {
6458  	struct dc_sink_init_data init_params = {
6459  			.link = aconnector->dc_link,
6460  			.sink_signal = SIGNAL_TYPE_VIRTUAL
6461  	};
6462  	struct edid *edid;
6463  
6464  	if (!aconnector->base.edid_blob_ptr) {
6465  		/* if connector->edid_override is valid, propagate
6466  		 * the override EDID into edid_blob_ptr
6467  		 */
6468  
6469  		drm_edid_override_connector_update(&aconnector->base);
6470  
6471  		if (!aconnector->base.edid_blob_ptr) {
6472  			DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
6473  					aconnector->base.name);
6474  
6475  			aconnector->base.force = DRM_FORCE_OFF;
6476  			return;
6477  		}
6478  	}
6479  
6480  	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
6481  
6482  	aconnector->edid = edid;
6483  
6484  	aconnector->dc_em_sink = dc_link_add_remote_sink(
6485  		aconnector->dc_link,
6486  		(uint8_t *)edid,
6487  		(edid->extensions + 1) * EDID_LENGTH,
6488  		&init_params);
6489  
6490  	if (aconnector->base.force == DRM_FORCE_ON) {
6491  		aconnector->dc_sink = aconnector->dc_link->local_sink ?
6492  		aconnector->dc_link->local_sink :
6493  		aconnector->dc_em_sink;
6494  		dc_sink_retain(aconnector->dc_sink);
6495  	}
6496  }
6497  
6498  static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
6499  {
6500  	struct dc_link *link = (struct dc_link *)aconnector->dc_link;
6501  
6502  	/*
6503  	 * In case of a headless boot with force on for a DP managed connector,
6504  	 * these settings have to be != 0 to get the initial modeset.
6505  	 */
6506  	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6507  		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
6508  		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
6509  	}
6510  
6511  	create_eml_sink(aconnector);
6512  }
6513  
6514  static enum dc_status dm_validate_stream_and_context(struct dc *dc,
6515  						struct dc_stream_state *stream)
6516  {
6517  	enum dc_status dc_result = DC_ERROR_UNEXPECTED;
6518  	struct dc_plane_state *dc_plane_state = NULL;
6519  	struct dc_state *dc_state = NULL;
6520  
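	/*
	 * Validate the stream together with a minimal full-screen ARGB8888
	 * plane in a temporary DC state so global/bandwidth checks also run.
	 */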
6521  	if (!stream)
6522  		goto cleanup;
6523  
6524  	dc_plane_state = dc_create_plane_state(dc);
6525  	if (!dc_plane_state)
6526  		goto cleanup;
6527  
6528  	dc_state = dc_create_state(dc);
6529  	if (!dc_state)
6530  		goto cleanup;
6531  
6532  	/* populate a full-screen plane from the stream's source dimensions */
6533  	dc_plane_state->src_rect.height  = stream->src.height;
6534  	dc_plane_state->src_rect.width   = stream->src.width;
6535  	dc_plane_state->dst_rect.height  = stream->src.height;
6536  	dc_plane_state->dst_rect.width   = stream->src.width;
6537  	dc_plane_state->clip_rect.height = stream->src.height;
6538  	dc_plane_state->clip_rect.width  = stream->src.width;
6539  	dc_plane_state->plane_size.surface_pitch = ((stream->src.width + 255) / 256) * 256;
6540  	dc_plane_state->plane_size.surface_size.height = stream->src.height;
6541  	dc_plane_state->plane_size.surface_size.width  = stream->src.width;
6542  	dc_plane_state->plane_size.chroma_size.height  = stream->src.height;
6543  	dc_plane_state->plane_size.chroma_size.width   = stream->src.width;
6544  	dc_plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
6545  	dc_plane_state->tiling_info.gfx9.swizzle = DC_SW_UNKNOWN;
6546  	dc_plane_state->rotation = ROTATION_ANGLE_0;
6547  	dc_plane_state->is_tiling_rotated = false;
6548  	dc_plane_state->tiling_info.gfx8.array_mode = DC_ARRAY_LINEAR_GENERAL;
6549  
6550  	dc_result = dc_validate_stream(dc, stream);
6551  	if (dc_result == DC_OK)
6552  		dc_result = dc_validate_plane(dc, dc_plane_state);
6553  
6554  	if (dc_result == DC_OK)
6555  		dc_result = dc_add_stream_to_ctx(dc, dc_state, stream);
6556  
6557  	if (dc_result == DC_OK && !dc_add_plane_to_context(
6558  						dc,
6559  						stream,
6560  						dc_plane_state,
6561  						dc_state))
6562  		dc_result = DC_FAIL_ATTACH_SURFACES;
6563  
6564  	if (dc_result == DC_OK)
6565  		dc_result = dc_validate_global_state(dc, dc_state, true);
6566  
6567  cleanup:
6568  	if (dc_state)
6569  		dc_release_state(dc_state);
6570  
6571  	if (dc_plane_state)
6572  		dc_plane_state_release(dc_plane_state);
6573  
6574  	return dc_result;
6575  }
6576  
6577  struct dc_stream_state *
6578  create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6579  				const struct drm_display_mode *drm_mode,
6580  				const struct dm_connector_state *dm_state,
6581  				const struct dc_stream_state *old_stream)
6582  {
6583  	struct drm_connector *connector = &aconnector->base;
6584  	struct amdgpu_device *adev = drm_to_adev(connector->dev);
6585  	struct dc_stream_state *stream;
6586  	const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
6587  	int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
6588  	enum dc_status dc_result = DC_OK;
6589  
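	/*
	 * Try building/validating the stream at the requested bpc first; on a
	 * validation failure, drop bpc by 2 (down to a minimum of 6) and retry.
	 */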
6590  	do {
6591  		stream = create_stream_for_sink(aconnector, drm_mode,
6592  						dm_state, old_stream,
6593  						requested_bpc);
6594  		if (stream == NULL) {
6595  			DRM_ERROR("Failed to create stream for sink!\n");
6596  			break;
6597  		}
6598  
6599  		dc_result = dc_validate_stream(adev->dm.dc, stream);
6600  		if (dc_result == DC_OK && stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
6601  			dc_result = dm_dp_mst_is_port_support_mode(aconnector, stream);
6602  
6603  		if (dc_result == DC_OK)
6604  			dc_result = dm_validate_stream_and_context(adev->dm.dc, stream);
6605  
6606  		if (dc_result != DC_OK) {
6607  			DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
6608  				      drm_mode->hdisplay,
6609  				      drm_mode->vdisplay,
6610  				      drm_mode->clock,
6611  				      dc_result,
6612  				      dc_status_to_str(dc_result));
6613  
6614  			dc_stream_release(stream);
6615  			stream = NULL;
6616  			requested_bpc -= 2; /* lower bpc to retry validation */
6617  		}
6618  
6619  	} while (stream == NULL && requested_bpc >= 6);
6620  
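	/*
	 * If encoder validation failed, retry once with YCbCr 4:2:0 forced,
	 * which lowers the bandwidth the encoder has to support.
	 */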
6621  	if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
6622  		DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
6623  
6624  		aconnector->force_yuv420_output = true;
6625  		stream = create_validate_stream_for_sink(aconnector, drm_mode,
6626  						dm_state, old_stream);
6627  		aconnector->force_yuv420_output = false;
6628  	}
6629  
6630  	return stream;
6631  }
6632  
6633  enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
6634  				   struct drm_display_mode *mode)
6635  {
6636  	int result = MODE_ERROR;
6637  	struct dc_sink *dc_sink;
6638  	/* TODO: Unhardcode stream count */
6639  	struct dc_stream_state *stream;
6640  	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6641  
6642  	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
6643  			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
6644  		return result;
6645  
6646  	/*
6647  	 * Only run this the first time mode_valid is called to initialize
6648  	 * EDID mgmt
6649  	 */
6650  	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
6651  		!aconnector->dc_em_sink)
6652  		handle_edid_mgmt(aconnector);
6653  
6654  	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
6655  
6656  	if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
6657  				aconnector->base.force != DRM_FORCE_ON) {
6658  		DRM_ERROR("dc_sink is NULL!\n");
6659  		goto fail;
6660  	}
6661  
6662  	drm_mode_set_crtcinfo(mode, 0);
6663  
6664  	stream = create_validate_stream_for_sink(aconnector, mode,
6665  						 to_dm_connector_state(connector->state),
6666  						 NULL);
6667  	if (stream) {
6668  		dc_stream_release(stream);
6669  		result = MODE_OK;
6670  	}
6671  
6672  fail:
6673  	/* TODO: error handling */
6674  	return result;
6675  }
6676  
6677  static int fill_hdr_info_packet(const struct drm_connector_state *state,
6678  				struct dc_info_packet *out)
6679  {
6680  	struct hdmi_drm_infoframe frame;
6681  	unsigned char buf[30]; /* 26 + 4 */
6682  	ssize_t len;
6683  	int ret, i;
6684  
6685  	memset(out, 0, sizeof(*out));
6686  
6687  	if (!state->hdr_output_metadata)
6688  		return 0;
6689  
6690  	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
6691  	if (ret)
6692  		return ret;
6693  
6694  	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
6695  	if (len < 0)
6696  		return (int)len;
6697  
6698  	/* Static metadata is a fixed 26 bytes + 4 byte header. */
6699  	if (len != 30)
6700  		return -EINVAL;
6701  
6702  	/* Prepare the infopacket for DC. */
6703  	switch (state->connector->connector_type) {
6704  	case DRM_MODE_CONNECTOR_HDMIA:
6705  		out->hb0 = 0x87; /* type */
6706  		out->hb1 = 0x01; /* version */
6707  		out->hb2 = 0x1A; /* length */
6708  		out->sb[0] = buf[3]; /* checksum */
6709  		i = 1;
6710  		break;
6711  
6712  	case DRM_MODE_CONNECTOR_DisplayPort:
6713  	case DRM_MODE_CONNECTOR_eDP:
6714  		out->hb0 = 0x00; /* sdp id, zero */
6715  		out->hb1 = 0x87; /* type */
6716  		out->hb2 = 0x1D; /* payload len - 1 */
6717  		out->hb3 = (0x13 << 2); /* sdp version */
6718  		out->sb[0] = 0x01; /* version */
6719  		out->sb[1] = 0x1A; /* length */
6720  		i = 2;
6721  		break;
6722  
6723  	default:
6724  		return -EINVAL;
6725  	}
6726  
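	/* Copy the 26-byte static metadata payload that follows the 4-byte infoframe header. */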
6727  	memcpy(&out->sb[i], &buf[4], 26);
6728  	out->valid = true;
6729  
6730  	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
6731  		       sizeof(out->sb), false);
6732  
6733  	return 0;
6734  }
6735  
6736  static int
6737  amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
6738  				 struct drm_atomic_state *state)
6739  {
6740  	struct drm_connector_state *new_con_state =
6741  		drm_atomic_get_new_connector_state(state, conn);
6742  	struct drm_connector_state *old_con_state =
6743  		drm_atomic_get_old_connector_state(state, conn);
6744  	struct drm_crtc *crtc = new_con_state->crtc;
6745  	struct drm_crtc_state *new_crtc_state;
6746  	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(conn);
6747  	int ret;
6748  
6749  	trace_amdgpu_dm_connector_atomic_check(new_con_state);
6750  
6751  	if (conn->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
6752  		ret = drm_dp_mst_root_conn_atomic_check(new_con_state, &aconn->mst_mgr);
6753  		if (ret < 0)
6754  			return ret;
6755  	}
6756  
6757  	if (!crtc)
6758  		return 0;
6759  
6760  	if (new_con_state->colorspace != old_con_state->colorspace) {
6761  		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
6762  		if (IS_ERR(new_crtc_state))
6763  			return PTR_ERR(new_crtc_state);
6764  
6765  		new_crtc_state->mode_changed = true;
6766  	}
6767  
6768  	if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
6769  		struct dc_info_packet hdr_infopacket;
6770  
6771  		ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
6772  		if (ret)
6773  			return ret;
6774  
6775  		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
6776  		if (IS_ERR(new_crtc_state))
6777  			return PTR_ERR(new_crtc_state);
6778  
6779  		/*
6780  		 * DC considers the stream backends changed if the
6781  		 * static metadata changes. Forcing the modeset also
6782  		 * gives a simple way for userspace to switch from
6783  		 * 8bpc to 10bpc when setting the metadata to enter
6784  		 * or exit HDR.
6785  		 *
6786  		 * Changing the static metadata after it's been
6787  		 * set is permissible, however. So only force a
6788  		 * modeset if we're entering or exiting HDR.
6789  		 */
6790  		new_crtc_state->mode_changed = new_crtc_state->mode_changed ||
6791  			!old_con_state->hdr_output_metadata ||
6792  			!new_con_state->hdr_output_metadata;
6793  	}
6794  
6795  	return 0;
6796  }
6797  
6798  static const struct drm_connector_helper_funcs
6799  amdgpu_dm_connector_helper_funcs = {
6800  	/*
6801  	 * If hotplugging a second, bigger display in FB console mode, bigger resolution
6802  	 * modes will be filtered by drm_mode_validate_size(), and those modes
6803  	 * are missing after the user starts lightdm. So we need to renew the modes
6804  	 * list in the get_modes callback, not just return the modes count.
6805  	 */
6806  	.get_modes = get_modes,
6807  	.mode_valid = amdgpu_dm_connector_mode_valid,
6808  	.atomic_check = amdgpu_dm_connector_atomic_check,
6809  };
6810  
6811  static void dm_encoder_helper_disable(struct drm_encoder *encoder)
6812  {
6813  
6814  }
6815  
6816  int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
6817  {
6818  	switch (display_color_depth) {
6819  	case COLOR_DEPTH_666:
6820  		return 6;
6821  	case COLOR_DEPTH_888:
6822  		return 8;
6823  	case COLOR_DEPTH_101010:
6824  		return 10;
6825  	case COLOR_DEPTH_121212:
6826  		return 12;
6827  	case COLOR_DEPTH_141414:
6828  		return 14;
6829  	case COLOR_DEPTH_161616:
6830  		return 16;
6831  	default:
6832  		break;
6833  	}
6834  	return 0;
6835  }
6836  
6837  static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
6838  					  struct drm_crtc_state *crtc_state,
6839  					  struct drm_connector_state *conn_state)
6840  {
6841  	struct drm_atomic_state *state = crtc_state->state;
6842  	struct drm_connector *connector = conn_state->connector;
6843  	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6844  	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
6845  	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
6846  	struct drm_dp_mst_topology_mgr *mst_mgr;
6847  	struct drm_dp_mst_port *mst_port;
6848  	struct drm_dp_mst_topology_state *mst_state;
6849  	enum dc_color_depth color_depth;
6850  	int clock, bpp = 0;
6851  	bool is_y420 = false;
6852  
6853  	if (!aconnector->mst_output_port)
6854  		return 0;
6855  
6856  	mst_port = aconnector->mst_output_port;
6857  	mst_mgr = &aconnector->mst_root->mst_mgr;
6858  
6859  	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
6860  		return 0;
6861  
6862  	mst_state = drm_atomic_get_mst_topology_state(state, mst_mgr);
6863  	if (IS_ERR(mst_state))
6864  		return PTR_ERR(mst_state);
6865  
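	/* pbn_div is the PBN capacity of one MST time slot; initialize it from the root link if unset. */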
6866  	if (!mst_state->pbn_div)
6867  		mst_state->pbn_div = dm_mst_get_pbn_divider(aconnector->mst_root->dc_link);
6868  
6869  	if (!state->duplicated) {
6870  		int max_bpc = conn_state->max_requested_bpc;
6871  
6872  		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
6873  			  aconnector->force_yuv420_output;
6874  		color_depth = convert_color_depth_from_display_info(connector,
6875  								    is_y420,
6876  								    max_bpc);
6877  		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
6878  		clock = adjusted_mode->clock;
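		/* PBN (Payload Bandwidth Number) is the MST bandwidth unit derived from pixel clock and bpp. */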
6879  		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
6880  	}
6881  
6882  	dm_new_connector_state->vcpi_slots =
6883  		drm_dp_atomic_find_time_slots(state, mst_mgr, mst_port,
6884  					      dm_new_connector_state->pbn);
6885  	if (dm_new_connector_state->vcpi_slots < 0) {
6886  		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
6887  		return dm_new_connector_state->vcpi_slots;
6888  	}
6889  	return 0;
6890  }
6891  
6892  const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
6893  	.disable = dm_encoder_helper_disable,
6894  	.atomic_check = dm_encoder_helper_atomic_check
6895  };
6896  
6897  static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
6898  					    struct dc_state *dc_state,
6899  					    struct dsc_mst_fairness_vars *vars)
6900  {
6901  	struct dc_stream_state *stream = NULL;
6902  	struct drm_connector *connector;
6903  	struct drm_connector_state *new_con_state;
6904  	struct amdgpu_dm_connector *aconnector;
6905  	struct dm_connector_state *dm_conn_state;
6906  	int i, j, ret;
6907  	int vcpi, pbn_div, pbn, slot_num = 0;
6908  
6909  	for_each_new_connector_in_state(state, connector, new_con_state, i) {
6910  
6911  		aconnector = to_amdgpu_dm_connector(connector);
6912  
6913  		if (!aconnector->mst_output_port)
6914  			continue;
6915  
6916  		if (!new_con_state || !new_con_state->crtc)
6917  			continue;
6918  
6919  		dm_conn_state = to_dm_connector_state(new_con_state);
6920  
6921  		for (j = 0; j < dc_state->stream_count; j++) {
6922  			stream = dc_state->streams[j];
6923  			if (!stream)
6924  				continue;
6925  
6926  			if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
6927  				break;
6928  
6929  			stream = NULL;
6930  		}
6931  
6932  		if (!stream)
6933  			continue;
6934  
6935  		pbn_div = dm_mst_get_pbn_divider(stream->link);
6936  		/* pbn is calculated by compute_mst_dsc_configs_for_state() */
6937  		for (j = 0; j < dc_state->stream_count; j++) {
6938  			if (vars[j].aconnector == aconnector) {
6939  				pbn = vars[j].pbn;
6940  				break;
6941  			}
6942  		}
6943  
6944  		if (j == dc_state->stream_count)
6945  			continue;
6946  
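		/* Convert the stream's PBN into the number of MST time slots it needs on the link. */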
6947  		slot_num = DIV_ROUND_UP(pbn, pbn_div);
6948  
6949  		if (stream->timing.flags.DSC != 1) {
6950  			dm_conn_state->pbn = pbn;
6951  			dm_conn_state->vcpi_slots = slot_num;
6952  
6953  			ret = drm_dp_mst_atomic_enable_dsc(state, aconnector->mst_output_port,
6954  							   dm_conn_state->pbn, false);
6955  			if (ret < 0)
6956  				return ret;
6957  
6958  			continue;
6959  		}
6960  
6961  		vcpi = drm_dp_mst_atomic_enable_dsc(state, aconnector->mst_output_port, pbn, true);
6962  		if (vcpi < 0)
6963  			return vcpi;
6964  
6965  		dm_conn_state->pbn = pbn;
6966  		dm_conn_state->vcpi_slots = vcpi;
6967  	}
6968  	return 0;
6969  }
6970  
6971  static int to_drm_connector_type(enum signal_type st)
6972  {
6973  	switch (st) {
6974  	case SIGNAL_TYPE_HDMI_TYPE_A:
6975  		return DRM_MODE_CONNECTOR_HDMIA;
6976  	case SIGNAL_TYPE_EDP:
6977  		return DRM_MODE_CONNECTOR_eDP;
6978  	case SIGNAL_TYPE_LVDS:
6979  		return DRM_MODE_CONNECTOR_LVDS;
6980  	case SIGNAL_TYPE_RGB:
6981  		return DRM_MODE_CONNECTOR_VGA;
6982  	case SIGNAL_TYPE_DISPLAY_PORT:
6983  	case SIGNAL_TYPE_DISPLAY_PORT_MST:
6984  		return DRM_MODE_CONNECTOR_DisplayPort;
6985  	case SIGNAL_TYPE_DVI_DUAL_LINK:
6986  	case SIGNAL_TYPE_DVI_SINGLE_LINK:
6987  		return DRM_MODE_CONNECTOR_DVID;
6988  	case SIGNAL_TYPE_VIRTUAL:
6989  		return DRM_MODE_CONNECTOR_VIRTUAL;
6990  
6991  	default:
6992  		return DRM_MODE_CONNECTOR_Unknown;
6993  	}
6994  }
6995  
6996  static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
6997  {
6998  	struct drm_encoder *encoder;
6999  
7000  	/* There is only one encoder per connector */
7001  	drm_connector_for_each_possible_encoder(connector, encoder)
7002  		return encoder;
7003  
7004  	return NULL;
7005  }
7006  
7007  static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
7008  {
7009  	struct drm_encoder *encoder;
7010  	struct amdgpu_encoder *amdgpu_encoder;
7011  
7012  	encoder = amdgpu_dm_connector_to_encoder(connector);
7013  
7014  	if (encoder == NULL)
7015  		return;
7016  
7017  	amdgpu_encoder = to_amdgpu_encoder(encoder);
7018  
7019  	amdgpu_encoder->native_mode.clock = 0;
7020  
7021  	if (!list_empty(&connector->probed_modes)) {
7022  		struct drm_display_mode *preferred_mode = NULL;
7023  
7024  		list_for_each_entry(preferred_mode,
7025  				    &connector->probed_modes,
7026  				    head) {
7027  			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
7028  				amdgpu_encoder->native_mode = *preferred_mode;
7029  
7030  			break;
7031  		}
7032  
7033  	}
7034  }
7035  
7036  static struct drm_display_mode *
7037  amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
7038  			     char *name,
7039  			     int hdisplay, int vdisplay)
7040  {
7041  	struct drm_device *dev = encoder->dev;
7042  	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7043  	struct drm_display_mode *mode = NULL;
7044  	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7045  
7046  	mode = drm_mode_duplicate(dev, native_mode);
7047  
7048  	if (mode == NULL)
7049  		return NULL;
7050  
7051  	mode->hdisplay = hdisplay;
7052  	mode->vdisplay = vdisplay;
7053  	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7054  	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
7055  
7056  	return mode;
7057  
7058  }
7059  
7060  static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
7061  						 struct drm_connector *connector)
7062  {
7063  	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7064  	struct drm_display_mode *mode = NULL;
7065  	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7066  	struct amdgpu_dm_connector *amdgpu_dm_connector =
7067  				to_amdgpu_dm_connector(connector);
7068  	int i;
7069  	int n;
7070  	struct mode_size {
7071  		char name[DRM_DISPLAY_MODE_LEN];
7072  		int w;
7073  		int h;
7074  	} common_modes[] = {
7075  		{  "640x480",  640,  480},
7076  		{  "800x600",  800,  600},
7077  		{ "1024x768", 1024,  768},
7078  		{ "1280x720", 1280,  720},
7079  		{ "1280x800", 1280,  800},
7080  		{"1280x1024", 1280, 1024},
7081  		{ "1440x900", 1440,  900},
7082  		{"1680x1050", 1680, 1050},
7083  		{"1600x1200", 1600, 1200},
7084  		{"1920x1080", 1920, 1080},
7085  		{"1920x1200", 1920, 1200}
7086  	};
7087  
7088  	n = ARRAY_SIZE(common_modes);
7089  
7090  	for (i = 0; i < n; i++) {
7091  		struct drm_display_mode *curmode = NULL;
7092  		bool mode_existed = false;
7093  
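		/* Skip modes larger than the native mode and the native mode itself. */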
7094  		if (common_modes[i].w > native_mode->hdisplay ||
7095  		    common_modes[i].h > native_mode->vdisplay ||
7096  		   (common_modes[i].w == native_mode->hdisplay &&
7097  		    common_modes[i].h == native_mode->vdisplay))
7098  			continue;
7099  
7100  		list_for_each_entry(curmode, &connector->probed_modes, head) {
7101  			if (common_modes[i].w == curmode->hdisplay &&
7102  			    common_modes[i].h == curmode->vdisplay) {
7103  				mode_existed = true;
7104  				break;
7105  			}
7106  		}
7107  
7108  		if (mode_existed)
7109  			continue;
7110  
7111  		mode = amdgpu_dm_create_common_mode(encoder,
7112  				common_modes[i].name, common_modes[i].w,
7113  				common_modes[i].h);
7114  		if (!mode)
7115  			continue;
7116  
7117  		drm_mode_probed_add(connector, mode);
7118  		amdgpu_dm_connector->num_modes++;
7119  	}
7120  }
7121  
7122  static void amdgpu_set_panel_orientation(struct drm_connector *connector)
7123  {
7124  	struct drm_encoder *encoder;
7125  	struct amdgpu_encoder *amdgpu_encoder;
7126  	const struct drm_display_mode *native_mode;
7127  
7128  	if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
7129  	    connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
7130  		return;
7131  
7132  	mutex_lock(&connector->dev->mode_config.mutex);
7133  	amdgpu_dm_connector_get_modes(connector);
7134  	mutex_unlock(&connector->dev->mode_config.mutex);
7135  
7136  	encoder = amdgpu_dm_connector_to_encoder(connector);
7137  	if (!encoder)
7138  		return;
7139  
7140  	amdgpu_encoder = to_amdgpu_encoder(encoder);
7141  
7142  	native_mode = &amdgpu_encoder->native_mode;
7143  	if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0)
7144  		return;
7145  
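	/*
	 * drm_connector_set_panel_orientation_with_quirk() consults the panel
	 * orientation quirk table using the native mode dimensions.
	 */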
7146  	drm_connector_set_panel_orientation_with_quirk(connector,
7147  						       DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
7148  						       native_mode->hdisplay,
7149  						       native_mode->vdisplay);
7150  }
7151  
7152  static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
7153  					      struct edid *edid)
7154  {
7155  	struct amdgpu_dm_connector *amdgpu_dm_connector =
7156  			to_amdgpu_dm_connector(connector);
7157  
7158  	if (edid) {
7159  		/* empty probed_modes */
7160  		INIT_LIST_HEAD(&connector->probed_modes);
7161  		amdgpu_dm_connector->num_modes =
7162  				drm_add_edid_modes(connector, edid);
7163  
7164  		/* Sort the probed modes before calling
7165  		 * amdgpu_dm_get_native_mode() since an EDID can have
7166  		 * more than one preferred mode. Modes that appear
7167  		 * later in the probed mode list could be of a higher,
7168  		 * preferred resolution. For example, 3840x2160 in the
7169  		 * base EDID preferred timing and 4096x2160 as the
7170  		 * preferred resolution in a DID extension block later.
7171  		 */
7172  		drm_mode_sort(&connector->probed_modes);
7173  		amdgpu_dm_get_native_mode(connector);
7174  
7175  		/* Freesync capabilities are reset by calling
7176  		 * drm_add_edid_modes() and need to be
7177  		 * restored here.
7178  		 */
7179  		amdgpu_dm_update_freesync_caps(connector, edid);
7180  	} else {
7181  		amdgpu_dm_connector->num_modes = 0;
7182  	}
7183  }
7184  
7185  static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
7186  			      struct drm_display_mode *mode)
7187  {
7188  	struct drm_display_mode *m;
7189  
7190  	list_for_each_entry(m, &aconnector->base.probed_modes, head) {
7191  		if (drm_mode_equal(m, mode))
7192  			return true;
7193  	}
7194  
7195  	return false;
7196  }
7197  
7198  static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
7199  {
7200  	const struct drm_display_mode *m;
7201  	struct drm_display_mode *new_mode;
7202  	uint i;
7203  	u32 new_modes_count = 0;
7204  
7205  	/* Standard FPS values
7206  	 *
7207  	 * 23.976       - TV/NTSC
7208  	 * 24           - Cinema
7209  	 * 25           - TV/PAL
7210  	 * 29.97        - TV/NTSC
7211  	 * 30           - TV/NTSC
7212  	 * 48           - Cinema HFR
7213  	 * 50           - TV/PAL
7214  	 * 60           - Commonly used
7215  	 * 48,72,96,120 - Multiples of 24
7216  	 */
7217  	static const u32 common_rates[] = {
7218  		23976, 24000, 25000, 29970, 30000,
7219  		48000, 50000, 60000, 72000, 96000, 120000
7220  	};
7221  
7222  	/*
7223  	 * Find mode with highest refresh rate with the same resolution
7224  	 * as the preferred mode. Some monitors report a preferred mode
7225  	 * with lower resolution than the highest refresh rate supported.
7226  	 */
7227  
7228  	m = get_highest_refresh_rate_mode(aconnector, true);
7229  	if (!m)
7230  		return 0;
7231  
7232  	for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
7233  		u64 target_vtotal, target_vtotal_diff;
7234  		u64 num, den;
7235  
7236  		if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
7237  			continue;
7238  
7239  		if (common_rates[i] < aconnector->min_vfreq * 1000 ||
7240  		    common_rates[i] > aconnector->max_vfreq * 1000)
7241  			continue;
7242  
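		/*
		 * Keep the pixel clock and htotal fixed and solve for the vtotal
		 * that produces the target refresh rate:
		 *   vtotal = clock / (refresh_rate * htotal)
		 */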
7243  		num = (unsigned long long)m->clock * 1000 * 1000;
7244  		den = common_rates[i] * (unsigned long long)m->htotal;
7245  		target_vtotal = div_u64(num, den);
7246  		target_vtotal_diff = target_vtotal - m->vtotal;
7247  
7248  		/* Check for illegal modes */
7249  		if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
7250  		    m->vsync_end + target_vtotal_diff < m->vsync_start ||
7251  		    m->vtotal + target_vtotal_diff < m->vsync_end)
7252  			continue;
7253  
7254  		new_mode = drm_mode_duplicate(aconnector->base.dev, m);
7255  		if (!new_mode)
7256  			goto out;
7257  
7258  		new_mode->vtotal += (u16)target_vtotal_diff;
7259  		new_mode->vsync_start += (u16)target_vtotal_diff;
7260  		new_mode->vsync_end += (u16)target_vtotal_diff;
7261  		new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7262  		new_mode->type |= DRM_MODE_TYPE_DRIVER;
7263  
7264  		if (!is_duplicate_mode(aconnector, new_mode)) {
7265  			drm_mode_probed_add(&aconnector->base, new_mode);
7266  			new_modes_count += 1;
7267  		} else
7268  			drm_mode_destroy(aconnector->base.dev, new_mode);
7269  	}
7270   out:
7271  	return new_modes_count;
7272  }
7273  
7274  static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
7275  						   struct edid *edid)
7276  {
7277  	struct amdgpu_dm_connector *amdgpu_dm_connector =
7278  		to_amdgpu_dm_connector(connector);
7279  
7280  	if (!edid)
7281  		return;
7282  
7283  	if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
7284  		amdgpu_dm_connector->num_modes +=
7285  			add_fs_modes(amdgpu_dm_connector);
7286  }
7287  
7288  static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
7289  {
7290  	struct amdgpu_dm_connector *amdgpu_dm_connector =
7291  			to_amdgpu_dm_connector(connector);
7292  	struct drm_encoder *encoder;
7293  	struct edid *edid = amdgpu_dm_connector->edid;
7294  	struct dc_link_settings *verified_link_cap =
7295  			&amdgpu_dm_connector->dc_link->verified_link_cap;
7296  	const struct dc *dc = amdgpu_dm_connector->dc_link->dc;
7297  
7298  	encoder = amdgpu_dm_connector_to_encoder(connector);
7299  
7300  	if (!drm_edid_is_valid(edid)) {
7301  		amdgpu_dm_connector->num_modes =
7302  				drm_add_modes_noedid(connector, 640, 480);
7303  		if (dc->link_srv->dp_get_encoding_format(verified_link_cap) == DP_128b_132b_ENCODING)
7304  			amdgpu_dm_connector->num_modes +=
7305  				drm_add_modes_noedid(connector, 1920, 1080);
7306  	} else {
7307  		amdgpu_dm_connector_ddc_get_modes(connector, edid);
7308  		amdgpu_dm_connector_add_common_modes(encoder, connector);
7309  		amdgpu_dm_connector_add_freesync_modes(connector, edid);
7310  	}
7311  	amdgpu_dm_fbc_init(connector);
7312  
7313  	return amdgpu_dm_connector->num_modes;
7314  }
7315  
7316  static const u32 supported_colorspaces =
7317  	BIT(DRM_MODE_COLORIMETRY_BT709_YCC) |
7318  	BIT(DRM_MODE_COLORIMETRY_OPRGB) |
7319  	BIT(DRM_MODE_COLORIMETRY_BT2020_RGB) |
7320  	BIT(DRM_MODE_COLORIMETRY_BT2020_YCC);
7321  
7322  void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
7323  				     struct amdgpu_dm_connector *aconnector,
7324  				     int connector_type,
7325  				     struct dc_link *link,
7326  				     int link_index)
7327  {
7328  	struct amdgpu_device *adev = drm_to_adev(dm->ddev);
7329  
7330  	/*
7331  	 * Some of the properties below require access to state, like bpc.
7332  	 * Allocate some default initial connector state with our reset helper.
7333  	 */
7334  	if (aconnector->base.funcs->reset)
7335  		aconnector->base.funcs->reset(&aconnector->base);
7336  
7337  	aconnector->connector_id = link_index;
7338  	aconnector->bl_idx = -1;
7339  	aconnector->dc_link = link;
7340  	aconnector->base.interlace_allowed = false;
7341  	aconnector->base.doublescan_allowed = false;
7342  	aconnector->base.stereo_allowed = false;
7343  	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
7344  	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
7345  	aconnector->audio_inst = -1;
7346  	aconnector->pack_sdp_v1_3 = false;
7347  	aconnector->as_type = ADAPTIVE_SYNC_TYPE_NONE;
7348  	memset(&aconnector->vsdb_info, 0, sizeof(aconnector->vsdb_info));
7349  	mutex_init(&aconnector->hpd_lock);
7350  	mutex_init(&aconnector->handle_mst_msg_ready);
7351  
7352  	/*
7353  	 * Configure HPD hot plug support. The default value of connector->polled
7354  	 * is 0, which means HPD hot plug is not supported.
7355  	 */
7356  	switch (connector_type) {
7357  	case DRM_MODE_CONNECTOR_HDMIA:
7358  		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7359  		aconnector->base.ycbcr_420_allowed =
7360  			link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
7361  		break;
7362  	case DRM_MODE_CONNECTOR_DisplayPort:
7363  		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7364  		link->link_enc = link_enc_cfg_get_link_enc(link);
7365  		ASSERT(link->link_enc);
7366  		if (link->link_enc)
7367  			aconnector->base.ycbcr_420_allowed =
7368  			link->link_enc->features.dp_ycbcr420_supported ? true : false;
7369  		break;
7370  	case DRM_MODE_CONNECTOR_DVID:
7371  		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7372  		break;
7373  	default:
7374  		break;
7375  	}
7376  
7377  	drm_object_attach_property(&aconnector->base.base,
7378  				dm->ddev->mode_config.scaling_mode_property,
7379  				DRM_MODE_SCALE_NONE);
7380  
7381  	drm_object_attach_property(&aconnector->base.base,
7382  				adev->mode_info.underscan_property,
7383  				UNDERSCAN_OFF);
7384  	drm_object_attach_property(&aconnector->base.base,
7385  				adev->mode_info.underscan_hborder_property,
7386  				0);
7387  	drm_object_attach_property(&aconnector->base.base,
7388  				adev->mode_info.underscan_vborder_property,
7389  				0);
7390  
7391  	if (!aconnector->mst_root)
7392  		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
7393  
7394  	aconnector->base.state->max_bpc = 16;
7395  	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
7396  
7397  	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
7398  	    (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
7399  		drm_object_attach_property(&aconnector->base.base,
7400  				adev->mode_info.abm_level_property, 0);
7401  	}
7402  
7403  	if (connector_type == DRM_MODE_CONNECTOR_HDMIA) {
7404  		if (!drm_mode_create_hdmi_colorspace_property(&aconnector->base, supported_colorspaces))
7405  			drm_connector_attach_colorspace_property(&aconnector->base);
7406  	} else if ((connector_type == DRM_MODE_CONNECTOR_DisplayPort && !aconnector->mst_root) ||
7407  		   connector_type == DRM_MODE_CONNECTOR_eDP) {
7408  		if (!drm_mode_create_dp_colorspace_property(&aconnector->base, supported_colorspaces))
7409  			drm_connector_attach_colorspace_property(&aconnector->base);
7410  	}
7411  
7412  	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
7413  	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
7414  	    connector_type == DRM_MODE_CONNECTOR_eDP) {
7415  		drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
7416  
7417  		if (!aconnector->mst_root)
7418  			drm_connector_attach_vrr_capable_property(&aconnector->base);
7419  
7420  		if (adev->dm.hdcp_workqueue)
7421  			drm_connector_attach_content_protection_property(&aconnector->base, true);
7422  	}
7423  }
7424  
7425  static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
7426  			      struct i2c_msg *msgs, int num)
7427  {
7428  	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
7429  	struct ddc_service *ddc_service = i2c->ddc_service;
7430  	struct i2c_command cmd;
7431  	int i;
7432  	int result = -EIO;
7433  
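	/* Translate the i2c_msg array into a DC i2c_command and submit it over the link's DDC channel. */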
7434  	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
7435  
7436  	if (!cmd.payloads)
7437  		return result;
7438  
7439  	cmd.number_of_payloads = num;
7440  	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
7441  	cmd.speed = 100;
7442  
7443  	for (i = 0; i < num; i++) {
7444  		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
7445  		cmd.payloads[i].address = msgs[i].addr;
7446  		cmd.payloads[i].length = msgs[i].len;
7447  		cmd.payloads[i].data = msgs[i].buf;
7448  	}
7449  
7450  	if (dc_submit_i2c(
7451  			ddc_service->ctx->dc,
7452  			ddc_service->link->link_index,
7453  			&cmd))
7454  		result = num;
7455  
7456  	kfree(cmd.payloads);
7457  	return result;
7458  }
7459  
7460  static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
7461  {
7462  	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
7463  }
7464  
7465  static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
7466  	.master_xfer = amdgpu_dm_i2c_xfer,
7467  	.functionality = amdgpu_dm_i2c_func,
7468  };
7469  
7470  static struct amdgpu_i2c_adapter *
7471  create_i2c(struct ddc_service *ddc_service,
7472  	   int link_index,
7473  	   int *res)
7474  {
7475  	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
7476  	struct amdgpu_i2c_adapter *i2c;
7477  
7478  	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
7479  	if (!i2c)
7480  		return NULL;
7481  	i2c->base.owner = THIS_MODULE;
7482  	i2c->base.class = I2C_CLASS_DDC;
7483  	i2c->base.dev.parent = &adev->pdev->dev;
7484  	i2c->base.algo = &amdgpu_dm_i2c_algo;
7485  	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
7486  	i2c_set_adapdata(&i2c->base, i2c);
7487  	i2c->ddc_service = ddc_service;
7488  
7489  	return i2c;
7490  }
7491  
7492  
7493  /*
7494   * Note: this function assumes that dc_link_detect() was called for the
7495   * dc_link which will be represented by this aconnector.
7496   */
7497  static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
7498  				    struct amdgpu_dm_connector *aconnector,
7499  				    u32 link_index,
7500  				    struct amdgpu_encoder *aencoder)
7501  {
7502  	int res = 0;
7503  	int connector_type;
7504  	struct dc *dc = dm->dc;
7505  	struct dc_link *link = dc_get_link_at_index(dc, link_index);
7506  	struct amdgpu_i2c_adapter *i2c;
7507  
7508  	link->priv = aconnector;
7509  
7510  
7511  	i2c = create_i2c(link->ddc, link->link_index, &res);
7512  	if (!i2c) {
7513  		DRM_ERROR("Failed to create i2c adapter data\n");
7514  		return -ENOMEM;
7515  	}
7516  
7517  	aconnector->i2c = i2c;
7518  	res = i2c_add_adapter(&i2c->base);
7519  
7520  	if (res) {
7521  		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
7522  		goto out_free;
7523  	}
7524  
7525  	connector_type = to_drm_connector_type(link->connector_signal);
7526  
7527  	res = drm_connector_init_with_ddc(
7528  			dm->ddev,
7529  			&aconnector->base,
7530  			&amdgpu_dm_connector_funcs,
7531  			connector_type,
7532  			&i2c->base);
7533  
7534  	if (res) {
7535  		DRM_ERROR("connector_init failed\n");
7536  		aconnector->connector_id = -1;
7537  		goto out_free;
7538  	}
7539  
7540  	drm_connector_helper_add(
7541  			&aconnector->base,
7542  			&amdgpu_dm_connector_helper_funcs);
7543  
7544  	amdgpu_dm_connector_init_helper(
7545  		dm,
7546  		aconnector,
7547  		connector_type,
7548  		link,
7549  		link_index);
7550  
7551  	drm_connector_attach_encoder(
7552  		&aconnector->base, &aencoder->base);
7553  
7554  	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
7555  		|| connector_type == DRM_MODE_CONNECTOR_eDP)
7556  		amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
7557  
7558  out_free:
7559  	if (res) {
7560  		kfree(i2c);
7561  		aconnector->i2c = NULL;
7562  	}
7563  	return res;
7564  }
7565  
7566  int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
7567  {
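	/* Return a bitmask with one bit set for each CRTC the encoder can drive. */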
7568  	switch (adev->mode_info.num_crtc) {
7569  	case 1:
7570  		return 0x1;
7571  	case 2:
7572  		return 0x3;
7573  	case 3:
7574  		return 0x7;
7575  	case 4:
7576  		return 0xf;
7577  	case 5:
7578  		return 0x1f;
7579  	case 6:
7580  	default:
7581  		return 0x3f;
7582  	}
7583  }
7584  
7585  static int amdgpu_dm_encoder_init(struct drm_device *dev,
7586  				  struct amdgpu_encoder *aencoder,
7587  				  uint32_t link_index)
7588  {
7589  	struct amdgpu_device *adev = drm_to_adev(dev);
7590  
7591  	int res = drm_encoder_init(dev,
7592  				   &aencoder->base,
7593  				   &amdgpu_dm_encoder_funcs,
7594  				   DRM_MODE_ENCODER_TMDS,
7595  				   NULL);
7596  
7597  	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
7598  
7599  	if (!res)
7600  		aencoder->encoder_id = link_index;
7601  	else
7602  		aencoder->encoder_id = -1;
7603  
7604  	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
7605  
7606  	return res;
7607  }
7608  
7609  static void manage_dm_interrupts(struct amdgpu_device *adev,
7610  				 struct amdgpu_crtc *acrtc,
7611  				 bool enable)
7612  {
7613  	/*
7614  	 * We have no guarantee that the frontend index maps to the same
7615  	 * backend index - some even map to more than one.
7616  	 *
7617  	 * TODO: Use a different interrupt or check DC itself for the mapping.
7618  	 */
7619  	int irq_type =
7620  		amdgpu_display_crtc_idx_to_irq_type(
7621  			adev,
7622  			acrtc->crtc_id);
7623  
7624  	if (enable) {
7625  		drm_crtc_vblank_on(&acrtc->base);
7626  		amdgpu_irq_get(
7627  			adev,
7628  			&adev->pageflip_irq,
7629  			irq_type);
7630  #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7631  		amdgpu_irq_get(
7632  			adev,
7633  			&adev->vline0_irq,
7634  			irq_type);
7635  #endif
7636  	} else {
7637  #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7638  		amdgpu_irq_put(
7639  			adev,
7640  			&adev->vline0_irq,
7641  			irq_type);
7642  #endif
7643  		amdgpu_irq_put(
7644  			adev,
7645  			&adev->pageflip_irq,
7646  			irq_type);
7647  		drm_crtc_vblank_off(&acrtc->base);
7648  	}
7649  }
7650  
7651  static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
7652  				      struct amdgpu_crtc *acrtc)
7653  {
7654  	int irq_type =
7655  		amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
7656  
7657  	/*
7658  	 * This reads the current state for the IRQ and forcibly reapplies
7659  	 * the setting to hardware.
7660  	 */
7661  	amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
7662  }
7663  
7664  static bool
7665  is_scaling_state_different(const struct dm_connector_state *dm_state,
7666  			   const struct dm_connector_state *old_dm_state)
7667  {
7668  	if (dm_state->scaling != old_dm_state->scaling)
7669  		return true;
7670  	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
7671  		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
7672  			return true;
7673  	} else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
7674  		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
7675  			return true;
7676  	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
7677  		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
7678  		return true;
7679  	return false;
7680  }
7681  
7682  static bool is_content_protection_different(struct drm_crtc_state *new_crtc_state,
7683  					    struct drm_crtc_state *old_crtc_state,
7684  					    struct drm_connector_state *new_conn_state,
7685  					    struct drm_connector_state *old_conn_state,
7686  					    const struct drm_connector *connector,
7687  					    struct hdcp_workqueue *hdcp_w)
7688  {
7689  	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7690  	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
7691  
7692  	pr_debug("[HDCP_DM] connector->index: %x connect_status: %x dpms: %x\n",
7693  		connector->index, connector->status, connector->dpms);
7694  	pr_debug("[HDCP_DM] state protection old: %x new: %x\n",
7695  		old_conn_state->content_protection, new_conn_state->content_protection);
7696  
7697  	if (old_crtc_state)
7698  		pr_debug("[HDCP_DM] old crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
7699  		old_crtc_state->enable,
7700  		old_crtc_state->active,
7701  		old_crtc_state->mode_changed,
7702  		old_crtc_state->active_changed,
7703  		old_crtc_state->connectors_changed);
7704  
7705  	if (new_crtc_state)
7706  		pr_debug("[HDCP_DM] NEW crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
7707  		new_crtc_state->enable,
7708  		new_crtc_state->active,
7709  		new_crtc_state->mode_changed,
7710  		new_crtc_state->active_changed,
7711  		new_crtc_state->connectors_changed);
7712  
7713  	/* hdcp content type change */
7714  	if (old_conn_state->hdcp_content_type != new_conn_state->hdcp_content_type &&
7715  	    new_conn_state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
7716  		new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7717  		pr_debug("[HDCP_DM] Type0/1 change %s :true\n", __func__);
7718  		return true;
7719  	}
7720  
7721  	/* CP is being re-enabled, ignore this */
7722  	if (old_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
7723  	    new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
7724  		if (new_crtc_state && new_crtc_state->mode_changed) {
7725  			new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7726  			pr_debug("[HDCP_DM] ENABLED->DESIRED & mode_changed %s :true\n", __func__);
7727  			return true;
7728  		}
7729  		new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
7730  		pr_debug("[HDCP_DM] ENABLED -> DESIRED %s :false\n", __func__);
7731  		return false;
7732  	}
7733  
7734  	/* S3 resume case: the old state will always be 0 (UNDESIRED) and the restored state will be ENABLED.
7735  	 *
7736  	 * Handles:	UNDESIRED -> ENABLED
7737  	 */
7738  	if (old_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
7739  	    new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
7740  		new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7741  
7742  	/* Stream removed and re-enabled
7743  	 *
7744  	 * Can sometimes overlap with the HPD case,
7745  	 * thus set update_hdcp to false to avoid
7746  	 * setting HDCP multiple times.
7747  	 *
7748  	 * Handles:	DESIRED -> DESIRED (Special case)
7749  	 */
7750  	if (!(old_conn_state->crtc && old_conn_state->crtc->enabled) &&
7751  		new_conn_state->crtc && new_conn_state->crtc->enabled &&
7752  		connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
7753  		dm_con_state->update_hdcp = false;
7754  		pr_debug("[HDCP_DM] DESIRED->DESIRED (Stream removed and re-enabled) %s :true\n",
7755  			__func__);
7756  		return true;
7757  	}
7758  
7759  	/* Hot-plug, headless s3, dpms
7760  	 *
7761  	 * Only start HDCP if the display is connected/enabled.
7762  	 * update_hdcp flag will be set to false until the next
7763  	 * HPD comes in.
7764  	 *
7765  	 * Handles:	DESIRED -> DESIRED (Special case)
7766  	 */
7767  	if (dm_con_state->update_hdcp &&
7768  	new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
7769  	connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
7770  		dm_con_state->update_hdcp = false;
7771  		pr_debug("[HDCP_DM] DESIRED->DESIRED (Hot-plug, headless s3, dpms) %s :true\n",
7772  			__func__);
7773  		return true;
7774  	}
7775  
7776  	if (old_conn_state->content_protection == new_conn_state->content_protection) {
7777  		if (new_conn_state->content_protection >= DRM_MODE_CONTENT_PROTECTION_DESIRED) {
7778  			if (new_crtc_state && new_crtc_state->mode_changed) {
7779  				pr_debug("[HDCP_DM] DESIRED->DESIRED or ENABLE->ENABLE mode_change %s :true\n",
7780  					__func__);
7781  				return true;
7782  			}
7783  			pr_debug("[HDCP_DM] DESIRED->DESIRED & ENABLE->ENABLE %s :false\n",
7784  				__func__);
7785  			return false;
7786  		}
7787  
7788  		pr_debug("[HDCP_DM] UNDESIRED->UNDESIRED %s :false\n", __func__);
7789  		return false;
7790  	}
7791  
7792  	if (new_conn_state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED) {
7793  		pr_debug("[HDCP_DM] UNDESIRED->DESIRED or DESIRED->UNDESIRED or ENABLED->UNDESIRED %s :true\n",
7794  			__func__);
7795  		return true;
7796  	}
7797  
7798  	pr_debug("[HDCP_DM] DESIRED->ENABLED %s :false\n", __func__);
7799  	return false;
7800  }
7801  
7802  static void remove_stream(struct amdgpu_device *adev,
7803  			  struct amdgpu_crtc *acrtc,
7804  			  struct dc_stream_state *stream)
7805  {
7806  	/* this is the update mode case */
7807  
7808  	acrtc->otg_inst = -1;
7809  	acrtc->enabled = false;
7810  }
7811  
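/*
 * Stash the pending page-flip event from the CRTC state and mark the flip as
 * submitted so the pageflip interrupt handler can complete it later.
 * Must be called with the DRM event_lock held.
 */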
7812  static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
7813  {
7814  
7815  	assert_spin_locked(&acrtc->base.dev->event_lock);
7816  	WARN_ON(acrtc->event);
7817  
7818  	acrtc->event = acrtc->base.state->event;
7819  
7820  	/* Set the flip status */
7821  	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
7822  
7823  	/* Mark this event as consumed */
7824  	acrtc->base.state->event = NULL;
7825  
7826  	DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
7827  		     acrtc->crtc_id);
7828  }
7829  
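/*
 * Rebuild the VRR infopacket for the stream on a flip, run the pre-flip
 * FreeSync handling, and record whether the infopacket changed so the caller
 * can push a stream update. Runs under the DRM event_lock.
 */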
7830  static void update_freesync_state_on_stream(
7831  	struct amdgpu_display_manager *dm,
7832  	struct dm_crtc_state *new_crtc_state,
7833  	struct dc_stream_state *new_stream,
7834  	struct dc_plane_state *surface,
7835  	u32 flip_timestamp_in_us)
7836  {
7837  	struct mod_vrr_params vrr_params;
7838  	struct dc_info_packet vrr_infopacket = {0};
7839  	struct amdgpu_device *adev = dm->adev;
7840  	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
7841  	unsigned long flags;
7842  	bool pack_sdp_v1_3 = false;
7843  	struct amdgpu_dm_connector *aconn;
7844  	enum vrr_packet_type packet_type = PACKET_TYPE_VRR;
7845  
7846  	if (!new_stream)
7847  		return;
7848  
7849  	/*
7850  	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
7851  	 * For now it's sufficient to just guard against these conditions.
7852  	 */
7853  
7854  	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7855  		return;
7856  
7857  	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7858  	vrr_params = acrtc->dm_irq_params.vrr_params;
7859  
7860  	if (surface) {
7861  		mod_freesync_handle_preflip(
7862  			dm->freesync_module,
7863  			surface,
7864  			new_stream,
7865  			flip_timestamp_in_us,
7866  			&vrr_params);
7867  
7868  		if (adev->family < AMDGPU_FAMILY_AI &&
7869  		    amdgpu_dm_crtc_vrr_active(new_crtc_state)) {
7870  			mod_freesync_handle_v_update(dm->freesync_module,
7871  						     new_stream, &vrr_params);
7872  
7873  			/* Need to call this before the frame ends. */
7874  			dc_stream_adjust_vmin_vmax(dm->dc,
7875  						   new_crtc_state->stream,
7876  						   &vrr_params.adjust);
7877  		}
7878  	}
7879  
7880  	aconn = (struct amdgpu_dm_connector *)new_stream->dm_stream_context;
7881  
7882  	if (aconn && (aconn->as_type == FREESYNC_TYPE_PCON_IN_WHITELIST || aconn->vsdb_info.replay_mode)) {
7883  		pack_sdp_v1_3 = aconn->pack_sdp_v1_3;
7884  
7885  		if (aconn->vsdb_info.amd_vsdb_version == 1)
7886  			packet_type = PACKET_TYPE_FS_V1;
7887  		else if (aconn->vsdb_info.amd_vsdb_version == 2)
7888  			packet_type = PACKET_TYPE_FS_V2;
7889  		else if (aconn->vsdb_info.amd_vsdb_version == 3)
7890  			packet_type = PACKET_TYPE_FS_V3;
7891  
7892  		mod_build_adaptive_sync_infopacket(new_stream, aconn->as_type, NULL,
7893  					&new_stream->adaptive_sync_infopacket);
7894  	}
7895  
7896  	mod_freesync_build_vrr_infopacket(
7897  		dm->freesync_module,
7898  		new_stream,
7899  		&vrr_params,
7900  		packet_type,
7901  		TRANSFER_FUNC_UNKNOWN,
7902  		&vrr_infopacket,
7903  		pack_sdp_v1_3);
7904  
7905  	new_crtc_state->freesync_vrr_info_changed |=
7906  		(memcmp(&new_crtc_state->vrr_infopacket,
7907  			&vrr_infopacket,
7908  			sizeof(vrr_infopacket)) != 0);
7909  
7910  	acrtc->dm_irq_params.vrr_params = vrr_params;
7911  	new_crtc_state->vrr_infopacket = vrr_infopacket;
7912  
7913  	new_stream->vrr_infopacket = vrr_infopacket;
7914  	new_stream->allow_freesync = mod_freesync_get_freesync_enabled(&vrr_params);
7915  
7916  	if (new_crtc_state->freesync_vrr_info_changed)
7917  		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
7918  			      new_crtc_state->base.crtc->base.id,
7919  			      (int)new_crtc_state->base.vrr_enabled,
7920  			      (int)vrr_params.state);
7921  
7922  	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
7923  }
7924  
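/*
 * Recompute the FreeSync/VRR parameters for the new CRTC state and copy them
 * into dm_irq_params so the DM IRQ handlers see a consistent snapshot.
 */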
7925  static void update_stream_irq_parameters(
7926  	struct amdgpu_display_manager *dm,
7927  	struct dm_crtc_state *new_crtc_state)
7928  {
7929  	struct dc_stream_state *new_stream = new_crtc_state->stream;
7930  	struct mod_vrr_params vrr_params;
7931  	struct mod_freesync_config config = new_crtc_state->freesync_config;
7932  	struct amdgpu_device *adev = dm->adev;
7933  	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
7934  	unsigned long flags;
7935  
7936  	if (!new_stream)
7937  		return;
7938  
7939  	/*
7940  	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
7941  	 * For now it's sufficient to just guard against these conditions.
7942  	 */
7943  	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7944  		return;
7945  
7946  	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7947  	vrr_params = acrtc->dm_irq_params.vrr_params;
7948  
7949  	if (new_crtc_state->vrr_supported &&
7950  	    config.min_refresh_in_uhz &&
7951  	    config.max_refresh_in_uhz) {
7952  		/*
7953  		 * if freesync compatible mode was set, config.state will be set
7954  		 * in atomic check
7955  		 */
7956  		if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
7957  		    (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
7958  		     new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
7959  			vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
7960  			vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
7961  			vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
7962  			vrr_params.state = VRR_STATE_ACTIVE_FIXED;
7963  		} else {
7964  			config.state = new_crtc_state->base.vrr_enabled ?
7965  						     VRR_STATE_ACTIVE_VARIABLE :
7966  						     VRR_STATE_INACTIVE;
7967  		}
7968  	} else {
7969  		config.state = VRR_STATE_UNSUPPORTED;
7970  	}
7971  
7972  	mod_freesync_build_vrr_params(dm->freesync_module,
7973  				      new_stream,
7974  				      &config, &vrr_params);
7975  
7976  	new_crtc_state->freesync_config = config;
7977  	/* Copy state for access from DM IRQ handler */
7978  	acrtc->dm_irq_params.freesync_config = config;
7979  	acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
7980  	acrtc->dm_irq_params.vrr_params = vrr_params;
7981  	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
7982  }
7983  
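/*
 * Take or drop the vblank reference and the vupdate interrupt when VRR is
 * switched on or off for a CRTC.
 */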
7984  static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
7985  					    struct dm_crtc_state *new_state)
7986  {
7987  	bool old_vrr_active = amdgpu_dm_crtc_vrr_active(old_state);
7988  	bool new_vrr_active = amdgpu_dm_crtc_vrr_active(new_state);
7989  
7990  	if (!old_vrr_active && new_vrr_active) {
7991  		/* Transition VRR inactive -> active:
7992  		 * While VRR is active, we must not disable vblank irq, as a
7993  		 * reenable after disable would compute bogus vblank/pflip
7994  		 * timestamps if it likely happened inside display front-porch.
7995  		 *
7996  		 * We also need vupdate irq for the actual core vblank handling
7997  		 * at end of vblank.
7998  		 */
7999  		WARN_ON(amdgpu_dm_crtc_set_vupdate_irq(new_state->base.crtc, true) != 0);
8000  		WARN_ON(drm_crtc_vblank_get(new_state->base.crtc) != 0);
8001  		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
8002  				 __func__, new_state->base.crtc->base.id);
8003  	} else if (old_vrr_active && !new_vrr_active) {
8004  		/* Transition VRR active -> inactive:
8005  		 * Allow vblank irq disable again for fixed refresh rate.
8006  		 */
8007  		WARN_ON(amdgpu_dm_crtc_set_vupdate_irq(new_state->base.crtc, false) != 0);
8008  		drm_crtc_vblank_put(new_state->base.crtc);
8009  		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
8010  				 __func__, new_state->base.crtc->base.id);
8011  	}
8012  }
8013  
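/*
 * Re-commit the cursor planes in the atomic state by replaying the cursor
 * update for each cursor plane.
 */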
8014  static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
8015  {
8016  	struct drm_plane *plane;
8017  	struct drm_plane_state *old_plane_state;
8018  	int i;
8019  
8020  	/*
8021  	 * TODO: Make this per-stream so we don't issue redundant updates for
8022  	 * commits with multiple streams.
8023  	 */
8024  	for_each_old_plane_in_state(state, plane, old_plane_state, i)
8025  		if (plane->type == DRM_PLANE_TYPE_CURSOR)
8026  			amdgpu_dm_plane_handle_cursor_update(plane, old_plane_state);
8027  }
8028  
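/* Return the TTM memory type backing the framebuffer's BO, or 0 if unbound. */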
8029  static inline uint32_t get_mem_type(struct drm_framebuffer *fb)
8030  {
8031  	struct amdgpu_bo *abo = gem_to_amdgpu_bo(fb->obj[0]);
8032  
8033  	return abo->tbo.resource ? abo->tbo.resource->mem_type : 0;
8034  }
8035  
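/*
 * Program all plane updates for a single CRTC: build the surface and stream
 * update bundle, throttle page flips against the target vblank, arm the
 * pageflip event, and manage PSR entry/exit around the update.
 */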
8036  static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
8037  				    struct drm_device *dev,
8038  				    struct amdgpu_display_manager *dm,
8039  				    struct drm_crtc *pcrtc,
8040  				    bool wait_for_vblank)
8041  {
8042  	u32 i;
8043  	u64 timestamp_ns = ktime_get_ns();
8044  	struct drm_plane *plane;
8045  	struct drm_plane_state *old_plane_state, *new_plane_state;
8046  	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
8047  	struct drm_crtc_state *new_pcrtc_state =
8048  			drm_atomic_get_new_crtc_state(state, pcrtc);
8049  	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
8050  	struct dm_crtc_state *dm_old_crtc_state =
8051  			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
8052  	int planes_count = 0, vpos, hpos;
8053  	unsigned long flags;
8054  	u32 target_vblank, last_flip_vblank;
8055  	bool vrr_active = amdgpu_dm_crtc_vrr_active(acrtc_state);
8056  	bool cursor_update = false;
8057  	bool pflip_present = false;
8058  	bool dirty_rects_changed = false;
8059  	struct {
8060  		struct dc_surface_update surface_updates[MAX_SURFACES];
8061  		struct dc_plane_info plane_infos[MAX_SURFACES];
8062  		struct dc_scaling_info scaling_infos[MAX_SURFACES];
8063  		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
8064  		struct dc_stream_update stream_update;
8065  	} *bundle;
8066  
8067  	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8068  
8069  	if (!bundle) {
8070  		dm_error("Failed to allocate update bundle\n");
8071  		goto cleanup;
8072  	}
8073  
8074  	/*
8075  	 * Disable the cursor first if we're disabling all the planes.
8076  	 * It'll remain on the screen after the planes are re-enabled
8077  	 * if we don't.
8078  	 */
8079  	if (acrtc_state->active_planes == 0)
8080  		amdgpu_dm_commit_cursors(state);
8081  
8082  	/* update planes when needed */
8083  	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8084  		struct drm_crtc *crtc = new_plane_state->crtc;
8085  		struct drm_crtc_state *new_crtc_state;
8086  		struct drm_framebuffer *fb = new_plane_state->fb;
8087  		struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
8088  		bool plane_needs_flip;
8089  		struct dc_plane_state *dc_plane;
8090  		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
8091  
8092  		/* Cursor plane is handled after stream updates */
8093  		if (plane->type == DRM_PLANE_TYPE_CURSOR) {
8094  			if ((fb && crtc == pcrtc) ||
8095  			    (old_plane_state->fb && old_plane_state->crtc == pcrtc))
8096  				cursor_update = true;
8097  
8098  			continue;
8099  		}
8100  
8101  		if (!fb || !crtc || pcrtc != crtc)
8102  			continue;
8103  
8104  		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
8105  		if (!new_crtc_state->active)
8106  			continue;
8107  
8108  		dc_plane = dm_new_plane_state->dc_state;
8109  		if (!dc_plane)
8110  			continue;
8111  
8112  		bundle->surface_updates[planes_count].surface = dc_plane;
8113  		if (new_pcrtc_state->color_mgmt_changed) {
8114  			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
8115  			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
8116  			bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
8117  		}
8118  
8119  		amdgpu_dm_plane_fill_dc_scaling_info(dm->adev, new_plane_state,
8120  				     &bundle->scaling_infos[planes_count]);
8121  
8122  		bundle->surface_updates[planes_count].scaling_info =
8123  			&bundle->scaling_infos[planes_count];
8124  
8125  		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
8126  
8127  		pflip_present = pflip_present || plane_needs_flip;
8128  
8129  		if (!plane_needs_flip) {
8130  			planes_count += 1;
8131  			continue;
8132  		}
8133  
8134  		fill_dc_plane_info_and_addr(
8135  			dm->adev, new_plane_state,
8136  			afb->tiling_flags,
8137  			&bundle->plane_infos[planes_count],
8138  			&bundle->flip_addrs[planes_count].address,
8139  			afb->tmz_surface, false);
8140  
8141  		drm_dbg_state(state->dev, "plane: id=%d dcc_en=%d\n",
8142  				 new_plane_state->plane->index,
8143  				 bundle->plane_infos[planes_count].dcc.enable);
8144  
8145  		bundle->surface_updates[planes_count].plane_info =
8146  			&bundle->plane_infos[planes_count];
8147  
8148  		if (acrtc_state->stream->link->psr_settings.psr_feature_enabled ||
8149  		    acrtc_state->stream->link->replay_settings.replay_feature_enabled) {
8150  			fill_dc_dirty_rects(plane, old_plane_state,
8151  					    new_plane_state, new_crtc_state,
8152  					    &bundle->flip_addrs[planes_count],
8153  					    &dirty_rects_changed);
8154  
8155  			/*
8156  			 * If the dirty regions changed, PSR-SU needs to be disabled temporarily
8157  			 * and re-enabled once the dirty regions are stable, to avoid video glitches.
8158  			 * PSR-SU will be re-enabled in vblank_control_worker() if the user pauses
8159  			 * the video while PSR-SU is disabled.
8160  			 */
8161  			if (acrtc_state->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 &&
8162  			    acrtc_attach->dm_irq_params.allow_psr_entry &&
8163  #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
8164  			    !amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) &&
8165  #endif
8166  			    dirty_rects_changed) {
8167  				mutex_lock(&dm->dc_lock);
8168  				acrtc_state->stream->link->psr_settings.psr_dirty_rects_change_timestamp_ns =
8169  				timestamp_ns;
8170  				if (acrtc_state->stream->link->psr_settings.psr_allow_active)
8171  					amdgpu_dm_psr_disable(acrtc_state->stream);
8172  				mutex_unlock(&dm->dc_lock);
8173  			}
8174  		}
8175  
8176  		/*
8177  		 * Only allow immediate flips for fast updates that don't
8178  		 * change memory domain, FB pitch, DCC state, rotation or
8179  		 * mirroring.
8180  		 *
8181  		 * dm_crtc_helper_atomic_check() only accepts async flips with
8182  		 * fast updates.
8183  		 */
8184  		if (crtc->state->async_flip &&
8185  		    (acrtc_state->update_type != UPDATE_TYPE_FAST ||
8186  		     get_mem_type(old_plane_state->fb) != get_mem_type(fb)))
8187  			drm_warn_once(state->dev,
8188  				      "[PLANE:%d:%s] async flip with non-fast update\n",
8189  				      plane->base.id, plane->name);
8190  
8191  		bundle->flip_addrs[planes_count].flip_immediate =
8192  			crtc->state->async_flip &&
8193  			acrtc_state->update_type == UPDATE_TYPE_FAST &&
8194  			get_mem_type(old_plane_state->fb) == get_mem_type(fb);
8195  
8196  		timestamp_ns = ktime_get_ns();
8197  		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
8198  		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
8199  		bundle->surface_updates[planes_count].surface = dc_plane;
8200  
8201  		if (!bundle->surface_updates[planes_count].surface) {
8202  			DRM_ERROR("No surface for CRTC: id=%d\n",
8203  					acrtc_attach->crtc_id);
8204  			continue;
8205  		}
8206  
8207  		if (plane == pcrtc->primary)
8208  			update_freesync_state_on_stream(
8209  				dm,
8210  				acrtc_state,
8211  				acrtc_state->stream,
8212  				dc_plane,
8213  				bundle->flip_addrs[planes_count].flip_timestamp_in_us);
8214  
8215  		drm_dbg_state(state->dev, "%s Flipping to hi: 0x%x, low: 0x%x\n",
8216  				 __func__,
8217  				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
8218  				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
8219  
8220  		planes_count += 1;
8221  
8222  	}
8223  
8224  	if (pflip_present) {
8225  		if (!vrr_active) {
8226  			/* Use old throttling in non-vrr fixed refresh rate mode
8227  			 * to keep flip scheduling based on target vblank counts
8228  			 * working in a backwards compatible way, e.g., for
8229  			 * clients using the GLX_OML_sync_control extension or
8230  			 * DRI3/Present extension with defined target_msc.
8231  			 */
8232  			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
8233  		} else {
8234  			/* For variable refresh rate mode only:
8235  			 * Get vblank of last completed flip to avoid > 1 vrr
8236  			 * flips per video frame by use of throttling, but allow
8237  			 * flip programming anywhere in the possibly large
8238  			 * variable vrr vblank interval for fine-grained flip
8239  			 * timing control and more opportunity to avoid stutter
8240  			 * on late submission of flips.
8241  			 */
8242  			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8243  			last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
8244  			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8245  		}
8246  
8247  		target_vblank = last_flip_vblank + wait_for_vblank;
8248  
8249  		/*
8250  		 * Wait until we're out of the vertical blank period before the one
8251  		 * targeted by the flip
8252  		 */
8253  		while ((acrtc_attach->enabled &&
8254  			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
8255  							    0, &vpos, &hpos, NULL,
8256  							    NULL, &pcrtc->hwmode)
8257  			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
8258  			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
8259  			(int)(target_vblank -
8260  			  amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
8261  			usleep_range(1000, 1100);
8262  		}
8263  
8264  		/**
8265  		 * Prepare the flip event for the pageflip interrupt to handle.
8266  		 *
8267  		 * This only works in the case where we've already turned on the
8268  		 * appropriate hardware blocks (e.g. HUBP), so in the transition case
8269  		 * from 0 -> n planes we have to skip a hardware-generated event
8270  		 * and rely on sending it from software.
8271  		 */
8272  		if (acrtc_attach->base.state->event &&
8273  		    acrtc_state->active_planes > 0) {
8274  			drm_crtc_vblank_get(pcrtc);
8275  
8276  			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8277  
8278  			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
8279  			prepare_flip_isr(acrtc_attach);
8280  
8281  			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8282  		}
8283  
8284  		if (acrtc_state->stream) {
8285  			if (acrtc_state->freesync_vrr_info_changed)
8286  				bundle->stream_update.vrr_infopacket =
8287  					&acrtc_state->stream->vrr_infopacket;
8288  		}
8289  	} else if (cursor_update && acrtc_state->active_planes > 0 &&
8290  		   acrtc_attach->base.state->event) {
8291  		drm_crtc_vblank_get(pcrtc);
8292  
8293  		spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8294  
8295  		acrtc_attach->event = acrtc_attach->base.state->event;
8296  		acrtc_attach->base.state->event = NULL;
8297  
8298  		spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8299  	}
8300  
8301  	/* Update the planes if changed or disable if we don't have any. */
8302  	if ((planes_count || acrtc_state->active_planes == 0) &&
8303  		acrtc_state->stream) {
8304  		/*
8305  		 * If PSR or idle optimizations are enabled then flush out
8306  		 * any pending work before hardware programming.
8307  		 */
8308  		if (dm->vblank_control_workqueue)
8309  			flush_workqueue(dm->vblank_control_workqueue);
8310  
8311  		bundle->stream_update.stream = acrtc_state->stream;
8312  		if (new_pcrtc_state->mode_changed) {
8313  			bundle->stream_update.src = acrtc_state->stream->src;
8314  			bundle->stream_update.dst = acrtc_state->stream->dst;
8315  		}
8316  
8317  		if (new_pcrtc_state->color_mgmt_changed) {
8318  			/*
8319  			 * TODO: This isn't fully correct since we've actually
8320  			 * already modified the stream in place.
8321  			 */
8322  			bundle->stream_update.gamut_remap =
8323  				&acrtc_state->stream->gamut_remap_matrix;
8324  			bundle->stream_update.output_csc_transform =
8325  				&acrtc_state->stream->csc_color_matrix;
8326  			bundle->stream_update.out_transfer_func =
8327  				acrtc_state->stream->out_transfer_func;
8328  		}
8329  
8330  		acrtc_state->stream->abm_level = acrtc_state->abm_level;
8331  		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
8332  			bundle->stream_update.abm_level = &acrtc_state->abm_level;
8333  
8334  		mutex_lock(&dm->dc_lock);
8335  		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8336  				acrtc_state->stream->link->psr_settings.psr_allow_active)
8337  			amdgpu_dm_psr_disable(acrtc_state->stream);
8338  		mutex_unlock(&dm->dc_lock);
8339  
8340  		/*
8341  		 * If FreeSync state on the stream has changed then we need to
8342  		 * re-adjust the min/max bounds now that DC doesn't handle this
8343  		 * as part of commit.
8344  		 */
8345  		if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
8346  			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8347  			dc_stream_adjust_vmin_vmax(
8348  				dm->dc, acrtc_state->stream,
8349  				&acrtc_attach->dm_irq_params.vrr_params.adjust);
8350  			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8351  		}
8352  		mutex_lock(&dm->dc_lock);
8353  		update_planes_and_stream_adapter(dm->dc,
8354  					 acrtc_state->update_type,
8355  					 planes_count,
8356  					 acrtc_state->stream,
8357  					 &bundle->stream_update,
8358  					 bundle->surface_updates);
8359  
8360  		/**
8361  		 * Enable or disable the interrupts on the backend.
8362  		 *
8363  		 * Most pipes are put into power gating when unused.
8364  		 *
8365  		 * When power gating is enabled on a pipe we lose the
8366  		 * interrupt enablement state when power gating is disabled.
8367  		 *
8368  		 * So we need to update the IRQ control state in hardware
8369  		 * whenever the pipe turns on (since it could be previously
8370  		 * power gated) or off (since some pipes can't be power gated
8371  		 * on some ASICs).
8372  		 */
8373  		if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
8374  			dm_update_pflip_irq_state(drm_to_adev(dev),
8375  						  acrtc_attach);
8376  
8377  		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8378  				acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
8379  				!acrtc_state->stream->link->psr_settings.psr_feature_enabled)
8380  			amdgpu_dm_link_setup_psr(acrtc_state->stream);
8381  
8382  		/* Decrement skip count when PSR is enabled and we're doing fast updates. */
8383  		if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
8384  		    acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
8385  			struct amdgpu_dm_connector *aconn =
8386  				(struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
8387  
8388  			if (aconn->psr_skip_count > 0)
8389  				aconn->psr_skip_count--;
8390  
8391  			/* Allow PSR when skip count is 0. */
8392  			acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;
8393  
8394  			/*
8395  			 * If sink supports PSR SU, there is no need to rely on
8396  			 * a vblank event disable request to enable PSR. PSR SU
8397  			 * can be enabled immediately once OS demonstrates an
8398  			 * adequate number of fast atomic commits to notify KMD
8399  			 * of update events. See `vblank_control_worker()`.
8400  			 */
8401  			if (acrtc_state->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 &&
8402  			    acrtc_attach->dm_irq_params.allow_psr_entry &&
8403  #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
8404  			    !amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) &&
8405  #endif
8406  			    !acrtc_state->stream->link->psr_settings.psr_allow_active &&
8407  			    (timestamp_ns -
8408  			    acrtc_state->stream->link->psr_settings.psr_dirty_rects_change_timestamp_ns) >
8409  			    500000000)
8410  				amdgpu_dm_psr_enable(acrtc_state->stream);
8411  		} else {
8412  			acrtc_attach->dm_irq_params.allow_psr_entry = false;
8413  		}
8414  
8415  		mutex_unlock(&dm->dc_lock);
8416  	}
8417  
8418  	/*
8419  	 * Update cursor state *after* programming all the planes.
8420  	 * This avoids redundant programming in the case where we're going
8421  	 * to be disabling a single plane - those pipes are being disabled.
8422  	 */
8423  	if (acrtc_state->active_planes)
8424  		amdgpu_dm_commit_cursors(state);
8425  
8426  cleanup:
8427  	kfree(bundle);
8428  }
8429  
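/*
 * Notify the audio component of ELD changes: signal removals for connectors
 * whose CRTC changed, then signal additions for connectors on modeset CRTCs.
 */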
8430  static void amdgpu_dm_commit_audio(struct drm_device *dev,
8431  				   struct drm_atomic_state *state)
8432  {
8433  	struct amdgpu_device *adev = drm_to_adev(dev);
8434  	struct amdgpu_dm_connector *aconnector;
8435  	struct drm_connector *connector;
8436  	struct drm_connector_state *old_con_state, *new_con_state;
8437  	struct drm_crtc_state *new_crtc_state;
8438  	struct dm_crtc_state *new_dm_crtc_state;
8439  	const struct dc_stream_status *status;
8440  	int i, inst;
8441  
8442  	/* Notify device removals. */
8443  	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8444  		if (old_con_state->crtc != new_con_state->crtc) {
8445  			/* CRTC changes require notification. */
8446  			goto notify;
8447  		}
8448  
8449  		if (!new_con_state->crtc)
8450  			continue;
8451  
8452  		new_crtc_state = drm_atomic_get_new_crtc_state(
8453  			state, new_con_state->crtc);
8454  
8455  		if (!new_crtc_state)
8456  			continue;
8457  
8458  		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8459  			continue;
8460  
8461  notify:
8462  		aconnector = to_amdgpu_dm_connector(connector);
8463  
8464  		mutex_lock(&adev->dm.audio_lock);
8465  		inst = aconnector->audio_inst;
8466  		aconnector->audio_inst = -1;
8467  		mutex_unlock(&adev->dm.audio_lock);
8468  
8469  		amdgpu_dm_audio_eld_notify(adev, inst);
8470  	}
8471  
8472  	/* Notify audio device additions. */
8473  	for_each_new_connector_in_state(state, connector, new_con_state, i) {
8474  		if (!new_con_state->crtc)
8475  			continue;
8476  
8477  		new_crtc_state = drm_atomic_get_new_crtc_state(
8478  			state, new_con_state->crtc);
8479  
8480  		if (!new_crtc_state)
8481  			continue;
8482  
8483  		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8484  			continue;
8485  
8486  		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8487  		if (!new_dm_crtc_state->stream)
8488  			continue;
8489  
8490  		status = dc_stream_get_status(new_dm_crtc_state->stream);
8491  		if (!status)
8492  			continue;
8493  
8494  		aconnector = to_amdgpu_dm_connector(connector);
8495  
8496  		mutex_lock(&adev->dm.audio_lock);
8497  		inst = status->audio_inst;
8498  		aconnector->audio_inst = inst;
8499  		mutex_unlock(&adev->dm.audio_lock);
8500  
8501  		amdgpu_dm_audio_eld_notify(adev, inst);
8502  	}
8503  }
8504  
8505  /*
8506   * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
8507   * @crtc_state: the DRM CRTC state
8508   * @stream_state: the DC stream state.
8509   *
8510   * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
8511   * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
8512   */
8513  static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
8514  						struct dc_stream_state *stream_state)
8515  {
8516  	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
8517  }
8518  
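/*
 * Commit the constructed dc_state to DC: tear down streams on CRTCs that are
 * being disabled or modeset, program the new streams, disable PSR across a
 * mode set/reset, and record the OTG instance for each active CRTC.
 */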
8519  static void amdgpu_dm_commit_streams(struct drm_atomic_state *state,
8520  					struct dc_state *dc_state)
8521  {
8522  	struct drm_device *dev = state->dev;
8523  	struct amdgpu_device *adev = drm_to_adev(dev);
8524  	struct amdgpu_display_manager *dm = &adev->dm;
8525  	struct drm_crtc *crtc;
8526  	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8527  	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8528  	bool mode_set_reset_required = false;
8529  	u32 i;
8530  
8531  	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
8532  				      new_crtc_state, i) {
8533  		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8534  
8535  		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8536  
8537  		if (old_crtc_state->active &&
8538  		    (!new_crtc_state->active ||
8539  		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8540  			manage_dm_interrupts(adev, acrtc, false);
8541  			dc_stream_release(dm_old_crtc_state->stream);
8542  		}
8543  	}
8544  
8545  	drm_atomic_helper_calc_timestamping_constants(state);
8546  
8547  	/* update changed items */
8548  	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8549  		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8550  
8551  		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8552  		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8553  
8554  		drm_dbg_state(state->dev,
8555  			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, planes_changed:%d, mode_changed:%d,active_changed:%d,connectors_changed:%d\n",
8556  			acrtc->crtc_id,
8557  			new_crtc_state->enable,
8558  			new_crtc_state->active,
8559  			new_crtc_state->planes_changed,
8560  			new_crtc_state->mode_changed,
8561  			new_crtc_state->active_changed,
8562  			new_crtc_state->connectors_changed);
8563  
8564  		/* Disable cursor if disabling crtc */
8565  		if (old_crtc_state->active && !new_crtc_state->active) {
8566  			struct dc_cursor_position position;
8567  
8568  			memset(&position, 0, sizeof(position));
8569  			mutex_lock(&dm->dc_lock);
8570  			dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
8571  			mutex_unlock(&dm->dc_lock);
8572  		}
8573  
8574  		/* Copy all transient state flags into dc state */
8575  		if (dm_new_crtc_state->stream) {
8576  			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
8577  							    dm_new_crtc_state->stream);
8578  		}
8579  
8580  		/* handles headless hotplug case, updating new_state and
8581  		 * aconnector as needed
8582  		 */
8583  
8584  		if (amdgpu_dm_crtc_modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
8585  
8586  			DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
8587  
8588  			if (!dm_new_crtc_state->stream) {
8589  				/*
8590  				 * This could happen because of issues with
8591  				 * delivery of userspace notifications.
8592  				 * In this case userspace tries to set a mode on
8593  				 * a display which is in fact disconnected, so
8594  				 * dc_sink is NULL on the aconnector.
8595  				 * We expect a mode reset to come soon.
8596  				 *
8597  				 * This can also happen when an unplug is done
8598  				 * during the resume sequence.
8599  				 *
8600  				 * In this case, we want to pretend we still
8601  				 * have a sink to keep the pipe running so that
8602  				 * hw state is consistent with the sw state.
8603  				 */
8604  				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8605  						__func__, acrtc->base.base.id);
8606  				continue;
8607  			}
8608  
8609  			if (dm_old_crtc_state->stream)
8610  				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8611  
8612  			pm_runtime_get_noresume(dev->dev);
8613  
8614  			acrtc->enabled = true;
8615  			acrtc->hw_mode = new_crtc_state->mode;
8616  			crtc->hwmode = new_crtc_state->mode;
8617  			mode_set_reset_required = true;
8618  		} else if (modereset_required(new_crtc_state)) {
8619  			DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
8620  			/* i.e. reset mode */
8621  			if (dm_old_crtc_state->stream)
8622  				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8623  
8624  			mode_set_reset_required = true;
8625  		}
8626  	} /* for_each_crtc_in_state() */
8627  
8628  	/* If there was a mode set or reset, disable eDP PSR */
8629  	if (mode_set_reset_required) {
8630  		if (dm->vblank_control_workqueue)
8631  			flush_workqueue(dm->vblank_control_workqueue);
8632  
8633  		amdgpu_dm_psr_disable_all(dm);
8634  	}
8635  
8636  	dm_enable_per_frame_crtc_master_sync(dc_state);
8637  	mutex_lock(&dm->dc_lock);
8638  	WARN_ON(!dc_commit_streams(dm->dc, dc_state->streams, dc_state->stream_count));
8639  
8640  	/* Allow idle optimization when vblank count is 0 for display off */
8641  	if (dm->active_vblank_irq_count == 0)
8642  		dc_allow_idle_optimizations(dm->dc, true);
8643  	mutex_unlock(&dm->dc_lock);
8644  
8645  	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8646  		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8647  
8648  		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8649  
8650  		if (dm_new_crtc_state->stream != NULL) {
8651  			const struct dc_stream_status *status =
8652  					dc_stream_get_status(dm_new_crtc_state->stream);
8653  
8654  			if (!status)
8655  				status = dc_stream_get_status_from_state(dc_state,
8656  									 dm_new_crtc_state->stream);
8657  			if (!status)
8658  				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
8659  			else
8660  				acrtc->otg_inst = status->primary_otg_inst;
8661  		}
8662  	}
8663  }
8664  
8665  /**
8666   * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
8667   * @state: The atomic state to commit
8668   *
8669   * This will tell DC to commit the constructed DC state from atomic_check,
8670   * programming the hardware. Any failure here implies a hardware failure, since
8671   * atomic check should have filtered anything non-kosher.
8672   */
8673  static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
8674  {
8675  	struct drm_device *dev = state->dev;
8676  	struct amdgpu_device *adev = drm_to_adev(dev);
8677  	struct amdgpu_display_manager *dm = &adev->dm;
8678  	struct dm_atomic_state *dm_state;
8679  	struct dc_state *dc_state = NULL;
8680  	u32 i, j;
8681  	struct drm_crtc *crtc;
8682  	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8683  	unsigned long flags;
8684  	bool wait_for_vblank = true;
8685  	struct drm_connector *connector;
8686  	struct drm_connector_state *old_con_state, *new_con_state;
8687  	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8688  	int crtc_disable_count = 0;
8689  
8690  	trace_amdgpu_dm_atomic_commit_tail_begin(state);
8691  
8692  	drm_atomic_helper_update_legacy_modeset_state(dev, state);
8693  	drm_dp_mst_atomic_wait_for_dependencies(state);
8694  
8695  	dm_state = dm_atomic_get_new_state(state);
8696  	if (dm_state && dm_state->context) {
8697  		dc_state = dm_state->context;
8698  		amdgpu_dm_commit_streams(state, dc_state);
8699  	}
8700  
8701  	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8702  		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8703  		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8704  		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8705  
8706  		if (!adev->dm.hdcp_workqueue)
8707  			continue;
8708  
8709  		pr_debug("[HDCP_DM] -------------- i : %x ----------\n", i);
8710  
8711  		if (!connector)
8712  			continue;
8713  
8714  		pr_debug("[HDCP_DM] connector->index: %x connect_status: %x dpms: %x\n",
8715  			connector->index, connector->status, connector->dpms);
8716  		pr_debug("[HDCP_DM] state protection old: %x new: %x\n",
8717  			old_con_state->content_protection, new_con_state->content_protection);
8718  
8719  		if (aconnector->dc_sink) {
8720  			if (aconnector->dc_sink->sink_signal != SIGNAL_TYPE_VIRTUAL &&
8721  				aconnector->dc_sink->sink_signal != SIGNAL_TYPE_NONE) {
8722  				pr_debug("[HDCP_DM] pipe_ctx dispname=%s\n",
8723  				aconnector->dc_sink->edid_caps.display_name);
8724  			}
8725  		}
8726  
8727  		new_crtc_state = NULL;
8728  		old_crtc_state = NULL;
8729  
8730  		if (acrtc) {
8731  			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8732  			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
8733  		}
8734  
8735  		if (old_crtc_state)
8736  			pr_debug("old crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
8737  			old_crtc_state->enable,
8738  			old_crtc_state->active,
8739  			old_crtc_state->mode_changed,
8740  			old_crtc_state->active_changed,
8741  			old_crtc_state->connectors_changed);
8742  
8743  		if (new_crtc_state)
8744  			pr_debug("NEW crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
8745  			new_crtc_state->enable,
8746  			new_crtc_state->active,
8747  			new_crtc_state->mode_changed,
8748  			new_crtc_state->active_changed,
8749  			new_crtc_state->connectors_changed);
8750  	}
8751  
8752  	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8753  		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8754  		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8755  		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8756  
8757  		if (!adev->dm.hdcp_workqueue)
8758  			continue;
8759  
8760  		new_crtc_state = NULL;
8761  		old_crtc_state = NULL;
8762  
8763  		if (acrtc) {
8764  			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8765  			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
8766  		}
8767  
8768  		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8769  
8770  		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
8771  		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
8772  			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
8773  			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8774  			dm_new_con_state->update_hdcp = true;
8775  			continue;
8776  		}
8777  
8778  		if (is_content_protection_different(new_crtc_state, old_crtc_state, new_con_state,
8779  											old_con_state, connector, adev->dm.hdcp_workqueue)) {
8780  			/* When a display is unplugged from an MST hub, the connector will
8781  			 * be destroyed within dm_dp_mst_connector_destroy. The connector's
8782  			 * hdcp properties, like type, undesired, desired, enabled,
8783  			 * will be lost. So, save the hdcp properties into hdcp_work within
8784  			 * amdgpu_dm_atomic_commit_tail. If the same display is
8785  			 * plugged back with the same display index, its hdcp properties
8786  			 * will be retrieved from hdcp_work within dm_dp_mst_get_modes
8787  			 */
8788  
8789  			bool enable_encryption = false;
8790  
8791  			if (new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED)
8792  				enable_encryption = true;
8793  
8794  			if (aconnector->dc_link && aconnector->dc_sink &&
8795  				aconnector->dc_link->type == dc_connection_mst_branch) {
8796  				struct hdcp_workqueue *hdcp_work = adev->dm.hdcp_workqueue;
8797  				struct hdcp_workqueue *hdcp_w =
8798  					&hdcp_work[aconnector->dc_link->link_index];
8799  
8800  				hdcp_w->hdcp_content_type[connector->index] =
8801  					new_con_state->hdcp_content_type;
8802  				hdcp_w->content_protection[connector->index] =
8803  					new_con_state->content_protection;
8804  			}
8805  
8806  			if (new_crtc_state && new_crtc_state->mode_changed &&
8807  				new_con_state->content_protection >= DRM_MODE_CONTENT_PROTECTION_DESIRED)
8808  				enable_encryption = true;
8809  
8810  			DRM_INFO("[HDCP_DM] hdcp_update_display enable_encryption = %x\n", enable_encryption);
8811  
8812  			hdcp_update_display(
8813  				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
8814  				new_con_state->hdcp_content_type, enable_encryption);
8815  		}
8816  	}
8817  
8818  	/* Handle connector state changes */
8819  	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8820  		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8821  		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8822  		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8823  		struct dc_surface_update *dummy_updates;
8824  		struct dc_stream_update stream_update;
8825  		struct dc_info_packet hdr_packet;
8826  		struct dc_stream_status *status = NULL;
8827  		bool abm_changed, hdr_changed, scaling_changed;
8828  
8829  		memset(&stream_update, 0, sizeof(stream_update));
8830  
8831  		if (acrtc) {
8832  			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8833  			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
8834  		}
8835  
8836  		/* Skip any modesets/resets */
8837  		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
8838  			continue;
8839  
8840  		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8841  		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8842  
8843  		scaling_changed = is_scaling_state_different(dm_new_con_state,
8844  							     dm_old_con_state);
8845  
8846  		abm_changed = dm_new_crtc_state->abm_level !=
8847  			      dm_old_crtc_state->abm_level;
8848  
8849  		hdr_changed =
8850  			!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
8851  
8852  		if (!scaling_changed && !abm_changed && !hdr_changed)
8853  			continue;
8854  
8855  		stream_update.stream = dm_new_crtc_state->stream;
8856  		if (scaling_changed) {
8857  			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
8858  					dm_new_con_state, dm_new_crtc_state->stream);
8859  
8860  			stream_update.src = dm_new_crtc_state->stream->src;
8861  			stream_update.dst = dm_new_crtc_state->stream->dst;
8862  		}
8863  
8864  		if (abm_changed) {
8865  			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
8866  
8867  			stream_update.abm_level = &dm_new_crtc_state->abm_level;
8868  		}
8869  
8870  		if (hdr_changed) {
8871  			fill_hdr_info_packet(new_con_state, &hdr_packet);
8872  			stream_update.hdr_static_metadata = &hdr_packet;
8873  		}
8874  
8875  		status = dc_stream_get_status(dm_new_crtc_state->stream);
8876  
8877  		if (WARN_ON(!status))
8878  			continue;
8879  
8880  		WARN_ON(!status->plane_count);
8881  
8882  		/*
8883  		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
8884  		 * Here we create an empty update on each plane.
8885  		 * To fix this, DC should permit updating only stream properties.
8886  		 */
8887  		dummy_updates = kzalloc(sizeof(struct dc_surface_update) * MAX_SURFACES, GFP_ATOMIC);
8888  		for (j = 0; j < status->plane_count; j++)
8889  			dummy_updates[j].surface = status->plane_states[0];
8890  
8891  
8892  		mutex_lock(&dm->dc_lock);
8893  		dc_update_planes_and_stream(dm->dc,
8894  					    dummy_updates,
8895  					    status->plane_count,
8896  					    dm_new_crtc_state->stream,
8897  					    &stream_update);
8898  		mutex_unlock(&dm->dc_lock);
8899  		kfree(dummy_updates);
8900  	}
8901  
8902  	/**
8903  	 * Enable interrupts for CRTCs that are newly enabled or went through
8904  	 * a modeset. It was intentionally deferred until after the front end
8905  	 * state was modified to wait until the OTG was on and so the IRQ
8906  	 * handlers didn't access stale or invalid state.
8907  	 */
8908  	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8909  		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8910  #ifdef CONFIG_DEBUG_FS
8911  		enum amdgpu_dm_pipe_crc_source cur_crc_src;
8912  #endif
8913  		/* Count number of newly disabled CRTCs for dropping PM refs later. */
8914  		if (old_crtc_state->active && !new_crtc_state->active)
8915  			crtc_disable_count++;
8916  
8917  		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8918  		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8919  
8920  		/* For freesync config update on crtc state and params for irq */
8921  		update_stream_irq_parameters(dm, dm_new_crtc_state);
8922  
8923  #ifdef CONFIG_DEBUG_FS
8924  		spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8925  		cur_crc_src = acrtc->dm_irq_params.crc_src;
8926  		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8927  #endif
8928  
8929  		if (new_crtc_state->active &&
8930  		    (!old_crtc_state->active ||
8931  		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8932  			dc_stream_retain(dm_new_crtc_state->stream);
8933  			acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
8934  			manage_dm_interrupts(adev, acrtc, true);
8935  		}
8936  		/* Handle vrr on->off / off->on transitions */
8937  		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state, dm_new_crtc_state);
8938  
8939  #ifdef CONFIG_DEBUG_FS
8940  		if (new_crtc_state->active &&
8941  		    (!old_crtc_state->active ||
8942  		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8943  			/**
8944  			 * Frontend may have changed so reapply the CRC capture
8945  			 * settings for the stream.
8946  			 */
8947  			if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
8948  #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8949  				if (amdgpu_dm_crc_window_is_activated(crtc)) {
8950  					spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8951  					acrtc->dm_irq_params.window_param.update_win = true;
8952  
8953  					/**
8954  					 * It takes 2 frames for HW to stably generate CRC when
8955  					 * resuming from suspend, so we set skip_frame_cnt to 2.
8956  					 */
8957  					acrtc->dm_irq_params.window_param.skip_frame_cnt = 2;
8958  					spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8959  				}
8960  #endif
8961  				if (amdgpu_dm_crtc_configure_crc_source(
8962  					crtc, dm_new_crtc_state, cur_crc_src))
8963  					DRM_DEBUG_DRIVER("Failed to configure crc source");
8964  			}
8965  		}
8966  #endif
8967  	}
8968  
8969  	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
8970  		if (new_crtc_state->async_flip)
8971  			wait_for_vblank = false;
8972  
8973  	/* update planes when needed per crtc*/
8974  	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
8975  		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8976  
8977  		if (dm_new_crtc_state->stream)
8978  			amdgpu_dm_commit_planes(state, dev, dm, crtc, wait_for_vblank);
8979  	}
8980  
8981  	/* Update audio instances for each connector. */
8982  	amdgpu_dm_commit_audio(dev, state);
8983  
8984  	/* restore the backlight level */
8985  	for (i = 0; i < dm->num_of_edps; i++) {
8986  		if (dm->backlight_dev[i] &&
8987  		    (dm->actual_brightness[i] != dm->brightness[i]))
8988  			amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
8989  	}
8990  
8991  	/*
8992  	 * send vblank event on all events not handled in flip and
8993  	 * mark consumed event for drm_atomic_helper_commit_hw_done
8994  	 */
8995  	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8996  	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8997  
8998  		if (new_crtc_state->event)
8999  			drm_send_event_locked(dev, &new_crtc_state->event->base);
9000  
9001  		new_crtc_state->event = NULL;
9002  	}
9003  	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9004  
9005  	/* Signal HW programming completion */
9006  	drm_atomic_helper_commit_hw_done(state);
9007  
9008  	if (wait_for_vblank)
9009  		drm_atomic_helper_wait_for_flip_done(dev, state);
9010  
9011  	drm_atomic_helper_cleanup_planes(dev, state);
9012  
9013  	/* Don't free the memory if we are hitting this as part of suspend.
9014  	 * This way we don't free any memory during suspend; see
9015  	 * amdgpu_bo_free_kernel().  The memory will be freed in the first
9016  	 * non-suspend modeset or when the driver is torn down.
9017  	 */
9018  	if (!adev->in_suspend) {
9019  		/* return the stolen vga memory back to VRAM */
9020  		if (!adev->mman.keep_stolen_vga_memory)
9021  			amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
9022  		amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
9023  	}
9024  
9025  	/*
9026  	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
9027  	 * so we can put the GPU into runtime suspend if we're not driving any
9028  	 * displays anymore
9029  	 */
9030  	for (i = 0; i < crtc_disable_count; i++)
9031  		pm_runtime_put_autosuspend(dev->dev);
9032  	pm_runtime_mark_last_busy(dev->dev);
9033  }
9034  
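/*
 * Build and commit a minimal atomic state (connector, CRTC and primary plane)
 * with mode_changed forced, to restore the previous display configuration.
 */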
9035  static int dm_force_atomic_commit(struct drm_connector *connector)
9036  {
9037  	int ret = 0;
9038  	struct drm_device *ddev = connector->dev;
9039  	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
9040  	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9041  	struct drm_plane *plane = disconnected_acrtc->base.primary;
9042  	struct drm_connector_state *conn_state;
9043  	struct drm_crtc_state *crtc_state;
9044  	struct drm_plane_state *plane_state;
9045  
9046  	if (!state)
9047  		return -ENOMEM;
9048  
9049  	state->acquire_ctx = ddev->mode_config.acquire_ctx;
9050  
9051  	/* Construct an atomic state to restore previous display setting */
9052  
9053  	/*
9054  	 * Attach connectors to drm_atomic_state
9055  	 */
9056  	conn_state = drm_atomic_get_connector_state(state, connector);
9057  
9058  	ret = PTR_ERR_OR_ZERO(conn_state);
9059  	if (ret)
9060  		goto out;
9061  
9062  	/* Attach crtc to drm_atomic_state*/
9063  	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
9064  
9065  	ret = PTR_ERR_OR_ZERO(crtc_state);
9066  	if (ret)
9067  		goto out;
9068  
9069  	/* force a restore */
9070  	crtc_state->mode_changed = true;
9071  
9072  	/* Attach plane to drm_atomic_state */
9073  	plane_state = drm_atomic_get_plane_state(state, plane);
9074  
9075  	ret = PTR_ERR_OR_ZERO(plane_state);
9076  	if (ret)
9077  		goto out;
9078  
9079  	/* Call commit internally with the state we just constructed */
9080  	ret = drm_atomic_commit(state);
9081  
9082  out:
9083  	drm_atomic_state_put(state);
9084  	if (ret)
9085  		DRM_ERROR("Restoring old state failed with %i\n", ret);
9086  
9087  	return ret;
9088  }
9089  
9090  /*
9091   * This function handles all cases when a mode set does not come upon hotplug.
9092   * This includes when a display is unplugged and then plugged back into the
9093   * same port, and when running without usermode desktop manager support.
9094   */
9095  void dm_restore_drm_connector_state(struct drm_device *dev,
9096  				    struct drm_connector *connector)
9097  {
9098  	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9099  	struct amdgpu_crtc *disconnected_acrtc;
9100  	struct dm_crtc_state *acrtc_state;
9101  
9102  	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
9103  		return;
9104  
9105  	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9106  	if (!disconnected_acrtc)
9107  		return;
9108  
9109  	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
9110  	if (!acrtc_state->stream)
9111  		return;
9112  
9113  	/*
9114  	 * If the previous sink is not released and different from the current,
9115  	 * we deduce we are in a state where we cannot rely on a usermode call
9116  	 * to turn on the display, so we do it here.
9117  	 */
9118  	if (acrtc_state->stream->sink != aconnector->dc_sink)
9119  		dm_force_atomic_commit(&aconnector->base);
9120  }
9121  
9122  /*
9123   * Grabs all modesetting locks to serialize against any blocking commits,
9124   * and waits for completion of all non-blocking commits.
9125   */
9126  static int do_aquire_global_lock(struct drm_device *dev,
9127  				 struct drm_atomic_state *state)
9128  {
9129  	struct drm_crtc *crtc;
9130  	struct drm_crtc_commit *commit;
9131  	long ret;
9132  
9133  	/*
9134  	 * Adding all modeset locks to acquire_ctx will
9135  	 * ensure that when the framework releases it, the
9136  	 * extra locks we are taking here will get released too.
9137  	 */
9138  	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
9139  	if (ret)
9140  		return ret;
9141  
9142  	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
9143  		spin_lock(&crtc->commit_lock);
9144  		commit = list_first_entry_or_null(&crtc->commit_list,
9145  				struct drm_crtc_commit, commit_entry);
9146  		if (commit)
9147  			drm_crtc_commit_get(commit);
9148  		spin_unlock(&crtc->commit_lock);
9149  
9150  		if (!commit)
9151  			continue;
9152  
9153  		/*
9154  		 * Make sure all pending HW programming has completed and
9155  		 * page flips are done
9156  		 */
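		/*
		 * Note: wait_for_completion_interruptible_timeout() returns
		 * > 0 on completion, 0 on timeout and < 0 if interrupted,
		 * which is what the checks below rely on; 10*HZ is roughly a
		 * ten second timeout.
		 */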
9157  		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
9158  
9159  		if (ret > 0)
9160  			ret = wait_for_completion_interruptible_timeout(
9161  					&commit->flip_done, 10*HZ);
9162  
9163  		if (ret == 0)
9164  			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n",
9165  				  crtc->base.id, crtc->name);
9166  
9167  		drm_crtc_commit_put(commit);
9168  	}
9169  
9170  	return ret < 0 ? ret : 0;
9171  }
9172  
9173  static void get_freesync_config_for_crtc(
9174  	struct dm_crtc_state *new_crtc_state,
9175  	struct dm_connector_state *new_con_state)
9176  {
9177  	struct mod_freesync_config config = {0};
9178  	struct amdgpu_dm_connector *aconnector =
9179  			to_amdgpu_dm_connector(new_con_state->base.connector);
9180  	struct drm_display_mode *mode = &new_crtc_state->base.mode;
9181  	int vrefresh = drm_mode_vrefresh(mode);
9182  	bool fs_vid_mode = false;
9183  
9184  	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
9185  					vrefresh >= aconnector->min_vfreq &&
9186  					vrefresh <= aconnector->max_vfreq;
9187  
9188  	if (new_crtc_state->vrr_supported) {
9189  		new_crtc_state->stream->ignore_msa_timing_param = true;
9190  		fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
9191  
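		/*
		 * The connector's vfreq range is in Hz while mod_freesync
		 * expects micro-Hz, hence the * 1000000 below (e.g. a
		 * 48-144 Hz panel becomes 48,000,000-144,000,000 uHz).
		 */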
9192  		config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
9193  		config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
9194  		config.vsif_supported = true;
9195  		config.btr = true;
9196  
9197  		if (fs_vid_mode) {
9198  			config.state = VRR_STATE_ACTIVE_FIXED;
9199  			config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
9200  			goto out;
9201  		} else if (new_crtc_state->base.vrr_enabled) {
9202  			config.state = VRR_STATE_ACTIVE_VARIABLE;
9203  		} else {
9204  			config.state = VRR_STATE_INACTIVE;
9205  		}
9206  	}
9207  out:
9208  	new_crtc_state->freesync_config = config;
9209  }
9210  
9211  static void reset_freesync_config_for_crtc(
9212  	struct dm_crtc_state *new_crtc_state)
9213  {
9214  	new_crtc_state->vrr_supported = false;
9215  
9216  	memset(&new_crtc_state->vrr_infopacket, 0,
9217  	       sizeof(new_crtc_state->vrr_infopacket));
9218  }
9219  
9220  static bool
9221  is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
9222  				 struct drm_crtc_state *new_crtc_state)
9223  {
9224  	const struct drm_display_mode *old_mode, *new_mode;
9225  
9226  	if (!old_crtc_state || !new_crtc_state)
9227  		return false;
9228  
9229  	old_mode = &old_crtc_state->mode;
9230  	new_mode = &new_crtc_state->mode;
9231  
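	/*
	 * The timing is considered unchanged for freesync purposes when the
	 * active area and horizontal timing are identical and only the
	 * vertical blanking placement (vtotal/vsync position) differs while
	 * the vsync pulse width stays the same - i.e. the kind of change a
	 * freesync video mode switch makes by adjusting the front porch.
	 */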
9232  	if (old_mode->clock       == new_mode->clock &&
9233  	    old_mode->hdisplay    == new_mode->hdisplay &&
9234  	    old_mode->vdisplay    == new_mode->vdisplay &&
9235  	    old_mode->htotal      == new_mode->htotal &&
9236  	    old_mode->vtotal      != new_mode->vtotal &&
9237  	    old_mode->hsync_start == new_mode->hsync_start &&
9238  	    old_mode->vsync_start != new_mode->vsync_start &&
9239  	    old_mode->hsync_end   == new_mode->hsync_end &&
9240  	    old_mode->vsync_end   != new_mode->vsync_end &&
9241  	    old_mode->hskew       == new_mode->hskew &&
9242  	    old_mode->vscan       == new_mode->vscan &&
9243  	    (old_mode->vsync_end - old_mode->vsync_start) ==
9244  	    (new_mode->vsync_end - new_mode->vsync_start))
9245  		return true;
9246  
9247  	return false;
9248  }
9249  
9250  static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state)
9251  {
9252  	u64 num, den, res;
9253  	struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
9254  
9255  	dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
9256  
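	/*
	 * Fixed refresh rate in uHz = mode clock (kHz) * 1000 * 1000000 /
	 * (htotal * vtotal). For example (illustrative numbers only), a mode
	 * with clock = 148500, htotal = 2200 and vtotal = 1125 yields
	 * 60,000,000 uHz, i.e. 60 Hz.
	 */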
9257  	num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
9258  	den = (unsigned long long)new_crtc_state->mode.htotal *
9259  	      (unsigned long long)new_crtc_state->mode.vtotal;
9260  
9261  	res = div_u64(num, den);
9262  	dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
9263  }
9264  
9265  static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
9266  			 struct drm_atomic_state *state,
9267  			 struct drm_crtc *crtc,
9268  			 struct drm_crtc_state *old_crtc_state,
9269  			 struct drm_crtc_state *new_crtc_state,
9270  			 bool enable,
9271  			 bool *lock_and_validation_needed)
9272  {
9273  	struct dm_atomic_state *dm_state = NULL;
9274  	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9275  	struct dc_stream_state *new_stream;
9276  	int ret = 0;
9277  
9278  	/*
9279  	 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
9280  	 * update changed items
9281  	 */
9282  	struct amdgpu_crtc *acrtc = NULL;
9283  	struct amdgpu_dm_connector *aconnector = NULL;
9284  	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
9285  	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
9286  
9287  	new_stream = NULL;
9288  
9289  	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9290  	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9291  	acrtc = to_amdgpu_crtc(crtc);
9292  	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
9293  
9294  	/* TODO This hack should go away */
9295  	if (aconnector && enable) {
9296  		/* Make sure fake sink is created in plug-in scenario */
9297  		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
9298  							    &aconnector->base);
9299  		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
9300  							    &aconnector->base);
9301  
9302  		if (IS_ERR(drm_new_conn_state)) {
9303  			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
9304  			goto fail;
9305  		}
9306  
9307  		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
9308  		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
9309  
9310  		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9311  			goto skip_modeset;
9312  
9313  		new_stream = create_validate_stream_for_sink(aconnector,
9314  							     &new_crtc_state->mode,
9315  							     dm_new_conn_state,
9316  							     dm_old_crtc_state->stream);
9317  
9318  		/*
9319  		 * We can have no stream on ACTION_SET if a display
9320  		 * was disconnected during S3; in this case it is not an
9321  		 * error, the OS will be updated after detection and
9322  		 * will do the right thing on the next atomic commit.
9323  		 */
9324  
9325  		if (!new_stream) {
9326  			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9327  					__func__, acrtc->base.base.id);
9328  			ret = -ENOMEM;
9329  			goto fail;
9330  		}
9331  
9332  		/*
9333  		 * TODO: Check VSDB bits to decide whether this should
9334  		 * be enabled or not.
9335  		 */
9336  		new_stream->triggered_crtc_reset.enabled =
9337  			dm->force_timing_sync;
9338  
9339  		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9340  
9341  		ret = fill_hdr_info_packet(drm_new_conn_state,
9342  					   &new_stream->hdr_static_metadata);
9343  		if (ret)
9344  			goto fail;
9345  
9346  		/*
9347  		 * If we already removed the old stream from the context
9348  		 * (and set the new stream to NULL) then we can't reuse
9349  		 * the old stream even if the stream and scaling are unchanged.
9350  		 * We'll hit the BUG_ON and black screen.
9351  		 *
9352  		 * TODO: Refactor this function to allow this check to work
9353  		 * in all conditions.
9354  		 */
9355  		if (dm_new_crtc_state->stream &&
9356  		    is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
9357  			goto skip_modeset;
9358  
9359  		if (dm_new_crtc_state->stream &&
9360  		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
9361  		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
9362  			new_crtc_state->mode_changed = false;
9363  			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
9364  					 new_crtc_state->mode_changed);
9365  		}
9366  	}
9367  
9368  	/* mode_changed flag may get updated above, need to check again */
9369  	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9370  		goto skip_modeset;
9371  
9372  	drm_dbg_state(state->dev,
9373  		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, planes_changed:%d, mode_changed:%d,active_changed:%d,connectors_changed:%d\n",
9374  		acrtc->crtc_id,
9375  		new_crtc_state->enable,
9376  		new_crtc_state->active,
9377  		new_crtc_state->planes_changed,
9378  		new_crtc_state->mode_changed,
9379  		new_crtc_state->active_changed,
9380  		new_crtc_state->connectors_changed);
9381  
9382  	/* Remove stream for any changed/disabled CRTC */
9383  	if (!enable) {
9384  
9385  		if (!dm_old_crtc_state->stream)
9386  			goto skip_modeset;
9387  
9388  		/* Unset freesync video if it was active before */
9389  		if (dm_old_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED) {
9390  			dm_new_crtc_state->freesync_config.state = VRR_STATE_INACTIVE;
9391  			dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = 0;
9392  		}
9393  
9394  		/* Now check if we should set freesync video mode */
9395  		if (dm_new_crtc_state->stream &&
9396  		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
9397  		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream) &&
9398  		    is_timing_unchanged_for_freesync(new_crtc_state,
9399  						     old_crtc_state)) {
9400  			new_crtc_state->mode_changed = false;
9401  			DRM_DEBUG_DRIVER(
9402  				"Mode change not required for front porch change, setting mode_changed to %d",
9403  				new_crtc_state->mode_changed);
9404  
9405  			set_freesync_fixed_config(dm_new_crtc_state);
9406  
9407  			goto skip_modeset;
9408  		} else if (aconnector &&
9409  			   is_freesync_video_mode(&new_crtc_state->mode,
9410  						  aconnector)) {
9411  			struct drm_display_mode *high_mode;
9412  
9413  			high_mode = get_highest_refresh_rate_mode(aconnector, false);
9414  			if (!drm_mode_equal(&new_crtc_state->mode, high_mode))
9415  				set_freesync_fixed_config(dm_new_crtc_state);
9416  		}
9417  
9418  		ret = dm_atomic_get_state(state, &dm_state);
9419  		if (ret)
9420  			goto fail;
9421  
9422  		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
9423  				crtc->base.id);
9424  
9425  		/* i.e. reset mode */
9426  		if (dc_remove_stream_from_ctx(
9427  				dm->dc,
9428  				dm_state->context,
9429  				dm_old_crtc_state->stream) != DC_OK) {
9430  			ret = -EINVAL;
9431  			goto fail;
9432  		}
9433  
9434  		dc_stream_release(dm_old_crtc_state->stream);
9435  		dm_new_crtc_state->stream = NULL;
9436  
9437  		reset_freesync_config_for_crtc(dm_new_crtc_state);
9438  
9439  		*lock_and_validation_needed = true;
9440  
9441  	} else {/* Add stream for any updated/enabled CRTC */
9442  		/*
9443  		 * Quick fix to prevent a NULL pointer dereference on new_stream when
9444  		 * added MST connectors are not found in the existing crtc_state in chained mode.
9445  		 * TODO: dig out the root cause of this.
9446  		 */
9447  		if (!aconnector)
9448  			goto skip_modeset;
9449  
9450  		if (modereset_required(new_crtc_state))
9451  			goto skip_modeset;
9452  
9453  		if (amdgpu_dm_crtc_modeset_required(new_crtc_state, new_stream,
9454  				     dm_old_crtc_state->stream)) {
9455  
9456  			WARN_ON(dm_new_crtc_state->stream);
9457  
9458  			ret = dm_atomic_get_state(state, &dm_state);
9459  			if (ret)
9460  				goto fail;
9461  
9462  			dm_new_crtc_state->stream = new_stream;
9463  
9464  			dc_stream_retain(new_stream);
9465  
9466  			DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
9467  					 crtc->base.id);
9468  
9469  			if (dc_add_stream_to_ctx(
9470  					dm->dc,
9471  					dm_state->context,
9472  					dm_new_crtc_state->stream) != DC_OK) {
9473  				ret = -EINVAL;
9474  				goto fail;
9475  			}
9476  
9477  			*lock_and_validation_needed = true;
9478  		}
9479  	}
9480  
9481  skip_modeset:
9482  	/* Release extra reference */
9483  	if (new_stream)
9484  		dc_stream_release(new_stream);
9485  
9486  	/*
9487  	 * We want to do dc stream updates that do not require a
9488  	 * full modeset below.
9489  	 */
9490  	if (!(enable && aconnector && new_crtc_state->active))
9491  		return 0;
9492  	/*
9493  	 * Given above conditions, the dc state cannot be NULL because:
9494  	 * 1. We're in the process of enabling CRTCs (just been added
9495  	 *    to the dc context, or already is on the context)
9496  	 * 2. Has a valid connector attached, and
9497  	 * 3. Is currently active and enabled.
9498  	 * => The dc stream state currently exists.
9499  	 */
9500  	BUG_ON(dm_new_crtc_state->stream == NULL);
9501  
9502  	/* Scaling or underscan settings */
9503  	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
9504  				drm_atomic_crtc_needs_modeset(new_crtc_state))
9505  		update_stream_scaling_settings(
9506  			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
9507  
9508  	/* ABM settings */
9509  	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9510  
9511  	/*
9512  	 * Color management settings. We also update color properties
9513  	 * when a modeset is needed, to ensure it gets reprogrammed.
9514  	 */
9515  	if (dm_new_crtc_state->base.color_mgmt_changed ||
9516  	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
9517  		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
9518  		if (ret)
9519  			goto fail;
9520  	}
9521  
9522  	/* Update Freesync settings. */
9523  	get_freesync_config_for_crtc(dm_new_crtc_state,
9524  				     dm_new_conn_state);
9525  
9526  	return ret;
9527  
9528  fail:
9529  	if (new_stream)
9530  		dc_stream_release(new_stream);
9531  	return ret;
9532  }
9533  
9534  static bool should_reset_plane(struct drm_atomic_state *state,
9535  			       struct drm_plane *plane,
9536  			       struct drm_plane_state *old_plane_state,
9537  			       struct drm_plane_state *new_plane_state)
9538  {
9539  	struct drm_plane *other;
9540  	struct drm_plane_state *old_other_state, *new_other_state;
9541  	struct drm_crtc_state *new_crtc_state;
9542  	int i;
9543  
9544  	/*
9545  	 * TODO: Remove this hack once the checks below are sufficient
9546  	 * to determine when we need to reset all the planes on
9547  	 * the stream.
9548  	 */
9549  	if (state->allow_modeset)
9550  		return true;
9551  
9552  	/* Exit early if we know that we're adding or removing the plane. */
9553  	if (old_plane_state->crtc != new_plane_state->crtc)
9554  		return true;
9555  
9556  	/* old crtc == new_crtc == NULL, plane not in context. */
9557  	if (!new_plane_state->crtc)
9558  		return false;
9559  
9560  	new_crtc_state =
9561  		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
9562  
9563  	if (!new_crtc_state)
9564  		return true;
9565  
9566  	/* CRTC Degamma changes currently require us to recreate planes. */
9567  	if (new_crtc_state->color_mgmt_changed)
9568  		return true;
9569  
9570  	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
9571  		return true;
9572  
9573  	/*
9574  	 * If there are any new primary or overlay planes being added or
9575  	 * removed then the z-order can potentially change. To ensure
9576  	 * correct z-order and pipe acquisition the current DC architecture
9577  	 * requires us to remove and recreate all existing planes.
9578  	 *
9579  	 * TODO: Come up with a more elegant solution for this.
9580  	 */
9581  	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
9582  		struct amdgpu_framebuffer *old_afb, *new_afb;
9583  
9584  		if (other->type == DRM_PLANE_TYPE_CURSOR)
9585  			continue;
9586  
9587  		if (old_other_state->crtc != new_plane_state->crtc &&
9588  		    new_other_state->crtc != new_plane_state->crtc)
9589  			continue;
9590  
9591  		if (old_other_state->crtc != new_other_state->crtc)
9592  			return true;
9593  
9594  		/* Src/dst size and scaling updates. */
9595  		if (old_other_state->src_w != new_other_state->src_w ||
9596  		    old_other_state->src_h != new_other_state->src_h ||
9597  		    old_other_state->crtc_w != new_other_state->crtc_w ||
9598  		    old_other_state->crtc_h != new_other_state->crtc_h)
9599  			return true;
9600  
9601  		/* Rotation / mirroring updates. */
9602  		if (old_other_state->rotation != new_other_state->rotation)
9603  			return true;
9604  
9605  		/* Blending updates. */
9606  		if (old_other_state->pixel_blend_mode !=
9607  		    new_other_state->pixel_blend_mode)
9608  			return true;
9609  
9610  		/* Alpha updates. */
9611  		if (old_other_state->alpha != new_other_state->alpha)
9612  			return true;
9613  
9614  		/* Colorspace changes. */
9615  		if (old_other_state->color_range != new_other_state->color_range ||
9616  		    old_other_state->color_encoding != new_other_state->color_encoding)
9617  			return true;
9618  
9619  		/* Framebuffer checks fall at the end. */
9620  		if (!old_other_state->fb || !new_other_state->fb)
9621  			continue;
9622  
9623  		/* Pixel format changes can require bandwidth updates. */
9624  		if (old_other_state->fb->format != new_other_state->fb->format)
9625  			return true;
9626  
9627  		old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
9628  		new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
9629  
9630  		/* Tiling and DCC changes also require bandwidth updates. */
9631  		if (old_afb->tiling_flags != new_afb->tiling_flags ||
9632  		    old_afb->base.modifier != new_afb->base.modifier)
9633  			return true;
9634  	}
9635  
9636  	return false;
9637  }
9638  
9639  static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
9640  			      struct drm_plane_state *new_plane_state,
9641  			      struct drm_framebuffer *fb)
9642  {
9643  	struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
9644  	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
9645  	unsigned int pitch;
9646  	bool linear;
9647  
9648  	if (fb->width > new_acrtc->max_cursor_width ||
9649  	    fb->height > new_acrtc->max_cursor_height) {
9650  		DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
9651  				 new_plane_state->fb->width,
9652  				 new_plane_state->fb->height);
9653  		return -EINVAL;
9654  	}
9655  	if (new_plane_state->src_w != fb->width << 16 ||
9656  	    new_plane_state->src_h != fb->height << 16) {
9657  		DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9658  		return -EINVAL;
9659  	}
9660  
9661  	/* Pitch in pixels */
9662  	pitch = fb->pitches[0] / fb->format->cpp[0];
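	/*
	 * pitches[0] is in bytes and cpp[0] is bytes per pixel, so the above
	 * is the per-line stride in pixels (e.g. a 256 byte pitch with 4 byte
	 * ARGB8888 pixels is 64 px).
	 */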
9663  
9664  	if (fb->width != pitch) {
9665  		DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
9666  				 fb->width, pitch);
9667  		return -EINVAL;
9668  	}
9669  
9670  	switch (pitch) {
9671  	case 64:
9672  	case 128:
9673  	case 256:
9674  		/* FB pitch is supported by cursor plane */
9675  		break;
9676  	default:
9677  		DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
9678  		return -EINVAL;
9679  	}
9680  
9681  	/* Core DRM takes care of checking FB modifiers, so we only need to
9682  	 * check tiling flags when the FB doesn't have a modifier.
9683  	 */
9684  	if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
9685  		if (adev->family < AMDGPU_FAMILY_AI) {
9686  			linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
9687  				 AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
9688  				 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
9689  		} else {
9690  			linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
9691  		}
9692  		if (!linear) {
9693  			DRM_DEBUG_ATOMIC("Cursor FB not linear");
9694  			return -EINVAL;
9695  		}
9696  	}
9697  
9698  	return 0;
9699  }
9700  
9701  static int dm_update_plane_state(struct dc *dc,
9702  				 struct drm_atomic_state *state,
9703  				 struct drm_plane *plane,
9704  				 struct drm_plane_state *old_plane_state,
9705  				 struct drm_plane_state *new_plane_state,
9706  				 bool enable,
9707  				 bool *lock_and_validation_needed,
9708  				 bool *is_top_most_overlay)
9709  {
9710  
9711  	struct dm_atomic_state *dm_state = NULL;
9712  	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
9713  	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9714  	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
9715  	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
9716  	struct amdgpu_crtc *new_acrtc;
9717  	bool needs_reset;
9718  	int ret = 0;
9719  
9720  
9721  	new_plane_crtc = new_plane_state->crtc;
9722  	old_plane_crtc = old_plane_state->crtc;
9723  	dm_new_plane_state = to_dm_plane_state(new_plane_state);
9724  	dm_old_plane_state = to_dm_plane_state(old_plane_state);
9725  
9726  	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
9727  		if (!enable || !new_plane_crtc ||
9728  			drm_atomic_plane_disabling(plane->state, new_plane_state))
9729  			return 0;
9730  
9731  		new_acrtc = to_amdgpu_crtc(new_plane_crtc);
9732  
9733  		if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
9734  			DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9735  			return -EINVAL;
9736  		}
9737  
9738  		if (new_plane_state->fb) {
9739  			ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
9740  						 new_plane_state->fb);
9741  			if (ret)
9742  				return ret;
9743  		}
9744  
9745  		return 0;
9746  	}
9747  
9748  	needs_reset = should_reset_plane(state, plane, old_plane_state,
9749  					 new_plane_state);
9750  
9751  	/* Remove any changed/removed planes */
9752  	if (!enable) {
9753  		if (!needs_reset)
9754  			return 0;
9755  
9756  		if (!old_plane_crtc)
9757  			return 0;
9758  
9759  		old_crtc_state = drm_atomic_get_old_crtc_state(
9760  				state, old_plane_crtc);
9761  		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9762  
9763  		if (!dm_old_crtc_state->stream)
9764  			return 0;
9765  
9766  		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
9767  				plane->base.id, old_plane_crtc->base.id);
9768  
9769  		ret = dm_atomic_get_state(state, &dm_state);
9770  		if (ret)
9771  			return ret;
9772  
9773  		if (!dc_remove_plane_from_context(
9774  				dc,
9775  				dm_old_crtc_state->stream,
9776  				dm_old_plane_state->dc_state,
9777  				dm_state->context)) {
9778  
9779  			return -EINVAL;
9780  		}
9781  
9782  		if (dm_old_plane_state->dc_state)
9783  			dc_plane_state_release(dm_old_plane_state->dc_state);
9784  
9785  		dm_new_plane_state->dc_state = NULL;
9786  
9787  		*lock_and_validation_needed = true;
9788  
9789  	} else { /* Add new planes */
9790  		struct dc_plane_state *dc_new_plane_state;
9791  
9792  		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
9793  			return 0;
9794  
9795  		if (!new_plane_crtc)
9796  			return 0;
9797  
9798  		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
9799  		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9800  
9801  		if (!dm_new_crtc_state->stream)
9802  			return 0;
9803  
9804  		if (!needs_reset)
9805  			return 0;
9806  
9807  		ret = amdgpu_dm_plane_helper_check_state(new_plane_state, new_crtc_state);
9808  		if (ret)
9809  			return ret;
9810  
9811  		WARN_ON(dm_new_plane_state->dc_state);
9812  
9813  		dc_new_plane_state = dc_create_plane_state(dc);
9814  		if (!dc_new_plane_state)
9815  			return -ENOMEM;
9816  
9817  		/* Block top most plane from being a video plane */
9818  		if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
9819  			if (is_video_format(new_plane_state->fb->format->format) && *is_top_most_overlay)
9820  				return -EINVAL;
9821  
9822  			*is_top_most_overlay = false;
9823  		}
9824  
9825  		DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
9826  				 plane->base.id, new_plane_crtc->base.id);
9827  
9828  		ret = fill_dc_plane_attributes(
9829  			drm_to_adev(new_plane_crtc->dev),
9830  			dc_new_plane_state,
9831  			new_plane_state,
9832  			new_crtc_state);
9833  		if (ret) {
9834  			dc_plane_state_release(dc_new_plane_state);
9835  			return ret;
9836  		}
9837  
9838  		ret = dm_atomic_get_state(state, &dm_state);
9839  		if (ret) {
9840  			dc_plane_state_release(dc_new_plane_state);
9841  			return ret;
9842  		}
9843  
9844  		/*
9845  		 * Any atomic check errors that occur after this will
9846  		 * not need a release. The plane state will be attached
9847  		 * to the stream, and therefore part of the atomic
9848  		 * state. It'll be released when the atomic state is
9849  		 * cleaned.
9850  		 */
9851  		if (!dc_add_plane_to_context(
9852  				dc,
9853  				dm_new_crtc_state->stream,
9854  				dc_new_plane_state,
9855  				dm_state->context)) {
9856  
9857  			dc_plane_state_release(dc_new_plane_state);
9858  			return -EINVAL;
9859  		}
9860  
9861  		dm_new_plane_state->dc_state = dc_new_plane_state;
9862  
9863  		dm_new_crtc_state->mpo_requested |= (plane->type == DRM_PLANE_TYPE_OVERLAY);
9864  
9865  		/* Tell DC to do a full surface update every time there
9866  		 * is a plane change. Inefficient, but works for now.
9867  		 */
9868  		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
9869  
9870  		*lock_and_validation_needed = true;
9871  	}
9872  
9873  
9874  	return ret;
9875  }
9876  
9877  static void dm_get_oriented_plane_size(struct drm_plane_state *plane_state,
9878  				       int *src_w, int *src_h)
9879  {
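	/*
	 * src_w/src_h are 16.16 fixed point, so shifting right by 16 yields
	 * whole pixels; for 90/270 degree rotation the source width and
	 * height are swapped to reflect the scan-out orientation.
	 */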
9880  	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
9881  	case DRM_MODE_ROTATE_90:
9882  	case DRM_MODE_ROTATE_270:
9883  		*src_w = plane_state->src_h >> 16;
9884  		*src_h = plane_state->src_w >> 16;
9885  		break;
9886  	case DRM_MODE_ROTATE_0:
9887  	case DRM_MODE_ROTATE_180:
9888  	default:
9889  		*src_w = plane_state->src_w >> 16;
9890  		*src_h = plane_state->src_h >> 16;
9891  		break;
9892  	}
9893  }
9894  
9895  static int dm_check_crtc_cursor(struct drm_atomic_state *state,
9896  				struct drm_crtc *crtc,
9897  				struct drm_crtc_state *new_crtc_state)
9898  {
9899  	struct drm_plane *cursor = crtc->cursor, *underlying;
9900  	struct drm_plane_state *new_cursor_state, *new_underlying_state;
9901  	int i;
9902  	int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h;
9903  	int cursor_src_w, cursor_src_h;
9904  	int underlying_src_w, underlying_src_h;
9905  
9906  	/* On DCE and DCN there is no dedicated hardware cursor plane. We get a
9907  	 * cursor per pipe but it's going to inherit the scaling and
9908  	 * positioning from the underlying pipe. Check the cursor plane's
9909  	 * blending properties match the underlying planes'.
9910  	 */
9911  
9912  	new_cursor_state = drm_atomic_get_new_plane_state(state, cursor);
9913  	if (!new_cursor_state || !new_cursor_state->fb)
9914  		return 0;
9915  
9916  	dm_get_oriented_plane_size(new_cursor_state, &cursor_src_w, &cursor_src_h);
9917  	cursor_scale_w = new_cursor_state->crtc_w * 1000 / cursor_src_w;
9918  	cursor_scale_h = new_cursor_state->crtc_h * 1000 / cursor_src_h;
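	/*
	 * Scale factors are kept in thousandths to avoid floating point:
	 * e.g. a 64x64 cursor FB displayed at 128x128 gives a scale of
	 * 128 * 1000 / 64 = 2000 (2.0x), which must match the scale of the
	 * underlying plane checked below.
	 */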
9919  
9920  	for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) {
9921  		/* Narrow down to non-cursor planes on the same CRTC as the cursor */
9922  		if (new_underlying_state->crtc != crtc || underlying == crtc->cursor)
9923  			continue;
9924  
9925  		/* Ignore disabled planes */
9926  		if (!new_underlying_state->fb)
9927  			continue;
9928  
9929  		dm_get_oriented_plane_size(new_underlying_state,
9930  					   &underlying_src_w, &underlying_src_h);
9931  		underlying_scale_w = new_underlying_state->crtc_w * 1000 / underlying_src_w;
9932  		underlying_scale_h = new_underlying_state->crtc_h * 1000 / underlying_src_h;
9933  
9934  		if (cursor_scale_w != underlying_scale_w ||
9935  		    cursor_scale_h != underlying_scale_h) {
9936  			drm_dbg_atomic(crtc->dev,
9937  				       "Cursor [PLANE:%d:%s] scaling doesn't match underlying [PLANE:%d:%s]\n",
9938  				       cursor->base.id, cursor->name, underlying->base.id, underlying->name);
9939  			return -EINVAL;
9940  		}
9941  
9942  		/* If this plane covers the whole CRTC, no need to check planes underneath */
9943  		if (new_underlying_state->crtc_x <= 0 &&
9944  		    new_underlying_state->crtc_y <= 0 &&
9945  		    new_underlying_state->crtc_x + new_underlying_state->crtc_w >= new_crtc_state->mode.hdisplay &&
9946  		    new_underlying_state->crtc_y + new_underlying_state->crtc_h >= new_crtc_state->mode.vdisplay)
9947  			break;
9948  	}
9949  
9950  	return 0;
9951  }
9952  
9953  static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
9954  {
9955  	struct drm_connector *connector;
9956  	struct drm_connector_state *conn_state, *old_conn_state;
9957  	struct amdgpu_dm_connector *aconnector = NULL;
9958  	int i;
9959  
9960  	for_each_oldnew_connector_in_state(state, connector, old_conn_state, conn_state, i) {
9961  		if (!conn_state->crtc)
9962  			conn_state = old_conn_state;
9963  
9964  		if (conn_state->crtc != crtc)
9965  			continue;
9966  
9967  		aconnector = to_amdgpu_dm_connector(connector);
9968  		if (!aconnector->mst_output_port || !aconnector->mst_root)
9969  			aconnector = NULL;
9970  		else
9971  			break;
9972  	}
9973  
9974  	if (!aconnector)
9975  		return 0;
9976  
9977  	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_root->mst_mgr);
9978  }
9979  
9980  /**
9981   * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
9982   *
9983   * @dev: The DRM device
9984   * @state: The atomic state to commit
9985   *
9986   * Validate that the given atomic state is programmable by DC into hardware.
9987   * This involves constructing a &struct dc_state reflecting the new hardware
9988   * state we wish to commit, then querying DC to see if it is programmable. It's
9989   * important not to modify the existing DC state. Otherwise, atomic_check
9990   * may unexpectedly commit hardware changes.
9991   *
9992   * When validating the DC state, it's important that the right locks are
9993   * acquired. For full updates case which removes/adds/updates streams on one
9994   * CRTC while flipping on another CRTC, acquiring global lock will guarantee
9995   * that any such full update commit will wait for completion of any outstanding
9996   * flip using DRMs synchronization events.
9997   *
9998   * Note that DM adds the affected connectors for all CRTCs in state, when that
9999   * might not seem necessary. This is because DC stream creation requires the
10000   * DC sink, which is tied to the DRM connector state. Cleaning this up should
10001   * be possible but non-trivial - a possible TODO item.
10002   *
10003   * Return: -Error code if validation failed.
10004   */
10005  static int amdgpu_dm_atomic_check(struct drm_device *dev,
10006  				  struct drm_atomic_state *state)
10007  {
10008  	struct amdgpu_device *adev = drm_to_adev(dev);
10009  	struct dm_atomic_state *dm_state = NULL;
10010  	struct dc *dc = adev->dm.dc;
10011  	struct drm_connector *connector;
10012  	struct drm_connector_state *old_con_state, *new_con_state;
10013  	struct drm_crtc *crtc;
10014  	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10015  	struct drm_plane *plane;
10016  	struct drm_plane_state *old_plane_state, *new_plane_state;
10017  	enum dc_status status;
10018  	int ret, i;
10019  	bool lock_and_validation_needed = false;
10020  	bool is_top_most_overlay = true;
10021  	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
10022  	struct drm_dp_mst_topology_mgr *mgr;
10023  	struct drm_dp_mst_topology_state *mst_state;
10024  	struct dsc_mst_fairness_vars vars[MAX_PIPES];
10025  
10026  	trace_amdgpu_dm_atomic_check_begin(state);
10027  
10028  	ret = drm_atomic_helper_check_modeset(dev, state);
10029  	if (ret) {
10030  		DRM_DEBUG_DRIVER("drm_atomic_helper_check_modeset() failed\n");
10031  		goto fail;
10032  	}
10033  
10034  	/* Check connector changes */
10035  	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10036  		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10037  		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10038  
10039  		/* Skip connectors that are disabled or part of modeset already. */
10040  		if (!new_con_state->crtc)
10041  			continue;
10042  
10043  		new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
10044  		if (IS_ERR(new_crtc_state)) {
10045  			DRM_DEBUG_DRIVER("drm_atomic_get_crtc_state() failed\n");
10046  			ret = PTR_ERR(new_crtc_state);
10047  			goto fail;
10048  		}
10049  
10050  		if (dm_old_con_state->abm_level != dm_new_con_state->abm_level ||
10051  		    dm_old_con_state->scaling != dm_new_con_state->scaling)
10052  			new_crtc_state->connectors_changed = true;
10053  	}
10054  
10055  	if (dc_resource_is_dsc_encoding_supported(dc)) {
10056  		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10057  			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10058  				ret = add_affected_mst_dsc_crtcs(state, crtc);
10059  				if (ret) {
10060  					DRM_DEBUG_DRIVER("add_affected_mst_dsc_crtcs() failed\n");
10061  					goto fail;
10062  				}
10063  			}
10064  		}
10065  	}
10066  	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10067  		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10068  
10069  		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
10070  		    !new_crtc_state->color_mgmt_changed &&
10071  		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
10072  			dm_old_crtc_state->dsc_force_changed == false)
10073  			continue;
10074  
10075  		ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
10076  		if (ret) {
10077  			DRM_DEBUG_DRIVER("amdgpu_dm_verify_lut_sizes() failed\n");
10078  			goto fail;
10079  		}
10080  
10081  		if (!new_crtc_state->enable)
10082  			continue;
10083  
10084  		ret = drm_atomic_add_affected_connectors(state, crtc);
10085  		if (ret) {
10086  			DRM_DEBUG_DRIVER("drm_atomic_add_affected_connectors() failed\n");
10087  			goto fail;
10088  		}
10089  
10090  		ret = drm_atomic_add_affected_planes(state, crtc);
10091  		if (ret) {
10092  			DRM_DEBUG_DRIVER("drm_atomic_add_affected_planes() failed\n");
10093  			goto fail;
10094  		}
10095  
10096  		if (dm_old_crtc_state->dsc_force_changed)
10097  			new_crtc_state->mode_changed = true;
10098  	}
10099  
10100  	/*
10101  	 * Add all primary and overlay planes on the CRTC to the state
10102  	 * whenever a plane is enabled to maintain correct z-ordering
10103  	 * and to enable fast surface updates.
10104  	 */
10105  	drm_for_each_crtc(crtc, dev) {
10106  		bool modified = false;
10107  
10108  		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
10109  			if (plane->type == DRM_PLANE_TYPE_CURSOR)
10110  				continue;
10111  
10112  			if (new_plane_state->crtc == crtc ||
10113  			    old_plane_state->crtc == crtc) {
10114  				modified = true;
10115  				break;
10116  			}
10117  		}
10118  
10119  		if (!modified)
10120  			continue;
10121  
10122  		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
10123  			if (plane->type == DRM_PLANE_TYPE_CURSOR)
10124  				continue;
10125  
10126  			new_plane_state =
10127  				drm_atomic_get_plane_state(state, plane);
10128  
10129  			if (IS_ERR(new_plane_state)) {
10130  				ret = PTR_ERR(new_plane_state);
10131  				DRM_DEBUG_DRIVER("new_plane_state is BAD\n");
10132  				goto fail;
10133  			}
10134  		}
10135  	}
10136  
10137  	/*
10138  	 * DC consults the zpos (layer_index in DC terminology) to determine the
10139  	 * hw plane on which to enable the hw cursor (see
10140  	 * `dcn10_can_pipe_disable_cursor`). By now, all modified planes are in
10141  	 * atomic state, so call drm helper to normalize zpos.
10142  	 */
10143  	ret = drm_atomic_normalize_zpos(dev, state);
10144  	if (ret) {
10145  		drm_dbg(dev, "drm_atomic_normalize_zpos() failed\n");
10146  		goto fail;
10147  	}
10148  
10149  	/* Remove existing planes if they are modified */
10150  	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10151  		if (old_plane_state->fb && new_plane_state->fb &&
10152  		    get_mem_type(old_plane_state->fb) !=
10153  		    get_mem_type(new_plane_state->fb))
10154  			lock_and_validation_needed = true;
10155  
10156  		ret = dm_update_plane_state(dc, state, plane,
10157  					    old_plane_state,
10158  					    new_plane_state,
10159  					    false,
10160  					    &lock_and_validation_needed,
10161  					    &is_top_most_overlay);
10162  		if (ret) {
10163  			DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
10164  			goto fail;
10165  		}
10166  	}
10167  
10168  	/* Disable all crtcs which require disable */
10169  	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10170  		ret = dm_update_crtc_state(&adev->dm, state, crtc,
10171  					   old_crtc_state,
10172  					   new_crtc_state,
10173  					   false,
10174  					   &lock_and_validation_needed);
10175  		if (ret) {
10176  			DRM_DEBUG_DRIVER("DISABLE: dm_update_crtc_state() failed\n");
10177  			goto fail;
10178  		}
10179  	}
10180  
10181  	/* Enable all crtcs which require enable */
10182  	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10183  		ret = dm_update_crtc_state(&adev->dm, state, crtc,
10184  					   old_crtc_state,
10185  					   new_crtc_state,
10186  					   true,
10187  					   &lock_and_validation_needed);
10188  		if (ret) {
10189  			DRM_DEBUG_DRIVER("ENABLE: dm_update_crtc_state() failed\n");
10190  			goto fail;
10191  		}
10192  	}
10193  
10194  	/* Add new/modified planes */
10195  	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10196  		ret = dm_update_plane_state(dc, state, plane,
10197  					    old_plane_state,
10198  					    new_plane_state,
10199  					    true,
10200  					    &lock_and_validation_needed,
10201  					    &is_top_most_overlay);
10202  		if (ret) {
10203  			DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
10204  			goto fail;
10205  		}
10206  	}
10207  
10208  	if (dc_resource_is_dsc_encoding_supported(dc)) {
10209  		ret = pre_validate_dsc(state, &dm_state, vars);
10210  		if (ret != 0)
10211  			goto fail;
10212  	}
10213  
10214  	/* Run this here since we want to validate the streams we created */
10215  	ret = drm_atomic_helper_check_planes(dev, state);
10216  	if (ret) {
10217  		DRM_DEBUG_DRIVER("drm_atomic_helper_check_planes() failed\n");
10218  		goto fail;
10219  	}
10220  
10221  	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10222  		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10223  		if (dm_new_crtc_state->mpo_requested)
10224  			DRM_DEBUG_DRIVER("MPO enablement requested on crtc:[%p]\n", crtc);
10225  	}
10226  
10227  	/* Check cursor planes scaling */
10228  	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10229  		ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
10230  		if (ret) {
10231  			DRM_DEBUG_DRIVER("dm_check_crtc_cursor() failed\n");
10232  			goto fail;
10233  		}
10234  	}
10235  
10236  	if (state->legacy_cursor_update) {
10237  		/*
10238  		 * This is a fast cursor update coming from the plane update
10239  		 * helper, check if it can be done asynchronously for better
10240  		 * performance.
10241  		 */
10242  		state->async_update =
10243  			!drm_atomic_helper_async_check(dev, state);
10244  
10245  		/*
10246  		 * Skip the remaining global validation if this is an async
10247  		 * update. Cursor updates can be done without affecting
10248  		 * state or bandwidth calcs and this avoids the performance
10249  		 * penalty of locking the private state object and
10250  		 * allocating a new dc_state.
10251  		 */
10252  		if (state->async_update)
10253  			return 0;
10254  	}
10255  
10256  	/* Check scaling and underscan changes */
10257  	/* TODO Removed scaling changes validation due to inability to commit
10258  	 * new stream into context w\o causing full reset. Need to
10259  	 * decide how to handle.
10260  	 */
10261  	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10262  		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10263  		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10264  		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
10265  
10266  		/* Skip any modesets/resets */
10267  		if (!acrtc || drm_atomic_crtc_needs_modeset(
10268  				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
10269  			continue;
10270  
10271  		/* Skip anything that is not a scaling or underscan change */
10272  		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
10273  			continue;
10274  
10275  		lock_and_validation_needed = true;
10276  	}
10277  
10278  	/* set the slot info for each mst_state based on the link encoding format */
10279  	for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
10280  		struct amdgpu_dm_connector *aconnector;
10281  		struct drm_connector *connector;
10282  		struct drm_connector_list_iter iter;
10283  		u8 link_coding_cap;
10284  
10285  		drm_connector_list_iter_begin(dev, &iter);
10286  		drm_for_each_connector_iter(connector, &iter) {
10287  			if (connector->index == mst_state->mgr->conn_base_id) {
10288  				aconnector = to_amdgpu_dm_connector(connector);
10289  				link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
10290  				drm_dp_mst_update_slots(mst_state, link_coding_cap);
10291  
10292  				break;
10293  			}
10294  		}
10295  		drm_connector_list_iter_end(&iter);
10296  	}
10297  
10298  	/**
10299  	 * Streams and planes are reset when there are changes that affect
10300  	 * bandwidth. Anything that affects bandwidth needs to go through
10301  	 * DC global validation to ensure that the configuration can be applied
10302  	 * to hardware.
10303  	 *
10304  	 * We have to currently stall out here in atomic_check for outstanding
10305  	 * commits to finish in this case because our IRQ handlers reference
10306  	 * DRM state directly - we can end up disabling interrupts too early
10307  	 * if we don't.
10308  	 *
10309  	 * TODO: Remove this stall and drop DM state private objects.
10310  	 */
10311  	if (lock_and_validation_needed) {
10312  		ret = dm_atomic_get_state(state, &dm_state);
10313  		if (ret) {
10314  			DRM_DEBUG_DRIVER("dm_atomic_get_state() failed\n");
10315  			goto fail;
10316  		}
10317  
10318  		ret = do_aquire_global_lock(dev, state);
10319  		if (ret) {
10320  			DRM_DEBUG_DRIVER("do_aquire_global_lock() failed\n");
10321  			goto fail;
10322  		}
10323  
10324  		ret = compute_mst_dsc_configs_for_state(state, dm_state->context, vars);
10325  		if (ret) {
10326  			DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
10327  			ret = -EINVAL;
10328  			goto fail;
10329  		}
10330  
10331  		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
10332  		if (ret) {
10333  			DRM_DEBUG_DRIVER("dm_update_mst_vcpi_slots_for_dsc() failed\n");
10334  			goto fail;
10335  		}
10336  
10337  		/*
10338  		 * Perform validation of MST topology in the state:
10339  		 * We need to perform MST atomic check before calling
10340  		 * dc_validate_global_state(), or there is a chance
10341  		 * to get stuck in an infinite loop and hang eventually.
10342  		 */
10343  		ret = drm_dp_mst_atomic_check(state);
10344  		if (ret) {
10345  			DRM_DEBUG_DRIVER("drm_dp_mst_atomic_check() failed\n");
10346  			goto fail;
10347  		}
10348  		status = dc_validate_global_state(dc, dm_state->context, true);
10349  		if (status != DC_OK) {
10350  			DRM_DEBUG_DRIVER("DC global validation failure: %s (%d)",
10351  				       dc_status_to_str(status), status);
10352  			ret = -EINVAL;
10353  			goto fail;
10354  		}
10355  	} else {
10356  		/*
10357  		 * The commit is a fast update. Fast updates shouldn't change
10358  		 * the DC context, affect global validation, and can have their
10359  		 * commit work done in parallel with other commits not touching
10360  		 * the same resource. If we have a new DC context as part of
10361  		 * the DM atomic state from validation we need to free it and
10362  		 * retain the existing one instead.
10363  		 *
10364  		 * Furthermore, since the DM atomic state only contains the DC
10365  		 * context and can safely be annulled, we can free the state
10366  		 * and clear the associated private object now to free
10367  		 * some memory and avoid a possible use-after-free later.
10368  		 */
10369  
10370  		for (i = 0; i < state->num_private_objs; i++) {
10371  			struct drm_private_obj *obj = state->private_objs[i].ptr;
10372  
10373  			if (obj->funcs == adev->dm.atomic_obj.funcs) {
10374  				int j = state->num_private_objs-1;
10375  
10376  				dm_atomic_destroy_state(obj,
10377  						state->private_objs[i].state);
10378  
10379  				/* If i is not at the end of the array then the
10380  				 * last element needs to be moved to where i was
10381  				 * before the array can safely be truncated.
10382  				 */
10383  				if (i != j)
10384  					state->private_objs[i] =
10385  						state->private_objs[j];
10386  
10387  				state->private_objs[j].ptr = NULL;
10388  				state->private_objs[j].state = NULL;
10389  				state->private_objs[j].old_state = NULL;
10390  				state->private_objs[j].new_state = NULL;
10391  
10392  				state->num_private_objs = j;
10393  				break;
10394  			}
10395  		}
10396  	}
10397  
10398  	/* Store the overall update type for use later in atomic check. */
10399  	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10400  		struct dm_crtc_state *dm_new_crtc_state =
10401  			to_dm_crtc_state(new_crtc_state);
10402  
10403  		/*
10404  		 * Only allow async flips for fast updates that don't change
10405  		 * the FB pitch, the DCC state, rotation, etc.
10406  		 */
10407  		if (new_crtc_state->async_flip && lock_and_validation_needed) {
10408  			drm_dbg_atomic(crtc->dev,
10409  				       "[CRTC:%d:%s] async flips are only supported for fast updates\n",
10410  				       crtc->base.id, crtc->name);
10411  			ret = -EINVAL;
10412  			goto fail;
10413  		}
10414  
10415  		dm_new_crtc_state->update_type = lock_and_validation_needed ?
10416  			UPDATE_TYPE_FULL : UPDATE_TYPE_FAST;
10417  	}
10418  
10419  	/* Must be success */
10420  	WARN_ON(ret);
10421  
10422  	trace_amdgpu_dm_atomic_check_finish(state, ret);
10423  
10424  	return ret;
10425  
10426  fail:
10427  	if (ret == -EDEADLK)
10428  		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
10429  	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
10430  		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
10431  	else
10432  		DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
10433  
10434  	trace_amdgpu_dm_atomic_check_finish(state, ret);
10435  
10436  	return ret;
10437  }
10438  
10439  static bool is_dp_capable_without_timing_msa(struct dc *dc,
10440  					     struct amdgpu_dm_connector *amdgpu_dm_connector)
10441  {
10442  	u8 dpcd_data;
10443  	bool capable = false;
10444  
10445  	if (amdgpu_dm_connector->dc_link &&
10446  		dm_helpers_dp_read_dpcd(
10447  				NULL,
10448  				amdgpu_dm_connector->dc_link,
10449  				DP_DOWN_STREAM_PORT_COUNT,
10450  				&dpcd_data,
10451  				sizeof(dpcd_data))) {
10452  		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true:false;
10453  	}
10454  
10455  	return capable;
10456  }
10457  
10458  static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
10459  		unsigned int offset,
10460  		unsigned int total_length,
10461  		u8 *data,
10462  		unsigned int length,
10463  		struct amdgpu_hdmi_vsdb_info *vsdb)
10464  {
10465  	bool res;
10466  	union dmub_rb_cmd cmd;
10467  	struct dmub_cmd_send_edid_cea *input;
10468  	struct dmub_cmd_edid_cea_output *output;
10469  
10470  	if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES)
10471  		return false;
10472  
10473  	memset(&cmd, 0, sizeof(cmd));
10474  
10475  	input = &cmd.edid_cea.data.input;
10476  
10477  	cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA;
10478  	cmd.edid_cea.header.sub_type = 0;
10479  	cmd.edid_cea.header.payload_bytes =
10480  		sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header);
10481  	input->offset = offset;
10482  	input->length = length;
10483  	input->cea_total_length = total_length;
10484  	memcpy(input->payload, data, length);
10485  
10486  	res = dm_execute_dmub_cmd(dm->dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY);
10487  	if (!res) {
10488  		DRM_ERROR("EDID CEA parser failed\n");
10489  		return false;
10490  	}
10491  
10492  	output = &cmd.edid_cea.data.output;
10493  
10494  	if (output->type == DMUB_CMD__EDID_CEA_ACK) {
10495  		if (!output->ack.success) {
10496  			DRM_ERROR("EDID CEA ack failed at offset %d\n",
10497  					output->ack.offset);
10498  		}
10499  	} else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) {
10500  		if (!output->amd_vsdb.vsdb_found)
10501  			return false;
10502  
10503  		vsdb->freesync_supported = output->amd_vsdb.freesync_supported;
10504  		vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version;
10505  		vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
10506  		vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
10507  	} else {
10508  		DRM_WARN("Unknown EDID CEA parser results\n");
10509  		return false;
10510  	}
10511  
10512  	return true;
10513  }
10514  
10515  static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
10516  		u8 *edid_ext, int len,
10517  		struct amdgpu_hdmi_vsdb_info *vsdb_info)
10518  {
10519  	int i;
10520  
10521  	/* send extension block to DMCU for parsing */
10522  	for (i = 0; i < len; i += 8) {
10523  		bool res;
10524  		int offset;
10525  
10526  		/* send 8 bytes at a time */
10527  		if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8))
10528  			return false;
10529  
10530  		if (i+8 == len) {
10531  			/* whole EDID block has been sent, expect the result */
10532  			int version, min_rate, max_rate;
10533  
10534  			res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate);
10535  			if (res) {
10536  				/* amd vsdb found */
10537  				vsdb_info->freesync_supported = 1;
10538  				vsdb_info->amd_vsdb_version = version;
10539  				vsdb_info->min_refresh_rate_hz = min_rate;
10540  				vsdb_info->max_refresh_rate_hz = max_rate;
10541  				return true;
10542  			}
10543  			/* not amd vsdb */
10544  			return false;
10545  		}
10546  
10547  		/* check for ack */
10548  		res = dc_edid_parser_recv_cea_ack(dm->dc, &offset);
10549  		if (!res)
10550  			return false;
10551  	}
10552  
10553  	return false;
10554  }
10555  
10556  static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
10557  		u8 *edid_ext, int len,
10558  		struct amdgpu_hdmi_vsdb_info *vsdb_info)
10559  {
10560  	int i;
10561  
10562  	/* send extension block to DMUB for parsing */
10563  	for (i = 0; i < len; i += 8) {
10564  		/* send 8 bytes at a time */
10565  		if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info))
10566  			return false;
10567  	}
10568  
10569  	return vsdb_info->freesync_supported;
10570  }
10571  
10572  static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
10573  		u8 *edid_ext, int len,
10574  		struct amdgpu_hdmi_vsdb_info *vsdb_info)
10575  {
10576  	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
10577  	bool ret;
10578  
10579  	mutex_lock(&adev->dm.dc_lock);
10580  	if (adev->dm.dmub_srv)
10581  		ret = parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
10582  	else
10583  		ret = parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
10584  	mutex_unlock(&adev->dm.dc_lock);
10585  	return ret;
10586  }
10587  
10588  static int parse_amd_vsdb(struct amdgpu_dm_connector *aconnector,
10589  			  struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
10590  {
10591  	u8 *edid_ext = NULL;
10592  	int i;
10593  	int j = 0;
10594  
10595  	if (edid == NULL || edid->extensions == 0)
10596  		return -ENODEV;
10597  
10598  	/* Find DisplayID extension */
10599  	for (i = 0; i < edid->extensions; i++) {
10600  		edid_ext = (void *)(edid + (i + 1));
10601  		if (edid_ext[0] == DISPLAYID_EXT)
10602  			break;
10603  	}
10604  
10605  	while (j < EDID_LENGTH) {
10606  		struct amd_vsdb_block *amd_vsdb = (struct amd_vsdb_block *)&edid_ext[j];
10607  		unsigned int ieeeId = (amd_vsdb->ieee_id[2] << 16) | (amd_vsdb->ieee_id[1] << 8) | (amd_vsdb->ieee_id[0]);
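		/*
		 * ieee_id[] holds the 24-bit IEEE OUI least significant byte
		 * first, so it is reassembled as (b2 << 16) | (b1 << 8) | b0
		 * before being compared against the AMD registration ID.
		 */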
10608  
10609  		if (ieeeId == HDMI_AMD_VENDOR_SPECIFIC_DATA_BLOCK_IEEE_REGISTRATION_ID &&
10610  				amd_vsdb->version == HDMI_AMD_VENDOR_SPECIFIC_DATA_BLOCK_VERSION_3) {
10611  			vsdb_info->replay_mode = (amd_vsdb->feature_caps & AMD_VSDB_VERSION_3_FEATURECAP_REPLAYMODE) ? true : false;
10612  			vsdb_info->amd_vsdb_version = HDMI_AMD_VENDOR_SPECIFIC_DATA_BLOCK_VERSION_3;
10613  			DRM_DEBUG_KMS("Panel supports Replay Mode: %d\n", vsdb_info->replay_mode);
10614  
10615  			return true;
10616  		}
10617  		j++;
10618  	}
10619  
10620  	return false;
10621  }
10622  
10623  static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
10624  		struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
10625  {
10626  	u8 *edid_ext = NULL;
10627  	int i;
10628  	bool valid_vsdb_found = false;
10629  
10630  	/*----- drm_find_cea_extension() -----*/
10631  	/* No EDID or EDID extensions */
10632  	if (edid == NULL || edid->extensions == 0)
10633  		return -ENODEV;
10634  
10635  	/* Find CEA extension */
10636  	for (i = 0; i < edid->extensions; i++) {
10637  		edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
10638  		if (edid_ext[0] == CEA_EXT)
10639  			break;
10640  	}
10641  
10642  	if (i == edid->extensions)
10643  		return -ENODEV;
10644  
10645  	/*----- cea_db_offsets() -----*/
10646  	if (edid_ext[0] != CEA_EXT)
10647  		return -ENODEV;
10648  
10649  	valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
10650  
10651  	return valid_vsdb_found ? i : -ENODEV;
10652  }
10653  
10654  /**
10655   * amdgpu_dm_update_freesync_caps - Update Freesync capabilities
10656   *
10657   * @connector: Connector to query.
10658   * @edid: EDID from monitor
10659   *
10660   * Amdgpu supports FreeSync on DP and HDMI displays, and it is required to keep
10661   * track of some of the display information in the internal data struct used by
10662   * amdgpu_dm. This function checks which type of connector we are dealing with
10663   * so the FreeSync parameters can be set accordingly.
10664   */
10665  void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
10666  				    struct edid *edid)
10667  {
10668  	int i = 0;
10669  	struct detailed_timing *timing;
10670  	struct detailed_non_pixel *data;
10671  	struct detailed_data_monitor_range *range;
10672  	struct amdgpu_dm_connector *amdgpu_dm_connector =
10673  			to_amdgpu_dm_connector(connector);
10674  	struct dm_connector_state *dm_con_state = NULL;
10675  	struct dc_sink *sink;
10676  
10677  	struct drm_device *dev = connector->dev;
10678  	struct amdgpu_device *adev = drm_to_adev(dev);
10679  	struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
10680  	bool freesync_capable = false;
10681  	enum adaptive_sync_type as_type = ADAPTIVE_SYNC_TYPE_NONE;
10682  
10683  	if (!connector->state) {
10684  		DRM_ERROR("%s - Connector has no state\n", __func__);
10685  		goto update;
10686  	}
10687  
10688  	sink = amdgpu_dm_connector->dc_sink ?
10689  		amdgpu_dm_connector->dc_sink :
10690  		amdgpu_dm_connector->dc_em_sink;
10691  
10692  	if (!edid || !sink) {
10693  		dm_con_state = to_dm_connector_state(connector->state);
10694  
10695  		amdgpu_dm_connector->min_vfreq = 0;
10696  		amdgpu_dm_connector->max_vfreq = 0;
10697  		amdgpu_dm_connector->pixel_clock_mhz = 0;
10698  		connector->display_info.monitor_range.min_vfreq = 0;
10699  		connector->display_info.monitor_range.max_vfreq = 0;
10700  		freesync_capable = false;
10701  
10702  		goto update;
10703  	}
10704  
10705  	dm_con_state = to_dm_connector_state(connector->state);
10706  
10707  	if (!adev->dm.freesync_module)
10708  		goto update;
10709  
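	/*
	 * For DP/eDP sinks, variable refresh is only usable when the sink can
	 * ignore the MSA timing parameter; in that case the supported refresh
	 * range is read from the EDID monitor range descriptor.
	 */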
10710  	if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
10711  		|| sink->sink_signal == SIGNAL_TYPE_EDP) {
10712  		bool edid_check_required = false;
10713  
10714  		if (edid) {
10715  			edid_check_required = is_dp_capable_without_timing_msa(
10716  						adev->dm.dc,
10717  						amdgpu_dm_connector);
10718  		}
10719  
10720  		if (edid_check_required && (edid->version > 1 ||
10721  		    (edid->version == 1 && edid->revision > 1))) {
10722  			for (i = 0; i < 4; i++) {
10723  
10724  				timing	= &edid->detailed_timings[i];
10725  				data	= &timing->data.other_data;
10726  				range	= &data->data.range;
10727  				/*
10728  				 * Check if monitor has continuous frequency mode
10729  				 */
10730  				if (data->type != EDID_DETAIL_MONITOR_RANGE)
10731  					continue;
10732  				/*
10733  				 * Check for flag range limits only. If flag == 1 then
10734  				 * no additional timing information provided.
10735  				 * Default GTF, GTF Secondary curve and CVT are not
10736  				 * supported
10737  				 */
10738  				if (range->flags != 1)
10739  					continue;
10740  
10741  				amdgpu_dm_connector->min_vfreq = range->min_vfreq;
10742  				amdgpu_dm_connector->max_vfreq = range->max_vfreq;
10743  				amdgpu_dm_connector->pixel_clock_mhz =
10744  					range->pixel_clock_mhz * 10;
10745  
10746  				connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
10747  				connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
10748  
10749  				break;
10750  			}
10751  
10752  			if (amdgpu_dm_connector->max_vfreq -
10753  			    amdgpu_dm_connector->min_vfreq > 10) {
10754  
10755  				freesync_capable = true;
10756  			}
10757  		}
10758  		parse_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
10759  
10760  		if (vsdb_info.replay_mode) {
10761  			amdgpu_dm_connector->vsdb_info.replay_mode = vsdb_info.replay_mode;
10762  			amdgpu_dm_connector->vsdb_info.amd_vsdb_version = vsdb_info.amd_vsdb_version;
10763  			amdgpu_dm_connector->as_type = ADAPTIVE_SYNC_TYPE_EDP;
10764  		}
10765  
10766  	} else if (edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
10767  		i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
10768  		if (i >= 0 && vsdb_info.freesync_supported) {
10769  			timing  = &edid->detailed_timings[i];
10770  			data    = &timing->data.other_data;
10771  
10772  			amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
10773  			amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
10774  			if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
10775  				freesync_capable = true;
10776  
10777  			connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
10778  			connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
10779  		}
10780  	}
10781  
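	/*
	 * Check whether the link drives a DP-to-HDMI converter (PCON) on the
	 * FreeSync whitelist; if so, reparse the AMD VSDB and enable SDP v1.3
	 * packing when the sink advertises support.
	 */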
10782  	as_type = dm_get_adaptive_sync_support_type(amdgpu_dm_connector->dc_link);
10783  
10784  	if (as_type == FREESYNC_TYPE_PCON_IN_WHITELIST) {
10785  		i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
10786  		if (i >= 0 && vsdb_info.freesync_supported && vsdb_info.amd_vsdb_version > 0) {
10787  
10788  			amdgpu_dm_connector->pack_sdp_v1_3 = true;
10789  			amdgpu_dm_connector->as_type = as_type;
10790  			amdgpu_dm_connector->vsdb_info = vsdb_info;
10791  
10792  			amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
10793  			amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
10794  			if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
10795  				freesync_capable = true;
10796  
10797  			connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
10798  			connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
10799  		}
10800  	}
10801  
10802  update:
10803  	if (dm_con_state)
10804  		dm_con_state->freesync_capable = freesync_capable;
10805  
10806  	if (connector->vrr_capable_property)
10807  		drm_connector_set_vrr_capable_property(connector,
10808  						       freesync_capable);
10809  }
10810  
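/*
 * Apply adev->dm.force_timing_sync to every stream in the current DC state
 * and trigger CRTC re-synchronization.
 */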
10811  void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
10812  {
10813  	struct amdgpu_device *adev = drm_to_adev(dev);
10814  	struct dc *dc = adev->dm.dc;
10815  	int i;
10816  
10817  	mutex_lock(&adev->dm.dc_lock);
10818  	if (dc->current_state) {
10819  		for (i = 0; i < dc->current_state->stream_count; ++i)
10820  			dc->current_state->streams[i]
10821  				->triggered_crtc_reset.enabled =
10822  				adev->dm.force_timing_sync;
10823  
10824  		dm_enable_per_frame_crtc_master_sync(dc->current_state);
10825  		dc_trigger_sync(dc, dc->current_state);
10826  	}
10827  	mutex_unlock(&adev->dm.dc_lock);
10828  }
10829  
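/*
 * Register access helpers for DC: reads and writes go through the CGS layer
 * and are recorded with the amdgpu_dc_rreg/amdgpu_dc_wreg tracepoints.
 */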
10830  void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
10831  		       u32 value, const char *func_name)
10832  {
10833  #ifdef DM_CHECK_ADDR_0
10834  	if (address == 0) {
10835  		DC_ERR("invalid register write. address = 0\n");
10836  		return;
10837  	}
10838  #endif
10839  	cgs_write_register(ctx->cgs_device, address, value);
10840  	trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
10841  }
10842  
10843  uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
10844  			  const char *func_name)
10845  {
10846  	u32 value;
10847  #ifdef DM_CHECK_ADDR_0
10848  	if (address == 0) {
10849  		DC_ERR("invalid register read; address = 0\n");
10850  		return 0;
10851  	}
10852  #endif
10853  
10854  	if (ctx->dmub_srv &&
10855  	    ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
10856  	    !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
10857  		ASSERT(false);
10858  		return 0;
10859  	}
10860  
10861  	value = cgs_read_register(ctx->cgs_device, address);
10862  
10863  	trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
10864  
10865  	return value;
10866  }
10867  
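/*
 * Execute a DPIA AUX transaction through DMUB and wait up to 10 seconds for
 * the reply notification. On success the AUX reply is copied back into the
 * payload and the reply length is returned; on failure -1 is returned and
 * *operation_result holds the error code.
 */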
10868  int amdgpu_dm_process_dmub_aux_transfer_sync(
10869  		struct dc_context *ctx,
10870  		unsigned int link_index,
10871  		struct aux_payload *payload,
10872  		enum aux_return_code_type *operation_result)
10873  {
10874  	struct amdgpu_device *adev = ctx->driver_context;
10875  	struct dmub_notification *p_notify = adev->dm.dmub_notify;
10876  	int ret = -1;
10877  
10878  	mutex_lock(&adev->dm.dpia_aux_lock);
10879  	if (!dc_process_dmub_aux_transfer_async(ctx->dc, link_index, payload)) {
10880  		*operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE;
10881  		goto out;
10882  	}
10883  
10884  	if (!wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ)) {
10885  		DRM_ERROR("wait_for_completion_timeout timed out!\n");
10886  		*operation_result = AUX_RET_ERROR_TIMEOUT;
10887  		goto out;
10888  	}
10889  
10890  	if (p_notify->result != AUX_RET_SUCCESS) {
10891  		/*
10892  		 * Transient states before tunneling is enabled could
10893  		 * lead to this error. We can ignore this for now.
10894  		 */
10895  		if (p_notify->result != AUX_RET_ERROR_PROTOCOL_ERROR) {
10896  			DRM_WARN("DPIA AUX failed on 0x%x(%d), error %d\n",
10897  					payload->address, payload->length,
10898  					p_notify->result);
10899  		}
10900  		*operation_result = AUX_RET_ERROR_INVALID_REPLY;
10901  		goto out;
10902  	}
10903  
10905  	payload->reply[0] = adev->dm.dmub_notify->aux_reply.command;
10906  	if (!payload->write && p_notify->aux_reply.length &&
10907  			(payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK)) {
10908  
10909  		if (payload->length != p_notify->aux_reply.length) {
10910  			DRM_WARN("invalid read length %d from DPIA AUX 0x%x(%d)!\n",
10911  				p_notify->aux_reply.length,
10912  				payload->address, payload->length);
10913  			*operation_result = AUX_RET_ERROR_INVALID_REPLY;
10914  			goto out;
10915  		}
10916  
10917  		memcpy(payload->data, p_notify->aux_reply.data,
10918  				p_notify->aux_reply.length);
10919  	}
10920  
10921  	/* success */
10922  	ret = p_notify->aux_reply.length;
10923  	*operation_result = p_notify->result;
10924  out:
10925  	reinit_completion(&adev->dm.dmub_aux_transfer_done);
10926  	mutex_unlock(&adev->dm.dpia_aux_lock);
10927  	return ret;
10928  }
10929  
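/*
 * Issue a DPIA SET_CONFIG request through DMUB. If DMUB completes the command
 * immediately the status comes straight from the notification data; otherwise
 * wait up to 10 seconds for the completion before reporting the status.
 */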
10930  int amdgpu_dm_process_dmub_set_config_sync(
10931  		struct dc_context *ctx,
10932  		unsigned int link_index,
10933  		struct set_config_cmd_payload *payload,
10934  		enum set_config_status *operation_result)
10935  {
10936  	struct amdgpu_device *adev = ctx->driver_context;
10937  	bool is_cmd_complete;
10938  	int ret;
10939  
10940  	mutex_lock(&adev->dm.dpia_aux_lock);
10941  	is_cmd_complete = dc_process_dmub_set_config_async(ctx->dc,
10942  			link_index, payload, adev->dm.dmub_notify);
10943  
10944  	if (is_cmd_complete || wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ)) {
10945  		ret = 0;
10946  		*operation_result = adev->dm.dmub_notify->sc_status;
10947  	} else {
10948  		DRM_ERROR("wait_for_completion_timeout timed out!\n");
10949  		ret = -1;
10950  		*operation_result = SET_CONFIG_UNKNOWN_ERROR;
10951  	}
10952  
10953  	if (!is_cmd_complete)
10954  		reinit_completion(&adev->dm.dmub_aux_transfer_done);
10955  	mutex_unlock(&adev->dm.dpia_aux_lock);
10956  	return ret;
10957  }
10958  
10959  /*
10960   * Check whether seamless boot is supported.
10961   *
10962   * So far we only support seamless boot on CHIP_VANGOGH.
10963   * If everything goes well, we may consider expanding
10964   * seamless boot to other ASICs.
10965   */
10966  bool check_seamless_boot_capability(struct amdgpu_device *adev)
10967  {
10968  	switch (adev->ip_versions[DCE_HWIP][0]) {
10969  	case IP_VERSION(3, 0, 1):
10970  		if (!adev->mman.keep_stolen_vga_memory)
10971  			return true;
10972  		break;
10973  	default:
10974  		break;
10975  	}
10976  
10977  	return false;
10978  }
10979  
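/* Thin DM wrappers forwarding DMUB command execution to the DC DMUB service. */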
10980  bool dm_execute_dmub_cmd(const struct dc_context *ctx, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type)
10981  {
10982  	return dc_dmub_srv_cmd_run(ctx->dmub_srv, cmd, wait_type);
10983  }
10984  
10985  bool dm_execute_dmub_cmd_list(const struct dc_context *ctx, unsigned int count, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type)
10986  {
10987  	return dc_dmub_srv_cmd_run_list(ctx->dmub_srv, count, cmd, wait_type);
10988  }
10989