/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/pci.h>

#include <drm/drm_fourcc.h>
#include <drm/drm_vblank.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_i2c.h"
#include "atom.h"
#include "amdgpu_atombios.h"
#include "atombios_crtc.h"
#include "atombios_encoders.h"
#include "amdgpu_pll.h"
#include "amdgpu_connectors.h"
#include "amdgpu_display.h"

#include "bif/bif_3_0_d.h"
#include "bif/bif_3_0_sh_mask.h"
#include "oss/oss_1_0_d.h"
#include "oss/oss_1_0_sh_mask.h"
#include "gca/gfx_6_0_d.h"
#include "gca/gfx_6_0_sh_mask.h"
#include "gmc/gmc_6_0_d.h"
#include "gmc/gmc_6_0_sh_mask.h"
#include "dce/dce_6_0_d.h"
#include "dce/dce_6_0_sh_mask.h"
#include "gca/gfx_7_2_enum.h"
#include "dce_v6_0.h"
#include "si_enums.h"

static void dce_v6_0_set_display_funcs(struct amdgpu_device *adev);
static void dce_v6_0_set_irq_funcs(struct amdgpu_device *adev);

static const u32 crtc_offsets[6] =
{
	SI_CRTC0_REGISTER_OFFSET,
	SI_CRTC1_REGISTER_OFFSET,
	SI_CRTC2_REGISTER_OFFSET,
	SI_CRTC3_REGISTER_OFFSET,
	SI_CRTC4_REGISTER_OFFSET,
	SI_CRTC5_REGISTER_OFFSET
};

static const u32 hpd_offsets[] =
{
	mmDC_HPD1_INT_STATUS - mmDC_HPD1_INT_STATUS,
	mmDC_HPD2_INT_STATUS - mmDC_HPD1_INT_STATUS,
	mmDC_HPD3_INT_STATUS - mmDC_HPD1_INT_STATUS,
	mmDC_HPD4_INT_STATUS - mmDC_HPD1_INT_STATUS,
	mmDC_HPD5_INT_STATUS - mmDC_HPD1_INT_STATUS,
	mmDC_HPD6_INT_STATUS - mmDC_HPD1_INT_STATUS,
};

static const uint32_t dig_offsets[] = {
	SI_CRTC0_REGISTER_OFFSET,
	SI_CRTC1_REGISTER_OFFSET,
	SI_CRTC2_REGISTER_OFFSET,
	SI_CRTC3_REGISTER_OFFSET,
	SI_CRTC4_REGISTER_OFFSET,
	SI_CRTC5_REGISTER_OFFSET,
	(0x13830 - 0x7030) >> 2,
};

static const struct {
	uint32_t reg;
	uint32_t vblank;
	uint32_t vline;
	uint32_t hpd;

} interrupt_status_offsets[6] = { {
	.reg = mmDISP_INTERRUPT_STATUS,
	.vblank = DISP_INTERRUPT_STATUS__LB_D1_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS__LB_D1_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE2,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE3,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE4,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE5,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK
} };

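/* Indirect access to the AZALIA audio endpoint registers: the endpoint
 * register index is written to AZALIA_F0_CODEC_ENDPOINT_INDEX and the data
 * is then read from or written to AZALIA_F0_CODEC_ENDPOINT_DATA while
 * holding audio_endpt_idx_lock.
 */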
static u32 dce_v6_0_audio_endpt_rreg(struct amdgpu_device *adev,
				     u32 block_offset, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
	r = RREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset);
	spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);

	return r;
}

static void dce_v6_0_audio_endpt_wreg(struct amdgpu_device *adev,
				      u32 block_offset, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset,
	       reg | AZALIA_F0_CODEC_ENDPOINT_INDEX__AZALIA_ENDPOINT_REG_WRITE_EN_MASK);
	WREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset, v);
	spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
}

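/* Return the current hardware frame counter for the given crtc. */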
static u32 dce_v6_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
	if (crtc >= adev->mode_info.num_crtc)
		return 0;
	else
		return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
}

static void dce_v6_0_pageflip_interrupt_init(struct amdgpu_device *adev)
{
	unsigned i;

	/* Enable pflip interrupts */
	for (i = 0; i < adev->mode_info.num_crtc; i++)
		amdgpu_irq_get(adev, &adev->pageflip_irq, i);
}

static void dce_v6_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
{
	unsigned i;

	/* Disable pflip interrupts */
	for (i = 0; i < adev->mode_info.num_crtc; i++)
		amdgpu_irq_put(adev, &adev->pageflip_irq, i);
}

/**
 * dce_v6_0_page_flip - pageflip callback.
 *
 * @adev: amdgpu_device pointer
 * @crtc_id: crtc to pageflip on
 * @crtc_base: new address of the crtc (GPU MC address)
 * @async: asynchronous flip
 *
 * Does the actual pageflip (evergreen+).
 * Programs the new scanout address and pitch for the crtc; the double
 * buffered surface update is latched at the next hsync (async flip)
 * or vsync (default).
 */
static void dce_v6_0_page_flip(struct amdgpu_device *adev,
			       int crtc_id, u64 crtc_base, bool async)
{
	struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
	struct drm_framebuffer *fb = amdgpu_crtc->base.primary->fb;

	/* flip at hsync for async, default is vsync */
	WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, async ?
	       GRPH_FLIP_CONTROL__GRPH_SURFACE_UPDATE_H_RETRACE_EN_MASK : 0);
	/* update pitch */
	WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset,
	       fb->pitches[0] / fb->format->cpp[0]);
	/* update the scanout addresses */
	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
	       upper_32_bits(crtc_base));
	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
	       (u32)crtc_base);

	/* post the write */
	RREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset);
}

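/* Return the raw CRTC_V_BLANK_START_END and CRTC_STATUS_POSITION register
 * values for the given crtc; used for scanout position queries.
 */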
static int dce_v6_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
					u32 *vbl, u32 *position)
{
	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
		return -EINVAL;

	*vbl = RREG32(mmCRTC_V_BLANK_START_END + crtc_offsets[crtc]);
	*position = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);

	return 0;
}

/**
 * dce_v6_0_hpd_sense - hpd sense callback.
 *
 * @adev: amdgpu_device pointer
 * @hpd: hpd (hotplug detect) pin
 *
 * Checks if a digital monitor is connected (evergreen+).
 * Returns true if connected, false if not connected.
 */
static bool dce_v6_0_hpd_sense(struct amdgpu_device *adev,
			       enum amdgpu_hpd_id hpd)
{
	bool connected = false;

	if (hpd >= adev->mode_info.num_hpd)
		return connected;

	if (RREG32(mmDC_HPD1_INT_STATUS + hpd_offsets[hpd]) & DC_HPD1_INT_STATUS__DC_HPD1_SENSE_MASK)
		connected = true;

	return connected;
}

/**
 * dce_v6_0_hpd_set_polarity - hpd set polarity callback.
 *
 * @adev: amdgpu_device pointer
 * @hpd: hpd (hotplug detect) pin
 *
 * Set the polarity of the hpd pin (evergreen+).
 */
static void dce_v6_0_hpd_set_polarity(struct amdgpu_device *adev,
				      enum amdgpu_hpd_id hpd)
{
	u32 tmp;
	bool connected = dce_v6_0_hpd_sense(adev, hpd);

	if (hpd >= adev->mode_info.num_hpd)
		return;

	tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
	if (connected)
		tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
	else
		tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
	WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
}

/**
 * dce_v6_0_hpd_init - hpd setup callback.
 *
 * @adev: amdgpu_device pointer
 *
 * Setup the hpd pins used by the card (evergreen+).
 * Enable the pin, set the polarity, and enable the hpd interrupts.
 */
static void dce_v6_0_hpd_init(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	u32 tmp;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);

		if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
			continue;

		tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
		tmp |= DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
		WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);

		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
			/* Don't try to enable hpd on eDP or LVDS to avoid
			 * breaking the aux dp channel on iMacs; this helps
			 * (but does not completely fix)
			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
			 * and also avoids interrupt storms during dpms.
			 */
			tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
			tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
			WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
			continue;
		}

		dce_v6_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
		amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
	}
	drm_connector_list_iter_end(&iter);
}

/**
 * dce_v6_0_hpd_fini - hpd tear down callback.
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down the hpd pins used by the card (evergreen+).
 * Disable the hpd interrupts.
 */
static void dce_v6_0_hpd_fini(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	u32 tmp;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);

		if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
			continue;

		tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
		tmp &= ~DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
		WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);

		amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
	}
	drm_connector_list_iter_end(&iter);
}

static u32 dce_v6_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
{
	return mmDC_GPIO_HPD_A;
}

static void dce_v6_0_set_vga_render_state(struct amdgpu_device *adev,
					  bool render)
{
	if (!render)
		WREG32(mmVGA_RENDER_CONTROL,
		       RREG32(mmVGA_RENDER_CONTROL) & VGA_VSTATUS_CNTL);
}

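/* Number of display controllers (crtcs) present on this ASIC. */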
static int dce_v6_0_get_num_crtc(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
		return 6;
	case CHIP_OLAND:
		return 2;
	default:
		return 0;
	}
}

void dce_v6_0_disable_dce(struct amdgpu_device *adev)
{
	/* Disable VGA render and any enabled crtcs, if the ASIC has a DCE engine */
	if (amdgpu_atombios_has_dce_engine_info(adev)) {
		u32 tmp;
		int crtc_enabled, i;

		dce_v6_0_set_vga_render_state(adev, false);

		/* Disable the crtcs */
		for (i = 0; i < dce_v6_0_get_num_crtc(adev); i++) {
			crtc_enabled = RREG32(mmCRTC_CONTROL + crtc_offsets[i]) &
				CRTC_CONTROL__CRTC_MASTER_EN_MASK;
			if (crtc_enabled) {
				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
				tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
				tmp &= ~CRTC_CONTROL__CRTC_MASTER_EN_MASK;
				WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
				WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
			}
		}
	}
}

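/* Program the FMT block (truncation/dithering) of the crtc feeding this
 * encoder, based on the connector's monitor bpc and dither setting.
 */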
static void dce_v6_0_program_fmt(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
	int bpc = 0;
	u32 tmp = 0;
	enum amdgpu_connector_dither dither = AMDGPU_FMT_DITHER_DISABLE;

	if (connector) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);

		bpc = amdgpu_connector_get_monitor_bpc(connector);
		dither = amdgpu_connector->dither;
	}

	/* LVDS FMT is set up by atom */
	if (amdgpu_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
		return;

	if (bpc == 0)
		return;

	switch (bpc) {
	case 6:
		if (dither == AMDGPU_FMT_DITHER_ENABLE)
			/* XXX sort out optimal dither settings */
			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK);
		else
			tmp |= FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK;
		break;
	case 8:
		if (dither == AMDGPU_FMT_DITHER_ENABLE)
			/* XXX sort out optimal dither settings */
			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH_MASK);
		else
			tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK |
				FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH_MASK);
		break;
	case 10:
	default:
		/* not needed */
		break;
	}

	WREG32(mmFMT_BIT_DEPTH_CONTROL + amdgpu_crtc->crtc_offset, tmp);
}

/**
 * si_get_number_of_dram_channels - get the number of dram channels
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the number of video ram channels (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the number of dram channels
 */
static u32 si_get_number_of_dram_channels(struct amdgpu_device *adev)
{
	u32 tmp = RREG32(mmMC_SHARED_CHMAP);

	switch ((tmp & MC_SHARED_CHMAP__NOOFCHAN_MASK) >> MC_SHARED_CHMAP__NOOFCHAN__SHIFT) {
	case 0:
	default:
		return 1;
	case 1:
		return 2;
	case 2:
		return 4;
	case 3:
		return 8;
	case 4:
		return 3;
	case 5:
		return 6;
	case 6:
		return 10;
	case 7:
		return 12;
	case 8:
		return 16;
	}
}

struct dce6_wm_params {
	u32 dram_channels; /* number of dram channels */
	u32 yclk; /* bandwidth per dram data pin in kHz */
	u32 sclk; /* engine clock in kHz */
	u32 disp_clk; /* display clock in kHz */
	u32 src_width; /* viewport width */
	u32 active_time; /* active display time in ns */
	u32 blank_time; /* blank time in ns */
	bool interlaced; /* mode is interlaced */
	fixed20_12 vsc; /* vertical scale ratio */
	u32 num_heads; /* number of active crtcs */
	u32 bytes_per_pixel; /* bytes per pixel display + overlay */
	u32 lb_size; /* line buffer allocated to pipe */
	u32 vtaps; /* vertical scaler taps */
};

/**
 * dce_v6_0_dram_bandwidth - get the dram bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the raw dram bandwidth (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the dram bandwidth in MBytes/s
 */
static u32 dce_v6_0_dram_bandwidth(struct dce6_wm_params *wm)
{
	/* Calculate raw DRAM Bandwidth */
	fixed20_12 dram_efficiency; /* 0.7 */
	fixed20_12 yclk, dram_channels, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	yclk.full = dfixed_const(wm->yclk);
	yclk.full = dfixed_div(yclk, a);
	dram_channels.full = dfixed_const(wm->dram_channels * 4);
	a.full = dfixed_const(10);
	dram_efficiency.full = dfixed_const(7);
	dram_efficiency.full = dfixed_div(dram_efficiency, a);
	bandwidth.full = dfixed_mul(dram_channels, yclk);
	bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);

	return dfixed_trunc(bandwidth);
}

/**
 * dce_v6_0_dram_bandwidth_for_display - get the dram bandwidth for display
 *
 * @wm: watermark calculation data
 *
 * Calculate the dram bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the dram bandwidth for display in MBytes/s
 */
static u32 dce_v6_0_dram_bandwidth_for_display(struct dce6_wm_params *wm)
{
	/* Calculate DRAM Bandwidth and the part allocated to display. */
	fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
	fixed20_12 yclk, dram_channels, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	yclk.full = dfixed_const(wm->yclk);
	yclk.full = dfixed_div(yclk, a);
	dram_channels.full = dfixed_const(wm->dram_channels * 4);
	a.full = dfixed_const(10);
	disp_dram_allocation.full = dfixed_const(3); /* XXX worst case value 0.3 */
	disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
	bandwidth.full = dfixed_mul(dram_channels, yclk);
	bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);

	return dfixed_trunc(bandwidth);
}

/**
 * dce_v6_0_data_return_bandwidth - get the data return bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the data return bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the data return bandwidth in MBytes/s
 */
static u32 dce_v6_0_data_return_bandwidth(struct dce6_wm_params *wm)
{
	/* Calculate the display Data return Bandwidth */
	fixed20_12 return_efficiency; /* 0.8 */
	fixed20_12 sclk, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	sclk.full = dfixed_const(wm->sclk);
	sclk.full = dfixed_div(sclk, a);
	a.full = dfixed_const(10);
	return_efficiency.full = dfixed_const(8);
	return_efficiency.full = dfixed_div(return_efficiency, a);
	a.full = dfixed_const(32);
	bandwidth.full = dfixed_mul(a, sclk);
	bandwidth.full = dfixed_mul(bandwidth, return_efficiency);

	return dfixed_trunc(bandwidth);
}

/**
 * dce_v6_0_dmif_request_bandwidth - get the dmif bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the dmif bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the dmif bandwidth in MBytes/s
 */
static u32 dce_v6_0_dmif_request_bandwidth(struct dce6_wm_params *wm)
{
	/* Calculate the DMIF Request Bandwidth */
	fixed20_12 disp_clk_request_efficiency; /* 0.8 */
	fixed20_12 disp_clk, bandwidth;
	fixed20_12 a, b;

	a.full = dfixed_const(1000);
	disp_clk.full = dfixed_const(wm->disp_clk);
	disp_clk.full = dfixed_div(disp_clk, a);
	a.full = dfixed_const(32);
	b.full = dfixed_mul(a, disp_clk);

	a.full = dfixed_const(10);
	disp_clk_request_efficiency.full = dfixed_const(8);
	disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);

	bandwidth.full = dfixed_mul(b, disp_clk_request_efficiency);

	return dfixed_trunc(bandwidth);
}

/**
 * dce_v6_0_available_bandwidth - get the min available bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the min available bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the min available bandwidth in MBytes/s
 */
static u32 dce_v6_0_available_bandwidth(struct dce6_wm_params *wm)
{
	/* Calculate the Available bandwidth. Display can use this temporarily but not in average. */
	u32 dram_bandwidth = dce_v6_0_dram_bandwidth(wm);
	u32 data_return_bandwidth = dce_v6_0_data_return_bandwidth(wm);
	u32 dmif_req_bandwidth = dce_v6_0_dmif_request_bandwidth(wm);

	return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
}

/**
 * dce_v6_0_average_bandwidth - get the average available bandwidth
 *
 * @wm: watermark calculation data
 *
 * Calculate the average available bandwidth used for display (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the average available bandwidth in MBytes/s
 */
static u32 dce_v6_0_average_bandwidth(struct dce6_wm_params *wm)
{
	/* Calculate the display mode Average Bandwidth
	 * DisplayMode should contain the source and destination dimensions,
	 * timing, etc.
	 */
	fixed20_12 bpp;
	fixed20_12 line_time;
	fixed20_12 src_width;
	fixed20_12 bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	line_time.full = dfixed_const(wm->active_time + wm->blank_time);
	line_time.full = dfixed_div(line_time, a);
	bpp.full = dfixed_const(wm->bytes_per_pixel);
	src_width.full = dfixed_const(wm->src_width);
	bandwidth.full = dfixed_mul(src_width, bpp);
	bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
	bandwidth.full = dfixed_div(bandwidth, line_time);

	return dfixed_trunc(bandwidth);
}

/**
 * dce_v6_0_latency_watermark - get the latency watermark
 *
 * @wm: watermark calculation data
 *
 * Calculate the latency watermark (CIK).
 * Used for display watermark bandwidth calculations
 * Returns the latency watermark in ns
 */
static u32 dce_v6_0_latency_watermark(struct dce6_wm_params *wm)
{
	/* First calculate the latency in ns */
	u32 mc_latency = 2000; /* 2000 ns. */
	u32 available_bandwidth = dce_v6_0_available_bandwidth(wm);
	u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
	u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
	u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
	u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
		(wm->num_heads * cursor_line_pair_return_time);
	u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
	u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
	u32 tmp, dmif_size = 12288;
	fixed20_12 a, b, c;

	if (wm->num_heads == 0)
		return 0;

	a.full = dfixed_const(2);
	b.full = dfixed_const(1);
	if ((wm->vsc.full > a.full) ||
	    ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
	    (wm->vtaps >= 5) ||
	    ((wm->vsc.full >= a.full) && wm->interlaced))
		max_src_lines_per_dst_line = 4;
	else
		max_src_lines_per_dst_line = 2;

	a.full = dfixed_const(available_bandwidth);
	b.full = dfixed_const(wm->num_heads);
	a.full = dfixed_div(a, b);
	tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512);
	tmp = min(dfixed_trunc(a), tmp);

	lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000);

	a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
	b.full = dfixed_const(1000);
	c.full = dfixed_const(lb_fill_bw);
	b.full = dfixed_div(c, b);
	a.full = dfixed_div(a, b);
	line_fill_time = dfixed_trunc(a);

	if (line_fill_time < wm->active_time)
		return latency;
	else
		return latency + (line_fill_time - wm->active_time);
}

/**
 * dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display - check
 * average and available dram bandwidth
 *
 * @wm: watermark calculation data
 *
 * Check if the display average bandwidth fits in the display
 * dram bandwidth (CIK).
 * Used for display watermark bandwidth calculations
 * Returns true if the display fits, false if not.
 */
static bool dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display(struct dce6_wm_params *wm)
{
	if (dce_v6_0_average_bandwidth(wm) <=
	    (dce_v6_0_dram_bandwidth_for_display(wm) / wm->num_heads))
		return true;
	else
		return false;
}

/**
 * dce_v6_0_average_bandwidth_vs_available_bandwidth - check
 * average and available bandwidth
 *
 * @wm: watermark calculation data
 *
 * Check if the display average bandwidth fits in the display
 * available bandwidth (CIK).
 * Used for display watermark bandwidth calculations
 * Returns true if the display fits, false if not.
 */
static bool dce_v6_0_average_bandwidth_vs_available_bandwidth(struct dce6_wm_params *wm)
{
	if (dce_v6_0_average_bandwidth(wm) <=
	    (dce_v6_0_available_bandwidth(wm) / wm->num_heads))
		return true;
	else
		return false;
}

/**
 * dce_v6_0_check_latency_hiding - check latency hiding
 *
 * @wm: watermark calculation data
 *
 * Check latency hiding (CIK).
 * Used for display watermark bandwidth calculations
 * Returns true if the display fits, false if not.
 */
static bool dce_v6_0_check_latency_hiding(struct dce6_wm_params *wm)
{
	u32 lb_partitions = wm->lb_size / wm->src_width;
	u32 line_time = wm->active_time + wm->blank_time;
	u32 latency_tolerant_lines;
	u32 latency_hiding;
	fixed20_12 a;

	a.full = dfixed_const(1);
	if (wm->vsc.full > a.full)
		latency_tolerant_lines = 1;
	else {
		if (lb_partitions <= (wm->vtaps + 1))
			latency_tolerant_lines = 1;
		else
			latency_tolerant_lines = 2;
	}

	latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);

	if (dce_v6_0_latency_watermark(wm) <= latency_hiding)
		return true;
	else
		return false;
}

/**
 * dce_v6_0_program_watermarks - program display watermarks
 *
 * @adev: amdgpu_device pointer
 * @amdgpu_crtc: the selected display controller
 * @lb_size: line buffer size
 * @num_heads: number of display controllers in use
 *
 * Calculate and program the display watermarks for the
 * selected display controller (CIK).
 */
static void dce_v6_0_program_watermarks(struct amdgpu_device *adev,
					struct amdgpu_crtc *amdgpu_crtc,
					u32 lb_size, u32 num_heads)
{
	struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
	struct dce6_wm_params wm_low, wm_high;
	u32 dram_channels;
	u32 active_time;
	u32 line_time = 0;
	u32 latency_watermark_a = 0, latency_watermark_b = 0;
	u32 priority_a_mark = 0, priority_b_mark = 0;
	u32 priority_a_cnt = PRIORITY_OFF;
	u32 priority_b_cnt = PRIORITY_OFF;
	u32 tmp, arb_control3, lb_vblank_lead_lines = 0;
	fixed20_12 a, b, c;

	if (amdgpu_crtc->base.enabled && num_heads && mode) {
		active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
					    (u32)mode->clock);
		line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
					  (u32)mode->clock);
		line_time = min(line_time, (u32)65535);
		priority_a_cnt = 0;
		priority_b_cnt = 0;

		dram_channels = si_get_number_of_dram_channels(adev);

		/* watermark for high clocks */
		if (adev->pm.dpm_enabled) {
			wm_high.yclk =
				amdgpu_dpm_get_mclk(adev, false) * 10;
			wm_high.sclk =
				amdgpu_dpm_get_sclk(adev, false) * 10;
		} else {
			wm_high.yclk = adev->pm.current_mclk * 10;
			wm_high.sclk = adev->pm.current_sclk * 10;
		}

		wm_high.disp_clk = mode->clock;
		wm_high.src_width = mode->crtc_hdisplay;
		wm_high.active_time = active_time;
		wm_high.blank_time = line_time - wm_high.active_time;
		wm_high.interlaced = false;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			wm_high.interlaced = true;
		wm_high.vsc = amdgpu_crtc->vsc;
		wm_high.vtaps = 1;
		if (amdgpu_crtc->rmx_type != RMX_OFF)
			wm_high.vtaps = 2;
		wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */
		wm_high.lb_size = lb_size;
		wm_high.dram_channels = dram_channels;
		wm_high.num_heads = num_heads;

		if (adev->pm.dpm_enabled) {
			/* watermark for low clocks */
			wm_low.yclk =
				amdgpu_dpm_get_mclk(adev, true) * 10;
			wm_low.sclk =
				amdgpu_dpm_get_sclk(adev, true) * 10;
		} else {
			wm_low.yclk = adev->pm.current_mclk * 10;
			wm_low.sclk = adev->pm.current_sclk * 10;
		}

		wm_low.disp_clk = mode->clock;
		wm_low.src_width = mode->crtc_hdisplay;
		wm_low.active_time = active_time;
		wm_low.blank_time = line_time - wm_low.active_time;
		wm_low.interlaced = false;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			wm_low.interlaced = true;
		wm_low.vsc = amdgpu_crtc->vsc;
		wm_low.vtaps = 1;
		if (amdgpu_crtc->rmx_type != RMX_OFF)
			wm_low.vtaps = 2;
		wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */
		wm_low.lb_size = lb_size;
		wm_low.dram_channels = dram_channels;
		wm_low.num_heads = num_heads;

		/* set for high clocks */
		latency_watermark_a = min(dce_v6_0_latency_watermark(&wm_high), (u32)65535);
		/* set for low clocks */
		latency_watermark_b = min(dce_v6_0_latency_watermark(&wm_low), (u32)65535);

		/* possibly force display priority to high */
		/* should really do this at mode validation time... */
		if (!dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
		    !dce_v6_0_average_bandwidth_vs_available_bandwidth(&wm_high) ||
		    !dce_v6_0_check_latency_hiding(&wm_high) ||
		    (adev->mode_info.disp_priority == 2)) {
			DRM_DEBUG_KMS("force priority to high\n");
			priority_a_cnt |= PRIORITY_ALWAYS_ON;
			priority_b_cnt |= PRIORITY_ALWAYS_ON;
		}
		if (!dce_v6_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
		    !dce_v6_0_average_bandwidth_vs_available_bandwidth(&wm_low) ||
		    !dce_v6_0_check_latency_hiding(&wm_low) ||
		    (adev->mode_info.disp_priority == 2)) {
			DRM_DEBUG_KMS("force priority to high\n");
			priority_a_cnt |= PRIORITY_ALWAYS_ON;
			priority_b_cnt |= PRIORITY_ALWAYS_ON;
		}

		a.full = dfixed_const(1000);
		b.full = dfixed_const(mode->clock);
		b.full = dfixed_div(b, a);
		c.full = dfixed_const(latency_watermark_a);
		c.full = dfixed_mul(c, b);
		c.full = dfixed_mul(c, amdgpu_crtc->hsc);
		c.full = dfixed_div(c, a);
		a.full = dfixed_const(16);
		c.full = dfixed_div(c, a);
		priority_a_mark = dfixed_trunc(c);
		priority_a_cnt |= priority_a_mark & PRIORITY_MARK_MASK;

		a.full = dfixed_const(1000);
		b.full = dfixed_const(mode->clock);
		b.full = dfixed_div(b, a);
		c.full = dfixed_const(latency_watermark_b);
		c.full = dfixed_mul(c, b);
		c.full = dfixed_mul(c, amdgpu_crtc->hsc);
		c.full = dfixed_div(c, a);
		a.full = dfixed_const(16);
		c.full = dfixed_div(c, a);
		priority_b_mark = dfixed_trunc(c);
		priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK;

		lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
	}

	/* select wm A */
	arb_control3 = RREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset);
	tmp = arb_control3;
	tmp &= ~LATENCY_WATERMARK_MASK(3);
	tmp |= LATENCY_WATERMARK_MASK(1);
	WREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, tmp);
	WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
	       ((latency_watermark_a << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT) |
		(line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT)));
	/* select wm B */
	tmp = RREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset);
	tmp &= ~LATENCY_WATERMARK_MASK(3);
	tmp |= LATENCY_WATERMARK_MASK(2);
	WREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, tmp);
	WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
	       ((latency_watermark_b << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT) |
		(line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT)));
	/* restore original selection */
	WREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, arb_control3);

	/* write the priority marks */
	WREG32(mmPRIORITY_A_CNT + amdgpu_crtc->crtc_offset, priority_a_cnt);
	WREG32(mmPRIORITY_B_CNT + amdgpu_crtc->crtc_offset, priority_b_cnt);

	/* save values for DPM */
	amdgpu_crtc->line_time = line_time;
	amdgpu_crtc->wm_high = latency_watermark_a;

	/* Save number of lines the linebuffer leads before the scanout */
	amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines;
}

/* watermark setup */
static u32 dce_v6_0_line_buffer_adjust(struct amdgpu_device *adev,
				       struct amdgpu_crtc *amdgpu_crtc,
				       struct drm_display_mode *mode,
				       struct drm_display_mode *other_mode)
{
	u32 tmp, buffer_alloc, i;
	u32 pipe_offset = amdgpu_crtc->crtc_id * 0x8;
	/*
	 * Line Buffer Setup
	 * There are 3 line buffers, each one shared by 2 display controllers.
	 * mmDC_LB_MEMORY_SPLIT controls how that line buffer is shared between
	 * the display controllers. The partitioning is done via one of four
	 * preset allocations specified in bits 21:20:
	 * 0 - half lb
	 * 2 - whole lb, other crtc must be disabled
	 */
	/* this can get tricky if we have two large displays on a paired group
	 * of crtcs. Ideally for multiple large displays we'd assign them to
	 * non-linked crtcs for maximum line buffer allocation.
	 */
	if (amdgpu_crtc->base.enabled && mode) {
		if (other_mode) {
			tmp = 0; /* 1/2 */
			buffer_alloc = 1;
		} else {
			tmp = 2; /* whole */
			buffer_alloc = 2;
		}
	} else {
		tmp = 0;
		buffer_alloc = 0;
	}

	WREG32(mmDC_LB_MEMORY_SPLIT + amdgpu_crtc->crtc_offset,
	       DC_LB_MEMORY_CONFIG(tmp));

	WREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
	       (buffer_alloc << PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATED__SHIFT));
	for (i = 0; i < adev->usec_timeout; i++) {
		if (RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
		    PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATION_COMPLETED_MASK)
			break;
		udelay(1);
	}

	if (amdgpu_crtc->base.enabled && mode) {
		switch (tmp) {
		case 0:
		default:
			return 4096 * 2;
		case 2:
			return 8192 * 2;
		}
	}

	/* controller not enabled, so no lb used */
	return 0;
}

/**
 * dce_v6_0_bandwidth_update - program display watermarks
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate and program the display watermarks and line
 * buffer allocation (CIK).
 */
static void dce_v6_0_bandwidth_update(struct amdgpu_device *adev)
{
	struct drm_display_mode *mode0 = NULL;
	struct drm_display_mode *mode1 = NULL;
	u32 num_heads = 0, lb_size;
	int i;

	if (!adev->mode_info.mode_config_initialized)
		return;

	amdgpu_display_update_priority(adev);

	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		if (adev->mode_info.crtcs[i]->base.enabled)
			num_heads++;
	}
	for (i = 0; i < adev->mode_info.num_crtc; i += 2) {
		mode0 = &adev->mode_info.crtcs[i]->base.mode;
		mode1 = &adev->mode_info.crtcs[i+1]->base.mode;
		lb_size = dce_v6_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i], mode0, mode1);
		dce_v6_0_program_watermarks(adev, adev->mode_info.crtcs[i], lb_size, num_heads);
		lb_size = dce_v6_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i+1], mode1, mode0);
		dce_v6_0_program_watermarks(adev, adev->mode_info.crtcs[i+1], lb_size, num_heads);
	}
}

static void dce_v6_0_audio_get_connected_pins(struct amdgpu_device *adev)
{
	int i;
	u32 tmp;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		tmp = RREG32_AUDIO_ENDPT(adev->mode_info.audio.pin[i].offset,
					 ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT);
		if (REG_GET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT,
				  PORT_CONNECTIVITY))
			adev->mode_info.audio.pin[i].connected = false;
		else
			adev->mode_info.audio.pin[i].connected = true;
	}
}

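/* Return the first connected audio pin, or NULL if none are connected. */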
static struct amdgpu_audio_pin *dce_v6_0_audio_get_pin(struct amdgpu_device *adev)
{
	int i;

	dce_v6_0_audio_get_connected_pins(adev);

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		if (adev->mode_info.audio.pin[i].connected)
			return &adev->mode_info.audio.pin[i];
	}
	DRM_ERROR("No connected audio pins found!\n");
	return NULL;
}

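/* Route the selected audio pin to this encoder's AFMT block. */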
static void dce_v6_0_audio_select_pin(struct drm_encoder *encoder)
{
	struct amdgpu_device *adev = drm_to_adev(encoder->dev);
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;

	if (!dig || !dig->afmt || !dig->afmt->pin)
		return;

	WREG32(mmAFMT_AUDIO_SRC_CONTROL + dig->afmt->offset,
	       REG_SET_FIELD(0, AFMT_AUDIO_SRC_CONTROL, AFMT_AUDIO_SRC_SELECT,
			     dig->afmt->pin->id));
}

static void dce_v6_0_audio_write_latency_fields(struct drm_encoder *encoder,
						struct drm_display_mode *mode)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct amdgpu_connector *amdgpu_connector = NULL;
	int interlace = 0;
	u32 tmp;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		if (connector->encoder == encoder) {
			amdgpu_connector = to_amdgpu_connector(connector);
			break;
		}
	}
	drm_connector_list_iter_end(&iter);

	if (!amdgpu_connector) {
		DRM_ERROR("Couldn't find encoder's connector\n");
		return;
	}

	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		interlace = 1;

	if (connector->latency_present[interlace]) {
		tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
				    VIDEO_LIPSYNC, connector->video_latency[interlace]);
		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
				    AUDIO_LIPSYNC, connector->audio_latency[interlace]);
	} else {
		tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
				    VIDEO_LIPSYNC, 0);
		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
				    AUDIO_LIPSYNC, 0);
	}
	WREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
			   ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
}

static void dce_v6_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct amdgpu_connector *amdgpu_connector = NULL;
	u8 *sadb = NULL;
	int sad_count;
	u32 tmp;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		if (connector->encoder == encoder) {
			amdgpu_connector = to_amdgpu_connector(connector);
			break;
		}
	}
	drm_connector_list_iter_end(&iter);

	if (!amdgpu_connector) {
		DRM_ERROR("Couldn't find encoder's connector\n");
		return;
	}

	sad_count = drm_edid_to_speaker_allocation(amdgpu_connector_edid(connector), &sadb);
	if (sad_count < 0) {
		DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
		sad_count = 0;
	}

	/* program the speaker allocation */
	tmp = RREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
				 ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
	tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
			    HDMI_CONNECTION, 0);
	tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
			    DP_CONNECTION, 0);

	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort)
		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
				    DP_CONNECTION, 1);
	else
		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
				    HDMI_CONNECTION, 1);

	if (sad_count)
		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
				    SPEAKER_ALLOCATION, sadb[0]);
	else
		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
				    SPEAKER_ALLOCATION, 5); /* stereo */

	WREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
			   ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);

	kfree(sadb);
}

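/* Program the audio descriptor (SAD) registers from the connector's EDID. */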
static void dce_v6_0_audio_write_sad_regs(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct amdgpu_connector *amdgpu_connector = NULL;
	struct cea_sad *sads;
	int i, sad_count;

	static const u16 eld_reg_to_type[][2] = {
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2, HDMI_AUDIO_CODING_TYPE_MPEG1 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3, HDMI_AUDIO_CODING_TYPE_MP3 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4, HDMI_AUDIO_CODING_TYPE_MPEG2 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5, HDMI_AUDIO_CODING_TYPE_AAC_LC },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6, HDMI_AUDIO_CODING_TYPE_DTS },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7, HDMI_AUDIO_CODING_TYPE_ATRAC },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9, HDMI_AUDIO_CODING_TYPE_EAC3 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10, HDMI_AUDIO_CODING_TYPE_DTS_HD },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11, HDMI_AUDIO_CODING_TYPE_MLP },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
	};

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		if (connector->encoder == encoder) {
			amdgpu_connector = to_amdgpu_connector(connector);
			break;
		}
	}
	drm_connector_list_iter_end(&iter);

	if (!amdgpu_connector) {
		DRM_ERROR("Couldn't find encoder's connector\n");
		return;
	}

	sad_count = drm_edid_to_sad(amdgpu_connector_edid(connector), &sads);
	if (sad_count < 0)
		DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
	if (sad_count <= 0)
		return;

	for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
		u32 tmp = 0;
		u8 stereo_freqs = 0;
		int max_channels = -1;
		int j;

		for (j = 0; j < sad_count; j++) {
			struct cea_sad *sad = &sads[j];

			if (sad->format == eld_reg_to_type[i][1]) {
				if (sad->channels > max_channels) {
					tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
							    MAX_CHANNELS, sad->channels);
					tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
							    DESCRIPTOR_BYTE_2, sad->byte2);
					tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
							    SUPPORTED_FREQUENCIES, sad->freq);
					max_channels = sad->channels;
				}

				if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
					stereo_freqs |= sad->freq;
				else
					break;
			}
		}

		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
				    SUPPORTED_FREQUENCIES_STEREO, stereo_freqs);
		WREG32_AUDIO_ENDPT(dig->afmt->pin->offset, eld_reg_to_type[i][0], tmp);
	}

	kfree(sads);
}

static void dce_v6_0_audio_enable(struct amdgpu_device *adev,
				  struct amdgpu_audio_pin *pin,
				  bool enable)
{
	if (!pin)
		return;

	WREG32_AUDIO_ENDPT(pin->offset, ixAZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
			   enable ? AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK : 0);
}

static const u32 pin_offsets[7] =
{
	(0x1780 - 0x1780),
	(0x1786 - 0x1780),
	(0x178c - 0x1780),
	(0x1792 - 0x1780),
	(0x1798 - 0x1780),
	(0x179d - 0x1780),
	(0x17a4 - 0x1780),
};

static int dce_v6_0_audio_init(struct amdgpu_device *adev)
{
	int i;

	if (!amdgpu_audio)
		return 0;

	adev->mode_info.audio.enabled = true;

	switch (adev->asic_type) {
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	default:
		adev->mode_info.audio.num_pins = 6;
		break;
	case CHIP_OLAND:
		adev->mode_info.audio.num_pins = 2;
		break;
	}

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		adev->mode_info.audio.pin[i].channels = -1;
		adev->mode_info.audio.pin[i].rate = -1;
		adev->mode_info.audio.pin[i].bits_per_sample = -1;
		adev->mode_info.audio.pin[i].status_bits = 0;
		adev->mode_info.audio.pin[i].category_code = 0;
		adev->mode_info.audio.pin[i].connected = false;
		adev->mode_info.audio.pin[i].offset = pin_offsets[i];
		adev->mode_info.audio.pin[i].id = i;
		dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
	}

	return 0;
}

static void dce_v6_0_audio_fini(struct amdgpu_device *adev)
{
	int i;

	if (!amdgpu_audio)
		return;

	if (!adev->mode_info.audio.enabled)
		return;

	for (i = 0; i < adev->mode_info.audio.num_pins; i++)
		dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);

	adev->mode_info.audio.enabled = false;
}

static void dce_v6_0_audio_set_vbi_packet(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	u32 tmp;

	tmp = RREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_NULL_SEND, 1);
	tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_GC_SEND, 1);
	tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_GC_CONT, 1);
	WREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset, tmp);
}

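/* Program the HDMI ACR (audio clock regeneration) CTS/N pairs for the
 * 32, 44.1 and 48 kHz sample rates at the given pixel clock.
 */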
static void dce_v6_0_audio_set_acr(struct drm_encoder *encoder,
				   uint32_t clock, int bpc)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_afmt_acr acr = amdgpu_afmt_acr(clock);
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	u32 tmp;

	tmp = RREG32(mmHDMI_ACR_PACKET_CONTROL + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_AUTO_SEND, 1);
	tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_SOURCE,
			    bpc > 8 ? 0 : 1);
	WREG32(mmHDMI_ACR_PACKET_CONTROL + dig->afmt->offset, tmp);

	tmp = RREG32(mmHDMI_ACR_32_0 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_ACR_32_0, HDMI_ACR_CTS_32, acr.cts_32khz);
	WREG32(mmHDMI_ACR_32_0 + dig->afmt->offset, tmp);
	tmp = RREG32(mmHDMI_ACR_32_1 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_ACR_32_1, HDMI_ACR_N_32, acr.n_32khz);
	WREG32(mmHDMI_ACR_32_1 + dig->afmt->offset, tmp);

	tmp = RREG32(mmHDMI_ACR_44_0 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_ACR_44_0, HDMI_ACR_CTS_44, acr.cts_44_1khz);
	WREG32(mmHDMI_ACR_44_0 + dig->afmt->offset, tmp);
	tmp = RREG32(mmHDMI_ACR_44_1 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_ACR_44_1, HDMI_ACR_N_44, acr.n_44_1khz);
	WREG32(mmHDMI_ACR_44_1 + dig->afmt->offset, tmp);

	tmp = RREG32(mmHDMI_ACR_48_0 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_ACR_48_0, HDMI_ACR_CTS_48, acr.cts_48khz);
	WREG32(mmHDMI_ACR_48_0 + dig->afmt->offset, tmp);
	tmp = RREG32(mmHDMI_ACR_48_1 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_ACR_48_1, HDMI_ACR_N_48, acr.n_48khz);
	WREG32(mmHDMI_ACR_48_1 + dig->afmt->offset, tmp);
}

static void dce_v6_0_audio_set_avi_infoframe(struct drm_encoder *encoder,
					     struct drm_display_mode *mode)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
	struct hdmi_avi_infoframe frame;
	u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
	uint8_t *payload = buffer + 3;
	uint8_t *header = buffer;
	ssize_t err;
	u32 tmp;

	err = drm_hdmi_avi_infoframe_from_display_mode(&frame, connector, mode);
	if (err < 0) {
		DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
		return;
	}

	err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
	if (err < 0) {
		DRM_ERROR("failed to pack AVI infoframe: %zd\n", err);
		return;
	}

	WREG32(mmAFMT_AVI_INFO0 + dig->afmt->offset,
	       payload[0x0] | (payload[0x1] << 8) | (payload[0x2] << 16) | (payload[0x3] << 24));
	WREG32(mmAFMT_AVI_INFO1 + dig->afmt->offset,
	       payload[0x4] | (payload[0x5] << 8) | (payload[0x6] << 16) | (payload[0x7] << 24));
	WREG32(mmAFMT_AVI_INFO2 + dig->afmt->offset,
	       payload[0x8] | (payload[0x9] << 8) | (payload[0xA] << 16) | (payload[0xB] << 24));
	WREG32(mmAFMT_AVI_INFO3 + dig->afmt->offset,
	       payload[0xC] | (payload[0xD] << 8) | (header[1] << 24));

	tmp = RREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset);
	/* anything other than 0 */
	tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL1,
			    HDMI_AUDIO_INFO_LINE, 2);
	WREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset, tmp);
}

static void dce_v6_0_audio_set_dto(struct drm_encoder *encoder, u32 clock)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
	int em = amdgpu_atombios_encoder_get_encoder_mode(encoder);
	u32 tmp;

	/*
	 * Two dtos: generally use dto0 for hdmi, dto1 for dp.
	 * Express [24MHz / target pixel clock] as an exact rational
	 * number (ratio of two integers): DCCG_AUDIO_DTOx_PHASE is the
	 * numerator and DCCG_AUDIO_DTOx_MODULE is the denominator.
	 */
	tmp = RREG32(mmDCCG_AUDIO_DTO_SOURCE);
	tmp = REG_SET_FIELD(tmp, DCCG_AUDIO_DTO_SOURCE,
			    DCCG_AUDIO_DTO0_SOURCE_SEL, amdgpu_crtc->crtc_id);
	if (em == ATOM_ENCODER_MODE_HDMI) {
		tmp = REG_SET_FIELD(tmp, DCCG_AUDIO_DTO_SOURCE,
				    DCCG_AUDIO_DTO_SEL, 0);
	} else if (ENCODER_MODE_IS_DP(em)) {
		tmp = REG_SET_FIELD(tmp, DCCG_AUDIO_DTO_SOURCE,
				    DCCG_AUDIO_DTO_SEL, 1);
	}
	WREG32(mmDCCG_AUDIO_DTO_SOURCE, tmp);
	if (em == ATOM_ENCODER_MODE_HDMI) {
		WREG32(mmDCCG_AUDIO_DTO0_PHASE, 24000);
		WREG32(mmDCCG_AUDIO_DTO0_MODULE, clock);
	} else if (ENCODER_MODE_IS_DP(em)) {
		WREG32(mmDCCG_AUDIO_DTO1_PHASE, 24000);
		WREG32(mmDCCG_AUDIO_DTO1_MODULE, clock);
	}
}

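/* Set up the AFMT audio packet and IEC 60958 channel status registers. */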
dce_v6_0_audio_set_packet(struct drm_encoder * encoder)1522 static void dce_v6_0_audio_set_packet(struct drm_encoder *encoder)
1523 {
1524 struct drm_device *dev = encoder->dev;
1525 struct amdgpu_device *adev = drm_to_adev(dev);
1526 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1527 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1528 u32 tmp;
1529
1530 tmp = RREG32(mmAFMT_INFOFRAME_CONTROL0 + dig->afmt->offset);
1531 tmp = REG_SET_FIELD(tmp, AFMT_INFOFRAME_CONTROL0, AFMT_AUDIO_INFO_UPDATE, 1);
1532 WREG32(mmAFMT_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);
1533
1534 tmp = RREG32(mmAFMT_60958_0 + dig->afmt->offset);
1535 tmp = REG_SET_FIELD(tmp, AFMT_60958_0, AFMT_60958_CS_CHANNEL_NUMBER_L, 1);
1536 WREG32(mmAFMT_60958_0 + dig->afmt->offset, tmp);
1537
1538 tmp = RREG32(mmAFMT_60958_1 + dig->afmt->offset);
1539 tmp = REG_SET_FIELD(tmp, AFMT_60958_1, AFMT_60958_CS_CHANNEL_NUMBER_R, 2);
1540 WREG32(mmAFMT_60958_1 + dig->afmt->offset, tmp);
1541
1542 tmp = RREG32(mmAFMT_60958_2 + dig->afmt->offset);
1543 tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_2, 3);
1544 tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_3, 4);
1545 tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_4, 5);
1546 tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_5, 6);
1547 tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_6, 7);
1548 tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_7, 8);
1549 WREG32(mmAFMT_60958_2 + dig->afmt->offset, tmp);
1550
1551 tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL2 + dig->afmt->offset);
1552 tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL2, AFMT_AUDIO_CHANNEL_ENABLE, 0xff);
1553 WREG32(mmAFMT_AUDIO_PACKET_CONTROL2 + dig->afmt->offset, tmp);
1554
1555 tmp = RREG32(mmHDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset);
1556 tmp = REG_SET_FIELD(tmp, HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_DELAY_EN, 1);
1557 tmp = REG_SET_FIELD(tmp, HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_PACKETS_PER_LINE, 3);
1558 WREG32(mmHDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
1559
1560 tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
1561 tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_RESET_FIFO_WHEN_AUDIO_DIS, 1);
1562 tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_60958_CS_UPDATE, 1);
1563 WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
1564 }
1565
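/* Assert or deassert AV mute via the HDMI General Control (GC) packet. */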
1566 static void dce_v6_0_audio_set_mute(struct drm_encoder *encoder, bool mute)
1567 {
1568 struct drm_device *dev = encoder->dev;
1569 struct amdgpu_device *adev = drm_to_adev(dev);
1570 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1571 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1572 u32 tmp;
1573
1574 tmp = RREG32(mmHDMI_GC + dig->afmt->offset);
1575 tmp = REG_SET_FIELD(tmp, HDMI_GC, HDMI_GC_AVMUTE, mute ? 1 : 0);
1576 WREG32(mmHDMI_GC + dig->afmt->offset, tmp);
1577 }
1578
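/*
 * Start or stop HDMI infoframe transmission: AVI and audio infoframes are
 * sent continuously and audio sample packets are enabled while the link is
 * active.
 */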
1579 static void dce_v6_0_audio_hdmi_enable(struct drm_encoder *encoder, bool enable)
1580 {
1581 struct drm_device *dev = encoder->dev;
1582 struct amdgpu_device *adev = drm_to_adev(dev);
1583 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1584 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1585 u32 tmp;
1586
1587 if (enable) {
1588 tmp = RREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset);
1589 tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_SEND, 1);
1590 tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_CONT, 1);
1591 tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, 1);
1592 tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_CONT, 1);
1593 WREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);
1594
1595 tmp = RREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset);
1596 tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL1, HDMI_AVI_INFO_LINE, 2);
1597 WREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset, tmp);
1598
1599 tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
1600 tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, 1);
1601 WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
1602 } else {
1603 tmp = RREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset);
1604 tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_SEND, 0);
1605 tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_CONT, 0);
1606 tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, 0);
1607 tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_CONT, 0);
1608 WREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);
1609
1610 tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
1611 tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, 0);
1612 WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
1613 }
1614 }
1615
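/*
 * For DP, audio travels in secondary data packets: enable the audio sample,
 * timestamp and infoframe packets (ASP/ATP/AIP) and the secondary stream,
 * or clear DP_SEC_CNTL entirely to disable them.
 */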
1616 static void dce_v6_0_audio_dp_enable(struct drm_encoder *encoder, bool enable)
1617 {
1618 struct drm_device *dev = encoder->dev;
1619 struct amdgpu_device *adev = drm_to_adev(dev);
1620 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1621 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1622 u32 tmp;
1623
1624 if (enable) {
1625 tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
1626 tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, 1);
1627 WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
1628
1629 tmp = RREG32(mmDP_SEC_TIMESTAMP + dig->afmt->offset);
1630 tmp = REG_SET_FIELD(tmp, DP_SEC_TIMESTAMP, DP_SEC_TIMESTAMP_MODE, 1);
1631 WREG32(mmDP_SEC_TIMESTAMP + dig->afmt->offset, tmp);
1632
1633 tmp = RREG32(mmDP_SEC_CNTL + dig->afmt->offset);
1634 tmp = REG_SET_FIELD(tmp, DP_SEC_CNTL, DP_SEC_ASP_ENABLE, 1);
1635 tmp = REG_SET_FIELD(tmp, DP_SEC_CNTL, DP_SEC_ATP_ENABLE, 1);
1636 tmp = REG_SET_FIELD(tmp, DP_SEC_CNTL, DP_SEC_AIP_ENABLE, 1);
1637 tmp = REG_SET_FIELD(tmp, DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, 1);
1638 WREG32(mmDP_SEC_CNTL + dig->afmt->offset, tmp);
1639 } else {
1640 WREG32(mmDP_SEC_CNTL + dig->afmt->offset, 0);
1641 }
1642 }
1643
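/*
 * Full audio/AFMT mode set for an HDMI or DP encoder: mute, program speaker
 * allocation, SADs and latency fields, set up the audio DTO (plus VBI and ACR
 * packets on HDMI), select the audio pin, send the AVI infoframe, then unmute
 * and re-enable the audio stream.
 */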
1644 static void dce_v6_0_afmt_setmode(struct drm_encoder *encoder,
1645 struct drm_display_mode *mode)
1646 {
1647 struct drm_device *dev = encoder->dev;
1648 struct amdgpu_device *adev = drm_to_adev(dev);
1649 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1650 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1651 struct drm_connector *connector;
1652 struct drm_connector_list_iter iter;
1653 struct amdgpu_connector *amdgpu_connector = NULL;
1654 int em = amdgpu_atombios_encoder_get_encoder_mode(encoder);
1655 int bpc = 8;
1656
1657 if (!dig || !dig->afmt)
1658 return;
1659
1660 drm_connector_list_iter_begin(dev, &iter);
1661 drm_for_each_connector_iter(connector, &iter) {
1662 if (connector->encoder == encoder) {
1663 amdgpu_connector = to_amdgpu_connector(connector);
1664 break;
1665 }
1666 }
1667 drm_connector_list_iter_end(&iter);
1668
1669 if (!amdgpu_connector) {
1670 DRM_ERROR("Couldn't find encoder's connector\n");
1671 return;
1672 }
1673
1674 if (!dig->afmt->enabled)
1675 return;
1676
1677 dig->afmt->pin = dce_v6_0_audio_get_pin(adev);
1678 if (!dig->afmt->pin)
1679 return;
1680
1681 if (encoder->crtc) {
1682 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
1683 bpc = amdgpu_crtc->bpc;
1684 }
1685
1686 /* disable audio before setting up hw */
1687 dce_v6_0_audio_enable(adev, dig->afmt->pin, false);
1688
1689 dce_v6_0_audio_set_mute(encoder, true);
1690 dce_v6_0_audio_write_speaker_allocation(encoder);
1691 dce_v6_0_audio_write_sad_regs(encoder);
1692 dce_v6_0_audio_write_latency_fields(encoder, mode);
1693 if (em == ATOM_ENCODER_MODE_HDMI) {
1694 dce_v6_0_audio_set_dto(encoder, mode->clock);
1695 dce_v6_0_audio_set_vbi_packet(encoder);
1696 dce_v6_0_audio_set_acr(encoder, mode->clock, bpc);
1697 } else if (ENCODER_MODE_IS_DP(em)) {
1698 dce_v6_0_audio_set_dto(encoder, adev->clock.default_dispclk * 10);
1699 }
1700 dce_v6_0_audio_set_packet(encoder);
1701 dce_v6_0_audio_select_pin(encoder);
1702 dce_v6_0_audio_set_avi_infoframe(encoder, mode);
1703 dce_v6_0_audio_set_mute(encoder, false);
1704 if (em == ATOM_ENCODER_MODE_HDMI) {
1705 		dce_v6_0_audio_hdmi_enable(encoder, true);
1706 } else if (ENCODER_MODE_IS_DP(em)) {
1707 		dce_v6_0_audio_dp_enable(encoder, true);
1708 }
1709
1710 /* enable audio after setting up hw */
1711 dce_v6_0_audio_enable(adev, dig->afmt->pin, true);
1712 }
1713
1714 static void dce_v6_0_afmt_enable(struct drm_encoder *encoder, bool enable)
1715 {
1716 struct drm_device *dev = encoder->dev;
1717 struct amdgpu_device *adev = drm_to_adev(dev);
1718 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1719 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1720
1721 if (!dig || !dig->afmt)
1722 return;
1723
1724 /* Silent, r600_hdmi_enable will raise WARN for us */
1725 if (enable && dig->afmt->enabled)
1726 return;
1727
1728 if (!enable && !dig->afmt->enabled)
1729 return;
1730
1731 if (!enable && dig->afmt->pin) {
1732 dce_v6_0_audio_enable(adev, dig->afmt->pin, false);
1733 dig->afmt->pin = NULL;
1734 }
1735
1736 dig->afmt->enabled = enable;
1737
1738 DRM_DEBUG("%sabling AFMT interface @ 0x%04X for encoder 0x%x\n",
1739 enable ? "En" : "Dis", dig->afmt->offset, amdgpu_encoder->encoder_id);
1740 }
1741
1742 static int dce_v6_0_afmt_init(struct amdgpu_device *adev)
1743 {
1744 int i, j;
1745
1746 for (i = 0; i < adev->mode_info.num_dig; i++)
1747 adev->mode_info.afmt[i] = NULL;
1748
1749 /* DCE6 has audio blocks tied to DIG encoders */
1750 for (i = 0; i < adev->mode_info.num_dig; i++) {
1751 adev->mode_info.afmt[i] = kzalloc(sizeof(struct amdgpu_afmt), GFP_KERNEL);
1752 if (adev->mode_info.afmt[i]) {
1753 adev->mode_info.afmt[i]->offset = dig_offsets[i];
1754 adev->mode_info.afmt[i]->id = i;
1755 } else {
1756 for (j = 0; j < i; j++) {
1757 kfree(adev->mode_info.afmt[j]);
1758 adev->mode_info.afmt[j] = NULL;
1759 }
1760 DRM_ERROR("Out of memory allocating afmt table\n");
1761 return -ENOMEM;
1762 }
1763 }
1764 return 0;
1765 }
1766
1767 static void dce_v6_0_afmt_fini(struct amdgpu_device *adev)
1768 {
1769 int i;
1770
1771 for (i = 0; i < adev->mode_info.num_dig; i++) {
1772 kfree(adev->mode_info.afmt[i]);
1773 adev->mode_info.afmt[i] = NULL;
1774 }
1775 }
1776
1777 static const u32 vga_control_regs[6] =
1778 {
1779 mmD1VGA_CONTROL,
1780 mmD2VGA_CONTROL,
1781 mmD3VGA_CONTROL,
1782 mmD4VGA_CONTROL,
1783 mmD5VGA_CONTROL,
1784 mmD6VGA_CONTROL,
1785 };
1786
1787 static void dce_v6_0_vga_enable(struct drm_crtc *crtc, bool enable)
1788 {
1789 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1790 struct drm_device *dev = crtc->dev;
1791 struct amdgpu_device *adev = drm_to_adev(dev);
1792 u32 vga_control;
1793
1794 vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1;
1795 WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control | (enable ? 1 : 0));
1796 }
1797
1798 static void dce_v6_0_grph_enable(struct drm_crtc *crtc, bool enable)
1799 {
1800 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1801 struct drm_device *dev = crtc->dev;
1802 struct amdgpu_device *adev = drm_to_adev(dev);
1803
1804 WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, enable ? 1 : 0);
1805 }
1806
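/*
 * Program the primary surface for a crtc: pin the framebuffer BO in VRAM
 * (unless called from the atomic path, where it is assumed to already be
 * pinned and idle), then set the surface address, pixel format, tiling,
 * pitch and viewport, and unpin any previously scanned-out BO.
 */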
1807 static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
1808 struct drm_framebuffer *fb,
1809 int x, int y, int atomic)
1810 {
1811 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1812 struct drm_device *dev = crtc->dev;
1813 struct amdgpu_device *adev = drm_to_adev(dev);
1814 struct drm_framebuffer *target_fb;
1815 struct drm_gem_object *obj;
1816 struct amdgpu_bo *abo;
1817 uint64_t fb_location, tiling_flags;
1818 uint32_t fb_format, fb_pitch_pixels, pipe_config;
1819 u32 fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_NONE);
1820 u32 viewport_w, viewport_h;
1821 int r;
1822 bool bypass_lut = false;
1823
1824 /* no fb bound */
1825 if (!atomic && !crtc->primary->fb) {
1826 DRM_DEBUG_KMS("No FB bound\n");
1827 return 0;
1828 }
1829
1830 if (atomic)
1831 target_fb = fb;
1832 else
1833 target_fb = crtc->primary->fb;
1834
1835 /* If atomic, assume fb object is pinned & idle & fenced and
1836 * just update base pointers
1837 */
1838 obj = target_fb->obj[0];
1839 abo = gem_to_amdgpu_bo(obj);
1840 r = amdgpu_bo_reserve(abo, false);
1841 if (unlikely(r != 0))
1842 return r;
1843
1844 if (!atomic) {
1845 r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM);
1846 if (unlikely(r != 0)) {
1847 amdgpu_bo_unreserve(abo);
1848 return -EINVAL;
1849 }
1850 }
1851 fb_location = amdgpu_bo_gpu_offset(abo);
1852
1853 amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
1854 amdgpu_bo_unreserve(abo);
1855
1856 switch (target_fb->format->format) {
1857 case DRM_FORMAT_C8:
1858 fb_format = (GRPH_DEPTH(GRPH_DEPTH_8BPP) |
1859 GRPH_FORMAT(GRPH_FORMAT_INDEXED));
1860 break;
1861 case DRM_FORMAT_XRGB4444:
1862 case DRM_FORMAT_ARGB4444:
1863 fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
1864 GRPH_FORMAT(GRPH_FORMAT_ARGB4444));
1865 #ifdef __BIG_ENDIAN
1866 fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
1867 #endif
1868 break;
1869 case DRM_FORMAT_XRGB1555:
1870 case DRM_FORMAT_ARGB1555:
1871 fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
1872 GRPH_FORMAT(GRPH_FORMAT_ARGB1555));
1873 #ifdef __BIG_ENDIAN
1874 fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
1875 #endif
1876 break;
1877 case DRM_FORMAT_BGRX5551:
1878 case DRM_FORMAT_BGRA5551:
1879 fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
1880 GRPH_FORMAT(GRPH_FORMAT_BGRA5551));
1881 #ifdef __BIG_ENDIAN
1882 fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
1883 #endif
1884 break;
1885 case DRM_FORMAT_RGB565:
1886 fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
1887 GRPH_FORMAT(GRPH_FORMAT_ARGB565));
1888 #ifdef __BIG_ENDIAN
1889 fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
1890 #endif
1891 break;
1892 case DRM_FORMAT_XRGB8888:
1893 case DRM_FORMAT_ARGB8888:
1894 fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) |
1895 GRPH_FORMAT(GRPH_FORMAT_ARGB8888));
1896 #ifdef __BIG_ENDIAN
1897 fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32);
1898 #endif
1899 break;
1900 case DRM_FORMAT_XRGB2101010:
1901 case DRM_FORMAT_ARGB2101010:
1902 fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) |
1903 GRPH_FORMAT(GRPH_FORMAT_ARGB2101010));
1904 #ifdef __BIG_ENDIAN
1905 fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32);
1906 #endif
1907 	/* A fb with more than 8 bpc needs to bypass the hw lut to retain precision */
1908 bypass_lut = true;
1909 break;
1910 case DRM_FORMAT_BGRX1010102:
1911 case DRM_FORMAT_BGRA1010102:
1912 fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) |
1913 GRPH_FORMAT(GRPH_FORMAT_BGRA1010102));
1914 #ifdef __BIG_ENDIAN
1915 fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32);
1916 #endif
1917 	/* A fb with more than 8 bpc needs to bypass the hw lut to retain precision */
1918 bypass_lut = true;
1919 break;
1920 case DRM_FORMAT_XBGR8888:
1921 case DRM_FORMAT_ABGR8888:
1922 fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) |
1923 GRPH_FORMAT(GRPH_FORMAT_ARGB8888));
1924 fb_swap = (GRPH_RED_CROSSBAR(GRPH_RED_SEL_B) |
1925 GRPH_BLUE_CROSSBAR(GRPH_BLUE_SEL_R));
1926 #ifdef __BIG_ENDIAN
1927 fb_swap |= GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32);
1928 #endif
1929 break;
1930 default:
1931 DRM_ERROR("Unsupported screen format %p4cc\n",
1932 &target_fb->format->format);
1933 return -EINVAL;
1934 }
1935
1936 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_2D_TILED_THIN1) {
1937 unsigned bankw, bankh, mtaspect, tile_split, num_banks;
1938
1939 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
1940 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
1941 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
1942 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
1943 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
1944
1945 fb_format |= GRPH_NUM_BANKS(num_banks);
1946 fb_format |= GRPH_ARRAY_MODE(GRPH_ARRAY_2D_TILED_THIN1);
1947 fb_format |= GRPH_TILE_SPLIT(tile_split);
1948 fb_format |= GRPH_BANK_WIDTH(bankw);
1949 fb_format |= GRPH_BANK_HEIGHT(bankh);
1950 fb_format |= GRPH_MACRO_TILE_ASPECT(mtaspect);
1951 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_1D_TILED_THIN1) {
1952 fb_format |= GRPH_ARRAY_MODE(GRPH_ARRAY_1D_TILED_THIN1);
1953 }
1954
1955 pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
1956 fb_format |= GRPH_PIPE_CONFIG(pipe_config);
1957
1958 dce_v6_0_vga_enable(crtc, false);
1959
1960 /* Make sure surface address is updated at vertical blank rather than
1961 * horizontal blank
1962 */
1963 WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, 0);
1964
1965 WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
1966 upper_32_bits(fb_location));
1967 WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
1968 upper_32_bits(fb_location));
1969 WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
1970 (u32)fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK);
1971 WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
1972 (u32) fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK);
1973 WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
1974 WREG32(mmGRPH_SWAP_CNTL + amdgpu_crtc->crtc_offset, fb_swap);
1975
1976 /*
1977 * The LUT only has 256 slots for indexing by a 8 bpc fb. Bypass the LUT
1978 * for > 8 bpc scanout to avoid truncation of fb indices to 8 msb's, to
1979 * retain the full precision throughout the pipeline.
1980 */
1981 WREG32_P(mmGRPH_LUT_10BIT_BYPASS + amdgpu_crtc->crtc_offset,
1982 (bypass_lut ? GRPH_LUT_10BIT_BYPASS__GRPH_LUT_10BIT_BYPASS_EN_MASK : 0),
1983 ~GRPH_LUT_10BIT_BYPASS__GRPH_LUT_10BIT_BYPASS_EN_MASK);
1984
1985 if (bypass_lut)
1986 DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n");
1987
1988 WREG32(mmGRPH_SURFACE_OFFSET_X + amdgpu_crtc->crtc_offset, 0);
1989 WREG32(mmGRPH_SURFACE_OFFSET_Y + amdgpu_crtc->crtc_offset, 0);
1990 WREG32(mmGRPH_X_START + amdgpu_crtc->crtc_offset, 0);
1991 WREG32(mmGRPH_Y_START + amdgpu_crtc->crtc_offset, 0);
1992 WREG32(mmGRPH_X_END + amdgpu_crtc->crtc_offset, target_fb->width);
1993 WREG32(mmGRPH_Y_END + amdgpu_crtc->crtc_offset, target_fb->height);
1994
1995 fb_pitch_pixels = target_fb->pitches[0] / target_fb->format->cpp[0];
1996 WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset, fb_pitch_pixels);
1997
1998 dce_v6_0_grph_enable(crtc, true);
1999
2000 WREG32(mmDESKTOP_HEIGHT + amdgpu_crtc->crtc_offset,
2001 target_fb->height);
2002 x &= ~3;
2003 y &= ~1;
2004 WREG32(mmVIEWPORT_START + amdgpu_crtc->crtc_offset,
2005 (x << 16) | y);
2006 viewport_w = crtc->mode.hdisplay;
2007 viewport_h = (crtc->mode.vdisplay + 1) & ~1;
2008
2009 WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset,
2010 (viewport_w << 16) | viewport_h);
2011
2012 /* set pageflip to happen anywhere in vblank interval */
2013 WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0);
2014
2015 if (!atomic && fb && fb != crtc->primary->fb) {
2016 abo = gem_to_amdgpu_bo(fb->obj[0]);
2017 r = amdgpu_bo_reserve(abo, true);
2018 if (unlikely(r != 0))
2019 return r;
2020 amdgpu_bo_unpin(abo);
2021 amdgpu_bo_unreserve(abo);
2022 }
2023
2024 /* Bytes per pixel may have changed */
2025 dce_v6_0_bandwidth_update(adev);
2026
2027 return 0;
2028
2029 }
2030
2031 static void dce_v6_0_set_interleave(struct drm_crtc *crtc,
2032 struct drm_display_mode *mode)
2033 {
2034 struct drm_device *dev = crtc->dev;
2035 struct amdgpu_device *adev = drm_to_adev(dev);
2036 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2037
2038 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
2039 WREG32(mmDATA_FORMAT + amdgpu_crtc->crtc_offset,
2040 INTERLEAVE_EN);
2041 else
2042 WREG32(mmDATA_FORMAT + amdgpu_crtc->crtc_offset, 0);
2043 }
2044
2045 static void dce_v6_0_crtc_load_lut(struct drm_crtc *crtc)
2046 {
2047
2048 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2049 struct drm_device *dev = crtc->dev;
2050 struct amdgpu_device *adev = drm_to_adev(dev);
2051 u16 *r, *g, *b;
2052 int i;
2053
2054 DRM_DEBUG_KMS("%d\n", amdgpu_crtc->crtc_id);
2055
2056 WREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
2057 ((0 << INPUT_CSC_CONTROL__INPUT_CSC_GRPH_MODE__SHIFT) |
2058 (0 << INPUT_CSC_CONTROL__INPUT_CSC_OVL_MODE__SHIFT)));
2059 WREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset,
2060 PRESCALE_GRPH_CONTROL__GRPH_PRESCALE_BYPASS_MASK);
2061 WREG32(mmPRESCALE_OVL_CONTROL + amdgpu_crtc->crtc_offset,
2062 PRESCALE_OVL_CONTROL__OVL_PRESCALE_BYPASS_MASK);
2063 WREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset,
2064 ((0 << INPUT_GAMMA_CONTROL__GRPH_INPUT_GAMMA_MODE__SHIFT) |
2065 (0 << INPUT_GAMMA_CONTROL__OVL_INPUT_GAMMA_MODE__SHIFT)));
2066
2067 WREG32(mmDC_LUT_CONTROL + amdgpu_crtc->crtc_offset, 0);
2068
2069 WREG32(mmDC_LUT_BLACK_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0);
2070 WREG32(mmDC_LUT_BLACK_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0);
2071 WREG32(mmDC_LUT_BLACK_OFFSET_RED + amdgpu_crtc->crtc_offset, 0);
2072
2073 WREG32(mmDC_LUT_WHITE_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0xffff);
2074 WREG32(mmDC_LUT_WHITE_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0xffff);
2075 WREG32(mmDC_LUT_WHITE_OFFSET_RED + amdgpu_crtc->crtc_offset, 0xffff);
2076
2077 WREG32(mmDC_LUT_RW_MODE + amdgpu_crtc->crtc_offset, 0);
2078 WREG32(mmDC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007);
2079
2080 WREG32(mmDC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0);
2081 r = crtc->gamma_store;
2082 g = r + crtc->gamma_size;
2083 b = g + crtc->gamma_size;
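	/*
	 * Pack the 16-bit gamma values into the 10:10:10 DC_LUT_30_COLOR
	 * format: red in bits 29:20, green in bits 19:10, blue in bits 9:0.
	 */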
2084 for (i = 0; i < 256; i++) {
2085 WREG32(mmDC_LUT_30_COLOR + amdgpu_crtc->crtc_offset,
2086 ((*r++ & 0xffc0) << 14) |
2087 ((*g++ & 0xffc0) << 4) |
2088 (*b++ >> 6));
2089 }
2090
2091 WREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
2092 ((0 << DEGAMMA_CONTROL__GRPH_DEGAMMA_MODE__SHIFT) |
2093 (0 << DEGAMMA_CONTROL__OVL_DEGAMMA_MODE__SHIFT) |
2094 ICON_DEGAMMA_MODE(0) |
2095 (0 << DEGAMMA_CONTROL__CURSOR_DEGAMMA_MODE__SHIFT)));
2096 WREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset,
2097 ((0 << GAMUT_REMAP_CONTROL__GRPH_GAMUT_REMAP_MODE__SHIFT) |
2098 (0 << GAMUT_REMAP_CONTROL__OVL_GAMUT_REMAP_MODE__SHIFT)));
2099 WREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
2100 ((0 << REGAMMA_CONTROL__GRPH_REGAMMA_MODE__SHIFT) |
2101 (0 << REGAMMA_CONTROL__OVL_REGAMMA_MODE__SHIFT)));
2102 WREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
2103 ((0 << OUTPUT_CSC_CONTROL__OUTPUT_CSC_GRPH_MODE__SHIFT) |
2104 (0 << OUTPUT_CSC_CONTROL__OUTPUT_CSC_OVL_MODE__SHIFT)));
2105 /* XXX match this to the depth of the crtc fmt block, move to modeset? */
2106 WREG32(0x1a50 + amdgpu_crtc->crtc_offset, 0);
2107
2108
2109 }
2110
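/*
 * Map the UNIPHY encoder and link (A/B) to a DIG block index: UNIPHY link
 * A/B -> DIG0/1, UNIPHY1 -> DIG2/3, UNIPHY2 -> DIG4/5, UNIPHY3 -> DIG6.
 */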
2111 static int dce_v6_0_pick_dig_encoder(struct drm_encoder *encoder)
2112 {
2113 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
2114 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
2115
2116 switch (amdgpu_encoder->encoder_id) {
2117 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
2118 return dig->linkb ? 1 : 0;
2119 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
2120 return dig->linkb ? 3 : 2;
2121 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
2122 return dig->linkb ? 5 : 4;
2123 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
2124 return 6;
2125 default:
2126 DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id);
2127 return 0;
2128 }
2129 }
2130
2131 /**
2132 * dce_v6_0_pick_pll - Allocate a PPLL for use by the crtc.
2133 *
2134 * @crtc: drm crtc
2135 *
2136 * Returns the PPLL (Pixel PLL) to be used by the crtc. For DP monitors
2137 * a single PPLL can be used for all DP crtcs/encoders. For non-DP
2138 * monitors a dedicated PPLL must be used. If a particular board has
2139 * an external DP PLL, return ATOM_PPLL_INVALID to skip PLL programming
2140 * as there is no need to program the PLL itself. If we are not able to
2141 * allocate a PLL, return ATOM_PPLL_INVALID to skip PLL programming to
2142 * avoid messing up an existing monitor.
2143 *
2144 *
2145 */
2146 static u32 dce_v6_0_pick_pll(struct drm_crtc *crtc)
2147 {
2148 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2149 struct drm_device *dev = crtc->dev;
2150 struct amdgpu_device *adev = drm_to_adev(dev);
2151 u32 pll_in_use;
2152 int pll;
2153
2154 if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) {
2155 if (adev->clock.dp_extclk)
2156 /* skip PPLL programming if using ext clock */
2157 return ATOM_PPLL_INVALID;
2158 else
2159 return ATOM_PPLL0;
2160 } else {
2161 /* use the same PPLL for all monitors with the same clock */
2162 pll = amdgpu_pll_get_shared_nondp_ppll(crtc);
2163 if (pll != ATOM_PPLL_INVALID)
2164 return pll;
2165 }
2166
2167 	/* PPLL1 and PPLL2 */
2168 pll_in_use = amdgpu_pll_get_use_mask(crtc);
2169 if (!(pll_in_use & (1 << ATOM_PPLL2)))
2170 return ATOM_PPLL2;
2171 if (!(pll_in_use & (1 << ATOM_PPLL1)))
2172 return ATOM_PPLL1;
2173 DRM_ERROR("unable to allocate a PPLL\n");
2174 return ATOM_PPLL_INVALID;
2175 }
2176
2177 static void dce_v6_0_lock_cursor(struct drm_crtc *crtc, bool lock)
2178 {
2179 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
2180 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2181 uint32_t cur_lock;
2182
2183 cur_lock = RREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset);
2184 if (lock)
2185 cur_lock |= CUR_UPDATE__CURSOR_UPDATE_LOCK_MASK;
2186 else
2187 cur_lock &= ~CUR_UPDATE__CURSOR_UPDATE_LOCK_MASK;
2188 WREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset, cur_lock);
2189 }
2190
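/* Hiding the cursor simply rewrites CUR_CONTROL without the enable bit set. */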
2191 static void dce_v6_0_hide_cursor(struct drm_crtc *crtc)
2192 {
2193 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2194 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
2195
2196 WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
2197 (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
2198 (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
2199
2200
2201 }
2202
2203 static void dce_v6_0_show_cursor(struct drm_crtc *crtc)
2204 {
2205 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2206 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
2207
2208 WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
2209 upper_32_bits(amdgpu_crtc->cursor_addr));
2210 WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
2211 lower_32_bits(amdgpu_crtc->cursor_addr));
2212
2213 WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
2214 CUR_CONTROL__CURSOR_EN_MASK |
2215 (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
2216 (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
2217
2218 }
2219
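/*
 * Position the cursor.  Negative coordinates are handled by clamping the
 * position to 0 and moving the hotspot origin instead, so the cursor can be
 * partially off the top/left edge of the screen.
 */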
2220 static int dce_v6_0_cursor_move_locked(struct drm_crtc *crtc,
2221 int x, int y)
2222 {
2223 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2224 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
2225 int xorigin = 0, yorigin = 0;
2226
2227 int w = amdgpu_crtc->cursor_width;
2228
2229 amdgpu_crtc->cursor_x = x;
2230 amdgpu_crtc->cursor_y = y;
2231
2232 	/* avivo cursors are offset into the total surface */
2233 x += crtc->x;
2234 y += crtc->y;
2235 DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
2236
2237 if (x < 0) {
2238 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
2239 x = 0;
2240 }
2241 if (y < 0) {
2242 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
2243 y = 0;
2244 }
2245
2246 WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
2247 WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
2248 WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
2249 ((w - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
2250
2251 return 0;
2252 }
2253
2254 static int dce_v6_0_crtc_cursor_move(struct drm_crtc *crtc,
2255 int x, int y)
2256 {
2257 int ret;
2258
2259 dce_v6_0_lock_cursor(crtc, true);
2260 ret = dce_v6_0_cursor_move_locked(crtc, x, y);
2261 dce_v6_0_lock_cursor(crtc, false);
2262
2263 return ret;
2264 }
2265
2266 static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,
2267 struct drm_file *file_priv,
2268 uint32_t handle,
2269 uint32_t width,
2270 uint32_t height,
2271 int32_t hot_x,
2272 int32_t hot_y)
2273 {
2274 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2275 struct drm_gem_object *obj;
2276 struct amdgpu_bo *aobj;
2277 int ret;
2278
2279 if (!handle) {
2280 /* turn off cursor */
2281 dce_v6_0_hide_cursor(crtc);
2282 obj = NULL;
2283 goto unpin;
2284 }
2285
2286 if ((width > amdgpu_crtc->max_cursor_width) ||
2287 (height > amdgpu_crtc->max_cursor_height)) {
2288 DRM_ERROR("bad cursor width or height %d x %d\n", width, height);
2289 return -EINVAL;
2290 }
2291
2292 obj = drm_gem_object_lookup(file_priv, handle);
2293 if (!obj) {
2294 DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, amdgpu_crtc->crtc_id);
2295 return -ENOENT;
2296 }
2297
2298 aobj = gem_to_amdgpu_bo(obj);
2299 ret = amdgpu_bo_reserve(aobj, false);
2300 if (ret != 0) {
2301 drm_gem_object_put(obj);
2302 return ret;
2303 }
2304
2305 ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
2306 amdgpu_bo_unreserve(aobj);
2307 if (ret) {
2308 DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
2309 drm_gem_object_put(obj);
2310 return ret;
2311 }
2312 amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
2313
2314 dce_v6_0_lock_cursor(crtc, true);
2315
2316 if (width != amdgpu_crtc->cursor_width ||
2317 height != amdgpu_crtc->cursor_height ||
2318 hot_x != amdgpu_crtc->cursor_hot_x ||
2319 hot_y != amdgpu_crtc->cursor_hot_y) {
2320 int x, y;
2321
2322 x = amdgpu_crtc->cursor_x + amdgpu_crtc->cursor_hot_x - hot_x;
2323 y = amdgpu_crtc->cursor_y + amdgpu_crtc->cursor_hot_y - hot_y;
2324
2325 dce_v6_0_cursor_move_locked(crtc, x, y);
2326
2327 amdgpu_crtc->cursor_width = width;
2328 amdgpu_crtc->cursor_height = height;
2329 amdgpu_crtc->cursor_hot_x = hot_x;
2330 amdgpu_crtc->cursor_hot_y = hot_y;
2331 }
2332
2333 dce_v6_0_show_cursor(crtc);
2334 dce_v6_0_lock_cursor(crtc, false);
2335
2336 unpin:
2337 if (amdgpu_crtc->cursor_bo) {
2338 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
2339 ret = amdgpu_bo_reserve(aobj, true);
2340 if (likely(ret == 0)) {
2341 amdgpu_bo_unpin(aobj);
2342 amdgpu_bo_unreserve(aobj);
2343 }
2344 drm_gem_object_put(amdgpu_crtc->cursor_bo);
2345 }
2346
2347 amdgpu_crtc->cursor_bo = obj;
2348 return 0;
2349 }
2350
2351 static void dce_v6_0_cursor_reset(struct drm_crtc *crtc)
2352 {
2353 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2354
2355 if (amdgpu_crtc->cursor_bo) {
2356 dce_v6_0_lock_cursor(crtc, true);
2357
2358 dce_v6_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
2359 amdgpu_crtc->cursor_y);
2360
2361 dce_v6_0_show_cursor(crtc);
2362 dce_v6_0_lock_cursor(crtc, false);
2363 }
2364 }
2365
2366 static int dce_v6_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
2367 u16 *blue, uint32_t size,
2368 struct drm_modeset_acquire_ctx *ctx)
2369 {
2370 dce_v6_0_crtc_load_lut(crtc);
2371
2372 return 0;
2373 }
2374
2375 static void dce_v6_0_crtc_destroy(struct drm_crtc *crtc)
2376 {
2377 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2378
2379 drm_crtc_cleanup(crtc);
2380 kfree(amdgpu_crtc);
2381 }
2382
2383 static const struct drm_crtc_funcs dce_v6_0_crtc_funcs = {
2384 .cursor_set2 = dce_v6_0_crtc_cursor_set2,
2385 .cursor_move = dce_v6_0_crtc_cursor_move,
2386 .gamma_set = dce_v6_0_crtc_gamma_set,
2387 .set_config = amdgpu_display_crtc_set_config,
2388 .destroy = dce_v6_0_crtc_destroy,
2389 .page_flip_target = amdgpu_display_crtc_page_flip_target,
2390 .get_vblank_counter = amdgpu_get_vblank_counter_kms,
2391 .enable_vblank = amdgpu_enable_vblank_kms,
2392 .disable_vblank = amdgpu_disable_vblank_kms,
2393 .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
2394 };
2395
2396 static void dce_v6_0_crtc_dpms(struct drm_crtc *crtc, int mode)
2397 {
2398 struct drm_device *dev = crtc->dev;
2399 struct amdgpu_device *adev = drm_to_adev(dev);
2400 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2401 unsigned type;
2402
2403 switch (mode) {
2404 case DRM_MODE_DPMS_ON:
2405 amdgpu_crtc->enabled = true;
2406 amdgpu_atombios_crtc_enable(crtc, ATOM_ENABLE);
2407 amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
2408 /* Make sure VBLANK and PFLIP interrupts are still enabled */
2409 type = amdgpu_display_crtc_idx_to_irq_type(adev,
2410 amdgpu_crtc->crtc_id);
2411 amdgpu_irq_update(adev, &adev->crtc_irq, type);
2412 amdgpu_irq_update(adev, &adev->pageflip_irq, type);
2413 drm_crtc_vblank_on(crtc);
2414 dce_v6_0_crtc_load_lut(crtc);
2415 break;
2416 case DRM_MODE_DPMS_STANDBY:
2417 case DRM_MODE_DPMS_SUSPEND:
2418 case DRM_MODE_DPMS_OFF:
2419 drm_crtc_vblank_off(crtc);
2420 if (amdgpu_crtc->enabled)
2421 amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE);
2422 amdgpu_atombios_crtc_enable(crtc, ATOM_DISABLE);
2423 amdgpu_crtc->enabled = false;
2424 break;
2425 }
2426 /* adjust pm to dpms */
2427 amdgpu_dpm_compute_clocks(adev);
2428 }
2429
2430 static void dce_v6_0_crtc_prepare(struct drm_crtc *crtc)
2431 {
2432 /* disable crtc pair power gating before programming */
2433 amdgpu_atombios_crtc_powergate(crtc, ATOM_DISABLE);
2434 amdgpu_atombios_crtc_lock(crtc, ATOM_ENABLE);
2435 dce_v6_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
2436 }
2437
2438 static void dce_v6_0_crtc_commit(struct drm_crtc *crtc)
2439 {
2440 dce_v6_0_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
2441 amdgpu_atombios_crtc_lock(crtc, ATOM_DISABLE);
2442 }
2443
2444 static void dce_v6_0_crtc_disable(struct drm_crtc *crtc)
2445 {
2446
2447 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2448 struct drm_device *dev = crtc->dev;
2449 struct amdgpu_device *adev = drm_to_adev(dev);
2450 struct amdgpu_atom_ss ss;
2451 int i;
2452
2453 dce_v6_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
2454 if (crtc->primary->fb) {
2455 int r;
2456 struct amdgpu_bo *abo;
2457
2458 abo = gem_to_amdgpu_bo(crtc->primary->fb->obj[0]);
2459 r = amdgpu_bo_reserve(abo, true);
2460 if (unlikely(r))
2461 DRM_ERROR("failed to reserve abo before unpin\n");
2462 else {
2463 amdgpu_bo_unpin(abo);
2464 amdgpu_bo_unreserve(abo);
2465 }
2466 }
2467 /* disable the GRPH */
2468 dce_v6_0_grph_enable(crtc, false);
2469
2470 amdgpu_atombios_crtc_powergate(crtc, ATOM_ENABLE);
2471
2472 for (i = 0; i < adev->mode_info.num_crtc; i++) {
2473 if (adev->mode_info.crtcs[i] &&
2474 adev->mode_info.crtcs[i]->enabled &&
2475 i != amdgpu_crtc->crtc_id &&
2476 amdgpu_crtc->pll_id == adev->mode_info.crtcs[i]->pll_id) {
2477 			/* one other crtc is using this pll, don't turn
2478 * off the pll
2479 */
2480 goto done;
2481 }
2482 }
2483
2484 switch (amdgpu_crtc->pll_id) {
2485 case ATOM_PPLL1:
2486 case ATOM_PPLL2:
2487 /* disable the ppll */
2488 amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id,
2489 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
2490 break;
2491 default:
2492 break;
2493 }
2494 done:
2495 amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
2496 amdgpu_crtc->adjusted_clock = 0;
2497 amdgpu_crtc->encoder = NULL;
2498 amdgpu_crtc->connector = NULL;
2499 }
2500
2501 static int dce_v6_0_crtc_mode_set(struct drm_crtc *crtc,
2502 struct drm_display_mode *mode,
2503 struct drm_display_mode *adjusted_mode,
2504 int x, int y, struct drm_framebuffer *old_fb)
2505 {
2506 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2507
2508 if (!amdgpu_crtc->adjusted_clock)
2509 return -EINVAL;
2510
2511 amdgpu_atombios_crtc_set_pll(crtc, adjusted_mode);
2512 amdgpu_atombios_crtc_set_dtd_timing(crtc, adjusted_mode);
2513 dce_v6_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
2514 amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode);
2515 amdgpu_atombios_crtc_scaler_setup(crtc);
2516 dce_v6_0_cursor_reset(crtc);
2517 	/* update the hw version for dpm */
2518 amdgpu_crtc->hw_mode = *adjusted_mode;
2519
2520 return 0;
2521 }
2522
2523 static bool dce_v6_0_crtc_mode_fixup(struct drm_crtc *crtc,
2524 const struct drm_display_mode *mode,
2525 struct drm_display_mode *adjusted_mode)
2526 {
2527
2528 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2529 struct drm_device *dev = crtc->dev;
2530 struct drm_encoder *encoder;
2531
2532 /* assign the encoder to the amdgpu crtc to avoid repeated lookups later */
2533 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
2534 if (encoder->crtc == crtc) {
2535 amdgpu_crtc->encoder = encoder;
2536 amdgpu_crtc->connector = amdgpu_get_connector_for_encoder(encoder);
2537 break;
2538 }
2539 }
2540 if ((amdgpu_crtc->encoder == NULL) || (amdgpu_crtc->connector == NULL)) {
2541 amdgpu_crtc->encoder = NULL;
2542 amdgpu_crtc->connector = NULL;
2543 return false;
2544 }
2545 if (!amdgpu_display_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
2546 return false;
2547 if (amdgpu_atombios_crtc_prepare_pll(crtc, adjusted_mode))
2548 return false;
2549 /* pick pll */
2550 amdgpu_crtc->pll_id = dce_v6_0_pick_pll(crtc);
2551 /* if we can't get a PPLL for a non-DP encoder, fail */
2552 if ((amdgpu_crtc->pll_id == ATOM_PPLL_INVALID) &&
2553 !ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder)))
2554 return false;
2555
2556 return true;
2557 }
2558
2559 static int dce_v6_0_crtc_set_base(struct drm_crtc *crtc, int x, int y,
2560 struct drm_framebuffer *old_fb)
2561 {
2562 return dce_v6_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
2563 }
2564
2565 static int dce_v6_0_crtc_set_base_atomic(struct drm_crtc *crtc,
2566 struct drm_framebuffer *fb,
2567 int x, int y, enum mode_set_atomic state)
2568 {
2569 return dce_v6_0_crtc_do_set_base(crtc, fb, x, y, 1);
2570 }
2571
2572 static const struct drm_crtc_helper_funcs dce_v6_0_crtc_helper_funcs = {
2573 .dpms = dce_v6_0_crtc_dpms,
2574 .mode_fixup = dce_v6_0_crtc_mode_fixup,
2575 .mode_set = dce_v6_0_crtc_mode_set,
2576 .mode_set_base = dce_v6_0_crtc_set_base,
2577 .mode_set_base_atomic = dce_v6_0_crtc_set_base_atomic,
2578 .prepare = dce_v6_0_crtc_prepare,
2579 .commit = dce_v6_0_crtc_commit,
2580 .disable = dce_v6_0_crtc_disable,
2581 .get_scanout_position = amdgpu_crtc_get_scanout_position,
2582 };
2583
2584 static int dce_v6_0_crtc_init(struct amdgpu_device *adev, int index)
2585 {
2586 struct amdgpu_crtc *amdgpu_crtc;
2587
2588 amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) +
2589 (AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
2590 if (amdgpu_crtc == NULL)
2591 return -ENOMEM;
2592
2593 drm_crtc_init(adev_to_drm(adev), &amdgpu_crtc->base, &dce_v6_0_crtc_funcs);
2594
2595 drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
2596 amdgpu_crtc->crtc_id = index;
2597 adev->mode_info.crtcs[index] = amdgpu_crtc;
2598
2599 amdgpu_crtc->max_cursor_width = CURSOR_WIDTH;
2600 amdgpu_crtc->max_cursor_height = CURSOR_HEIGHT;
2601 adev_to_drm(adev)->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
2602 adev_to_drm(adev)->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
2603
2604 amdgpu_crtc->crtc_offset = crtc_offsets[amdgpu_crtc->crtc_id];
2605
2606 amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
2607 amdgpu_crtc->adjusted_clock = 0;
2608 amdgpu_crtc->encoder = NULL;
2609 amdgpu_crtc->connector = NULL;
2610 drm_crtc_helper_add(&amdgpu_crtc->base, &dce_v6_0_crtc_helper_funcs);
2611
2612 return 0;
2613 }
2614
2615 static int dce_v6_0_early_init(void *handle)
2616 {
2617 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2618
2619 adev->audio_endpt_rreg = &dce_v6_0_audio_endpt_rreg;
2620 adev->audio_endpt_wreg = &dce_v6_0_audio_endpt_wreg;
2621
2622 dce_v6_0_set_display_funcs(adev);
2623
2624 adev->mode_info.num_crtc = dce_v6_0_get_num_crtc(adev);
2625
2626 switch (adev->asic_type) {
2627 case CHIP_TAHITI:
2628 case CHIP_PITCAIRN:
2629 case CHIP_VERDE:
2630 adev->mode_info.num_hpd = 6;
2631 adev->mode_info.num_dig = 6;
2632 break;
2633 case CHIP_OLAND:
2634 adev->mode_info.num_hpd = 2;
2635 adev->mode_info.num_dig = 2;
2636 break;
2637 default:
2638 return -EINVAL;
2639 }
2640
2641 dce_v6_0_set_irq_funcs(adev);
2642
2643 return 0;
2644 }
2645
2646 static int dce_v6_0_sw_init(void *handle)
2647 {
2648 int r, i;
2649 bool ret;
2650 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2651
2652 for (i = 0; i < adev->mode_info.num_crtc; i++) {
2653 r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i + 1, &adev->crtc_irq);
2654 if (r)
2655 return r;
2656 }
2657
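	/* page flip interrupts use legacy src_ids 8..19, two per crtc */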
2658 for (i = 8; i < 20; i += 2) {
2659 r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i, &adev->pageflip_irq);
2660 if (r)
2661 return r;
2662 }
2663
2664 /* HPD hotplug */
2665 r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 42, &adev->hpd_irq);
2666 if (r)
2667 return r;
2668
2669 adev->mode_info.mode_config_initialized = true;
2670
2671 adev_to_drm(adev)->mode_config.funcs = &amdgpu_mode_funcs;
2672 adev_to_drm(adev)->mode_config.async_page_flip = true;
2673 adev_to_drm(adev)->mode_config.max_width = 16384;
2674 adev_to_drm(adev)->mode_config.max_height = 16384;
2675 adev_to_drm(adev)->mode_config.preferred_depth = 24;
2676 adev_to_drm(adev)->mode_config.prefer_shadow = 1;
2677 adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true;
2678 adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
2679
2680 r = amdgpu_display_modeset_create_props(adev);
2681 if (r)
2682 return r;
2683
2684 adev_to_drm(adev)->mode_config.max_width = 16384;
2685 adev_to_drm(adev)->mode_config.max_height = 16384;
2686
2687 /* allocate crtcs */
2688 for (i = 0; i < adev->mode_info.num_crtc; i++) {
2689 r = dce_v6_0_crtc_init(adev, i);
2690 if (r)
2691 return r;
2692 }
2693
2694 ret = amdgpu_atombios_get_connector_info_from_object_table(adev);
2695 if (ret)
2696 amdgpu_display_print_display_setup(adev_to_drm(adev));
2697 else
2698 return -EINVAL;
2699
2700 /* setup afmt */
2701 r = dce_v6_0_afmt_init(adev);
2702 if (r)
2703 return r;
2704
2705 r = dce_v6_0_audio_init(adev);
2706 if (r)
2707 return r;
2708
2709 drm_kms_helper_poll_init(adev_to_drm(adev));
2710
2711 return r;
2712 }
2713
2714 static int dce_v6_0_sw_fini(void *handle)
2715 {
2716 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2717
2718 kfree(adev->mode_info.bios_hardcoded_edid);
2719
2720 drm_kms_helper_poll_fini(adev_to_drm(adev));
2721
2722 dce_v6_0_audio_fini(adev);
2723 dce_v6_0_afmt_fini(adev);
2724
2725 drm_mode_config_cleanup(adev_to_drm(adev));
2726 adev->mode_info.mode_config_initialized = false;
2727
2728 return 0;
2729 }
2730
2731 static int dce_v6_0_hw_init(void *handle)
2732 {
2733 int i;
2734 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2735
2736 /* disable vga render */
2737 dce_v6_0_set_vga_render_state(adev, false);
2738 /* init dig PHYs, disp eng pll */
2739 amdgpu_atombios_encoder_init_dig(adev);
2740 amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk);
2741
2742 /* initialize hpd */
2743 dce_v6_0_hpd_init(adev);
2744
2745 for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
2746 dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
2747 }
2748
2749 dce_v6_0_pageflip_interrupt_init(adev);
2750
2751 return 0;
2752 }
2753
2754 static int dce_v6_0_hw_fini(void *handle)
2755 {
2756 int i;
2757 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2758
2759 dce_v6_0_hpd_fini(adev);
2760
2761 for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
2762 dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
2763 }
2764
2765 dce_v6_0_pageflip_interrupt_fini(adev);
2766
2767 return 0;
2768 }
2769
2770 static int dce_v6_0_suspend(void *handle)
2771 {
2772 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2773 int r;
2774
2775 r = amdgpu_display_suspend_helper(adev);
2776 if (r)
2777 return r;
2778 adev->mode_info.bl_level =
2779 amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
2780
2781 return dce_v6_0_hw_fini(handle);
2782 }
2783
2784 static int dce_v6_0_resume(void *handle)
2785 {
2786 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2787 int ret;
2788
2789 amdgpu_atombios_encoder_set_backlight_level_to_reg(adev,
2790 adev->mode_info.bl_level);
2791
2792 ret = dce_v6_0_hw_init(handle);
2793
2794 /* turn on the BL */
2795 if (adev->mode_info.bl_encoder) {
2796 u8 bl_level = amdgpu_display_backlight_get_level(adev,
2797 adev->mode_info.bl_encoder);
2798 amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder,
2799 bl_level);
2800 }
2801 if (ret)
2802 return ret;
2803
2804 return amdgpu_display_resume_helper(adev);
2805 }
2806
2807 static bool dce_v6_0_is_idle(void *handle)
2808 {
2809 return true;
2810 }
2811
2812 static int dce_v6_0_wait_for_idle(void *handle)
2813 {
2814 return 0;
2815 }
2816
2817 static int dce_v6_0_soft_reset(void *handle)
2818 {
2819 DRM_INFO("xxxx: dce_v6_0_soft_reset --- no impl!!\n");
2820 return 0;
2821 }
2822
2823 static void dce_v6_0_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
2824 int crtc,
2825 enum amdgpu_interrupt_state state)
2826 {
2827 u32 reg_block, interrupt_mask;
2828
2829 if (crtc >= adev->mode_info.num_crtc) {
2830 DRM_DEBUG("invalid crtc %d\n", crtc);
2831 return;
2832 }
2833
2834 switch (crtc) {
2835 case 0:
2836 reg_block = SI_CRTC0_REGISTER_OFFSET;
2837 break;
2838 case 1:
2839 reg_block = SI_CRTC1_REGISTER_OFFSET;
2840 break;
2841 case 2:
2842 reg_block = SI_CRTC2_REGISTER_OFFSET;
2843 break;
2844 case 3:
2845 reg_block = SI_CRTC3_REGISTER_OFFSET;
2846 break;
2847 case 4:
2848 reg_block = SI_CRTC4_REGISTER_OFFSET;
2849 break;
2850 case 5:
2851 reg_block = SI_CRTC5_REGISTER_OFFSET;
2852 break;
2853 default:
2854 DRM_DEBUG("invalid crtc %d\n", crtc);
2855 return;
2856 }
2857
2858 switch (state) {
2859 case AMDGPU_IRQ_STATE_DISABLE:
2860 interrupt_mask = RREG32(mmINT_MASK + reg_block);
2861 interrupt_mask &= ~VBLANK_INT_MASK;
2862 WREG32(mmINT_MASK + reg_block, interrupt_mask);
2863 break;
2864 case AMDGPU_IRQ_STATE_ENABLE:
2865 interrupt_mask = RREG32(mmINT_MASK + reg_block);
2866 interrupt_mask |= VBLANK_INT_MASK;
2867 WREG32(mmINT_MASK + reg_block, interrupt_mask);
2868 break;
2869 default:
2870 break;
2871 }
2872 }
2873
2874 static void dce_v6_0_set_crtc_vline_interrupt_state(struct amdgpu_device *adev,
2875 int crtc,
2876 enum amdgpu_interrupt_state state)
2877 {
2878
2879 }
2880
2881 static int dce_v6_0_set_hpd_interrupt_state(struct amdgpu_device *adev,
2882 struct amdgpu_irq_src *src,
2883 unsigned type,
2884 enum amdgpu_interrupt_state state)
2885 {
2886 u32 dc_hpd_int_cntl;
2887
2888 if (type >= adev->mode_info.num_hpd) {
2889 DRM_DEBUG("invalid hdp %d\n", type);
2890 return 0;
2891 }
2892
2893 switch (state) {
2894 case AMDGPU_IRQ_STATE_DISABLE:
2895 dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]);
2896 dc_hpd_int_cntl &= ~DC_HPDx_INT_EN;
2897 WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
2898 break;
2899 case AMDGPU_IRQ_STATE_ENABLE:
2900 dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]);
2901 dc_hpd_int_cntl |= DC_HPDx_INT_EN;
2902 WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
2903 break;
2904 default:
2905 break;
2906 }
2907
2908 return 0;
2909 }
2910
2911 static int dce_v6_0_set_crtc_interrupt_state(struct amdgpu_device *adev,
2912 struct amdgpu_irq_src *src,
2913 unsigned type,
2914 enum amdgpu_interrupt_state state)
2915 {
2916 switch (type) {
2917 case AMDGPU_CRTC_IRQ_VBLANK1:
2918 dce_v6_0_set_crtc_vblank_interrupt_state(adev, 0, state);
2919 break;
2920 case AMDGPU_CRTC_IRQ_VBLANK2:
2921 dce_v6_0_set_crtc_vblank_interrupt_state(adev, 1, state);
2922 break;
2923 case AMDGPU_CRTC_IRQ_VBLANK3:
2924 dce_v6_0_set_crtc_vblank_interrupt_state(adev, 2, state);
2925 break;
2926 case AMDGPU_CRTC_IRQ_VBLANK4:
2927 dce_v6_0_set_crtc_vblank_interrupt_state(adev, 3, state);
2928 break;
2929 case AMDGPU_CRTC_IRQ_VBLANK5:
2930 dce_v6_0_set_crtc_vblank_interrupt_state(adev, 4, state);
2931 break;
2932 case AMDGPU_CRTC_IRQ_VBLANK6:
2933 dce_v6_0_set_crtc_vblank_interrupt_state(adev, 5, state);
2934 break;
2935 case AMDGPU_CRTC_IRQ_VLINE1:
2936 dce_v6_0_set_crtc_vline_interrupt_state(adev, 0, state);
2937 break;
2938 case AMDGPU_CRTC_IRQ_VLINE2:
2939 dce_v6_0_set_crtc_vline_interrupt_state(adev, 1, state);
2940 break;
2941 case AMDGPU_CRTC_IRQ_VLINE3:
2942 dce_v6_0_set_crtc_vline_interrupt_state(adev, 2, state);
2943 break;
2944 case AMDGPU_CRTC_IRQ_VLINE4:
2945 dce_v6_0_set_crtc_vline_interrupt_state(adev, 3, state);
2946 break;
2947 case AMDGPU_CRTC_IRQ_VLINE5:
2948 dce_v6_0_set_crtc_vline_interrupt_state(adev, 4, state);
2949 break;
2950 case AMDGPU_CRTC_IRQ_VLINE6:
2951 dce_v6_0_set_crtc_vline_interrupt_state(adev, 5, state);
2952 break;
2953 default:
2954 break;
2955 }
2956 return 0;
2957 }
2958
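/*
 * Vblank/vline interrupt handler: src_id 1..6 selects the crtc and
 * src_data[0] distinguishes vblank (0) from vline (1).  Ack the status bit
 * and forward vblank events to DRM.
 */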
2959 static int dce_v6_0_crtc_irq(struct amdgpu_device *adev,
2960 struct amdgpu_irq_src *source,
2961 struct amdgpu_iv_entry *entry)
2962 {
2963 unsigned crtc = entry->src_id - 1;
2964 uint32_t disp_int = RREG32(interrupt_status_offsets[crtc].reg);
2965 unsigned int irq_type = amdgpu_display_crtc_idx_to_irq_type(adev,
2966 crtc);
2967
2968 switch (entry->src_data[0]) {
2969 case 0: /* vblank */
2970 if (disp_int & interrupt_status_offsets[crtc].vblank)
2971 WREG32(mmVBLANK_STATUS + crtc_offsets[crtc], VBLANK_ACK);
2972 else
2973 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
2974
2975 if (amdgpu_irq_enabled(adev, source, irq_type)) {
2976 drm_handle_vblank(adev_to_drm(adev), crtc);
2977 }
2978 DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
2979 break;
2980 case 1: /* vline */
2981 if (disp_int & interrupt_status_offsets[crtc].vline)
2982 WREG32(mmVLINE_STATUS + crtc_offsets[crtc], VLINE_ACK);
2983 else
2984 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
2985
2986 DRM_DEBUG("IH: D%d vline\n", crtc + 1);
2987 break;
2988 default:
2989 DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]);
2990 break;
2991 }
2992
2993 return 0;
2994 }
2995
2996 static int dce_v6_0_set_pageflip_interrupt_state(struct amdgpu_device *adev,
2997 struct amdgpu_irq_src *src,
2998 unsigned type,
2999 enum amdgpu_interrupt_state state)
3000 {
3001 u32 reg;
3002
3003 if (type >= adev->mode_info.num_crtc) {
3004 DRM_ERROR("invalid pageflip crtc %d\n", type);
3005 return -EINVAL;
3006 }
3007
3008 reg = RREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type]);
3009 if (state == AMDGPU_IRQ_STATE_DISABLE)
3010 WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
3011 reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
3012 else
3013 WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
3014 reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
3015
3016 return 0;
3017 }
3018
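/*
 * Page flip interrupt handler: recover the crtc from the src_id (8..19, two
 * per crtc), ack the pending flip status bit and complete the flip work by
 * sending the vblank event and scheduling the unpin work.
 */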
3019 static int dce_v6_0_pageflip_irq(struct amdgpu_device *adev,
3020 struct amdgpu_irq_src *source,
3021 struct amdgpu_iv_entry *entry)
3022 {
3023 unsigned long flags;
3024 unsigned crtc_id;
3025 struct amdgpu_crtc *amdgpu_crtc;
3026 struct amdgpu_flip_work *works;
3027
3028 crtc_id = (entry->src_id - 8) >> 1;
3029 amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
3030
3031 if (crtc_id >= adev->mode_info.num_crtc) {
3032 DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
3033 return -EINVAL;
3034 }
3035
3036 if (RREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id]) &
3037 GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK)
3038 WREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id],
3039 GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK);
3040
3041 	/* the IRQ can fire during the initial stage, before the crtc is set up */
3042 if (amdgpu_crtc == NULL)
3043 return 0;
3044
3045 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
3046 works = amdgpu_crtc->pflip_works;
3047 	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
3048 DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
3049 "AMDGPU_FLIP_SUBMITTED(%d)\n",
3050 amdgpu_crtc->pflip_status,
3051 AMDGPU_FLIP_SUBMITTED);
3052 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
3053 return 0;
3054 }
3055
3056 /* page flip completed. clean up */
3057 amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
3058 amdgpu_crtc->pflip_works = NULL;
3059
3060 	/* wake up userspace */
3061 if (works->event)
3062 drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);
3063
3064 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
3065
3066 drm_crtc_vblank_put(&amdgpu_crtc->base);
3067 schedule_work(&works->unpin_work);
3068
3069 return 0;
3070 }
3071
3072 static int dce_v6_0_hpd_irq(struct amdgpu_device *adev,
3073 struct amdgpu_irq_src *source,
3074 struct amdgpu_iv_entry *entry)
3075 {
3076 uint32_t disp_int, mask, tmp;
3077 unsigned hpd;
3078
3079 if (entry->src_data[0] >= adev->mode_info.num_hpd) {
3080 DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]);
3081 return 0;
3082 }
3083
3084 hpd = entry->src_data[0];
3085 disp_int = RREG32(interrupt_status_offsets[hpd].reg);
3086 mask = interrupt_status_offsets[hpd].hpd;
3087
3088 if (disp_int & mask) {
3089 tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
3090 tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK;
3091 WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
3092 schedule_work(&adev->hotplug_work);
3093 DRM_DEBUG("IH: HPD%d\n", hpd + 1);
3094 }
3095
3096 return 0;
3097
3098 }
3099
3100 static int dce_v6_0_set_clockgating_state(void *handle,
3101 enum amd_clockgating_state state)
3102 {
3103 return 0;
3104 }
3105
3106 static int dce_v6_0_set_powergating_state(void *handle,
3107 enum amd_powergating_state state)
3108 {
3109 return 0;
3110 }
3111
3112 static const struct amd_ip_funcs dce_v6_0_ip_funcs = {
3113 .name = "dce_v6_0",
3114 .early_init = dce_v6_0_early_init,
3115 .late_init = NULL,
3116 .sw_init = dce_v6_0_sw_init,
3117 .sw_fini = dce_v6_0_sw_fini,
3118 .hw_init = dce_v6_0_hw_init,
3119 .hw_fini = dce_v6_0_hw_fini,
3120 .suspend = dce_v6_0_suspend,
3121 .resume = dce_v6_0_resume,
3122 .is_idle = dce_v6_0_is_idle,
3123 .wait_for_idle = dce_v6_0_wait_for_idle,
3124 .soft_reset = dce_v6_0_soft_reset,
3125 .set_clockgating_state = dce_v6_0_set_clockgating_state,
3126 .set_powergating_state = dce_v6_0_set_powergating_state,
3127 };
3128
3129 static void
3130 dce_v6_0_encoder_mode_set(struct drm_encoder *encoder,
3131 struct drm_display_mode *mode,
3132 struct drm_display_mode *adjusted_mode)
3133 {
3134
3135 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3136 int em = amdgpu_atombios_encoder_get_encoder_mode(encoder);
3137
3138 amdgpu_encoder->pixel_clock = adjusted_mode->clock;
3139
3140 /* need to call this here rather than in prepare() since we need some crtc info */
3141 amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
3142
3143 /* setting the scaler clears this on some chips */
3144 dce_v6_0_set_interleave(encoder->crtc, mode);
3145
3146 if (em == ATOM_ENCODER_MODE_HDMI || ENCODER_MODE_IS_DP(em)) {
3147 dce_v6_0_afmt_enable(encoder, true);
3148 dce_v6_0_afmt_setmode(encoder, adjusted_mode);
3149 }
3150 }
3151
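/*
 * Prepare an encoder for a mode set: pick a DIG block and AFMT instance for
 * digital outputs, lock the ATOM scratch registers, route the clock/data
 * lines through any connector router, power up eDP panels, select the CRTC
 * source and program the FMT block.
 */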
3152 static void dce_v6_0_encoder_prepare(struct drm_encoder *encoder)
3153 {
3154
3155 struct amdgpu_device *adev = drm_to_adev(encoder->dev);
3156 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3157 struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
3158
3159 if ((amdgpu_encoder->active_device &
3160 (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
3161 (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) !=
3162 ENCODER_OBJECT_ID_NONE)) {
3163 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
3164 if (dig) {
3165 dig->dig_encoder = dce_v6_0_pick_dig_encoder(encoder);
3166 if (amdgpu_encoder->active_device & ATOM_DEVICE_DFP_SUPPORT)
3167 dig->afmt = adev->mode_info.afmt[dig->dig_encoder];
3168 }
3169 }
3170
3171 amdgpu_atombios_scratch_regs_lock(adev, true);
3172
3173 if (connector) {
3174 struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
3175
3176 /* select the clock/data port if it uses a router */
3177 if (amdgpu_connector->router.cd_valid)
3178 amdgpu_i2c_router_select_cd_port(amdgpu_connector);
3179
3180 /* turn eDP panel on for mode set */
3181 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
3182 amdgpu_atombios_encoder_set_edp_panel_power(connector,
3183 ATOM_TRANSMITTER_ACTION_POWER_ON);
3184 }
3185
3186 /* this is needed for the pll/ss setup to work correctly in some cases */
3187 amdgpu_atombios_encoder_set_crtc_source(encoder);
3188 /* set up the FMT blocks */
3189 dce_v6_0_program_fmt(encoder);
3190 }
3191
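/*
 * Commit an encoder once the CRTC is set up: switch it back on and release
 * the ATOM scratch register lock taken in prepare().
 */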
3192 static void dce_v6_0_encoder_commit(struct drm_encoder *encoder)
3193 {
3194
3195 struct drm_device *dev = encoder->dev;
3196 struct amdgpu_device *adev = drm_to_adev(dev);
3197
3198 /* need to call this here as we need the crtc set up */
3199 amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
3200 amdgpu_atombios_scratch_regs_lock(adev, false);
3201 }
3202
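/*
 * Disable an encoder: power it down, stop AFMT for HDMI/DP outputs and
 * release the DIG block so it can be reassigned.
 */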
3203 static void dce_v6_0_encoder_disable(struct drm_encoder *encoder)
3204 {
3205
3206 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3207 struct amdgpu_encoder_atom_dig *dig;
3208 int em = amdgpu_atombios_encoder_get_encoder_mode(encoder);
3209
3210 amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
3211
3212 if (amdgpu_atombios_encoder_is_digital(encoder)) {
3213 if (em == ATOM_ENCODER_MODE_HDMI || ENCODER_MODE_IS_DP(em))
3214 dce_v6_0_afmt_enable(encoder, false);
3215 dig = amdgpu_encoder->enc_priv;
3216 dig->dig_encoder = -1;
3217 }
3218 amdgpu_encoder->active_device = 0;
3219 }
3220
3221 /* these are handled by the primary encoders */
3222 static void dce_v6_0_ext_prepare(struct drm_encoder *encoder)
3223 {
3224
3225 }
3226
3227 static void dce_v6_0_ext_commit(struct drm_encoder *encoder)
3228 {
3229
3230 }
3231
3232 static void
3233 dce_v6_0_ext_mode_set(struct drm_encoder *encoder,
3234 struct drm_display_mode *mode,
3235 struct drm_display_mode *adjusted_mode)
3236 {
3237
3238 }
3239
3240 static void dce_v6_0_ext_disable(struct drm_encoder *encoder)
3241 {
3242
3243 }
3244
3245 static void
3246 dce_v6_0_ext_dpms(struct drm_encoder *encoder, int mode)
3247 {
3248
3249 }
3250
3251 static bool dce_v6_0_ext_mode_fixup(struct drm_encoder *encoder,
3252 const struct drm_display_mode *mode,
3253 struct drm_display_mode *adjusted_mode)
3254 {
3255 return true;
3256 }
3257
3258 static const struct drm_encoder_helper_funcs dce_v6_0_ext_helper_funcs = {
3259 .dpms = dce_v6_0_ext_dpms,
3260 .mode_fixup = dce_v6_0_ext_mode_fixup,
3261 .prepare = dce_v6_0_ext_prepare,
3262 .mode_set = dce_v6_0_ext_mode_set,
3263 .commit = dce_v6_0_ext_commit,
3264 .disable = dce_v6_0_ext_disable,
3265 /* no detect for TMDS/LVDS yet */
3266 };
3267
3268 static const struct drm_encoder_helper_funcs dce_v6_0_dig_helper_funcs = {
3269 .dpms = amdgpu_atombios_encoder_dpms,
3270 .mode_fixup = amdgpu_atombios_encoder_mode_fixup,
3271 .prepare = dce_v6_0_encoder_prepare,
3272 .mode_set = dce_v6_0_encoder_mode_set,
3273 .commit = dce_v6_0_encoder_commit,
3274 .disable = dce_v6_0_encoder_disable,
3275 .detect = amdgpu_atombios_encoder_dig_detect,
3276 };
3277
3278 static const struct drm_encoder_helper_funcs dce_v6_0_dac_helper_funcs = {
3279 .dpms = amdgpu_atombios_encoder_dpms,
3280 .mode_fixup = amdgpu_atombios_encoder_mode_fixup,
3281 .prepare = dce_v6_0_encoder_prepare,
3282 .mode_set = dce_v6_0_encoder_mode_set,
3283 .commit = dce_v6_0_encoder_commit,
3284 .detect = amdgpu_atombios_encoder_dac_detect,
3285 };
3286
3287 static void dce_v6_0_encoder_destroy(struct drm_encoder *encoder)
3288 {
3289 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3290 if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
3291 amdgpu_atombios_encoder_fini_backlight(amdgpu_encoder);
3292 kfree(amdgpu_encoder->enc_priv);
3293 drm_encoder_cleanup(encoder);
3294 kfree(amdgpu_encoder);
3295 }
3296
3297 static const struct drm_encoder_funcs dce_v6_0_encoder_funcs = {
3298 .destroy = dce_v6_0_encoder_destroy,
3299 };
3300
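/*
 * Register an encoder reported by the BIOS object table. If an encoder with
 * the same enum already exists, just OR in the additional supported devices;
 * otherwise allocate a new amdgpu_encoder and bind the DAC, DIG or external
 * helper functions according to the encoder object ID.
 */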
3301 static void dce_v6_0_encoder_add(struct amdgpu_device *adev,
3302 uint32_t encoder_enum,
3303 uint32_t supported_device,
3304 u16 caps)
3305 {
3306 struct drm_device *dev = adev_to_drm(adev);
3307 struct drm_encoder *encoder;
3308 struct amdgpu_encoder *amdgpu_encoder;
3309
3310 /* see if we already added it */
3311 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
3312 amdgpu_encoder = to_amdgpu_encoder(encoder);
3313 if (amdgpu_encoder->encoder_enum == encoder_enum) {
3314 amdgpu_encoder->devices |= supported_device;
3315 return;
3316 }
3317
3318 }
3319
3320 /* add a new one */
3321 amdgpu_encoder = kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL);
3322 if (!amdgpu_encoder)
3323 return;
3324
3325 encoder = &amdgpu_encoder->base;
3326 switch (adev->mode_info.num_crtc) {
3327 case 1:
3328 encoder->possible_crtcs = 0x1;
3329 break;
3330 case 2:
3331 default:
3332 encoder->possible_crtcs = 0x3;
3333 break;
3334 case 4:
3335 encoder->possible_crtcs = 0xf;
3336 break;
3337 case 6:
3338 encoder->possible_crtcs = 0x3f;
3339 break;
3340 }
3341
3342 amdgpu_encoder->enc_priv = NULL;
3343 amdgpu_encoder->encoder_enum = encoder_enum;
3344 amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
3345 amdgpu_encoder->devices = supported_device;
3346 amdgpu_encoder->rmx_type = RMX_OFF;
3347 amdgpu_encoder->underscan_type = UNDERSCAN_OFF;
3348 amdgpu_encoder->is_ext_encoder = false;
3349 amdgpu_encoder->caps = caps;
3350
3351 switch (amdgpu_encoder->encoder_id) {
3352 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
3353 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
3354 drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
3355 DRM_MODE_ENCODER_DAC, NULL);
3356 drm_encoder_helper_add(encoder, &dce_v6_0_dac_helper_funcs);
3357 break;
3358 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
3359 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
3360 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
3361 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
3362 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
3363 if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
3364 amdgpu_encoder->rmx_type = RMX_FULL;
3365 drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
3366 DRM_MODE_ENCODER_LVDS, NULL);
3367 amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_lcd_info(amdgpu_encoder);
3368 } else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
3369 drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
3370 DRM_MODE_ENCODER_DAC, NULL);
3371 amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
3372 } else {
3373 drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
3374 DRM_MODE_ENCODER_TMDS, NULL);
3375 amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
3376 }
3377 drm_encoder_helper_add(encoder, &dce_v6_0_dig_helper_funcs);
3378 break;
3379 case ENCODER_OBJECT_ID_SI170B:
3380 case ENCODER_OBJECT_ID_CH7303:
3381 case ENCODER_OBJECT_ID_EXTERNAL_SDVOA:
3382 case ENCODER_OBJECT_ID_EXTERNAL_SDVOB:
3383 case ENCODER_OBJECT_ID_TITFP513:
3384 case ENCODER_OBJECT_ID_VT1623:
3385 case ENCODER_OBJECT_ID_HDMI_SI1930:
3386 case ENCODER_OBJECT_ID_TRAVIS:
3387 case ENCODER_OBJECT_ID_NUTMEG:
3388 /* these are handled by the primary encoders */
3389 amdgpu_encoder->is_ext_encoder = true;
3390 if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
3391 drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
3392 DRM_MODE_ENCODER_LVDS, NULL);
3393 else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
3394 drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
3395 DRM_MODE_ENCODER_DAC, NULL);
3396 else
3397 drm_encoder_init(dev, encoder, &dce_v6_0_encoder_funcs,
3398 DRM_MODE_ENCODER_TMDS, NULL);
3399 drm_encoder_helper_add(encoder, &dce_v6_0_ext_helper_funcs);
3400 break;
3401 }
3402 }
3403
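/* Display callbacks exported to the amdgpu core via adev->mode_info.funcs. */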
3404 static const struct amdgpu_display_funcs dce_v6_0_display_funcs = {
3405 .bandwidth_update = &dce_v6_0_bandwidth_update,
3406 .vblank_get_counter = &dce_v6_0_vblank_get_counter,
3407 .backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level,
3408 .backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level,
3409 .hpd_sense = &dce_v6_0_hpd_sense,
3410 .hpd_set_polarity = &dce_v6_0_hpd_set_polarity,
3411 .hpd_get_gpio_reg = &dce_v6_0_hpd_get_gpio_reg,
3412 .page_flip = &dce_v6_0_page_flip,
3413 .page_flip_get_scanoutpos = &dce_v6_0_crtc_get_scanoutpos,
3414 .add_encoder = &dce_v6_0_encoder_add,
3415 .add_connector = &amdgpu_connector_add,
3416 };
3417
3418 static void dce_v6_0_set_display_funcs(struct amdgpu_device *adev)
3419 {
3420 adev->mode_info.funcs = &dce_v6_0_display_funcs;
3421 }
3422
3423 static const struct amdgpu_irq_src_funcs dce_v6_0_crtc_irq_funcs = {
3424 .set = dce_v6_0_set_crtc_interrupt_state,
3425 .process = dce_v6_0_crtc_irq,
3426 };
3427
3428 static const struct amdgpu_irq_src_funcs dce_v6_0_pageflip_irq_funcs = {
3429 .set = dce_v6_0_set_pageflip_interrupt_state,
3430 .process = dce_v6_0_pageflip_irq,
3431 };
3432
3433 static const struct amdgpu_irq_src_funcs dce_v6_0_hpd_irq_funcs = {
3434 .set = dce_v6_0_set_hpd_interrupt_state,
3435 .process = dce_v6_0_hpd_irq,
3436 };
3437
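/*
 * Hook up the CRTC, page-flip and HPD interrupt sources. The number of
 * interrupt types tracks the number of CRTCs and HPD pads on the chip.
 */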
3438 static void dce_v6_0_set_irq_funcs(struct amdgpu_device *adev)
3439 {
3440 if (adev->mode_info.num_crtc > 0)
3441 adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_VLINE1 + adev->mode_info.num_crtc;
3442 else
3443 adev->crtc_irq.num_types = 0;
3444 adev->crtc_irq.funcs = &dce_v6_0_crtc_irq_funcs;
3445
3446 adev->pageflip_irq.num_types = adev->mode_info.num_crtc;
3447 adev->pageflip_irq.funcs = &dce_v6_0_pageflip_irq_funcs;
3448
3449 adev->hpd_irq.num_types = adev->mode_info.num_hpd;
3450 adev->hpd_irq.funcs = &dce_v6_0_hpd_irq_funcs;
3451 }
3452
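/* DCE 6.0 and 6.4 share the same implementation; only the reported version differs. */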
3453 const struct amdgpu_ip_block_version dce_v6_0_ip_block =
3454 {
3455 .type = AMD_IP_BLOCK_TYPE_DCE,
3456 .major = 6,
3457 .minor = 0,
3458 .rev = 0,
3459 .funcs = &dce_v6_0_ip_funcs,
3460 };
3461
3462 const struct amdgpu_ip_block_version dce_v6_4_ip_block =
3463 {
3464 .type = AMD_IP_BLOCK_TYPE_DCE,
3465 .major = 6,
3466 .minor = 4,
3467 .rev = 0,
3468 .funcs = &dce_v6_0_ip_funcs,
3469 };
3470