/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>

#include "amdgpu.h"
#include "amdgpu_cs.h"
#include "amdgpu_vcn.h"
#include "amdgpu_pm.h"
#include "soc15.h"
#include "soc15d.h"
#include "soc15_common.h"

#include "vcn/vcn_1_0_offset.h"
#include "vcn/vcn_1_0_sh_mask.h"
#include "mmhub/mmhub_9_1_offset.h"
#include "mmhub/mmhub_9_1_sh_mask.h"

#include "ivsrcid/vcn/irqsrcs_vcn_1_0.h"
#include "jpeg_v1_0.h"
#include "vcn_v1_0.h"

#define mmUVD_RBC_XX_IB_REG_CHECK_1_0			0x05ab
#define mmUVD_RBC_XX_IB_REG_CHECK_1_0_BASE_IDX		1
#define mmUVD_REG_XX_MASK_1_0				0x05ac
#define mmUVD_REG_XX_MASK_1_0_BASE_IDX			1

static int vcn_v1_0_stop(struct amdgpu_device *adev);
static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev);
static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev);
static int vcn_v1_0_set_powergating_state(void *handle, enum amd_powergating_state state);
static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev,
				   int inst_idx, struct dpg_pause_state *new_state);

static void vcn_v1_0_idle_work_handler(struct work_struct *work);
static void vcn_v1_0_ring_begin_use(struct amdgpu_ring *ring);

/**
 * vcn_v1_0_early_init - set function pointers
 *
 * @handle: amdgpu_device pointer
 *
 * Set ring and irq function pointers
 */
static int vcn_v1_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->vcn.num_enc_rings = 2;

	vcn_v1_0_set_dec_ring_funcs(adev);
	vcn_v1_0_set_enc_ring_funcs(adev);
	vcn_v1_0_set_irq_funcs(adev);

	jpeg_v1_0_early_init(handle);

	return 0;
}

/**
 * vcn_v1_0_sw_init - sw init for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Load firmware and sw initialization
 */
static int vcn_v1_0_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int i, r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* VCN DEC TRAP */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
			      VCN_1_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->vcn.inst->irq);
	if (r)
		return r;

	/* VCN ENC TRAP */
	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, i + VCN_1_0__SRCID__UVD_ENC_GENERAL_PURPOSE,
				      &adev->vcn.inst->irq);
		if (r)
			return r;
	}

	r = amdgpu_vcn_sw_init(adev);
	if (r)
		return r;

	/* Override the work func */
	adev->vcn.idle_work.work.func = vcn_v1_0_idle_work_handler;

	amdgpu_vcn_setup_ucode(adev);

	r = amdgpu_vcn_resume(adev);
	if (r)
		return r;

	ring = &adev->vcn.inst->ring_dec;
	sprintf(ring->name, "vcn_dec");
	r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0,
			     AMDGPU_RING_PRIO_DEFAULT, NULL);
	if (r)
		return r;

	adev->vcn.internal.scratch9 = adev->vcn.inst->external.scratch9 =
		SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9);
	adev->vcn.internal.data0 = adev->vcn.inst->external.data0 =
		SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0);
	adev->vcn.internal.data1 = adev->vcn.inst->external.data1 =
		SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1);
	adev->vcn.internal.cmd = adev->vcn.inst->external.cmd =
		SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD);
	adev->vcn.internal.nop = adev->vcn.inst->external.nop =
		SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP);

	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
		enum amdgpu_ring_priority_level hw_prio = amdgpu_vcn_get_enc_ring_prio(i);

		ring = &adev->vcn.inst->ring_enc[i];
		sprintf(ring->name, "vcn_enc%d", i);
		r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0,
				     hw_prio, NULL);
		if (r)
			return r;
	}

	adev->vcn.pause_dpg_mode = vcn_v1_0_pause_dpg_mode;

	if (amdgpu_vcnfw_log) {
		volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr;

		fw_shared->present_flag_0 = 0;
		amdgpu_vcn_fwlog_init(adev->vcn.inst);
	}

	r = jpeg_v1_0_sw_init(handle);

	return r;
}

/**
 * vcn_v1_0_sw_fini - sw fini for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * VCN suspend and free up sw allocation
 */
static int vcn_v1_0_sw_fini(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vcn_suspend(adev);
	if (r)
		return r;

	jpeg_v1_0_sw_fini(handle);

	r = amdgpu_vcn_sw_fini(adev);

	return r;
}

/**
 * vcn_v1_0_hw_init - start and test VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int vcn_v1_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
	int i, r;

	r = amdgpu_ring_test_helper(ring);
	if (r)
		goto done;

	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
		ring = &adev->vcn.inst->ring_enc[i];
		r = amdgpu_ring_test_helper(ring);
		if (r)
			goto done;
	}

	ring = &adev->jpeg.inst->ring_dec;
	r = amdgpu_ring_test_helper(ring);
	if (r)
		goto done;

done:
	if (!r)
		DRM_INFO("VCN decode and encode initialized successfully (under %s).\n",
			 (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ? "DPG Mode" : "SPG Mode");

	return r;
}

/**
 * vcn_v1_0_hw_fini - stop the hardware block
 *
 * @handle: amdgpu_device pointer
 *
 * Stop the VCN block, mark the ring as not ready any more
 */
static int vcn_v1_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	cancel_delayed_work_sync(&adev->vcn.idle_work);

	if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
	    (adev->vcn.cur_state != AMD_PG_STATE_GATE &&
	     RREG32_SOC15(VCN, 0, mmUVD_STATUS))) {
		vcn_v1_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
	}

	return 0;
}

/**
 * vcn_v1_0_suspend - suspend VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * HW fini and suspend VCN block
 */
static int vcn_v1_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool idle_work_unexecuted;

	idle_work_unexecuted = cancel_delayed_work_sync(&adev->vcn.idle_work);
	if (idle_work_unexecuted) {
		if (adev->pm.dpm_enabled)
			amdgpu_dpm_enable_uvd(adev, false);
	}

	r = vcn_v1_0_hw_fini(adev);
	if (r)
		return r;

	r = amdgpu_vcn_suspend(adev);

	return r;
}

/**
 * vcn_v1_0_resume - resume VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Resume firmware and hw init VCN block
 */
static int vcn_v1_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vcn_resume(adev);
	if (r)
		return r;

	r = vcn_v1_0_hw_init(adev);

	return r;
}

/**
 * vcn_v1_0_mc_resume_spg_mode - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the VCN memory controller know its offsets
 */
static void vcn_v1_0_mc_resume_spg_mode(struct amdgpu_device *adev)
{
	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
	uint32_t offset;

	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			     (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo));
		WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			     (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi));
		WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0, 0);
		offset = 0;
	} else {
		WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			     lower_32_bits(adev->vcn.inst->gpu_addr));
		WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			     upper_32_bits(adev->vcn.inst->gpu_addr));
		offset = size;
		WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0,
			     AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
	}

	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE0, size);

	/* cache window 1: stack */
	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
		     lower_32_bits(adev->vcn.inst->gpu_addr + offset));
	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
		     upper_32_bits(adev->vcn.inst->gpu_addr + offset));
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, 0);
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);

	/* cache window 2: context */
	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
		     lower_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
		     upper_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, 0);
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);

	WREG32_SOC15(UVD, 0, mmUVD_UDEC_ADDR_CONFIG,
		     adev->gfx.config.gb_addr_config);
	WREG32_SOC15(UVD, 0, mmUVD_UDEC_DB_ADDR_CONFIG,
		     adev->gfx.config.gb_addr_config);
	WREG32_SOC15(UVD, 0, mmUVD_UDEC_DBW_ADDR_CONFIG,
		     adev->gfx.config.gb_addr_config);
	WREG32_SOC15(UVD, 0, mmUVD_UDEC_DBW_UV_ADDR_CONFIG,
		     adev->gfx.config.gb_addr_config);
	WREG32_SOC15(UVD, 0, mmUVD_MIF_CURR_ADDR_CONFIG,
		     adev->gfx.config.gb_addr_config);
	WREG32_SOC15(UVD, 0, mmUVD_MIF_CURR_UV_ADDR_CONFIG,
		     adev->gfx.config.gb_addr_config);
	WREG32_SOC15(UVD, 0, mmUVD_MIF_RECON1_ADDR_CONFIG,
		     adev->gfx.config.gb_addr_config);
	WREG32_SOC15(UVD, 0, mmUVD_MIF_RECON1_UV_ADDR_CONFIG,
		     adev->gfx.config.gb_addr_config);
	WREG32_SOC15(UVD, 0, mmUVD_MIF_REF_ADDR_CONFIG,
		     adev->gfx.config.gb_addr_config);
	WREG32_SOC15(UVD, 0, mmUVD_MIF_REF_UV_ADDR_CONFIG,
		     adev->gfx.config.gb_addr_config);
	WREG32_SOC15(UVD, 0, mmUVD_JPEG_ADDR_CONFIG,
		     adev->gfx.config.gb_addr_config);
	WREG32_SOC15(UVD, 0, mmUVD_JPEG_UV_ADDR_CONFIG,
		     adev->gfx.config.gb_addr_config);
}

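/**
 * vcn_v1_0_mc_resume_dpg_mode - memory controller programming for DPG mode
 *
 * @adev: amdgpu_device pointer
 *
 * Let the VCN memory controller know its offsets, using the
 * DPG-mode indirect register writes
 */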
static void vcn_v1_0_mc_resume_dpg_mode(struct amdgpu_device *adev)
{
	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
	uint32_t offset;

	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
					  (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo),
					  0xFFFFFFFF, 0);
		WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
					  (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi),
					  0xFFFFFFFF, 0);
		WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0, 0,
					  0xFFFFFFFF, 0);
		offset = 0;
	} else {
		WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
					  lower_32_bits(adev->vcn.inst->gpu_addr), 0xFFFFFFFF, 0);
		WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
					  upper_32_bits(adev->vcn.inst->gpu_addr), 0xFFFFFFFF, 0);
		offset = size;
		WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0,
					  AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0xFFFFFFFF, 0);
	}

	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_VCPU_CACHE_SIZE0, size, 0xFFFFFFFF, 0);

	/* cache window 1: stack */
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
				  lower_32_bits(adev->vcn.inst->gpu_addr + offset), 0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
				  upper_32_bits(adev->vcn.inst->gpu_addr + offset), 0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, 0,
				  0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE,
				  0xFFFFFFFF, 0);

	/* cache window 2: context */
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
				  lower_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE),
				  0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
				  upper_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE),
				  0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, 0, 0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE,
				  0xFFFFFFFF, 0);

	/* VCN global tiling registers */
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_UDEC_ADDR_CONFIG,
				  adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_UDEC_DB_ADDR_CONFIG,
				  adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_UDEC_DBW_ADDR_CONFIG,
				  adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_UDEC_DBW_UV_ADDR_CONFIG,
				  adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MIF_CURR_ADDR_CONFIG,
				  adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MIF_CURR_UV_ADDR_CONFIG,
				  adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MIF_RECON1_ADDR_CONFIG,
				  adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MIF_RECON1_UV_ADDR_CONFIG,
				  adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MIF_REF_ADDR_CONFIG,
				  adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MIF_REF_UV_ADDR_CONFIG,
				  adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
}

/**
 * vcn_v1_0_disable_clock_gating - disable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 *
 * Disable clock gating for VCN block
 */
static void vcn_v1_0_disable_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data;

	/* JPEG disable CGC */
	data = RREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL);

	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		data &= ~JPEG_CGC_CTRL__DYN_CLOCK_MODE_MASK;

	data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE);
	data &= ~(JPEG_CGC_GATE__JPEG_MASK | JPEG_CGC_GATE__JPEG2_MASK);
	WREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE, data);

	/* UVD disable CGC */
	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;

	data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_GATE);
	data &= ~(UVD_CGC_GATE__SYS_MASK
		  | UVD_CGC_GATE__UDEC_MASK
		  | UVD_CGC_GATE__MPEG2_MASK
		  | UVD_CGC_GATE__REGS_MASK
		  | UVD_CGC_GATE__RBC_MASK
		  | UVD_CGC_GATE__LMI_MC_MASK
		  | UVD_CGC_GATE__LMI_UMC_MASK
		  | UVD_CGC_GATE__IDCT_MASK
		  | UVD_CGC_GATE__MPRD_MASK
		  | UVD_CGC_GATE__MPC_MASK
		  | UVD_CGC_GATE__LBSI_MASK
		  | UVD_CGC_GATE__LRBBM_MASK
		  | UVD_CGC_GATE__UDEC_RE_MASK
		  | UVD_CGC_GATE__UDEC_CM_MASK
		  | UVD_CGC_GATE__UDEC_IT_MASK
		  | UVD_CGC_GATE__UDEC_DB_MASK
		  | UVD_CGC_GATE__UDEC_MP_MASK
		  | UVD_CGC_GATE__WCB_MASK
		  | UVD_CGC_GATE__VCPU_MASK
		  | UVD_CGC_GATE__SCPU_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_CGC_GATE, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
	data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK
		  | UVD_CGC_CTRL__UDEC_CM_MODE_MASK
		  | UVD_CGC_CTRL__UDEC_IT_MODE_MASK
		  | UVD_CGC_CTRL__UDEC_DB_MODE_MASK
		  | UVD_CGC_CTRL__UDEC_MP_MODE_MASK
		  | UVD_CGC_CTRL__SYS_MODE_MASK
		  | UVD_CGC_CTRL__UDEC_MODE_MASK
		  | UVD_CGC_CTRL__MPEG2_MODE_MASK
		  | UVD_CGC_CTRL__REGS_MODE_MASK
		  | UVD_CGC_CTRL__RBC_MODE_MASK
		  | UVD_CGC_CTRL__LMI_MC_MODE_MASK
		  | UVD_CGC_CTRL__LMI_UMC_MODE_MASK
		  | UVD_CGC_CTRL__IDCT_MODE_MASK
		  | UVD_CGC_CTRL__MPRD_MODE_MASK
		  | UVD_CGC_CTRL__MPC_MODE_MASK
		  | UVD_CGC_CTRL__LBSI_MODE_MASK
		  | UVD_CGC_CTRL__LRBBM_MODE_MASK
		  | UVD_CGC_CTRL__WCB_MODE_MASK
		  | UVD_CGC_CTRL__VCPU_MODE_MASK
		  | UVD_CGC_CTRL__SCPU_MODE_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);

	/* turn on SUVD clock gating */
	data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_GATE);
	data |= (UVD_SUVD_CGC_GATE__SRE_MASK
		 | UVD_SUVD_CGC_GATE__SIT_MASK
		 | UVD_SUVD_CGC_GATE__SMP_MASK
		 | UVD_SUVD_CGC_GATE__SCM_MASK
		 | UVD_SUVD_CGC_GATE__SDB_MASK
		 | UVD_SUVD_CGC_GATE__SRE_H264_MASK
		 | UVD_SUVD_CGC_GATE__SRE_HEVC_MASK
		 | UVD_SUVD_CGC_GATE__SIT_H264_MASK
		 | UVD_SUVD_CGC_GATE__SIT_HEVC_MASK
		 | UVD_SUVD_CGC_GATE__SCM_H264_MASK
		 | UVD_SUVD_CGC_GATE__SCM_HEVC_MASK
		 | UVD_SUVD_CGC_GATE__SDB_H264_MASK
		 | UVD_SUVD_CGC_GATE__SDB_HEVC_MASK
		 | UVD_SUVD_CGC_GATE__SCLR_MASK
		 | UVD_SUVD_CGC_GATE__UVD_SC_MASK
		 | UVD_SUVD_CGC_GATE__ENT_MASK
		 | UVD_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK
		 | UVD_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK
		 | UVD_SUVD_CGC_GATE__SITE_MASK
		 | UVD_SUVD_CGC_GATE__SRE_VP9_MASK
		 | UVD_SUVD_CGC_GATE__SCM_VP9_MASK
		 | UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK
		 | UVD_SUVD_CGC_GATE__SDB_VP9_MASK
		 | UVD_SUVD_CGC_GATE__IME_HEVC_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_GATE, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL);
	data &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
		  | UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
		  | UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
		  | UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
		  | UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
		  | UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
		  | UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
		  | UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
		  | UVD_SUVD_CGC_CTRL__IME_MODE_MASK
		  | UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL, data);
}

/**
 * vcn_v1_0_enable_clock_gating - enable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 *
 * Enable clock gating for VCN block
 */
static void vcn_v1_0_enable_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data = 0;

	/* enable JPEG CGC */
	data = RREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL);
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		data |= 0 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE);
	data |= (JPEG_CGC_GATE__JPEG_MASK | JPEG_CGC_GATE__JPEG2_MASK);
	WREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE, data);

	/* enable UVD CGC */
	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		data |= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
	data |= (UVD_CGC_CTRL__UDEC_RE_MODE_MASK
		 | UVD_CGC_CTRL__UDEC_CM_MODE_MASK
		 | UVD_CGC_CTRL__UDEC_IT_MODE_MASK
		 | UVD_CGC_CTRL__UDEC_DB_MODE_MASK
		 | UVD_CGC_CTRL__UDEC_MP_MODE_MASK
		 | UVD_CGC_CTRL__SYS_MODE_MASK
		 | UVD_CGC_CTRL__UDEC_MODE_MASK
		 | UVD_CGC_CTRL__MPEG2_MODE_MASK
		 | UVD_CGC_CTRL__REGS_MODE_MASK
		 | UVD_CGC_CTRL__RBC_MODE_MASK
		 | UVD_CGC_CTRL__LMI_MC_MODE_MASK
		 | UVD_CGC_CTRL__LMI_UMC_MODE_MASK
		 | UVD_CGC_CTRL__IDCT_MODE_MASK
		 | UVD_CGC_CTRL__MPRD_MODE_MASK
		 | UVD_CGC_CTRL__MPC_MODE_MASK
		 | UVD_CGC_CTRL__LBSI_MODE_MASK
		 | UVD_CGC_CTRL__LRBBM_MODE_MASK
		 | UVD_CGC_CTRL__WCB_MODE_MASK
		 | UVD_CGC_CTRL__VCPU_MODE_MASK
		 | UVD_CGC_CTRL__SCPU_MODE_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL);
	data |= (UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
		 | UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
		 | UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
		 | UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
		 | UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
		 | UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
		 | UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
		 | UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
		 | UVD_SUVD_CGC_CTRL__IME_MODE_MASK
		 | UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL, data);
}

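/**
 * vcn_v1_0_clock_gating_dpg_mode - set VCN clock gating in DPG mode
 *
 * @sram_sel: SRAM bank select passed through to the DPG-mode register writes
 * @adev: amdgpu_device pointer
 *
 * Program the VCN clock gating control registers through the DPG-mode
 * indirect register writes
 */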
static void vcn_v1_0_clock_gating_dpg_mode(struct amdgpu_device *adev, uint8_t sram_sel)
{
	uint32_t reg_data = 0;

	/* disable JPEG CGC */
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		reg_data = 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		reg_data = 0 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	reg_data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	reg_data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmJPEG_CGC_CTRL, reg_data, 0xFFFFFFFF, sram_sel);

	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmJPEG_CGC_GATE, 0, 0xFFFFFFFF, sram_sel);

	/* enable sw clock gating control */
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		reg_data = 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		reg_data = 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	reg_data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	reg_data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	reg_data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
		      UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
		      UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
		      UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
		      UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
		      UVD_CGC_CTRL__SYS_MODE_MASK |
		      UVD_CGC_CTRL__UDEC_MODE_MASK |
		      UVD_CGC_CTRL__MPEG2_MODE_MASK |
		      UVD_CGC_CTRL__REGS_MODE_MASK |
		      UVD_CGC_CTRL__RBC_MODE_MASK |
		      UVD_CGC_CTRL__LMI_MC_MODE_MASK |
		      UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
		      UVD_CGC_CTRL__IDCT_MODE_MASK |
		      UVD_CGC_CTRL__MPRD_MODE_MASK |
		      UVD_CGC_CTRL__MPC_MODE_MASK |
		      UVD_CGC_CTRL__LBSI_MODE_MASK |
		      UVD_CGC_CTRL__LRBBM_MODE_MASK |
		      UVD_CGC_CTRL__WCB_MODE_MASK |
		      UVD_CGC_CTRL__VCPU_MODE_MASK |
		      UVD_CGC_CTRL__SCPU_MODE_MASK);
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_CGC_CTRL, reg_data, 0xFFFFFFFF, sram_sel);

	/* turn off clock gating */
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_CGC_GATE, 0, 0xFFFFFFFF, sram_sel);

	/* turn on SUVD clock gating */
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_SUVD_CGC_GATE, 1, 0xFFFFFFFF, sram_sel);

	/* turn on sw mode in UVD_SUVD_CGC_CTRL */
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_SUVD_CGC_CTRL, 0, 0xFFFFFFFF, sram_sel);
}

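/**
 * vcn_1_0_disable_static_power_gating - power up VCN hardware blocks
 *
 * @adev: amdgpu_device pointer
 *
 * Program the power gating FSM to power up the VCN hardware blocks
 * and wait for UVD_PGFSM_STATUS to acknowledge the transition
 */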
static void vcn_1_0_disable_static_power_gating(struct amdgpu_device *adev)
{
	uint32_t data = 0;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
		data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDW_PWR_CONFIG__SHIFT);

		WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, UVD_PGFSM_STATUS__UVDM_UVDU_PWR_ON, 0xFFFFFF);
	} else {
		data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDW_PWR_CONFIG__SHIFT);
		WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, 0, 0xFFFFFFFF);
	}

	/* polling UVD_PGFSM_STATUS to confirm UVDM_PWR_STATUS and UVDU_PWR_STATUS are 0 (power on) */

	data = RREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS);
	data &= ~0x103;
	if (adev->pg_flags & AMD_PG_SUPPORT_VCN)
		data |= UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON | UVD_POWER_STATUS__UVD_PG_EN_MASK;

	WREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS, data);
}

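/**
 * vcn_1_0_enable_static_power_gating - power down VCN hardware blocks
 *
 * @adev: amdgpu_device pointer
 *
 * Program the power gating FSM to power down the VCN hardware blocks
 * and wait for UVD_PGFSM_STATUS to acknowledge the transition
 */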
static void vcn_1_0_enable_static_power_gating(struct amdgpu_device *adev)
{
	uint32_t data = 0;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
		/* Before power off, this indicator has to be turned on */
		data = RREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS);
		data &= ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK;
		data |= UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF;
		WREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS, data);


		data = (2 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDW_PWR_CONFIG__SHIFT);

		WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data);

		data = (2 << UVD_PGFSM_STATUS__UVDM_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDU_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDF_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDC_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDB_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDIL_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDIR_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDTD_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDTE_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDE_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDW_PWR_STATUS__SHIFT);
		SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, data, 0xFFFFFFFF);
	}
}

/**
 * vcn_v1_0_start_spg_mode - start VCN block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the VCN block
 */
static int vcn_v1_0_start_spg_mode(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
	uint32_t rb_bufsz, tmp;
	uint32_t lmi_swap_cntl;
	int i, j, r;

	/* disable byte swapping */
	lmi_swap_cntl = 0;

	vcn_1_0_disable_static_power_gating(adev);

	tmp = RREG32_SOC15(UVD, 0, mmUVD_STATUS) | UVD_STATUS__UVD_BUSY;
	WREG32_SOC15(UVD, 0, mmUVD_STATUS, tmp);

	/* disable clock gating */
	vcn_v1_0_disable_clock_gating(adev);

	/* disable interrupt */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN), 0,
		 ~UVD_MASTINT_EN__VCPU_EN_MASK);

	/* initialize VCN memory controller */
	tmp = RREG32_SOC15(UVD, 0, mmUVD_LMI_CTRL);
	WREG32_SOC15(UVD, 0, mmUVD_LMI_CTRL, tmp |
		     UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		     UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
		     UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		     UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);

#ifdef __BIG_ENDIAN
	/* swap (8 in 32) RB and IB */
	lmi_swap_cntl = 0xa;
#endif
	WREG32_SOC15(UVD, 0, mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);

	tmp = RREG32_SOC15(UVD, 0, mmUVD_MPC_CNTL);
	tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
	tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
	WREG32_SOC15(UVD, 0, mmUVD_MPC_CNTL, tmp);

	WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXA0,
		     ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
		      (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
		      (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
		      (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));

	WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXB0,
		     ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
		      (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
		      (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
		      (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));

	WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUX,
		     ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
		      (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
		      (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));

	vcn_v1_0_mc_resume_spg_mode(adev);

	WREG32_SOC15(UVD, 0, mmUVD_REG_XX_MASK_1_0, 0x10);
	WREG32_SOC15(UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK_1_0,
		     RREG32_SOC15(UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK_1_0) | 0x3);

	/* enable VCPU clock */
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CNTL, UVD_VCPU_CNTL__CLK_EN_MASK);

	/* boot up the VCPU */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0,
		 ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);

	/* enable UMC */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2), 0,
		 ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

	tmp = RREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET);
	tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
	tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
	WREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET, tmp);

	for (i = 0; i < 10; ++i) {
		uint32_t status;

		for (j = 0; j < 100; ++j) {
			status = RREG32_SOC15(UVD, 0, mmUVD_STATUS);
			if (status & UVD_STATUS__IDLE)
				break;
			mdelay(10);
		}
		r = 0;
		if (status & UVD_STATUS__IDLE)
			break;

		DRM_ERROR("VCN decode not responding, trying to reset the VCPU!!!\n");
		WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
			 UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
			 ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0,
			 ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		r = -1;
	}

	if (r) {
		DRM_ERROR("VCN decode not responding, giving up!!!\n");
		return r;
	}
	/* enable master interrupt */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN),
		 UVD_MASTINT_EN__VCPU_EN_MASK, ~UVD_MASTINT_EN__VCPU_EN_MASK);

	/* enable system interrupt for JRBC, TODO: move to set interrupt */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SYS_INT_EN),
		 UVD_SYS_INT_EN__UVD_JRBC_EN_MASK,
		 ~UVD_SYS_INT_EN__UVD_JRBC_EN_MASK);

	/* clear the busy bit of UVD_STATUS */
	tmp = RREG32_SOC15(UVD, 0, mmUVD_STATUS) & ~UVD_STATUS__UVD_BUSY;
	WREG32_SOC15(UVD, 0, mmUVD_STATUS, tmp);

	/* force RBC into idle state */
	rb_bufsz = order_base_2(ring->ring_size);
	tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, tmp);

	/* set the write pointer delay */
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR_CNTL, 0);

	/* set the wb address */
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR_ADDR,
		     (upper_32_bits(ring->gpu_addr) >> 2));

	/* program the RB_BASE for ring buffer */
	WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
		     lower_32_bits(ring->gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
		     upper_32_bits(ring->gpu_addr));

	/* Initialize the ring buffer's read and write pointers */
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR, 0);

	WREG32_SOC15(UVD, 0, mmUVD_SCRATCH2, 0);

	ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
		     lower_32_bits(ring->wptr));

	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), 0,
		 ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);

	ring = &adev->vcn.inst->ring_enc[0];
	WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr);
	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4);

	ring = &adev->vcn.inst->ring_enc[1];
	WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr);
	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);

	jpeg_v1_0_start(adev, 0);

	return 0;
}

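/**
 * vcn_v1_0_start_dpg_mode - start VCN block in DPG mode
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the VCN block with dynamic power gating enabled
 */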
static int vcn_v1_0_start_dpg_mode(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
	uint32_t rb_bufsz, tmp;
	uint32_t lmi_swap_cntl;

	/* disable byte swapping */
	lmi_swap_cntl = 0;

	vcn_1_0_enable_static_power_gating(adev);

	/* enable dynamic power gating mode */
	tmp = RREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS);
	tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK;
	tmp |= UVD_POWER_STATUS__UVD_PG_EN_MASK;
	WREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS, tmp);

	/* enable clock gating */
	vcn_v1_0_clock_gating_dpg_mode(adev, 0);

	/* enable VCPU clock */
	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
	tmp |= UVD_VCPU_CNTL__MIF_WR_LOW_THRESHOLD_BP_MASK;
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_VCPU_CNTL, tmp, 0xFFFFFFFF, 0);

	/* disable interrupt */
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MASTINT_EN,
				  0, UVD_MASTINT_EN__VCPU_EN_MASK, 0);

	/* initialize VCN memory controller */
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_CTRL,
				  (8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
				  UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
				  UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
				  UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
				  UVD_LMI_CTRL__REQ_MODE_MASK |
				  UVD_LMI_CTRL__CRC_RESET_MASK |
				  UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
				  0x00100000L, 0xFFFFFFFF, 0);

#ifdef __BIG_ENDIAN
	/* swap (8 in 32) RB and IB */
	lmi_swap_cntl = 0xa;
#endif
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl, 0xFFFFFFFF, 0);

	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MPC_CNTL,
				  0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT, 0xFFFFFFFF, 0);

	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MPC_SET_MUXA0,
				  ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
				   (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
				   (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
				   (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)), 0xFFFFFFFF, 0);

	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MPC_SET_MUXB0,
				  ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
				   (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
				   (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
				   (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)), 0xFFFFFFFF, 0);

	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MPC_SET_MUX,
				  ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
				   (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
				   (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0xFFFFFFFF, 0);

	vcn_v1_0_mc_resume_dpg_mode(adev);

	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_REG_XX_MASK, 0x10, 0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK, 0x3, 0xFFFFFFFF, 0);

	/* boot up the VCPU */
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_SOFT_RESET, 0, 0xFFFFFFFF, 0);

	/* enable UMC */
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_CTRL2,
				  0x1F << UVD_LMI_CTRL2__RE_OFLD_MIF_WR_REQ_NUM__SHIFT,
				  0xFFFFFFFF, 0);

	/* enable master interrupt */
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MASTINT_EN,
				  UVD_MASTINT_EN__VCPU_EN_MASK, UVD_MASTINT_EN__VCPU_EN_MASK, 0);

	vcn_v1_0_clock_gating_dpg_mode(adev, 1);
	/* setup mmUVD_LMI_CTRL */
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_CTRL,
				  (8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
				  UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
				  UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
				  UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
				  UVD_LMI_CTRL__REQ_MODE_MASK |
				  UVD_LMI_CTRL__CRC_RESET_MASK |
				  UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
				  0x00100000L, 0xFFFFFFFF, 1);

	tmp = adev->gfx.config.gb_addr_config;
	/* setup VCN global tiling registers */
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_JPEG_ADDR_CONFIG, tmp, 0xFFFFFFFF, 1);
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_JPEG_UV_ADDR_CONFIG, tmp, 0xFFFFFFFF, 1);

	/* enable System Interrupt for JRBC */
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_SYS_INT_EN,
				  UVD_SYS_INT_EN__UVD_JRBC_EN_MASK, 0xFFFFFFFF, 1);

	/* force RBC into idle state */
	rb_bufsz = order_base_2(ring->ring_size);
	tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, tmp);

	/* set the write pointer delay */
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR_CNTL, 0);

	/* set the wb address */
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR_ADDR,
		     (upper_32_bits(ring->gpu_addr) >> 2));

	/* program the RB_BASE for ring buffer */
	WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
		     lower_32_bits(ring->gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
		     upper_32_bits(ring->gpu_addr));

	/* Initialize the ring buffer's read and write pointers */
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR, 0);

	WREG32_SOC15(UVD, 0, mmUVD_SCRATCH2, 0);

	ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
		     lower_32_bits(ring->wptr));

	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), 0,
		 ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);

	jpeg_v1_0_start(adev, 1);

	return 0;
}

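/**
 * vcn_v1_0_start - start VCN block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the VCN block in either SPG or DPG mode,
 * depending on the power gating flags
 */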
static int vcn_v1_0_start(struct amdgpu_device *adev)
{
	return (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ?
		vcn_v1_0_start_dpg_mode(adev) : vcn_v1_0_start_spg_mode(adev);
}

/**
 * vcn_v1_0_stop_spg_mode - stop VCN block
 *
 * @adev: amdgpu_device pointer
 *
 * stop the VCN block
 */
static int vcn_v1_0_stop_spg_mode(struct amdgpu_device *adev)
{
	int tmp;

	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7);

	tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
	      UVD_LMI_STATUS__READ_CLEAN_MASK |
	      UVD_LMI_STATUS__WRITE_CLEAN_MASK |
	      UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_LMI_STATUS, tmp, tmp);

	/* stall UMC channel */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2),
		 UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
		 ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

	tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
	      UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_LMI_STATUS, tmp, tmp);

	/* disable VCPU clock */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CNTL), 0,
		 ~UVD_VCPU_CNTL__CLK_EN_MASK);

	/* reset LMI UMC/LMI */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
		 UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK,
		 ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);

	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
		 UVD_SOFT_RESET__LMI_SOFT_RESET_MASK,
		 ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK);

	/* put VCPU into reset */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
		 UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
		 ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);

	WREG32_SOC15(UVD, 0, mmUVD_STATUS, 0);

	vcn_v1_0_enable_clock_gating(adev);
	vcn_1_0_enable_static_power_gating(adev);
	return 0;
}

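/**
 * vcn_v1_0_stop_dpg_mode - stop VCN block in DPG mode
 *
 * @adev: amdgpu_device pointer
 *
 * Wait for the ring read pointers to catch up with the write pointers,
 * then disable the dynamic power gating mode
 */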
static int vcn_v1_0_stop_dpg_mode(struct amdgpu_device *adev)
{
	uint32_t tmp;

	/* Wait for power status to be UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF */
	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
			   UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF,
			   UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

	/* wait for read ptr to be equal to write ptr */
	tmp = RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR);
	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RB_RPTR, tmp, 0xFFFFFFFF);

	tmp = RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2);
	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RB_RPTR2, tmp, 0xFFFFFFFF);

	tmp = RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR);
	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_JRBC_RB_RPTR, tmp, 0xFFFFFFFF);

	tmp = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF;
	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF);

	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
			   UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF,
			   UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

	/* disable dynamic power gating mode */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS), 0,
		 ~UVD_POWER_STATUS__UVD_PG_MODE_MASK);

	return 0;
}

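/**
 * vcn_v1_0_stop - stop VCN block
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the VCN block in either SPG or DPG mode,
 * depending on the power gating flags
 */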
static int vcn_v1_0_stop(struct amdgpu_device *adev)
{
	int r;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
		r = vcn_v1_0_stop_dpg_mode(adev);
	else
		r = vcn_v1_0_stop_spg_mode(adev);

	return r;
}

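/**
 * vcn_v1_0_pause_dpg_mode - pause or unpause DPG mode
 *
 * @adev: amdgpu_device pointer
 * @inst_idx: VCN instance index
 * @new_state: requested fw-based and jpeg pause state
 *
 * Pause or unpause the non-jpeg and jpeg parts of DPG mode and
 * restore the ring registers when the state actually changes
 */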
static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev,
				   int inst_idx, struct dpg_pause_state *new_state)
{
	int ret_code;
	uint32_t reg_data = 0;
	uint32_t reg_data2 = 0;
	struct amdgpu_ring *ring;

	/* pause/unpause if state is changed */
	if (adev->vcn.inst[inst_idx].pause_state.fw_based != new_state->fw_based) {
		DRM_DEBUG("dpg pause state changed %d:%d -> %d:%d",
			  adev->vcn.inst[inst_idx].pause_state.fw_based,
			  adev->vcn.inst[inst_idx].pause_state.jpeg,
			  new_state->fw_based, new_state->jpeg);

		reg_data = RREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE) &
			   (~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);

		if (new_state->fw_based == VCN_DPG_STATE__PAUSE) {
			ret_code = 0;

			if (!(reg_data & UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK))
				ret_code = SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
							      UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF,
							      UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

			if (!ret_code) {
				/* pause DPG non-jpeg */
				reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
				WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
				SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_DPG_PAUSE,
						   UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
						   UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);

				/* Restore */
				ring = &adev->vcn.inst->ring_enc[0];
				WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr);
				WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
				WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4);
				WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
				WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));

				ring = &adev->vcn.inst->ring_enc[1];
				WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr);
				WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
				WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);
				WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
				WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));

				ring = &adev->vcn.inst->ring_dec;
				WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
					     RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2) & 0x7FFFFFFF);
				SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
						   UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON,
						   UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
			}
		} else {
			/* unpause dpg non-jpeg, no need to wait */
			reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
			WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
		}
		adev->vcn.inst[inst_idx].pause_state.fw_based = new_state->fw_based;
	}

	/* pause/unpause if state is changed */
	if (adev->vcn.inst[inst_idx].pause_state.jpeg != new_state->jpeg) {
		DRM_DEBUG("dpg pause state changed %d:%d -> %d:%d",
			  adev->vcn.inst[inst_idx].pause_state.fw_based,
			  adev->vcn.inst[inst_idx].pause_state.jpeg,
			  new_state->fw_based, new_state->jpeg);

		reg_data = RREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE) &
			   (~UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK);

		if (new_state->jpeg == VCN_DPG_STATE__PAUSE) {
			ret_code = 0;

			if (!(reg_data & UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK))
				ret_code = SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
							      UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF,
							      UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

			if (!ret_code) {
				/* Make sure JRBC snoop is disabled before sending the pause */
				reg_data2 = RREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS);
				reg_data2 |= UVD_POWER_STATUS__JRBC_SNOOP_DIS_MASK;
				WREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS, reg_data2);

				/* pause DPG jpeg */
				reg_data |= UVD_DPG_PAUSE__JPEG_PAUSE_DPG_REQ_MASK;
				WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
				SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_DPG_PAUSE,
						   UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK,
						   UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK);

				/* Restore */
				ring = &adev->jpeg.inst->ring_dec;
				WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_VMID, 0);
				WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL,
					     UVD_JRBC_RB_CNTL__RB_NO_FETCH_MASK |
					     UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK);
				WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW,
					     lower_32_bits(ring->gpu_addr));
				WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
					     upper_32_bits(ring->gpu_addr));
				WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_RPTR, ring->wptr);
				WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR, ring->wptr);
				WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL,
					     UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK);

				ring = &adev->vcn.inst->ring_dec;
				WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
					     RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2) & 0x7FFFFFFF);
				SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
						   UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON,
						   UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
			}
		} else {
			/* unpause dpg jpeg, no need to wait */
			reg_data &= ~UVD_DPG_PAUSE__JPEG_PAUSE_DPG_REQ_MASK;
			WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
		}
		adev->vcn.inst[inst_idx].pause_state.jpeg = new_state->jpeg;
	}

	return 0;
}

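/**
 * vcn_v1_0_is_idle - check VCN block idle status
 *
 * @handle: amdgpu_device pointer
 *
 * Check whether UVD_STATUS reports the VCN block as idle
 */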
static bool vcn_v1_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return (RREG32_SOC15(VCN, 0, mmUVD_STATUS) == UVD_STATUS__IDLE);
}

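/**
 * vcn_v1_0_wait_for_idle - wait for VCN block idle
 *
 * @handle: amdgpu_device pointer
 *
 * Wait until UVD_STATUS reports the VCN block as idle
 */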
static int vcn_v1_0_wait_for_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret;

	ret = SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_STATUS, UVD_STATUS__IDLE,
				 UVD_STATUS__IDLE);

	return ret;
}

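/**
 * vcn_v1_0_set_clockgating_state - set VCN clock gating state
 *
 * @handle: amdgpu_device pointer
 * @state: clock gating state to set
 *
 * Enable or disable VCN clock gating; gating is only enabled
 * when the block is idle
 */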
static int vcn_v1_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE);

	if (enable) {
		/* wait for STATUS to clear */
		if (!vcn_v1_0_is_idle(handle))
			return -EBUSY;
		vcn_v1_0_enable_clock_gating(adev);
	} else {
		/* disable HW gating and enable SW gating */
		vcn_v1_0_disable_clock_gating(adev);
	}
	return 0;
}

/**
 * vcn_v1_0_dec_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t vcn_v1_0_dec_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
}

/**
 * vcn_v1_0_dec_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t vcn_v1_0_dec_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR);
}

/**
 * vcn_v1_0_dec_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void vcn_v1_0_dec_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
		WREG32_SOC15(UVD, 0, mmUVD_SCRATCH2,
			     lower_32_bits(ring->wptr) | 0x80000000);

	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
}

/**
 * vcn_v1_0_dec_ring_insert_start - insert a start command
 *
 * @ring: amdgpu_ring pointer
 *
 * Write a start command to the ring.
 */
static void vcn_v1_0_dec_ring_insert_start(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring,
			  PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring,
			  PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, VCN_DEC_CMD_PACKET_START << 1);
}

/**
 * vcn_v1_0_dec_ring_insert_end - insert an end command
 *
 * @ring: amdgpu_ring pointer
 *
 * Write an end command to the ring.
 */
static void vcn_v1_0_dec_ring_insert_end(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring,
			  PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, VCN_DEC_CMD_PACKET_END << 1);
}

/**
 * vcn_v1_0_dec_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address
 * @seq: sequence number
 * @flags: fence related flags
 *
 * Write a fence and a trap command to the ring.
 */
static void vcn_v1_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
					 unsigned flags)
{
	struct amdgpu_device *adev = ring->adev;

	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring,
			  PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring,
			  PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, addr & 0xffffffff);
	amdgpu_ring_write(ring,
			  PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
	amdgpu_ring_write(ring,
			  PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, VCN_DEC_CMD_FENCE << 1);

	amdgpu_ring_write(ring,
			  PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring,
			  PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring,
			  PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, VCN_DEC_CMD_TRAP << 1);
}

/**
 * vcn_v1_0_dec_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @job: job to retrieve vmid from
 * @ib: indirect buffer to execute
 * @flags: unused
 *
 * Write ring commands to execute the indirect buffer
 */
static void vcn_v1_0_dec_ring_emit_ib(struct amdgpu_ring *ring,
				      struct amdgpu_job *job,
				      struct amdgpu_ib *ib,
				      uint32_t flags)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);

	amdgpu_ring_write(ring,
			  PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_VMID), 0));
	amdgpu_ring_write(ring, vmid);

	amdgpu_ring_write(ring,
			  PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_64BIT_BAR_LOW), 0));
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring,
			  PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH), 0));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring,
			  PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_IB_SIZE), 0));
	amdgpu_ring_write(ring, ib->length_dw);
}

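/**
 * vcn_v1_0_dec_ring_emit_reg_wait - emit a register wait command
 *
 * @ring: amdgpu_ring pointer
 * @reg: register offset to poll
 * @val: value to wait for
 * @mask: mask to apply to the register before the comparison
 *
 * Write ring commands that wait until the masked register
 * matches the expected value
 */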
static void vcn_v1_0_dec_ring_emit_reg_wait(struct amdgpu_ring *ring,
					    uint32_t reg, uint32_t val,
					    uint32_t mask)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring,
			  PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring,
			  PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, val);
	amdgpu_ring_write(ring,
			  PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GP_SCRATCH8), 0));
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring,
			  PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, VCN_DEC_CMD_REG_READ_COND_WAIT << 1);
}

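/**
 * vcn_v1_0_dec_ring_emit_vm_flush - emit a VM flush
 *
 * @ring: amdgpu_ring pointer
 * @vmid: VM id to flush for
 * @pd_addr: page directory base address
 *
 * Flush the GPU TLB for the given VM and wait for the page table
 * base address register write to take effect
 */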
static void vcn_v1_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
					    unsigned vmid, uint64_t pd_addr)
{
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
	uint32_t data0, data1, mask;

	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* wait for register write */
	data0 = hub->ctx0_ptb_addr_lo32 + vmid * hub->ctx_addr_distance;
	data1 = lower_32_bits(pd_addr);
	mask = 0xffffffff;
	vcn_v1_0_dec_ring_emit_reg_wait(ring, data0, data1, mask);
}

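/**
 * vcn_v1_0_dec_ring_emit_wreg - emit a register write on the dec ring
 *
 * @ring: amdgpu_ring pointer
 * @reg: register offset (dword-based) to write
 * @val: value to write
 *
 * Write @val to @reg through the VCPU DATA0/DATA1 command registers.
 */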
static void vcn_v1_0_dec_ring_emit_wreg(struct amdgpu_ring *ring,
					uint32_t reg, uint32_t val)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, val);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, VCN_DEC_CMD_WRITE_REG << 1);
}

/**
 * vcn_v1_0_enc_ring_get_rptr - get enc read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc read pointer
 */
static uint64_t vcn_v1_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.inst->ring_enc[0])
		return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR);
	else
		return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2);
}

/**
 * vcn_v1_0_enc_ring_get_wptr - get enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc write pointer
 */
static uint64_t vcn_v1_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.inst->ring_enc[0])
		return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR);
	else
		return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2);
}

/**
 * vcn_v1_0_enc_ring_set_wptr - set enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the enc write pointer to the hardware
 */
static void vcn_v1_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.inst->ring_enc[0])
		WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR,
			lower_32_bits(ring->wptr));
	else
		WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2,
			lower_32_bits(ring->wptr));
}

/**
 * vcn_v1_0_enc_ring_emit_fence - emit an enc fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address
 * @seq: sequence number
 * @flags: fence related flags
 *
 * Write an enc fence and a trap command to the ring.
 */
static void vcn_v1_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
					 u64 seq, unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, VCN_ENC_CMD_FENCE);
	amdgpu_ring_write(ring, addr);
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, VCN_ENC_CMD_TRAP);
}

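/**
 * vcn_v1_0_enc_ring_insert_end - emit an end-of-stream command
 *
 * @ring: amdgpu_ring pointer
 *
 * Write an END command to mark the end of the submission on the enc ring.
 */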
static void vcn_v1_0_enc_ring_insert_end(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, VCN_ENC_CMD_END);
}

/**
 * vcn_v1_0_enc_ring_emit_ib - enc execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @job: job to retrieve vmid from
 * @ib: indirect buffer to execute
 * @flags: unused
 *
 * Write enc ring commands to execute the indirect buffer
 */
static void vcn_v1_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
				      struct amdgpu_job *job,
				      struct amdgpu_ib *ib,
				      uint32_t flags)
{
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);

	amdgpu_ring_write(ring, VCN_ENC_CMD_IB);
	amdgpu_ring_write(ring, vmid);
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
}

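/**
 * vcn_v1_0_enc_ring_emit_reg_wait - emit a masked register wait on the enc ring
 *
 * @ring: amdgpu_ring pointer
 * @reg: register offset (dword-based) to poll
 * @val: expected value
 * @mask: bits of the register to compare
 *
 * Emit a REG_WAIT command so the engine polls @reg until its masked
 * value matches @val.
 */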
static void vcn_v1_0_enc_ring_emit_reg_wait(struct amdgpu_ring *ring,
					    uint32_t reg, uint32_t val,
					    uint32_t mask)
{
	amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WAIT);
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring, val);
}

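/**
 * vcn_v1_0_enc_ring_emit_vm_flush - emit a VM TLB flush on the enc ring
 *
 * @ring: amdgpu_ring pointer
 * @vmid: VM ID to flush
 * @pd_addr: page directory base address
 *
 * Flush the GPU TLB for @vmid, then wait until the page table base
 * address register for that VMID reads back the new address.
 */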
static void vcn_v1_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
					    unsigned int vmid, uint64_t pd_addr)
{
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];

	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* wait for reg writes */
	vcn_v1_0_enc_ring_emit_reg_wait(ring, hub->ctx0_ptb_addr_lo32 +
					vmid * hub->ctx_addr_distance,
					lower_32_bits(pd_addr), 0xffffffff);
}

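/**
 * vcn_v1_0_enc_ring_emit_wreg - emit a register write on the enc ring
 *
 * @ring: amdgpu_ring pointer
 * @reg: register offset (dword-based) to write
 * @val: value to write
 *
 * Emit a REG_WRITE command followed by the byte offset and the value.
 */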
static void vcn_v1_0_enc_ring_emit_wreg(struct amdgpu_ring *ring,
					uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WRITE);
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, val);
}

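/**
 * vcn_v1_0_set_interrupt_state - set VCN interrupt state
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source
 * @type: interrupt type
 * @state: requested interrupt state
 *
 * Stub: VCN 1.0 appears to need no per-source enable programming, so
 * this only satisfies the amdgpu_irq_src_funcs interface.
 */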
static int vcn_v1_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	return 0;
}

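/**
 * vcn_v1_0_process_interrupt - process a VCN interrupt
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source
 * @entry: interrupt vector entry
 *
 * Dispatch a VCN trap to the fence handler of the ring that raised it.
 */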
static int vcn_v1_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("IH: VCN TRAP\n");

	switch (entry->src_id) {
	case 124: /* VCN_1_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT */
		amdgpu_fence_process(&adev->vcn.inst->ring_dec);
		break;
	case 119: /* VCN_1_0__SRCID__UVD_ENC_GENERAL_PURPOSE */
		amdgpu_fence_process(&adev->vcn.inst->ring_enc[0]);
		break;
	case 120: /* VCN_1_0__SRCID__UVD_ENC_LOW_LATENCY */
		amdgpu_fence_process(&adev->vcn.inst->ring_enc[1]);
		break;
	default:
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}

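/**
 * vcn_v1_0_dec_ring_insert_nop - insert NOP packets into the dec ring
 *
 * @ring: amdgpu_ring pointer
 * @count: number of dwords to fill
 *
 * Pad the ring with UVD_NO_OP register writes.  Each NOP packet is two
 * dwords, hence the requirement that the write pointer and @count be even.
 */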
static void vcn_v1_0_dec_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	struct amdgpu_device *adev = ring->adev;
	int i;

	WARN_ON(ring->wptr % 2 || count % 2);

	for (i = 0; i < count / 2; i++) {
		amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP), 0));
		amdgpu_ring_write(ring, 0);
	}
}

static int vcn_v1_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	/* This doesn't actually powergate the VCN block.
	 * That's done in the dpm code via the SMC. This
	 * just re-inits the block as necessary. The actual
	 * gating still happens in the dpm code. We should
	 * revisit this when there is a cleaner line between
	 * the smc and the hw blocks
	 */
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (state == adev->vcn.cur_state)
		return 0;

	if (state == AMD_PG_STATE_GATE)
		ret = vcn_v1_0_stop(adev);
	else
		ret = vcn_v1_0_start(adev);

	if (!ret)
		adev->vcn.cur_state = state;

	return ret;
}

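/**
 * vcn_v1_0_idle_work_handler - delayed-work handler for VCN idling
 *
 * @work: work_struct embedded in adev->vcn.idle_work
 *
 * Count the fences still outstanding on the dec, enc and JPEG rings.
 * In DPG mode, pause or unpause the firmware depending on which rings
 * are busy.  If everything is idle, re-enable GFXOFF and power gate the
 * block; otherwise re-arm the work item.
 */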
static void vcn_v1_0_idle_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, vcn.idle_work.work);
	unsigned int fences = 0, i;

	for (i = 0; i < adev->vcn.num_enc_rings; ++i)
		fences += amdgpu_fence_count_emitted(&adev->vcn.inst->ring_enc[i]);

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
		struct dpg_pause_state new_state;

		if (fences)
			new_state.fw_based = VCN_DPG_STATE__PAUSE;
		else
			new_state.fw_based = VCN_DPG_STATE__UNPAUSE;

		if (amdgpu_fence_count_emitted(&adev->jpeg.inst->ring_dec))
			new_state.jpeg = VCN_DPG_STATE__PAUSE;
		else
			new_state.jpeg = VCN_DPG_STATE__UNPAUSE;

		adev->vcn.pause_dpg_mode(adev, 0, &new_state);
	}

	fences += amdgpu_fence_count_emitted(&adev->jpeg.inst->ring_dec);
	fences += amdgpu_fence_count_emitted(&adev->vcn.inst->ring_dec);

	if (fences == 0) {
		amdgpu_gfx_off_ctrl(adev, true);
		if (adev->pm.dpm_enabled)
			amdgpu_dpm_enable_uvd(adev, false);
		else
			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
							       AMD_PG_STATE_GATE);
	} else {
		schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
	}
}

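/**
 * vcn_v1_0_ring_begin_use - ring begin_use callback
 *
 * @ring: amdgpu_ring pointer
 *
 * Cancel any pending idle work (if none was pending, the block may have
 * been powered down and must be brought back up), take the VCN1/JPEG1
 * workaround mutex and wait for the JPEG decode ring to drain before
 * powering up for the submission.
 */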
static void vcn_v1_0_ring_begin_use(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.idle_work);

	mutex_lock(&adev->vcn.vcn1_jpeg1_workaround);

	if (amdgpu_fence_wait_empty(&ring->adev->jpeg.inst->ring_dec))
		DRM_ERROR("VCN dec: jpeg dec ring may not be empty\n");

	vcn_v1_0_set_pg_for_begin_use(ring, set_clocks);
}

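/**
 * vcn_v1_0_set_pg_for_begin_use - power up VCN before a ring is used
 *
 * @ring: amdgpu_ring pointer
 * @set_clocks: true if the block was idle and power/clocks must be re-enabled
 *
 * Disable GFXOFF and ungate VCN when the block was idle, then, in DPG
 * mode, recompute the pause state from the outstanding fences and the
 * type of the ring about to be submitted to.  Not static, as it is
 * apparently shared with the JPEG 1.0 code, which uses the same power state.
 */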
void vcn_v1_0_set_pg_for_begin_use(struct amdgpu_ring *ring, bool set_clocks)
{
	struct amdgpu_device *adev = ring->adev;

	if (set_clocks) {
		amdgpu_gfx_off_ctrl(adev, false);
		if (adev->pm.dpm_enabled)
			amdgpu_dpm_enable_uvd(adev, true);
		else
			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
							       AMD_PG_STATE_UNGATE);
	}

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
		struct dpg_pause_state new_state;
		unsigned int fences = 0, i;

		for (i = 0; i < adev->vcn.num_enc_rings; ++i)
			fences += amdgpu_fence_count_emitted(&adev->vcn.inst->ring_enc[i]);

		if (fences)
			new_state.fw_based = VCN_DPG_STATE__PAUSE;
		else
			new_state.fw_based = VCN_DPG_STATE__UNPAUSE;

		if (amdgpu_fence_count_emitted(&adev->jpeg.inst->ring_dec))
			new_state.jpeg = VCN_DPG_STATE__PAUSE;
		else
			new_state.jpeg = VCN_DPG_STATE__UNPAUSE;

		if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
			new_state.fw_based = VCN_DPG_STATE__PAUSE;
		else if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG)
			new_state.jpeg = VCN_DPG_STATE__PAUSE;

		adev->vcn.pause_dpg_mode(adev, 0, &new_state);
	}
}

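/**
 * vcn_v1_0_ring_end_use - ring end_use callback
 *
 * @ring: amdgpu_ring pointer
 *
 * Schedule the idle work to run after VCN_IDLE_TIMEOUT and release the
 * VCN1/JPEG1 workaround mutex taken in begin_use.
 */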
void vcn_v1_0_ring_end_use(struct amdgpu_ring *ring)
{
	schedule_delayed_work(&ring->adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
	mutex_unlock(&ring->adev->vcn.vcn1_jpeg1_workaround);
}

static const struct amd_ip_funcs vcn_v1_0_ip_funcs = {
	.name = "vcn_v1_0",
	.early_init = vcn_v1_0_early_init,
	.late_init = NULL,
	.sw_init = vcn_v1_0_sw_init,
	.sw_fini = vcn_v1_0_sw_fini,
	.hw_init = vcn_v1_0_hw_init,
	.hw_fini = vcn_v1_0_hw_fini,
	.suspend = vcn_v1_0_suspend,
	.resume = vcn_v1_0_resume,
	.is_idle = vcn_v1_0_is_idle,
	.wait_for_idle = vcn_v1_0_wait_for_idle,
	.check_soft_reset = NULL /* vcn_v1_0_check_soft_reset */,
	.pre_soft_reset = NULL /* vcn_v1_0_pre_soft_reset */,
	.soft_reset = NULL /* vcn_v1_0_soft_reset */,
	.post_soft_reset = NULL /* vcn_v1_0_post_soft_reset */,
	.set_clockgating_state = vcn_v1_0_set_clockgating_state,
	.set_powergating_state = vcn_v1_0_set_powergating_state,
};

/*
 * It is a hardware issue that VCN can't handle a GTT TMZ buffer on
 * CHIP_RAVEN series ASICs. Move such a GTT TMZ buffer to the VRAM domain
 * before command submission as a workaround.
 */
static int vcn_v1_0_validate_bo(struct amdgpu_cs_parser *parser,
				struct amdgpu_job *job,
				uint64_t addr)
{
	struct ttm_operation_ctx ctx = { false, false };
	struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_bo *bo;
	int r;

	addr &= AMDGPU_GMC_HOLE_MASK;
	if (addr & 0x7) {
		DRM_ERROR("VCN messages must be 8 byte aligned!\n");
		return -EINVAL;
	}

	mapping = amdgpu_vm_bo_lookup_mapping(vm, addr / AMDGPU_GPU_PAGE_SIZE);
	if (!mapping || !mapping->bo_va || !mapping->bo_va->base.bo)
		return -EINVAL;

	bo = mapping->bo_va->base.bo;
	if (!(bo->flags & AMDGPU_GEM_CREATE_ENCRYPTED))
		return 0;

	amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (r) {
		DRM_ERROR("Failed to validate the VCN message BO (%d)!\n", r);
		return r;
	}

	return 0;
}

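/**
 * vcn_v1_0_ring_patch_cs_in_place - validate message BOs in a secure IB
 *
 * @p: command submission parser context
 * @job: job the IB belongs to
 * @ib: indirect buffer to scan
 *
 * For secure (TMZ) submissions, walk the IB, reconstruct the 64-bit
 * message address from the DATA0/DATA1 register writes and validate the
 * backing BO whenever a command register write is seen.
 */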
static int vcn_v1_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
					   struct amdgpu_job *job,
					   struct amdgpu_ib *ib)
{
	uint32_t msg_lo = 0, msg_hi = 0;
	int i, r;

	if (!(ib->flags & AMDGPU_IB_FLAGS_SECURE))
		return 0;

	for (i = 0; i < ib->length_dw; i += 2) {
		uint32_t reg = amdgpu_ib_get_value(ib, i);
		uint32_t val = amdgpu_ib_get_value(ib, i + 1);

		if (reg == PACKET0(p->adev->vcn.internal.data0, 0)) {
			msg_lo = val;
		} else if (reg == PACKET0(p->adev->vcn.internal.data1, 0)) {
			msg_hi = val;
		} else if (reg == PACKET0(p->adev->vcn.internal.cmd, 0)) {
			r = vcn_v1_0_validate_bo(p, job,
						 ((u64)msg_hi) << 32 | msg_lo);
			if (r)
				return r;
		}
	}

	return 0;
}

static const struct amdgpu_ring_funcs vcn_v1_0_dec_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_DEC,
	.align_mask = 0xf,
	.support_64bit_ptrs = false,
	.no_user_fence = true,
	.secure_submission_supported = true,
	.vmhub = AMDGPU_MMHUB_0,
	.get_rptr = vcn_v1_0_dec_ring_get_rptr,
	.get_wptr = vcn_v1_0_dec_ring_get_wptr,
	.set_wptr = vcn_v1_0_dec_ring_set_wptr,
	.patch_cs_in_place = vcn_v1_0_ring_patch_cs_in_place,
	.emit_frame_size =
		6 + 6 + /* hdp invalidate / flush */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
		8 + /* vcn_v1_0_dec_ring_emit_vm_flush */
		14 + 14 + /* vcn_v1_0_dec_ring_emit_fence x2 vm fence */
		6,
	.emit_ib_size = 8, /* vcn_v1_0_dec_ring_emit_ib */
	.emit_ib = vcn_v1_0_dec_ring_emit_ib,
	.emit_fence = vcn_v1_0_dec_ring_emit_fence,
	.emit_vm_flush = vcn_v1_0_dec_ring_emit_vm_flush,
	.test_ring = amdgpu_vcn_dec_ring_test_ring,
	.test_ib = amdgpu_vcn_dec_ring_test_ib,
	.insert_nop = vcn_v1_0_dec_ring_insert_nop,
	.insert_start = vcn_v1_0_dec_ring_insert_start,
	.insert_end = vcn_v1_0_dec_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = vcn_v1_0_ring_begin_use,
	.end_use = vcn_v1_0_ring_end_use,
	.emit_wreg = vcn_v1_0_dec_ring_emit_wreg,
	.emit_reg_wait = vcn_v1_0_dec_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

static const struct amdgpu_ring_funcs vcn_v1_0_enc_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_ENC,
	.align_mask = 0x3f,
	.nop = VCN_ENC_CMD_NO_OP,
	.support_64bit_ptrs = false,
	.no_user_fence = true,
	.vmhub = AMDGPU_MMHUB_0,
	.get_rptr = vcn_v1_0_enc_ring_get_rptr,
	.get_wptr = vcn_v1_0_enc_ring_get_wptr,
	.set_wptr = vcn_v1_0_enc_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
		4 + /* vcn_v1_0_enc_ring_emit_vm_flush */
		5 + 5 + /* vcn_v1_0_enc_ring_emit_fence x2 vm fence */
		1, /* vcn_v1_0_enc_ring_insert_end */
	.emit_ib_size = 5, /* vcn_v1_0_enc_ring_emit_ib */
	.emit_ib = vcn_v1_0_enc_ring_emit_ib,
	.emit_fence = vcn_v1_0_enc_ring_emit_fence,
	.emit_vm_flush = vcn_v1_0_enc_ring_emit_vm_flush,
	.test_ring = amdgpu_vcn_enc_ring_test_ring,
	.test_ib = amdgpu_vcn_enc_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.insert_end = vcn_v1_0_enc_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = vcn_v1_0_ring_begin_use,
	.end_use = vcn_v1_0_ring_end_use,
	.emit_wreg = vcn_v1_0_enc_ring_emit_wreg,
	.emit_reg_wait = vcn_v1_0_enc_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev)
{
	adev->vcn.inst->ring_dec.funcs = &vcn_v1_0_dec_ring_vm_funcs;
	DRM_INFO("VCN decode is enabled in VM mode\n");
}

static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->vcn.num_enc_rings; ++i)
		adev->vcn.inst->ring_enc[i].funcs = &vcn_v1_0_enc_ring_vm_funcs;

	DRM_INFO("VCN encode is enabled in VM mode\n");
}

static const struct amdgpu_irq_src_funcs vcn_v1_0_irq_funcs = {
	.set = vcn_v1_0_set_interrupt_state,
	.process = vcn_v1_0_process_interrupt,
};

static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->vcn.inst->irq.num_types = adev->vcn.num_enc_rings + 2;
	adev->vcn.inst->irq.funcs = &vcn_v1_0_irq_funcs;
}

const struct amdgpu_ip_block_version vcn_v1_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_VCN,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &vcn_v1_0_ip_funcs,
};