/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>

#include "amdgpu.h"
#include "amdgpu_cs.h"
#include "amdgpu_vcn.h"
#include "amdgpu_pm.h"
#include "soc15.h"
#include "soc15d.h"
#include "soc15_common.h"

#include "vcn/vcn_1_0_offset.h"
#include "vcn/vcn_1_0_sh_mask.h"
#include "mmhub/mmhub_9_1_offset.h"
#include "mmhub/mmhub_9_1_sh_mask.h"

#include "ivsrcid/vcn/irqsrcs_vcn_1_0.h"
#include "jpeg_v1_0.h"
#include "vcn_v1_0.h"

#define mmUVD_RBC_XX_IB_REG_CHECK_1_0		0x05ab
#define mmUVD_RBC_XX_IB_REG_CHECK_1_0_BASE_IDX	1
#define mmUVD_REG_XX_MASK_1_0			0x05ac
#define mmUVD_REG_XX_MASK_1_0_BASE_IDX		1

static int vcn_v1_0_stop(struct amdgpu_device *adev);
static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev);
static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev);
static int vcn_v1_0_set_powergating_state(void *handle, enum amd_powergating_state state);
static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev,
				int inst_idx, struct dpg_pause_state *new_state);

static void vcn_v1_0_idle_work_handler(struct work_struct *work);
static void vcn_v1_0_ring_begin_use(struct amdgpu_ring *ring);

/**
 * vcn_v1_0_early_init - set function pointers and load microcode
 *
 * @handle: amdgpu_device pointer
 *
 * Set ring and irq function pointers
 * Load microcode from filesystem
 */
static int vcn_v1_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->vcn.num_enc_rings = 2;

	vcn_v1_0_set_dec_ring_funcs(adev);
	vcn_v1_0_set_enc_ring_funcs(adev);
	vcn_v1_0_set_irq_funcs(adev);

	jpeg_v1_0_early_init(handle);

	return amdgpu_vcn_early_init(adev);
}

/**
 * vcn_v1_0_sw_init - sw init for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Load firmware and sw initialization
 */
static int vcn_v1_0_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int i, r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* VCN DEC TRAP */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
			VCN_1_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->vcn.inst->irq);
	if (r)
		return r;

	/* VCN ENC TRAP */
	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, i + VCN_1_0__SRCID__UVD_ENC_GENERAL_PURPOSE,
					&adev->vcn.inst->irq);
		if (r)
			return r;
	}

	r = amdgpu_vcn_sw_init(adev);
	if (r)
		return r;

	/* Override the work func */
	adev->vcn.idle_work.work.func = vcn_v1_0_idle_work_handler;

	amdgpu_vcn_setup_ucode(adev);

	r = amdgpu_vcn_resume(adev);
	if (r)
		return r;

	ring = &adev->vcn.inst->ring_dec;
	ring->vm_hub = AMDGPU_MMHUB0(0);
	sprintf(ring->name, "vcn_dec");
	r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0,
			     AMDGPU_RING_PRIO_DEFAULT, NULL);
	if (r)
		return r;

	adev->vcn.internal.scratch9 = adev->vcn.inst->external.scratch9 =
		SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9);
	adev->vcn.internal.data0 = adev->vcn.inst->external.data0 =
		SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0);
	adev->vcn.internal.data1 = adev->vcn.inst->external.data1 =
		SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1);
	adev->vcn.internal.cmd = adev->vcn.inst->external.cmd =
		SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD);
	adev->vcn.internal.nop = adev->vcn.inst->external.nop =
		SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP);

	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
		enum amdgpu_ring_priority_level hw_prio = amdgpu_vcn_get_enc_ring_prio(i);

		ring = &adev->vcn.inst->ring_enc[i];
		ring->vm_hub = AMDGPU_MMHUB0(0);
		sprintf(ring->name, "vcn_enc%d", i);
		r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0,
				     hw_prio, NULL);
		if (r)
			return r;
	}

	adev->vcn.pause_dpg_mode = vcn_v1_0_pause_dpg_mode;

	if (amdgpu_vcnfw_log) {
		volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr;

		fw_shared->present_flag_0 = 0;
		amdgpu_vcn_fwlog_init(adev->vcn.inst);
	}

	r = jpeg_v1_0_sw_init(handle);

	return r;
}

/**
 * vcn_v1_0_sw_fini - sw fini for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * VCN suspend and free up sw allocation
 */
static int vcn_v1_0_sw_fini(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vcn_suspend(adev);
	if (r)
		return r;

	jpeg_v1_0_sw_fini(handle);

	r = amdgpu_vcn_sw_fini(adev);

	return r;
}

/**
 * vcn_v1_0_hw_init - start and test VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int vcn_v1_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
	int i, r;

	r = amdgpu_ring_test_helper(ring);
	if (r)
		goto done;

	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
		ring = &adev->vcn.inst->ring_enc[i];
		r = amdgpu_ring_test_helper(ring);
		if (r)
			goto done;
	}

	ring = adev->jpeg.inst->ring_dec;
	r = amdgpu_ring_test_helper(ring);
	if (r)
		goto done;

done:
	if (!r)
		DRM_INFO("VCN decode and encode initialized successfully (under %s).\n",
			 (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ? "DPG Mode" : "SPG Mode");

	return r;
}

/**
 * vcn_v1_0_hw_fini - stop the hardware block
 *
 * @handle: amdgpu_device pointer
 *
 * Stop the VCN block, mark ring as not ready any more
 */
static int vcn_v1_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	cancel_delayed_work_sync(&adev->vcn.idle_work);

	if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
	    (adev->vcn.cur_state != AMD_PG_STATE_GATE &&
	     RREG32_SOC15(VCN, 0, mmUVD_STATUS))) {
		vcn_v1_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
	}

	return 0;
}

/**
 * vcn_v1_0_suspend - suspend VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * HW fini and suspend VCN block
 */
static int vcn_v1_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool idle_work_unexecuted;

	idle_work_unexecuted = cancel_delayed_work_sync(&adev->vcn.idle_work);
	if (idle_work_unexecuted) {
		if (adev->pm.dpm_enabled)
			amdgpu_dpm_enable_uvd(adev, false);
	}

	r = vcn_v1_0_hw_fini(adev);
	if (r)
		return r;

	r = amdgpu_vcn_suspend(adev);

	return r;
}

/**
 * vcn_v1_0_resume - resume VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Resume firmware and hw init VCN block
 */
static int vcn_v1_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vcn_resume(adev);
	if (r)
		return r;

	r = vcn_v1_0_hw_init(adev);

	return r;
}

/**
 * vcn_v1_0_mc_resume_spg_mode - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the VCN memory controller know its offsets
 */
static void vcn_v1_0_mc_resume_spg_mode(struct amdgpu_device *adev)
{
	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
	uint32_t offset;

	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			     (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo));
		WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			     (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi));
		WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0, 0);
		offset = 0;
	} else {
		WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			     lower_32_bits(adev->vcn.inst->gpu_addr));
		WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			     upper_32_bits(adev->vcn.inst->gpu_addr));
		offset = size;
		WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0,
			     AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
	}

	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE0, size);

	/* cache window 1: stack */
	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
		     lower_32_bits(adev->vcn.inst->gpu_addr + offset));
	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
		     upper_32_bits(adev->vcn.inst->gpu_addr + offset));
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, 0);
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);

	/* cache window 2: context */
	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
		     lower_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
	WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
		     upper_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, 0);
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);

	WREG32_SOC15(UVD, 0, mmUVD_UDEC_ADDR_CONFIG,
		     adev->gfx.config.gb_addr_config);
	WREG32_SOC15(UVD, 0, mmUVD_UDEC_DB_ADDR_CONFIG,
		     adev->gfx.config.gb_addr_config);
	WREG32_SOC15(UVD, 0, mmUVD_UDEC_DBW_ADDR_CONFIG,
		     adev->gfx.config.gb_addr_config);
	WREG32_SOC15(UVD, 0, mmUVD_UDEC_DBW_UV_ADDR_CONFIG,
		     adev->gfx.config.gb_addr_config);
	WREG32_SOC15(UVD, 0, mmUVD_MIF_CURR_ADDR_CONFIG,
		     adev->gfx.config.gb_addr_config);
	WREG32_SOC15(UVD, 0, mmUVD_MIF_CURR_UV_ADDR_CONFIG,
		     adev->gfx.config.gb_addr_config);
	WREG32_SOC15(UVD, 0, mmUVD_MIF_RECON1_ADDR_CONFIG,
		     adev->gfx.config.gb_addr_config);
	WREG32_SOC15(UVD, 0, mmUVD_MIF_RECON1_UV_ADDR_CONFIG,
		     adev->gfx.config.gb_addr_config);
	WREG32_SOC15(UVD, 0, mmUVD_MIF_REF_ADDR_CONFIG,
		     adev->gfx.config.gb_addr_config);
	WREG32_SOC15(UVD, 0, mmUVD_MIF_REF_UV_ADDR_CONFIG,
		     adev->gfx.config.gb_addr_config);
	WREG32_SOC15(UVD, 0, mmUVD_JPEG_ADDR_CONFIG,
		     adev->gfx.config.gb_addr_config);
	WREG32_SOC15(UVD, 0, mmUVD_JPEG_UV_ADDR_CONFIG,
		     adev->gfx.config.gb_addr_config);
}

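/**
 * vcn_v1_0_mc_resume_dpg_mode - memory controller programming for DPG mode
 *
 * @adev: amdgpu_device pointer
 *
 * Let the VCN memory controller know its offsets, written through the
 * DPG indirect register path
 */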
static void vcn_v1_0_mc_resume_dpg_mode(struct amdgpu_device *adev)
{
	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
	uint32_t offset;

	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			     (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo),
			     0xFFFFFFFF, 0);
		WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			     (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi),
			     0xFFFFFFFF, 0);
		WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0, 0,
			     0xFFFFFFFF, 0);
		offset = 0;
	} else {
		WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			     lower_32_bits(adev->vcn.inst->gpu_addr), 0xFFFFFFFF, 0);
		WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			     upper_32_bits(adev->vcn.inst->gpu_addr), 0xFFFFFFFF, 0);
		offset = size;
		WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0,
			     AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0xFFFFFFFF, 0);
	}

	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_VCPU_CACHE_SIZE0, size, 0xFFFFFFFF, 0);

	/* cache window 1: stack */
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
		     lower_32_bits(adev->vcn.inst->gpu_addr + offset), 0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
		     upper_32_bits(adev->vcn.inst->gpu_addr + offset), 0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, 0,
			     0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE,
			     0xFFFFFFFF, 0);

	/* cache window 2: context */
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
		     lower_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE),
			     0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
		     upper_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE),
			     0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, 0, 0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE,
			     0xFFFFFFFF, 0);

	/* VCN global tiling registers */
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_UDEC_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_UDEC_DB_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_UDEC_DBW_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_UDEC_DBW_UV_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MIF_CURR_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MIF_CURR_UV_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MIF_RECON1_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MIF_RECON1_UV_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MIF_REF_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MIF_REF_UV_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
}

/**
 * vcn_v1_0_disable_clock_gating - disable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 *
 * Disable clock gating for VCN block
 */
static void vcn_v1_0_disable_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data;

	/* JPEG disable CGC */
	data = RREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL);

	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		data &= ~JPEG_CGC_CTRL__DYN_CLOCK_MODE_MASK;

	data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE);
	data &= ~(JPEG_CGC_GATE__JPEG_MASK | JPEG_CGC_GATE__JPEG2_MASK);
	WREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE, data);

	/* UVD disable CGC */
	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;

	data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_GATE);
	data &= ~(UVD_CGC_GATE__SYS_MASK
		| UVD_CGC_GATE__UDEC_MASK
		| UVD_CGC_GATE__MPEG2_MASK
		| UVD_CGC_GATE__REGS_MASK
		| UVD_CGC_GATE__RBC_MASK
		| UVD_CGC_GATE__LMI_MC_MASK
		| UVD_CGC_GATE__LMI_UMC_MASK
		| UVD_CGC_GATE__IDCT_MASK
		| UVD_CGC_GATE__MPRD_MASK
		| UVD_CGC_GATE__MPC_MASK
		| UVD_CGC_GATE__LBSI_MASK
		| UVD_CGC_GATE__LRBBM_MASK
		| UVD_CGC_GATE__UDEC_RE_MASK
		| UVD_CGC_GATE__UDEC_CM_MASK
		| UVD_CGC_GATE__UDEC_IT_MASK
		| UVD_CGC_GATE__UDEC_DB_MASK
		| UVD_CGC_GATE__UDEC_MP_MASK
		| UVD_CGC_GATE__WCB_MASK
		| UVD_CGC_GATE__VCPU_MASK
		| UVD_CGC_GATE__SCPU_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_CGC_GATE, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
	data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK
		| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
		| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
		| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
		| UVD_CGC_CTRL__SYS_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MODE_MASK
		| UVD_CGC_CTRL__MPEG2_MODE_MASK
		| UVD_CGC_CTRL__REGS_MODE_MASK
		| UVD_CGC_CTRL__RBC_MODE_MASK
		| UVD_CGC_CTRL__LMI_MC_MODE_MASK
		| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
		| UVD_CGC_CTRL__IDCT_MODE_MASK
		| UVD_CGC_CTRL__MPRD_MODE_MASK
		| UVD_CGC_CTRL__MPC_MODE_MASK
		| UVD_CGC_CTRL__LBSI_MODE_MASK
		| UVD_CGC_CTRL__LRBBM_MODE_MASK
		| UVD_CGC_CTRL__WCB_MODE_MASK
		| UVD_CGC_CTRL__VCPU_MODE_MASK
		| UVD_CGC_CTRL__SCPU_MODE_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);

	/* turn on */
	data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_GATE);
	data |= (UVD_SUVD_CGC_GATE__SRE_MASK
		| UVD_SUVD_CGC_GATE__SIT_MASK
		| UVD_SUVD_CGC_GATE__SMP_MASK
		| UVD_SUVD_CGC_GATE__SCM_MASK
		| UVD_SUVD_CGC_GATE__SDB_MASK
		| UVD_SUVD_CGC_GATE__SRE_H264_MASK
		| UVD_SUVD_CGC_GATE__SRE_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SIT_H264_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SCM_H264_MASK
		| UVD_SUVD_CGC_GATE__SCM_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SDB_H264_MASK
		| UVD_SUVD_CGC_GATE__SDB_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SCLR_MASK
		| UVD_SUVD_CGC_GATE__UVD_SC_MASK
		| UVD_SUVD_CGC_GATE__ENT_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK
		| UVD_SUVD_CGC_GATE__SITE_MASK
		| UVD_SUVD_CGC_GATE__SRE_VP9_MASK
		| UVD_SUVD_CGC_GATE__SCM_VP9_MASK
		| UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK
		| UVD_SUVD_CGC_GATE__SDB_VP9_MASK
		| UVD_SUVD_CGC_GATE__IME_HEVC_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_GATE, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL);
	data &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
		| UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
		| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL, data);
}

/**
 * vcn_v1_0_enable_clock_gating - enable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 *
 * Enable clock gating for VCN block
 */
static void vcn_v1_0_enable_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data = 0;

	/* enable JPEG CGC */
	data = RREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL);
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		data |= 0 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE);
	data |= (JPEG_CGC_GATE__JPEG_MASK | JPEG_CGC_GATE__JPEG2_MASK);
	WREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE, data);

	/* enable UVD CGC */
	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		data |= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
	data |= (UVD_CGC_CTRL__UDEC_RE_MODE_MASK
		| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
		| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
		| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
		| UVD_CGC_CTRL__SYS_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MODE_MASK
		| UVD_CGC_CTRL__MPEG2_MODE_MASK
		| UVD_CGC_CTRL__REGS_MODE_MASK
		| UVD_CGC_CTRL__RBC_MODE_MASK
		| UVD_CGC_CTRL__LMI_MC_MODE_MASK
		| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
		| UVD_CGC_CTRL__IDCT_MODE_MASK
		| UVD_CGC_CTRL__MPRD_MODE_MASK
		| UVD_CGC_CTRL__MPC_MODE_MASK
		| UVD_CGC_CTRL__LBSI_MODE_MASK
		| UVD_CGC_CTRL__LRBBM_MODE_MASK
		| UVD_CGC_CTRL__WCB_MODE_MASK
		| UVD_CGC_CTRL__VCPU_MODE_MASK
		| UVD_CGC_CTRL__SCPU_MODE_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL);
	data |= (UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
		| UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
		| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
	WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL, data);
}

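/**
 * vcn_v1_0_clock_gating_dpg_mode - set clock gating for DPG mode
 *
 * @adev: amdgpu_device pointer
 * @sram_sel: SRAM select forwarded to the DPG indirect write macro
 *
 * Program the JPEG, UVD and SUVD clock gating controls through the
 * DPG indirect register path
 */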
static void vcn_v1_0_clock_gating_dpg_mode(struct amdgpu_device *adev, uint8_t sram_sel)
{
	uint32_t reg_data = 0;

	/* disable JPEG CGC */
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		reg_data = 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		reg_data = 0 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	reg_data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	reg_data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmJPEG_CGC_CTRL, reg_data, 0xFFFFFFFF, sram_sel);

	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmJPEG_CGC_GATE, 0, 0xFFFFFFFF, sram_sel);

	/* enable sw clock gating control */
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		reg_data = 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		reg_data = 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	reg_data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	reg_data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	reg_data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
		 UVD_CGC_CTRL__SYS_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_MODE_MASK |
		 UVD_CGC_CTRL__MPEG2_MODE_MASK |
		 UVD_CGC_CTRL__REGS_MODE_MASK |
		 UVD_CGC_CTRL__RBC_MODE_MASK |
		 UVD_CGC_CTRL__LMI_MC_MODE_MASK |
		 UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
		 UVD_CGC_CTRL__IDCT_MODE_MASK |
		 UVD_CGC_CTRL__MPRD_MODE_MASK |
		 UVD_CGC_CTRL__MPC_MODE_MASK |
		 UVD_CGC_CTRL__LBSI_MODE_MASK |
		 UVD_CGC_CTRL__LRBBM_MODE_MASK |
		 UVD_CGC_CTRL__WCB_MODE_MASK |
		 UVD_CGC_CTRL__VCPU_MODE_MASK |
		 UVD_CGC_CTRL__SCPU_MODE_MASK);
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_CGC_CTRL, reg_data, 0xFFFFFFFF, sram_sel);

	/* turn off clock gating */
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_CGC_GATE, 0, 0xFFFFFFFF, sram_sel);

	/* turn on SUVD clock gating */
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_SUVD_CGC_GATE, 1, 0xFFFFFFFF, sram_sel);

	/* turn on sw mode in UVD_SUVD_CGC_CTRL */
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_SUVD_CGC_CTRL, 0, 0xFFFFFFFF, sram_sel);
}

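/**
 * vcn_1_0_disable_static_power_gating - power up VCN tiles
 *
 * @adev: amdgpu_device pointer
 *
 * Program the PGFSM to power up the UVD tiles and wait for the
 * power status to report power-on
 */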
static void vcn_1_0_disable_static_power_gating(struct amdgpu_device *adev)
{
	uint32_t data = 0;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
		data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDW_PWR_CONFIG__SHIFT);

		WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, UVD_PGFSM_STATUS__UVDM_UVDU_PWR_ON, 0xFFFFFF);
	} else {
		data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDW_PWR_CONFIG__SHIFT);
		WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, 0, 0xFFFFFFFF);
	}

	/* polling UVD_PGFSM_STATUS to confirm UVDM_PWR_STATUS, UVDU_PWR_STATUS are 0 (power on) */

	data = RREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS);
	data &= ~0x103;
	if (adev->pg_flags & AMD_PG_SUPPORT_VCN)
		data |= UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON | UVD_POWER_STATUS__UVD_PG_EN_MASK;

	WREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS, data);
}

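/**
 * vcn_1_0_enable_static_power_gating - power down VCN tiles
 *
 * @adev: amdgpu_device pointer
 *
 * Program the PGFSM to power down the UVD tiles and wait for the
 * power status to report power-off
 */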
static void vcn_1_0_enable_static_power_gating(struct amdgpu_device *adev)
{
	uint32_t data = 0;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
		/* Before power off, this indicator has to be turned on */
		data = RREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS);
		data &= ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK;
		data |= UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF;
		WREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS, data);

		data = (2 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDW_PWR_CONFIG__SHIFT);

		WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data);

		data = (2 << UVD_PGFSM_STATUS__UVDM_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDU_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDF_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDC_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDB_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDIL_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDIR_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDTD_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDTE_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDE_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDW_PWR_STATUS__SHIFT);
		SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, data, 0xFFFFFFFF);
	}
}

/**
 * vcn_v1_0_start_spg_mode - start VCN block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the VCN block
 */
static int vcn_v1_0_start_spg_mode(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
	uint32_t rb_bufsz, tmp;
	uint32_t lmi_swap_cntl;
	int i, j, r;

	/* disable byte swapping */
	lmi_swap_cntl = 0;

	vcn_1_0_disable_static_power_gating(adev);

	tmp = RREG32_SOC15(UVD, 0, mmUVD_STATUS) | UVD_STATUS__UVD_BUSY;
	WREG32_SOC15(UVD, 0, mmUVD_STATUS, tmp);

	/* disable clock gating */
	vcn_v1_0_disable_clock_gating(adev);

	/* disable interrupt */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN), 0,
		~UVD_MASTINT_EN__VCPU_EN_MASK);

	/* initialize VCN memory controller */
	tmp = RREG32_SOC15(UVD, 0, mmUVD_LMI_CTRL);
	WREG32_SOC15(UVD, 0, mmUVD_LMI_CTRL, tmp |
		UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);

#ifdef __BIG_ENDIAN
	/* swap (8 in 32) RB and IB */
	lmi_swap_cntl = 0xa;
#endif
	WREG32_SOC15(UVD, 0, mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);

	tmp = RREG32_SOC15(UVD, 0, mmUVD_MPC_CNTL);
	tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
	tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
	WREG32_SOC15(UVD, 0, mmUVD_MPC_CNTL, tmp);

	WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXA0,
		((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));

	WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXB0,
		((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));

	WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUX,
		((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
		 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));

	vcn_v1_0_mc_resume_spg_mode(adev);

	WREG32_SOC15(UVD, 0, mmUVD_REG_XX_MASK_1_0, 0x10);
	WREG32_SOC15(UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK_1_0,
		RREG32_SOC15(UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK_1_0) | 0x3);

	/* enable VCPU clock */
	WREG32_SOC15(UVD, 0, mmUVD_VCPU_CNTL, UVD_VCPU_CNTL__CLK_EN_MASK);

	/* boot up the VCPU */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0,
		~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);

	/* enable UMC */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2), 0,
		~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

	tmp = RREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET);
	tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
	tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
	WREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET, tmp);

	for (i = 0; i < 10; ++i) {
		uint32_t status;

		for (j = 0; j < 100; ++j) {
			status = RREG32_SOC15(UVD, 0, mmUVD_STATUS);
			if (status & UVD_STATUS__IDLE)
				break;
			mdelay(10);
		}
		r = 0;
		if (status & UVD_STATUS__IDLE)
			break;

		DRM_ERROR("VCN decode not responding, trying to reset the VCPU!!!\n");
		WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
			UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
			~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0,
			~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		r = -1;
	}

	if (r) {
		DRM_ERROR("VCN decode not responding, giving up!!!\n");
		return r;
	}
	/* enable master interrupt */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN),
		UVD_MASTINT_EN__VCPU_EN_MASK, ~UVD_MASTINT_EN__VCPU_EN_MASK);

	/* enable system interrupt for JRBC, TODO: move to set interrupt */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SYS_INT_EN),
		UVD_SYS_INT_EN__UVD_JRBC_EN_MASK,
		~UVD_SYS_INT_EN__UVD_JRBC_EN_MASK);

	/* clear the busy bit of UVD_STATUS */
	tmp = RREG32_SOC15(UVD, 0, mmUVD_STATUS) & ~UVD_STATUS__UVD_BUSY;
	WREG32_SOC15(UVD, 0, mmUVD_STATUS, tmp);

	/* force RBC into idle state */
	rb_bufsz = order_base_2(ring->ring_size);
	tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, tmp);

	/* set the write pointer delay */
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR_CNTL, 0);

	/* set the wb address */
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR_ADDR,
		(upper_32_bits(ring->gpu_addr) >> 2));

	/* program the RB_BASE for ring buffer */
	WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
		lower_32_bits(ring->gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
		upper_32_bits(ring->gpu_addr));

	/* Initialize the ring buffer's read and write pointers */
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR, 0);

	WREG32_SOC15(UVD, 0, mmUVD_SCRATCH2, 0);

	ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
		lower_32_bits(ring->wptr));

	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), 0,
		~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);

	ring = &adev->vcn.inst->ring_enc[0];
	WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr);
	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4);

	ring = &adev->vcn.inst->ring_enc[1];
	WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr);
	WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);

	jpeg_v1_0_start(adev, 0);

	return 0;
}

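/**
 * vcn_v1_0_start_dpg_mode - start VCN block with dynamic power gating
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the VCN block in dynamic power gating mode
 */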
static int vcn_v1_0_start_dpg_mode(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
	uint32_t rb_bufsz, tmp;
	uint32_t lmi_swap_cntl;

	/* disable byte swapping */
	lmi_swap_cntl = 0;

	vcn_1_0_enable_static_power_gating(adev);

	/* enable dynamic power gating mode */
	tmp = RREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS);
	tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK;
	tmp |= UVD_POWER_STATUS__UVD_PG_EN_MASK;
	WREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS, tmp);

	/* enable clock gating */
	vcn_v1_0_clock_gating_dpg_mode(adev, 0);

	/* enable VCPU clock */
	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
	tmp |= UVD_VCPU_CNTL__MIF_WR_LOW_THRESHOLD_BP_MASK;
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_VCPU_CNTL, tmp, 0xFFFFFFFF, 0);

	/* disable interrupt */
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MASTINT_EN,
			0, UVD_MASTINT_EN__VCPU_EN_MASK, 0);

	/* initialize VCN memory controller */
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_CTRL,
		(8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
		UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__REQ_MODE_MASK |
		UVD_LMI_CTRL__CRC_RESET_MASK |
		UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
		0x00100000L, 0xFFFFFFFF, 0);

#ifdef __BIG_ENDIAN
	/* swap (8 in 32) RB and IB */
	lmi_swap_cntl = 0xa;
#endif
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl, 0xFFFFFFFF, 0);

	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MPC_CNTL,
		0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT, 0xFFFFFFFF, 0);

	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MPC_SET_MUXA0,
		((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)), 0xFFFFFFFF, 0);

	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MPC_SET_MUXB0,
		((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)), 0xFFFFFFFF, 0);

	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MPC_SET_MUX,
		((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
		 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0xFFFFFFFF, 0);

	vcn_v1_0_mc_resume_dpg_mode(adev);

	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_REG_XX_MASK, 0x10, 0xFFFFFFFF, 0);
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK, 0x3, 0xFFFFFFFF, 0);

	/* boot up the VCPU */
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_SOFT_RESET, 0, 0xFFFFFFFF, 0);

	/* enable UMC */
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_CTRL2,
		0x1F << UVD_LMI_CTRL2__RE_OFLD_MIF_WR_REQ_NUM__SHIFT,
		0xFFFFFFFF, 0);

	/* enable master interrupt */
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MASTINT_EN,
		UVD_MASTINT_EN__VCPU_EN_MASK, UVD_MASTINT_EN__VCPU_EN_MASK, 0);

	vcn_v1_0_clock_gating_dpg_mode(adev, 1);
	/* setup mmUVD_LMI_CTRL */
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_CTRL,
		(8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
		UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__REQ_MODE_MASK |
		UVD_LMI_CTRL__CRC_RESET_MASK |
		UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
		0x00100000L, 0xFFFFFFFF, 1);

	tmp = adev->gfx.config.gb_addr_config;
	/* setup VCN global tiling registers */
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_JPEG_ADDR_CONFIG, tmp, 0xFFFFFFFF, 1);
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_JPEG_UV_ADDR_CONFIG, tmp, 0xFFFFFFFF, 1);

	/* enable System Interrupt for JRBC */
	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_SYS_INT_EN,
		UVD_SYS_INT_EN__UVD_JRBC_EN_MASK, 0xFFFFFFFF, 1);

	/* force RBC into idle state */
	rb_bufsz = order_base_2(ring->ring_size);
	tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, tmp);

	/* set the write pointer delay */
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR_CNTL, 0);

	/* set the wb address */
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR_ADDR,
		(upper_32_bits(ring->gpu_addr) >> 2));

	/* program the RB_BASE for ring buffer */
	WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
		lower_32_bits(ring->gpu_addr));
	WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
		upper_32_bits(ring->gpu_addr));

	/* Initialize the ring buffer's read and write pointers */
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR, 0);

	WREG32_SOC15(UVD, 0, mmUVD_SCRATCH2, 0);

	ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
		lower_32_bits(ring->wptr));

	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), 0,
		~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);

	jpeg_v1_0_start(adev, 1);

	return 0;
}

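/**
 * vcn_v1_0_start - start VCN block
 *
 * @adev: amdgpu_device pointer
 *
 * Start the block in DPG or SPG mode depending on the power gating flags
 */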
static int vcn_v1_0_start(struct amdgpu_device *adev)
{
	return (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ?
		vcn_v1_0_start_dpg_mode(adev) : vcn_v1_0_start_spg_mode(adev);
}

/**
 * vcn_v1_0_stop_spg_mode - stop VCN block
 *
 * @adev: amdgpu_device pointer
 *
 * stop the VCN block
 */
static int vcn_v1_0_stop_spg_mode(struct amdgpu_device *adev)
{
	int tmp;

	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7);

	tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
		UVD_LMI_STATUS__READ_CLEAN_MASK |
		UVD_LMI_STATUS__WRITE_CLEAN_MASK |
		UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_LMI_STATUS, tmp, tmp);

	/* stall UMC channel */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2),
		UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
		~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

	tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
		UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_LMI_STATUS, tmp, tmp);

	/* disable VCPU clock */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CNTL), 0,
		~UVD_VCPU_CNTL__CLK_EN_MASK);

	/* reset LMI UMC/LMI */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
		UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK,
		~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);

	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
		UVD_SOFT_RESET__LMI_SOFT_RESET_MASK,
		~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK);

	/* put VCPU into reset */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
		UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
		~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);

	WREG32_SOC15(UVD, 0, mmUVD_STATUS, 0);

	vcn_v1_0_enable_clock_gating(adev);
	vcn_1_0_enable_static_power_gating(adev);
	return 0;
}

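/**
 * vcn_v1_0_stop_dpg_mode - stop VCN block in dynamic power gating mode
 *
 * @adev: amdgpu_device pointer
 *
 * Wait for the rings to drain and disable dynamic power gating mode
 */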
static int vcn_v1_0_stop_dpg_mode(struct amdgpu_device *adev)
{
	uint32_t tmp;

	/* Wait for power status to be UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF */
	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
			UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF,
			UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

	/* wait for read ptr to be equal to write ptr */
	tmp = RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR);
	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RB_RPTR, tmp, 0xFFFFFFFF);

	tmp = RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2);
	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RB_RPTR2, tmp, 0xFFFFFFFF);

	tmp = RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR);
	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_JRBC_RB_RPTR, tmp, 0xFFFFFFFF);

	tmp = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF;
	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF);

	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
			UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF,
			UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

	/* disable dynamic power gating mode */
	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS), 0,
			~UVD_POWER_STATUS__UVD_PG_MODE_MASK);

	return 0;
}

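/**
 * vcn_v1_0_stop - stop VCN block
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the block in DPG or SPG mode depending on the power gating flags
 */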
static int vcn_v1_0_stop(struct amdgpu_device *adev)
{
	int r;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
		r = vcn_v1_0_stop_dpg_mode(adev);
	else
		r = vcn_v1_0_stop_spg_mode(adev);

	return r;
}

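/**
 * vcn_v1_0_pause_dpg_mode - pause/unpause DPG mode
 *
 * @adev: amdgpu_device pointer
 * @inst_idx: instance index
 * @new_state: requested pause state for the non-JPEG and JPEG paths
 *
 * Pause or unpause the non-JPEG and JPEG DPG paths when the requested
 * state differs from the current one, restoring the ring registers
 * after a pause is acknowledged
 */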
static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev,
				int inst_idx, struct dpg_pause_state *new_state)
{
	int ret_code;
	uint32_t reg_data = 0;
	uint32_t reg_data2 = 0;
	struct amdgpu_ring *ring;

	/* pause/unpause if state is changed */
	if (adev->vcn.inst[inst_idx].pause_state.fw_based != new_state->fw_based) {
		DRM_DEBUG("dpg pause state changed %d:%d -> %d:%d",
			adev->vcn.inst[inst_idx].pause_state.fw_based,
			adev->vcn.inst[inst_idx].pause_state.jpeg,
			new_state->fw_based, new_state->jpeg);

		reg_data = RREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE) &
			(~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);

		if (new_state->fw_based == VCN_DPG_STATE__PAUSE) {
			ret_code = 0;

			if (!(reg_data & UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK))
				ret_code = SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
						UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF,
						UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

			if (!ret_code) {
				/* pause DPG non-jpeg */
				reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
				WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
				SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_DPG_PAUSE,
						UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
						UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);

				/* Restore */
				ring = &adev->vcn.inst->ring_enc[0];
				WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr);
				WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
				WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4);
				WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
				WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));

				ring = &adev->vcn.inst->ring_enc[1];
				WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr);
				WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
				WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);
				WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
				WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));

				ring = &adev->vcn.inst->ring_dec;
				WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
						RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2) & 0x7FFFFFFF);
				SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
						UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON,
						UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
			}
		} else {
			/* unpause dpg non-jpeg, no need to wait */
			reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
			WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
		}
		adev->vcn.inst[inst_idx].pause_state.fw_based = new_state->fw_based;
	}

	/* pause/unpause if state is changed */
	if (adev->vcn.inst[inst_idx].pause_state.jpeg != new_state->jpeg) {
		DRM_DEBUG("dpg pause state changed %d:%d -> %d:%d",
			adev->vcn.inst[inst_idx].pause_state.fw_based,
			adev->vcn.inst[inst_idx].pause_state.jpeg,
			new_state->fw_based, new_state->jpeg);

		reg_data = RREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE) &
			(~UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK);

		if (new_state->jpeg == VCN_DPG_STATE__PAUSE) {
			ret_code = 0;

			if (!(reg_data & UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK))
				ret_code = SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
						UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF,
						UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

			if (!ret_code) {
				/* Make sure JPRG Snoop is disabled before sending the pause */
				reg_data2 = RREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS);
				reg_data2 |= UVD_POWER_STATUS__JRBC_SNOOP_DIS_MASK;
				WREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS, reg_data2);

				/* pause DPG jpeg */
				reg_data |= UVD_DPG_PAUSE__JPEG_PAUSE_DPG_REQ_MASK;
				WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
				SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_DPG_PAUSE,
						UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK,
						UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK);

				/* Restore */
				ring = adev->jpeg.inst->ring_dec;
				WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_VMID, 0);
				WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL,
						UVD_JRBC_RB_CNTL__RB_NO_FETCH_MASK |
						UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK);
				WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW,
						lower_32_bits(ring->gpu_addr));
				WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
						upper_32_bits(ring->gpu_addr));
				WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_RPTR, ring->wptr);
				WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR, ring->wptr);
				WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL,
						UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK);

				ring = &adev->vcn.inst->ring_dec;
				WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
						RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2) & 0x7FFFFFFF);
				SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
						UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON,
						UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
			}
		} else {
			/* unpause dpg jpeg, no need to wait */
			reg_data &= ~UVD_DPG_PAUSE__JPEG_PAUSE_DPG_REQ_MASK;
			WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
		}
		adev->vcn.inst[inst_idx].pause_state.jpeg = new_state->jpeg;
	}

	return 0;
}

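/**
 * vcn_v1_0_is_idle - check VCN block idle status
 *
 * @handle: amdgpu_device pointer
 *
 * Check whether UVD_STATUS reports idle
 */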
static bool vcn_v1_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return (RREG32_SOC15(VCN, 0, mmUVD_STATUS) == UVD_STATUS__IDLE);
}

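/**
 * vcn_v1_0_wait_for_idle - wait for VCN block to become idle
 *
 * @handle: amdgpu_device pointer
 *
 * Poll UVD_STATUS until the idle bit is set or the wait times out
 */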
static int vcn_v1_0_wait_for_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret;

	ret = SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_STATUS, UVD_STATUS__IDLE,
		UVD_STATUS__IDLE);

	return ret;
}

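/**
 * vcn_v1_0_set_clockgating_state - set VCN clock gating state
 *
 * @handle: amdgpu_device pointer
 * @state: clock gating state to set
 *
 * Enable hardware clock gating when gating, otherwise disable it and
 * fall back to sw controlled gating
 */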
static int vcn_v1_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE);

	if (enable) {
		/* wait for STATUS to clear */
		if (!vcn_v1_0_is_idle(handle))
			return -EBUSY;
		vcn_v1_0_enable_clock_gating(adev);
	} else {
		/* disable HW gating and enable Sw gating */
		vcn_v1_0_disable_clock_gating(adev);
	}
	return 0;
}

/**
 * vcn_v1_0_dec_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t vcn_v1_0_dec_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
}

/**
 * vcn_v1_0_dec_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t vcn_v1_0_dec_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR);
}

/**
 * vcn_v1_0_dec_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void vcn_v1_0_dec_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
		WREG32_SOC15(UVD, 0, mmUVD_SCRATCH2,
			lower_32_bits(ring->wptr) | 0x80000000);

	WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
}

/**
 * vcn_v1_0_dec_ring_insert_start - insert a start command
 *
 * @ring: amdgpu_ring pointer
 *
 * Write a start command to the ring.
 */
static void vcn_v1_0_dec_ring_insert_start(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, VCN_DEC_CMD_PACKET_START << 1);
}

/**
 * vcn_v1_0_dec_ring_insert_end - insert an end command
 *
 * @ring: amdgpu_ring pointer
 *
 * Write an end command to the ring.
 */
static void vcn_v1_0_dec_ring_insert_end(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, VCN_DEC_CMD_PACKET_END << 1);
}

/**
 * vcn_v1_0_dec_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address
 * @seq: sequence number
 * @flags: fence related flags
 *
 * Write a fence and a trap command to the ring.
 */
static void vcn_v1_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				     unsigned flags)
{
	struct amdgpu_device *adev = ring->adev;

	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, addr & 0xffffffff);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, VCN_DEC_CMD_FENCE << 1);

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, VCN_DEC_CMD_TRAP << 1);
}

/**
 * vcn_v1_0_dec_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @job: job to retrieve vmid from
 * @ib: indirect buffer to execute
 * @flags: unused
 *
 * Write ring commands to execute the indirect buffer
 */
static void vcn_v1_0_dec_ring_emit_ib(struct amdgpu_ring *ring,
					struct amdgpu_job *job,
					struct amdgpu_ib *ib,
					uint32_t flags)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_VMID), 0));
	amdgpu_ring_write(ring, vmid);

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_64BIT_BAR_LOW), 0));
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH), 0));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_IB_SIZE), 0));
	amdgpu_ring_write(ring, ib->length_dw);
}

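/**
 * vcn_v1_0_dec_ring_emit_reg_wait - emit a register wait command
 *
 * @ring: amdgpu_ring pointer
 * @reg: register offset to poll
 * @val: expected value
 * @mask: mask to apply before comparing
 *
 * Emit a command that waits until the masked register matches the value
 */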
static void vcn_v1_0_dec_ring_emit_reg_wait(struct amdgpu_ring *ring,
					    uint32_t reg, uint32_t val,
					    uint32_t mask)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, val);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GP_SCRATCH8), 0));
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, VCN_DEC_CMD_REG_READ_COND_WAIT << 1);
}

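/**
 * vcn_v1_0_dec_ring_emit_vm_flush - emit a VM TLB flush
 *
 * @ring: amdgpu_ring pointer
 * @vmid: vmid to flush for
 * @pd_addr: page directory address
 *
 * Flush the GPU TLB for @vmid and wait until the hub has written
 * back the new page directory base.
 */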
static void vcn_v1_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
					    unsigned vmid, uint64_t pd_addr)
{
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub];
	uint32_t data0, data1, mask;

	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* wait for register write */
	data0 = hub->ctx0_ptb_addr_lo32 + vmid * hub->ctx_addr_distance;
	data1 = lower_32_bits(pd_addr);
	mask = 0xffffffff;
	vcn_v1_0_dec_ring_emit_reg_wait(ring, data0, data1, mask);
}

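/**
 * vcn_v1_0_dec_ring_emit_wreg - emit a register write command
 *
 * @ring: amdgpu_ring pointer
 * @reg: register offset to write
 * @val: value to write
 *
 * Write a register write command to the decode ring.
 */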
static void vcn_v1_0_dec_ring_emit_wreg(struct amdgpu_ring *ring,
					uint32_t reg, uint32_t val)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
	amdgpu_ring_write(ring, val);
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
	amdgpu_ring_write(ring, VCN_DEC_CMD_WRITE_REG << 1);
}

/**
 * vcn_v1_0_enc_ring_get_rptr - get enc read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc read pointer
 */
static uint64_t vcn_v1_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.inst->ring_enc[0])
		return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR);
	else
		return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2);
}

/**
 * vcn_v1_0_enc_ring_get_wptr - get enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc write pointer
 */
static uint64_t vcn_v1_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.inst->ring_enc[0])
		return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR);
	else
		return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2);
}

/**
 * vcn_v1_0_enc_ring_set_wptr - set enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the enc write pointer to the hardware
 */
static void vcn_v1_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.inst->ring_enc[0])
		WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR,
			lower_32_bits(ring->wptr));
	else
		WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2,
			lower_32_bits(ring->wptr));
}

/**
 * vcn_v1_0_enc_ring_emit_fence - emit an enc fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address
 * @seq: sequence number
 * @flags: fence related flags
 *
 * Write an enc fence and a trap command to the ring.
 */
static void vcn_v1_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
					 u64 seq, unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, VCN_ENC_CMD_FENCE);
	amdgpu_ring_write(ring, addr);
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, VCN_ENC_CMD_TRAP);
}

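/**
 * vcn_v1_0_enc_ring_insert_end - insert an end command
 *
 * @ring: amdgpu_ring pointer
 *
 * Write an end command to the enc ring.
 */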
static void vcn_v1_0_enc_ring_insert_end(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, VCN_ENC_CMD_END);
}

/**
 * vcn_v1_0_enc_ring_emit_ib - enc execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @job: job to retrieve vmid from
 * @ib: indirect buffer to execute
 * @flags: unused
 *
 * Write enc ring commands to execute the indirect buffer
 */
static void vcn_v1_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
				      struct amdgpu_job *job,
				      struct amdgpu_ib *ib,
				      uint32_t flags)
{
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);

	amdgpu_ring_write(ring, VCN_ENC_CMD_IB);
	amdgpu_ring_write(ring, vmid);
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
}

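/**
 * vcn_v1_0_enc_ring_emit_reg_wait - emit an enc register wait command
 *
 * @ring: amdgpu_ring pointer
 * @reg: register offset to poll
 * @val: value to wait for
 * @mask: mask to apply to the register before comparing
 *
 * Write a register wait command to the enc ring.
 */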
static void vcn_v1_0_enc_ring_emit_reg_wait(struct amdgpu_ring *ring,
					    uint32_t reg, uint32_t val,
					    uint32_t mask)
{
	amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WAIT);
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring, val);
}

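/**
 * vcn_v1_0_enc_ring_emit_vm_flush - emit an enc VM TLB flush
 *
 * @ring: amdgpu_ring pointer
 * @vmid: vmid to flush for
 * @pd_addr: page directory address
 *
 * Flush the GPU TLB for @vmid and wait until the hub has written
 * back the new page directory base.
 */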
static void vcn_v1_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
					    unsigned int vmid, uint64_t pd_addr)
{
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub];

	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* wait for reg writes */
	vcn_v1_0_enc_ring_emit_reg_wait(ring, hub->ctx0_ptb_addr_lo32 +
					vmid * hub->ctx_addr_distance,
					lower_32_bits(pd_addr), 0xffffffff);
}

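/**
 * vcn_v1_0_enc_ring_emit_wreg - emit an enc register write command
 *
 * @ring: amdgpu_ring pointer
 * @reg: register offset to write
 * @val: value to write
 *
 * Write a register write command to the enc ring.
 */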
static void vcn_v1_0_enc_ring_emit_wreg(struct amdgpu_ring *ring,
					uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WRITE);
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, val);
}

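/* Programming the interrupt state is a no-op for VCN 1.0. */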
static int vcn_v1_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	return 0;
}

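/**
 * vcn_v1_0_process_interrupt - process a VCN trap interrupt
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source
 * @entry: interrupt vector entry
 *
 * Dispatch fence processing to the ring matching the source id.
 */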
static int vcn_v1_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("IH: VCN TRAP\n");

	switch (entry->src_id) {
	case 124:
		amdgpu_fence_process(&adev->vcn.inst->ring_dec);
		break;
	case 119:
		amdgpu_fence_process(&adev->vcn.inst->ring_enc[0]);
		break;
	case 120:
		amdgpu_fence_process(&adev->vcn.inst->ring_enc[1]);
		break;
	default:
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}

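/**
 * vcn_v1_0_dec_ring_insert_nop - insert NOP commands
 *
 * @ring: amdgpu_ring pointer
 * @count: number of NOP dwords to insert
 *
 * Pad the decode ring with NOP register writes; the write pointer
 * must stay even, so NOPs are emitted as register/value pairs.
 */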
static void vcn_v1_0_dec_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	struct amdgpu_device *adev = ring->adev;
	int i;

	WARN_ON(ring->wptr % 2 || count % 2);

	for (i = 0; i < count / 2; i++) {
		amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP), 0));
		amdgpu_ring_write(ring, 0);
	}
}

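/**
 * vcn_v1_0_set_powergating_state - set VCN block powergating state
 *
 * @handle: amdgpu_device pointer
 * @state: powergating state to transition to
 *
 * Start or stop the block depending on @state; see the note below
 * on how this relates to the actual powergating done by the SMC.
 */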
static int vcn_v1_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	/* This doesn't actually powergate the VCN block.
	 * That's done in the dpm code via the SMC. This
	 * just re-inits the block as necessary. The actual
	 * gating still happens in the dpm code. We should
	 * revisit this when there is a cleaner line between
	 * the SMC and the hw blocks.
	 */
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (state == adev->vcn.cur_state)
		return 0;

	if (state == AMD_PG_STATE_GATE)
		ret = vcn_v1_0_stop(adev);
	else
		ret = vcn_v1_0_start(adev);

	if (!ret)
		adev->vcn.cur_state = state;
	return ret;
}

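/**
 * vcn_v1_0_idle_work_handler - VCN idle worker
 *
 * @work: delayed work item embedded in adev->vcn.idle_work
 *
 * Adjust the DPG pause state from the outstanding fence counts,
 * gate the block once every ring is idle, and otherwise re-arm
 * the delayed work.
 */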
static void vcn_v1_0_idle_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, vcn.idle_work.work);
	unsigned int fences = 0, i;

	for (i = 0; i < adev->vcn.num_enc_rings; ++i)
		fences += amdgpu_fence_count_emitted(&adev->vcn.inst->ring_enc[i]);

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
		struct dpg_pause_state new_state;

		if (fences)
			new_state.fw_based = VCN_DPG_STATE__PAUSE;
		else
			new_state.fw_based = VCN_DPG_STATE__UNPAUSE;

		if (amdgpu_fence_count_emitted(adev->jpeg.inst->ring_dec))
			new_state.jpeg = VCN_DPG_STATE__PAUSE;
		else
			new_state.jpeg = VCN_DPG_STATE__UNPAUSE;

		adev->vcn.pause_dpg_mode(adev, 0, &new_state);
	}

	fences += amdgpu_fence_count_emitted(adev->jpeg.inst->ring_dec);
	fences += amdgpu_fence_count_emitted(&adev->vcn.inst->ring_dec);

	if (fences == 0) {
		amdgpu_gfx_off_ctrl(adev, true);
		if (adev->pm.dpm_enabled)
			amdgpu_dpm_enable_uvd(adev, false);
		else
			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
							       AMD_PG_STATE_GATE);
	} else {
		schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
	}
}

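/**
 * vcn_v1_0_ring_begin_use - prepare a ring for use
 *
 * @ring: amdgpu_ring pointer
 *
 * Cancel the pending idle work, take the shared VCN1/JPEG1
 * workaround lock, drain the JPEG decode ring and ungate power
 * before commands are submitted.
 */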
static void vcn_v1_0_ring_begin_use(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.idle_work);

	mutex_lock(&adev->vcn.vcn1_jpeg1_workaround);

	if (amdgpu_fence_wait_empty(ring->adev->jpeg.inst->ring_dec))
		DRM_ERROR("VCN dec: jpeg dec ring may not be empty\n");

	vcn_v1_0_set_pg_for_begin_use(ring, set_clocks);
}

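/**
 * vcn_v1_0_set_pg_for_begin_use - ungate power before ring use
 *
 * @ring: amdgpu_ring pointer
 * @set_clocks: whether clocks and power need to be re-enabled
 *
 * Ungate the block if requested and update the DPG pause state
 * for the ring that is about to be used.
 */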
void vcn_v1_0_set_pg_for_begin_use(struct amdgpu_ring *ring, bool set_clocks)
{
	struct amdgpu_device *adev = ring->adev;

	if (set_clocks) {
		amdgpu_gfx_off_ctrl(adev, false);
		if (adev->pm.dpm_enabled)
			amdgpu_dpm_enable_uvd(adev, true);
		else
			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
							       AMD_PG_STATE_UNGATE);
	}

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
		struct dpg_pause_state new_state;
		unsigned int fences = 0, i;

		for (i = 0; i < adev->vcn.num_enc_rings; ++i)
			fences += amdgpu_fence_count_emitted(&adev->vcn.inst->ring_enc[i]);

		if (fences)
			new_state.fw_based = VCN_DPG_STATE__PAUSE;
		else
			new_state.fw_based = VCN_DPG_STATE__UNPAUSE;

		if (amdgpu_fence_count_emitted(adev->jpeg.inst->ring_dec))
			new_state.jpeg = VCN_DPG_STATE__PAUSE;
		else
			new_state.jpeg = VCN_DPG_STATE__UNPAUSE;

		if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
			new_state.fw_based = VCN_DPG_STATE__PAUSE;
		else if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG)
			new_state.jpeg = VCN_DPG_STATE__PAUSE;

		adev->vcn.pause_dpg_mode(adev, 0, &new_state);
	}
}

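/**
 * vcn_v1_0_ring_end_use - finish using a ring
 *
 * @ring: amdgpu_ring pointer
 *
 * Re-arm the idle work and drop the VCN1/JPEG1 workaround lock.
 */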
void vcn_v1_0_ring_end_use(struct amdgpu_ring *ring)
{
	schedule_delayed_work(&ring->adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
	mutex_unlock(&ring->adev->vcn.vcn1_jpeg1_workaround);
}

static const struct amd_ip_funcs vcn_v1_0_ip_funcs = {
	.name = "vcn_v1_0",
	.early_init = vcn_v1_0_early_init,
	.late_init = NULL,
	.sw_init = vcn_v1_0_sw_init,
	.sw_fini = vcn_v1_0_sw_fini,
	.hw_init = vcn_v1_0_hw_init,
	.hw_fini = vcn_v1_0_hw_fini,
	.suspend = vcn_v1_0_suspend,
	.resume = vcn_v1_0_resume,
	.is_idle = vcn_v1_0_is_idle,
	.wait_for_idle = vcn_v1_0_wait_for_idle,
	.check_soft_reset = NULL /* vcn_v1_0_check_soft_reset */,
	.pre_soft_reset = NULL /* vcn_v1_0_pre_soft_reset */,
	.soft_reset = NULL /* vcn_v1_0_soft_reset */,
	.post_soft_reset = NULL /* vcn_v1_0_post_soft_reset */,
	.set_clockgating_state = vcn_v1_0_set_clockgating_state,
	.set_powergating_state = vcn_v1_0_set_powergating_state,
};

/*
 * It is a hardware issue that VCN can't handle a GTT TMZ buffer on
 * CHIP_RAVEN series ASICs. Move such a GTT TMZ buffer to the VRAM
 * domain before command submission as a workaround.
 */
static int vcn_v1_0_validate_bo(struct amdgpu_cs_parser *parser,
				struct amdgpu_job *job,
				uint64_t addr)
{
	struct ttm_operation_ctx ctx = { false, false };
	struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_bo *bo;
	int r;

	addr &= AMDGPU_GMC_HOLE_MASK;
	if (addr & 0x7) {
		DRM_ERROR("VCN messages must be 8 byte aligned!\n");
		return -EINVAL;
	}

	mapping = amdgpu_vm_bo_lookup_mapping(vm, addr/AMDGPU_GPU_PAGE_SIZE);
	if (!mapping || !mapping->bo_va || !mapping->bo_va->base.bo)
		return -EINVAL;

	bo = mapping->bo_va->base.bo;
	if (!(bo->flags & AMDGPU_GEM_CREATE_ENCRYPTED))
		return 0;

	amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (r) {
		DRM_ERROR("Failed to validate the VCN message BO (%d)!\n", r);
		return r;
	}

	return r;
}

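/**
 * vcn_v1_0_ring_patch_cs_in_place - patch a secure command stream
 *
 * @p: amdgpu_cs_parser pointer
 * @job: job the indirect buffer belongs to
 * @ib: indirect buffer to patch
 *
 * Scan a secure IB for message buffer addresses and validate each
 * referenced BO so encrypted buffers land in VRAM (see the TMZ
 * workaround note above).
 */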
static int vcn_v1_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
					   struct amdgpu_job *job,
					   struct amdgpu_ib *ib)
{
	uint32_t msg_lo = 0, msg_hi = 0;
	int i, r;

	if (!(ib->flags & AMDGPU_IB_FLAGS_SECURE))
		return 0;

	for (i = 0; i < ib->length_dw; i += 2) {
		uint32_t reg = amdgpu_ib_get_value(ib, i);
		uint32_t val = amdgpu_ib_get_value(ib, i + 1);

		if (reg == PACKET0(p->adev->vcn.internal.data0, 0)) {
			msg_lo = val;
		} else if (reg == PACKET0(p->adev->vcn.internal.data1, 0)) {
			msg_hi = val;
		} else if (reg == PACKET0(p->adev->vcn.internal.cmd, 0)) {
			r = vcn_v1_0_validate_bo(p, job,
						 ((u64)msg_hi) << 32 | msg_lo);
			if (r)
				return r;
		}
	}

	return 0;
}

static const struct amdgpu_ring_funcs vcn_v1_0_dec_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_DEC,
	.align_mask = 0xf,
	.support_64bit_ptrs = false,
	.no_user_fence = true,
	.secure_submission_supported = true,
	.get_rptr = vcn_v1_0_dec_ring_get_rptr,
	.get_wptr = vcn_v1_0_dec_ring_get_wptr,
	.set_wptr = vcn_v1_0_dec_ring_set_wptr,
	.patch_cs_in_place = vcn_v1_0_ring_patch_cs_in_place,
	.emit_frame_size =
		6 + 6 + /* hdp invalidate / flush */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
		8 + /* vcn_v1_0_dec_ring_emit_vm_flush */
		14 + 14 + /* vcn_v1_0_dec_ring_emit_fence x2 vm fence */
		6,
	.emit_ib_size = 8, /* vcn_v1_0_dec_ring_emit_ib */
	.emit_ib = vcn_v1_0_dec_ring_emit_ib,
	.emit_fence = vcn_v1_0_dec_ring_emit_fence,
	.emit_vm_flush = vcn_v1_0_dec_ring_emit_vm_flush,
	.test_ring = amdgpu_vcn_dec_ring_test_ring,
	.test_ib = amdgpu_vcn_dec_ring_test_ib,
	.insert_nop = vcn_v1_0_dec_ring_insert_nop,
	.insert_start = vcn_v1_0_dec_ring_insert_start,
	.insert_end = vcn_v1_0_dec_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = vcn_v1_0_ring_begin_use,
	.end_use = vcn_v1_0_ring_end_use,
	.emit_wreg = vcn_v1_0_dec_ring_emit_wreg,
	.emit_reg_wait = vcn_v1_0_dec_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

static const struct amdgpu_ring_funcs vcn_v1_0_enc_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_ENC,
	.align_mask = 0x3f,
	.nop = VCN_ENC_CMD_NO_OP,
	.support_64bit_ptrs = false,
	.no_user_fence = true,
	.get_rptr = vcn_v1_0_enc_ring_get_rptr,
	.get_wptr = vcn_v1_0_enc_ring_get_wptr,
	.set_wptr = vcn_v1_0_enc_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
		4 + /* vcn_v1_0_enc_ring_emit_vm_flush */
		5 + 5 + /* vcn_v1_0_enc_ring_emit_fence x2 vm fence */
		1, /* vcn_v1_0_enc_ring_insert_end */
	.emit_ib_size = 5, /* vcn_v1_0_enc_ring_emit_ib */
	.emit_ib = vcn_v1_0_enc_ring_emit_ib,
	.emit_fence = vcn_v1_0_enc_ring_emit_fence,
	.emit_vm_flush = vcn_v1_0_enc_ring_emit_vm_flush,
	.test_ring = amdgpu_vcn_enc_ring_test_ring,
	.test_ib = amdgpu_vcn_enc_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.insert_end = vcn_v1_0_enc_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = vcn_v1_0_ring_begin_use,
	.end_use = vcn_v1_0_ring_end_use,
	.emit_wreg = vcn_v1_0_enc_ring_emit_wreg,
	.emit_reg_wait = vcn_v1_0_enc_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev)
{
	adev->vcn.inst->ring_dec.funcs = &vcn_v1_0_dec_ring_vm_funcs;
	DRM_INFO("VCN decode is enabled in VM mode\n");
}

static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->vcn.num_enc_rings; ++i)
		adev->vcn.inst->ring_enc[i].funcs = &vcn_v1_0_enc_ring_vm_funcs;

	DRM_INFO("VCN encode is enabled in VM mode\n");
}

static const struct amdgpu_irq_src_funcs vcn_v1_0_irq_funcs = {
	.set = vcn_v1_0_set_interrupt_state,
	.process = vcn_v1_0_process_interrupt,
};

static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->vcn.inst->irq.num_types = adev->vcn.num_enc_rings + 2;
	adev->vcn.inst->irq.funcs = &vcn_v1_0_irq_funcs;
}

const struct amdgpu_ip_block_version vcn_v1_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_VCN,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &vcn_v1_0_ip_funcs,
};