/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/drm.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_vcn.h"
#include "soc15d.h"
#include "soc15_common.h"

#include "vcn/vcn_1_0_offset.h"

/* 1 second timeout */
#define VCN_IDLE_TIMEOUT	msecs_to_jiffies(1000)

/* Firmware Names */
#define FIRMWARE_RAVEN		"amdgpu/raven_vcn.bin"

MODULE_FIRMWARE(FIRMWARE_RAVEN);

static void amdgpu_vcn_idle_work_handler(struct work_struct *work);

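/**
 * amdgpu_vcn_sw_init - load VCN firmware and allocate the VCPU buffer object
 * @adev: amdgpu device pointer
 *
 * Requests and validates the VCN firmware for the ASIC, logs the firmware
 * version, and allocates the VRAM buffer object backing the VCN VCPU
 * (stack, heap and session memory, plus the firmware image when it is not
 * loaded by the PSP).
 */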
int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
{
	unsigned long bo_size;
	const char *fw_name;
	const struct common_firmware_header *hdr;
	unsigned char fw_check;
	int r;

	INIT_DELAYED_WORK(&adev->vcn.idle_work, amdgpu_vcn_idle_work_handler);

	switch (adev->asic_type) {
	case CHIP_RAVEN:
		fw_name = FIRMWARE_RAVEN;
		break;
	default:
		return -EINVAL;
	}

	r = request_firmware(&adev->vcn.fw, fw_name, adev->dev);
	if (r) {
		dev_err(adev->dev, "amdgpu_vcn: Can't load firmware \"%s\"\n",
			fw_name);
		return r;
	}

	r = amdgpu_ucode_validate(adev->vcn.fw);
	if (r) {
		dev_err(adev->dev, "amdgpu_vcn: Can't validate firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->vcn.fw);
		adev->vcn.fw = NULL;
		return r;
	}

	hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
	adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);

	/* Bits 20-23 hold the encode major version and are non-zero in the new
	 * naming convention. In the old naming convention this field is part of
	 * the version minor and DRM_DISABLED_FLAG. Since the latest version
	 * minor is 0x5B and DRM_DISABLED_FLAG is zero in the old convention,
	 * this field is always zero so far. These four bits are used to tell
	 * which naming convention is in use.
	 */
	fw_check = (le32_to_cpu(hdr->ucode_version) >> 20) & 0xf;
	if (fw_check) {
		unsigned int dec_ver, enc_major, enc_minor, vep, fw_rev;

		fw_rev = le32_to_cpu(hdr->ucode_version) & 0xfff;
		enc_minor = (le32_to_cpu(hdr->ucode_version) >> 12) & 0xff;
		enc_major = fw_check;
		dec_ver = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xf;
		vep = (le32_to_cpu(hdr->ucode_version) >> 28) & 0xf;
		DRM_INFO("Found VCN firmware Version ENC: %hu.%hu DEC: %hu VEP: %hu Revision: %hu\n",
			 enc_major, enc_minor, dec_ver, vep, fw_rev);
	} else {
		unsigned int version_major, version_minor, family_id;

		family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
		version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
		version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
		DRM_INFO("Found VCN firmware Version: %hu.%hu Family ID: %hu\n",
			 version_major, version_minor, family_id);
	}

	bo_size = AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_HEAP_SIZE
		  + AMDGPU_VCN_SESSION_SIZE * 40;
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
	r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.vcpu_bo,
				    &adev->vcn.gpu_addr, &adev->vcn.cpu_addr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate vcn bo\n", r);
		return r;
	}

	return 0;
}

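/**
 * amdgpu_vcn_sw_fini - free VCN resources
 * @adev: amdgpu device pointer
 *
 * Frees any saved VCPU state, the VCPU buffer object and the decode,
 * encode and JPEG rings, then releases the firmware image.
 */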
int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
{
	int i;

	kvfree(adev->vcn.saved_bo);

	amdgpu_bo_free_kernel(&adev->vcn.vcpu_bo,
			      &adev->vcn.gpu_addr,
			      (void **)&adev->vcn.cpu_addr);

	amdgpu_ring_fini(&adev->vcn.ring_dec);

	for (i = 0; i < adev->vcn.num_enc_rings; ++i)
		amdgpu_ring_fini(&adev->vcn.ring_enc[i]);

	amdgpu_ring_fini(&adev->vcn.ring_jpeg);

	release_firmware(adev->vcn.fw);

	return 0;
}

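/**
 * amdgpu_vcn_suspend - save VCN VCPU buffer state
 * @adev: amdgpu device pointer
 *
 * Cancels the idle work and copies the VCPU buffer contents from VRAM
 * into a kernel allocation so they can be restored on resume.
 */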
int amdgpu_vcn_suspend(struct amdgpu_device *adev)
{
	unsigned size;
	void *ptr;

	cancel_delayed_work_sync(&adev->vcn.idle_work);

	if (adev->vcn.vcpu_bo == NULL)
		return 0;

	size = amdgpu_bo_size(adev->vcn.vcpu_bo);
	ptr = adev->vcn.cpu_addr;

	adev->vcn.saved_bo = kvmalloc(size, GFP_KERNEL);
	if (!adev->vcn.saved_bo)
		return -ENOMEM;

	memcpy_fromio(adev->vcn.saved_bo, ptr, size);

	return 0;
}

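/**
 * amdgpu_vcn_resume - restore VCN VCPU buffer state
 * @adev: amdgpu device pointer
 *
 * Restores the VCPU buffer from the copy saved at suspend time, or, if no
 * saved state exists, re-copies the firmware image (when it is not loaded
 * by the PSP) and clears the remainder of the buffer.
 */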
int amdgpu_vcn_resume(struct amdgpu_device *adev)
{
	unsigned size;
	void *ptr;

	if (adev->vcn.vcpu_bo == NULL)
		return -EINVAL;

	size = amdgpu_bo_size(adev->vcn.vcpu_bo);
	ptr = adev->vcn.cpu_addr;

	if (adev->vcn.saved_bo != NULL) {
		memcpy_toio(ptr, adev->vcn.saved_bo, size);
		kvfree(adev->vcn.saved_bo);
		adev->vcn.saved_bo = NULL;
	} else {
		const struct common_firmware_header *hdr;
		unsigned offset;

		hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
		if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
			offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
			memcpy_toio(adev->vcn.cpu_addr, adev->vcn.fw->data + offset,
				    le32_to_cpu(hdr->ucode_size_bytes));
			size -= le32_to_cpu(hdr->ucode_size_bytes);
			ptr += le32_to_cpu(hdr->ucode_size_bytes);
		}
		memset_io(ptr, 0, size);
	}

	return 0;
}

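/**
 * amdgpu_vcn_idle_work_handler - power gate VCN when it goes idle
 * @work: delayed work item
 *
 * Counts the fences still emitted on the decode, encode and JPEG rings.
 * If none are outstanding, the VCN block is powered down via DPM or power
 * gated; otherwise the work is rescheduled to check again later.
 */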
static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, vcn.idle_work.work);
	unsigned fences = amdgpu_fence_count_emitted(&adev->vcn.ring_dec);
	unsigned i;

	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
		fences += amdgpu_fence_count_emitted(&adev->vcn.ring_enc[i]);
	}

	fences += amdgpu_fence_count_emitted(&adev->vcn.ring_jpeg);

	if (fences == 0) {
		if (adev->pm.dpm_enabled)
			amdgpu_dpm_enable_uvd(adev, false);
		else
			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
							       AMD_PG_STATE_GATE);
	} else {
		schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
	}
}

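/**
 * amdgpu_vcn_ring_begin_use - power up VCN before ring use
 * @ring: ring about to be used
 *
 * Cancels any pending idle work. If no work was pending, the block may
 * already be powered down, so it is ungated (or re-enabled via DPM) here.
 */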
void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.idle_work);

	if (set_clocks) {
		if (adev->pm.dpm_enabled)
			amdgpu_dpm_enable_uvd(adev, true);
		else
			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
							       AMD_PG_STATE_UNGATE);
	}
}

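/*
 * amdgpu_vcn_ring_end_use - schedule the idle check once ring use is done
 */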
void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring)
{
	schedule_delayed_work(&ring->adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
}

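/**
 * amdgpu_vcn_dec_ring_test_ring - basic decode ring test
 * @ring: decode ring to test
 *
 * Writes 0xCAFEDEAD to the UVD_CONTEXT_ID register, asks the ring to
 * overwrite it with 0xDEADBEEF and polls until the value changes, to
 * verify that the ring can process packets.
 */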
int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r) {
		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}
	amdgpu_ring_write(ring,
		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID));
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i < adev->usec_timeout) {
		DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
			  ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
			  ring->idx, tmp);
		r = -EINVAL;
	}
	return r;
}

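/**
 * amdgpu_vcn_dec_send_msg - pass a decoder message buffer to the firmware
 * @ring: decode ring
 * @bo: reserved buffer object holding the message
 * @fence: optional fence returned for the submission
 *
 * Builds a small IB that points the VCPU at the message buffer, issues the
 * GPCOM command and submits it directly on the ring. On success the buffer
 * is fenced against the submission; in all cases it is unreserved and its
 * reference dropped.
 */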
static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
				   struct amdgpu_bo *bo,
				   struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct dma_fence *f = NULL;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	uint64_t addr;
	int i, r;

	r = amdgpu_job_alloc_with_ib(adev, 64, &job);
	if (r)
		goto err;

	ib = &job->ibs[0];
	addr = amdgpu_bo_gpu_offset(bo);
	ib->ptr[0] = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0);
	ib->ptr[1] = addr;
	ib->ptr[2] = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0);
	ib->ptr[3] = addr >> 32;
	ib->ptr[4] = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0);
	ib->ptr[5] = 0;
	for (i = 6; i < 16; i += 2) {
		ib->ptr[i] = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP), 0);
		ib->ptr[i+1] = 0;
	}
	ib->length_dw = 16;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err_free;

	amdgpu_bo_fence(bo, f, false);
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err_free:
	amdgpu_job_free(job);

err:
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);
	return r;
}

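/**
 * amdgpu_vcn_dec_get_create_msg - build and send a decoder "create" message
 * @ring: decode ring
 * @handle: session handle to create
 * @fence: optional fence returned for the submission
 *
 * Allocates a message buffer in VRAM, fills in the create-session message
 * for the given handle and hands it to amdgpu_vcn_dec_send_msg().
 */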
static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
					 struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_bo *bo = NULL;
	uint32_t *msg;
	int r, i;

	r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &bo, NULL, (void **)&msg);
	if (r)
		return r;

	msg[0] = cpu_to_le32(0x00000028);
	msg[1] = cpu_to_le32(0x00000038);
	msg[2] = cpu_to_le32(0x00000001);
	msg[3] = cpu_to_le32(0x00000000);
	msg[4] = cpu_to_le32(handle);
	msg[5] = cpu_to_le32(0x00000000);
	msg[6] = cpu_to_le32(0x00000001);
	msg[7] = cpu_to_le32(0x00000028);
	msg[8] = cpu_to_le32(0x00000010);
	msg[9] = cpu_to_le32(0x00000000);
	msg[10] = cpu_to_le32(0x00000007);
	msg[11] = cpu_to_le32(0x00000000);
	msg[12] = cpu_to_le32(0x00000780);
	msg[13] = cpu_to_le32(0x00000440);
	for (i = 14; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	return amdgpu_vcn_dec_send_msg(ring, bo, fence);
}

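/**
 * amdgpu_vcn_dec_get_destroy_msg - build and send a decoder "destroy" message
 * @ring: decode ring
 * @handle: session handle to destroy
 * @fence: optional fence returned for the submission
 *
 * Same as the create path, but with the message body that tears the
 * session down again.
 */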
static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
					  struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_bo *bo = NULL;
	uint32_t *msg;
	int r, i;

	r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &bo, NULL, (void **)&msg);
	if (r)
		return r;

	msg[0] = cpu_to_le32(0x00000028);
	msg[1] = cpu_to_le32(0x00000018);
	msg[2] = cpu_to_le32(0x00000000);
	msg[3] = cpu_to_le32(0x00000002);
	msg[4] = cpu_to_le32(handle);
	msg[5] = cpu_to_le32(0x00000000);
	for (i = 6; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	return amdgpu_vcn_dec_send_msg(ring, bo, fence);
}

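/**
 * amdgpu_vcn_dec_ring_test_ib - decode IB test
 * @ring: decode ring to test
 * @timeout: how long to wait for the fence, in jiffies
 *
 * Sends a create message followed by a destroy message and waits for the
 * fence of the destroy submission to signal.
 */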
int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence;
	long r;

	r = amdgpu_vcn_dec_get_create_msg(ring, 1, NULL);
	if (r) {
		DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r);
		goto error;
	}

	r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &fence);
	if (r) {
		DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
		goto error;
	}

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0) {
		DRM_ERROR("amdgpu: IB test timed out.\n");
		r = -ETIMEDOUT;
	} else if (r < 0) {
		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
	} else {
		DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
		r = 0;
	}

	dma_fence_put(fence);

error:
	return r;
}

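/**
 * amdgpu_vcn_enc_ring_test_ring - basic encode ring test
 * @ring: encode ring to test
 *
 * Submits an END command and polls the read pointer; if it advances within
 * the timeout, the ring is processing packets.
 */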
int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr = amdgpu_ring_get_rptr(ring);
	unsigned i;
	int r;

	r = amdgpu_ring_alloc(ring, 16);
	if (r) {
		DRM_ERROR("amdgpu: vcn enc failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}
	amdgpu_ring_write(ring, VCN_ENC_CMD_END);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		DRM_UDELAY(1);
	}

	if (i < adev->usec_timeout) {
		DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
			  ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed\n",
			  ring->idx);
		r = -ETIMEDOUT;
	}

	return r;
}

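/**
 * amdgpu_vcn_enc_get_create_msg - build and submit an encoder "open session" IB
 * @ring: encode ring
 * @handle: session handle to open
 * @fence: optional fence returned for the submission
 *
 * Fills an IB with the session info, task info and initialize commands for
 * the given handle and submits it directly on the ring.
 */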
static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
					 struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t dummy;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	dummy = ib->gpu_addr + 1024;

	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
	ib->ptr[ib->length_dw++] = dummy;
	ib->ptr[ib->length_dw++] = 0x0000000b;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

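/**
 * amdgpu_vcn_enc_get_destroy_msg - build and submit an encoder "close session" IB
 * @ring: encode ring
 * @handle: session handle to close
 * @fence: optional fence returned for the submission
 *
 * Mirrors the create path, but ends the session with the close-session op.
 */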
static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
					  struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t dummy;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	dummy = ib->gpu_addr + 1024;

	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001;
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
	ib->ptr[ib->length_dw++] = dummy;
	ib->ptr[ib->length_dw++] = 0x0000000b;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002;
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

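/**
 * amdgpu_vcn_enc_ring_test_ib - encode IB test
 * @ring: encode ring to test
 * @timeout: how long to wait for the fence, in jiffies
 *
 * Opens and closes an encode session and waits for the fence of the close
 * submission to signal.
 */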
int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence = NULL;
	long r;

	r = amdgpu_vcn_enc_get_create_msg(ring, 1, NULL);
	if (r) {
		DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r);
		goto error;
	}

	r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, &fence);
	if (r) {
		DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
		goto error;
	}

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0) {
		DRM_ERROR("amdgpu: IB test timed out.\n");
		r = -ETIMEDOUT;
	} else if (r < 0) {
		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
	} else {
		DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
		r = 0;
	}
error:
	dma_fence_put(fence);
	return r;
}

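/**
 * amdgpu_vcn_jpeg_ring_test_ring - basic JPEG ring test
 * @ring: JPEG ring to test
 *
 * Same scheme as the decode ring test: write 0xCAFEDEAD into
 * UVD_CONTEXT_ID, ask the ring to overwrite it with 0xDEADBEEF and poll
 * for the new value.
 */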
int amdgpu_vcn_jpeg_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);

	if (r) {
		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}

	amdgpu_ring_write(ring,
		PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0, 0, 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID));
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i < adev->usec_timeout) {
		DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
			  ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
			  ring->idx, tmp);
		r = -EINVAL;
	}

	return r;
}

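/**
 * amdgpu_vcn_jpeg_set_reg - submit an IB that writes a JPEG register
 * @ring: JPEG ring
 * @handle: session handle (unused)
 * @fence: optional fence returned for the submission
 *
 * Builds an IB that writes 0xDEADBEEF to UVD_JPEG_PITCH, padded with
 * type-6 NOP packets, and submits it directly on the ring.
 */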
static int amdgpu_vcn_jpeg_set_reg(struct amdgpu_ring *ring, uint32_t handle,
				   struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	const unsigned ib_size_dw = 16;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];

	ib->ptr[0] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_PITCH), 0, 0, PACKETJ_TYPE0);
	ib->ptr[1] = 0xDEADBEEF;
	for (i = 2; i < 16; i += 2) {
		ib->ptr[i] = PACKETJ(0, 0, 0, PACKETJ_TYPE6);
		ib->ptr[i+1] = 0;
	}
	ib->length_dw = 16;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);

	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

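/**
 * amdgpu_vcn_jpeg_ring_test_ib - JPEG IB test
 * @ring: JPEG ring to test
 * @timeout: how long to wait for the fence, in jiffies
 *
 * Submits the register-write IB, waits for its fence, then polls
 * UVD_JPEG_PITCH until it reads back 0xDEADBEEF.
 */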
int amdgpu_vcn_jpeg_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	struct dma_fence *fence = NULL;
	long r = 0;

	r = amdgpu_vcn_jpeg_set_reg(ring, 1, &fence);
	if (r) {
		DRM_ERROR("amdgpu: failed to set jpeg register (%ld).\n", r);
		goto error;
	}

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0) {
		DRM_ERROR("amdgpu: IB test timed out.\n");
		r = -ETIMEDOUT;
		goto error;
	} else if (r < 0) {
		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
		goto error;
	} else
		r = 0;

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_PITCH));
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i < adev->usec_timeout)
		DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
	else {
		DRM_ERROR("ib test failed (0x%08X)\n", tmp);
		r = -EINVAL;
	}

error:
	/* put the fence on all exit paths so a timeout does not leak it */
	dma_fence_put(fence);
	return r;
}