/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>

#include "amdgpu.h"
#include "amdgpu_uvd.h"
#include "vid.h"
#include "uvd/uvd_6_0_d.h"
#include "uvd/uvd_6_0_sh_mask.h"
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
#include "smu/smu_7_1_3_d.h"
#include "smu/smu_7_1_3_sh_mask.h"
#include "bif/bif_5_1_d.h"
#include "gmc/gmc_8_1_d.h"
#include "vi.h"
#include "ivsrcid/ivsrcid_vislands30.h"

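/*
 * Firmware versions are packed the same way as adev->uvd.fw_version:
 * major in bits 31:24, minor in bits 23:16 and a third version field in
 * bits 15:8, so FW_1_130_16 below reads as version 1.130.16, the
 * minimum version uvd_v6_0_enc_support() accepts for the encode rings.
 */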
/* Polaris10/11/12 firmware version */
#define FW_1_130_16 ((1 << 24) | (130 << 16) | (16 << 8))

static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v6_0_set_enc_ring_funcs(struct amdgpu_device *adev);

static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v6_0_start(struct amdgpu_device *adev);
static void uvd_v6_0_stop(struct amdgpu_device *adev);
static void uvd_v6_0_set_sw_clock_gating(struct amdgpu_device *adev);
static int uvd_v6_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state);
static void uvd_v6_0_enable_mgcg(struct amdgpu_device *adev,
				 bool enable);

/**
 * uvd_v6_0_enc_support - get encode support status
 *
 * @adev: amdgpu_device pointer
 *
 * Returns the current hardware encode support status
 */
static inline bool uvd_v6_0_enc_support(struct amdgpu_device *adev)
{
	return ((adev->asic_type >= CHIP_POLARIS10) &&
		(adev->asic_type <= CHIP_VEGAM) &&
		(!adev->uvd.fw_version || adev->uvd.fw_version >= FW_1_130_16));
}

/**
 * uvd_v6_0_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t uvd_v6_0_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_RPTR);
}

/**
 * uvd_v6_0_enc_ring_get_rptr - get enc read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc read pointer
 */
static uint64_t uvd_v6_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->uvd.inst->ring_enc[0])
		return RREG32(mmUVD_RB_RPTR);
	else
		return RREG32(mmUVD_RB_RPTR2);
}

/**
 * uvd_v6_0_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t uvd_v6_0_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_WPTR);
}

/**
 * uvd_v6_0_enc_ring_get_wptr - get enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc write pointer
 */
static uint64_t uvd_v6_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->uvd.inst->ring_enc[0])
		return RREG32(mmUVD_RB_WPTR);
	else
		return RREG32(mmUVD_RB_WPTR2);
}

/**
 * uvd_v6_0_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void uvd_v6_0_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
}

/**
 * uvd_v6_0_enc_ring_set_wptr - set enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the enc write pointer to the hardware
 */
static void uvd_v6_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->uvd.inst->ring_enc[0])
		WREG32(mmUVD_RB_WPTR,
			lower_32_bits(ring->wptr));
	else
		WREG32(mmUVD_RB_WPTR2,
			lower_32_bits(ring->wptr));
}

/**
 * uvd_v6_0_enc_ring_test_ring - test if UVD ENC ring is working
 *
 * @ring: the engine to test on
 *
 */
static int uvd_v6_0_enc_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr;
	unsigned i;
	int r;

	r = amdgpu_ring_alloc(ring, 16);
	if (r)
		return r;

	rptr = amdgpu_ring_get_rptr(ring);

	amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
	amdgpu_ring_commit(ring);

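	/*
	 * HEVC_ENC_CMD_END writes nothing back, so the ring is considered
	 * alive once the hardware read pointer has advanced past the
	 * submitted packet within the usual register timeout.
	 */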
	for (i = 0; i < adev->usec_timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

/**
 * uvd_v6_0_enc_get_create_msg - generate a UVD ENC create msg
 *
 * @ring: ring we should submit the msg to
 * @handle: session handle to use
 * @bo: amdgpu object for which we query the offset
 * @fence: optional fence to return
 *
 * Open up a stream for HW test
 */
static int uvd_v6_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
				       struct amdgpu_bo *bo,
				       struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t addr;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
				     AMDGPU_IB_POOL_DIRECT, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	addr = amdgpu_bo_gpu_offset(bo);

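	/*
	 * Hand-rolled session-create message: a sequence of size dwords,
	 * each followed by a command and its payload. The session info
	 * command carries the handle and the GPU address of the backing
	 * buffer, and the stream ends with the initialize opcode.
	 */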
	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = 0x00010000;
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = addr;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000001;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);
	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

/**
 * uvd_v6_0_enc_get_destroy_msg - generate a UVD ENC destroy msg
 *
 * @ring: ring we should submit the msg to
 * @handle: session handle to use
 * @bo: amdgpu object for which we query the offset
 * @fence: optional fence to return
 *
 * Close up a stream for HW test or if userspace failed to do so
 */
static int uvd_v6_0_enc_get_destroy_msg(struct amdgpu_ring *ring,
					uint32_t handle,
					struct amdgpu_bo *bo,
					struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t addr;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
				     AMDGPU_IB_POOL_DIRECT, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	addr = amdgpu_bo_gpu_offset(bo);

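	/* Same message layout as the create stream above; only the final
	 * opcode differs (0x08000002, close session).
	 */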
	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = 0x00010000;
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = addr;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000001;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);
	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

/**
 * uvd_v6_0_enc_ring_test_ib - test if UVD ENC IBs are working
 *
 * @ring: the engine to test on
 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 */
static int uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence = NULL;
	struct amdgpu_bo *bo = NULL;
	long r;

	r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &bo, NULL, NULL);
	if (r)
		return r;

	r = uvd_v6_0_enc_get_create_msg(ring, 1, bo, NULL);
	if (r)
		goto error;

	r = uvd_v6_0_enc_get_destroy_msg(ring, 1, bo, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0)
		r = -ETIMEDOUT;
	else if (r > 0)
		r = 0;

error:
	dma_fence_put(fence);
	amdgpu_bo_unpin(bo);
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);
	return r;
}

static int uvd_v6_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->uvd.num_uvd_inst = 1;

	if (!(adev->flags & AMD_IS_APU) &&
	    (RREG32_SMC(ixCC_HARVEST_FUSES) & CC_HARVEST_FUSES__UVD_DISABLE_MASK))
		return -ENOENT;

	uvd_v6_0_set_ring_funcs(adev);

	if (uvd_v6_0_enc_support(adev)) {
		adev->uvd.num_enc_rings = 2;
		uvd_v6_0_set_enc_ring_funcs(adev);
	}

	uvd_v6_0_set_irq_funcs(adev);

	return 0;
}

static int uvd_v6_0_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int i, r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* UVD TRAP */
	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_UVD_SYSTEM_MESSAGE, &adev->uvd.inst->irq);
	if (r)
		return r;

	/* UVD ENC TRAP */
	if (uvd_v6_0_enc_support(adev)) {
		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
			r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i + VISLANDS30_IV_SRCID_UVD_ENC_GEN_PURP, &adev->uvd.inst->irq);
			if (r)
				return r;
		}
	}

	r = amdgpu_uvd_sw_init(adev);
	if (r)
		return r;

	if (!uvd_v6_0_enc_support(adev)) {
		for (i = 0; i < adev->uvd.num_enc_rings; ++i)
			adev->uvd.inst->ring_enc[i].funcs = NULL;

		adev->uvd.inst->irq.num_types = 1;
		adev->uvd.num_enc_rings = 0;

		DRM_INFO("UVD ENC is disabled\n");
	}

	ring = &adev->uvd.inst->ring;
	sprintf(ring->name, "uvd");
	r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0,
			     AMDGPU_RING_PRIO_DEFAULT, NULL);
	if (r)
		return r;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	if (uvd_v6_0_enc_support(adev)) {
		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
			ring = &adev->uvd.inst->ring_enc[i];
			sprintf(ring->name, "uvd_enc%d", i);
			r = amdgpu_ring_init(adev, ring, 512,
					     &adev->uvd.inst->irq, 0,
					     AMDGPU_RING_PRIO_DEFAULT, NULL);
			if (r)
				return r;
		}
	}

	r = amdgpu_uvd_entity_init(adev);

	return r;
}

static int uvd_v6_0_sw_fini(void *handle)
{
	int i, r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_uvd_suspend(adev);
	if (r)
		return r;

	if (uvd_v6_0_enc_support(adev)) {
		for (i = 0; i < adev->uvd.num_enc_rings; ++i)
			amdgpu_ring_fini(&adev->uvd.inst->ring_enc[i]);
	}

	return amdgpu_uvd_sw_fini(adev);
}

/**
 * uvd_v6_0_hw_init - start and test UVD block
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int uvd_v6_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->uvd.inst->ring;
	uint32_t tmp;
	int i, r;

	amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
	uvd_v6_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
	uvd_v6_0_enable_mgcg(adev, true);

	r = amdgpu_ring_test_helper(ring);
	if (r)
		goto done;

	r = amdgpu_ring_alloc(ring, 10);
	if (r) {
		DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r);
		goto done;
	}

	tmp = PACKET0(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	/* Clear timeout status bits */
	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_TIMEOUT_STATUS, 0));
	amdgpu_ring_write(ring, 0x8);

	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0));
	amdgpu_ring_write(ring, 3);

	amdgpu_ring_commit(ring);

	if (uvd_v6_0_enc_support(adev)) {
		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
			ring = &adev->uvd.inst->ring_enc[i];
			r = amdgpu_ring_test_helper(ring);
			if (r)
				goto done;
		}
	}

done:
	if (!r) {
		if (uvd_v6_0_enc_support(adev))
			DRM_INFO("UVD and UVD ENC initialized successfully.\n");
		else
			DRM_INFO("UVD initialized successfully.\n");
	}

	return r;
}

/**
 * uvd_v6_0_hw_fini - stop the hardware block
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Stop the UVD block, mark ring as not ready any more
 */
static int uvd_v6_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/*
	 * Proper cleanups before halting the HW engine:
	 * - cancel the delayed idle work
	 * - enable powergating
	 * - enable clockgating
	 * - disable dpm
	 *
	 * TODO: to align with the VCN implementation, move the
	 * jobs for clockgating/powergating/dpm setting to
	 * ->set_powergating_state().
	 */
	cancel_delayed_work_sync(&adev->uvd.idle_work);

	if (adev->pm.dpm_enabled) {
		amdgpu_dpm_enable_uvd(adev, false);
	} else {
		amdgpu_asic_set_uvd_clocks(adev, 0, 0);
		/* shutdown the UVD block */
		amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
						       AMD_PG_STATE_GATE);
		amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
						       AMD_CG_STATE_GATE);
	}

	if (RREG32(mmUVD_STATUS) != 0)
		uvd_v6_0_stop(adev);

	return 0;
}

static int uvd_v6_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = uvd_v6_0_hw_fini(adev);
	if (r)
		return r;

	return amdgpu_uvd_suspend(adev);
}

static int uvd_v6_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	return uvd_v6_0_hw_init(adev);
}

/**
 * uvd_v6_0_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the UVD memory controller know its offsets
 */
static void uvd_v6_0_mc_resume(struct amdgpu_device *adev)
{
	uint64_t offset;
	uint32_t size;

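	/*
	 * The VCPU sees three consecutive windows carved out of the UVD
	 * buffer object: the firmware image, the heap, then the stack and
	 * per-session state. Cache offsets are programmed in units of
	 * eight bytes, hence the >> 3.
	 */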
	/* program memory controller bits 0-27 */
	WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			lower_32_bits(adev->uvd.inst->gpu_addr));
	WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			upper_32_bits(adev->uvd.inst->gpu_addr));

	offset = AMDGPU_UVD_FIRMWARE_OFFSET;
	size = AMDGPU_UVD_FIRMWARE_SIZE(adev);
	WREG32(mmUVD_VCPU_CACHE_OFFSET0, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE0, size);

	offset += size;
	size = AMDGPU_UVD_HEAP_SIZE;
	WREG32(mmUVD_VCPU_CACHE_OFFSET1, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE1, size);

	offset += size;
	size = AMDGPU_UVD_STACK_SIZE +
	       (AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles);
	WREG32(mmUVD_VCPU_CACHE_OFFSET2, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE2, size);

	WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config);

	WREG32(mmUVD_GP_SCRATCH4, adev->uvd.max_handles);
}

#if 0
static void cz_set_uvd_clock_gating_branches(struct amdgpu_device *adev,
		bool enable)
{
	u32 data, data1;

	data = RREG32(mmUVD_CGC_GATE);
	data1 = RREG32(mmUVD_SUVD_CGC_GATE);
	if (enable) {
		data |= UVD_CGC_GATE__SYS_MASK |
				UVD_CGC_GATE__UDEC_MASK |
				UVD_CGC_GATE__MPEG2_MASK |
				UVD_CGC_GATE__RBC_MASK |
				UVD_CGC_GATE__LMI_MC_MASK |
				UVD_CGC_GATE__IDCT_MASK |
				UVD_CGC_GATE__MPRD_MASK |
				UVD_CGC_GATE__MPC_MASK |
				UVD_CGC_GATE__LBSI_MASK |
				UVD_CGC_GATE__LRBBM_MASK |
				UVD_CGC_GATE__UDEC_RE_MASK |
				UVD_CGC_GATE__UDEC_CM_MASK |
				UVD_CGC_GATE__UDEC_IT_MASK |
				UVD_CGC_GATE__UDEC_DB_MASK |
				UVD_CGC_GATE__UDEC_MP_MASK |
				UVD_CGC_GATE__WCB_MASK |
				UVD_CGC_GATE__VCPU_MASK |
				UVD_CGC_GATE__SCPU_MASK;
		data1 |= UVD_SUVD_CGC_GATE__SRE_MASK |
				UVD_SUVD_CGC_GATE__SIT_MASK |
				UVD_SUVD_CGC_GATE__SMP_MASK |
				UVD_SUVD_CGC_GATE__SCM_MASK |
				UVD_SUVD_CGC_GATE__SDB_MASK |
				UVD_SUVD_CGC_GATE__SRE_H264_MASK |
				UVD_SUVD_CGC_GATE__SRE_HEVC_MASK |
				UVD_SUVD_CGC_GATE__SIT_H264_MASK |
				UVD_SUVD_CGC_GATE__SIT_HEVC_MASK |
				UVD_SUVD_CGC_GATE__SCM_H264_MASK |
				UVD_SUVD_CGC_GATE__SCM_HEVC_MASK |
				UVD_SUVD_CGC_GATE__SDB_H264_MASK |
				UVD_SUVD_CGC_GATE__SDB_HEVC_MASK;
	} else {
		data &= ~(UVD_CGC_GATE__SYS_MASK |
				UVD_CGC_GATE__UDEC_MASK |
				UVD_CGC_GATE__MPEG2_MASK |
				UVD_CGC_GATE__RBC_MASK |
				UVD_CGC_GATE__LMI_MC_MASK |
				UVD_CGC_GATE__LMI_UMC_MASK |
				UVD_CGC_GATE__IDCT_MASK |
				UVD_CGC_GATE__MPRD_MASK |
				UVD_CGC_GATE__MPC_MASK |
				UVD_CGC_GATE__LBSI_MASK |
				UVD_CGC_GATE__LRBBM_MASK |
				UVD_CGC_GATE__UDEC_RE_MASK |
				UVD_CGC_GATE__UDEC_CM_MASK |
				UVD_CGC_GATE__UDEC_IT_MASK |
				UVD_CGC_GATE__UDEC_DB_MASK |
				UVD_CGC_GATE__UDEC_MP_MASK |
				UVD_CGC_GATE__WCB_MASK |
				UVD_CGC_GATE__VCPU_MASK |
				UVD_CGC_GATE__SCPU_MASK);
		data1 &= ~(UVD_SUVD_CGC_GATE__SRE_MASK |
				UVD_SUVD_CGC_GATE__SIT_MASK |
				UVD_SUVD_CGC_GATE__SMP_MASK |
				UVD_SUVD_CGC_GATE__SCM_MASK |
				UVD_SUVD_CGC_GATE__SDB_MASK |
				UVD_SUVD_CGC_GATE__SRE_H264_MASK |
				UVD_SUVD_CGC_GATE__SRE_HEVC_MASK |
				UVD_SUVD_CGC_GATE__SIT_H264_MASK |
				UVD_SUVD_CGC_GATE__SIT_HEVC_MASK |
				UVD_SUVD_CGC_GATE__SCM_H264_MASK |
				UVD_SUVD_CGC_GATE__SCM_HEVC_MASK |
				UVD_SUVD_CGC_GATE__SDB_H264_MASK |
				UVD_SUVD_CGC_GATE__SDB_HEVC_MASK);
	}
	WREG32(mmUVD_CGC_GATE, data);
	WREG32(mmUVD_SUVD_CGC_GATE, data1);
}
#endif

/**
 * uvd_v6_0_start - start UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the UVD block
 */
static int uvd_v6_0_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->uvd.inst->ring;
	uint32_t rb_bufsz, tmp;
	uint32_t lmi_swap_cntl;
	uint32_t mp_swap_cntl;
	int i, j, r;

	/* disable DPG */
	WREG32_P(mmUVD_POWER_STATUS, 0, ~UVD_POWER_STATUS__UVD_PG_MODE_MASK);

	/* disable byte swapping */
	lmi_swap_cntl = 0;
	mp_swap_cntl = 0;

	uvd_v6_0_mc_resume(adev);

	/* disable interrupt */
	WREG32_FIELD(UVD_MASTINT_EN, VCPU_EN, 0);

	/* stall UMC and register bus before resetting VCPU */
	WREG32_FIELD(UVD_LMI_CTRL2, STALL_ARB_UMC, 1);
	mdelay(1);

	/* put LMI, VCPU, RBC etc... into reset */
	WREG32(mmUVD_SOFT_RESET,
		UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
		UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
		UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
		UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
		UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
		UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
		UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
		UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
	mdelay(5);

	/* take UVD block out of reset */
	WREG32_FIELD(SRBM_SOFT_RESET, SOFT_RESET_UVD, 0);
	mdelay(5);

	/* initialize UVD memory controller */
	WREG32(mmUVD_LMI_CTRL,
		(0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
		UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__REQ_MODE_MASK |
		UVD_LMI_CTRL__DISABLE_ON_FWV_FAIL_MASK);

#ifdef __BIG_ENDIAN
	/* swap (8 in 32) RB and IB */
	lmi_swap_cntl = 0xa;
	mp_swap_cntl = 0;
#endif
	WREG32(mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
	WREG32(mmUVD_MP_SWAP_CNTL, mp_swap_cntl);

	WREG32(mmUVD_MPC_SET_MUXA0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXA1, 0x0);
	WREG32(mmUVD_MPC_SET_MUXB0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXB1, 0x0);
	WREG32(mmUVD_MPC_SET_ALU, 0);
	WREG32(mmUVD_MPC_SET_MUX, 0x88);

	/* take all subblocks out of reset, except VCPU */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
	mdelay(5);

	/* enable VCPU clock */
	WREG32(mmUVD_VCPU_CNTL, UVD_VCPU_CNTL__CLK_EN_MASK);

	/* enable UMC */
	WREG32_FIELD(UVD_LMI_CTRL2, STALL_ARB_UMC, 0);

	/* boot up the VCPU */
	WREG32(mmUVD_SOFT_RESET, 0);
	mdelay(10);

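	/* Give the VCPU up to ten chances to report ready (UVD_STATUS
	 * bit 1), pulsing its soft reset between attempts.
	 */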
	for (i = 0; i < 10; ++i) {
		uint32_t status;

		for (j = 0; j < 100; ++j) {
			status = RREG32(mmUVD_STATUS);
			if (status & 2)
				break;
			mdelay(10);
		}
		r = 0;
		if (status & 2)
			break;

		DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
		WREG32_FIELD(UVD_SOFT_RESET, VCPU_SOFT_RESET, 1);
		mdelay(10);
		WREG32_FIELD(UVD_SOFT_RESET, VCPU_SOFT_RESET, 0);
		mdelay(10);
		r = -1;
	}

	if (r) {
		DRM_ERROR("UVD not responding, giving up!!!\n");
		return r;
	}
	/* enable master interrupt */
	WREG32_P(mmUVD_MASTINT_EN,
		(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
		~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK));

	/* clear the bit 4 of UVD_STATUS */
	WREG32_P(mmUVD_STATUS, 0, ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));

	/* force RBC into idle state */
	rb_bufsz = order_base_2(ring->ring_size);
	tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
	WREG32(mmUVD_RBC_RB_CNTL, tmp);

	/* set the write pointer delay */
	WREG32(mmUVD_RBC_RB_WPTR_CNTL, 0);

	/* set the wb address */
	WREG32(mmUVD_RBC_RB_RPTR_ADDR, (upper_32_bits(ring->gpu_addr) >> 2));

	/* program the RB_BASE for ring buffer */
	WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
			lower_32_bits(ring->gpu_addr));
	WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
			upper_32_bits(ring->gpu_addr));

	/* Initialize the ring buffer's read and write pointers */
	WREG32(mmUVD_RBC_RB_RPTR, 0);

	ring->wptr = RREG32(mmUVD_RBC_RB_RPTR);
	WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));

	WREG32_FIELD(UVD_RBC_RB_CNTL, RB_NO_FETCH, 0);

	if (uvd_v6_0_enc_support(adev)) {
		ring = &adev->uvd.inst->ring_enc[0];
		WREG32(mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
		WREG32(mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
		WREG32(mmUVD_RB_BASE_LO, ring->gpu_addr);
		WREG32(mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
		WREG32(mmUVD_RB_SIZE, ring->ring_size / 4);

		ring = &adev->uvd.inst->ring_enc[1];
		WREG32(mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
		WREG32(mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
		WREG32(mmUVD_RB_BASE_LO2, ring->gpu_addr);
		WREG32(mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
		WREG32(mmUVD_RB_SIZE2, ring->ring_size / 4);
	}

	return 0;
}

/**
 * uvd_v6_0_stop - stop UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * stop the UVD block
 */
static void uvd_v6_0_stop(struct amdgpu_device *adev)
{
	/* force RBC into idle state */
	WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);

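	/* Bit 8 of UVD_LMI_CTRL2 is presumably the same STALL_ARB_UMC
	 * field that uvd_v6_0_start() toggles via WREG32_FIELD().
	 */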
	/* Stall UMC and register bus before resetting VCPU */
	WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
	mdelay(1);

	/* put VCPU into reset */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
	mdelay(5);

	/* disable VCPU clock */
	WREG32(mmUVD_VCPU_CNTL, 0x0);

	/* Unstall UMC and register bus */
	WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));

	WREG32(mmUVD_STATUS, 0);
}

/**
 * uvd_v6_0_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address
 * @seq: sequence number
 * @flags: fence related flags
 *
 * Write a fence and a trap command to the ring.
 */
static void uvd_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				     unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, addr & 0xffffffff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0);

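	/* Second packet group: with zeroed data, VCPU command 2 raises
	 * the trap interrupt once the fence value above has been written.
	 */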
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 2);
}

/**
 * uvd_v6_0_enc_ring_emit_fence - emit an enc fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address
 * @seq: sequence number
 * @flags: fence related flags
 *
 * Write an enc fence and a trap command to the ring.
 */
static void uvd_v6_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
					 u64 seq, unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, HEVC_ENC_CMD_FENCE);
	amdgpu_ring_write(ring, addr);
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, HEVC_ENC_CMD_TRAP);
}

/**
 * uvd_v6_0_ring_emit_hdp_flush - skip HDP flushing
 *
 * @ring: amdgpu_ring pointer
 */
static void uvd_v6_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
	/* The firmware doesn't seem to like touching registers at this point. */
}

/**
 * uvd_v6_0_ring_test_ring - register write test
 *
 * @ring: amdgpu_ring pointer
 *
 * Test if we can successfully write to the context register
 */
static int uvd_v6_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r)
		return r;

	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(mmUVD_CONTEXT_ID);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

/**
 * uvd_v6_0_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @job: job to retrieve vmid from
 * @ib: indirect buffer to execute
 * @flags: unused
 *
 * Write ring commands to execute the indirect buffer
 */
static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
				  struct amdgpu_job *job,
				  struct amdgpu_ib *ib,
				  uint32_t flags)
{
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);

	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_VMID, 0));
	amdgpu_ring_write(ring, vmid);

	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0));
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH, 0));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_SIZE, 0));
	amdgpu_ring_write(ring, ib->length_dw);
}

/**
 * uvd_v6_0_enc_ring_emit_ib - enc execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @job: job to retrieve vmid from
 * @ib: indirect buffer to execute
 * @flags: unused
 *
 * Write enc ring commands to execute the indirect buffer
 */
static void uvd_v6_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
				      struct amdgpu_job *job,
				      struct amdgpu_ib *ib,
				      uint32_t flags)
{
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);

	amdgpu_ring_write(ring, HEVC_ENC_CMD_IB_VM);
	amdgpu_ring_write(ring, vmid);
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
}

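/* Register writes from inside the ring go through the GPCOM VCPU
 * mailbox: DATA0 carries the register offset as a byte address (hence
 * the << 2), DATA1 the value, and command 0x8 performs the write.
 */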
static void uvd_v6_0_ring_emit_wreg(struct amdgpu_ring *ring,
				    uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, val);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0x8);
}

static void uvd_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
					unsigned vmid, uint64_t pd_addr)
{
	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH8, 0));
	amdgpu_ring_write(ring, 1 << vmid); /* mask */
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0xC);
}

static void uvd_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH8, 0));
	amdgpu_ring_write(ring, 0xffffffff); /* mask */
	amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH9, 0));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0xE);
}

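/* NOPs are emitted as two-dword PACKET0 writes to UVD_NO_OP, so both
 * the current write pointer and the requested count must stay even.
 */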
static void uvd_v6_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	int i;

	WARN_ON(ring->wptr % 2 || count % 2);

	for (i = 0; i < count / 2; i++) {
		amdgpu_ring_write(ring, PACKET0(mmUVD_NO_OP, 0));
		amdgpu_ring_write(ring, 0);
	}
}

static void uvd_v6_0_enc_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

	amdgpu_ring_write(ring, HEVC_ENC_CMD_WAIT_GE);
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
}

static void uvd_v6_0_enc_ring_insert_end(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
}

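/* The encode firmware takes the page-directory base as a 4 KiB frame
 * number, hence pd_addr >> 12, and then flushes its TLB for the vmid.
 */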
static void uvd_v6_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
					    unsigned int vmid, uint64_t pd_addr)
{
	amdgpu_ring_write(ring, HEVC_ENC_CMD_UPDATE_PTB);
	amdgpu_ring_write(ring, vmid);
	amdgpu_ring_write(ring, pd_addr >> 12);

	amdgpu_ring_write(ring, HEVC_ENC_CMD_FLUSH_TLB);
	amdgpu_ring_write(ring, vmid);
}

static bool uvd_v6_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
}

static int uvd_v6_0_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (uvd_v6_0_is_idle(handle))
			return 0;
	}
	return -ETIMEDOUT;
}

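/* Every UVD_STATUS busy bit except bit 1, the VCPU ready/report bit
 * that uvd_v6_0_start() polls for.
 */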
#define AMDGPU_UVD_STATUS_BUSY_MASK 0xfd
static bool uvd_v6_0_check_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset = 0;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (REG_GET_FIELD(tmp, SRBM_STATUS, UVD_RQ_PENDING) ||
	    REG_GET_FIELD(tmp, SRBM_STATUS, UVD_BUSY) ||
	    (RREG32(mmUVD_STATUS) & AMDGPU_UVD_STATUS_BUSY_MASK))
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_UVD, 1);

	if (srbm_soft_reset) {
		adev->uvd.inst->srbm_soft_reset = srbm_soft_reset;
		return true;
	} else {
		adev->uvd.inst->srbm_soft_reset = 0;
		return false;
	}
}

static int uvd_v6_0_pre_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->uvd.inst->srbm_soft_reset)
		return 0;

	uvd_v6_0_stop(adev);
	return 0;
}

static int uvd_v6_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset;

	if (!adev->uvd.inst->srbm_soft_reset)
		return 0;
	srbm_soft_reset = adev->uvd.inst->srbm_soft_reset;

	if (srbm_soft_reset) {
		u32 tmp;

		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);
	}

	return 0;
}

static int uvd_v6_0_post_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->uvd.inst->srbm_soft_reset)
		return 0;

	mdelay(5);

	return uvd_v6_0_start(adev);
}

static int uvd_v6_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	// TODO
	return 0;
}

static int uvd_v6_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	bool int_handled = true;

	DRM_DEBUG("IH: UVD TRAP\n");

	switch (entry->src_id) {
	case 124:
		amdgpu_fence_process(&adev->uvd.inst->ring);
		break;
	case 119:
		if (likely(uvd_v6_0_enc_support(adev)))
			amdgpu_fence_process(&adev->uvd.inst->ring_enc[0]);
		else
			int_handled = false;
		break;
	case 120:
		if (likely(uvd_v6_0_enc_support(adev)))
			amdgpu_fence_process(&adev->uvd.inst->ring_enc[1]);
		else
			int_handled = false;
		break;
	}

	if (!int_handled)
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data[0]);

	return 0;
}

static void uvd_v6_0_enable_clock_gating(struct amdgpu_device *adev, bool enable)
{
	uint32_t data1, data3;

	data1 = RREG32(mmUVD_SUVD_CGC_GATE);
	data3 = RREG32(mmUVD_CGC_GATE);

	data1 |= UVD_SUVD_CGC_GATE__SRE_MASK |
		UVD_SUVD_CGC_GATE__SIT_MASK |
		UVD_SUVD_CGC_GATE__SMP_MASK |
		UVD_SUVD_CGC_GATE__SCM_MASK |
		UVD_SUVD_CGC_GATE__SDB_MASK |
		UVD_SUVD_CGC_GATE__SRE_H264_MASK |
		UVD_SUVD_CGC_GATE__SRE_HEVC_MASK |
		UVD_SUVD_CGC_GATE__SIT_H264_MASK |
		UVD_SUVD_CGC_GATE__SIT_HEVC_MASK |
		UVD_SUVD_CGC_GATE__SCM_H264_MASK |
		UVD_SUVD_CGC_GATE__SCM_HEVC_MASK |
		UVD_SUVD_CGC_GATE__SDB_H264_MASK |
		UVD_SUVD_CGC_GATE__SDB_HEVC_MASK;

	if (enable) {
		data3 |= (UVD_CGC_GATE__SYS_MASK |
			UVD_CGC_GATE__UDEC_MASK |
			UVD_CGC_GATE__MPEG2_MASK |
			UVD_CGC_GATE__RBC_MASK |
			UVD_CGC_GATE__LMI_MC_MASK |
			UVD_CGC_GATE__LMI_UMC_MASK |
			UVD_CGC_GATE__IDCT_MASK |
			UVD_CGC_GATE__MPRD_MASK |
			UVD_CGC_GATE__MPC_MASK |
			UVD_CGC_GATE__LBSI_MASK |
			UVD_CGC_GATE__LRBBM_MASK |
			UVD_CGC_GATE__UDEC_RE_MASK |
			UVD_CGC_GATE__UDEC_CM_MASK |
			UVD_CGC_GATE__UDEC_IT_MASK |
			UVD_CGC_GATE__UDEC_DB_MASK |
			UVD_CGC_GATE__UDEC_MP_MASK |
			UVD_CGC_GATE__WCB_MASK |
			UVD_CGC_GATE__JPEG_MASK |
			UVD_CGC_GATE__SCPU_MASK |
			UVD_CGC_GATE__JPEG2_MASK);
		/* only when PG is enabled can we gate the clock to the VCPU */
		if (adev->pg_flags & AMD_PG_SUPPORT_UVD)
			data3 |= UVD_CGC_GATE__VCPU_MASK;

		data3 &= ~UVD_CGC_GATE__REGS_MASK;
	} else {
		data3 = 0;
	}

	WREG32(mmUVD_SUVD_CGC_GATE, data1);
	WREG32(mmUVD_CGC_GATE, data3);
}

static void uvd_v6_0_set_sw_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data, data2;

	data = RREG32(mmUVD_CGC_CTRL);
	data2 = RREG32(mmUVD_SUVD_CGC_CTRL);

	data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
		  UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);

	data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
		(1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER)) |
		(4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY));

	data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
			UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
			UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
			UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
			UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
			UVD_CGC_CTRL__SYS_MODE_MASK |
			UVD_CGC_CTRL__UDEC_MODE_MASK |
			UVD_CGC_CTRL__MPEG2_MODE_MASK |
			UVD_CGC_CTRL__REGS_MODE_MASK |
			UVD_CGC_CTRL__RBC_MODE_MASK |
			UVD_CGC_CTRL__LMI_MC_MODE_MASK |
			UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
			UVD_CGC_CTRL__IDCT_MODE_MASK |
			UVD_CGC_CTRL__MPRD_MODE_MASK |
			UVD_CGC_CTRL__MPC_MODE_MASK |
			UVD_CGC_CTRL__LBSI_MODE_MASK |
			UVD_CGC_CTRL__LRBBM_MODE_MASK |
			UVD_CGC_CTRL__WCB_MODE_MASK |
			UVD_CGC_CTRL__VCPU_MODE_MASK |
			UVD_CGC_CTRL__JPEG_MODE_MASK |
			UVD_CGC_CTRL__SCPU_MODE_MASK |
			UVD_CGC_CTRL__JPEG2_MODE_MASK);
	data2 &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK |
			UVD_SUVD_CGC_CTRL__SIT_MODE_MASK |
			UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
			UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
			UVD_SUVD_CGC_CTRL__SDB_MODE_MASK);

	WREG32(mmUVD_CGC_CTRL, data);
	WREG32(mmUVD_SUVD_CGC_CTRL, data2);
}

#if 0
static void uvd_v6_0_set_hw_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data, data1, cgc_flags, suvd_flags;

	data = RREG32(mmUVD_CGC_GATE);
	data1 = RREG32(mmUVD_SUVD_CGC_GATE);

	cgc_flags = UVD_CGC_GATE__SYS_MASK |
		UVD_CGC_GATE__UDEC_MASK |
		UVD_CGC_GATE__MPEG2_MASK |
		UVD_CGC_GATE__RBC_MASK |
		UVD_CGC_GATE__LMI_MC_MASK |
		UVD_CGC_GATE__IDCT_MASK |
		UVD_CGC_GATE__MPRD_MASK |
		UVD_CGC_GATE__MPC_MASK |
		UVD_CGC_GATE__LBSI_MASK |
		UVD_CGC_GATE__LRBBM_MASK |
		UVD_CGC_GATE__UDEC_RE_MASK |
		UVD_CGC_GATE__UDEC_CM_MASK |
		UVD_CGC_GATE__UDEC_IT_MASK |
		UVD_CGC_GATE__UDEC_DB_MASK |
		UVD_CGC_GATE__UDEC_MP_MASK |
		UVD_CGC_GATE__WCB_MASK |
		UVD_CGC_GATE__VCPU_MASK |
		UVD_CGC_GATE__SCPU_MASK |
		UVD_CGC_GATE__JPEG_MASK |
		UVD_CGC_GATE__JPEG2_MASK;

	suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
		UVD_SUVD_CGC_GATE__SIT_MASK |
		UVD_SUVD_CGC_GATE__SMP_MASK |
		UVD_SUVD_CGC_GATE__SCM_MASK |
		UVD_SUVD_CGC_GATE__SDB_MASK;

	data |= cgc_flags;
	data1 |= suvd_flags;

	WREG32(mmUVD_CGC_GATE, data);
	WREG32(mmUVD_SUVD_CGC_GATE, data1);
}
#endif

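/* MGCG is a two-part switch here: the low bits of the indexed
 * UVD_CGC_MEM_CTRL register appear to gate the UVD local memories,
 * while DYN_CLOCK_MODE in UVD_CGC_CTRL selects dynamic clocking for
 * the logic; both are set or cleared together.
 */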
static void uvd_v6_0_enable_mgcg(struct amdgpu_device *adev,
				 bool enable)
{
	u32 orig, data;

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) {
		data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
		data |= 0xfff;
		WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

		orig = data = RREG32(mmUVD_CGC_CTRL);
		data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		if (orig != data)
			WREG32(mmUVD_CGC_CTRL, data);
	} else {
		data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
		data &= ~0xfff;
		WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

		orig = data = RREG32(mmUVD_CGC_CTRL);
		data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		if (orig != data)
			WREG32(mmUVD_CGC_CTRL, data);
	}
}

static int uvd_v6_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE);

	if (enable) {
		/* wait for STATUS to clear */
		if (uvd_v6_0_wait_for_idle(handle))
			return -EBUSY;
		uvd_v6_0_enable_clock_gating(adev, true);
		/* enable HW gates because UVD is idle */
		/* uvd_v6_0_set_hw_clock_gating(adev); */
	} else {
		/* disable HW gating and enable SW gating */
		uvd_v6_0_enable_clock_gating(adev, false);
	}
	uvd_v6_0_set_sw_clock_gating(adev);
	return 0;
}

static int uvd_v6_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	/* This doesn't actually powergate the UVD block.
	 * That's done in the dpm code via the SMC. This
	 * just re-inits the block as necessary. The actual
	 * gating still happens in the dpm code. We should
	 * revisit this when there is a cleaner line between
	 * the smc and the hw blocks
	 */
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret = 0;

	WREG32(mmUVD_POWER_STATUS, UVD_POWER_STATUS__UVD_PG_EN_MASK);

	if (state == AMD_PG_STATE_GATE) {
		uvd_v6_0_stop(adev);
	} else {
		ret = uvd_v6_0_start(adev);
		if (ret)
			goto out;
	}

out:
	return ret;
}

static void uvd_v6_0_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	mutex_lock(&adev->pm.mutex);

	if (adev->flags & AMD_IS_APU)
		data = RREG32_SMC(ixCURRENT_PG_STATUS_APU);
	else
		data = RREG32_SMC(ixCURRENT_PG_STATUS);

	if (data & CURRENT_PG_STATUS__UVD_PG_STATUS_MASK) {
		DRM_INFO("Cannot get clockgating state when UVD is powergated.\n");
		goto out;
	}

	/* AMD_CG_SUPPORT_UVD_MGCG */
	data = RREG32(mmUVD_CGC_CTRL);
	if (data & UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK)
		*flags |= AMD_CG_SUPPORT_UVD_MGCG;

out:
	mutex_unlock(&adev->pm.mutex);
}

static const struct amd_ip_funcs uvd_v6_0_ip_funcs = {
	.name = "uvd_v6_0",
	.early_init = uvd_v6_0_early_init,
	.late_init = NULL,
	.sw_init = uvd_v6_0_sw_init,
	.sw_fini = uvd_v6_0_sw_fini,
	.hw_init = uvd_v6_0_hw_init,
	.hw_fini = uvd_v6_0_hw_fini,
	.suspend = uvd_v6_0_suspend,
	.resume = uvd_v6_0_resume,
	.is_idle = uvd_v6_0_is_idle,
	.wait_for_idle = uvd_v6_0_wait_for_idle,
	.check_soft_reset = uvd_v6_0_check_soft_reset,
	.pre_soft_reset = uvd_v6_0_pre_soft_reset,
	.soft_reset = uvd_v6_0_soft_reset,
	.post_soft_reset = uvd_v6_0_post_soft_reset,
	.set_clockgating_state = uvd_v6_0_set_clockgating_state,
	.set_powergating_state = uvd_v6_0_set_powergating_state,
	.get_clockgating_state = uvd_v6_0_get_clockgating_state,
};

static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = {
	.type = AMDGPU_RING_TYPE_UVD,
	.align_mask = 0xf,
	.support_64bit_ptrs = false,
	.no_user_fence = true,
	.get_rptr = uvd_v6_0_ring_get_rptr,
	.get_wptr = uvd_v6_0_ring_get_wptr,
	.set_wptr = uvd_v6_0_ring_set_wptr,
	.parse_cs = amdgpu_uvd_ring_parse_cs,
	.emit_frame_size =
		6 + /* hdp invalidate */
		10 + /* uvd_v6_0_ring_emit_pipeline_sync */
		14, /* uvd_v6_0_ring_emit_fence x1 no user fence */
	.emit_ib_size = 8, /* uvd_v6_0_ring_emit_ib */
	.emit_ib = uvd_v6_0_ring_emit_ib,
	.emit_fence = uvd_v6_0_ring_emit_fence,
	.emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
	.test_ring = uvd_v6_0_ring_test_ring,
	.test_ib = amdgpu_uvd_ring_test_ib,
	.insert_nop = uvd_v6_0_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
	.emit_wreg = uvd_v6_0_ring_emit_wreg,
};

static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_UVD,
	.align_mask = 0xf,
	.support_64bit_ptrs = false,
	.no_user_fence = true,
	.get_rptr = uvd_v6_0_ring_get_rptr,
	.get_wptr = uvd_v6_0_ring_get_wptr,
	.set_wptr = uvd_v6_0_ring_set_wptr,
	.emit_frame_size =
		6 + /* hdp invalidate */
		10 + /* uvd_v6_0_ring_emit_pipeline_sync */
		VI_FLUSH_GPU_TLB_NUM_WREG * 6 + 8 + /* uvd_v6_0_ring_emit_vm_flush */
		14 + 14, /* uvd_v6_0_ring_emit_fence x2 vm fence */
	.emit_ib_size = 8, /* uvd_v6_0_ring_emit_ib */
	.emit_ib = uvd_v6_0_ring_emit_ib,
	.emit_fence = uvd_v6_0_ring_emit_fence,
	.emit_vm_flush = uvd_v6_0_ring_emit_vm_flush,
	.emit_pipeline_sync = uvd_v6_0_ring_emit_pipeline_sync,
	.emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
	.test_ring = uvd_v6_0_ring_test_ring,
	.test_ib = amdgpu_uvd_ring_test_ib,
	.insert_nop = uvd_v6_0_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
	.emit_wreg = uvd_v6_0_ring_emit_wreg,
};

static const struct amdgpu_ring_funcs uvd_v6_0_enc_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_UVD_ENC,
	.align_mask = 0x3f,
	.nop = HEVC_ENC_CMD_NO_OP,
	.support_64bit_ptrs = false,
	.no_user_fence = true,
	.get_rptr = uvd_v6_0_enc_ring_get_rptr,
	.get_wptr = uvd_v6_0_enc_ring_get_wptr,
	.set_wptr = uvd_v6_0_enc_ring_set_wptr,
	.emit_frame_size =
		4 + /* uvd_v6_0_enc_ring_emit_pipeline_sync */
		5 + /* uvd_v6_0_enc_ring_emit_vm_flush */
		5 + 5 + /* uvd_v6_0_enc_ring_emit_fence x2 vm fence */
		1, /* uvd_v6_0_enc_ring_insert_end */
	.emit_ib_size = 5, /* uvd_v6_0_enc_ring_emit_ib */
	.emit_ib = uvd_v6_0_enc_ring_emit_ib,
	.emit_fence = uvd_v6_0_enc_ring_emit_fence,
	.emit_vm_flush = uvd_v6_0_enc_ring_emit_vm_flush,
	.emit_pipeline_sync = uvd_v6_0_enc_ring_emit_pipeline_sync,
	.test_ring = uvd_v6_0_enc_ring_test_ring,
	.test_ib = uvd_v6_0_enc_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.insert_end = uvd_v6_0_enc_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
};

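/* Polaris and newer can address the ring and IBs through the GPU VM;
 * earlier UVD6 parts fall back to physical-mode rings whose command
 * streams are validated and patched by amdgpu_uvd_ring_parse_cs().
 */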
static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev)
{
	if (adev->asic_type >= CHIP_POLARIS10) {
		adev->uvd.inst->ring.funcs = &uvd_v6_0_ring_vm_funcs;
		DRM_INFO("UVD is enabled in VM mode\n");
	} else {
		adev->uvd.inst->ring.funcs = &uvd_v6_0_ring_phys_funcs;
		DRM_INFO("UVD is enabled in physical mode\n");
	}
}

static void uvd_v6_0_set_enc_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->uvd.num_enc_rings; ++i)
		adev->uvd.inst->ring_enc[i].funcs = &uvd_v6_0_enc_ring_vm_funcs;

	DRM_INFO("UVD ENC is enabled in VM mode\n");
}

static const struct amdgpu_irq_src_funcs uvd_v6_0_irq_funcs = {
	.set = uvd_v6_0_set_interrupt_state,
	.process = uvd_v6_0_process_interrupt,
};

static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev)
{
	if (uvd_v6_0_enc_support(adev))
		adev->uvd.inst->irq.num_types = adev->uvd.num_enc_rings + 1;
	else
		adev->uvd.inst->irq.num_types = 1;

	adev->uvd.inst->irq.funcs = &uvd_v6_0_irq_funcs;
}

const struct amdgpu_ip_block_version uvd_v6_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_UVD,
	.major = 6,
	.minor = 0,
	.rev = 0,
	.funcs = &uvd_v6_0_ip_funcs,
};

const struct amdgpu_ip_block_version uvd_v6_2_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_UVD,
	.major = 6,
	.minor = 2,
	.rev = 0,
	.funcs = &uvd_v6_0_ip_funcs,
};

const struct amdgpu_ip_block_version uvd_v6_3_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_UVD,
	.major = 6,
	.minor = 3,
	.rev = 0,
	.funcs = &uvd_v6_0_ip_funcs,
};