/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>
#include <linux/module.h>

#include <drm/drm.h>
#include <drm/drm_drv.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_vce.h"
#include "amdgpu_cs.h"
#include "cikd.h"

/* 1 second timeout */
#define VCE_IDLE_TIMEOUT	msecs_to_jiffies(1000)

/* Firmware Names */
#ifdef CONFIG_DRM_AMDGPU_CIK
#define FIRMWARE_BONAIRE	"amdgpu/bonaire_vce.bin"
#define FIRMWARE_KABINI		"amdgpu/kabini_vce.bin"
#define FIRMWARE_KAVERI		"amdgpu/kaveri_vce.bin"
#define FIRMWARE_HAWAII		"amdgpu/hawaii_vce.bin"
#define FIRMWARE_MULLINS	"amdgpu/mullins_vce.bin"
#endif
#define FIRMWARE_TONGA		"amdgpu/tonga_vce.bin"
#define FIRMWARE_CARRIZO	"amdgpu/carrizo_vce.bin"
#define FIRMWARE_FIJI		"amdgpu/fiji_vce.bin"
#define FIRMWARE_STONEY		"amdgpu/stoney_vce.bin"
#define FIRMWARE_POLARIS10	"amdgpu/polaris10_vce.bin"
#define FIRMWARE_POLARIS11	"amdgpu/polaris11_vce.bin"
#define FIRMWARE_POLARIS12	"amdgpu/polaris12_vce.bin"
#define FIRMWARE_VEGAM		"amdgpu/vegam_vce.bin"

#define FIRMWARE_VEGA10		"amdgpu/vega10_vce.bin"
#define FIRMWARE_VEGA12		"amdgpu/vega12_vce.bin"
#define FIRMWARE_VEGA20		"amdgpu/vega20_vce.bin"

#ifdef CONFIG_DRM_AMDGPU_CIK
MODULE_FIRMWARE(FIRMWARE_BONAIRE);
MODULE_FIRMWARE(FIRMWARE_KABINI);
MODULE_FIRMWARE(FIRMWARE_KAVERI);
MODULE_FIRMWARE(FIRMWARE_HAWAII);
MODULE_FIRMWARE(FIRMWARE_MULLINS);
#endif
MODULE_FIRMWARE(FIRMWARE_TONGA);
MODULE_FIRMWARE(FIRMWARE_CARRIZO);
MODULE_FIRMWARE(FIRMWARE_FIJI);
MODULE_FIRMWARE(FIRMWARE_STONEY);
MODULE_FIRMWARE(FIRMWARE_POLARIS10);
MODULE_FIRMWARE(FIRMWARE_POLARIS11);
MODULE_FIRMWARE(FIRMWARE_POLARIS12);
MODULE_FIRMWARE(FIRMWARE_VEGAM);

MODULE_FIRMWARE(FIRMWARE_VEGA10);
MODULE_FIRMWARE(FIRMWARE_VEGA12);
MODULE_FIRMWARE(FIRMWARE_VEGA20);

static void amdgpu_vce_idle_work_handler(struct work_struct *work);
static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
				     struct dma_fence **fence);
static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
				      bool direct, struct dma_fence **fence);

/**
 * amdgpu_vce_sw_init - allocate memory, load vce firmware
 *
 * @adev: amdgpu_device pointer
 * @size: size for the new BO
 *
 * First step to get VCE online, allocate memory and load the firmware
 */
int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
{
	const char *fw_name;
	const struct common_firmware_header *hdr;
	unsigned int ucode_version, version_major, version_minor, binary_id;
	int i, r;

	switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
		fw_name = FIRMWARE_BONAIRE;
		break;
	case CHIP_KAVERI:
		fw_name = FIRMWARE_KAVERI;
		break;
	case CHIP_KABINI:
		fw_name = FIRMWARE_KABINI;
		break;
	case CHIP_HAWAII:
		fw_name = FIRMWARE_HAWAII;
		break;
	case CHIP_MULLINS:
		fw_name = FIRMWARE_MULLINS;
		break;
#endif
	case CHIP_TONGA:
		fw_name = FIRMWARE_TONGA;
		break;
	case CHIP_CARRIZO:
		fw_name = FIRMWARE_CARRIZO;
		break;
	case CHIP_FIJI:
		fw_name = FIRMWARE_FIJI;
		break;
	case CHIP_STONEY:
		fw_name = FIRMWARE_STONEY;
		break;
	case CHIP_POLARIS10:
		fw_name = FIRMWARE_POLARIS10;
		break;
	case CHIP_POLARIS11:
		fw_name = FIRMWARE_POLARIS11;
		break;
	case CHIP_POLARIS12:
		fw_name = FIRMWARE_POLARIS12;
		break;
	case CHIP_VEGAM:
		fw_name = FIRMWARE_VEGAM;
		break;
	case CHIP_VEGA10:
		fw_name = FIRMWARE_VEGA10;
		break;
	case CHIP_VEGA12:
		fw_name = FIRMWARE_VEGA12;
		break;
	case CHIP_VEGA20:
		fw_name = FIRMWARE_VEGA20;
		break;

	default:
		return -EINVAL;
	}

	r = amdgpu_ucode_request(adev, &adev->vce.fw, fw_name);
	if (r) {
		dev_err(adev->dev, "amdgpu_vce: Can't validate firmware \"%s\"\n",
			fw_name);
		amdgpu_ucode_release(&adev->vce.fw);
		return r;
	}

	hdr = (const struct common_firmware_header *)adev->vce.fw->data;

	ucode_version = le32_to_cpu(hdr->ucode_version);
	version_major = (ucode_version >> 20) & 0xfff;
	version_minor = (ucode_version >> 8) & 0xfff;
	binary_id = ucode_version & 0xff;
	DRM_INFO("Found VCE firmware Version: %d.%d Binary ID: %d\n",
		 version_major, version_minor, binary_id);
	adev->vce.fw_version = ((version_major << 24) | (version_minor << 16) |
				(binary_id << 8));
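	/*
	 * Illustrative example: a raw ucode_version of 0x03400403 decodes as
	 * major 52, minor 4, binary ID 3, and is repacked so that checks like
	 * (fw_version >> 24) >= 52 below test the major version directly.
	 */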

	r = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM |
				    AMDGPU_GEM_DOMAIN_GTT,
				    &adev->vce.vcpu_bo,
				    &adev->vce.gpu_addr, &adev->vce.cpu_addr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate VCE bo\n", r);
		return r;
	}

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		atomic_set(&adev->vce.handles[i], 0);
		adev->vce.filp[i] = NULL;
	}

	INIT_DELAYED_WORK(&adev->vce.idle_work, amdgpu_vce_idle_work_handler);
	mutex_init(&adev->vce.idle_mutex);

	return 0;
}

/**
 * amdgpu_vce_sw_fini - free memory
 *
 * @adev: amdgpu_device pointer
 *
 * Last step on VCE teardown, free firmware memory
 */
int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
{
	unsigned int i;

	if (adev->vce.vcpu_bo == NULL)
		return 0;

	drm_sched_entity_destroy(&adev->vce.entity);

	amdgpu_bo_free_kernel(&adev->vce.vcpu_bo, &adev->vce.gpu_addr,
		(void **)&adev->vce.cpu_addr);

	for (i = 0; i < adev->vce.num_rings; i++)
		amdgpu_ring_fini(&adev->vce.ring[i]);

	amdgpu_ucode_release(&adev->vce.fw);
	mutex_destroy(&adev->vce.idle_mutex);

	return 0;
}

/**
 * amdgpu_vce_entity_init - init entity
 *
 * @adev: amdgpu_device pointer
 *
 */
int amdgpu_vce_entity_init(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	struct drm_gpu_scheduler *sched;
	int r;

	ring = &adev->vce.ring[0];
	sched = &ring->sched;
	r = drm_sched_entity_init(&adev->vce.entity, DRM_SCHED_PRIORITY_NORMAL,
				  &sched, 1, NULL);
	if (r != 0) {
		DRM_ERROR("Failed setting up VCE run queue.\n");
		return r;
	}

	return 0;
}

/**
 * amdgpu_vce_suspend - unpin VCE fw memory
 *
 * @adev: amdgpu_device pointer
 *
 */
int amdgpu_vce_suspend(struct amdgpu_device *adev)
{
	int i;

	cancel_delayed_work_sync(&adev->vce.idle_work);

	if (adev->vce.vcpu_bo == NULL)
		return 0;

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
		if (atomic_read(&adev->vce.handles[i]))
			break;

	if (i == AMDGPU_MAX_VCE_HANDLES)
		return 0;

	/* TODO: suspending running encoding sessions isn't supported */
	return -EINVAL;
}

/**
 * amdgpu_vce_resume - pin VCE fw memory
 *
 * @adev: amdgpu_device pointer
 *
 */
int amdgpu_vce_resume(struct amdgpu_device *adev)
{
	void *cpu_addr;
	const struct common_firmware_header *hdr;
	unsigned int offset;
	int r, idx;

	if (adev->vce.vcpu_bo == NULL)
		return -EINVAL;

	r = amdgpu_bo_reserve(adev->vce.vcpu_bo, false);
	if (r) {
		dev_err(adev->dev, "(%d) failed to reserve VCE bo\n", r);
		return r;
	}

	r = amdgpu_bo_kmap(adev->vce.vcpu_bo, &cpu_addr);
	if (r) {
		amdgpu_bo_unreserve(adev->vce.vcpu_bo);
		dev_err(adev->dev, "(%d) VCE map failed\n", r);
		return r;
	}

	hdr = (const struct common_firmware_header *)adev->vce.fw->data;
	offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
	if (drm_dev_enter(adev_to_drm(adev), &idx)) {
		memcpy_toio(cpu_addr, adev->vce.fw->data + offset,
			    adev->vce.fw->size - offset);
		drm_dev_exit(idx);
	}

	amdgpu_bo_kunmap(adev->vce.vcpu_bo);

	amdgpu_bo_unreserve(adev->vce.vcpu_bo);

	return 0;
}

/**
 * amdgpu_vce_idle_work_handler - power off VCE
 *
 * @work: pointer to work structure
 *
 * Power off VCE when it's not used anymore
 */
static void amdgpu_vce_idle_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, vce.idle_work.work);
	unsigned int i, count = 0;

	for (i = 0; i < adev->vce.num_rings; i++)
		count += amdgpu_fence_count_emitted(&adev->vce.ring[i]);

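	/* Only power VCE down once no fences are outstanding on any ring */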
	if (count == 0) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_vce(adev, false);
		} else {
			amdgpu_asic_set_vce_clocks(adev, 0, 0);
			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
							       AMD_PG_STATE_GATE);
			amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
							       AMD_CG_STATE_GATE);
		}
	} else {
		schedule_delayed_work(&adev->vce.idle_work, VCE_IDLE_TIMEOUT);
	}
}

/**
 * amdgpu_vce_ring_begin_use - power up VCE
 *
 * @ring: amdgpu ring
 *
 * Make sure VCE is powered up when we want to use it
 */
void amdgpu_vce_ring_begin_use(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	bool set_clocks;

	if (amdgpu_sriov_vf(adev))
		return;

	mutex_lock(&adev->vce.idle_mutex);
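	/* If the idle work was no longer pending, VCE may already have been
	 * powered down, so bring the clocks and power back up.
	 */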
	set_clocks = !cancel_delayed_work_sync(&adev->vce.idle_work);
	if (set_clocks) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_vce(adev, true);
		} else {
			amdgpu_asic_set_vce_clocks(adev, 53300, 40000);
			amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
							       AMD_CG_STATE_UNGATE);
			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
							       AMD_PG_STATE_UNGATE);
		}
	}
	mutex_unlock(&adev->vce.idle_mutex);
}

/**
 * amdgpu_vce_ring_end_use - power VCE down
 *
 * @ring: amdgpu ring
 *
 * Schedule work to power VCE down again
 */
void amdgpu_vce_ring_end_use(struct amdgpu_ring *ring)
{
	if (!amdgpu_sriov_vf(ring->adev))
		schedule_delayed_work(&ring->adev->vce.idle_work, VCE_IDLE_TIMEOUT);
}

/**
 * amdgpu_vce_free_handles - free still open VCE handles
 *
 * @adev: amdgpu_device pointer
 * @filp: drm file pointer
 *
 * Close all VCE handles still open by this file pointer
 */
void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
{
	struct amdgpu_ring *ring = &adev->vce.ring[0];
	int i, r;

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		uint32_t handle = atomic_read(&adev->vce.handles[i]);

		if (!handle || adev->vce.filp[i] != filp)
			continue;

		r = amdgpu_vce_get_destroy_msg(ring, handle, false, NULL);
		if (r)
			DRM_ERROR("Error destroying VCE handle (%d)!\n", r);

		adev->vce.filp[i] = NULL;
		atomic_set(&adev->vce.handles[i], 0);
	}
}

/**
 * amdgpu_vce_get_create_msg - generate a VCE create msg
 *
 * @ring: ring we should submit the msg to
 * @handle: VCE session handle to use
 * @fence: optional fence to return
 *
 * Open up a stream for HW test
 */
static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
				     struct dma_fence **fence)
{
	const unsigned int ib_size_dw = 1024;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct amdgpu_ib ib_msg;
	struct dma_fence *f = NULL;
	uint64_t addr;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, &ring->adev->vce.entity,
				     AMDGPU_FENCE_OWNER_UNDEFINED,
				     ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
				     &job);
	if (r)
		return r;

	memset(&ib_msg, 0, sizeof(ib_msg));
	/* only one gpu page is needed, alloc +1 page to make addr aligned. */
	r = amdgpu_ib_get(ring->adev, NULL, AMDGPU_GPU_PAGE_SIZE * 2,
			  AMDGPU_IB_POOL_DIRECT,
			  &ib_msg);
	if (r)
		goto err;

	ib = &job->ibs[0];
	/* let addr point to page boundary */
	addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg.gpu_addr);

	/* stitch together a VCE create msg */
	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x0000000c; /* len */
	ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */
	ib->ptr[ib->length_dw++] = handle;

	if ((ring->adev->vce.fw_version >> 24) >= 52)
		ib->ptr[ib->length_dw++] = 0x00000040; /* len */
	else
		ib->ptr[ib->length_dw++] = 0x00000030; /* len */
	ib->ptr[ib->length_dw++] = 0x01000001; /* create cmd */
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000042;
	ib->ptr[ib->length_dw++] = 0x0000000a;
	ib->ptr[ib->length_dw++] = 0x00000001;
	ib->ptr[ib->length_dw++] = 0x00000080;
	ib->ptr[ib->length_dw++] = 0x00000060;
	ib->ptr[ib->length_dw++] = 0x00000100;
	ib->ptr[ib->length_dw++] = 0x00000100;
	ib->ptr[ib->length_dw++] = 0x0000000c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	if ((ring->adev->vce.fw_version >> 24) >= 52) {
		ib->ptr[ib->length_dw++] = 0x00000000;
		ib->ptr[ib->length_dw++] = 0x00000000;
		ib->ptr[ib->length_dw++] = 0x00000000;
		ib->ptr[ib->length_dw++] = 0x00000000;
	}

	ib->ptr[ib->length_dw++] = 0x00000014; /* len */
	ib->ptr[ib->length_dw++] = 0x05000005; /* feedback buffer */
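	/* note: the feedback buffer address is written high dword first */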
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = addr;
	ib->ptr[ib->length_dw++] = 0x00000001;

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_job_submit_direct(job, ring, &f);
	amdgpu_ib_free(ring->adev, &ib_msg, f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);
	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

/**
 * amdgpu_vce_get_destroy_msg - generate a VCE destroy msg
 *
 * @ring: ring we should submit the msg to
 * @handle: VCE session handle to use
 * @direct: direct or delayed pool
 * @fence: optional fence to return
 *
 * Close up a stream for HW test or if userspace failed to do so
 */
static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
				      bool direct, struct dma_fence **fence)
{
	const unsigned int ib_size_dw = 1024;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, &ring->adev->vce.entity,
				     AMDGPU_FENCE_OWNER_UNDEFINED,
				     ib_size_dw * 4,
				     direct ? AMDGPU_IB_POOL_DIRECT :
				     AMDGPU_IB_POOL_DELAYED, &job);
	if (r)
		return r;

	ib = &job->ibs[0];

	/* stitch together a VCE destroy msg */
	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x0000000c; /* len */
	ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */
	ib->ptr[ib->length_dw++] = handle;

	ib->ptr[ib->length_dw++] = 0x00000020; /* len */
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0xffffffff; /* next task info, set to 0xffffffff if none */
	ib->ptr[ib->length_dw++] = 0x00000001; /* destroy session */
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0xffffffff; /* feedback is not needed, set to 0xffffffff and firmware will not output feedback */
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008; /* len */
	ib->ptr[ib->length_dw++] = 0x02000001; /* destroy cmd */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	if (direct)
		r = amdgpu_job_submit_direct(job, ring, &f);
	else
		f = amdgpu_job_submit(job);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);
	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

/**
 * amdgpu_vce_validate_bo - make sure not to cross 4GB boundary
 *
 * @p: cs parser
 * @ib: indirect buffer to use
 * @lo: address of lower dword
 * @hi: address of higher dword
 * @size: minimum size
 * @index: bs/fb index
 *
 * Make sure that no BO crosses a 4GB boundary.
 */
static int amdgpu_vce_validate_bo(struct amdgpu_cs_parser *p,
				  struct amdgpu_ib *ib, int lo, int hi,
				  unsigned int size, int32_t index)
{
	int64_t offset = ((uint64_t)size) * ((int64_t)index);
	struct ttm_operation_ctx ctx = { false, false };
	struct amdgpu_bo_va_mapping *mapping;
	unsigned int i, fpfn, lpfn;
	struct amdgpu_bo *bo;
	uint64_t addr;
	int r;

	addr = ((uint64_t)amdgpu_ib_get_value(ib, lo)) |
	       ((uint64_t)amdgpu_ib_get_value(ib, hi)) << 32;
	if (index >= 0) {
		addr += offset;
		fpfn = PAGE_ALIGN(offset) >> PAGE_SHIFT;
		lpfn = 0x100000000ULL >> PAGE_SHIFT;
	} else {
		fpfn = 0;
		lpfn = (0x100000000ULL - PAGE_ALIGN(offset)) >> PAGE_SHIFT;
	}
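	/* Rough worked example, assuming 4 KiB pages and index >= 0: with
	 * size = 2 MiB and index = 3, offset is 6 MiB, so fpfn = 0x600 and
	 * lpfn = 0x100000; clamping the placement below keeps the whole
	 * indexed buffer under the 4 GB boundary.
	 */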

	r = amdgpu_cs_find_mapping(p, addr, &bo, &mapping);
	if (r) {
		DRM_ERROR("Can't find BO for addr 0x%010llx %d %d %d %d\n",
			  addr, lo, hi, size, index);
		return r;
	}

	for (i = 0; i < bo->placement.num_placement; ++i) {
		bo->placements[i].fpfn = max(bo->placements[i].fpfn, fpfn);
		bo->placements[i].lpfn = bo->placements[i].lpfn ?
			min(bo->placements[i].lpfn, lpfn) : lpfn;
	}
	return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
}

/**
 * amdgpu_vce_cs_reloc - command submission relocation
 *
 * @p: parser context
 * @ib: indirect buffer to use
 * @lo: address of lower dword
 * @hi: address of higher dword
 * @size: minimum size
 * @index: bs/fb index
 *
 * Patch relocation inside command stream with real buffer address
 */
static int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, struct amdgpu_ib *ib,
			       int lo, int hi, unsigned int size, uint32_t index)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_bo *bo;
	uint64_t addr;
	int r;

	if (index == 0xffffffff)
		index = 0;

	addr = ((uint64_t)amdgpu_ib_get_value(ib, lo)) |
	       ((uint64_t)amdgpu_ib_get_value(ib, hi)) << 32;
	addr += ((uint64_t)size) * ((uint64_t)index);

	r = amdgpu_cs_find_mapping(p, addr, &bo, &mapping);
	if (r) {
		DRM_ERROR("Can't find BO for addr 0x%010llx %d %d %d %d\n",
			  addr, lo, hi, size, index);
		return r;
	}

	if ((addr + (uint64_t)size) >
	    (mapping->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
		DRM_ERROR("BO too small for addr 0x%010llx %d %d\n",
			  addr, lo, hi);
		return -EINVAL;
	}

	addr -= mapping->start * AMDGPU_GPU_PAGE_SIZE;
	addr += amdgpu_bo_gpu_offset(bo);
	addr -= ((uint64_t)size) * ((uint64_t)index);
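	/* Net effect: the patched value is the BO's GPU offset plus the
	 * offset of the original address within its VA mapping, with the
	 * size * index bias added for the lookup and removed again here.
	 */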

	amdgpu_ib_set_value(ib, lo, lower_32_bits(addr));
	amdgpu_ib_set_value(ib, hi, upper_32_bits(addr));

	return 0;
}

/**
 * amdgpu_vce_validate_handle - validate stream handle
 *
 * @p: parser context
 * @handle: handle to validate
 * @allocated: allocated a new handle?
 *
 * Validates the handle and returns the found session index or -EINVAL
 * if we don't have another free session index.
 */
static int amdgpu_vce_validate_handle(struct amdgpu_cs_parser *p,
				      uint32_t handle, uint32_t *allocated)
{
	unsigned int i;

	/* validate the handle */
	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		if (atomic_read(&p->adev->vce.handles[i]) == handle) {
			if (p->adev->vce.filp[i] != p->filp) {
				DRM_ERROR("VCE handle collision detected!\n");
				return -EINVAL;
			}
			return i;
		}
	}

	/* handle not found, try to alloc a new one */
	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		if (!atomic_cmpxchg(&p->adev->vce.handles[i], 0, handle)) {
			p->adev->vce.filp[i] = p->filp;
			p->adev->vce.img_size[i] = 0;
			*allocated |= 1 << i;
			return i;
		}
	}

	DRM_ERROR("No more free VCE handles!\n");
	return -EINVAL;
}

/**
 * amdgpu_vce_ring_parse_cs - parse and validate the command stream
 *
 * @p: parser context
 * @job: the job to parse
 * @ib: the IB to patch
 */
int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p,
			     struct amdgpu_job *job,
			     struct amdgpu_ib *ib)
{
	unsigned int fb_idx = 0, bs_idx = 0;
	int session_idx = -1;
	uint32_t destroyed = 0;
	uint32_t created = 0;
	uint32_t allocated = 0;
	uint32_t tmp, handle = 0;
	uint32_t *size = &tmp;
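	/* size initially points at scratch space; a session command switches
	 * it to the per-session img_size slot
	 */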
	unsigned int idx;
	int i, r = 0;

	job->vm = NULL;
	ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);

	for (idx = 0; idx < ib->length_dw;) {
		uint32_t len = amdgpu_ib_get_value(ib, idx);
		uint32_t cmd = amdgpu_ib_get_value(ib, idx + 1);

		if ((len < 8) || (len & 3)) {
			DRM_ERROR("invalid VCE command length (%d)!\n", len);
			r = -EINVAL;
			goto out;
		}

		switch (cmd) {
		case 0x00000002: /* task info */
			fb_idx = amdgpu_ib_get_value(ib, idx + 6);
			bs_idx = amdgpu_ib_get_value(ib, idx + 7);
			break;

		case 0x03000001: /* encode */
			r = amdgpu_vce_validate_bo(p, ib, idx + 10, idx + 9,
						   0, 0);
			if (r)
				goto out;

			r = amdgpu_vce_validate_bo(p, ib, idx + 12, idx + 11,
						   0, 0);
			if (r)
				goto out;
			break;

		case 0x05000001: /* context buffer */
			r = amdgpu_vce_validate_bo(p, ib, idx + 3, idx + 2,
						   0, 0);
			if (r)
				goto out;
			break;

		case 0x05000004: /* video bitstream buffer */
			tmp = amdgpu_ib_get_value(ib, idx + 4);
			r = amdgpu_vce_validate_bo(p, ib, idx + 3, idx + 2,
						   tmp, bs_idx);
			if (r)
				goto out;
			break;

		case 0x05000005: /* feedback buffer */
			r = amdgpu_vce_validate_bo(p, ib, idx + 3, idx + 2,
						   4096, fb_idx);
			if (r)
				goto out;
			break;

		case 0x0500000d: /* MV buffer */
			r = amdgpu_vce_validate_bo(p, ib, idx + 3, idx + 2,
						   0, 0);
			if (r)
				goto out;

			r = amdgpu_vce_validate_bo(p, ib, idx + 8, idx + 7,
						   0, 0);
			if (r)
				goto out;
			break;
		}

		idx += len / 4;
	}

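	/* Second pass: validate session handles and patch the relocations,
	 * now that the first pass has restricted all BO placements to below
	 * the 4 GB boundary.
	 */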
	for (idx = 0; idx < ib->length_dw;) {
		uint32_t len = amdgpu_ib_get_value(ib, idx);
		uint32_t cmd = amdgpu_ib_get_value(ib, idx + 1);

		switch (cmd) {
		case 0x00000001: /* session */
			handle = amdgpu_ib_get_value(ib, idx + 2);
			session_idx = amdgpu_vce_validate_handle(p, handle,
								 &allocated);
			if (session_idx < 0) {
				r = session_idx;
				goto out;
			}
			size = &p->adev->vce.img_size[session_idx];
			break;

		case 0x00000002: /* task info */
			fb_idx = amdgpu_ib_get_value(ib, idx + 6);
			bs_idx = amdgpu_ib_get_value(ib, idx + 7);
			break;

		case 0x01000001: /* create */
			created |= 1 << session_idx;
			if (destroyed & (1 << session_idx)) {
				destroyed &= ~(1 << session_idx);
				allocated |= 1 << session_idx;

			} else if (!(allocated & (1 << session_idx))) {
				DRM_ERROR("Handle already in use!\n");
				r = -EINVAL;
				goto out;
			}

			*size = amdgpu_ib_get_value(ib, idx + 8) *
				amdgpu_ib_get_value(ib, idx + 10) *
				8 * 3 / 2;
			break;

		case 0x04000001: /* config extension */
		case 0x04000002: /* pic control */
		case 0x04000005: /* rate control */
		case 0x04000007: /* motion estimation */
		case 0x04000008: /* rdo */
		case 0x04000009: /* vui */
		case 0x05000002: /* auxiliary buffer */
		case 0x05000009: /* clock table */
			break;

		case 0x0500000c: /* hw config */
			switch (p->adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
			case CHIP_KAVERI:
			case CHIP_MULLINS:
#endif
			case CHIP_CARRIZO:
				break;
			default:
				r = -EINVAL;
				goto out;
			}
			break;

		case 0x03000001: /* encode */
			r = amdgpu_vce_cs_reloc(p, ib, idx + 10, idx + 9,
						*size, 0);
			if (r)
				goto out;

			r = amdgpu_vce_cs_reloc(p, ib, idx + 12, idx + 11,
						*size / 3, 0);
			if (r)
				goto out;
			break;

		case 0x02000001: /* destroy */
			destroyed |= 1 << session_idx;
			break;

		case 0x05000001: /* context buffer */
			r = amdgpu_vce_cs_reloc(p, ib, idx + 3, idx + 2,
						*size * 2, 0);
			if (r)
				goto out;
			break;

		case 0x05000004: /* video bitstream buffer */
			tmp = amdgpu_ib_get_value(ib, idx + 4);
			r = amdgpu_vce_cs_reloc(p, ib, idx + 3, idx + 2,
						tmp, bs_idx);
			if (r)
				goto out;
			break;

		case 0x05000005: /* feedback buffer */
			r = amdgpu_vce_cs_reloc(p, ib, idx + 3, idx + 2,
						4096, fb_idx);
			if (r)
				goto out;
			break;

		case 0x0500000d: /* MV buffer */
			r = amdgpu_vce_cs_reloc(p, ib, idx + 3,
						idx + 2, *size, 0);
			if (r)
				goto out;

			r = amdgpu_vce_cs_reloc(p, ib, idx + 8,
						idx + 7, *size / 12, 0);
			if (r)
				goto out;
			break;

		default:
			DRM_ERROR("invalid VCE command (0x%x)!\n", cmd);
			r = -EINVAL;
			goto out;
		}

		if (session_idx == -1) {
			DRM_ERROR("no session command at start of IB\n");
			r = -EINVAL;
			goto out;
		}

		idx += len / 4;
	}

	if (allocated & ~created) {
		DRM_ERROR("New session without create command!\n");
		r = -ENOENT;
	}

out:
	if (!r) {
		/* No error, free all destroyed handle slots */
		tmp = destroyed;
	} else {
		/* Error during parsing, free all allocated handle slots */
		tmp = allocated;
	}

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
		if (tmp & (1 << i))
			atomic_set(&p->adev->vce.handles[i], 0);

	return r;
}

/**
 * amdgpu_vce_ring_parse_cs_vm - parse the command stream in VM mode
 *
 * @p: parser context
 * @job: the job to parse
 * @ib: the IB to patch
 */
int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p,
				struct amdgpu_job *job,
				struct amdgpu_ib *ib)
{
	int session_idx = -1;
	uint32_t destroyed = 0;
	uint32_t created = 0;
	uint32_t allocated = 0;
	uint32_t tmp, handle = 0;
	int i, r = 0, idx = 0;

	while (idx < ib->length_dw) {
		uint32_t len = amdgpu_ib_get_value(ib, idx);
		uint32_t cmd = amdgpu_ib_get_value(ib, idx + 1);

		if ((len < 8) || (len & 3)) {
			DRM_ERROR("invalid VCE command length (%d)!\n", len);
			r = -EINVAL;
			goto out;
		}

		switch (cmd) {
		case 0x00000001: /* session */
			handle = amdgpu_ib_get_value(ib, idx + 2);
			session_idx = amdgpu_vce_validate_handle(p, handle,
								 &allocated);
			if (session_idx < 0) {
				r = session_idx;
				goto out;
			}
			break;

		case 0x01000001: /* create */
			created |= 1 << session_idx;
			if (destroyed & (1 << session_idx)) {
				destroyed &= ~(1 << session_idx);
				allocated |= 1 << session_idx;

			} else if (!(allocated & (1 << session_idx))) {
				DRM_ERROR("Handle already in use!\n");
				r = -EINVAL;
				goto out;
			}

			break;

		case 0x02000001: /* destroy */
			destroyed |= 1 << session_idx;
			break;

		default:
			break;
		}

		if (session_idx == -1) {
			DRM_ERROR("no session command at start of IB\n");
			r = -EINVAL;
			goto out;
		}

		idx += len / 4;
	}

	if (allocated & ~created) {
		DRM_ERROR("New session without create command!\n");
		r = -ENOENT;
	}

out:
	if (!r) {
		/* No error, free all destroyed handle slots */
		tmp = destroyed;
		amdgpu_ib_free(p->adev, ib, NULL);
	} else {
		/* Error during parsing, free all allocated handle slots */
		tmp = allocated;
	}

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
		if (tmp & (1 << i))
			atomic_set(&p->adev->vce.handles[i], 0);

	return r;
}

/**
 * amdgpu_vce_ring_emit_ib - execute indirect buffer
 *
 * @ring: engine to use
 * @job: job to retrieve vmid from
 * @ib: the IB to execute
 * @flags: unused
 *
 */
void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring,
			     struct amdgpu_job *job,
			     struct amdgpu_ib *ib,
			     uint32_t flags)
{
	amdgpu_ring_write(ring, VCE_CMD_IB);
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
}

/**
 * amdgpu_vce_ring_emit_fence - add a fence command to the ring
 *
 * @ring: engine to use
 * @addr: address
 * @seq: sequence number
 * @flags: fence related flags
 *
 */
void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				unsigned int flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, VCE_CMD_FENCE);
	amdgpu_ring_write(ring, addr);
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, VCE_CMD_TRAP);
	amdgpu_ring_write(ring, VCE_CMD_END);
}

/**
 * amdgpu_vce_ring_test_ring - test if VCE ring is working
 *
 * @ring: the engine to test on
 *
 */
int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr;
	unsigned int i;
	int r, timeout = adev->usec_timeout;

	/* skip ring test for sriov */
	if (amdgpu_sriov_vf(adev))
		return 0;

	r = amdgpu_ring_alloc(ring, 16);
	if (r)
		return r;

	rptr = amdgpu_ring_get_rptr(ring);

	amdgpu_ring_write(ring, VCE_CMD_END);
	amdgpu_ring_commit(ring);

	for (i = 0; i < timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		udelay(1);
	}

	if (i >= timeout)
		r = -ETIMEDOUT;

	return r;
}

/**
 * amdgpu_vce_ring_test_ib - test if VCE IBs are working
 *
 * @ring: the engine to test on
 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 */
int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence = NULL;
	long r;

	/* skip vce ring1/2 ib test for now, since it's not reliable */
	if (ring != &ring->adev->vce.ring[0])
		return 0;

	r = amdgpu_vce_get_create_msg(ring, 1, NULL);
	if (r)
		goto error;

	r = amdgpu_vce_get_destroy_msg(ring, 1, true, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0)
		r = -ETIMEDOUT;
	else if (r > 0)
		r = 0;

error:
	dma_fence_put(fence);
	return r;
}

enum amdgpu_ring_priority_level amdgpu_vce_get_ring_prio(int ring)
{
	switch (ring) {
	case 0:
		return AMDGPU_RING_PRIO_0;
	case 1:
		return AMDGPU_RING_PRIO_1;
	case 2:
		return AMDGPU_RING_PRIO_2;
	default:
		return AMDGPU_RING_PRIO_0;
	}
}