Lines matching refs:uvd (uses of adev->uvd in drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c)
145 INIT_DELAYED_WORK(&adev->uvd.idle_work, amdgpu_uvd_idle_work_handler); in amdgpu_uvd_sw_init()
216 r = request_firmware(&adev->uvd.fw, fw_name, adev->dev); in amdgpu_uvd_sw_init()
223 r = amdgpu_ucode_validate(adev->uvd.fw); in amdgpu_uvd_sw_init()
227 release_firmware(adev->uvd.fw); in amdgpu_uvd_sw_init()
228 adev->uvd.fw = NULL; in amdgpu_uvd_sw_init()
233 adev->uvd.max_handles = AMDGPU_DEFAULT_UVD_HANDLES; in amdgpu_uvd_sw_init()
235 hdr = (const struct common_firmware_header *)adev->uvd.fw->data; in amdgpu_uvd_sw_init()
254 adev->uvd.max_handles = AMDGPU_MAX_UVD_HANDLES; in amdgpu_uvd_sw_init()
256 adev->uvd.fw_version = ((version_major << 24) | (version_minor << 16) | in amdgpu_uvd_sw_init()
261 (adev->uvd.fw_version < FW_1_66_16)) in amdgpu_uvd_sw_init()
273 adev->uvd.max_handles = AMDGPU_MAX_UVD_HANDLES; in amdgpu_uvd_sw_init()
275 adev->uvd.fw_version = le32_to_cpu(hdr->ucode_version); in amdgpu_uvd_sw_init()
279 + AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles; in amdgpu_uvd_sw_init()
283 for (j = 0; j < adev->uvd.num_uvd_inst; j++) { in amdgpu_uvd_sw_init()
284 if (adev->uvd.harvest_config & (1 << j)) in amdgpu_uvd_sw_init()
287 AMDGPU_GEM_DOMAIN_VRAM, &adev->uvd.inst[j].vcpu_bo, in amdgpu_uvd_sw_init()
288 &adev->uvd.inst[j].gpu_addr, &adev->uvd.inst[j].cpu_addr); in amdgpu_uvd_sw_init()
295 for (i = 0; i < adev->uvd.max_handles; ++i) { in amdgpu_uvd_sw_init()
296 atomic_set(&adev->uvd.handles[i], 0); in amdgpu_uvd_sw_init()
297 adev->uvd.filp[i] = NULL; in amdgpu_uvd_sw_init()
302 adev->uvd.address_64_bit = true; in amdgpu_uvd_sw_init()
306 adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_65_10; in amdgpu_uvd_sw_init()
309 adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_87_11; in amdgpu_uvd_sw_init()
312 adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_87_12; in amdgpu_uvd_sw_init()
315 adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_37_15; in amdgpu_uvd_sw_init()
318 adev->uvd.use_ctx_buf = adev->asic_type >= CHIP_POLARIS10; in amdgpu_uvd_sw_init()
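The sw_init hits above cover firmware load and feature detection: the firmware is requested and validated (lines 216-228), its version is packed into adev->uvd.fw_version (line 256), and that packed value gates both max_handles and use_ctx_buf (lines 254-318). A minimal userspace C sketch of the packing-and-compare scheme follows; placing a family id in bits 8-15 and the threshold value used are assumptions for illustration, not taken from the driver:

#include <stdint.h>
#include <stdio.h>

/*
 * Pack major/minor/family the way line 256 suggests.  Putting the family
 * id in bits 8-15 is an assumption for this sketch.
 */
static uint32_t pack_fw_version(uint32_t major, uint32_t minor, uint32_t family)
{
    return (major << 24) | (minor << 16) | (family << 8);
}

int main(void)
{
    /* Illustrative threshold standing in for constants like FW_1_65_10. */
    const uint32_t fw_1_65_10 = pack_fw_version(1, 65, 10);
    uint32_t fw_version = pack_fw_version(1, 66, 16);

    /* Mirrors line 306: use_ctx_buf = fw_version >= FW_1_65_10. */
    int use_ctx_buf = fw_version >= fw_1_65_10;

    printf("fw_version=0x%08x use_ctx_buf=%d\n", fw_version, use_ctx_buf);
    return 0;
}

Packing the version this way keeps every firmware gate a single unsigned compare, which is why constants such as FW_1_66_16 can appear directly in conditions like the one at line 261.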
328 cancel_delayed_work_sync(&adev->uvd.idle_work); in amdgpu_uvd_sw_fini()
329 drm_sched_entity_destroy(&adev->uvd.entity); in amdgpu_uvd_sw_fini()
331 for (j = 0; j < adev->uvd.num_uvd_inst; ++j) { in amdgpu_uvd_sw_fini()
332 if (adev->uvd.harvest_config & (1 << j)) in amdgpu_uvd_sw_fini()
334 kvfree(adev->uvd.inst[j].saved_bo); in amdgpu_uvd_sw_fini()
336 amdgpu_bo_free_kernel(&adev->uvd.inst[j].vcpu_bo, in amdgpu_uvd_sw_fini()
337 &adev->uvd.inst[j].gpu_addr, in amdgpu_uvd_sw_fini()
338 (void **)&adev->uvd.inst[j].cpu_addr); in amdgpu_uvd_sw_fini()
340 amdgpu_ring_fini(&adev->uvd.inst[j].ring); in amdgpu_uvd_sw_fini()
343 amdgpu_ring_fini(&adev->uvd.inst[j].ring_enc[i]); in amdgpu_uvd_sw_fini()
345 release_firmware(adev->uvd.fw); in amdgpu_uvd_sw_fini()
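Both the per-instance setup loop (lines 283-288) and the teardown loop (lines 331-343), like suspend and resume later on, guard each iteration with the same harvest_config bitmask test. A self-contained sketch of that skip-by-bitmask pattern; the instance count and mask value are made up for illustration:

#include <stdio.h>

#define NUM_INST 2    /* illustrative instance count */

int main(void)
{
    unsigned int harvest_config = 1u << 1;    /* pretend instance 1 is harvested */

    for (int j = 0; j < NUM_INST; j++) {
        /* Same guard as lines 284, 332 and 394: skip harvested instances. */
        if (harvest_config & (1u << j))
            continue;
        printf("instance %d: init/teardown runs\n", j);
    }
    return 0;
}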
362 ring = &adev->uvd.inst[0].ring; in amdgpu_uvd_entity_init()
364 r = drm_sched_entity_init(&adev->uvd.entity, DRM_SCHED_PRIORITY_NORMAL, in amdgpu_uvd_entity_init()
381 cancel_delayed_work_sync(&adev->uvd.idle_work); in amdgpu_uvd_suspend()
385 for (i = 0; i < adev->uvd.max_handles; ++i) in amdgpu_uvd_suspend()
386 if (atomic_read(&adev->uvd.handles[i])) in amdgpu_uvd_suspend()
389 if (i == adev->uvd.max_handles) in amdgpu_uvd_suspend()
393 for (j = 0; j < adev->uvd.num_uvd_inst; ++j) { in amdgpu_uvd_suspend()
394 if (adev->uvd.harvest_config & (1 << j)) in amdgpu_uvd_suspend()
396 if (adev->uvd.inst[j].vcpu_bo == NULL) in amdgpu_uvd_suspend()
399 size = amdgpu_bo_size(adev->uvd.inst[j].vcpu_bo); in amdgpu_uvd_suspend()
400 ptr = adev->uvd.inst[j].cpu_addr; in amdgpu_uvd_suspend()
402 adev->uvd.inst[j].saved_bo = kvmalloc(size, GFP_KERNEL); in amdgpu_uvd_suspend()
403 if (!adev->uvd.inst[j].saved_bo) in amdgpu_uvd_suspend()
408 memset(adev->uvd.inst[j].saved_bo, 0, size); in amdgpu_uvd_suspend()
410 memcpy_fromio(adev->uvd.inst[j].saved_bo, ptr, size); in amdgpu_uvd_suspend()
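amdgpu_uvd_suspend bails out early if no handle is in use (lines 385-389); otherwise, for each non-harvested instance, it snapshots the VCPU BO into a kvmalloc'd saved_bo, either zero-filling it or copying the BO contents out with memcpy_fromio (lines 402-410). The condition choosing between those two paths is not visible in the hits, so the sketch below takes it as a flag; the struct and function names are invented, with malloc/memcpy standing in for kvmalloc/memcpy_fromio:

#include <stdlib.h>
#include <string.h>

/* Invented stand-in for one UVD instance's VCPU BO mapping. */
struct uvd_inst_sketch {
    void *cpu_addr;    /* CPU mapping of the VCPU BO */
    size_t size;       /* what amdgpu_bo_size() reports */
    void *saved_bo;    /* shadow copy kept across suspend */
};

/* Snapshot the BO so resume can restore it; returns 0, or -1 on allocation failure. */
static int uvd_suspend_save(struct uvd_inst_sketch *inst, int copy_contents)
{
    inst->saved_bo = malloc(inst->size);    /* kvmalloc() in the driver */
    if (!inst->saved_bo)
        return -1;

    if (copy_contents)
        memcpy(inst->saved_bo, inst->cpu_addr, inst->size);    /* memcpy_fromio() */
    else
        memset(inst->saved_bo, 0, inst->size);    /* content rebuilt from fw later */
    return 0;
}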
425 for (i = 0; i < adev->uvd.num_uvd_inst; i++) { in amdgpu_uvd_resume()
426 if (adev->uvd.harvest_config & (1 << i)) in amdgpu_uvd_resume()
428 if (adev->uvd.inst[i].vcpu_bo == NULL) in amdgpu_uvd_resume()
431 size = amdgpu_bo_size(adev->uvd.inst[i].vcpu_bo); in amdgpu_uvd_resume()
432 ptr = adev->uvd.inst[i].cpu_addr; in amdgpu_uvd_resume()
434 if (adev->uvd.inst[i].saved_bo != NULL) { in amdgpu_uvd_resume()
435 memcpy_toio(ptr, adev->uvd.inst[i].saved_bo, size); in amdgpu_uvd_resume()
436 kvfree(adev->uvd.inst[i].saved_bo); in amdgpu_uvd_resume()
437 adev->uvd.inst[i].saved_bo = NULL; in amdgpu_uvd_resume()
442 hdr = (const struct common_firmware_header *)adev->uvd.fw->data; in amdgpu_uvd_resume()
445 memcpy_toio(adev->uvd.inst[i].cpu_addr, adev->uvd.fw->data + offset, in amdgpu_uvd_resume()
452 amdgpu_fence_driver_force_completion(&adev->uvd.inst[i].ring); in amdgpu_uvd_resume()
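Resume is the mirror image: if a snapshot exists it is copied back and freed (lines 434-437); if not, the firmware payload is copied into the BO again from uvd.fw->data at the header's offset and outstanding fences are force-completed (lines 442-452). A sketch of that restore-or-reload decision; fw_image and fw_size are assumed inputs standing in for the common-firmware-header parsing:

#include <stdlib.h>
#include <string.h>

/* Invented stand-in for one UVD instance's VCPU BO mapping. */
struct uvd_inst_sketch {
    void *cpu_addr;
    size_t size;
    void *saved_bo;    /* set by the suspend path, or NULL */
};

/*
 * Restore the VCPU BO after suspend.  fw_image/fw_size describe the raw
 * firmware payload; the real driver derives them from the firmware header
 * (lines 442-445), so both are assumptions here.
 */
static void uvd_resume_restore(struct uvd_inst_sketch *inst,
                               const void *fw_image, size_t fw_size)
{
    if (inst->saved_bo) {
        /* Preferred path: put back exactly what suspend saved. */
        memcpy(inst->cpu_addr, inst->saved_bo, inst->size);    /* memcpy_toio() */
        free(inst->saved_bo);                                  /* kvfree() */
        inst->saved_bo = NULL;
    } else {
        /* No snapshot: reload the firmware image into the BO instead. */
        memcpy(inst->cpu_addr, fw_image, fw_size);
    }
}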
460 struct amdgpu_ring *ring = &adev->uvd.inst[0].ring; in amdgpu_uvd_free_handles()
463 for (i = 0; i < adev->uvd.max_handles; ++i) { in amdgpu_uvd_free_handles()
464 uint32_t handle = atomic_read(&adev->uvd.handles[i]); in amdgpu_uvd_free_handles()
466 if (handle != 0 && adev->uvd.filp[i] == filp) { in amdgpu_uvd_free_handles()
479 adev->uvd.filp[i] = NULL; in amdgpu_uvd_free_handles()
480 atomic_set(&adev->uvd.handles[i], 0); in amdgpu_uvd_free_handles()
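amdgpu_uvd_free_handles scans the handle table on file close and, for every slot owned by the closing filp (line 466), sends a destroy message on inst[0].ring and clears the slot (lines 479-480). A compact sketch of that ownership scan, using C11 atomics in place of the kernel's atomic_t and reducing the destroy submission to a comment; the array size and names are invented:

#include <stdatomic.h>
#include <stddef.h>

#define MAX_HANDLES 16    /* stands in for adev->uvd.max_handles */

static atomic_uint handles[MAX_HANDLES];    /* 0 means the slot is free */
static void *handle_filp[MAX_HANDLES];      /* owning file pointer per slot */

/* Drop every session owned by the file that is being closed. */
static void uvd_free_handles(void *filp)
{
    for (size_t i = 0; i < MAX_HANDLES; ++i) {
        unsigned int handle = atomic_load(&handles[i]);

        if (handle != 0 && handle_filp[i] == filp) {
            /* The driver submits a destroy message on inst[0].ring here. */
            handle_filp[i] = NULL;
            atomic_store(&handles[i], 0);
        }
    }
}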
529 if (!ctx->parser->adev->uvd.address_64_bit) { in amdgpu_uvd_cs_pass1()
687 if (!adev->uvd.use_ctx_buf) { in amdgpu_uvd_cs_msg_decode()
735 adev->uvd.decode_image_width = width; in amdgpu_uvd_cs_msg_decode()
785 for (i = 0; i < adev->uvd.max_handles; ++i) { in amdgpu_uvd_cs_msg()
786 if (atomic_read(&adev->uvd.handles[i]) == handle) { in amdgpu_uvd_cs_msg()
792 if (!atomic_cmpxchg(&adev->uvd.handles[i], 0, handle)) { in amdgpu_uvd_cs_msg()
793 adev->uvd.filp[i] = ctx->parser->filp; in amdgpu_uvd_cs_msg()
809 for (i = 0; i < adev->uvd.max_handles; ++i) { in amdgpu_uvd_cs_msg()
810 if (atomic_read(&adev->uvd.handles[i]) == handle) { in amdgpu_uvd_cs_msg()
811 if (adev->uvd.filp[i] != ctx->parser->filp) { in amdgpu_uvd_cs_msg()
824 for (i = 0; i < adev->uvd.max_handles; ++i) in amdgpu_uvd_cs_msg()
825 atomic_cmpxchg(&adev->uvd.handles[i], handle, 0); in amdgpu_uvd_cs_msg()
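amdgpu_uvd_cs_msg drives the same table from the command-stream side: a create message claims the first free slot with atomic_cmpxchg and records the owner (lines 792-793), a decode message checks both existence and ownership (lines 810-811), and a destroy message clears matching slots (lines 824-825). A C11-atomics sketch of the claim step; the table size and return convention are placeholders:

#include <stdatomic.h>
#include <stddef.h>

#define MAX_HANDLES 16    /* placeholder for adev->uvd.max_handles */

static atomic_uint handles[MAX_HANDLES];
static void *handle_filp[MAX_HANDLES];

/*
 * Claim a free slot for a new session handle, mirroring the
 * atomic_cmpxchg(&handles[i], 0, handle) at line 792.
 * Returns 0 on success, -1 if the handle already exists or the table is full.
 */
static int uvd_claim_handle(unsigned int handle, void *filp)
{
    /* Reject duplicate handles, as line 786 does on the create path. */
    for (size_t i = 0; i < MAX_HANDLES; ++i)
        if (atomic_load(&handles[i]) == handle)
            return -1;

    for (size_t i = 0; i < MAX_HANDLES; ++i) {
        unsigned int expected = 0;

        /* Only one thread can swing a slot from 0 to the new handle. */
        if (atomic_compare_exchange_strong(&handles[i], &expected, handle)) {
            handle_filp[i] = filp;
            return 0;
        }
    }
    return -1;    /* no free slot */
}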
893 if (!ctx->parser->adev->uvd.address_64_bit) { in amdgpu_uvd_cs_pass2()
901 (start >> 28) != (ctx->parser->adev->uvd.inst->gpu_addr >> 28)) { in amdgpu_uvd_cs_pass2()
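Line 901 is the key restriction in cs_pass2: when 64-bit addressing is unavailable, a buffer is rejected unless start >> 28 matches the instance's gpu_addr >> 28, i.e. unless it sits in the same 256 MiB window as the VCPU BO. A one-function sketch of that check:

#include <stdbool.h>
#include <stdint.h>

/*
 * True if two GPU addresses fall in the same 256 MiB segment:
 * 2^28 bytes = 256 MiB, hence the ">> 28" in line 901's comparison.
 */
static bool same_256mb_segment(uint64_t start, uint64_t vcpu_bo_gpu_addr)
{
    return (start >> 28) == (vcpu_bo_gpu_addr >> 28);
}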
1038 if (!parser->adev->uvd.address_64_bit) { in amdgpu_uvd_ring_parse_cs()
1075 if (!ring->adev->uvd.address_64_bit) { in amdgpu_uvd_send_msg()
1134 r = amdgpu_job_submit(job, &adev->uvd.entity, in amdgpu_uvd_send_msg()
1222 container_of(work, struct amdgpu_device, uvd.idle_work.work); in amdgpu_uvd_idle_work_handler()
1225 for (i = 0; i < adev->uvd.num_uvd_inst; ++i) { in amdgpu_uvd_idle_work_handler()
1226 if (adev->uvd.harvest_config & (1 << i)) in amdgpu_uvd_idle_work_handler()
1228 fences += amdgpu_fence_count_emitted(&adev->uvd.inst[i].ring); in amdgpu_uvd_idle_work_handler()
1229 for (j = 0; j < adev->uvd.num_enc_rings; ++j) { in amdgpu_uvd_idle_work_handler()
1230 fences += amdgpu_fence_count_emitted(&adev->uvd.inst[i].ring_enc[j]); in amdgpu_uvd_idle_work_handler()
1246 schedule_delayed_work(&adev->uvd.idle_work, UVD_IDLE_TIMEOUT); in amdgpu_uvd_idle_work_handler()
1258 set_clocks = !cancel_delayed_work_sync(&adev->uvd.idle_work); in amdgpu_uvd_ring_begin_use()
1275 schedule_delayed_work(&ring->adev->uvd.idle_work, UVD_IDLE_TIMEOUT); in amdgpu_uvd_ring_end_use()
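The last three functions form the idle/power-gating loop: the delayed worker sums fences still outstanding on every non-harvested decode and encode ring (lines 1225-1230) and re-arms itself only while work remains (line 1246); ring_begin_use cancels that worker, treating a successful cancel as "clocks are still up" (line 1258), and ring_end_use re-arms it (line 1275). A reduced sketch of the accounting decision, with the workqueue machinery abstracted away and all sizes illustrative:

#include <stdbool.h>
#include <stddef.h>

#define NUM_INST 2        /* illustrative */
#define NUM_ENC_RINGS 2   /* illustrative */

/* Invented snapshot of the fence counters the idle worker looks at. */
struct uvd_idle_sketch {
    unsigned int harvest_config;
    unsigned int dec_fences[NUM_INST];                  /* per decode ring */
    unsigned int enc_fences[NUM_INST][NUM_ENC_RINGS];   /* per encode ring */
};

/*
 * Mirrors the accounting in amdgpu_uvd_idle_work_handler: sum emitted
 * fences across all non-harvested instances.  If anything is pending the
 * caller re-arms the delayed work; otherwise it may gate UVD clocks.
 */
static bool uvd_should_rearm_idle_work(const struct uvd_idle_sketch *s)
{
    unsigned int fences = 0;

    for (size_t i = 0; i < NUM_INST; ++i) {
        if (s->harvest_config & (1u << i))
            continue;
        fences += s->dec_fences[i];
        for (size_t j = 0; j < NUM_ENC_RINGS; ++j)
            fences += s->enc_fences[i][j];
    }
    return fences != 0;
}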
1322 for (i = 0; i < adev->uvd.max_handles; ++i) { in amdgpu_uvd_used_handles()
1328 if (atomic_read(&adev->uvd.handles[i])) in amdgpu_uvd_used_handles()