Lines matching refs:uvd (cross-reference hits from drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c; the number leading each hit is its line in that source file, and the trailing name is the containing function)

155 if (adev->uvd.address_64_bit) in amdgpu_uvd_create_msg_bo_helper()
192 INIT_DELAYED_WORK(&adev->uvd.idle_work, amdgpu_uvd_idle_work_handler); in amdgpu_uvd_sw_init()
263 r = request_firmware(&adev->uvd.fw, fw_name, adev->dev); in amdgpu_uvd_sw_init()
270 r = amdgpu_ucode_validate(adev->uvd.fw); in amdgpu_uvd_sw_init()
274 release_firmware(adev->uvd.fw); in amdgpu_uvd_sw_init()
275 adev->uvd.fw = NULL; in amdgpu_uvd_sw_init()
280 adev->uvd.max_handles = AMDGPU_DEFAULT_UVD_HANDLES; in amdgpu_uvd_sw_init()
282 hdr = (const struct common_firmware_header *)adev->uvd.fw->data; in amdgpu_uvd_sw_init()
301 adev->uvd.max_handles = AMDGPU_MAX_UVD_HANDLES; in amdgpu_uvd_sw_init()
303 adev->uvd.fw_version = ((version_major << 24) | (version_minor << 16) | in amdgpu_uvd_sw_init()
308 (adev->uvd.fw_version < FW_1_66_16)) in amdgpu_uvd_sw_init()
320 adev->uvd.max_handles = AMDGPU_MAX_UVD_HANDLES; in amdgpu_uvd_sw_init()
322 adev->uvd.fw_version = le32_to_cpu(hdr->ucode_version); in amdgpu_uvd_sw_init()
326 + AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles; in amdgpu_uvd_sw_init()
330 for (j = 0; j < adev->uvd.num_uvd_inst; j++) { in amdgpu_uvd_sw_init()
331 if (adev->uvd.harvest_config & (1 << j)) in amdgpu_uvd_sw_init()
334 AMDGPU_GEM_DOMAIN_VRAM, &adev->uvd.inst[j].vcpu_bo, in amdgpu_uvd_sw_init()
335 &adev->uvd.inst[j].gpu_addr, &adev->uvd.inst[j].cpu_addr); in amdgpu_uvd_sw_init()
342 for (i = 0; i < adev->uvd.max_handles; ++i) { in amdgpu_uvd_sw_init()
343 atomic_set(&adev->uvd.handles[i], 0); in amdgpu_uvd_sw_init()
344 adev->uvd.filp[i] = NULL; in amdgpu_uvd_sw_init()
349 adev->uvd.address_64_bit = true; in amdgpu_uvd_sw_init()
351 r = amdgpu_uvd_create_msg_bo_helper(adev, 128 << 10, &adev->uvd.ib_bo); in amdgpu_uvd_sw_init()
357 adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_65_10; in amdgpu_uvd_sw_init()
360 adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_87_11; in amdgpu_uvd_sw_init()
363 adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_87_12; in amdgpu_uvd_sw_init()
366 adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_37_15; in amdgpu_uvd_sw_init()
369 adev->uvd.use_ctx_buf = adev->asic_type >= CHIP_POLARIS10; in amdgpu_uvd_sw_init()
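The sw_init hits above (lines 303 and 357..369) rely on firmware versions being packed into a single u32 so a plain ">=" orders them. A minimal standalone sketch of that packing, assuming the byte layout of the upstream FW_x_y_z macros (major in bits 24..31, minor in 16..23, revision in 8..15):

#include <stdint.h>
#include <stdio.h>

/* pack major/minor/revision so integer comparison orders versions */
#define FW_VER(maj, min, rev) \
	(((uint32_t)(maj) << 24) | ((uint32_t)(min) << 16) | ((uint32_t)(rev) << 8))

#define FW_1_65_10 FW_VER(1, 65, 10)
#define FW_1_66_16 FW_VER(1, 66, 16)

int main(void)
{
	uint32_t fw_version = FW_VER(1, 66, 16);

	/* the use_ctx_buf gates on lines 357..366 are this comparison */
	printf("use_ctx_buf = %d\n", fw_version >= FW_1_65_10);
	/* the quirk check on line 308 is the inverse: firmware too old */
	printf("needs workaround = %d\n", fw_version < FW_1_66_16);
	return 0;
}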
377 void *addr = amdgpu_bo_kptr(adev->uvd.ib_bo); in amdgpu_uvd_sw_fini()
380 drm_sched_entity_destroy(&adev->uvd.entity); in amdgpu_uvd_sw_fini()
382 for (j = 0; j < adev->uvd.num_uvd_inst; ++j) { in amdgpu_uvd_sw_fini()
383 if (adev->uvd.harvest_config & (1 << j)) in amdgpu_uvd_sw_fini()
385 kvfree(adev->uvd.inst[j].saved_bo); in amdgpu_uvd_sw_fini()
387 amdgpu_bo_free_kernel(&adev->uvd.inst[j].vcpu_bo, in amdgpu_uvd_sw_fini()
388 &adev->uvd.inst[j].gpu_addr, in amdgpu_uvd_sw_fini()
389 (void **)&adev->uvd.inst[j].cpu_addr); in amdgpu_uvd_sw_fini()
391 amdgpu_ring_fini(&adev->uvd.inst[j].ring); in amdgpu_uvd_sw_fini()
394 amdgpu_ring_fini(&adev->uvd.inst[j].ring_enc[i]); in amdgpu_uvd_sw_fini()
396 amdgpu_bo_free_kernel(&adev->uvd.ib_bo, NULL, &addr); in amdgpu_uvd_sw_fini()
397 release_firmware(adev->uvd.fw); in amdgpu_uvd_sw_fini()
414 ring = &adev->uvd.inst[0].ring; in amdgpu_uvd_entity_init()
416 r = drm_sched_entity_init(&adev->uvd.entity, DRM_SCHED_PRIORITY_NORMAL, in amdgpu_uvd_entity_init()
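The entity_init hits (lines 414..416) bind one scheduler entity to the first instance's decode ring. A sketch against the upstream drm_sched_entity_init() signature; the wrapper name and the way the scheduler pointer is passed in are illustrative:

#include <drm/gpu_scheduler.h>

/* one entity, normal priority, scheduled only on the given ring's
 * scheduler -- matching the single-element sched list on line 416 */
static int uvd_entity_init_sketch(struct drm_sched_entity *entity,
				  struct drm_gpu_scheduler *ring_sched)
{
	struct drm_gpu_scheduler *sched = ring_sched;

	return drm_sched_entity_init(entity, DRM_SCHED_PRIORITY_NORMAL,
				     &sched, 1, NULL);
}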
433 cancel_delayed_work_sync(&adev->uvd.idle_work); in amdgpu_uvd_suspend()
437 for (i = 0; i < adev->uvd.max_handles; ++i) in amdgpu_uvd_suspend()
438 if (atomic_read(&adev->uvd.handles[i])) in amdgpu_uvd_suspend()
441 if (i == adev->uvd.max_handles) in amdgpu_uvd_suspend()
445 for (j = 0; j < adev->uvd.num_uvd_inst; ++j) { in amdgpu_uvd_suspend()
446 if (adev->uvd.harvest_config & (1 << j)) in amdgpu_uvd_suspend()
448 if (adev->uvd.inst[j].vcpu_bo == NULL) in amdgpu_uvd_suspend()
451 size = amdgpu_bo_size(adev->uvd.inst[j].vcpu_bo); in amdgpu_uvd_suspend()
452 ptr = adev->uvd.inst[j].cpu_addr; in amdgpu_uvd_suspend()
454 adev->uvd.inst[j].saved_bo = kvmalloc(size, GFP_KERNEL); in amdgpu_uvd_suspend()
455 if (!adev->uvd.inst[j].saved_bo) in amdgpu_uvd_suspend()
461 memset(adev->uvd.inst[j].saved_bo, 0, size); in amdgpu_uvd_suspend()
463 memcpy_fromio(adev->uvd.inst[j].saved_bo, ptr, size); in amdgpu_uvd_suspend()
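Taken together, the suspend hits (lines 433..463) implement a snapshot: cancel the idle timer, bail out early when no session is open, then copy each instance's VCPU BO out of VRAM into pageable memory. A condensed sketch, assuming kernel headers; the struct is a simplified stand-in for adev->uvd.inst[j]:

#include <linux/atomic.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/mm.h>

struct uvd_inst_sketch {
	void __iomem *cpu_addr;		/* kernel mapping of the VCPU BO */
	void *saved_bo;			/* system-memory snapshot */
	size_t vcpu_bo_size;
};

static int uvd_save_sketch(struct uvd_inst_sketch *inst,
			   atomic_t *handles, unsigned int max_handles)
{
	unsigned int i;

	/* lines 437..441: nothing to preserve if no session is open */
	for (i = 0; i < max_handles; ++i)
		if (atomic_read(&handles[i]))
			break;
	if (i == max_handles)
		return 0;

	inst->saved_bo = kvmalloc(inst->vcpu_bo_size, GFP_KERNEL);
	if (!inst->saved_bo)
		return -ENOMEM;

	/* line 463: the BO may live in VRAM, hence the _fromio copy;
	 * line 461 shows a memset fallback when it cannot be read */
	memcpy_fromio(inst->saved_bo, inst->cpu_addr, inst->vcpu_bo_size);
	return 0;
}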
481 for (i = 0; i < adev->uvd.num_uvd_inst; i++) { in amdgpu_uvd_resume()
482 if (adev->uvd.harvest_config & (1 << i)) in amdgpu_uvd_resume()
484 if (adev->uvd.inst[i].vcpu_bo == NULL) in amdgpu_uvd_resume()
487 size = amdgpu_bo_size(adev->uvd.inst[i].vcpu_bo); in amdgpu_uvd_resume()
488 ptr = adev->uvd.inst[i].cpu_addr; in amdgpu_uvd_resume()
490 if (adev->uvd.inst[i].saved_bo != NULL) { in amdgpu_uvd_resume()
492 memcpy_toio(ptr, adev->uvd.inst[i].saved_bo, size); in amdgpu_uvd_resume()
495 kvfree(adev->uvd.inst[i].saved_bo); in amdgpu_uvd_resume()
496 adev->uvd.inst[i].saved_bo = NULL; in amdgpu_uvd_resume()
501 hdr = (const struct common_firmware_header *)adev->uvd.fw->data; in amdgpu_uvd_resume()
505 memcpy_toio(adev->uvd.inst[i].cpu_addr, adev->uvd.fw->data + offset, in amdgpu_uvd_resume()
514 amdgpu_fence_driver_force_completion(&adev->uvd.inst[i].ring); in amdgpu_uvd_resume()
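The resume hits (lines 481..514) are the mirror image: restore the snapshot if one exists, otherwise re-upload the firmware image and force-complete whatever fences the engine lost. A condensed sketch, assuming the amdgpu ucode header (struct common_firmware_header with ucode_array_offset_bytes/ucode_size_bytes) and the simplified instance struct from the suspend sketch:

#include <linux/firmware.h>

static void uvd_restore_sketch(struct uvd_inst_sketch *inst,
			       const struct firmware *fw)
{
	if (inst->saved_bo) {
		/* lines 490..496: put the snapshot back and drop it */
		memcpy_toio(inst->cpu_addr, inst->saved_bo,
			    inst->vcpu_bo_size);
		kvfree(inst->saved_bo);
		inst->saved_bo = NULL;
	} else {
		/* lines 501..505: no snapshot, re-copy the ucode image */
		const struct common_firmware_header *hdr =
			(const struct common_firmware_header *)fw->data;
		unsigned int offset =
			le32_to_cpu(hdr->ucode_array_offset_bytes);

		memcpy_toio(inst->cpu_addr, fw->data + offset,
			    le32_to_cpu(hdr->ucode_size_bytes));
		/* line 514: pending fences are then force-completed,
		 * since the engine lost all state across suspend */
	}
}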
522 struct amdgpu_ring *ring = &adev->uvd.inst[0].ring; in amdgpu_uvd_free_handles()
525 for (i = 0; i < adev->uvd.max_handles; ++i) { in amdgpu_uvd_free_handles()
526 uint32_t handle = atomic_read(&adev->uvd.handles[i]); in amdgpu_uvd_free_handles()
528 if (handle != 0 && adev->uvd.filp[i] == filp) { in amdgpu_uvd_free_handles()
541 adev->uvd.filp[i] = NULL; in amdgpu_uvd_free_handles()
542 atomic_set(&adev->uvd.handles[i], 0); in amdgpu_uvd_free_handles()
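The free_handles hits (lines 522..542) are the DRM-file teardown path: any session still owned by the closing file gets a destroy message before its slot is recycled. A sketch with simplified types; send_destroy() is a hypothetical stand-in for the amdgpu_uvd_get_destroy_msg() call elided from the listing:

#include <linux/atomic.h>
#include <linux/types.h>

static void send_destroy(u32 handle)
{
	/* stand-in: the driver submits a destroy message on
	 * adev->uvd.inst[0].ring and waits for its fence */
}

static void uvd_free_handles_sketch(atomic_t *handles, void **filps,
				    unsigned int max_handles,
				    void *closing_filp)
{
	unsigned int i;

	for (i = 0; i < max_handles; ++i) {
		u32 handle = atomic_read(&handles[i]);

		/* lines 528, 541..542: match on owner, then recycle */
		if (handle != 0 && filps[i] == closing_filp) {
			send_destroy(handle);
			filps[i] = NULL;
			atomic_set(&handles[i], 0);
		}
	}
}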
591 if (!ctx->parser->adev->uvd.address_64_bit) { in amdgpu_uvd_cs_pass1()
750 if (!adev->uvd.use_ctx_buf) { in amdgpu_uvd_cs_msg_decode()
798 adev->uvd.decode_image_width = width; in amdgpu_uvd_cs_msg_decode()
849 for (i = 0; i < adev->uvd.max_handles; ++i) { in amdgpu_uvd_cs_msg()
850 if (atomic_read(&adev->uvd.handles[i]) == handle) { in amdgpu_uvd_cs_msg()
856 if (!atomic_cmpxchg(&adev->uvd.handles[i], 0, handle)) { in amdgpu_uvd_cs_msg()
857 adev->uvd.filp[i] = ctx->parser->filp; in amdgpu_uvd_cs_msg()
873 for (i = 0; i < adev->uvd.max_handles; ++i) { in amdgpu_uvd_cs_msg()
874 if (atomic_read(&adev->uvd.handles[i]) == handle) { in amdgpu_uvd_cs_msg()
875 if (adev->uvd.filp[i] != ctx->parser->filp) { in amdgpu_uvd_cs_msg()
888 for (i = 0; i < adev->uvd.max_handles; ++i) in amdgpu_uvd_cs_msg()
889 atomic_cmpxchg(&adev->uvd.handles[i], handle, 0); in amdgpu_uvd_cs_msg()
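The cs_msg hits (lines 849..889) show the session lifecycle running on lock-free slots: a create message claims the first free slot with atomic_cmpxchg(), a decode message must find its handle in a slot owned by the same file, and a destroy message clears the slot. A sketch of the claim path, with simplified types:

#include <linux/atomic.h>
#include <linux/errno.h>
#include <linux/types.h>

static int uvd_claim_handle_sketch(atomic_t *handles, void **filps,
				   unsigned int max_handles,
				   u32 handle, void *filp)
{
	unsigned int i;

	for (i = 0; i < max_handles; ++i) {
		/* line 850: the same handle must not be created twice */
		if (atomic_read(&handles[i]) == handle)
			return -EINVAL;

		/* line 856: cmpxchg returns the old value, so 0 means
		 * this slot was free and is now ours */
		if (!atomic_cmpxchg(&handles[i], 0, handle)) {
			filps[i] = filp;	/* line 857: record owner */
			return 0;
		}
	}
	return -ENOSPC;		/* no free slot left */
}

Destroy (lines 888..889) is the mirror operation, atomic_cmpxchg(&handles[i], handle, 0), which only clears slots actually holding that handle.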
955 if (!ctx->parser->adev->uvd.address_64_bit) { in amdgpu_uvd_cs_pass2()
963 (start >> 28) != (ctx->parser->adev->uvd.inst->gpu_addr >> 28)) { in amdgpu_uvd_cs_pass2()
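The cs_pass2 check on line 963 is a 256 MiB segment test: shifting an address right by 28 bits discards the low 28 bits (2^28 bytes = 256 MiB), so two addresses agree after the shift exactly when they sit in the same window as the instance's VCPU BO. A standalone illustration:

#include <assert.h>
#include <stdint.h>

static int same_256m_segment(uint64_t a, uint64_t b)
{
	return (a >> 28) == (b >> 28);
}

int main(void)
{
	assert(same_256m_segment(0x10000000, 0x1fffffff));	/* same window */
	assert(!same_256m_segment(0x0fffffff, 0x10000000));	/* neighbours */
	return 0;
}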
1101 if (!parser->adev->uvd.address_64_bit) { in amdgpu_uvd_ring_parse_cs()
1184 r = amdgpu_job_submit(job, &adev->uvd.entity, in amdgpu_uvd_send_msg()
1212 struct amdgpu_bo *bo = adev->uvd.ib_bo; in amdgpu_uvd_get_create_msg()
1245 bo = adev->uvd.ib_bo; in amdgpu_uvd_get_destroy_msg()
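Lines 351, 1212 and 1245 together show a preallocation pattern: a single 128 KiB message BO (uvd.ib_bo) is created once at init and reused for every create/destroy message, instead of allocating a BO per session open/close. A sketch of the init side, assuming amdgpu driver headers; the GTT domain here is an assumption, and serialization of the shared buffer is elided:

/* requires amdgpu driver headers (amdgpu.h / amdgpu_object.h) */
static struct amdgpu_bo *uvd_msg_bo;	/* shared, created once */

static int uvd_msg_bo_init_sketch(struct amdgpu_device *adev)
{
	/* 128 << 10 matches the size passed on line 351; gpu_addr and
	 * cpu_addr may be NULL when the caller does not track them */
	return amdgpu_bo_create_kernel(adev, 128 << 10, PAGE_SIZE,
				       AMDGPU_GEM_DOMAIN_GTT,	/* assumed */
				       &uvd_msg_bo, NULL, NULL);
}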
1272 container_of(work, struct amdgpu_device, uvd.idle_work.work); in amdgpu_uvd_idle_work_handler()
1275 for (i = 0; i < adev->uvd.num_uvd_inst; ++i) { in amdgpu_uvd_idle_work_handler()
1276 if (adev->uvd.harvest_config & (1 << i)) in amdgpu_uvd_idle_work_handler()
1278 fences += amdgpu_fence_count_emitted(&adev->uvd.inst[i].ring); in amdgpu_uvd_idle_work_handler()
1279 for (j = 0; j < adev->uvd.num_enc_rings; ++j) { in amdgpu_uvd_idle_work_handler()
1280 fences += amdgpu_fence_count_emitted(&adev->uvd.inst[i].ring_enc[j]); in amdgpu_uvd_idle_work_handler()
1296 schedule_delayed_work(&adev->uvd.idle_work, UVD_IDLE_TIMEOUT); in amdgpu_uvd_idle_work_handler()
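The idle-work hits (lines 1272..1296) poll for activity: sum the emitted-but-unsignaled fences over every un-harvested instance's decode and encode rings, power the block down when the sum is zero, and otherwise re-arm the timer. A condensed sketch, assuming amdgpu headers for struct amdgpu_ring and amdgpu_fence_count_emitted(); the struct is a simplified stand-in for adev->uvd and the power-gating call is elided:

#include <linux/jiffies.h>
#include <linux/workqueue.h>

#define UVD_IDLE_TIMEOUT msecs_to_jiffies(1000)	/* upstream value */

/* requires amdgpu driver headers (amdgpu_ring.h) */
struct uvd_sketch {
	unsigned int num_inst, num_enc_rings;
	unsigned long harvest_config;
	struct {
		struct amdgpu_ring ring;	/* decode */
		struct amdgpu_ring ring_enc[2];	/* encode */
	} inst[2];
	struct delayed_work idle_work;
};

static void uvd_idle_sketch(struct uvd_sketch *uvd)
{
	unsigned int fences = 0, i, j;

	for (i = 0; i < uvd->num_inst; ++i) {
		if (uvd->harvest_config & (1 << i))
			continue;	/* line 1276: fused-off instance */
		fences += amdgpu_fence_count_emitted(&uvd->inst[i].ring);
		for (j = 0; j < uvd->num_enc_rings; ++j)
			fences += amdgpu_fence_count_emitted(
					&uvd->inst[i].ring_enc[j]);
	}

	if (fences) {
		/* still busy: check again after the timeout */
		schedule_delayed_work(&uvd->idle_work, UVD_IDLE_TIMEOUT);
		return;
	}
	/* idle: this is where the driver gates UVD clocks/power */
}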
1308 set_clocks = !cancel_delayed_work_sync(&adev->uvd.idle_work); in amdgpu_uvd_ring_begin_use()
1325 schedule_delayed_work(&ring->adev->uvd.idle_work, UVD_IDLE_TIMEOUT); in amdgpu_uvd_ring_end_use()
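Lines 1308 and 1325 are the two halves of the power-management handshake around that timer. The subtlety sits on line 1308: cancel_delayed_work_sync() returns true only when it cancelled a pending timer, so a false return means the idle handler may already have run and powered the block down, and the clocks must be raised again. A sketch, reusing UVD_IDLE_TIMEOUT from the block above:

static void uvd_begin_use_sketch(struct delayed_work *idle_work)
{
	/* line 1308: false from cancel_..._sync() means no timer was
	 * pending, i.e. the block may be powered down right now */
	bool set_clocks = !cancel_delayed_work_sync(idle_work);

	if (set_clocks) {
		/* re-enable UVD clocks/power before touching the ring */
	}
}

static void uvd_end_use_sketch(struct delayed_work *idle_work)
{
	/* line 1325: re-arm the timer; the idle handler powers the
	 * block down once no fences remain outstanding */
	schedule_delayed_work(idle_work, UVD_IDLE_TIMEOUT);
}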
1380 for (i = 0; i < adev->uvd.max_handles; ++i) { in amdgpu_uvd_used_handles()
1386 if (atomic_read(&adev->uvd.handles[i])) in amdgpu_uvd_used_handles()
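Finally, the used_handles hits (lines 1380..1386) are a straight count: handles can be freed in any order, so a non-zero slot anywhere in the array is an open session. A minimal sketch with the same simplified handle array as above:

#include <linux/atomic.h>

static unsigned int uvd_used_handles_sketch(atomic_t *handles,
					    unsigned int max_handles)
{
	unsigned int i, used = 0;

	/* slots free in arbitrary order; count every non-zero entry */
	for (i = 0; i < max_handles; ++i)
		if (atomic_read(&handles[i]))
			++used;
	return used;
}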