Lines Matching refs:uvd

131 INIT_DELAYED_WORK(&adev->uvd.idle_work, amdgpu_uvd_idle_work_handler);  in amdgpu_uvd_sw_init()
188 r = request_firmware(&adev->uvd.fw, fw_name, adev->dev); in amdgpu_uvd_sw_init()
195 r = amdgpu_ucode_validate(adev->uvd.fw); in amdgpu_uvd_sw_init()
199 release_firmware(adev->uvd.fw); in amdgpu_uvd_sw_init()
200 adev->uvd.fw = NULL; in amdgpu_uvd_sw_init()
205 adev->uvd.max_handles = AMDGPU_DEFAULT_UVD_HANDLES; in amdgpu_uvd_sw_init()
207 hdr = (const struct common_firmware_header *)adev->uvd.fw->data; in amdgpu_uvd_sw_init()
226 adev->uvd.max_handles = AMDGPU_MAX_UVD_HANDLES; in amdgpu_uvd_sw_init()
228 adev->uvd.fw_version = ((version_major << 24) | (version_minor << 16) | in amdgpu_uvd_sw_init()
233 (adev->uvd.fw_version < FW_1_66_16)) in amdgpu_uvd_sw_init()
245 adev->uvd.max_handles = AMDGPU_MAX_UVD_HANDLES; in amdgpu_uvd_sw_init()
247 adev->uvd.fw_version = le32_to_cpu(hdr->ucode_version); in amdgpu_uvd_sw_init()
251 + AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles; in amdgpu_uvd_sw_init()
255 for (j = 0; j < adev->uvd.num_uvd_inst; j++) { in amdgpu_uvd_sw_init()
256 if (adev->uvd.harvest_config & (1 << j)) in amdgpu_uvd_sw_init()
259 AMDGPU_GEM_DOMAIN_VRAM, &adev->uvd.inst[j].vcpu_bo, in amdgpu_uvd_sw_init()
260 &adev->uvd.inst[j].gpu_addr, &adev->uvd.inst[j].cpu_addr); in amdgpu_uvd_sw_init()
267 for (i = 0; i < adev->uvd.max_handles; ++i) { in amdgpu_uvd_sw_init()
268 atomic_set(&adev->uvd.handles[i], 0); in amdgpu_uvd_sw_init()
269 adev->uvd.filp[i] = NULL; in amdgpu_uvd_sw_init()
274 adev->uvd.address_64_bit = true; in amdgpu_uvd_sw_init()
278 adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_65_10; in amdgpu_uvd_sw_init()
281 adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_87_11; in amdgpu_uvd_sw_init()
284 adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_87_12; in amdgpu_uvd_sw_init()
287 adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_37_15; in amdgpu_uvd_sw_init()
290 adev->uvd.use_ctx_buf = adev->asic_type >= CHIP_POLARIS10; in amdgpu_uvd_sw_init()
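
Note: the amdgpu_uvd_sw_init() references above trace the init flow: firmware request and validation, per-instance VCPU buffer allocation, and handle-table reset. Below is a condensed sketch of that flow, not the actual function; chip-specific firmware names, version decoding, and the stack/heap size terms are omitted, and the amdgpu_bo_create_kernel() call with PAGE_SIZE alignment is filled in from the amdgpu buffer-helper API rather than from the lines above.

/* condensed sketch only; error paths and per-chip branches elided */
static int uvd_sw_init_sketch(struct amdgpu_device *adev, const char *fw_name)
{
	const struct common_firmware_header *hdr;
	unsigned long bo_size;
	int i, j, r;

	INIT_DELAYED_WORK(&adev->uvd.idle_work, amdgpu_uvd_idle_work_handler);

	r = request_firmware(&adev->uvd.fw, fw_name, adev->dev);
	if (r)
		return r;

	r = amdgpu_ucode_validate(adev->uvd.fw);
	if (r) {
		release_firmware(adev->uvd.fw);
		adev->uvd.fw = NULL;
		return r;
	}

	hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
	adev->uvd.fw_version = le32_to_cpu(hdr->ucode_version);
	adev->uvd.max_handles = AMDGPU_MAX_UVD_HANDLES;

	/* firmware/stack/heap size terms elided; only the per-session term shown */
	bo_size = AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles;

	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
		if (adev->uvd.harvest_config & (1 << j))
			continue;
		r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
					    AMDGPU_GEM_DOMAIN_VRAM,
					    &adev->uvd.inst[j].vcpu_bo,
					    &adev->uvd.inst[j].gpu_addr,
					    &adev->uvd.inst[j].cpu_addr);
		if (r)
			return r;
	}

	/* session handle table starts empty */
	for (i = 0; i < adev->uvd.max_handles; ++i) {
		atomic_set(&adev->uvd.handles[i], 0);
		adev->uvd.filp[i] = NULL;
	}

	return 0;
}
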
300 drm_sched_entity_destroy(&adev->uvd.entity); in amdgpu_uvd_sw_fini()
302 for (j = 0; j < adev->uvd.num_uvd_inst; ++j) { in amdgpu_uvd_sw_fini()
303 if (adev->uvd.harvest_config & (1 << j)) in amdgpu_uvd_sw_fini()
305 kvfree(adev->uvd.inst[j].saved_bo); in amdgpu_uvd_sw_fini()
307 amdgpu_bo_free_kernel(&adev->uvd.inst[j].vcpu_bo, in amdgpu_uvd_sw_fini()
308 &adev->uvd.inst[j].gpu_addr, in amdgpu_uvd_sw_fini()
309 (void **)&adev->uvd.inst[j].cpu_addr); in amdgpu_uvd_sw_fini()
311 amdgpu_ring_fini(&adev->uvd.inst[j].ring); in amdgpu_uvd_sw_fini()
314 amdgpu_ring_fini(&adev->uvd.inst[j].ring_enc[i]); in amdgpu_uvd_sw_fini()
316 release_firmware(adev->uvd.fw); in amdgpu_uvd_sw_fini()
333 ring = &adev->uvd.inst[0].ring; in amdgpu_uvd_entity_init()
335 r = drm_sched_entity_init(&adev->uvd.entity, &rq, 1, NULL); in amdgpu_uvd_entity_init()
350 cancel_delayed_work_sync(&adev->uvd.idle_work); in amdgpu_uvd_suspend()
354 for (i = 0; i < adev->uvd.max_handles; ++i) in amdgpu_uvd_suspend()
355 if (atomic_read(&adev->uvd.handles[i])) in amdgpu_uvd_suspend()
358 if (i == adev->uvd.max_handles) in amdgpu_uvd_suspend()
362 for (j = 0; j < adev->uvd.num_uvd_inst; ++j) { in amdgpu_uvd_suspend()
363 if (adev->uvd.harvest_config & (1 << j)) in amdgpu_uvd_suspend()
365 if (adev->uvd.inst[j].vcpu_bo == NULL) in amdgpu_uvd_suspend()
368 size = amdgpu_bo_size(adev->uvd.inst[j].vcpu_bo); in amdgpu_uvd_suspend()
369 ptr = adev->uvd.inst[j].cpu_addr; in amdgpu_uvd_suspend()
371 adev->uvd.inst[j].saved_bo = kvmalloc(size, GFP_KERNEL); in amdgpu_uvd_suspend()
372 if (!adev->uvd.inst[j].saved_bo) in amdgpu_uvd_suspend()
375 memcpy_fromio(adev->uvd.inst[j].saved_bo, ptr, size); in amdgpu_uvd_suspend()
386 for (i = 0; i < adev->uvd.num_uvd_inst; i++) { in amdgpu_uvd_resume()
387 if (adev->uvd.harvest_config & (1 << i)) in amdgpu_uvd_resume()
389 if (adev->uvd.inst[i].vcpu_bo == NULL) in amdgpu_uvd_resume()
392 size = amdgpu_bo_size(adev->uvd.inst[i].vcpu_bo); in amdgpu_uvd_resume()
393 ptr = adev->uvd.inst[i].cpu_addr; in amdgpu_uvd_resume()
395 if (adev->uvd.inst[i].saved_bo != NULL) { in amdgpu_uvd_resume()
396 memcpy_toio(ptr, adev->uvd.inst[i].saved_bo, size); in amdgpu_uvd_resume()
397 kvfree(adev->uvd.inst[i].saved_bo); in amdgpu_uvd_resume()
398 adev->uvd.inst[i].saved_bo = NULL; in amdgpu_uvd_resume()
403 hdr = (const struct common_firmware_header *)adev->uvd.fw->data; in amdgpu_uvd_resume()
406 memcpy_toio(adev->uvd.inst[i].cpu_addr, adev->uvd.fw->data + offset, in amdgpu_uvd_resume()
413 amdgpu_fence_driver_force_completion(&adev->uvd.inst[i].ring); in amdgpu_uvd_resume()
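
Note: the amdgpu_uvd_suspend()/amdgpu_uvd_resume() references above implement a save/restore of each instance's VCPU buffer across suspend. A minimal per-instance sketch follows, assuming the field types shown in the listing; the open-handle check, firmware re-upload path, and fence force-completion are omitted, and the helper names are hypothetical.

static int uvd_inst_suspend_sketch(struct amdgpu_device *adev, int j)
{
	unsigned long size = amdgpu_bo_size(adev->uvd.inst[j].vcpu_bo);
	void *ptr = adev->uvd.inst[j].cpu_addr;

	adev->uvd.inst[j].saved_bo = kvmalloc(size, GFP_KERNEL);
	if (!adev->uvd.inst[j].saved_bo)
		return -ENOMEM;

	/* the VCPU BO lives in VRAM, hence the _fromio copy */
	memcpy_fromio(adev->uvd.inst[j].saved_bo, ptr, size);
	return 0;
}

static void uvd_inst_resume_sketch(struct amdgpu_device *adev, int i)
{
	unsigned long size = amdgpu_bo_size(adev->uvd.inst[i].vcpu_bo);
	void *ptr = adev->uvd.inst[i].cpu_addr;

	if (adev->uvd.inst[i].saved_bo) {
		/* restore the saved image and drop the system-RAM copy */
		memcpy_toio(ptr, adev->uvd.inst[i].saved_bo, size);
		kvfree(adev->uvd.inst[i].saved_bo);
		adev->uvd.inst[i].saved_bo = NULL;
	}
}
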
421 struct amdgpu_ring *ring = &adev->uvd.inst[0].ring; in amdgpu_uvd_free_handles()
424 for (i = 0; i < adev->uvd.max_handles; ++i) { in amdgpu_uvd_free_handles()
425 uint32_t handle = atomic_read(&adev->uvd.handles[i]); in amdgpu_uvd_free_handles()
427 if (handle != 0 && adev->uvd.filp[i] == filp) { in amdgpu_uvd_free_handles()
440 adev->uvd.filp[i] = NULL; in amdgpu_uvd_free_handles()
441 atomic_set(&adev->uvd.handles[i], 0); in amdgpu_uvd_free_handles()
490 if (!ctx->parser->adev->uvd.address_64_bit) { in amdgpu_uvd_cs_pass1()
648 if (!adev->uvd.use_ctx_buf){ in amdgpu_uvd_cs_msg_decode()
696 adev->uvd.decode_image_width = width; in amdgpu_uvd_cs_msg_decode()
746 for (i = 0; i < adev->uvd.max_handles; ++i) { in amdgpu_uvd_cs_msg()
747 if (atomic_read(&adev->uvd.handles[i]) == handle) { in amdgpu_uvd_cs_msg()
753 if (!atomic_cmpxchg(&adev->uvd.handles[i], 0, handle)) { in amdgpu_uvd_cs_msg()
754 adev->uvd.filp[i] = ctx->parser->filp; in amdgpu_uvd_cs_msg()
770 for (i = 0; i < adev->uvd.max_handles; ++i) { in amdgpu_uvd_cs_msg()
771 if (atomic_read(&adev->uvd.handles[i]) == handle) { in amdgpu_uvd_cs_msg()
772 if (adev->uvd.filp[i] != ctx->parser->filp) { in amdgpu_uvd_cs_msg()
785 for (i = 0; i < adev->uvd.max_handles; ++i) in amdgpu_uvd_cs_msg()
786 atomic_cmpxchg(&adev->uvd.handles[i], handle, 0); in amdgpu_uvd_cs_msg()
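
Note: the amdgpu_uvd_cs_msg() references above show the lock-free session-handle bookkeeping: a fixed table of atomics where a slot holds 0 when free and the session handle when in use. A sketch of the create/destroy paths, assuming struct drm_file * for the filp table (as in the amdgpu CS parser) and with error codes approximated; function names are hypothetical.

static int uvd_alloc_handle_sketch(struct amdgpu_device *adev,
				   uint32_t handle, struct drm_file *filp)
{
	int i;

	for (i = 0; i < adev->uvd.max_handles; ++i) {
		if (atomic_read(&adev->uvd.handles[i]) == handle)
			return -EINVAL;		/* handle already in use */
		if (!atomic_cmpxchg(&adev->uvd.handles[i], 0, handle)) {
			adev->uvd.filp[i] = filp;	/* remember the owner */
			return 0;
		}
	}
	return -ENOSPC;				/* no free session slot */
}

static void uvd_free_handle_sketch(struct amdgpu_device *adev, uint32_t handle)
{
	int i;

	/* clear whichever slot holds this handle, if any */
	for (i = 0; i < adev->uvd.max_handles; ++i)
		atomic_cmpxchg(&adev->uvd.handles[i], handle, 0);
}
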
854 if (!ctx->parser->adev->uvd.address_64_bit) { in amdgpu_uvd_cs_pass2()
862 (start >> 28) != (ctx->parser->adev->uvd.inst->gpu_addr >> 28)) { in amdgpu_uvd_cs_pass2()
999 if (!parser->adev->uvd.address_64_bit) { in amdgpu_uvd_ring_parse_cs()
1036 if (!ring->adev->uvd.address_64_bit) { in amdgpu_uvd_send_msg()
1093 r = amdgpu_job_submit(job, &adev->uvd.entity, in amdgpu_uvd_send_msg()
1181 container_of(work, struct amdgpu_device, uvd.idle_work.work); in amdgpu_uvd_idle_work_handler()
1184 for (i = 0; i < adev->uvd.num_uvd_inst; ++i) { in amdgpu_uvd_idle_work_handler()
1185 if (adev->uvd.harvest_config & (1 << i)) in amdgpu_uvd_idle_work_handler()
1187 fences += amdgpu_fence_count_emitted(&adev->uvd.inst[i].ring); in amdgpu_uvd_idle_work_handler()
1188 for (j = 0; j < adev->uvd.num_enc_rings; ++j) { in amdgpu_uvd_idle_work_handler()
1189 fences += amdgpu_fence_count_emitted(&adev->uvd.inst[i].ring_enc[j]); in amdgpu_uvd_idle_work_handler()
1205 schedule_delayed_work(&adev->uvd.idle_work, UVD_IDLE_TIMEOUT); in amdgpu_uvd_idle_work_handler()
1217 set_clocks = !cancel_delayed_work_sync(&adev->uvd.idle_work); in amdgpu_uvd_ring_begin_use()
1234 schedule_delayed_work(&ring->adev->uvd.idle_work, UVD_IDLE_TIMEOUT); in amdgpu_uvd_ring_end_use()
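
Note: the idle_work references above describe the delayed-work power-gating pattern: ring users cancel the idle work in begin_use and re-arm it in end_use, while the handler counts outstanding fences before gating. A sketch with the clock/power-gating calls themselves elided and hypothetical function names:

static void uvd_idle_work_sketch(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, uvd.idle_work.work);
	unsigned int fences = 0, i, j;

	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
		if (adev->uvd.harvest_config & (1 << i))
			continue;
		fences += amdgpu_fence_count_emitted(&adev->uvd.inst[i].ring);
		for (j = 0; j < adev->uvd.num_enc_rings; ++j)
			fences += amdgpu_fence_count_emitted(&adev->uvd.inst[i].ring_enc[j]);
	}

	if (fences)
		/* still busy: poll again after the idle timeout */
		schedule_delayed_work(&adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
	/* else: gate UVD clocks/power (elided) */
}

static void uvd_ring_begin_use_sketch(struct amdgpu_ring *ring)
{
	/* no pending idle tick to cancel means UVD may already be gated */
	bool set_clocks = !cancel_delayed_work_sync(&ring->adev->uvd.idle_work);

	if (set_clocks) {
		/* ungate clocks/power before submitting work (elided) */
	}
}

static void uvd_ring_end_use_sketch(struct amdgpu_ring *ring)
{
	/* re-arm the idle timer once the submission is done */
	schedule_delayed_work(&ring->adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
}
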
1281 for (i = 0; i < adev->uvd.max_handles; ++i) { in amdgpu_uvd_used_handles()
1287 if (atomic_read(&adev->uvd.handles[i])) in amdgpu_uvd_used_handles()