Lines Matching refs:vcn
(All hits below are from drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c; the leading
number on each line is the source line and the trailing "in ...()" names the
enclosing function. Multi-line statements appear truncated, ending in "+" or
",", because only the physical lines containing "vcn" matched the query.)
82 INIT_DELAYED_WORK(&adev->vcn.idle_work, amdgpu_vcn_idle_work_handler); in amdgpu_vcn_sw_init()
83 mutex_init(&adev->vcn.vcn_pg_lock); in amdgpu_vcn_sw_init()
84 mutex_init(&adev->vcn.vcn1_jpeg1_workaround); in amdgpu_vcn_sw_init()
85 atomic_set(&adev->vcn.total_submission_cnt, 0); in amdgpu_vcn_sw_init()
86 for (i = 0; i < adev->vcn.num_vcn_inst; i++) in amdgpu_vcn_sw_init()
87 atomic_set(&adev->vcn.inst[i].dpg_enc_submission_cnt, 0); in amdgpu_vcn_sw_init()
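The hits at lines 82-87 are the one-time state setup in amdgpu_vcn_sw_init(): a
delayed work item for idle detection, two mutexes, and atomic submission
counters (one global, one per instance for DPG-mode encode). A minimal sketch
of the same pattern, with hypothetical my_vcn/my_idle_work_handler names that
are not the driver's own:

    #include <linux/workqueue.h>
    #include <linux/mutex.h>
    #include <linux/atomic.h>

    struct my_vcn {
            struct delayed_work idle_work;     /* deferred idle check */
            struct mutex pg_lock;              /* serializes power-state changes */
            atomic_t total_submission_cnt;     /* submissions in flight */
    };

    static void my_idle_work_handler(struct work_struct *work);

    static void my_vcn_sw_init(struct my_vcn *vcn)
    {
            /* Bind the handler now; it is armed later via schedule_delayed_work(). */
            INIT_DELAYED_WORK(&vcn->idle_work, my_idle_work_handler);
            mutex_init(&vcn->pg_lock);
            atomic_set(&vcn->total_submission_cnt, 0);
    }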
102 adev->vcn.indirect_sram = true; in amdgpu_vcn_sw_init()
112 adev->vcn.indirect_sram = true; in amdgpu_vcn_sw_init()
118 adev->vcn.indirect_sram = true; in amdgpu_vcn_sw_init()
124 adev->vcn.indirect_sram = true; in amdgpu_vcn_sw_init()
130 adev->vcn.indirect_sram = true; in amdgpu_vcn_sw_init()
136 adev->vcn.indirect_sram = true; in amdgpu_vcn_sw_init()
142 adev->vcn.indirect_sram = true; in amdgpu_vcn_sw_init()
148 adev->vcn.indirect_sram = true; in amdgpu_vcn_sw_init()
157 adev->vcn.indirect_sram = true; in amdgpu_vcn_sw_init()
163 adev->vcn.indirect_sram = true; in amdgpu_vcn_sw_init()
169 adev->vcn.indirect_sram = true; in amdgpu_vcn_sw_init()
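The eleven identical assignments at lines 102-169 are not duplicates: each sits
in a different per-ASIC case of a switch in amdgpu_vcn_sw_init(), whose case
labels and firmware-name assignments simply did not match the query. The shape,
with illustrative CHIP_FOO/FIRMWARE_FOO placeholders, is roughly:

    switch (adev->asic_type) {
    case CHIP_FOO:                          /* illustrative label */
            fw_name = FIRMWARE_FOO;         /* illustrative blob name */
            adev->vcn.indirect_sram = true;
            break;
    /* ...one case per supported ASIC... */
    }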
175 r = request_firmware(&adev->vcn.fw, fw_name, adev->dev); in amdgpu_vcn_sw_init()
182 r = amdgpu_ucode_validate(adev->vcn.fw); in amdgpu_vcn_sw_init()
186 release_firmware(adev->vcn.fw); in amdgpu_vcn_sw_init()
187 adev->vcn.fw = NULL; in amdgpu_vcn_sw_init()
191 hdr = (const struct common_firmware_header *)adev->vcn.fw->data; in amdgpu_vcn_sw_init()
192 adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version); in amdgpu_vcn_sw_init()
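Lines 175-192 are the standard firmware bring-up: request_firmware() fetches
the blob, amdgpu_ucode_validate() sanity-checks it, and on failure the blob is
released and the pointer cleared before bailing out; on success the common
header yields the ucode version. A sketch of the same flow, with
blob_looks_valid() standing in for the driver-internal amdgpu_ucode_validate():

    #include <linux/firmware.h>

    static bool blob_looks_valid(const struct firmware *fw)
    {
            return fw && fw->size;          /* the real check parses headers */
    }

    static int my_load_fw(struct device *dev, const char *fw_name,
                          const struct firmware **fw_out)
    {
            const struct firmware *fw;
            int r;

            r = request_firmware(&fw, fw_name, dev);
            if (r)
                    return r;               /* blob missing or unreadable */

            if (!blob_looks_valid(fw)) {
                    release_firmware(fw);   /* drop bad contents, no dangling ptr */
                    return -EINVAL;
            }

            *fw_out = fw;
            return 0;
    }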
226 for (i = 0; i < adev->vcn.num_vcn_inst; i++) { in amdgpu_vcn_sw_init()
227 if (adev->vcn.harvest_config & (1 << i)) in amdgpu_vcn_sw_init()
231 AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.inst[i].vcpu_bo, in amdgpu_vcn_sw_init()
232 &adev->vcn.inst[i].gpu_addr, &adev->vcn.inst[i].cpu_addr); in amdgpu_vcn_sw_init()
238 adev->vcn.inst[i].fw_shared_cpu_addr = adev->vcn.inst[i].cpu_addr + in amdgpu_vcn_sw_init()
240 adev->vcn.inst[i].fw_shared_gpu_addr = adev->vcn.inst[i].gpu_addr + in amdgpu_vcn_sw_init()
243 if (adev->vcn.indirect_sram) { in amdgpu_vcn_sw_init()
245 AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.inst[i].dpg_sram_bo, in amdgpu_vcn_sw_init()
246 &adev->vcn.inst[i].dpg_sram_gpu_addr, &adev->vcn.inst[i].dpg_sram_cpu_addr); in amdgpu_vcn_sw_init()
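Lines 226-246 allocate, for each instance that survives the harvest_config
mask, a VCPU buffer object in VRAM (with a firmware-shared region carved out of
its tail) plus, when indirect SRAM is in use, a second BO for the DPG SRAM
image. amdgpu_bo_create_kernel() returns the BO together with its GPU and CPU
addresses in one call; a sketch of such a call, with an illustrative bo_size:

    struct amdgpu_bo *bo;
    u64 gpu_addr;
    void *cpu_addr;
    int r;

    r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
                                AMDGPU_GEM_DOMAIN_VRAM,
                                &bo, &gpu_addr, &cpu_addr);
    if (r)
            return r;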
261 for (j = 0; j < adev->vcn.num_vcn_inst; ++j) { in amdgpu_vcn_sw_fini()
262 if (adev->vcn.harvest_config & (1 << j)) in amdgpu_vcn_sw_fini()
265 if (adev->vcn.indirect_sram) { in amdgpu_vcn_sw_fini()
266 amdgpu_bo_free_kernel(&adev->vcn.inst[j].dpg_sram_bo, in amdgpu_vcn_sw_fini()
267 &adev->vcn.inst[j].dpg_sram_gpu_addr, in amdgpu_vcn_sw_fini()
268 (void **)&adev->vcn.inst[j].dpg_sram_cpu_addr); in amdgpu_vcn_sw_fini()
270 kvfree(adev->vcn.inst[j].saved_bo); in amdgpu_vcn_sw_fini()
272 amdgpu_bo_free_kernel(&adev->vcn.inst[j].vcpu_bo, in amdgpu_vcn_sw_fini()
273 &adev->vcn.inst[j].gpu_addr, in amdgpu_vcn_sw_fini()
274 (void **)&adev->vcn.inst[j].cpu_addr); in amdgpu_vcn_sw_fini()
276 amdgpu_ring_fini(&adev->vcn.inst[j].ring_dec); in amdgpu_vcn_sw_fini()
278 for (i = 0; i < adev->vcn.num_enc_rings; ++i) in amdgpu_vcn_sw_fini()
279 amdgpu_ring_fini(&adev->vcn.inst[j].ring_enc[i]); in amdgpu_vcn_sw_fini()
282 release_firmware(adev->vcn.fw); in amdgpu_vcn_sw_fini()
283 mutex_destroy(&adev->vcn.vcn1_jpeg1_workaround); in amdgpu_vcn_sw_fini()
284 mutex_destroy(&adev->vcn.vcn_pg_lock); in amdgpu_vcn_sw_fini()
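Lines 261-284 undo sw_init in reverse: per instance, the DPG SRAM BO (if any),
the suspend snapshot, the VCPU BO, and the decode/encode rings are torn down,
then the firmware blob is released and the mutexes destroyed. The kvfree() at
line 270 pairs with the kvmalloc() in amdgpu_vcn_suspend() below. In outline:

    /* Teardown mirrors init, most-recently-acquired first. */
    amdgpu_bo_free_kernel(&bo, &gpu_addr, &cpu_addr);  /* clears all three */
    release_firmware(fw);
    mutex_destroy(&lock);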
318 cancel_delayed_work_sync(&adev->vcn.idle_work); in amdgpu_vcn_suspend()
320 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { in amdgpu_vcn_suspend()
321 if (adev->vcn.harvest_config & (1 << i)) in amdgpu_vcn_suspend()
323 if (adev->vcn.inst[i].vcpu_bo == NULL) in amdgpu_vcn_suspend()
326 size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo); in amdgpu_vcn_suspend()
327 ptr = adev->vcn.inst[i].cpu_addr; in amdgpu_vcn_suspend()
329 adev->vcn.inst[i].saved_bo = kvmalloc(size, GFP_KERNEL); in amdgpu_vcn_suspend()
330 if (!adev->vcn.inst[i].saved_bo) in amdgpu_vcn_suspend()
334 memcpy_fromio(adev->vcn.inst[i].saved_bo, ptr, size); in amdgpu_vcn_suspend()
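Lines 318-334: suspend first cancels the idle work synchronously, so the power
state cannot change underneath it, then snapshots each live VCPU BO from VRAM
into a kvmalloc'd system-memory buffer; memcpy_fromio() is used because the
BO's CPU mapping is I/O memory. A sketch, reusing the hypothetical names from
the earlier sketches:

    unsigned long size;
    void *saved;

    cancel_delayed_work_sync(&vcn->idle_work);

    size = amdgpu_bo_size(bo);
    saved = kvmalloc(size, GFP_KERNEL);     /* may fall back to vmalloc */
    if (!saved)
            return -ENOMEM;
    memcpy_fromio(saved, cpu_addr, size);   /* VRAM -> system RAM */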
347 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { in amdgpu_vcn_resume()
348 if (adev->vcn.harvest_config & (1 << i)) in amdgpu_vcn_resume()
350 if (adev->vcn.inst[i].vcpu_bo == NULL) in amdgpu_vcn_resume()
353 size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo); in amdgpu_vcn_resume()
354 ptr = adev->vcn.inst[i].cpu_addr; in amdgpu_vcn_resume()
356 if (adev->vcn.inst[i].saved_bo != NULL) { in amdgpu_vcn_resume()
358 memcpy_toio(ptr, adev->vcn.inst[i].saved_bo, size); in amdgpu_vcn_resume()
361 kvfree(adev->vcn.inst[i].saved_bo); in amdgpu_vcn_resume()
362 adev->vcn.inst[i].saved_bo = NULL; in amdgpu_vcn_resume()
367 hdr = (const struct common_firmware_header *)adev->vcn.fw->data; in amdgpu_vcn_resume()
371 memcpy_toio(adev->vcn.inst[i].cpu_addr, adev->vcn.fw->data + offset, in amdgpu_vcn_resume()
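Lines 347-371 are the mirror image: if a snapshot exists it is copied back with
memcpy_toio() and freed; otherwise the firmware payload itself is re-copied
into the VCPU BO, skipping the file header. A sketch, assuming the header's
ucode_array_offset_bytes field as in the driver's common firmware header:

    const struct common_firmware_header *hdr;
    unsigned int offset;

    if (saved) {
            memcpy_toio(cpu_addr, saved, size);     /* system RAM -> VRAM */
            kvfree(saved);
            saved = NULL;
    } else {
            hdr = (const struct common_firmware_header *)fw->data;
            offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
            memcpy_toio(cpu_addr, fw->data + offset, fw->size - offset);
    }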
387 container_of(work, struct amdgpu_device, vcn.idle_work.work); in amdgpu_vcn_idle_work_handler()
392 for (j = 0; j < adev->vcn.num_vcn_inst; ++j) { in amdgpu_vcn_idle_work_handler()
393 if (adev->vcn.harvest_config & (1 << j)) in amdgpu_vcn_idle_work_handler()
396 for (i = 0; i < adev->vcn.num_enc_rings; ++i) { in amdgpu_vcn_idle_work_handler()
397 fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_enc[i]); in amdgpu_vcn_idle_work_handler()
404 unlikely(atomic_read(&adev->vcn.inst[j].dpg_enc_submission_cnt))) in amdgpu_vcn_idle_work_handler()
409 adev->vcn.pause_dpg_mode(adev, j, &new_state); in amdgpu_vcn_idle_work_handler()
412 fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_dec); in amdgpu_vcn_idle_work_handler()
416 if (!fences && !atomic_read(&adev->vcn.total_submission_cnt)) { in amdgpu_vcn_idle_work_handler()
424 schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT); in amdgpu_vcn_idle_work_handler()
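Lines 387-424 show the idle heuristic: the handler recovers its device with
container_of(), sums the fences still emitted on every ring of every
non-harvested instance, and powers the block down only when no fences remain
and the submission counter is zero; otherwise it re-arms itself. A condensed
sketch, with power_gate() standing in for the driver's power-gating call:

    static void my_idle_work_handler(struct work_struct *work)
    {
            struct my_vcn *vcn = container_of(work, struct my_vcn,
                                              idle_work.work);
            unsigned int fences = 0;

            /* ...sum amdgpu_fence_count_emitted() over all rings... */

            if (!fences && !atomic_read(&vcn->total_submission_cnt))
                    power_gate(vcn);        /* stand-in for the PG call */
            else
                    schedule_delayed_work(&vcn->idle_work, VCN_IDLE_TIMEOUT);
    }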
433 atomic_inc(&adev->vcn.total_submission_cnt); in amdgpu_vcn_ring_begin_use()
435 if (!cancel_delayed_work_sync(&adev->vcn.idle_work)) { in amdgpu_vcn_ring_begin_use()
442 mutex_lock(&adev->vcn.vcn_pg_lock); in amdgpu_vcn_ring_begin_use()
450 atomic_inc(&adev->vcn.inst[ring->me].dpg_enc_submission_cnt); in amdgpu_vcn_ring_begin_use()
456 for (i = 0; i < adev->vcn.num_enc_rings; ++i) in amdgpu_vcn_ring_begin_use()
457 fences += amdgpu_fence_count_emitted(&adev->vcn.inst[ring->me].ring_enc[i]); in amdgpu_vcn_ring_begin_use()
459 if (fences || atomic_read(&adev->vcn.inst[ring->me].dpg_enc_submission_cnt)) in amdgpu_vcn_ring_begin_use()
465 adev->vcn.pause_dpg_mode(adev, ring->me, &new_state); in amdgpu_vcn_ring_begin_use()
467 mutex_unlock(&adev->vcn.vcn_pg_lock); in amdgpu_vcn_ring_begin_use()
474 atomic_dec(&ring->adev->vcn.inst[ring->me].dpg_enc_submission_cnt); in amdgpu_vcn_ring_end_use()
476 atomic_dec(&ring->adev->vcn.total_submission_cnt); in amdgpu_vcn_ring_end_use()
478 schedule_delayed_work(&ring->adev->vcn.idle_work, VCN_IDLE_TIMEOUT); in amdgpu_vcn_ring_end_use()
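Lines 433-478 bracket every submission with begin_use/end_use: begin_use bumps
the submission count, cancels pending idle work (a false return from
cancel_delayed_work_sync() means none was queued, i.e. the block may already be
gated), and ensures power-up under vcn_pg_lock; end_use drops the counters and
re-arms the idle timer. A simplified bracket:

    static void my_ring_begin_use(struct my_vcn *vcn)
    {
            atomic_inc(&vcn->total_submission_cnt);
            cancel_delayed_work_sync(&vcn->idle_work);

            mutex_lock(&vcn->pg_lock);
            power_up(vcn);                  /* stand-in for the PG/pause calls */
            mutex_unlock(&vcn->pg_lock);
    }

    static void my_ring_end_use(struct my_vcn *vcn)
    {
            atomic_dec(&vcn->total_submission_cnt);
            schedule_delayed_work(&vcn->idle_work, VCN_IDLE_TIMEOUT);
    }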
492 WREG32(adev->vcn.inst[ring->me].external.scratch9, 0xCAFEDEAD); in amdgpu_vcn_dec_ring_test_ring()
496 amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.scratch9, 0)); in amdgpu_vcn_dec_ring_test_ring()
500 tmp = RREG32(adev->vcn.inst[ring->me].external.scratch9); in amdgpu_vcn_dec_ring_test_ring()
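Lines 492-500 are the classic scratch-register ring test: poison the register
over MMIO, submit a ring packet that overwrites it, and poll until the new
value lands or the timeout expires. The poll loop follows line 500 in the
source; its usual shape in amdgpu is:

    unsigned int i;
    u32 tmp;
    int r = 0;

    WREG32(scratch_reg, 0xCAFEDEAD);        /* poison directly */
    /* ...ring packet writes 0xDEADBEEF to the same register... */
    for (i = 0; i < adev->usec_timeout; i++) {
            tmp = RREG32(scratch_reg);
            if (tmp == 0xDEADBEEF)          /* the ring executed our write */
                    break;
            udelay(1);
    }
    if (i >= adev->usec_timeout)
            r = -ETIMEDOUT;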
563 ib->ptr[0] = PACKET0(adev->vcn.internal.data0, 0); in amdgpu_vcn_dec_send_msg()
565 ib->ptr[2] = PACKET0(adev->vcn.internal.data1, 0); in amdgpu_vcn_dec_send_msg()
567 ib->ptr[4] = PACKET0(adev->vcn.internal.cmd, 0); in amdgpu_vcn_dec_send_msg()
570 ib->ptr[i] = PACKET0(adev->vcn.internal.nop, 0); in amdgpu_vcn_dec_send_msg()
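Lines 563-570 build a small indirect buffer of PACKET0 register writes that
hands the message buffer's GPU address to the firmware: data0/data1 carry the
low/high halves, cmd kicks off processing, and the rest is NOP padding. The
value dwords at the odd indices did not match the query; the full layout is
roughly (msg_addr being the message BO's GPU address):

    unsigned int i;

    ib->ptr[0] = PACKET0(data0_reg, 0);     /* register offsets are per-ASIC */
    ib->ptr[1] = lower_32_bits(msg_addr);
    ib->ptr[2] = PACKET0(data1_reg, 0);
    ib->ptr[3] = upper_32_bits(msg_addr);
    ib->ptr[4] = PACKET0(cmd_reg, 0);
    ib->ptr[5] = 0;
    for (i = 6; i < 16; i += 2) {
            ib->ptr[i] = PACKET0(nop_reg, 0);
            ib->ptr[i + 1] = 0;
    }
    ib->length_dw = 16;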