Lines Matching refs:vcn

71 INIT_DELAYED_WORK(&adev->vcn.idle_work, amdgpu_vcn_idle_work_handler); in amdgpu_vcn_sw_init()
72 mutex_init(&adev->vcn.vcn_pg_lock); in amdgpu_vcn_sw_init()
73 mutex_init(&adev->vcn.vcn1_jpeg1_workaround); in amdgpu_vcn_sw_init()
74 atomic_set(&adev->vcn.total_submission_cnt, 0); in amdgpu_vcn_sw_init()
75 for (i = 0; i < adev->vcn.num_vcn_inst; i++) in amdgpu_vcn_sw_init()
76 atomic_set(&adev->vcn.inst[i].dpg_enc_submission_cnt, 0); in amdgpu_vcn_sw_init()
91 adev->vcn.indirect_sram = true; in amdgpu_vcn_sw_init()
101 adev->vcn.indirect_sram = true; in amdgpu_vcn_sw_init()
107 adev->vcn.indirect_sram = true; in amdgpu_vcn_sw_init()
113 adev->vcn.indirect_sram = true; in amdgpu_vcn_sw_init()
119 adev->vcn.indirect_sram = true; in amdgpu_vcn_sw_init()
125 adev->vcn.indirect_sram = true; in amdgpu_vcn_sw_init()
131 adev->vcn.indirect_sram = true; in amdgpu_vcn_sw_init()
137 r = request_firmware(&adev->vcn.fw, fw_name, adev->dev); in amdgpu_vcn_sw_init()
144 r = amdgpu_ucode_validate(adev->vcn.fw); in amdgpu_vcn_sw_init()
148 release_firmware(adev->vcn.fw); in amdgpu_vcn_sw_init()
149 adev->vcn.fw = NULL; in amdgpu_vcn_sw_init()
153 hdr = (const struct common_firmware_header *)adev->vcn.fw->data; in amdgpu_vcn_sw_init()
154 adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version); in amdgpu_vcn_sw_init()
188 for (i = 0; i < adev->vcn.num_vcn_inst; i++) { in amdgpu_vcn_sw_init()
189 if (adev->vcn.harvest_config & (1 << i)) in amdgpu_vcn_sw_init()
193 AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.inst[i].vcpu_bo, in amdgpu_vcn_sw_init()
194 &adev->vcn.inst[i].gpu_addr, &adev->vcn.inst[i].cpu_addr); in amdgpu_vcn_sw_init()
200 adev->vcn.inst[i].fw_shared_cpu_addr = adev->vcn.inst[i].cpu_addr + in amdgpu_vcn_sw_init()
202 adev->vcn.inst[i].fw_shared_gpu_addr = adev->vcn.inst[i].gpu_addr + in amdgpu_vcn_sw_init()
205 if (adev->vcn.indirect_sram) { in amdgpu_vcn_sw_init()
207 AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.inst[i].dpg_sram_bo, in amdgpu_vcn_sw_init()
208 &adev->vcn.inst[i].dpg_sram_gpu_addr, &adev->vcn.inst[i].dpg_sram_cpu_addr); in amdgpu_vcn_sw_init()
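
Taken together, the matches above trace amdgpu_vcn_sw_init(): it arms the idle delayed work and the power-gating locks, zeroes the submission counters, requests and validates the VCN firmware, and then allocates one VCPU buffer object per un-harvested instance, plus an indirect-SRAM scratch buffer on DPG-capable parts (which is why indirect_sram is set in seven different ASIC cases above). Below is a condensed reconstruction of that flow, not the verbatim function: firmware-name selection, buffer sizing, and most error handling are elided, pick_vcn_firmware_name() and vcn_vcpu_bo_size() are hypothetical stand-ins, and details vary by kernel version.

int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
{
	const struct common_firmware_header *hdr;
	const char *fw_name;
	unsigned int bo_size;
	int i, r;

	INIT_DELAYED_WORK(&adev->vcn.idle_work, amdgpu_vcn_idle_work_handler);
	mutex_init(&adev->vcn.vcn_pg_lock);
	mutex_init(&adev->vcn.vcn1_jpeg1_workaround);
	atomic_set(&adev->vcn.total_submission_cnt, 0);
	for (i = 0; i < adev->vcn.num_vcn_inst; i++)
		atomic_set(&adev->vcn.inst[i].dpg_enc_submission_cnt, 0);

	/* per-ASIC switch (elided): choose the firmware file and, on parts that
	 * support dynamic power gating, set adev->vcn.indirect_sram = true */
	fw_name = pick_vcn_firmware_name(adev);		/* hypothetical helper */

	r = request_firmware(&adev->vcn.fw, fw_name, adev->dev);
	if (r)
		return r;
	r = amdgpu_ucode_validate(adev->vcn.fw);
	if (r) {
		release_firmware(adev->vcn.fw);
		adev->vcn.fw = NULL;
		return r;
	}
	hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
	adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);

	bo_size = vcn_vcpu_bo_size(adev, hdr);		/* hypothetical helper */
	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
				AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.inst[i].vcpu_bo,
				&adev->vcn.inst[i].gpu_addr, &adev->vcn.inst[i].cpu_addr);
		if (r)
			return r;

		/* the fw_shared region sits at the tail of the VCPU BO; the
		 * cpu/gpu address assignments matched above carve it out */

		if (adev->vcn.indirect_sram) {
			/* small scratch BO for indirect register programming in
			 * DPG mode; PAGE_SIZE here is a stand-in for the real size */
			r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
					AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.inst[i].dpg_sram_bo,
					&adev->vcn.inst[i].dpg_sram_gpu_addr,
					&adev->vcn.inst[i].dpg_sram_cpu_addr);
			if (r)
				return r;
		}
	}
	return 0;
}
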
223 cancel_delayed_work_sync(&adev->vcn.idle_work); in amdgpu_vcn_sw_fini()
225 for (j = 0; j < adev->vcn.num_vcn_inst; ++j) { in amdgpu_vcn_sw_fini()
226 if (adev->vcn.harvest_config & (1 << j)) in amdgpu_vcn_sw_fini()
229 if (adev->vcn.indirect_sram) { in amdgpu_vcn_sw_fini()
230 amdgpu_bo_free_kernel(&adev->vcn.inst[j].dpg_sram_bo, in amdgpu_vcn_sw_fini()
231 &adev->vcn.inst[j].dpg_sram_gpu_addr, in amdgpu_vcn_sw_fini()
232 (void **)&adev->vcn.inst[j].dpg_sram_cpu_addr); in amdgpu_vcn_sw_fini()
234 kvfree(adev->vcn.inst[j].saved_bo); in amdgpu_vcn_sw_fini()
236 amdgpu_bo_free_kernel(&adev->vcn.inst[j].vcpu_bo, in amdgpu_vcn_sw_fini()
237 &adev->vcn.inst[j].gpu_addr, in amdgpu_vcn_sw_fini()
238 (void **)&adev->vcn.inst[j].cpu_addr); in amdgpu_vcn_sw_fini()
240 amdgpu_ring_fini(&adev->vcn.inst[j].ring_dec); in amdgpu_vcn_sw_fini()
242 for (i = 0; i < adev->vcn.num_enc_rings; ++i) in amdgpu_vcn_sw_fini()
243 amdgpu_ring_fini(&adev->vcn.inst[j].ring_enc[i]); in amdgpu_vcn_sw_fini()
246 release_firmware(adev->vcn.fw); in amdgpu_vcn_sw_fini()
247 mutex_destroy(&adev->vcn.vcn1_jpeg1_workaround); in amdgpu_vcn_sw_fini()
248 mutex_destroy(&adev->vcn.vcn_pg_lock); in amdgpu_vcn_sw_fini()
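
The teardown mirrors that: amdgpu_vcn_sw_fini() cancels the idle work, then per instance frees the DPG scratch BO (if any), any suspend snapshot, the VCPU BO, and the decode/encode rings, before releasing the firmware and destroying the mutexes. A sketch of the ordering implied by the matches:

int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
{
	int i, j;

	cancel_delayed_work_sync(&adev->vcn.idle_work);

	for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
		if (adev->vcn.harvest_config & (1 << j))
			continue;

		if (adev->vcn.indirect_sram) {
			amdgpu_bo_free_kernel(&adev->vcn.inst[j].dpg_sram_bo,
					      &adev->vcn.inst[j].dpg_sram_gpu_addr,
					      (void **)&adev->vcn.inst[j].dpg_sram_cpu_addr);
		}
		kvfree(adev->vcn.inst[j].saved_bo);	/* harmless if NULL */

		amdgpu_bo_free_kernel(&adev->vcn.inst[j].vcpu_bo,
				      &adev->vcn.inst[j].gpu_addr,
				      (void **)&adev->vcn.inst[j].cpu_addr);

		amdgpu_ring_fini(&adev->vcn.inst[j].ring_dec);
		for (i = 0; i < adev->vcn.num_enc_rings; ++i)
			amdgpu_ring_fini(&adev->vcn.inst[j].ring_enc[i]);
	}

	release_firmware(adev->vcn.fw);
	mutex_destroy(&adev->vcn.vcn1_jpeg1_workaround);
	mutex_destroy(&adev->vcn.vcn_pg_lock);

	return 0;
}
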
259 cancel_delayed_work_sync(&adev->vcn.idle_work); in amdgpu_vcn_suspend()
261 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { in amdgpu_vcn_suspend()
262 if (adev->vcn.harvest_config & (1 << i)) in amdgpu_vcn_suspend()
264 if (adev->vcn.inst[i].vcpu_bo == NULL) in amdgpu_vcn_suspend()
267 size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo); in amdgpu_vcn_suspend()
268 ptr = adev->vcn.inst[i].cpu_addr; in amdgpu_vcn_suspend()
270 adev->vcn.inst[i].saved_bo = kvmalloc(size, GFP_KERNEL); in amdgpu_vcn_suspend()
271 if (!adev->vcn.inst[i].saved_bo) in amdgpu_vcn_suspend()
274 memcpy_fromio(adev->vcn.inst[i].saved_bo, ptr, size); in amdgpu_vcn_suspend()
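
amdgpu_vcn_suspend() first stops the idle worker, then snapshots each instance's VCPU BO out of VRAM into a kvmalloc'd buffer so the firmware state survives the power transition. Roughly, per the matches above:

int amdgpu_vcn_suspend(struct amdgpu_device *adev)
{
	unsigned int size;
	void *ptr;
	int i;

	cancel_delayed_work_sync(&adev->vcn.idle_work);

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		if (adev->vcn.inst[i].vcpu_bo == NULL)
			continue;

		size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo);
		ptr = adev->vcn.inst[i].cpu_addr;

		adev->vcn.inst[i].saved_bo = kvmalloc(size, GFP_KERNEL);
		if (!adev->vcn.inst[i].saved_bo)
			return -ENOMEM;

		/* the VCPU BO is IO-mapped VRAM, hence memcpy_fromio() */
		memcpy_fromio(adev->vcn.inst[i].saved_bo, ptr, size);
	}
	return 0;
}
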
285 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { in amdgpu_vcn_resume()
286 if (adev->vcn.harvest_config & (1 << i)) in amdgpu_vcn_resume()
288 if (adev->vcn.inst[i].vcpu_bo == NULL) in amdgpu_vcn_resume()
291 size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo); in amdgpu_vcn_resume()
292 ptr = adev->vcn.inst[i].cpu_addr; in amdgpu_vcn_resume()
294 if (adev->vcn.inst[i].saved_bo != NULL) { in amdgpu_vcn_resume()
295 memcpy_toio(ptr, adev->vcn.inst[i].saved_bo, size); in amdgpu_vcn_resume()
296 kvfree(adev->vcn.inst[i].saved_bo); in amdgpu_vcn_resume()
297 adev->vcn.inst[i].saved_bo = NULL; in amdgpu_vcn_resume()
302 hdr = (const struct common_firmware_header *)adev->vcn.fw->data; in amdgpu_vcn_resume()
305 memcpy_toio(adev->vcn.inst[i].cpu_addr, adev->vcn.fw->data + offset, in amdgpu_vcn_resume()
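
amdgpu_vcn_resume() is the inverse: if a suspend snapshot exists it is copied back into the VCPU BO and freed; otherwise the microcode image is re-copied from adev->vcn.fw and the remainder of the BO cleared. A sketch of that flow; the PSP-loading condition and exact bookkeeping vary by kernel version:

int amdgpu_vcn_resume(struct amdgpu_device *adev)
{
	unsigned int size;
	void *ptr;
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		if (adev->vcn.inst[i].vcpu_bo == NULL)
			return -EINVAL;

		size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo);
		ptr = adev->vcn.inst[i].cpu_addr;

		if (adev->vcn.inst[i].saved_bo != NULL) {
			/* warm resume: restore the snapshot taken in amdgpu_vcn_suspend() */
			memcpy_toio(ptr, adev->vcn.inst[i].saved_bo, size);
			kvfree(adev->vcn.inst[i].saved_bo);
			adev->vcn.inst[i].saved_bo = NULL;
		} else {
			const struct common_firmware_header *hdr;
			unsigned int offset;

			hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
			if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
				/* cold path: reload the microcode image into the VCPU BO */
				offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
				memcpy_toio(adev->vcn.inst[i].cpu_addr,
					    adev->vcn.fw->data + offset,
					    le32_to_cpu(hdr->ucode_size_bytes));
				size -= le32_to_cpu(hdr->ucode_size_bytes);
				ptr += le32_to_cpu(hdr->ucode_size_bytes);
			}
			memset_io(ptr, 0, size);
		}
	}
	return 0;
}
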
319 container_of(work, struct amdgpu_device, vcn.idle_work.work); in amdgpu_vcn_idle_work_handler()
323 for (j = 0; j < adev->vcn.num_vcn_inst; ++j) { in amdgpu_vcn_idle_work_handler()
324 if (adev->vcn.harvest_config & (1 << j)) in amdgpu_vcn_idle_work_handler()
327 for (i = 0; i < adev->vcn.num_enc_rings; ++i) { in amdgpu_vcn_idle_work_handler()
328 fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_enc[i]); in amdgpu_vcn_idle_work_handler()
335 unlikely(atomic_read(&adev->vcn.inst[j].dpg_enc_submission_cnt))) in amdgpu_vcn_idle_work_handler()
340 adev->vcn.pause_dpg_mode(adev, j, &new_state); in amdgpu_vcn_idle_work_handler()
343 fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_dec); in amdgpu_vcn_idle_work_handler()
347 if (!fences && !atomic_read(&adev->vcn.total_submission_cnt)) { in amdgpu_vcn_idle_work_handler()
351 schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT); in amdgpu_vcn_idle_work_handler()
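
The idle worker is the power-management core of the file: it sums outstanding fences across the decode and encode rings of every un-harvested instance, adjusts the DPG pause state, and only gates the VCN block when nothing is in flight and total_submission_cnt has dropped to zero; otherwise it re-arms itself. A reconstruction along the lines of the matches (GFXOFF interaction elided):

static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, vcn.idle_work.work);
	unsigned int fences = 0, fence[AMDGPU_MAX_VCN_INSTANCES] = {0};
	unsigned int i, j;

	for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
		if (adev->vcn.harvest_config & (1 << j))
			continue;

		for (i = 0; i < adev->vcn.num_enc_rings; ++i)
			fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_enc[i]);

		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
			struct dpg_pause_state new_state;

			if (fence[j] ||
			    unlikely(atomic_read(&adev->vcn.inst[j].dpg_enc_submission_cnt)))
				new_state.fw_based = VCN_DPG_STATE__PAUSE;
			else
				new_state.fw_based = VCN_DPG_STATE__UNPAUSE;

			adev->vcn.pause_dpg_mode(adev, j, &new_state);
		}

		fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_dec);
		fences += fence[j];
	}

	if (!fences && !atomic_read(&adev->vcn.total_submission_cnt)) {
		/* nothing pending anywhere: gate the whole VCN block */
		amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
						       AMD_PG_STATE_GATE);
	} else {
		schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
	}
}
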
359 atomic_inc(&adev->vcn.total_submission_cnt); in amdgpu_vcn_ring_begin_use()
360 cancel_delayed_work_sync(&adev->vcn.idle_work); in amdgpu_vcn_ring_begin_use()
362 mutex_lock(&adev->vcn.vcn_pg_lock); in amdgpu_vcn_ring_begin_use()
370 atomic_inc(&adev->vcn.inst[ring->me].dpg_enc_submission_cnt); in amdgpu_vcn_ring_begin_use()
376 for (i = 0; i < adev->vcn.num_enc_rings; ++i) in amdgpu_vcn_ring_begin_use()
377 fences += amdgpu_fence_count_emitted(&adev->vcn.inst[ring->me].ring_enc[i]); in amdgpu_vcn_ring_begin_use()
379 if (fences || atomic_read(&adev->vcn.inst[ring->me].dpg_enc_submission_cnt)) in amdgpu_vcn_ring_begin_use()
385 adev->vcn.pause_dpg_mode(adev, ring->me, &new_state); in amdgpu_vcn_ring_begin_use()
387 mutex_unlock(&adev->vcn.vcn_pg_lock); in amdgpu_vcn_ring_begin_use()
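
amdgpu_vcn_ring_begin_use() is the counterpart that runs before every submission: it bumps total_submission_cnt, cancels any pending idle work, and under vcn_pg_lock ungates the block and, on DPG-capable parts, pauses or unpauses DPG depending on whether encode work is outstanding for that instance. Sketched from the matches:

void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	atomic_inc(&adev->vcn.total_submission_cnt);
	cancel_delayed_work_sync(&adev->vcn.idle_work);

	mutex_lock(&adev->vcn.vcn_pg_lock);
	amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
					       AMD_PG_STATE_UNGATE);

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
		struct dpg_pause_state new_state;

		if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC) {
			atomic_inc(&adev->vcn.inst[ring->me].dpg_enc_submission_cnt);
			new_state.fw_based = VCN_DPG_STATE__PAUSE;
		} else {
			unsigned int fences = 0, i;

			for (i = 0; i < adev->vcn.num_enc_rings; ++i)
				fences += amdgpu_fence_count_emitted(&adev->vcn.inst[ring->me].ring_enc[i]);

			if (fences || atomic_read(&adev->vcn.inst[ring->me].dpg_enc_submission_cnt))
				new_state.fw_based = VCN_DPG_STATE__PAUSE;
			else
				new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
		}

		adev->vcn.pause_dpg_mode(adev, ring->me, &new_state);
	}
	mutex_unlock(&adev->vcn.vcn_pg_lock);
}
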
394 atomic_dec(&ring->adev->vcn.inst[ring->me].dpg_enc_submission_cnt); in amdgpu_vcn_ring_end_use()
396 atomic_dec(&ring->adev->vcn.total_submission_cnt); in amdgpu_vcn_ring_end_use()
398 schedule_delayed_work(&ring->adev->vcn.idle_work, VCN_IDLE_TIMEOUT); in amdgpu_vcn_ring_end_use()
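
amdgpu_vcn_ring_end_use() undoes the accounting and re-arms the idle timer; gating itself is left to the worker so back-to-back submissions do not bounce the power state. Roughly:

void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring)
{
	if (ring->adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
	    ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
		atomic_dec(&ring->adev->vcn.inst[ring->me].dpg_enc_submission_cnt);

	atomic_dec(&ring->adev->vcn.total_submission_cnt);

	schedule_delayed_work(&ring->adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
}
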
412 WREG32(adev->vcn.inst[ring->me].external.scratch9, 0xCAFEDEAD); in amdgpu_vcn_dec_ring_test_ring()
416 amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.scratch9, 0)); in amdgpu_vcn_dec_ring_test_ring()
420 tmp = RREG32(adev->vcn.inst[ring->me].external.scratch9); in amdgpu_vcn_dec_ring_test_ring()
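
The decode ring test is a scratch-register round trip: poison the per-instance scratch9 register with 0xCAFEDEAD via MMIO, ask the ring to overwrite it (with 0xDEADBEEF), and poll until the new value shows up or adev->usec_timeout expires. Reconstructed from the matches; details may differ slightly by version:

int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned int i;
	uint32_t tmp;
	int r;

	WREG32(adev->vcn.inst[ring->me].external.scratch9, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r)
		return r;
	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.scratch9, 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(adev->vcn.inst[ring->me].external.scratch9);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}
	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}
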
450 ib->ptr[0] = PACKET0(adev->vcn.internal.data0, 0); in amdgpu_vcn_dec_send_msg()
452 ib->ptr[2] = PACKET0(adev->vcn.internal.data1, 0); in amdgpu_vcn_dec_send_msg()
454 ib->ptr[4] = PACKET0(adev->vcn.internal.cmd, 0); in amdgpu_vcn_dec_send_msg()
457 ib->ptr[i] = PACKET0(adev->vcn.internal.nop, 0); in amdgpu_vcn_dec_send_msg()
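
Finally, amdgpu_vcn_dec_send_msg() builds a small indirect buffer that writes a message buffer's GPU address into the firmware's DATA0/DATA1 mailbox registers, kicks it with a CMD write, and pads the rest of the 16-dword IB with register NOPs; the surrounding job allocation, submission, and fence handling are elided here. The packet layout implied by the matches (ib, bo, and adev are assumed context; lower_32_bits()/upper_32_bits() used for clarity):

	/* ib points at the job's first IB; bo holds the decoder message */
	uint64_t addr = amdgpu_bo_gpu_offset(bo);
	int i;

	ib->ptr[0] = PACKET0(adev->vcn.internal.data0, 0);
	ib->ptr[1] = lower_32_bits(addr);
	ib->ptr[2] = PACKET0(adev->vcn.internal.data1, 0);
	ib->ptr[3] = upper_32_bits(addr);
	ib->ptr[4] = PACKET0(adev->vcn.internal.cmd, 0);
	ib->ptr[5] = 0;
	for (i = 6; i < 16; i += 2) {
		/* pad to a fixed 16-dword IB with register NOPs */
		ib->ptr[i] = PACKET0(adev->vcn.internal.nop, 0);
		ib->ptr[i + 1] = 0;
	}
	ib->length_dw = 16;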