Lines Matching refs:vcn
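
Everything below comes from the VCN (Video Core Next) helper layer of the amdgpu driver, amdgpu_vcn.c in the Linux kernel; each row gives the source line number, the matching code, and the enclosing function. adev->vcn is the per-device VCN state (struct amdgpu_vcn): the shared firmware image, per-instance data under inst[], the power-gating locks, and the idle worker.
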
94 r = amdgpu_ucode_request(adev, &adev->vcn.fw, fw_name); in amdgpu_vcn_early_init()
96 amdgpu_ucode_release(&adev->vcn.fw); in amdgpu_vcn_early_init()
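
amdgpu_vcn_early_init() does little more than pin the microcode: amdgpu_ucode_request() wraps the kernel firmware loader plus amdgpu's header validation, and amdgpu_ucode_release() drops the image again on failure. A minimal sketch of the underlying pattern using the generic firmware API rather than the amdgpu wrappers (the blob name is a placeholder, and the 64-byte check stands in for the real common-header validation):

    #include <linux/firmware.h>
    #include <linux/device.h>

    /* Sketch: request a firmware blob by name, sanity-check it, and
     * release it on failure, mirroring the request/release pairing in
     * amdgpu_vcn_early_init(). */
    static int vcn_fw_load_sketch(struct device *dev, const struct firmware **fw)
    {
            int r;

            r = request_firmware(fw, "amdgpu/vcn_fw.bin", dev); /* placeholder name */
            if (r)
                    return r;

            if ((*fw)->size < 64) {   /* stand-in for header validation */
                    release_firmware(*fw);
                    *fw = NULL;
                    return -EINVAL;
            }
            return 0;
    }
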
109 INIT_DELAYED_WORK(&adev->vcn.idle_work, amdgpu_vcn_idle_work_handler); in amdgpu_vcn_sw_init()
110 mutex_init(&adev->vcn.vcn_pg_lock); in amdgpu_vcn_sw_init()
111 mutex_init(&adev->vcn.vcn1_jpeg1_workaround); in amdgpu_vcn_sw_init()
112 atomic_set(&adev->vcn.total_submission_cnt, 0); in amdgpu_vcn_sw_init()
113 for (i = 0; i < adev->vcn.num_vcn_inst; i++) in amdgpu_vcn_sw_init()
114 atomic_set(&adev->vcn.inst[i].dpg_enc_submission_cnt, 0); in amdgpu_vcn_sw_init()
118 adev->vcn.indirect_sram = true; in amdgpu_vcn_sw_init()
132 adev->vcn.indirect_sram = false; in amdgpu_vcn_sw_init()
138 hdr = (const struct common_firmware_header *)adev->vcn.fw->data; in amdgpu_vcn_sw_init()
139 adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version); in amdgpu_vcn_sw_init()
185 for (i = 0; i < adev->vcn.num_vcn_inst; i++) { in amdgpu_vcn_sw_init()
186 if (adev->vcn.harvest_config & (1 << i)) in amdgpu_vcn_sw_init()
192 &adev->vcn.inst[i].vcpu_bo, in amdgpu_vcn_sw_init()
193 &adev->vcn.inst[i].gpu_addr, in amdgpu_vcn_sw_init()
194 &adev->vcn.inst[i].cpu_addr); in amdgpu_vcn_sw_init()
200 adev->vcn.inst[i].fw_shared.cpu_addr = adev->vcn.inst[i].cpu_addr + in amdgpu_vcn_sw_init()
202 adev->vcn.inst[i].fw_shared.gpu_addr = adev->vcn.inst[i].gpu_addr + in amdgpu_vcn_sw_init()
205 adev->vcn.inst[i].fw_shared.mem_size = fw_shared_size; in amdgpu_vcn_sw_init()
208 adev->vcn.inst[i].fw_shared.cpu_addr -= AMDGPU_VCNFW_LOG_SIZE; in amdgpu_vcn_sw_init()
209 adev->vcn.inst[i].fw_shared.gpu_addr -= AMDGPU_VCNFW_LOG_SIZE; in amdgpu_vcn_sw_init()
210 adev->vcn.inst[i].fw_shared.log_offset = log_offset; in amdgpu_vcn_sw_init()
213 if (adev->vcn.indirect_sram) { in amdgpu_vcn_sw_init()
217 &adev->vcn.inst[i].dpg_sram_bo, in amdgpu_vcn_sw_init()
218 &adev->vcn.inst[i].dpg_sram_gpu_addr, in amdgpu_vcn_sw_init()
219 &adev->vcn.inst[i].dpg_sram_cpu_addr); in amdgpu_vcn_sw_init()
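
amdgpu_vcn_sw_init() initializes the idle worker, the locks and submission counters, reads the firmware version out of the common header, then allocates one VCPU BO per un-harvested instance and carves the firmware-shared region out of the BO's tail; with fw logging enabled, the last AMDGPU_VCNFW_LOG_SIZE bytes go to the log and the shared region slides down. A sketch of that carve-out, with bo_size, fw_shared_size and the vcpu_* addresses standing in for values computed earlier in the function:

    /* Tail-of-BO layout set up by amdgpu_vcn_sw_init():
     *
     *   |------------------ vcpu_bo (bo_size) -------------------|
     *   | vcpu image / stack ...    | fw_shared  | fw log (opt.) |
     *                               ^
     *                               bo_size - fw_shared_size
     *                               (minus AMDGPU_VCNFW_LOG_SIZE when logging)
     */
    void *fw_shared_cpu = vcpu_cpu_addr + bo_size - fw_shared_size;
    u64 fw_shared_gpu   = vcpu_gpu_addr + bo_size - fw_shared_size;

    if (use_fw_log) {
            /* the log claims the last AMDGPU_VCNFW_LOG_SIZE bytes, so the
             * shared region slides down; the log therefore starts at
             * fw_shared + fw_shared_size, as the fwlog reader assumes */
            fw_shared_cpu -= AMDGPU_VCNFW_LOG_SIZE;
            fw_shared_gpu -= AMDGPU_VCNFW_LOG_SIZE;
    }
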
234 for (j = 0; j < adev->vcn.num_vcn_inst; ++j) { in amdgpu_vcn_sw_fini()
235 if (adev->vcn.harvest_config & (1 << j)) in amdgpu_vcn_sw_fini()
239 &adev->vcn.inst[j].dpg_sram_bo, in amdgpu_vcn_sw_fini()
240 &adev->vcn.inst[j].dpg_sram_gpu_addr, in amdgpu_vcn_sw_fini()
241 (void **)&adev->vcn.inst[j].dpg_sram_cpu_addr); in amdgpu_vcn_sw_fini()
243 kvfree(adev->vcn.inst[j].saved_bo); in amdgpu_vcn_sw_fini()
245 amdgpu_bo_free_kernel(&adev->vcn.inst[j].vcpu_bo, in amdgpu_vcn_sw_fini()
246 &adev->vcn.inst[j].gpu_addr, in amdgpu_vcn_sw_fini()
247 (void **)&adev->vcn.inst[j].cpu_addr); in amdgpu_vcn_sw_fini()
249 amdgpu_ring_fini(&adev->vcn.inst[j].ring_dec); in amdgpu_vcn_sw_fini()
251 for (i = 0; i < adev->vcn.num_enc_rings; ++i) in amdgpu_vcn_sw_fini()
252 amdgpu_ring_fini(&adev->vcn.inst[j].ring_enc[i]); in amdgpu_vcn_sw_fini()
255 amdgpu_ucode_release(&adev->vcn.fw); in amdgpu_vcn_sw_fini()
256 mutex_destroy(&adev->vcn.vcn1_jpeg1_workaround); in amdgpu_vcn_sw_fini()
257 mutex_destroy(&adev->vcn.vcn_pg_lock); in amdgpu_vcn_sw_fini()
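
amdgpu_vcn_sw_fini() unwinds sw_init in reverse: the DPG SRAM BO, the suspend snapshot, the VCPU BO, the rings, the firmware, and finally the mutexes. The BO helpers travel as a triple, and the free helper clears all three pointers, which is why the fini path tolerates half-initialized state. A sketch of the pairing, assuming a valid adev and an arbitrary size:

    /* Sketch: amdgpu_bo_create_kernel()/amdgpu_bo_free_kernel() take the
     * same (bo, gpu_addr, cpu_addr) triple; the free helper NULLs them. */
    struct amdgpu_bo *bo = NULL;
    u64 gpu_addr;
    void *cpu_addr;
    int r;

    r = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE,
                                AMDGPU_GEM_DOMAIN_VRAM,
                                &bo, &gpu_addr, &cpu_addr);
    if (r)
            return r;
    /* ... use the mapping ... */
    amdgpu_bo_free_kernel(&bo, &gpu_addr, &cpu_addr);
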
277 int vcn_config = adev->vcn.vcn_config[vcn_instance]; in amdgpu_vcn_is_disabled_vcn()
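
amdgpu_vcn_is_disabled_vcn() consults a per-instance config word whose bits switch off individual ring types. A hedged reconstruction of the check (mask and enum spellings are from memory of the driver's vcn header, so treat them as assumptions):

    /* Sketch: a set bit in vcn_config disables the corresponding ring type. */
    bool disabled = false;

    if (type == VCN_ENCODE_RING && (vcn_config & VCN_BLOCK_ENCODE_DISABLE_MASK))
            disabled = true;
    else if (type == VCN_DECODE_RING && (vcn_config & VCN_BLOCK_DECODE_DISABLE_MASK))
            disabled = true;
    else if (type == VCN_UNIFIED_RING && (vcn_config & VCN_BLOCK_QUEUE_DISABLE_MASK))
            disabled = true;
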
295 cancel_delayed_work_sync(&adev->vcn.idle_work); in amdgpu_vcn_suspend()
297 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { in amdgpu_vcn_suspend()
298 if (adev->vcn.harvest_config & (1 << i)) in amdgpu_vcn_suspend()
300 if (adev->vcn.inst[i].vcpu_bo == NULL) in amdgpu_vcn_suspend()
303 size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo); in amdgpu_vcn_suspend()
304 ptr = adev->vcn.inst[i].cpu_addr; in amdgpu_vcn_suspend()
306 adev->vcn.inst[i].saved_bo = kvmalloc(size, GFP_KERNEL); in amdgpu_vcn_suspend()
307 if (!adev->vcn.inst[i].saved_bo) in amdgpu_vcn_suspend()
311 memcpy_fromio(adev->vcn.inst[i].saved_bo, ptr, size); in amdgpu_vcn_suspend()
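
amdgpu_vcn_suspend() first cancels the idle worker, then snapshots each live instance's VCPU BO: kvmalloc() because the buffer can be large enough that the vmalloc fallback matters, memcpy_fromio() because cpu_addr maps I/O memory. Condensed from the lines above:

    /* Sketch: save one instance's VCPU BO into pageable memory. */
    unsigned int size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo);
    void *ptr = adev->vcn.inst[i].cpu_addr;

    adev->vcn.inst[i].saved_bo = kvmalloc(size, GFP_KERNEL);
    if (!adev->vcn.inst[i].saved_bo)
            return -ENOMEM;

    memcpy_fromio(adev->vcn.inst[i].saved_bo, ptr, size);
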
324 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { in amdgpu_vcn_resume()
325 if (adev->vcn.harvest_config & (1 << i)) in amdgpu_vcn_resume()
327 if (adev->vcn.inst[i].vcpu_bo == NULL) in amdgpu_vcn_resume()
330 size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo); in amdgpu_vcn_resume()
331 ptr = adev->vcn.inst[i].cpu_addr; in amdgpu_vcn_resume()
333 if (adev->vcn.inst[i].saved_bo != NULL) { in amdgpu_vcn_resume()
335 memcpy_toio(ptr, adev->vcn.inst[i].saved_bo, size); in amdgpu_vcn_resume()
338 kvfree(adev->vcn.inst[i].saved_bo); in amdgpu_vcn_resume()
339 adev->vcn.inst[i].saved_bo = NULL; in amdgpu_vcn_resume()
344 hdr = (const struct common_firmware_header *)adev->vcn.fw->data; in amdgpu_vcn_resume()
348 memcpy_toio(adev->vcn.inst[i].cpu_addr, adev->vcn.fw->data + offset, in amdgpu_vcn_resume()
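
amdgpu_vcn_resume() mirrors that: an existing snapshot is copied back with memcpy_toio() and freed; otherwise, on the non-PSP load path, the ucode payload is re-copied from the firmware image past the common header. A sketch of both branches, with the header fields as defined by struct common_firmware_header:

    if (adev->vcn.inst[i].saved_bo) {
            /* restore the pre-suspend snapshot */
            memcpy_toio(ptr, adev->vcn.inst[i].saved_bo, size);
            kvfree(adev->vcn.inst[i].saved_bo);
            adev->vcn.inst[i].saved_bo = NULL;
    } else {
            const struct common_firmware_header *hdr =
                    (const struct common_firmware_header *)adev->vcn.fw->data;
            unsigned int offset = le32_to_cpu(hdr->ucode_array_offset_bytes);

            /* re-seed the VCPU BO straight from the firmware blob */
            memcpy_toio(adev->vcn.inst[i].cpu_addr,
                        adev->vcn.fw->data + offset,
                        le32_to_cpu(hdr->ucode_size_bytes));
    }
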
364 container_of(work, struct amdgpu_device, vcn.idle_work.work); in amdgpu_vcn_idle_work_handler()
369 for (j = 0; j < adev->vcn.num_vcn_inst; ++j) { in amdgpu_vcn_idle_work_handler()
370 if (adev->vcn.harvest_config & (1 << j)) in amdgpu_vcn_idle_work_handler()
373 for (i = 0; i < adev->vcn.num_enc_rings; ++i) in amdgpu_vcn_idle_work_handler()
374 fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_enc[i]); in amdgpu_vcn_idle_work_handler()
380 unlikely(atomic_read(&adev->vcn.inst[j].dpg_enc_submission_cnt))) in amdgpu_vcn_idle_work_handler()
385 adev->vcn.pause_dpg_mode(adev, j, &new_state); in amdgpu_vcn_idle_work_handler()
388 fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_dec); in amdgpu_vcn_idle_work_handler()
392 if (!fences && !atomic_read(&adev->vcn.total_submission_cnt)) { in amdgpu_vcn_idle_work_handler()
400 schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT); in amdgpu_vcn_idle_work_handler()
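
The idle worker sums emitted-but-unsignaled fences over every ring of every live instance and only powers the block down when that total and the submission counter both read zero; otherwise it re-arms itself. A condensed sketch (the DPG pause handling at line 385 is omitted, and vcn_power_gate() is a hypothetical stand-in for the driver's power-gating call):

    /* Hypothetical condensation of amdgpu_vcn_idle_work_handler(). */
    static void vcn_idle_work_sketch(struct work_struct *work)
    {
            struct amdgpu_device *adev =
                    container_of(work, struct amdgpu_device, vcn.idle_work.work);
            unsigned int fences = 0;
            int i, j;

            for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
                    if (adev->vcn.harvest_config & (1 << j))
                            continue;
                    for (i = 0; i < adev->vcn.num_enc_rings; ++i)
                            fences += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_enc[i]);
                    fences += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_dec);
            }

            if (!fences && !atomic_read(&adev->vcn.total_submission_cnt))
                    vcn_power_gate(adev);   /* hypothetical stand-in */
            else
                    schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
    }
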
409 atomic_inc(&adev->vcn.total_submission_cnt); in amdgpu_vcn_ring_begin_use()
411 if (!cancel_delayed_work_sync(&adev->vcn.idle_work)) { in amdgpu_vcn_ring_begin_use()
418 mutex_lock(&adev->vcn.vcn_pg_lock); in amdgpu_vcn_ring_begin_use()
426 atomic_inc(&adev->vcn.inst[ring->me].dpg_enc_submission_cnt); in amdgpu_vcn_ring_begin_use()
432 for (i = 0; i < adev->vcn.num_enc_rings; ++i) in amdgpu_vcn_ring_begin_use()
433 fences += amdgpu_fence_count_emitted(&adev->vcn.inst[ring->me].ring_enc[i]); in amdgpu_vcn_ring_begin_use()
435 if (fences || atomic_read(&adev->vcn.inst[ring->me].dpg_enc_submission_cnt)) in amdgpu_vcn_ring_begin_use()
441 adev->vcn.pause_dpg_mode(adev, ring->me, &new_state); in amdgpu_vcn_ring_begin_use()
443 mutex_unlock(&adev->vcn.vcn_pg_lock); in amdgpu_vcn_ring_begin_use()
450 atomic_dec(&ring->adev->vcn.inst[ring->me].dpg_enc_submission_cnt); in amdgpu_vcn_ring_end_use()
452 atomic_dec(&ring->adev->vcn.total_submission_cnt); in amdgpu_vcn_ring_end_use()
454 schedule_delayed_work(&ring->adev->vcn.idle_work, VCN_IDLE_TIMEOUT); in amdgpu_vcn_ring_end_use()
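
begin_use/end_use bracket every submission: begin_use bumps the global counter and cancels the idle worker, un-gating the block if the cancel found no pending work; end_use decrements and re-arms the worker so gating happens VCN_IDLE_TIMEOUT after the last job completes. A sketch of the pairing (the DPG bookkeeping under vcn_pg_lock is omitted, and vcn_power_ungate() is a hypothetical stand-in):

    void vcn_ring_begin_use_sketch(struct amdgpu_ring *ring)
    {
            struct amdgpu_device *adev = ring->adev;

            atomic_inc(&adev->vcn.total_submission_cnt);

            /* if no idle work was pending, the block may already be
             * power-gated and must be ungated before touching the ring */
            if (!cancel_delayed_work_sync(&adev->vcn.idle_work))
                    vcn_power_ungate(adev);   /* hypothetical stand-in */
    }

    void vcn_ring_end_use_sketch(struct amdgpu_ring *ring)
    {
            atomic_dec(&ring->adev->vcn.total_submission_cnt);
            schedule_delayed_work(&ring->adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
    }
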
468 WREG32(adev->vcn.inst[ring->me].external.scratch9, 0xCAFEDEAD); in amdgpu_vcn_dec_ring_test_ring()
472 amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.scratch9, 0)); in amdgpu_vcn_dec_ring_test_ring()
476 tmp = RREG32(adev->vcn.inst[ring->me].external.scratch9); in amdgpu_vcn_dec_ring_test_ring()
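
The decode ring test is the usual scratch-register handshake: seed scratch9 with 0xCAFEDEAD through the register path, ask the ring to overwrite it, and poll. A sketch with the customary 0xDEADBEEF completion value and the driver's usual usec_timeout polling convention:

    /* Sketch of the scratch-register ring test. */
    uint32_t tmp;
    unsigned int i;
    int r;

    WREG32(adev->vcn.inst[ring->me].external.scratch9, 0xCAFEDEAD);

    r = amdgpu_ring_alloc(ring, 3);
    if (r)
            return r;

    amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.scratch9, 0));
    amdgpu_ring_write(ring, 0xDEADBEEF);
    amdgpu_ring_commit(ring);

    for (i = 0; i < adev->usec_timeout; i++) {
            tmp = RREG32(adev->vcn.inst[ring->me].external.scratch9);
            if (tmp == 0xDEADBEEF)
                    break;
            udelay(1);
    }
    if (i >= adev->usec_timeout)
            r = -ETIMEDOUT;
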
537 ib->ptr[0] = PACKET0(adev->vcn.internal.data0, 0); in amdgpu_vcn_dec_send_msg()
539 ib->ptr[2] = PACKET0(adev->vcn.internal.data1, 0); in amdgpu_vcn_dec_send_msg()
541 ib->ptr[4] = PACKET0(adev->vcn.internal.cmd, 0); in amdgpu_vcn_dec_send_msg()
544 ib->ptr[i] = PACKET0(adev->vcn.internal.nop, 0); in amdgpu_vcn_dec_send_msg()
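
Decode messages travel in a small IB of PACKET0 register writes: data0/data1 carry the message GPU address as low/high words, cmd kicks the engine, and the rest is padded with NOPs. A sketch of the 16-dword IB body, assuming addr holds the message address (the cmd value varies per message type):

    /* Sketch: fill a decode-message IB with PACKET0 register writes. */
    ib->ptr[0] = PACKET0(adev->vcn.internal.data0, 0);
    ib->ptr[1] = lower_32_bits(addr);
    ib->ptr[2] = PACKET0(adev->vcn.internal.data1, 0);
    ib->ptr[3] = upper_32_bits(addr);
    ib->ptr[4] = PACKET0(adev->vcn.internal.cmd, 0);
    ib->ptr[5] = 0;   /* command word, depends on the message type */
    for (i = 6; i < 16; i += 2) {
            ib->ptr[i] = PACKET0(adev->vcn.internal.nop, 0);
            ib->ptr[i + 1] = 0;
    }
    ib->length_dw = 16;
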
1033 hdr = (const struct common_firmware_header *)adev->vcn.fw->data; in amdgpu_vcn_setup_ucode()
1035 for (i = 0; i < adev->vcn.num_vcn_inst; i++) { in amdgpu_vcn_setup_ucode()
1036 if (adev->vcn.harvest_config & (1 << i)) in amdgpu_vcn_setup_ucode()
1045 adev->firmware.ucode[idx].fw = adev->vcn.fw; in amdgpu_vcn_setup_ucode()
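
On the PSP load path, setup_ucode registers the single adev->vcn.fw image once per un-harvested instance in the global ucode table. A hedged sketch of one table entry, assuming consecutive AMDGPU_UCODE_ID_VCN + i ids as in other multi-instance IP blocks, with hdr being the common header parsed above:

    /* Sketch, assumption: ucode ids are AMDGPU_UCODE_ID_VCN + i. */
    struct amdgpu_firmware_info *info =
            &adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i];

    info->ucode_id = AMDGPU_UCODE_ID_VCN + i;
    info->fw = adev->vcn.fw;
    adev->firmware.fw_size +=
            ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
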
1063 struct amdgpu_vcn_inst *vcn; in amdgpu_debugfs_vcn_fwlog_read() local
1069 vcn = file_inode(f)->i_private; in amdgpu_debugfs_vcn_fwlog_read()
1070 if (!vcn) in amdgpu_debugfs_vcn_fwlog_read()
1073 if (!vcn->fw_shared.cpu_addr || !amdgpu_vcnfw_log) in amdgpu_debugfs_vcn_fwlog_read()
1076 log_buf = vcn->fw_shared.cpu_addr + vcn->fw_shared.mem_size; in amdgpu_debugfs_vcn_fwlog_read()
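
The debugfs reader recovers the instance from the inode's private data and points at the log, which sits immediately past the shared region in the same BO. A deliberately simplified read path; the real handler walks a firmware-maintained ring buffer with read/write pointers, and this sketch also glosses over reading I/O memory directly:

    #include <linux/fs.h>

    /* Simplified sketch: expose the raw log window via debugfs. */
    static ssize_t vcn_fwlog_read_sketch(struct file *f, char __user *buf,
                                         size_t size, loff_t *pos)
    {
            struct amdgpu_vcn_inst *vcn = file_inode(f)->i_private;
            void *log_buf;

            if (!vcn || !vcn->fw_shared.cpu_addr)
                    return -EFAULT;

            log_buf = vcn->fw_shared.cpu_addr + vcn->fw_shared.mem_size;
            return simple_read_from_buffer(buf, size, pos, log_buf,
                                           AMDGPU_VCNFW_LOG_SIZE);
    }
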
1128 struct amdgpu_vcn_inst *vcn) in amdgpu_debugfs_vcn_fwlog_init() argument
1136 debugfs_create_file_size(name, S_IFREG | 0444, root, vcn, in amdgpu_debugfs_vcn_fwlog_init()
1142 void amdgpu_vcn_fwlog_init(struct amdgpu_vcn_inst *vcn) in amdgpu_vcn_fwlog_init() argument
1145 volatile uint32_t *flag = vcn->fw_shared.cpu_addr; in amdgpu_vcn_fwlog_init()
1146 void *fw_log_cpu_addr = vcn->fw_shared.cpu_addr + vcn->fw_shared.mem_size; in amdgpu_vcn_fwlog_init()
1147 uint64_t fw_log_gpu_addr = vcn->fw_shared.gpu_addr + vcn->fw_shared.mem_size; in amdgpu_vcn_fwlog_init()
1149 volatile struct amdgpu_fw_shared_fw_logging *fw_log = vcn->fw_shared.cpu_addr in amdgpu_vcn_fwlog_init()
1150 + vcn->fw_shared.log_offset; in amdgpu_vcn_fwlog_init()
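
amdgpu_vcn_fwlog_init() advertises the log buffer to the firmware through the shared region: the log's GPU address is split into little-endian low/high words and a logging flag is raised in the shared header. The field and flag names below are assumptions modeled on the listing, not verified against the header:

    /* Sketch, field/flag names assumed: tell the firmware where to log. */
    fw_log->is_enabled = 1;
    fw_log->addr_lo = cpu_to_le32(lower_32_bits(fw_log_gpu_addr));
    fw_log->addr_hi = cpu_to_le32(upper_32_bits(fw_log_gpu_addr));
    fw_log->size    = cpu_to_le32(AMDGPU_VCNFW_LOG_SIZE);

    /* flip the logging bit in the shared-region flag word */
    *flag |= cpu_to_le32(AMDGPU_VCN_FW_LOGGING_FLAG);
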
1169 struct ras_common_if *ras_if = adev->vcn.ras_if; in amdgpu_vcn_process_poison_irq()
1200 for (i = 0; i < adev->vcn.num_vcn_inst; i++) { in amdgpu_vcn_ras_late_init()
1201 if (adev->vcn.harvest_config & (1 << i) || in amdgpu_vcn_ras_late_init()
1202 !adev->vcn.inst[i].ras_poison_irq.funcs) in amdgpu_vcn_ras_late_init()
1205 r = amdgpu_irq_get(adev, &adev->vcn.inst[i].ras_poison_irq, 0); in amdgpu_vcn_ras_late_init()
1222 if (!adev->vcn.ras) in amdgpu_vcn_ras_sw_init()
1225 ras = adev->vcn.ras; in amdgpu_vcn_ras_sw_init()
1235 adev->vcn.ras_if = &ras->ras_block.ras_comm; in amdgpu_vcn_ras_sw_init()
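
RAS wiring is two-stage: ras_sw_init() publishes the block's common RAS interface through adev->vcn.ras_if, and ras_late_init() arms the poison interrupt on every live instance that registered handlers. Sketch of the arming loop (shape taken from the listing; the unwind label is hypothetical):

    /* Sketch: enable the RAS poison interrupt per live instance. */
    for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
            if (adev->vcn.harvest_config & (1 << i) ||
                !adev->vcn.inst[i].ras_poison_irq.funcs)
                    continue;

            r = amdgpu_irq_get(adev, &adev->vcn.inst[i].ras_poison_irq, 0);
            if (r)
                    goto late_fini;   /* hypothetical unwind label */
    }
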
1250 .mc_addr = adev->vcn.inst[inst_idx].dpg_sram_gpu_addr, in amdgpu_vcn_psp_update_sram()
1251 .ucode_size = ((uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_curr_addr - in amdgpu_vcn_psp_update_sram()
1252 (uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr), in amdgpu_vcn_psp_update_sram()
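
In DPG mode the register programming is staged into the per-instance SRAM BO and handed to the PSP as one descriptor; the payload size is simply how far the write cursor (dpg_sram_curr_addr) advanced from the staging buffer's base. Sketch of the descriptor, with the actual PSP submission call omitted:

    /* Sketch: describe the staged DPG write list for the PSP load path. */
    struct amdgpu_firmware_info ucode = {
            .mc_addr    = adev->vcn.inst[inst_idx].dpg_sram_gpu_addr,
            .ucode_size = (uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_curr_addr -
                          (uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr,
    };
    /* ... the descriptor is then submitted through the PSP firmware-load path ... */
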