Lines Matching refs:uvd (all hits fall in uvd_v7_0.c)

89 if (ring == &adev->uvd.inst[ring->me].ring_enc[0]) in uvd_v7_0_enc_ring_get_rptr()
123 if (ring == &adev->uvd.inst[ring->me].ring_enc[0]) in uvd_v7_0_enc_ring_get_wptr()
161 if (ring == &adev->uvd.inst[ring->me].ring_enc[0]) in uvd_v7_0_enc_ring_set_wptr()
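
These three hits share one pattern: each UVD instance owns two encode rings, and the pointer helpers pick the first or second ring-buffer register bank by testing whether the ring is ring_enc[0]. A minimal sketch of the wptr helper reconstructed around hit 123 (the SR-IOV doorbell fast path is left out; mmUVD_RB_WPTR/mmUVD_RB_WPTR2 follow the usual SOC15 naming and should be read as assumptions):

    static uint64_t uvd_v7_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
    {
            struct amdgpu_device *adev = ring->adev;

            /* first encode ring lives in the RB registers, the second in RB2 */
            if (ring == &adev->uvd.inst[ring->me].ring_enc[0])
                    return RREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR);
            else
                    return RREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR2);
    }
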
384 adev->uvd.num_uvd_inst = UVD7_MAX_HW_INSTANCES_VEGA20; in uvd_v7_0_early_init()
385 for (i = 0; i < adev->uvd.num_uvd_inst; i++) { in uvd_v7_0_early_init()
388 adev->uvd.harvest_config |= 1 << i; in uvd_v7_0_early_init()
391 if (adev->uvd.harvest_config == (AMDGPU_UVD_HARVEST_UVD0 | in uvd_v7_0_early_init()
396 adev->uvd.num_uvd_inst = 1; in uvd_v7_0_early_init()
400 adev->uvd.num_enc_rings = 1; in uvd_v7_0_early_init()
402 adev->uvd.num_enc_rings = 2; in uvd_v7_0_early_init()
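
Hits 384-402 are the early-init bookkeeping: Vega20 carries up to two UVD instances, each fused-off ("harvested") instance sets bit i of harvest_config, and the per-instance loops below all skip those bits; hit 396 drops the count to one when only the first instance survives, and hits 400/402 size num_enc_rings (one encode ring under SR-IOV, two on bare metal, as recalled from the mainline driver). A sketch of the detection loop (the harvesting register and mask names are recalled from the Vega20 code, not visible in the hits, so treat them as assumptions):

    adev->uvd.num_uvd_inst = UVD7_MAX_HW_INSTANCES_VEGA20;    /* 2 on Vega20 */
    for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
            if (RREG32_SOC15(UVD, i, mmUVD_PG0_CC_UVD_HARVESTING) &
                UVD_PG0_CC_UVD_HARVESTING__UVD_DISABLE_MASK)
                    adev->uvd.harvest_config |= 1 << i;       /* instance i fused off */
    }
    /* no usable instance at all: report the IP block as absent */
    if (adev->uvd.harvest_config == (AMDGPU_UVD_HARVEST_UVD0 |
                                     AMDGPU_UVD_HARVEST_UVD1))
            return -ENOENT;
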
417 for (j = 0; j < adev->uvd.num_uvd_inst; j++) { in uvd_v7_0_sw_init()
418 if (adev->uvd.harvest_config & (1 << j)) in uvd_v7_0_sw_init()
421 … amdgpu_ih_clientid_uvds[j], UVD_7_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->uvd.inst[j].irq); in uvd_v7_0_sw_init()
426 for (i = 0; i < adev->uvd.num_enc_rings; ++i) { in uvd_v7_0_sw_init()
427 …id(adev, amdgpu_ih_clientid_uvds[j], i + UVD_7_0__SRCID__UVD_ENC_GEN_PURP, &adev->uvd.inst[j].irq); in uvd_v7_0_sw_init()
439 hdr = (const struct common_firmware_header *)adev->uvd.fw->data; in uvd_v7_0_sw_init()
441 adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].fw = adev->uvd.fw; in uvd_v7_0_sw_init()
451 for (j = 0; j < adev->uvd.num_uvd_inst; j++) { in uvd_v7_0_sw_init()
452 if (adev->uvd.harvest_config & (1 << j)) in uvd_v7_0_sw_init()
455 ring = &adev->uvd.inst[j].ring; in uvd_v7_0_sw_init()
457 r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst[j].irq, 0); in uvd_v7_0_sw_init()
462 for (i = 0; i < adev->uvd.num_enc_rings; ++i) { in uvd_v7_0_sw_init()
463 ring = &adev->uvd.inst[j].ring_enc[i]; in uvd_v7_0_sw_init()
476 r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst[j].irq, 0); in uvd_v7_0_sw_init()
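
Hits 417-476 are the sw_init() bring-up, run once per surviving instance: register the system-message interrupt source plus one ENC source per encode ring, then initialize the decode ring and each encode ring against the same per-instance irq source. Condensed sketch (the firmware-header handling around hits 439-441 is omitted; the five-argument amdgpu_ring_init() form is taken from the hits themselves):

    for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
            if (adev->uvd.harvest_config & (1 << j))
                    continue;                               /* skip harvested instance */

            ring = &adev->uvd.inst[j].ring;                 /* decode ring */
            r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst[j].irq, 0);
            if (r)
                    return r;

            for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
                    ring = &adev->uvd.inst[j].ring_enc[i];  /* encode rings */
                    r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst[j].irq, 0);
                    if (r)
                            return r;
            }
    }
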
504 for (j = 0; j < adev->uvd.num_uvd_inst; ++j) { in uvd_v7_0_sw_fini()
505 if (adev->uvd.harvest_config & (1 << j)) in uvd_v7_0_sw_fini()
507 for (i = 0; i < adev->uvd.num_enc_rings; ++i) in uvd_v7_0_sw_fini()
508 amdgpu_ring_fini(&adev->uvd.inst[j].ring_enc[i]); in uvd_v7_0_sw_fini()
534 for (j = 0; j < adev->uvd.num_uvd_inst; ++j) { in uvd_v7_0_hw_init()
535 if (adev->uvd.harvest_config & (1 << j)) in uvd_v7_0_hw_init()
537 ring = &adev->uvd.inst[j].ring; in uvd_v7_0_hw_init()
580 for (i = 0; i < adev->uvd.num_enc_rings; ++i) { in uvd_v7_0_hw_init()
581 ring = &adev->uvd.inst[j].ring_enc[i]; in uvd_v7_0_hw_init()
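
Hits 534-581 are hw_init() walking the same instance/ring structure to smoke-test each ring. In kernels of this vintage a ring is flagged ready and then exercised; the flag is cleared again on failure, and hw_fini (hit 619) clears it unconditionally. Sketch, assuming that era's amdgpu_ring_test_ring() API:

    ring = &adev->uvd.inst[j].ring;
    ring->ready = true;
    r = amdgpu_ring_test_ring(ring);    /* simple register round trip over the ring */
    if (r)
            ring->ready = false;        /* keep the scheduler off a dead ring */
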
616 for (i = 0; i < adev->uvd.num_uvd_inst; ++i) { in uvd_v7_0_hw_fini()
617 if (adev->uvd.harvest_config & (1 << i)) in uvd_v7_0_hw_fini()
619 adev->uvd.inst[i].ring.ready = false; in uvd_v7_0_hw_fini()
662 for (i = 0; i < adev->uvd.num_uvd_inst; ++i) { in uvd_v7_0_mc_resume()
663 if (adev->uvd.harvest_config & (1 << i)) in uvd_v7_0_mc_resume()
673 lower_32_bits(adev->uvd.inst[i].gpu_addr)); in uvd_v7_0_mc_resume()
675 upper_32_bits(adev->uvd.inst[i].gpu_addr)); in uvd_v7_0_mc_resume()
684 lower_32_bits(adev->uvd.inst[i].gpu_addr + offset)); in uvd_v7_0_mc_resume()
686 upper_32_bits(adev->uvd.inst[i].gpu_addr + offset)); in uvd_v7_0_mc_resume()
691 lower_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE)); in uvd_v7_0_mc_resume()
693 upper_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE)); in uvd_v7_0_mc_resume()
705 WREG32_SOC15(UVD, i, mmUVD_GP_SCRATCH4, adev->uvd.max_handles); in uvd_v7_0_mc_resume()
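
Hits 662-705 are the memory-controller programming in mc_resume(): every 64-bit GPU address is split with lower_32_bits()/upper_32_bits() across a LOW/HIGH register pair, and the firmware image, heap, and session windows are laid out back to back from inst[i].gpu_addr (hits 684 and 691 add the offsets); hit 705 also publishes max_handles through a scratch register. One pair as a sketch (the 64BIT_BAR register names are recalled, not shown in the hits, so treat them as assumptions):

    WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
                 lower_32_bits(adev->uvd.inst[i].gpu_addr));
    WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
                 upper_32_bits(adev->uvd.inst[i].gpu_addr));
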
736 for (i = 0; i < adev->uvd.num_uvd_inst; ++i) { in uvd_v7_0_mmsch_start()
737 if (adev->uvd.harvest_config & (1 << i)) in uvd_v7_0_mmsch_start()
739 WDOORBELL32(adev->uvd.inst[i].ring_enc[0].doorbell_index, 0); in uvd_v7_0_mmsch_start()
740 adev->wb.wb[adev->uvd.inst[i].ring_enc[0].wptr_offs] = 0; in uvd_v7_0_mmsch_start()
741 adev->uvd.inst[i].ring_enc[0].wptr = 0; in uvd_v7_0_mmsch_start()
742 adev->uvd.inst[i].ring_enc[0].wptr_old = 0; in uvd_v7_0_mmsch_start()
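
Hits 736-742: before handing the engine to the MMSCH scheduler, the driver zeroes the first encode ring's doorbell and every driver-side copy of its write pointer, so no stale kick survives the handoff. The same four lines, annotated:

    WDOORBELL32(adev->uvd.inst[i].ring_enc[0].doorbell_index, 0); /* quiesce the doorbell */
    adev->wb.wb[adev->uvd.inst[i].ring_enc[0].wptr_offs] = 0;     /* clear the writeback slot */
    adev->uvd.inst[i].ring_enc[0].wptr = 0;                       /* and both cached copies */
    adev->uvd.inst[i].ring_enc[0].wptr_old = 0;
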
794 for (i = 0; i < adev->uvd.num_uvd_inst; ++i) { in uvd_v7_0_sriov_start()
795 if (adev->uvd.harvest_config & (1 << i)) in uvd_v7_0_sriov_start()
797 ring = &adev->uvd.inst[i].ring; in uvd_v7_0_sriov_start()
799 size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4); in uvd_v7_0_sriov_start()
812 lower_32_bits(adev->uvd.inst[i].gpu_addr)); in uvd_v7_0_sriov_start()
814 upper_32_bits(adev->uvd.inst[i].gpu_addr)); in uvd_v7_0_sriov_start()
823 lower_32_bits(adev->uvd.inst[i].gpu_addr + offset)); in uvd_v7_0_sriov_start()
825 upper_32_bits(adev->uvd.inst[i].gpu_addr + offset)); in uvd_v7_0_sriov_start()
830 lower_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE)); in uvd_v7_0_sriov_start()
832 upper_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE)); in uvd_v7_0_sriov_start()
837 MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_GP_SCRATCH4), adev->uvd.max_handles); in uvd_v7_0_sriov_start()
896 ring = &adev->uvd.inst[i].ring_enc[0]; in uvd_v7_0_sriov_start()
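
Hits 794-896: under SR-IOV the same windows as in mc_resume() are programmed, but indirectly. MMSCH_V1_0_INSERT_DIRECT_WT() appends a register/value pair to a command table that the MMSCH firmware later replays, instead of the driver issuing the WREG32 itself; hit 837 shows the shape:

    /* same register and value as the bare-metal write at hit 705,
     * but queued into the MMSCH table rather than written directly */
    MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_GP_SCRATCH4),
                                adev->uvd.max_handles);
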
935 for (k = 0; k < adev->uvd.num_uvd_inst; ++k) { in uvd_v7_0_start()
936 if (adev->uvd.harvest_config & (1 << k)) in uvd_v7_0_start()
949 for (k = 0; k < adev->uvd.num_uvd_inst; ++k) { in uvd_v7_0_start()
950 if (adev->uvd.harvest_config & (1 << k)) in uvd_v7_0_start()
952 ring = &adev->uvd.inst[k].ring; in uvd_v7_0_start()
1090 ring = &adev->uvd.inst[k].ring_enc[0]; in uvd_v7_0_start()
1097 ring = &adev->uvd.inst[k].ring_enc[1]; in uvd_v7_0_start()
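
Hits 1090-1097 close the loop on the ring_enc[0] test from hits 89-161: uvd_v7_0_start() programs the first encode ring into the RB register bank and the second into RB2. Sketch for ring_enc[0] (register names assumed per the SOC15 convention; ring_enc[1] repeats this with the *2 registers):

    ring = &adev->uvd.inst[k].ring_enc[0];
    WREG32_SOC15(UVD, k, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
    WREG32_SOC15(UVD, k, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
    WREG32_SOC15(UVD, k, mmUVD_RB_BASE_LO, lower_32_bits(ring->gpu_addr));
    WREG32_SOC15(UVD, k, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
    WREG32_SOC15(UVD, k, mmUVD_RB_SIZE, ring->ring_size / 4);
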
1118 for (i = 0; i < adev->uvd.num_uvd_inst; ++i) { in uvd_v7_0_stop()
1119 if (adev->uvd.harvest_config & (1 << i)) in uvd_v7_0_stop()
1465 adev->uvd.inst[ring->me].srbm_soft_reset = srbm_soft_reset;
1468 adev->uvd.inst[ring->me].srbm_soft_reset = 0;
1477 if (!adev->uvd.inst[ring->me].srbm_soft_reset)
1489 if (!adev->uvd.inst[ring->me].srbm_soft_reset)
1491 srbm_soft_reset = adev->uvd.inst[ring->me].srbm_soft_reset;
1519 if (!adev->uvd.inst[ring->me].srbm_soft_reset)
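
Hits 1465-1519 (the indexer resolved no enclosing function for these) follow the usual amdgpu soft-reset contract: the check hook computes an SRBM reset mask and stashes it per instance, and the later pre/soft/post hooks bail out when nothing is pending. The recurring guard, annotated:

    if (!adev->uvd.inst[ring->me].srbm_soft_reset)
            return 0;                       /* no reset pending for this instance */
    srbm_soft_reset = adev->uvd.inst[ring->me].srbm_soft_reset;
    /* ... apply the mask to SRBM_SOFT_RESET, then clear the stash ... */
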
1559 amdgpu_fence_process(&adev->uvd.inst[ip_instance].ring); in uvd_v7_0_process_interrupt()
1562 amdgpu_fence_process(&adev->uvd.inst[ip_instance].ring_enc[0]); in uvd_v7_0_process_interrupt()
1566 amdgpu_fence_process(&adev->uvd.inst[ip_instance].ring_enc[1]); in uvd_v7_0_process_interrupt()
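
Hits 1559-1566: the interrupt handler fans out by IH source ID to the fence processing of the matching ring, using the same source IDs registered in sw_init (hits 421-427). A sketch of the dispatch (UVD_7_0__SRCID__UVD_ENC_LOW_LATENCY is the assumed name of the second ENC source):

    switch (entry->src_id) {
    case UVD_7_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT:      /* decode done */
            amdgpu_fence_process(&adev->uvd.inst[ip_instance].ring);
            break;
    case UVD_7_0__SRCID__UVD_ENC_GEN_PURP:                  /* encode ring 0 */
            amdgpu_fence_process(&adev->uvd.inst[ip_instance].ring_enc[0]);
            break;
    case UVD_7_0__SRCID__UVD_ENC_LOW_LATENCY:               /* encode ring 1 */
            amdgpu_fence_process(&adev->uvd.inst[ip_instance].ring_enc[1]);
            break;
    default:
            DRM_ERROR("Unhandled interrupt: %d\n", entry->src_id);
            break;
    }
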
1836 for (i = 0; i < adev->uvd.num_uvd_inst; i++) { in uvd_v7_0_set_ring_funcs()
1837 if (adev->uvd.harvest_config & (1 << i)) in uvd_v7_0_set_ring_funcs()
1839 adev->uvd.inst[i].ring.funcs = &uvd_v7_0_ring_vm_funcs; in uvd_v7_0_set_ring_funcs()
1840 adev->uvd.inst[i].ring.me = i; in uvd_v7_0_set_ring_funcs()
1849 for (j = 0; j < adev->uvd.num_uvd_inst; j++) { in uvd_v7_0_set_enc_ring_funcs()
1850 if (adev->uvd.harvest_config & (1 << j)) in uvd_v7_0_set_enc_ring_funcs()
1852 for (i = 0; i < adev->uvd.num_enc_rings; ++i) { in uvd_v7_0_set_enc_ring_funcs()
1853 adev->uvd.inst[j].ring_enc[i].funcs = &uvd_v7_0_enc_ring_vm_funcs; in uvd_v7_0_set_enc_ring_funcs()
1854 adev->uvd.inst[j].ring_enc[i].me = j; in uvd_v7_0_set_enc_ring_funcs()
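
Hits 1836-1854: besides installing the func tables, each ring's me field is set to its instance index; that back-pointer is what lets instance-agnostic callbacks such as the pointer helpers at hits 89-161 index adev->uvd.inst[ring->me]. The two lines from the encode loop, annotated:

    adev->uvd.inst[j].ring_enc[i].funcs = &uvd_v7_0_enc_ring_vm_funcs;
    adev->uvd.inst[j].ring_enc[i].me = j;   /* ring -> owning UVD instance */
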
1870 for (i = 0; i < adev->uvd.num_uvd_inst; i++) { in uvd_v7_0_set_irq_funcs()
1871 if (adev->uvd.harvest_config & (1 << i)) in uvd_v7_0_set_irq_funcs()
1873 adev->uvd.inst[i].irq.num_types = adev->uvd.num_enc_rings + 1; in uvd_v7_0_set_irq_funcs()
1874 adev->uvd.inst[i].irq.funcs = &uvd_v7_0_irq_funcs; in uvd_v7_0_set_irq_funcs()
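
Hits 1870-1874: the interrupt-type count is num_enc_rings + 1, i.e. one type per encode ring plus one for the decode/system-message path, which matches the 1 + num_enc_rings sources wired up in sw_init (hits 421-427). Annotated:

    /* encode sources + the system-message source */
    adev->uvd.inst[i].irq.num_types = adev->uvd.num_enc_rings + 1;
    adev->uvd.inst[i].irq.funcs = &uvd_v7_0_irq_funcs;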