Lines Matching refs:sdma

417 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v4_0_destroy_inst_ctx()
418 if (adev->sdma.instance[i].fw != NULL) in sdma_v4_0_destroy_inst_ctx()
419 release_firmware(adev->sdma.instance[i].fw); in sdma_v4_0_destroy_inst_ctx()
427 memset((void*)adev->sdma.instance, 0, in sdma_v4_0_destroy_inst_ctx()
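The four matches above are the per-instance firmware teardown path. A minimal reconstruction of that loop, assuming the standard amdgpu types and <linux/firmware.h>; the size expression in the memset (its continuation lines are not in the match set) is an assumption:

	static void sdma_v4_0_destroy_inst_ctx(struct amdgpu_device *adev)
	{
		int i;

		for (i = 0; i < adev->sdma.num_instances; i++) {
			/* drop the reference taken by request_firmware() */
			if (adev->sdma.instance[i].fw != NULL)
				release_firmware(adev->sdma.instance[i].fw);
		}

		/* wipe the whole instance array so stale pointers cannot be
		 * reused on re-init; the sizeof operand is an assumption */
		memset((void *)adev->sdma.instance, 0,
		       sizeof(struct amdgpu_sdma_instance) *
		       AMDGPU_MAX_SDMA_INSTANCES);
	}
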
483 err = request_firmware(&adev->sdma.instance[0].fw, fw_name, adev->dev); in sdma_v4_0_init_microcode()
487 err = sdma_v4_0_init_inst_ctx(&adev->sdma.instance[0]); in sdma_v4_0_init_microcode()
491 for (i = 1; i < adev->sdma.num_instances; i++) { in sdma_v4_0_init_microcode()
495 memcpy((void*)&adev->sdma.instance[i], in sdma_v4_0_init_microcode()
496 (void*)&adev->sdma.instance[0], in sdma_v4_0_init_microcode()
502 err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev); in sdma_v4_0_init_microcode()
506 err = sdma_v4_0_init_inst_ctx(&adev->sdma.instance[i]); in sdma_v4_0_init_microcode()
516 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v4_0_init_microcode()
519 info->fw = adev->sdma.instance[i].fw; in sdma_v4_0_init_microcode()
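Lines 483-519 show the microcode load pattern: instance 0's image is requested and parsed first, then each further instance either reuses that parsed context or loads its own file. A hedged sketch (the duplicate_fw flag and the per-instance rebuild of fw_name are assumptions; only the calls themselves appear in the matches):

	err = request_firmware(&adev->sdma.instance[0].fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = sdma_v4_0_init_inst_ctx(&adev->sdma.instance[0]);
	if (err)
		goto out;

	for (i = 1; i < adev->sdma.num_instances; i++) {
		if (duplicate_fw) {
			/* ASICs whose instances share one image: copy the
			 * parsed context instead of loading it again */
			memcpy((void *)&adev->sdma.instance[i],
			       (void *)&adev->sdma.instance[0],
			       sizeof(struct amdgpu_sdma_instance));
		} else {
			/* fw_name is assumed to be rebuilt per instance */
			err = request_firmware(&adev->sdma.instance[i].fw,
					       fw_name, adev->dev);
			if (err)
				goto out;
			err = sdma_v4_0_init_inst_ctx(&adev->sdma.instance[i]);
			if (err)
				goto out;
		}
	}

The error path presumably unwinds through sdma_v4_0_destroy_inst_ctx() above, and the final loop (lines 516-519) appears to publish each instance's firmware in the ucode info table used for front-door loading.
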
674 struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring); in sdma_v4_0_ring_insert_nop() local
678 if (sdma && sdma->burst_nop && (i == 0)) in sdma_v4_0_ring_insert_nop()
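The two matches in sdma_v4_0_ring_insert_nop() are the burst-NOP optimization: when the instance supports it, the first NOP carries a count so the engine consumes the whole pad as one packet. A sketch, with the packet-header macro as an assumption:

	static void sdma_v4_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
	{
		struct amdgpu_sdma_instance *sdma =
			amdgpu_sdma_get_instance_from_ring(ring);
		int i;

		for (i = 0; i < count; i++)
			if (sdma && sdma->burst_nop && (i == 0))
				/* one burst NOP covering the whole pad */
				amdgpu_ring_write(ring, ring->funcs->nop |
					SDMA_PKT_NOP_HEADER_COUNT(count - 1));
			else
				amdgpu_ring_write(ring, ring->funcs->nop);
	}
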
808 struct amdgpu_ring *sdma[AMDGPU_MAX_SDMA_INSTANCES]; in sdma_v4_0_gfx_stop() local
812 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v4_0_gfx_stop()
813 sdma[i] = &adev->sdma.instance[i].ring; in sdma_v4_0_gfx_stop()
815 if ((adev->mman.buffer_funcs_ring == sdma[i]) && unset != 1) { in sdma_v4_0_gfx_stop()
827 sdma[i]->sched.ready = false; in sdma_v4_0_gfx_stop()
852 struct amdgpu_ring *sdma[AMDGPU_MAX_SDMA_INSTANCES]; in sdma_v4_0_page_stop() local
857 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v4_0_page_stop()
858 sdma[i] = &adev->sdma.instance[i].page; in sdma_v4_0_page_stop()
860 if ((adev->mman.buffer_funcs_ring == sdma[i]) && in sdma_v4_0_page_stop()
875 sdma[i]->sched.ready = false; in sdma_v4_0_page_stop()
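sdma_v4_0_gfx_stop() and sdma_v4_0_page_stop() share one shape: cache each instance's ring (or page) pointer, detach TTM's buffer-move ring if it is among them, then mark every scheduler not-ready once the queue registers are disabled. A sketch of the gfx variant (register writes omitted; the TTM helper call is an assumption):

	for (i = 0; i < adev->sdma.num_instances; i++) {
		sdma[i] = &adev->sdma.instance[i].ring;

		/* stop routing buffer moves through a ring we are killing */
		if ((adev->mman.buffer_funcs_ring == sdma[i]) && unset != 1) {
			amdgpu_ttm_set_buffer_funcs_status(adev, false);
			unset = 1;
		}

		/* ... RB_ENABLE / IB_ENABLE register writes elided ... */

		sdma[i]->sched.ready = false;
	}
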
916 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v4_0_ctx_switch_enable()
946 if (adev->sdma.has_page_queue) in sdma_v4_0_enable()
950 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v4_0_enable()
985 struct amdgpu_ring *ring = &adev->sdma.instance[i].ring; in sdma_v4_0_gfx_resume()
1075 struct amdgpu_ring *ring = &adev->sdma.instance[i].page; in sdma_v4_0_page_resume()
1252 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v4_0_load_microcode()
1253 if (!adev->sdma.instance[i].fw) in sdma_v4_0_load_microcode()
1256 hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data; in sdma_v4_0_load_microcode()
1261 (adev->sdma.instance[i].fw->data + in sdma_v4_0_load_microcode()
1271 adev->sdma.instance[i].fw_version); in sdma_v4_0_load_microcode()
1308 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v4_0_start()
1313 if (adev->sdma.has_page_queue) in sdma_v4_0_start()
1338 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v4_0_start()
1339 ring = &adev->sdma.instance[i].ring; in sdma_v4_0_start()
1345 if (adev->sdma.has_page_queue) { in sdma_v4_0_start()
1346 struct amdgpu_ring *page = &adev->sdma.instance[i].page; in sdma_v4_0_start()
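sdma_v4_0_start() stitches the pieces together: (optionally) load microcode, enable the engines, resume the gfx and page queues of every instance, then ring-test each queue. A sketch of the per-instance part; amdgpu_ring_test_helper() is the usual amdgpu test wrapper, and the exact resume signatures are assumptions:

	for (i = 0; i < adev->sdma.num_instances; i++) {
		sdma_v4_0_gfx_resume(adev, i);
		if (adev->sdma.has_page_queue)
			sdma_v4_0_page_resume(adev, i);
	}

	for (i = 0; i < adev->sdma.num_instances; i++) {
		ring = &adev->sdma.instance[i].ring;
		r = amdgpu_ring_test_helper(ring);
		if (r)
			return r;

		if (adev->sdma.has_page_queue) {
			struct amdgpu_ring *page = &adev->sdma.instance[i].page;

			r = amdgpu_ring_test_helper(page);
			if (r)
				return r;
		}
	}
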
1578 struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring); in sdma_v4_0_ring_pad_ib() local
1584 if (sdma && sdma->burst_nop && (i == 0)) in sdma_v4_0_ring_pad_ib()
1646 uint fw_version = adev->sdma.instance[0].fw_version; in sdma_v4_0_fw_support_paging_queue()
1667 adev->sdma.num_instances = 1; in sdma_v4_0_early_init()
1669 adev->sdma.num_instances = 8; in sdma_v4_0_early_init()
1671 adev->sdma.num_instances = 2; in sdma_v4_0_early_init()
1681 adev->sdma.has_page_queue = false; in sdma_v4_0_early_init()
1683 adev->sdma.has_page_queue = true; in sdma_v4_0_early_init()
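Lines 1667-1683 pick the topology in early_init: one instance, eight, or the default two, plus whether a separate paging queue exists. The chip conditions below are assumptions; only the assigned values come from the matches:

	if (adev->asic_type == CHIP_RAVEN)
		adev->sdma.num_instances = 1;
	else if (adev->asic_type == CHIP_ARCTURUS)
		adev->sdma.num_instances = 8;
	else
		adev->sdma.num_instances = 2;

	/* paging-queue support additionally depends on the firmware
	 * version, cf. sdma_v4_0_fw_support_paging_queue() at line 1646 */
	if (!sdma_v4_0_fw_support_paging_queue(adev))
		adev->sdma.has_page_queue = false;
	else
		adev->sdma.has_page_queue = true;
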
1700 struct ras_common_if **ras_if = &adev->sdma.ras_if; in sdma_v4_0_late_init()
1771 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v4_0_late_init()
1772 r = amdgpu_irq_get(adev, &adev->sdma.ecc_irq, in sdma_v4_0_late_init()
1799 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v4_0_sw_init()
1802 &adev->sdma.trap_irq); in sdma_v4_0_sw_init()
1808 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v4_0_sw_init()
1811 &adev->sdma.ecc_irq); in sdma_v4_0_sw_init()
1816 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v4_0_sw_init()
1817 ring = &adev->sdma.instance[i].ring; in sdma_v4_0_sw_init()
1828 r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq, in sdma_v4_0_sw_init()
1833 if (adev->sdma.has_page_queue) { in sdma_v4_0_sw_init()
1834 ring = &adev->sdma.instance[i].page; in sdma_v4_0_sw_init()
1846 &adev->sdma.trap_irq, in sdma_v4_0_sw_init()
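sw_init first hooks the trap and ECC interrupt sources for every instance, then initializes the rings, with a second ring per instance when the paging queue exists. A sketch of the ring-init loop; the doorbell setup, ring names, and the irq_type argument are assumptions:

	for (i = 0; i < adev->sdma.num_instances; i++) {
		ring = &adev->sdma.instance[i].ring;
		ring->ring_obj = NULL;
		ring->use_doorbell = true;	/* assumed, typical for SDMA v4 */

		sprintf(ring->name, "sdma%d", i);
		r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq,
				     AMDGPU_SDMA_IRQ_INSTANCE0 + i);
		if (r)
			return r;

		if (adev->sdma.has_page_queue) {
			ring = &adev->sdma.instance[i].page;
			sprintf(ring->name, "page%d", i);
			r = amdgpu_ring_init(adev, ring, 1024,
					     &adev->sdma.trap_irq,
					     AMDGPU_SDMA_IRQ_INSTANCE0 + i);
			if (r)
				return r;
		}
	}
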
1862 adev->sdma.ras_if) { in sdma_v4_0_sw_fini()
1863 struct ras_common_if *ras_if = adev->sdma.ras_if; in sdma_v4_0_sw_fini()
1877 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v4_0_sw_fini()
1878 amdgpu_ring_fini(&adev->sdma.instance[i].ring); in sdma_v4_0_sw_fini()
1879 if (adev->sdma.has_page_queue) in sdma_v4_0_sw_fini()
1880 amdgpu_ring_fini(&adev->sdma.instance[i].page); in sdma_v4_0_sw_fini()
1914 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v4_0_hw_fini()
1915 amdgpu_irq_put(adev, &adev->sdma.ecc_irq, in sdma_v4_0_hw_fini()
1949 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v4_0_is_idle()
1962 u32 sdma[AMDGPU_MAX_SDMA_INSTANCES]; in sdma_v4_0_wait_for_idle() local
1966 for (j = 0; j < adev->sdma.num_instances; j++) { in sdma_v4_0_wait_for_idle()
1967 sdma[j] = RREG32_SDMA(j, mmSDMA0_STATUS_REG); in sdma_v4_0_wait_for_idle()
1968 if (!(sdma[j] & SDMA0_STATUS_REG__IDLE_MASK)) in sdma_v4_0_wait_for_idle()
1971 if (j == adev->sdma.num_instances) in sdma_v4_0_wait_for_idle()
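wait_for_idle polls every instance's status register until all of them report idle in the same pass; the inner loop breaks early on the first busy engine, so reaching num_instances means everything was idle. Reconstructed loop; the outer timeout bound and the udelay are assumptions:

	for (i = 0; i < adev->usec_timeout; i++) {
		for (j = 0; j < adev->sdma.num_instances; j++) {
			sdma[j] = RREG32_SDMA(j, mmSDMA0_STATUS_REG);
			if (!(sdma[j] & SDMA0_STATUS_REG__IDLE_MASK))
				break;
		}
		/* only a full inner pass means every engine was idle */
		if (j == adev->sdma.num_instances)
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
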
2010 amdgpu_fence_process(&adev->sdma.instance[instance].ring); in sdma_v4_0_process_trap_irq()
2014 amdgpu_fence_process(&adev->sdma.instance[instance].page); in sdma_v4_0_process_trap_irq()
2021 amdgpu_fence_process(&adev->sdma.instance[instance].page); in sdma_v4_0_process_trap_irq()
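The trap handler fans out fence processing to whichever queue raised the interrupt. Two distinct page-queue matches appear (lines 2014 and 2021), suggesting an ASIC-dependent split; the ring_id values below are assumptions:

	switch (entry->ring_id) {
	case 0:
		amdgpu_fence_process(&adev->sdma.instance[instance].ring);
		break;
	case 1:
	case 2:
		/* which ring_id maps to the page queue differs by ASIC;
		 * the labels here are assumed */
		amdgpu_fence_process(&adev->sdma.instance[instance].page);
		break;
	}
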
2060 struct ras_common_if *ras_if = adev->sdma.ras_if; in sdma_v4_0_process_ecc_irq()
2088 drm_sched_fault(&adev->sdma.instance[instance].ring.sched); in sdma_v4_0_process_illegal_inst_irq()
2117 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v4_0_update_medium_grain_clock_gating()
2131 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v4_0_update_medium_grain_clock_gating()
2156 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v4_0_update_medium_grain_light_sleep()
2164 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v4_0_update_medium_grain_light_sleep()
2390 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v4_0_set_ring_funcs()
2392 adev->sdma.instance[i].ring.funcs = in sdma_v4_0_set_ring_funcs()
2395 adev->sdma.instance[i].ring.funcs = in sdma_v4_0_set_ring_funcs()
2397 adev->sdma.instance[i].ring.me = i; in sdma_v4_0_set_ring_funcs()
2398 if (adev->sdma.has_page_queue) { in sdma_v4_0_set_ring_funcs()
2400 adev->sdma.instance[i].page.funcs = in sdma_v4_0_set_ring_funcs()
2403 adev->sdma.instance[i].page.funcs = in sdma_v4_0_set_ring_funcs()
2405 adev->sdma.instance[i].page.me = i; in sdma_v4_0_set_ring_funcs()
2428 switch (adev->sdma.num_instances) { in sdma_v4_0_set_irq_funcs()
2430 adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_INSTANCE1; in sdma_v4_0_set_irq_funcs()
2431 adev->sdma.ecc_irq.num_types = AMDGPU_SDMA_IRQ_INSTANCE1; in sdma_v4_0_set_irq_funcs()
2434 adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST; in sdma_v4_0_set_irq_funcs()
2435 adev->sdma.ecc_irq.num_types = AMDGPU_SDMA_IRQ_LAST; in sdma_v4_0_set_irq_funcs()
2439 adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_INSTANCE2; in sdma_v4_0_set_irq_funcs()
2440 adev->sdma.ecc_irq.num_types = AMDGPU_SDMA_IRQ_INSTANCE2; in sdma_v4_0_set_irq_funcs()
2443 adev->sdma.trap_irq.funcs = &sdma_v4_0_trap_irq_funcs; in sdma_v4_0_set_irq_funcs()
2444 adev->sdma.illegal_inst_irq.funcs = &sdma_v4_0_illegal_inst_irq_funcs; in sdma_v4_0_set_irq_funcs()
2445 adev->sdma.ecc_irq.funcs = &sdma_v4_0_ecc_irq_funcs; in sdma_v4_0_set_irq_funcs()
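set_irq_funcs sizes the interrupt sources to the instance count chosen in early_init. Pairing that 1/8/2 split with the num_types constants in the matches gives the sketch below; the case labels are inferred, not matched:

	switch (adev->sdma.num_instances) {
	case 1:
		adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_INSTANCE1;
		adev->sdma.ecc_irq.num_types = AMDGPU_SDMA_IRQ_INSTANCE1;
		break;
	case 8:
		adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
		adev->sdma.ecc_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
		break;
	case 2:
	default:
		adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_INSTANCE2;
		adev->sdma.ecc_irq.num_types = AMDGPU_SDMA_IRQ_INSTANCE2;
		break;
	}
	adev->sdma.trap_irq.funcs = &sdma_v4_0_trap_irq_funcs;
	adev->sdma.illegal_inst_irq.funcs = &sdma_v4_0_illegal_inst_irq_funcs;
	adev->sdma.ecc_irq.funcs = &sdma_v4_0_ecc_irq_funcs;
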
2510 if (adev->sdma.has_page_queue) in sdma_v4_0_set_buffer_funcs()
2511 adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].page; in sdma_v4_0_set_buffer_funcs()
2513 adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring; in sdma_v4_0_set_buffer_funcs()
2530 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v4_0_set_vm_pte_funcs()
2531 if (adev->sdma.has_page_queue) in sdma_v4_0_set_vm_pte_funcs()
2532 sched = &adev->sdma.instance[i].page.sched; in sdma_v4_0_set_vm_pte_funcs()
2534 sched = &adev->sdma.instance[i].ring.sched; in sdma_v4_0_set_vm_pte_funcs()
2538 adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances; in sdma_v4_0_set_vm_pte_funcs()
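set_vm_pte_funcs routes page-table updates to the page queue when one exists, otherwise to the gfx ring, exposing one scheduler per instance to the VM manager. Sketch; the vm_pte_rqs assignment inside the loop is an assumption:

	for (i = 0; i < adev->sdma.num_instances; i++) {
		if (adev->sdma.has_page_queue)
			sched = &adev->sdma.instance[i].page.sched;
		else
			sched = &adev->sdma.instance[i].ring.sched;

		/* assumed: hand the kernel-priority run queue to the VM
		 * manager for PTE update submissions */
		adev->vm_manager.vm_pte_rqs[i] =
			&sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
	}
	adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
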