Lines Matching full:sdma (drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c)

555 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v4_0_setup_ulv()
619 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v4_0_init_microcode()
627 for every SDMA instance */ in sdma_v4_0_init_microcode()
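Nearly every match in this listing follows the same shape: a per-instance operation iterates i from 0 to adev->sdma.num_instances. A minimal stand-alone sketch of that pattern, with stubbed stand-in types instead of the driver's struct amdgpu_device (all names here are illustrative):

    #include <stdio.h>

    #define MAX_SDMA_INSTANCES 8  /* stand-in for AMDGPU_MAX_SDMA_INSTANCES */

    struct sdma_instance { unsigned int fw_version; };
    struct sdma_block {
        unsigned int num_instances;  /* engines this ASIC exposes */
        struct sdma_instance instance[MAX_SDMA_INSTANCES];
    };

    /* Apply one operation to every engine, as the sdma_v4_0_* helpers do. */
    static void for_each_sdma_instance(struct sdma_block *sdma,
                                       void (*op)(struct sdma_instance *, unsigned int))
    {
        for (unsigned int i = 0; i < sdma->num_instances; i++)
            op(&sdma->instance[i], i);
    }

    static void print_fw(struct sdma_instance *inst, unsigned int i)
    {
        printf("sdma%u: fw_version %u\n", i, inst->fw_version);
    }

    int main(void)
    {
        struct sdma_block sdma = { 2, { { 420 }, { 421 } } };
        for_each_sdma_instance(&sdma, print_fw);
        return 0;
    }
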
780 struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring); in sdma_v4_0_ring_insert_nop() local
784 if (sdma && sdma->burst_nop && (i == 0)) in sdma_v4_0_ring_insert_nop()
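The burst_nop test above (the same check reappears in sdma_v4_0_ring_pad_ib at 1677/1683) implements one idea: when the engine supports burst NOPs, the first NOP dword carries the remaining pad length in its header, so the whole pad costs a single packet. A hedged stand-alone sketch; the header encoding is simplified relative to the real SDMA_PKT_NOP_HEADER_COUNT():

    #include <stdint.h>
    #include <stdio.h>

    #define NOP_OP       0x00000000u             /* stand-in NOP opcode */
    #define NOP_COUNT(n) ((uint32_t)(n) << 16)   /* simplified header field */

    /* Pad with 'count' dwords of NOP.  With burst_nop, dword 0 becomes a
     * burst header covering the other count-1 dwords; without it, every
     * dword is an independent NOP packet. */
    static void insert_nop(uint32_t *ring, unsigned int count, int burst_nop)
    {
        for (unsigned int i = 0; i < count; i++) {
            if (burst_nop && i == 0)
                ring[i] = NOP_OP | NOP_COUNT(count - 1);
            else
                ring[i] = NOP_OP;
        }
    }

    int main(void)
    {
        uint32_t buf[4];
        insert_nop(buf, 4, 1);
        printf("burst header: 0x%08x\n", buf[0]);
        return 0;
    }
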
913 * @enable: enable SDMA RB/IB
923 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v4_0_gfx_enable()
959 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v4_0_page_stop()
1008 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v4_0_ctx_switch_enable()
1020 * Enable SDMA utilization. It's only supported on in sdma_v4_0_ctx_switch_enable()
1025 adev->sdma.instance[i].fw_version >= 14) in sdma_v4_0_ctx_switch_enable()
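The check at 1025 is a microcode-version gate: the feature is programmed only when the loaded firmware is new enough to honor it (>= 14 here, per the match above). A tiny sketch of that gate, everything else stubbed:

    #include <stdbool.h>
    #include <stdio.h>

    /* Only microcode 14 or newer implements the feature being switched on. */
    static bool fw_gate(unsigned int fw_version)
    {
        return fw_version >= 14;
    }

    int main(void)
    {
        printf("fw 13 -> %d, fw 14 -> %d\n", fw_gate(13), fw_gate(14));
        return 0;
    }
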
1049 if (adev->sdma.has_page_queue) in sdma_v4_0_enable()
1053 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v4_0_enable()
1088 struct amdgpu_ring *ring = &adev->sdma.instance[i].ring; in sdma_v4_0_gfx_resume()
1175 struct amdgpu_ring *ring = &adev->sdma.instance[i].page; in sdma_v4_0_page_resume()
1333 * sdma_v4_0_load_microcode - load the sDMA ME ucode
1350 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v4_0_load_microcode()
1351 if (!adev->sdma.instance[i].fw) in sdma_v4_0_load_microcode()
1354 hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data; in sdma_v4_0_load_microcode()
1359 (adev->sdma.instance[i].fw->data + in sdma_v4_0_load_microcode()
1369 adev->sdma.instance[i].fw_version); in sdma_v4_0_load_microcode()
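The load loop casts the raw firmware blob to a header, then copies the ucode payload found at the header's offset and logs the version seen at 1369. A stand-alone sketch with an abridged stand-in for struct sdma_firmware_header_v1_0 (the real layout lives in amdgpu_ucode.h and has more fields):

    #include <stdint.h>
    #include <stdio.h>

    /* Abridged stand-in for struct sdma_firmware_header_v1_0. */
    struct sdma_fw_header {
        uint32_t ucode_version;            /* what 1369 logs as fw_version */
        uint32_t ucode_size_bytes;
        uint32_t ucode_array_offset_bytes; /* payload start within the blob */
    };

    static const uint32_t *ucode_data(const uint8_t *blob,
                                      const struct sdma_fw_header *hdr)
    {
        return (const uint32_t *)(blob + hdr->ucode_array_offset_bytes);
    }

    int main(void)
    {
        uint32_t blob[8] = { 0 };          /* fake firmware image */
        struct sdma_fw_header hdr = { 420, 16, 16 };
        printf("fw_version %u, first payload dword %u\n", hdr.ucode_version,
               ucode_data((const uint8_t *)blob, &hdr)[0]);
        return 0;
    }
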
1401 /* enable sdma ring preemption */ in sdma_v4_0_start()
1406 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v4_0_start()
1411 if (adev->sdma.has_page_queue) in sdma_v4_0_start()
1436 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v4_0_start()
1437 ring = &adev->sdma.instance[i].ring; in sdma_v4_0_start()
1443 if (adev->sdma.has_page_queue) { in sdma_v4_0_start()
1444 struct amdgpu_ring *page = &adev->sdma.instance[i].page; in sdma_v4_0_start()
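Read together, the sdma_v4_0_start() matches at 1401-1444 show the bring-up order: enable the engines, then per instance resume and test the gfx ring and, when present, the page queue, failing fast on error. A sketch of that sequencing only, with every step body stubbed:

    #include <stdbool.h>
    #include <stdio.h>

    struct engine { bool has_page_queue; unsigned int num_instances; };

    static void enable_engines(void)        { printf("engines on\n"); }
    static int  resume_gfx(unsigned int i)  { printf("gfx %u up\n", i); return 0; }
    static int  resume_page(unsigned int i) { printf("page %u up\n", i); return 0; }

    /* Mirror of the ordering only: enable first, then bring up each
     * instance's rings, returning the first error. */
    static int start(const struct engine *e)
    {
        enable_engines();
        for (unsigned int i = 0; i < e->num_instances; i++) {
            int r = resume_gfx(i);
            if (!r && e->has_page_queue)
                r = resume_page(i);
            if (r)
                return r;
        }
        return 0;
    }

    int main(void)
    {
        struct engine e = { true, 2 };
        return start(&e);
    }
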
1591 * Update PTEs by copying them from the GART using sDMA (VEGA10).
1619 * Update PTEs by writing them manually using sDMA (VEGA10).
1640 * sdma_v4_0_vm_set_pte_pde - update the page tables using sDMA
1649 * Update the page tables using sDMA (VEGA10).
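The kernel-doc fragments at 1591-1649 describe the same job two ways: fill a run of page-table entries either by copying ready-made PTEs from GART with sDMA, or by generating them CPU-side into the IB as value = addr | flags, stepped by incr. A stand-alone sketch of the generation path (PTE field layout simplified; the VALID bit below is hypothetical):

    #include <stdint.h>
    #include <stdio.h>

    /* Generate 'count' PTEs starting at 'addr', each 'incr' bytes apart,
     * the CPU-side half of vm_set_pte_pde. */
    static void set_pte_pde(uint64_t *pe, uint64_t addr, unsigned int count,
                            uint32_t incr, uint64_t flags)
    {
        for (unsigned int i = 0; i < count; i++) {
            pe[i] = addr | flags;
            addr += incr;
        }
    }

    int main(void)
    {
        uint64_t pt[4];
        set_pte_pde(pt, 0x100000, 4, 4096, 0x1 /* hypothetical VALID bit */);
        for (int i = 0; i < 4; i++)
            printf("pte[%d] = 0x%llx\n", i, (unsigned long long)pt[i]);
        return 0;
    }
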
1677 struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring); in sdma_v4_0_ring_pad_ib() local
1683 if (sdma && sdma->burst_nop && (i == 0)) in sdma_v4_0_ring_pad_ib()
1714 * sdma_v4_0_ring_emit_vm_flush - vm flush using sDMA
1721 * using sDMA (VEGA10).
1746 uint fw_version = adev->sdma.instance[0].fw_version; in sdma_v4_0_fw_support_paging_queue()
1768 DRM_ERROR("Failed to load sdma firmware!\n"); in sdma_v4_0_early_init()
1775 adev->sdma.has_page_queue = false; in sdma_v4_0_early_init()
1777 adev->sdma.has_page_queue = true; in sdma_v4_0_early_init()
1799 if (adev->sdma.ras && adev->sdma.ras->ras_block.hw_ops && in sdma_v4_0_late_init()
1800 adev->sdma.ras->ras_block.hw_ops->reset_ras_error_count) in sdma_v4_0_late_init()
1801 adev->sdma.ras->ras_block.hw_ops->reset_ras_error_count(adev); in sdma_v4_0_late_init()
1813 /* SDMA trap event */ in sdma_v4_0_sw_init()
1814 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v4_0_sw_init()
1817 &adev->sdma.trap_irq); in sdma_v4_0_sw_init()
1822 /* SDMA SRAM ECC event */ in sdma_v4_0_sw_init()
1823 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v4_0_sw_init()
1826 &adev->sdma.ecc_irq); in sdma_v4_0_sw_init()
1831 /* SDMA VM_HOLE/DOORBELL_INV/POLL_TIMEOUT/SRBM_WRITE_PROTECTION event */ in sdma_v4_0_sw_init()
1832 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v4_0_sw_init()
1835 &adev->sdma.vm_hole_irq); in sdma_v4_0_sw_init()
1841 &adev->sdma.doorbell_invalid_irq); in sdma_v4_0_sw_init()
1847 &adev->sdma.pool_timeout_irq); in sdma_v4_0_sw_init()
1853 &adev->sdma.srbm_write_irq); in sdma_v4_0_sw_init()
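sw_init registers one interrupt source per event class, looping over num_instances for the per-instance ones so each engine gets its own client id against a fixed source id. A sketch of that registration shape; irq_add_id() below stands in for amdgpu_irq_add_id(), and 224 (the family's SDMA trap src id) should be treated as illustrative:

    #include <stdio.h>

    struct irq_src { const char *name; unsigned int num_types; };

    /* Stub for amdgpu_irq_add_id(adev, client_id, src_id, &source). */
    static int irq_add_id(unsigned int client, unsigned int src_id,
                          struct irq_src *src)
    {
        printf("client %u src %u -> %s\n", client, src_id, src->name);
        return 0;
    }

    int main(void)
    {
        struct irq_src trap = { "sdma trap", 0 };
        unsigned int num_instances = 2;

        /* One registration per instance, as in the 1814 loop. */
        for (unsigned int i = 0; i < num_instances; i++)
            if (irq_add_id(i, 224 /* illustrative trap src id */, &trap))
                return 1;
        trap.num_types = num_instances;  /* later mirrored at 2521 */
        return 0;
    }
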
1858 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v4_0_sw_init()
1859 ring = &adev->sdma.instance[i].ring; in sdma_v4_0_sw_init()
1863 DRM_DEBUG("SDMA %d use_doorbell being set to: [%s]\n", i, in sdma_v4_0_sw_init()
1869 sprintf(ring->name, "sdma%d", i); in sdma_v4_0_sw_init()
1870 r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq, in sdma_v4_0_sw_init()
1876 if (adev->sdma.has_page_queue) { in sdma_v4_0_sw_init()
1877 ring = &adev->sdma.instance[i].page; in sdma_v4_0_sw_init()
1889 &adev->sdma.trap_irq, in sdma_v4_0_sw_init()
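The 1858 loop names each ring sdma%d, decides use_doorbell, and hands the ring to amdgpu_ring_init() together with the trap interrupt source. A sketch of the naming and doorbell wiring only; the linear doorbell_base + i mapping is an assumption, not the driver's actual doorbell layout:

    #include <stdio.h>

    struct ring {
        char name[16];
        unsigned int doorbell_index;
        int use_doorbell;
    };

    /* Naming and doorbell wiring only; amdgpu_ring_init() itself is not
     * modeled here. */
    static void ring_setup(struct ring *ring, unsigned int i,
                           unsigned int doorbell_base)
    {
        snprintf(ring->name, sizeof(ring->name), "sdma%u", i);
        ring->use_doorbell = 1;
        ring->doorbell_index = doorbell_base + i;  /* assumed linear map */
        printf("%s: doorbell %u\n", ring->name, ring->doorbell_index);
    }

    int main(void)
    {
        struct ring r;
        ring_setup(&r, 0, 0x1e0 /* hypothetical base */);
        return 0;
    }
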
1905 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v4_0_sw_fini()
1906 amdgpu_ring_fini(&adev->sdma.instance[i].ring); in sdma_v4_0_sw_fini()
1907 if (adev->sdma.has_page_queue) in sdma_v4_0_sw_fini()
1908 amdgpu_ring_fini(&adev->sdma.instance[i].page); in sdma_v4_0_sw_fini()
1939 /* disable the scheduler for SDMA */ in sdma_v4_0_hw_fini()
1944 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v4_0_hw_fini()
1945 amdgpu_irq_put(adev, &adev->sdma.ecc_irq, in sdma_v4_0_hw_fini()
1962 /* SMU saves SDMA state for us */ in sdma_v4_0_suspend()
1975 /* SMU restores SDMA state for us */ in sdma_v4_0_resume()
1991 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v4_0_is_idle()
2004 u32 sdma[AMDGPU_MAX_SDMA_INSTANCES]; in sdma_v4_0_wait_for_idle() local
2008 for (j = 0; j < adev->sdma.num_instances; j++) { in sdma_v4_0_wait_for_idle()
2009 sdma[j] = RREG32_SDMA(j, mmSDMA0_STATUS_REG); in sdma_v4_0_wait_for_idle()
2010 if (!(sdma[j] & SDMA0_STATUS_REG__IDLE_MASK)) in sdma_v4_0_wait_for_idle()
2013 if (j == adev->sdma.num_instances) in sdma_v4_0_wait_for_idle()
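The wait loop reads each instance's mmSDMA0_STATUS_REG and succeeds only when the inner loop runs to completion, i.e. j == num_instances at 2013 means every engine reported IDLE in the same pass. A stand-alone sketch of that poll with the register read stubbed and the timeout simplified:

    #include <stdio.h>

    #define IDLE_MASK 0x1u  /* stand-in for SDMA0_STATUS_REG__IDLE_MASK */

    static unsigned int read_status(unsigned int inst)
    {
        (void)inst;
        return IDLE_MASK;   /* pretend every engine is idle */
    }

    /* Succeed only when one pass finds every instance idle, mirroring the
     * j == num_instances test at 2013. */
    static int wait_for_idle(unsigned int num_instances, unsigned int tries)
    {
        for (unsigned int t = 0; t < tries; t++) {
            unsigned int j;
            for (j = 0; j < num_instances; j++)
                if (!(read_status(j) & IDLE_MASK))
                    break;
            if (j == num_instances)
                return 0;       /* all idle */
            /* the driver udelay()s here before retrying */
        }
        return -1;              /* -ETIMEDOUT in the driver */
    }

    int main(void)
    {
        printf("wait_for_idle: %d\n", wait_for_idle(2, 10));
        return 0;
    }
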
2048 DRM_DEBUG("IH: SDMA trap\n"); in sdma_v4_0_process_trap_irq()
2052 amdgpu_fence_process(&adev->sdma.instance[instance].ring); in sdma_v4_0_process_trap_irq()
2056 amdgpu_fence_process(&adev->sdma.instance[instance].page); in sdma_v4_0_process_trap_irq()
2063 amdgpu_fence_process(&adev->sdma.instance[instance].page); in sdma_v4_0_process_trap_irq()
2098 DRM_ERROR("Illegal instruction in SDMA command stream\n"); in sdma_v4_0_process_illegal_inst_irq()
2106 drm_sched_fault(&adev->sdma.instance[instance].ring.sched); in sdma_v4_0_process_illegal_inst_irq()
2135 if (instance < 0 || instance >= adev->sdma.num_instances) { in sdma_v4_0_print_iv_entry()
2136 dev_err(adev->dev, "sdma instance invalid %d\n", instance); in sdma_v4_0_print_iv_entry()
2147 "[sdma%d] address:0x%016llx src_id:%u ring:%u vmid:%u " in sdma_v4_0_print_iv_entry()
2168 dev_dbg_ratelimited(adev->dev, "SDMA received a doorbell from BIF with byte_enable != 0xff\n"); in sdma_v4_0_process_doorbell_invalid_irq()
2188 "SDMA gets a Register Write SRBM_WRITE command in a non-privileged command buffer\n"); in sdma_v4_0_process_srbm_write_irq()
2201 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v4_0_update_medium_grain_clock_gating()
2215 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v4_0_update_medium_grain_clock_gating()
2240 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v4_0_update_medium_grain_light_sleep()
2241 /* 1 - not override: enable sdma mem light sleep */ in sdma_v4_0_update_medium_grain_light_sleep()
2248 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v4_0_update_medium_grain_light_sleep()
2249 /* 0 - override: disable sdma mem light sleep */ in sdma_v4_0_update_medium_grain_light_sleep()
2363 * On Arcturus, SDMA instances 5~7 have a different vmhub type (AMDGPU_MMHUB_1).
2469 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v4_0_set_ring_funcs()
2471 adev->sdma.instance[i].ring.funcs = in sdma_v4_0_set_ring_funcs()
2474 adev->sdma.instance[i].ring.funcs = in sdma_v4_0_set_ring_funcs()
2476 adev->sdma.instance[i].ring.me = i; in sdma_v4_0_set_ring_funcs()
2477 if (adev->sdma.has_page_queue) { in sdma_v4_0_set_ring_funcs()
2479 adev->sdma.instance[i].page.funcs = in sdma_v4_0_set_ring_funcs()
2482 adev->sdma.instance[i].page.funcs = in sdma_v4_0_set_ring_funcs()
2484 adev->sdma.instance[i].page.me = i; in sdma_v4_0_set_ring_funcs()
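2469-2484 pair with the Arcturus note at 2363: instances behind the second MMHUB get the alternate funcs table, selected purely by instance index, and each ring records its engine in .me. A sketch of the selection (table contents stubbed):

    #include <stdio.h>

    struct ring_funcs { const char *vmhub; };

    static const struct ring_funcs funcs_mmhub0 = { "MMHUB_0" };
    static const struct ring_funcs funcs_mmhub1 = { "MMHUB_1" };

    /* Arcturus: instances 5..7 sit behind the second MMHUB and get the
     * alternate funcs table. */
    static const struct ring_funcs *pick_funcs(unsigned int instance)
    {
        return instance >= 5 ? &funcs_mmhub1 : &funcs_mmhub0;
    }

    int main(void)
    {
        for (unsigned int i = 0; i < 8; i++)
            printf("sdma%u -> %s\n", i, pick_funcs(i)->vmhub);
        return 0;
    }
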
2521 adev->sdma.trap_irq.num_types = adev->sdma.num_instances; in sdma_v4_0_set_irq_funcs()
2522 adev->sdma.ecc_irq.num_types = adev->sdma.num_instances; in sdma_v4_0_set_irq_funcs()
2524 switch (adev->sdma.num_instances) { in sdma_v4_0_set_irq_funcs()
2527 adev->sdma.vm_hole_irq.num_types = adev->sdma.num_instances; in sdma_v4_0_set_irq_funcs()
2528 adev->sdma.doorbell_invalid_irq.num_types = adev->sdma.num_instances; in sdma_v4_0_set_irq_funcs()
2529 adev->sdma.pool_timeout_irq.num_types = adev->sdma.num_instances; in sdma_v4_0_set_irq_funcs()
2530 adev->sdma.srbm_write_irq.num_types = adev->sdma.num_instances; in sdma_v4_0_set_irq_funcs()
2535 adev->sdma.trap_irq.funcs = &sdma_v4_0_trap_irq_funcs; in sdma_v4_0_set_irq_funcs()
2536 adev->sdma.illegal_inst_irq.funcs = &sdma_v4_0_illegal_inst_irq_funcs; in sdma_v4_0_set_irq_funcs()
2537 adev->sdma.ecc_irq.funcs = &sdma_v4_0_ecc_irq_funcs; in sdma_v4_0_set_irq_funcs()
2538 adev->sdma.vm_hole_irq.funcs = &sdma_v4_0_vm_hole_irq_funcs; in sdma_v4_0_set_irq_funcs()
2539 adev->sdma.doorbell_invalid_irq.funcs = &sdma_v4_0_doorbell_invalid_irq_funcs; in sdma_v4_0_set_irq_funcs()
2540 adev->sdma.pool_timeout_irq.funcs = &sdma_v4_0_pool_timeout_irq_funcs; in sdma_v4_0_set_irq_funcs()
2541 adev->sdma.srbm_write_irq.funcs = &sdma_v4_0_srbm_write_irq_funcs; in sdma_v4_0_set_irq_funcs()
2545 * sdma_v4_0_emit_copy_buffer - copy buffer using the sDMA engine
2575 * sdma_v4_0_emit_fill_buffer - fill buffer using the sDMA engine
2609 if (adev->sdma.has_page_queue) in sdma_v4_0_set_buffer_funcs()
2610 adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].page; in sdma_v4_0_set_buffer_funcs()
2612 adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring; in sdma_v4_0_set_buffer_funcs()
2629 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v4_0_set_vm_pte_funcs()
2630 if (adev->sdma.has_page_queue) in sdma_v4_0_set_vm_pte_funcs()
2631 sched = &adev->sdma.instance[i].page.sched; in sdma_v4_0_set_vm_pte_funcs()
2633 sched = &adev->sdma.instance[i].ring.sched; in sdma_v4_0_set_vm_pte_funcs()
2636 adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances; in sdma_v4_0_set_vm_pte_funcs()
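set_vm_pte_funcs hands the VM manager one scheduler per instance, preferring the page queue when the hardware has one so page-table updates do not compete with the gfx queue. A sketch with stubbed types:

    #include <stdbool.h>
    #include <stdio.h>

    struct sched { const char *name; };
    struct inst  { struct sched ring_sched, page_sched; };

    int main(void)
    {
        bool has_page_queue = true;
        struct inst inst[2] = {
            { { "sdma0 gfx" }, { "sdma0 page" } },
            { { "sdma1 gfx" }, { "sdma1 page" } },
        };
        struct sched *vm_pte_scheds[2];

        /* Prefer the page queue for PTE work when the hardware has one. */
        for (unsigned int i = 0; i < 2; i++)
            vm_pte_scheds[i] = has_page_queue ? &inst[i].page_sched
                                              : &inst[i].ring_sched;

        for (unsigned int i = 0; i < 2; i++)
            printf("vm_pte sched %u: %s\n", i, vm_pte_scheds[i]->name);
        return 0;
    }
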
2648 /* the SDMA_EDC_COUNTER register in each sdma instance in sdma_v4_0_get_ras_error_count()
2655 DRM_INFO("Detected %s in SDMA%d, SED %d\n", in sdma_v4_0_get_ras_error_count()
2689 for (i = 0; i < adev->sdma.num_instances; i++) { in sdma_v4_0_query_ras_error_count()
2691 dev_err(adev->dev, "Query ras error count failed in SDMA%d\n", i); in sdma_v4_0_query_ras_error_count()
2703 for (i = 0; i < adev->sdma.num_instances; i++) in sdma_v4_0_reset_ras_error_count()
2725 adev->sdma.ras = &sdma_v4_0_ras; in sdma_v4_0_set_ras_funcs()
2728 adev->sdma.ras = &sdma_v4_4_ras; in sdma_v4_0_set_ras_funcs()
2734 if (adev->sdma.ras) { in sdma_v4_0_set_ras_funcs()
2735 amdgpu_ras_register_ras_block(adev, &adev->sdma.ras->ras_block); in sdma_v4_0_set_ras_funcs()
2737 strcpy(adev->sdma.ras->ras_block.ras_comm.name, "sdma"); in sdma_v4_0_set_ras_funcs()
2738 adev->sdma.ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__SDMA; in sdma_v4_0_set_ras_funcs()
2739 adev->sdma.ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE; in sdma_v4_0_set_ras_funcs()
2740 adev->sdma.ras_if = &adev->sdma.ras->ras_block.ras_comm; in sdma_v4_0_set_ras_funcs()
2743 if (!adev->sdma.ras->ras_block.ras_late_init) in sdma_v4_0_set_ras_funcs()
2744 adev->sdma.ras->ras_block.ras_late_init = amdgpu_sdma_ras_late_init; in sdma_v4_0_set_ras_funcs()
2747 if (!adev->sdma.ras->ras_block.ras_cb) in sdma_v4_0_set_ras_funcs()
2748 adev->sdma.ras->ras_block.ras_cb = amdgpu_sdma_process_ras_data_cb; in sdma_v4_0_set_ras_funcs()
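The set_ras_funcs matches close the section: pick the per-ASIC ras struct, register it, fill the common name/block/type fields, then install the generic amdgpu_sdma_* hooks only where the chosen block left one NULL (2743-2748). A sketch of that fill-in-defaults pattern:

    #include <stdio.h>

    struct ras_block {
        const char *name;
        int (*late_init)(void);
        int (*ras_cb)(void);
    };

    static int generic_late_init(void) { return 0; }
    static int generic_cb(void)        { return 0; }

    /* Defaults go in only where the chosen block left a hook NULL. */
    static void install_defaults(struct ras_block *b)
    {
        if (!b->late_init)
            b->late_init = generic_late_init;
        if (!b->ras_cb)
            b->ras_cb = generic_cb;
    }

    int main(void)
    {
        struct ras_block b = { "sdma", 0, 0 };
        install_defaults(&b);
        printf("%s: hooks %s\n", b.name,
               (b.late_init && b.ras_cb) ? "installed" : "missing");
        return 0;
    }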