Lines Matching +full:0 +full:xd4000000

52 #define GAUDI2_TPC_FULL_MASK			0x1FFFFFF
53 #define GAUDI2_HIF_HMMU_FULL_MASK		0xFFFF
54 #define GAUDI2_DECODER_FULL_MASK		0x3FF
80 #define GAUDI2_ARB_WDT_TIMEOUT			(0x1000000)
101 #define PCIE_DEC_EN_MASK			0x300
102 #define DEC_WORK_STATE_IDLE			0
111 #define GAUDI2_HBM_MMU_SCRM_MOD_SHIFT		0
116 #define MMU_RANGE_INV_EN_SHIFT			0
123 #define GAUDI2_PMMU_SPI_SEI_ENABLE_MASK		GENMASK(GAUDI2_NUM_OF_MMU_SPI_SEI_CAUSE - 2, 0)
124 #define GAUDI2_HMMU_SPI_SEI_ENABLE_MASK		GENMASK(GAUDI2_NUM_OF_MMU_SPI_SEI_CAUSE - 1, 0)
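A note on the two GENMASK lines just above: GENMASK(h, 0) sets bits 0..h, so with N SPI/SEI causes the HMMU mask GENMASK(N - 1, 0) = (1 << N) - 1 enables every cause, while the PMMU mask GENMASK(N - 2, 0) = (1 << (N - 1)) - 1 leaves the highest-numbered cause disabled.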
142 [HBM_ID0] = 0xFFFC,
143 [HBM_ID1] = 0xFFCF,
144 [HBM_ID2] = 0xF7F7,
145 [HBM_ID3] = 0x7F7F,
146 [HBM_ID4] = 0xFCFF,
147 [HBM_ID5] = 0xCFFF,
151 [0] = HBM_ID0,
510 "FENCE 0 inc over max value and clipped",
514 "FENCE 0 dec under min value and clipped",
531 "FENCE 0 inc over max value and clipped",
535 "FENCE 0 dec under min value and clipped",
681 {"calculated SO value overflow/underflow", "SOB group ID", 0x7FF},
682 {"payload address of monitor is not aligned to 4B", "monitor addr", 0xFFFF},
683 {"armed monitor write got BRESP (SLVERR or DECERR)", "AXI id", 0xFFFF},
1487 "gaudi2 vdec 0_0", "gaudi2 vdec 0_0 abnormal",
1488 "gaudi2 vdec 0_1", "gaudi2 vdec 0_1 abnormal",
1524 RTR_ID_X_Y(0, 0),/* 24 no id */
1525 RTR_ID_X_Y(0, 0),/* 25 no id */
1526 RTR_ID_X_Y(0, 0),/* 26 no id */
1527 RTR_ID_X_Y(0, 0),/* 27 no id */
1593 {0, 0}, {1, 0}, {0, 1}, {1, 1}, {1, 2}, {1, 3}, {0, 2}, {0, 3},
1617 MME_WAP0 = 0,
1722 static s64 gaudi2_state_dump_specs_props[SP_MAX] = {0};
1756 ctx->rc = 0; in gaudi2_iterate_tpcs()
1758 for (dcore = 0; dcore < NUM_OF_DCORES; dcore++) { in gaudi2_iterate_tpcs()
1759 for (inst = 0; inst < NUM_OF_TPC_PER_DCORE; inst++) { in gaudi2_iterate_tpcs()
1781 ctx->fn(hdev, 0, NUM_DCORE0_TPC - 1, offset, ctx); in gaudi2_iterate_tpcs()
1803 return 0; in set_number_of_functional_hbms()
1814 "HBM binning supports max of %d faulty HBMs, supplied mask 0x%llx.\n", in set_number_of_functional_hbms()
1824 return 0; in set_number_of_functional_hbms()
1879 return 0; in gaudi2_set_dram_properties()
1886 u32 num_sync_stream_queues = 0; in gaudi2_set_fixed_properties()
1898 for (i = 0 ; i < GAUDI2_QUEUE_ID_CPU_PQ ; i++) { in gaudi2_set_fixed_properties()
1900 q_props[i].driver_only = 0; in gaudi2_set_fixed_properties()
1903 q_props[i].supports_sync_stream = 0; in gaudi2_set_fixed_properties()
1937 prop->mmu_pgt_size = 0x800000; /* 8MB */ in gaudi2_set_fixed_properties()
2048 prop->first_available_user_sob[0] = GAUDI2_RESERVED_SOB_NUMBER + in gaudi2_set_fixed_properties()
2051 prop->first_available_user_mon[0] = GAUDI2_RESERVED_MON_NUMBER + in gaudi2_set_fixed_properties()
2056 prop->first_available_cq[0] = GAUDI2_RESERVED_CQ_NUMBER; in gaudi2_set_fixed_properties()
2071 return 0; in gaudi2_set_fixed_properties()
2086 return 0; in gaudi2_pci_bars_map()
2126 return 0; in gaudi2_init_iatu()
2128 /* Temporary inbound Region 0 - Bar 0 - Point to CFG in gaudi2_init_iatu()
2136 rc = hl_pci_set_inbound_region(hdev, 0, &inbound_region); in gaudi2_init_iatu()
2142 bar_addr_low = RREG32(mmPCIE_DBI_BAR0_REG + STM_FLASH_ALIGNED_OFF) & ~0xF; in gaudi2_init_iatu()
2146 /* Inbound Region 0 - Bar 0 - Point to CFG */ in gaudi2_init_iatu()
2149 inbound_region.offset_in_bar = 0; in gaudi2_init_iatu()
2152 rc = hl_pci_set_inbound_region(hdev, 0, &inbound_region); in gaudi2_init_iatu()
2156 /* Inbound Region 1 - Bar 0 - Point to BAR0_RESERVED + SRAM */ in gaudi2_init_iatu()
2174 /* Outbound Region 0 - Point to Host */ in gaudi2_init_iatu()
2196 dev_err(hdev->dev, "TPC binning is supported for max of %d faulty TPCs, provided mask 0x%llx\n", in gaudi2_tpc_binning_init_prop()
2205 return 0; in gaudi2_tpc_binning_init_prop()
2213 u8 subst_idx = 0; in gaudi2_set_tpc_binning_masks()
2222 for (i = 0 ; i < MAX_FAULTY_TPCS ; i++) { in gaudi2_set_tpc_binning_masks()
2225 if (tpc_binning_mask == 0) in gaudi2_set_tpc_binning_masks()
2228 if (subst_idx == 0) { in gaudi2_set_tpc_binning_masks()
2263 return 0; in gaudi2_set_tpc_binning_masks()
2278 …r(hdev->dev, "decoder binning is supported for max of single faulty decoder, provided mask 0x%x\n", in gaudi2_set_dec_binning_masks()
2290 return 0; in gaudi2_set_dec_binning_masks()
2299 prop->dram_binning_mask = 0; in gaudi2_set_dram_binning_masks()
2324 "EDMA binning is supported for max of single faulty EDMA, provided mask 0x%x\n", in gaudi2_set_edma_binning_masks()
2330 prop->edma_binning_mask = 0; in gaudi2_set_edma_binning_masks()
2332 return 0; in gaudi2_set_edma_binning_masks()
2349 return 0; in gaudi2_set_edma_binning_masks()
2360 return 0; in gaudi2_set_xbar_edge_enable_mask()
2364 * note that it can be set to value other than 0 only after cpucp packet (i.e. in gaudi2_set_xbar_edge_enable_mask()
2365 * only the FW can set a redundancy value). for user it'll always be 0. in gaudi2_set_xbar_edge_enable_mask()
2385 return 0; in gaudi2_set_xbar_edge_enable_mask()
2397 hdev->asic_prop.faulty_dram_cluster_map = 0; in gaudi2_set_cluster_binning_masks_common()
2413 return 0; in gaudi2_set_cluster_binning_masks_common()
2432 return 0; in gaudi2_set_cluster_binning_masks()
2444 return 0; in gaudi2_cpucp_info_get()
2450 return 0; in gaudi2_cpucp_info_get()
2503 if (max_power < 0) in gaudi2_cpucp_info_get()
2508 return 0; in gaudi2_cpucp_info_get()
2518 return 0; in gaudi2_fetch_psoc_frequency()
2526 return 0; in gaudi2_fetch_psoc_frequency()
2588 return 0; in gaudi2_early_init()
2602 return 0; in gaudi2_early_fini()
2666 rc = gaudi2_send_job_to_kdma(hdev, 0, CFG_BASE + gaudi2_arc_dccm_bases[cpu_id], in gaudi2_scrub_arc_dccm()
2678 rc = gaudi2_send_job_to_kdma(hdev, 0, CFG_BASE + gaudi2_arc_dccm_bases[cpu_id], in gaudi2_scrub_arc_dccm()
2688 rc = gaudi2_send_job_to_kdma(hdev, 0, CFG_BASE + gaudi2_arc_dccm_bases[cpu_id], in gaudi2_scrub_arc_dccm()
2694 reg_val = FIELD_PREP(ARC_FARM_ARC0_AUX_MME_ARC_UPPER_DCCM_EN_VAL_MASK, 0); in gaudi2_scrub_arc_dccm()
2698 rc = gaudi2_send_job_to_kdma(hdev, 0, CFG_BASE + gaudi2_arc_dccm_bases[cpu_id], in gaudi2_scrub_arc_dccm()
2704 return 0; in gaudi2_scrub_arc_dccm()
2743 return 0; in gaudi2_late_init()
2746 hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_DISABLE_PCI_ACCESS, 0x0); in gaudi2_late_init()
2779 for (i = 0 ; i < NUM_ARC_CPUS ; i++) { in gaudi2_user_mapped_blocks_init()
2815 for (i = 0 ; i < num_umr_blocks ; i++) { in gaudi2_user_mapped_blocks_init()
2848 int i, j, rc = 0; in gaudi2_alloc_cpu_accessible_dma_mem()
2855 for (i = 0 ; i < GAUDI2_ALLOC_CPU_MEM_RETRY_CNT ; i++) { in gaudi2_alloc_cpu_accessible_dma_mem()
2879 for (j = 0 ; j < i ; j++) in gaudi2_alloc_cpu_accessible_dma_mem()
2913 region->offset_in_bar = 0; in gaudi2_set_pci_memory_regions()
2941 for (i = GAUDI2_IRQ_NUM_DCORE0_DEC0_NRM, j = 0 ; i <= GAUDI2_IRQ_NUM_SHARED_DEC1_NRM; in gaudi2_user_interrupt_setup()
2945 for (i = GAUDI2_IRQ_NUM_USER_FIRST, k = 0 ; k < prop->user_interrupt_count; i++, j++, k++) in gaudi2_user_interrupt_setup()
2967 for (i = 0 ; i < ARRAY_SIZE(gaudi2_irq_map_table) ; i++) { in gaudi2_sw_init()
2981 for (i = 0 ; i < MME_NUM_OF_LFSR_SEEDS ; i++) in gaudi2_sw_init()
2993 GAUDI2_DMA_POOL_BLK_SIZE, DEVICE_CACHE_LINE_SIZE, 0); in gaudi2_sw_init()
3051 return 0; in gaudi2_sw_init()
3086 return 0; in gaudi2_sw_fini()
3118 * counters to 0 (standard clear of fence counters)
3137 val = skip_fence ? U32_MAX : 0; in gaudi2_clear_qm_fence_counters_common()
3166 for (dcore = 0 ; dcore < NUM_OF_DCORES ; dcore++) { in gaudi2_stop_dma_qmans()
3167 for (inst = 0 ; inst < NUM_OF_EDMA_PER_DCORE ; inst++) { in gaudi2_stop_dma_qmans()
3190 for (i = 0 ; i < NUM_OF_DCORES ; i++) { in gaudi2_stop_mme_qmans()
3207 for (i = 0 ; i < TPC_ID_SIZE ; i++) { in gaudi2_stop_tpc_qmans()
3225 for (i = 0 ; i < ROTATOR_ID_SIZE ; i++) { in gaudi2_stop_rot_qmans()
3245 for (i = 0 ; i < NIC_NUMBER_OF_ENGINES ; i++, queue_id += NUM_OF_PQ_PER_QMAN) { in gaudi2_stop_nic_qmans()
3258 reg_val = FIELD_PREP(PDMA0_CORE_CFG_1_HALT_MASK, 0x1); in gaudi2_stall_dma_common()
3277 for (dcore = 0 ; dcore < NUM_OF_DCORES ; dcore++) { in gaudi2_dma_stall()
3278 for (inst = 0 ; inst < NUM_OF_EDMA_PER_DCORE ; inst++) { in gaudi2_dma_stall()
3301 for (i = 0 ; i < NUM_OF_DCORES ; i++) in gaudi2_mme_stall()
3315 for (i = 0 ; i < TPC_ID_SIZE ; i++) { in gaudi2_tpc_stall()
3333 reg_val = FIELD_PREP(ROT_MSS_HALT_WBC_MASK, 0x1) | in gaudi2_rotator_stall()
3334 FIELD_PREP(ROT_MSS_HALT_RSB_MASK, 0x1) | in gaudi2_rotator_stall()
3335 FIELD_PREP(ROT_MSS_HALT_MRSB_MASK, 0x1); in gaudi2_rotator_stall()
3337 for (i = 0 ; i < ROTATOR_ID_SIZE ; i++) { in gaudi2_rotator_stall()
3347 WREG32(reg_base + QM_GLBL_CFG0_OFFSET, 0); in gaudi2_disable_qman_common()
3365 for (dcore = 0 ; dcore < NUM_OF_DCORES ; dcore++) { in gaudi2_disable_dma_qmans()
3366 for (inst = 0 ; inst < NUM_OF_EDMA_PER_DCORE ; inst++) { in gaudi2_disable_dma_qmans()
3389 for (i = 0 ; i < NUM_OF_DCORES ; i++) in gaudi2_disable_mme_qmans()
3403 for (i = 0 ; i < TPC_ID_SIZE ; i++) { in gaudi2_disable_tpc_qmans()
3421 for (i = 0 ; i < ROTATOR_ID_SIZE ; i++) { in gaudi2_disable_rot_qmans()
3441 for (i = 0 ; i < NIC_NUMBER_OF_ENGINES ; i++, queue_id += NUM_OF_PQ_PER_QMAN) { in gaudi2_disable_nic_qmans()
3453 WREG32(mmPSOC_TIMESTAMP_BASE, 0); in gaudi2_enable_timestamp()
3456 WREG32(mmPSOC_TIMESTAMP_BASE + 0xC, 0); in gaudi2_enable_timestamp()
3457 WREG32(mmPSOC_TIMESTAMP_BASE + 0x8, 0); in gaudi2_enable_timestamp()
3466 WREG32(mmPSOC_TIMESTAMP_BASE, 0); in gaudi2_disable_timestamp()
3512 for (i = GAUDI2_IRQ_NUM_DCORE0_DEC0_NRM, irq_init_cnt = 0; in gaudi2_dec_enable_msix()
3529 rc = request_irq(irq, irq_handler, 0, gaudi2_irq_name(i), in gaudi2_dec_enable_msix()
3539 return 0; in gaudi2_dec_enable_msix()
3555 return 0; in gaudi2_enable_msix()
3559 if (rc < 0) { in gaudi2_enable_msix()
3567 rc = request_irq(irq, hl_irq_handler_cq, 0, gaudi2_irq_name(GAUDI2_IRQ_NUM_COMPLETION), cq); in gaudi2_enable_msix()
3574 rc = request_irq(irq, hl_irq_handler_eq, 0, gaudi2_irq_name(GAUDI2_IRQ_NUM_EVENT_QUEUE), in gaudi2_enable_msix()
3587 for (i = GAUDI2_IRQ_NUM_USER_FIRST, j = prop->user_dec_intr_count, user_irq_init_cnt = 0; in gaudi2_enable_msix()
3594 rc = request_irq(irq, irq_handler, 0, gaudi2_irq_name(i), &hdev->user_interrupt[j]); in gaudi2_enable_msix()
3603 return 0; in gaudi2_enable_msix()
3646 for (i = GAUDI2_IRQ_NUM_USER_FIRST, j = 0 ; j < hdev->asic_prop.user_interrupt_count; in gaudi2_sync_irqs()
3672 for (i = GAUDI2_IRQ_NUM_USER_FIRST, j = prop->user_dec_intr_count, k = 0; in gaudi2_disable_msix()
3690 u32 reg_val = FIELD_PREP(DCORE0_VDEC0_BRDG_CTRL_GRACEFUL_STOP_MASK, 0x1); in gaudi2_stop_dcore_dec()
3700 for (dec_id = 0 ; dec_id < NUM_OF_DEC_PER_DCORE ; dec_id++) { in gaudi2_stop_dcore_dec()
3707 WREG32(mmDCORE0_DEC0_CMD_SWREG16 + offset, 0); in gaudi2_stop_dcore_dec()
3730 u32 reg_val = FIELD_PREP(DCORE0_VDEC0_BRDG_CTRL_GRACEFUL_STOP_MASK, 0x1); in gaudi2_stop_pcie_dec()
3740 for (dec_id = 0 ; dec_id < NUM_OF_DEC_PER_DCORE ; dec_id++) { in gaudi2_stop_pcie_dec()
3747 WREG32(mmPCIE_DEC0_CMD_SWREG16 + offset, 0); in gaudi2_stop_pcie_dec()
3773 if ((gaudi2->dec_hw_cap_initialized & HW_CAP_DEC_MASK) == 0) in gaudi2_stop_dec()
3776 for (dcore_id = 0 ; dcore_id < NUM_OF_DCORES ; dcore_id++) in gaudi2_stop_dec()
3825 val = FIELD_PREP(ARC_FARM_ARC0_AUX_RUN_HALT_REQ_RUN_REQ_MASK, 0); in gaudi2_verify_arc_running_mode()
3856 for (i = 0 ; i < NIC_NUMBER_OF_ENGINES ; i++, queue_id += NUM_OF_PQ_PER_QMAN) { in gaudi2_nic_qmans_manual_flush()
3870 for (i = 0 ; i < num_cores ; i++) { in gaudi2_set_engine_cores()
3875 for (i = 0 ; i < num_cores ; i++) { in gaudi2_set_engine_cores()
3888 return 0; in gaudi2_set_engine_cores()
3994 return 0; in gaudi2_init_cpu()
3997 return 0; in gaudi2_init_cpu()
4005 return 0; in gaudi2_init_cpu()
4019 return 0; in gaudi2_init_cpu_queues()
4022 return 0; in gaudi2_init_cpu_queues()
4042 WREG32(mmCPU_IF_EQ_RD_OFFS, 0); in gaudi2_init_cpu_queues()
4044 WREG32(mmCPU_IF_PF_PQ_PI, 0); in gaudi2_init_cpu_queues()
4074 return 0; in gaudi2_init_cpu_queues()
4083 for (pq_id = 0 ; pq_id < NUM_OF_PQ_PER_QMAN ; pq_id++) { in gaudi2_init_qman_pq()
4092 WREG32(reg_base + QM_PQ_PI_0_OFFSET + pq_offset, 0); in gaudi2_init_qman_pq()
4093 WREG32(reg_base + QM_PQ_CI_0_OFFSET + pq_offset, 0); in gaudi2_init_qman_pq()
4106 for (cp_id = 0 ; cp_id < NUM_OF_CP_PER_QMAN; cp_id++) { in gaudi2_init_qman_cp()
4116 WREG32(reg_base + QM_CP_CFG_OFFSET, FIELD_PREP(PDMA0_QM_CP_CFG_SWITCH_EN_MASK, 0x1)); in gaudi2_init_qman_cp()
4128 for (pq_id = 0 ; pq_id < NUM_OF_PQ_PER_QMAN ; pq_id++) { in gaudi2_init_qman_pqc()
4139 WREG32(reg_base + QM_PQC_PI_0_OFFSET + pq_offset, 0); in gaudi2_init_qman_pqc()
4192 return 0; in gaudi2_get_dyn_sp_reg()
4217 WREG32(reg_base + QM_GLBL_CFG1_OFFSET, 0); in gaudi2_init_qman_common()
4218 WREG32(reg_base + QM_GLBL_CFG2_OFFSET, 0); in gaudi2_init_qman_common()
4239 for (pq_id = 0 ; pq_id < NUM_OF_PQ_PER_QMAN ; pq_id++) in gaudi2_init_qman()
4339 for (dcore = 0 ; dcore < NUM_OF_DCORES ; dcore++) { in gaudi2_init_edma()
4340 for (inst = 0 ; inst < NUM_OF_EDMA_PER_DCORE ; inst++) { in gaudi2_init_edma()
4378 WREG32(mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_0 + sob_offset, 0); in gaudi2_arm_monitors_for_virt_msix_db()
4395 payload = FIELD_PREP(DCORE0_SYNC_MNGR_OBJS_SOB_OBJ_VAL_MASK, 0x7FFF) | /* "-1" */ in gaudi2_arm_monitors_for_virt_msix_db()
4408 mask = ~BIT(sob_id & 0x7); in gaudi2_arm_monitors_for_virt_msix_db()
4409 mode = 0; /* comparison mode is "greater than or equal to" */ in gaudi2_arm_monitors_for_virt_msix_db()
4440 for (decoder_id = 0 ; decoder_id < NUMBER_OF_DEC ; ++decoder_id) { in gaudi2_prepare_sm_for_virt_msix_db()
4467 for (i = 0 ; i < GAUDI2_MAX_PENDING_CS ; i++) in gaudi2_init_sm()
4493 for (i = 0 ; i < GAUDI2_RESERVED_CQ_NUMBER ; i++) { in gaudi2_init_sm()
4506 WREG32(mmDCORE0_SYNC_MNGR_GLBL_ASID_SEC, 0x10000); in gaudi2_init_sm()
4507 WREG32(mmDCORE0_SYNC_MNGR_GLBL_ASID_NONE_SEC_PRIV, 0); in gaudi2_init_sm()
4519 reg_val = FIELD_PREP(MME_ACC_INTR_MASK_WBC_ERR_RESP_MASK, 0); in gaudi2_init_mme_acc()
4527 WREG32(reg_base + MME_ACC_AP_LFSR_POLY_OFFSET, 0x80DEADAF); in gaudi2_init_mme_acc()
4529 for (i = 0 ; i < MME_NUM_OF_LFSR_SEEDS ; i++) { in gaudi2_init_mme_acc()
4538 u32 queue_id_base, reg_base, clk_en_addr = 0; in gaudi2_init_dcore_mme()
4541 case 0: in gaudi2_init_dcore_mme()
4561 WREG32(clk_en_addr, 0x1); in gaudi2_init_dcore_mme()
4580 for (i = 0 ; i < NUM_OF_DCORES ; i++) { in gaudi2_init_mme()
4590 WREG32(reg_base + TPC_CFG_TPC_INTR_MASK_OFFSET, 0x23FFFE); in gaudi2_init_tpc_cfg()
4611 if (dcore == 0 && inst == (NUM_DCORE0_TPC - 1)) in gaudi2_init_tpc_config()
4635 init_cfg_data.dcore_tpc_qid_base[0] = GAUDI2_QUEUE_ID_DCORE0_TPC_0_0; in gaudi2_init_tpc()
4651 for (i = 0 ; i < NUM_OF_ROT ; i++, queue_id += NUM_OF_PQ_PER_QMAN) { in gaudi2_init_rotator()
4702 for (dcore_id = 0 ; dcore_id < NUM_OF_DCORES ; dcore_id++) in gaudi2_init_dec()
4703 for (dec_id = 0 ; dec_id < NUM_OF_DEC_PER_DCORE ; dec_id++) { in gaudi2_init_dec()
4719 for (dec_id = 0 ; dec_id < NUM_OF_PCIE_VDEC ; dec_id++) { in gaudi2_init_dec()
4751 WREG32(msix_gw_table_base + last_reg_offset, GENMASK(last_bit, 0)); in gaudi2_init_msix_gw_table()
4754 WREG32(msix_gw_table_base + i, 0xFFFFFFFF); in gaudi2_init_msix_gw_table()
4771 WREG32(stlb_base + STLB_BUSY_OFFSET, 0x80000000); in gaudi2_mmu_update_asid_hop0_addr()
4777 !(status & 0x80000000), in gaudi2_mmu_update_asid_hop0_addr()
4786 return 0; in gaudi2_mmu_update_asid_hop0_addr()
4795 WREG32(mmPMMU_HBW_STLB_MEM_CACHE_INVALIDATION, 0x1); in gaudi2_mmu_send_invalidate_cache_cmd()
4818 status & 0x1, in gaudi2_mmu_invalidate_cache_status_poll()
4825 /* Need to manually reset the status to 0 */ in gaudi2_mmu_invalidate_cache_status_poll()
4826 WREG32(mmPMMU_HBW_STLB_MEM_CACHE_INV_STATUS, 0x0); in gaudi2_mmu_invalidate_cache_status_poll()
4833 return 0; in gaudi2_mmu_invalidate_cache_status_poll()
4842 !(status & 0x1), in gaudi2_mmu_invalidate_cache_status_poll()
4930 for (dcore_id = 0 ; dcore_id < NUM_OF_DCORES ; dcore_id++) { in gaudi2_hmmus_invalidate_cache()
4931 for (hmmu_id = 0 ; hmmu_id < NUM_OF_HMMU_PER_DCORE ; hmmu_id++) { in gaudi2_hmmus_invalidate_cache()
4940 for (dcore_id = 0 ; dcore_id < NUM_OF_DCORES ; dcore_id++) { in gaudi2_hmmus_invalidate_cache()
4941 for (hmmu_id = 0 ; hmmu_id < NUM_OF_HMMU_PER_DCORE ; hmmu_id++) { in gaudi2_hmmus_invalidate_cache()
4954 return 0; in gaudi2_hmmus_invalidate_cache()
4961 int rc = 0; in gaudi2_mmu_invalidate_cache()
4975 invld_params.flags = 0; in gaudi2_mmu_invalidate_cache()
4985 struct gaudi2_cache_invld_params invld_params = {0}; in gaudi2_mmu_invalidate_cache_range()
4989 int rc = 0; in gaudi2_mmu_invalidate_cache_range()
4992 return 0; in gaudi2_mmu_invalidate_cache_range()
5045 for (asid = 0 ; asid < max_asid ; asid++) { in gaudi2_mmu_update_hop0_addr()
5054 return 0; in gaudi2_mmu_update_hop0_addr()
5084 WREG32(mmu_base + MMU_BYPASS_OFFSET, 0); in gaudi2_mmu_init_common()
5109 return 0; in gaudi2_pci_mmu_init()
5115 (0 << PMMU_HBW_STLB_HOP_CONFIGURATION_FIRST_HOP_SHIFT) | in gaudi2_pci_mmu_init()
5126 WREG32(stlb_base + STLB_LL_LOOKUP_MASK_63_32_OFFSET, 0); in gaudi2_pci_mmu_init()
5149 return 0; in gaudi2_pci_mmu_init()
5169 return 0; in gaudi2_dcore_hmmu_init()
5179 FIELD_PREP(DCORE0_HMMU0_STLB_HOP_CONFIGURATION_FIRST_HOP_MASK, 0) | in gaudi2_dcore_hmmu_init()
5201 return 0; in gaudi2_dcore_hmmu_init()
5208 for (dcore_id = 0 ; dcore_id < NUM_OF_DCORES ; dcore_id++) in gaudi2_hbm_mmu_init()
5209 for (hmmu_id = 0 ; hmmu_id < NUM_OF_HMMU_PER_DCORE; hmmu_id++) { in gaudi2_hbm_mmu_init()
5215 return 0; in gaudi2_hbm_mmu_init()
5230 return 0; in gaudi2_mmu_init()
5311 return 0; in gaudi2_hw_init()
5469 int i, rc = 0; in gaudi2_poll_btm_indication()
5478 for (i = 0 ; i < GAUDI2_RESET_POLL_CNT ; i++) in gaudi2_poll_btm_indication()
5483 reg_val == 0, in gaudi2_poll_btm_indication()
5488 dev_err(hdev->dev, "Timeout while waiting for device to reset 0x%x\n", reg_val); in gaudi2_poll_btm_indication()
5493 int i, rc = 0; in gaudi2_get_soft_rst_done_indication()
5496 for (i = 0 ; i < GAUDI2_RESET_POLL_CNT ; i++) in gaudi2_get_soft_rst_done_indication()
5506 dev_err(hdev->dev, "Timeout while waiting for FW to complete soft reset (0x%x)\n", in gaudi2_get_soft_rst_done_indication()
5572 memset(gaudi2->events_stat, 0, sizeof(gaudi2->events_stat)); in gaudi2_hw_fini()
5585 rc = hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_DISABLE_PCI_ACCESS, 0x0); in gaudi2_suspend()
5627 u64 hw_cap_mask = 0; in gaudi2_is_queue_enabled()
5628 u64 hw_tpc_cap_bit = 0; in gaudi2_is_queue_enabled()
5629 u64 hw_nic_cap_bit = 0; in gaudi2_is_queue_enabled()
5630 u64 hw_test_cap_bit = 0; in gaudi2_is_queue_enabled()
5677 return !!(gaudi2->tpc_hw_cap_initialized & BIT_ULL(0)); in gaudi2_is_queue_enabled()
5708 return !!(gaudi2->nic_hw_cap_initialized & BIT_ULL(0)); in gaudi2_is_queue_enabled()
5804 * Masking the H/W queue ID with 0x3 extracts the QMAN internal PQ in gaudi2_ring_doorbell()
5807 pq_offset = (hw_queue_id & 0x3) * 4; in gaudi2_ring_doorbell()
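The doorbell comment and the line above encode a small addressing rule: the low two bits of the hardware queue ID select the PQ within its QMAN, and the per-PQ PI registers sit 4 bytes apart. A minimal sketch of that arithmetic (the helper name and the four-PQs-per-QMAN assumption are mine, inferred from the 0x3 mask and the NUM_OF_PQ_PER_QMAN strides elsewhere in this listing):

/* Illustrative only: byte offset of a queue's PQ_PI doorbell register,
 * assuming each QMAN owns four PQs with 4-byte-spaced PI registers.
 */
static inline u32 pq_doorbell_byte_offset(u32 hw_queue_id)
{
	u32 pq_index = hw_queue_id & 0x3;	/* PQ index within its QMAN: 0..3 */

	return pq_index * 4;			/* e.g. queue 13 -> PQ 1 -> offset 4 */
}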
5832 pqe[0] = pbd[0]; in gaudi2_pqe_write()
5855 *result = 0; in gaudi2_send_cpu_message()
5856 return 0; in gaudi2_send_cpu_message()
5897 return 0; in gaudi2_dma_map_single()
5924 return 0; in gaudi2_validate_cb_address()
5930 return 0; in gaudi2_validate_cb_address()
5937 return 0; in gaudi2_validate_cb_address()
5949 return 0; in gaudi2_validate_cb_address()
5953 return 0; in gaudi2_validate_cb_address()
5956 return 0; in gaudi2_validate_cb_address()
5959 dev_err(hdev->dev, "CB address %p + 0x%x for internal QMAN is not valid\n", in gaudi2_validate_cb_address()
5977 return 0; in gaudi2_cs_parser()
5985 return 0; in gaudi2_send_heartbeat()
6018 WREG32(mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_0 + sob_offset, 0); in gaudi2_arm_cq_monitor()
6020 /* Configure this address with CQ_ID 0 because CQ_EN is set */ in gaudi2_arm_cq_monitor()
6027 mask = ~(1 << (sob_id & 0x7)); in gaudi2_arm_cq_monitor()
6042 u32 comp_val, commit_mask, *polling_addr, timeout, status = 0; in gaudi2_send_job_to_kdma()
6096 *polling_addr = 0; in gaudi2_send_job_to_kdma()
6106 return 0; in gaudi2_send_job_to_kdma()
6113 for (i = 0 ; i < size ; i += sizeof(u32)) in gaudi2_memset_device_lbw()
6123 WREG32(reg_base + QM_PQC_CFG_OFFSET, 0); in gaudi2_qman_set_test_mode()
6132 u32 sob_offset = hdev->asic_prop.first_available_user_sob[0] * 4; in gaudi2_test_queue()
6134 u32 timeout_usec, tmp, sob_base = 1, sob_val = 0x5a5a; in gaudi2_test_queue()
6163 WREG32(sob_addr, 0); in gaudi2_test_queue()
6181 dev_err(hdev->dev, "H/W queue %d test failed (SOB_OBJ_0 == 0x%x)\n", in gaudi2_test_queue()
6187 WREG32(sob_addr, 0); in gaudi2_test_queue()
6203 return 0; in gaudi2_test_cpu_queue()
6210 int i, rc, ret_val = 0; in gaudi2_test_queues()
6249 irq_arr_size = gaudi2->num_of_valid_hw_events * sizeof(gaudi2->hw_events[0]); in gaudi2_compute_reset_late_init()
6261 if ((dcore == 0) && (inst == (NUM_DCORE0_TPC - 1))) in gaudi2_is_tpc_engine_idle()
6322 for (i = 0; i < NUM_OF_DCORES; i++) { in gaudi2_is_device_idle()
6323 for (j = 0 ; j < NUM_OF_EDMA_PER_DCORE ; j++) { in gaudi2_is_device_idle()
6361 for (i = 0 ; i < NUM_OF_PDMA ; i++) { in gaudi2_is_device_idle()
6388 for (i = 0 ; i < NIC_NUMBER_OF_ENGINES ; i++) { in gaudi2_is_device_idle()
6420 for (i = 0 ; i < NUM_OF_DCORES ; i++) { in gaudi2_is_device_idle()
6461 for (i = 0 ; i < NUM_OF_DCORES ; i++) { in gaudi2_is_device_idle()
6462 for (j = 0 ; j < NUM_OF_DEC_PER_DCORE ; j++) { in gaudi2_is_device_idle()
6490 for (i = 0 ; i < NUM_OF_DEC_PER_DCORE ; i++) { in gaudi2_is_device_idle()
6514 for (i = 0 ; i < NUM_OF_ROT ; i++) { in gaudi2_is_device_idle()
6530 hl_engine_data_sprintf(e, rot_fmt, i, 0, is_eng_idle ? "Y" : "N", in gaudi2_is_device_idle()
6563 return 0; in gaudi2_get_eeprom_data()
6620 WREG32(mmDCORE0_EDMA0_QM_AXUSER_NONSECURED_HB_MMU_BP + dcore_offset, 0); in gaudi2_mmu_dcore_prepare()
6622 WREG32(mmDCORE0_EDMA0_CORE_CTX_AXUSER_HB_MMU_BP + dcore_offset, 0); in gaudi2_mmu_dcore_prepare()
6627 WREG32(mmDCORE0_EDMA1_QM_AXUSER_NONSECURED_HB_MMU_BP + dcore_offset, 0); in gaudi2_mmu_dcore_prepare()
6630 WREG32(mmDCORE0_EDMA1_CORE_CTX_AXUSER_HB_MMU_BP + dcore_offset, 0); in gaudi2_mmu_dcore_prepare()
6639 if (dcore_id > 0) { in gaudi2_mmu_dcore_prepare()
6643 WREG32(mmDCORE0_SYNC_MNGR_MSTR_IF_AXUSER_HB_MMU_BP + dcore_offset, 0); in gaudi2_mmu_dcore_prepare()
6646 WREG32(mmDCORE0_MME_CTRL_LO_MME_AXUSER_HB_MMU_BP + dcore_offset, 0); in gaudi2_mmu_dcore_prepare()
6649 for (i = 0 ; i < NUM_OF_MME_SBTE_PORTS ; i++) { in gaudi2_mmu_dcore_prepare()
6652 dcore_offset + ports_offset, 0); in gaudi2_mmu_dcore_prepare()
6657 for (i = 0 ; i < NUM_OF_MME_WB_PORTS ; i++) { in gaudi2_mmu_dcore_prepare()
6660 dcore_offset + ports_offset, 0); in gaudi2_mmu_dcore_prepare()
6665 WREG32(mmDCORE0_MME_QM_AXUSER_NONSECURED_HB_MMU_BP + dcore_offset, 0); in gaudi2_mmu_dcore_prepare()
6671 for (vdec_id = 0 ; vdec_id < NUM_OF_DEC_PER_DCORE ; vdec_id++) { in gaudi2_mmu_dcore_prepare()
6673 gaudi2_mmu_vdec_dcore_prepare(hdev, dcore_id, vdec_id, rw_asid, 0); in gaudi2_mmu_dcore_prepare()
6709 u32 reg_base, reg_offset, reg_val = 0; in gaudi2_arc_mmu_prepare()
6714 reg_val = FIELD_PREP(ARC_FARM_ARC0_AUX_ARC_REGION_CFG_MMU_BP_MASK, 0); in gaudi2_arc_mmu_prepare()
6768 return 0; in gaudi2_arc_mmu_prepare_all()
6781 WREG32(mmPDMA0_QM_AXUSER_NONSECURED_HB_MMU_BP, 0); in gaudi2_mmu_shared_prepare()
6783 WREG32(mmPDMA0_CORE_CTX_AXUSER_HB_MMU_BP, 0); in gaudi2_mmu_shared_prepare()
6786 WREG32(mmPDMA1_QM_AXUSER_NONSECURED_HB_MMU_BP, 0); in gaudi2_mmu_shared_prepare()
6788 WREG32(mmPDMA1_CORE_CTX_AXUSER_HB_MMU_BP, 0); in gaudi2_mmu_shared_prepare()
6791 for (i = 0 ; i < NUM_OF_ROT ; i++) { in gaudi2_mmu_shared_prepare()
6794 WREG32(mmROT0_QM_AXUSER_NONSECURED_HB_MMU_BP + offset, 0); in gaudi2_mmu_shared_prepare()
6801 if (prop->decoder_enabled_mask & BIT(NUM_OF_DCORES * NUM_OF_DEC_PER_DCORE + 0)) in gaudi2_mmu_shared_prepare()
6802 gudi2_mmu_vdec_shared_prepare(hdev, 0, rw_asid, 0); in gaudi2_mmu_shared_prepare()
6805 gudi2_mmu_vdec_shared_prepare(hdev, 1, rw_asid, 0); in gaudi2_mmu_shared_prepare()
6808 for (i = 0 ; i < NUM_OF_ARC_FARMS_ARC ; i++) in gaudi2_mmu_shared_prepare()
6809 gudi2_mmu_arc_farm_arc_dup_eng_prepare(hdev, i, rw_asid, 0); in gaudi2_mmu_shared_prepare()
6815 return 0; in gaudi2_mmu_shared_prepare()
6823 WREG32(mmDCORE0_TPC0_CFG_AXUSER_HB_MMU_BP + offset, 0); in gaudi2_tpc_mmu_prepare()
6825 WREG32(mmDCORE0_TPC0_QM_AXUSER_NONSECURED_HB_MMU_BP + offset, 0); in gaudi2_tpc_mmu_prepare()
6846 return 0; in gaudi2_mmu_prepare()
6856 for (i = 0 ; i < NUM_OF_DCORES ; i++) in gaudi2_mmu_prepare()
6859 return 0; in gaudi2_mmu_prepare()
6903 u64 ecc_address = 0, ecc_syndrom = 0; in gaudi2_handle_ecc_event()
6904 u8 memory_wrapper_idx = 0; in gaudi2_handle_ecc_event()
6932 * range [0, queue_len - 1] in gaudi2_queue_idx_dec()
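This comment fragment belongs to a ring-index decrement: stepping one slot backwards has to wrap so the result stays in [0, queue_len - 1]. A hedged sketch of that idea, assuming a power-of-two queue length so the wrap reduces to a mask:

/* Sketch, not the driver's code: modular decrement for a power-of-two
 * queue length. Adding (q_len - 1) and masking with (q_len - 1) is the
 * same as subtracting 1 modulo q_len, keeping the index in
 * [0, q_len - 1].
 */
static inline u32 queue_idx_dec(u32 idx, u32 q_len)
{
	return (idx + q_len - 1) & (q_len - 1);
}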
6998 for (i = 0; i < PQ_FETCHER_CACHE_SIZE; i++) { in gaudi2_print_last_pqes_on_err()
7007 /* len 0 means uninitialized entry- break */ in gaudi2_print_last_pqes_on_err()
7046 for (i = 0 ; i < QMAN_STREAMS ; i++) in print_qman_data_on_err()
7061 for (i = 0 ; i < QMAN_STREAMS + 1 ; i++) { in gaudi2_handle_qman_err_generic()
7075 for (j = 0 ; j < num_error_causes ; j++) in gaudi2_handle_qman_err_generic()
7091 for (j = 0 ; j < GAUDI2_NUM_OF_QM_ARB_ERR_CAUSE ; j++) { in gaudi2_handle_qman_err_generic()
7127 "%s-RAZWI SHARED RR HBW %s error, address %#llx, Initiator coordinates 0x%x\n", in gaudi2_razwi_rr_hbw_shared_printf_info()
7147 …"%s-RAZWI SHARED RR LBW WR error, mstr_if 0x%llx, captured address 0x%x, Initiator coordinates 0x%… in gaudi2_razwi_rr_lbw_shared_printf_info()
7159 …"%s-RAZWI SHARED RR LBW AR error, mstr_if 0x%llx, captured address 0x%x Initiator coordinates 0x%x… in gaudi2_razwi_rr_lbw_shared_printf_info()
7175 u32 hbw_shrd_aw = 0, hbw_shrd_ar = 0; in gaudi2_ack_module_razwi_event_handler()
7176 u32 lbw_shrd_aw = 0, lbw_shrd_ar = 0; in gaudi2_ack_module_razwi_event_handler()
7259 rtr_mstr_if_base_addr = 0; in gaudi2_ack_module_razwi_event_handler()
7350 for (mod_idx = 0 ; mod_idx < (NUM_OF_TPC_PER_DCORE * NUM_OF_DCORES + 1) ; mod_idx++) { in gaudi2_check_if_razwi_happened()
7352 gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_TPC, mod_idx, 0, NULL); in gaudi2_check_if_razwi_happened()
7356 for (mod_idx = 0 ; mod_idx < (NUM_OF_MME_PER_DCORE * NUM_OF_DCORES) ; mod_idx++) in gaudi2_check_if_razwi_happened()
7362 for (mod_idx = 0 ; mod_idx < (NUM_OF_EDMA_PER_DCORE * NUM_OF_DCORES) ; mod_idx++) in gaudi2_check_if_razwi_happened()
7364 gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_EDMA, mod_idx, 0, NULL); in gaudi2_check_if_razwi_happened()
7367 for (mod_idx = 0 ; mod_idx < NUM_OF_PDMA ; mod_idx++) in gaudi2_check_if_razwi_happened()
7368 gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_PDMA, mod_idx, 0, NULL); in gaudi2_check_if_razwi_happened()
7371 for (mod_idx = 0 ; mod_idx < NIC_NUMBER_OF_PORTS ; mod_idx++) in gaudi2_check_if_razwi_happened()
7373 gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_NIC, mod_idx >> 1, 0, in gaudi2_check_if_razwi_happened()
7377 for (mod_idx = 0 ; mod_idx < NUMBER_OF_DEC ; mod_idx++) in gaudi2_check_if_razwi_happened()
7379 gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_DEC, mod_idx, 0, NULL); in gaudi2_check_if_razwi_happened()
7382 for (mod_idx = 0 ; mod_idx < NUM_OF_ROT ; mod_idx++) in gaudi2_check_if_razwi_happened()
7383 gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_ROT, mod_idx, 0, NULL); in gaudi2_check_if_razwi_happened()
7468 WREG32(rtr_ctrl_base_addr + DEC_RAZWI_HBW_AW_SET, 0x1); in gaudi2_razwi_unmapped_addr_hbw_printf_info()
7474 WREG32(rtr_ctrl_base_addr + DEC_RAZWI_HBW_AR_SET, 0x1); in gaudi2_razwi_unmapped_addr_hbw_printf_info()
7494 WREG32(rtr_ctrl_base_addr + DEC_RAZWI_LBW_AW_SET, 0x1); in gaudi2_razwi_unmapped_addr_lbw_printf_info()
7499 WREG32(rtr_ctrl_base_addr + DEC_RAZWI_LBW_AR_SET, 0x1); in gaudi2_razwi_unmapped_addr_lbw_printf_info()
7514 razwi_mask_info, razwi_intr = 0; in gaudi2_ack_psoc_razwi_event_handler()
7528 "PSOC RAZWI interrupt: Mask %d, AR %d, AW %d, AXUSER_L 0x%x AXUSER_H 0x%x\n", in gaudi2_ack_psoc_razwi_event_handler()
7535 if (xy == 0) { in gaudi2_ack_psoc_razwi_event_handler()
7537 "PSOC RAZWI interrupt: received event from 0 rtr coordinates\n"); in gaudi2_ack_psoc_razwi_event_handler()
7542 for (rtr_id = 0 ; rtr_id < rtr_map_arr_len ; rtr_id++) in gaudi2_ack_psoc_razwi_event_handler()
7548 "PSOC RAZWI interrupt: invalid rtr coordinates (0x%x)\n", xy); in gaudi2_ack_psoc_razwi_event_handler()
7587 u32 i, sts_val, sts_clr_val = 0; in _gaudi2_handle_qm_sei_err()
7591 for (i = 0 ; i < GAUDI2_NUM_OF_QM_SEI_ERR_CAUSE ; i++) { in _gaudi2_handle_qm_sei_err()
7650 gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_PDMA, 0, 0, razwi_info); in gaudi2_handle_qm_sei_err()
7783 u32 i, sts_val, sts_clr_val = 0; in gaudi2_handle_arc_farm_sei_err()
7787 for (i = 0 ; i < GAUDI2_NUM_OF_ARC_SEI_ERR_CAUSE ; i++) { in gaudi2_handle_arc_farm_sei_err()
7800 u32 i, sts_val, sts_clr_val = 0; in gaudi2_handle_cpu_sei_err()
7804 for (i = 0 ; i < GAUDI2_NUM_OF_CPU_SEI_ERR_CAUSE ; i++) { in gaudi2_handle_cpu_sei_err()
7821 for (i = 0 ; i < GAUDI2_NUM_OF_ROT_ERR_CAUSE ; i++) in gaudi2_handle_rot_err()
7827 gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_ROT, rot_index, 0, in gaudi2_handle_rot_err()
7837 for (i = 0 ; i < GAUDI2_NUM_OF_TPC_INTR_CAUSE ; i++) in gaudi2_tpc_ack_interrupts()
7843 gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_TPC, tpc_index, 0, in gaudi2_tpc_ack_interrupts()
7850 u32 sts_addr, sts_val, sts_clr_val = 0; in gaudi2_handle_dec_err()
7865 for (i = 0 ; i < GAUDI2_NUM_OF_DEC_ERR_CAUSE ; i++) { in gaudi2_handle_dec_err()
7874 gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_DEC, dec_index, 0, razwi_info); in gaudi2_handle_dec_err()
7883 u32 sts_addr, sts_val, sts_clr_addr, sts_clr_val = 0; in gaudi2_handle_mme_err()
7891 for (i = 0 ; i < GAUDI2_NUM_OF_MME_ERR_CAUSE ; i++) { in gaudi2_handle_mme_err()
7911 for (i = 0 ; i < GAUDI2_NUM_OF_MME_SBTE_ERR_CAUSE ; i++) in gaudi2_handle_mme_sbte_err()
7920 u32 sts_addr, sts_val, sts_clr_addr, sts_clr_val = 0; in gaudi2_handle_mme_wap_err()
7928 for (i = 0 ; i < GAUDI2_NUM_OF_MME_WAP_ERR_CAUSE ; i++) { in gaudi2_handle_mme_wap_err()
7953 for (i = 0 ; i < GAUDI2_NUM_OF_DMA_CORE_INTR_CAUSE ; i++) in gaudi2_handle_kdma_core_event()
7963 for (i = 0 ; i < GAUDI2_NUM_OF_DMA_CORE_INTR_CAUSE ; i++) in gaudi2_handle_dma_core_event()
7977 WREG32(razwi_happened_addr, 0x1); in gaudi2_print_pcie_mstr_rr_mstr_if_razwi_info()
7984 WREG32(razwi_happened_addr, 0x1); in gaudi2_print_pcie_mstr_rr_mstr_if_razwi_info()
7991 WREG32(razwi_happened_addr, 0x1); in gaudi2_print_pcie_mstr_rr_mstr_if_razwi_info()
7998 WREG32(razwi_happened_addr, 0x1); in gaudi2_print_pcie_mstr_rr_mstr_if_razwi_info()
8006 for (i = 0 ; i < GAUDI2_NUM_OF_PCIE_ADDR_DEC_ERR_CAUSE ; i++) { in gaudi2_print_pcie_addr_dec_info()
8028 for (i = 0 ; i < GAUDI2_NUM_OF_PMMU_FATAL_ERR_CAUSE ; i++) { in gaudi2_handle_pif_fatal()
8043 for (i = 0 ; i < GAUDI2_NUM_OF_HIF_FATAL_ERR_CAUSE ; i++) { in gaudi2_handle_hif_fatal()
8065 dev_err_ratelimited(hdev->dev, "%s page fault on va 0x%llx\n", in gaudi2_handle_page_error()
8068 WREG32(mmu_base + MMU_OFFSET(mmDCORE0_HMMU0_MMU_PAGE_ERROR_CAPTURE), 0); in gaudi2_handle_page_error()
8086 dev_err_ratelimited(hdev->dev, "%s access error on va 0x%llx\n", in gaudi2_handle_access_error()
8088 WREG32(mmu_base + MMU_OFFSET(mmDCORE0_HMMU0_MMU_ACCESS_ERROR_CAPTURE), 0); in gaudi2_handle_access_error()
8094 u32 spi_sei_cause, interrupt_clr = 0x0; in gaudi2_handle_mmu_spi_sei_generic()
8099 for (i = 0 ; i < GAUDI2_NUM_OF_MMU_SPI_SEI_CAUSE ; i++) { in gaudi2_handle_mmu_spi_sei_generic()
8104 if (i == 0) in gaudi2_handle_mmu_spi_sei_generic()
8109 if (gaudi2_mmu_spi_sei[i].clear_bit >= 0) in gaudi2_handle_mmu_spi_sei_generic()
8141 for (i = 0 ; i < GAUDI2_NUM_OF_SM_SEI_ERR_CAUSE ; i++) { in gaudi2_handle_sm_err()
8145 dev_err_ratelimited(hdev->dev, "SM%u SEI ERR. err cause: %s. %s: 0x%X\n", in gaudi2_handle_sm_err()
8158 WREG32(sei_cause_addr, 0); in gaudi2_handle_sm_err()
8171 WREG32(cq_intr_addr, 0); in gaudi2_handle_sm_err()
8262 for (beat = 0 ; beat < 4 ; beat++) { in gaudi2_hbm_sei_handle_read_err()
8310 dev_err_ratelimited(hdev->dev, "CK-0 DERR: 0x%02x, CK-1 DERR: 0x%02x\n", in gaudi2_hbm_sei_print_wr_par_info()
8311 derr & 0x3, derr & 0xc); in gaudi2_hbm_sei_print_wr_par_info()
8315 for (i = 0 ; i < HBM_WR_PAR_CMD_LIFO_LEN ; i++) { in gaudi2_hbm_sei_print_wr_par_info()
8337 for (i = 0 ; i < HBM_CA_ERR_CMD_LIFO_LEN ; i++) in gaudi2_hbm_sei_print_ca_par_info()
8338 dev_err_ratelimited(hdev->dev, "cmd%u: ROW(0x%04x) COL(0x%05x)\n", i, in gaudi2_hbm_sei_print_ca_par_info()
8339 le16_to_cpu(row_cmd[i]) & (u16)GENMASK(13, 0), in gaudi2_hbm_sei_print_ca_par_info()
8340 le32_to_cpu(col_cmd[i]) & (u32)GENMASK(17, 0)); in gaudi2_hbm_sei_print_ca_par_info()
8425 for (i = 0 ; i < GAUDI2_NUM_OF_HBM_MC_SPI_CAUSE ; i++) in gaudi2_handle_hbm_mc_spi()
8433 ktime_t zero_time = ktime_set(0, 0); in gaudi2_print_clk_change_info()
8492 "pcie p2p transaction terminated due to security, req_id(0x%x)\n", in gaudi2_handle_pcie_p2p_msix()
8495 WREG32(mmPCIE_WRAP_P2P_INTR, 0x1); in gaudi2_handle_pcie_p2p_msix()
8500 "pcie msi-x gen denied due to vector num check failure, vec(0x%X)\n", in gaudi2_handle_pcie_p2p_msix()
8503 WREG32(mmPCIE_WRAP_MSIX_GW_INTR, 0x1); in gaudi2_handle_pcie_p2p_msix()
8518 if (cause & BIT_ULL(0)) in gaudi2_handle_pcie_drain()
8533 for (i = 0 ; i < GAUDI2_NUM_OF_AXI_DRAIN_ERR_CAUSE ; i++) { in gaudi2_handle_psoc_drain()
8580 u64 event_mask = 0; in gaudi2_handle_eqe()
8939 dev_info(hdev->dev, "CPLD shutdown cause, reset reason: 0x%llx\n", in gaudi2_handle_eqe()
8940 le64_to_cpu(eq_entry->data[0])); in gaudi2_handle_eqe()
8944 dev_err(hdev->dev, "CPLD shutdown event, reset reason: 0x%llx\n", in gaudi2_handle_eqe()
8945 le64_to_cpu(eq_entry->data[0])); in gaudi2_handle_eqe()
8996 int rc = 0; in gaudi2_memset_device_memory()
8998 sob_offset = hdev->asic_prop.first_available_user_sob[0] * 4; in gaudi2_memset_device_memory()
9010 if (prop->edma_enabled_mask == 0) { in gaudi2_memset_device_memory()
9020 for (dcore = 0 ; dcore < NUM_OF_DCORES ; dcore++) { in gaudi2_memset_device_memory()
9021 for (edma_idx = 0 ; edma_idx < NUM_OF_EDMA_PER_DCORE ; edma_idx++) { in gaudi2_memset_device_memory()
9034 int dma_num = 0; in gaudi2_memset_device_memory()
9036 WREG32(sob_addr, 0); in gaudi2_memset_device_memory()
9037 for (dcore = 0 ; dcore < NUM_OF_DCORES ; dcore++) { in gaudi2_memset_device_memory()
9038 for (edma_idx = 0 ; edma_idx < NUM_OF_EDMA_PER_DCORE ; edma_idx++) { in gaudi2_memset_device_memory()
9085 for (dcore = 0 ; dcore < NUM_OF_DCORES ; dcore++) { in gaudi2_memset_device_memory()
9086 for (edma_idx = 0 ; edma_idx < NUM_OF_EDMA_PER_DCORE ; edma_idx++) { in gaudi2_memset_device_memory()
9097 WREG32(sob_addr, 0); in gaudi2_memset_device_memory()
9110 dev_err(hdev->dev, "Failed to scrub dram, address: 0x%llx size: %llu\n", in gaudi2_scrub_device_dram()
9123 return 0; in gaudi2_scrub_device_mem()
9127 size = hdev->pldm ? 0x10000 : (prop->sram_size - SRAM_USER_BASE_OFFSET); in gaudi2_scrub_device_mem()
9128 dev_dbg(hdev->dev, "Scrubbing SRAM: 0x%09llx - 0x%09llx, val: 0x%llx\n", in gaudi2_scrub_device_mem()
9142 return 0; in gaudi2_scrub_device_mem()
9152 offset = hdev->asic_prop.first_available_cq[0] * 4; in gaudi2_restore_user_sm_registers()
9163 gaudi2_memset_device_lbw(hdev, cq_lbw_l_addr, size, 0); in gaudi2_restore_user_sm_registers()
9164 gaudi2_memset_device_lbw(hdev, cq_lbw_h_addr, size, 0); in gaudi2_restore_user_sm_registers()
9165 gaudi2_memset_device_lbw(hdev, cq_lbw_data_addr, size, 0); in gaudi2_restore_user_sm_registers()
9166 gaudi2_memset_device_lbw(hdev, cq_base_l_addr, size, 0); in gaudi2_restore_user_sm_registers()
9167 gaudi2_memset_device_lbw(hdev, cq_base_h_addr, size, 0); in gaudi2_restore_user_sm_registers()
9178 gaudi2_memset_device_lbw(hdev, cq_lbw_l_addr, size, 0); in gaudi2_restore_user_sm_registers()
9179 gaudi2_memset_device_lbw(hdev, cq_lbw_h_addr, size, 0); in gaudi2_restore_user_sm_registers()
9180 gaudi2_memset_device_lbw(hdev, cq_lbw_data_addr, size, 0); in gaudi2_restore_user_sm_registers()
9181 gaudi2_memset_device_lbw(hdev, cq_base_l_addr, size, 0); in gaudi2_restore_user_sm_registers()
9182 gaudi2_memset_device_lbw(hdev, cq_base_h_addr, size, 0); in gaudi2_restore_user_sm_registers()
9183 gaudi2_memset_device_lbw(hdev, cq_size_addr, size, 0); in gaudi2_restore_user_sm_registers()
9193 offset = hdev->asic_prop.first_available_user_mon[0] * 4; in gaudi2_restore_user_sm_registers()
9202 gaudi2_memset_device_lbw(hdev, addr, size, 0); in gaudi2_restore_user_sm_registers()
9210 gaudi2_memset_device_lbw(hdev, mon_cfg_addr, size, 0); in gaudi2_restore_user_sm_registers()
9215 offset = hdev->asic_prop.first_available_user_sob[0] * 4; in gaudi2_restore_user_sm_registers()
9217 val = 0; in gaudi2_restore_user_sm_registers()
9248 WREG32(reg_base + QM_ARB_CFG_0_OFFSET, 0); in gaudi2_restore_user_qm_registers()
9267 WREG32(reg_base + QM_ARB_CFG_0_OFFSET, 0); in gaudi2_restore_nic_qm_registers()
9276 return 0; in gaudi2_context_switch()
9290 for (i = 0 ; i < cfg_ctx->instances ; i++) { in gaudi2_init_block_instances()
9309 for (i = 0 ; i < cfg_ctx->blocks ; i++) in gaudi2_init_blocks_with_mask()
9325 int rc = 0; in gaudi2_debugfs_read_dma()
9367 pos = 0; in gaudi2_debugfs_read_dma()
9371 while (size_left > 0) { in gaudi2_debugfs_read_dma()
9412 return 0; in gaudi2_internal_cb_pool_init()
9457 return 0; in gaudi2_internal_cb_pool_init()
9568 return 0; in gaudi2_pre_schedule_cs()
9585 return 0; in gaudi2_pre_schedule_cs()
9600 memset(pkt, 0, pkt_size); in gaudi2_gen_signal_cb()
9622 memset(pkt, 0, pkt_size); in gaudi2_add_mon_msg_short()
9625 ctl |= FIELD_PREP(GAUDI2_PKT_SHORT_CTL_BASE_MASK, 0); /* MON base */ in gaudi2_add_mon_msg_short()
9627 ctl |= FIELD_PREP(GAUDI2_PKT_CTL_EB_MASK, 0); in gaudi2_add_mon_msg_short()
9628 ctl |= FIELD_PREP(GAUDI2_PKT_CTL_MB_MASK, 0); in gaudi2_add_mon_msg_short()
9644 return 0; in gaudi2_add_arm_monitor_pkt()
9647 memset(pkt, 0, pkt_size); in gaudi2_add_arm_monitor_pkt()
9651 value |= FIELD_PREP(GAUDI2_PKT_SHORT_VAL_MON_MODE_MASK, 0); /* GREATER OR EQUAL*/ in gaudi2_add_arm_monitor_pkt()
9655 ctl |= FIELD_PREP(GAUDI2_PKT_SHORT_CTL_BASE_MASK, 0); /* MON base */ in gaudi2_add_arm_monitor_pkt()
9657 ctl |= FIELD_PREP(GAUDI2_PKT_CTL_EB_MASK, 0); in gaudi2_add_arm_monitor_pkt()
9670 memset(pkt, 0, pkt_size); in gaudi2_add_fence_pkt()
9677 ctl |= FIELD_PREP(GAUDI2_PKT_CTL_EB_MASK, 0); in gaudi2_add_fence_pkt()
9691 u64 monitor_base, fence_addr = 0; in gaudi2_gen_wait_cb()
9744 WREG32(mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_0 + hw_sob->sob_id * 4, 0); in gaudi2_reset_sob()
9762 return 0; in gaudi2_collective_wait_init_cs()
9778 * (addr[47:0] / 48M) * 64M + addr % 48M + addr[63:48]
9786 * PA1 0x3000000 VA1 0x9C000000 SVA1= (VA1/48M)*64M 0xD0000000 <- PA1/48M 0x1
9787 * PA2 0x9000000 VA2 0x9F000000 SVA2= (VA2/48M)*64M 0xD4000000 <- PA2/48M 0x3
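The comment above spells out the scrambled-address conversion and gives two worked values (VA 0x9C000000 maps to 0xD0000000, VA 0x9F000000 to 0xD4000000, which is the constant this search matched). A small self-contained sketch of that arithmetic; the function name and the standalone main() are illustrative only, not the driver's code:

#include <stdint.h>
#include <stdio.h>

#define SZ_48M	0x3000000ULL
#define SZ_64M	0x4000000ULL

/* Apply (addr[47:0] / 48M) * 64M + addr[47:0] % 48M to the low 48 bits,
 * carrying the upper bits (addr[63:48]) through unchanged.
 */
static uint64_t scramble_addr(uint64_t addr)
{
	uint64_t low = addr & ((1ULL << 48) - 1);
	uint64_t high = addr & ~((1ULL << 48) - 1);

	return (low / SZ_48M) * SZ_64M + low % SZ_48M + high;
}

int main(void)
{
	/* Reproduces the examples from the comment:
	 * 0x9C000000 -> 0xD0000000 and 0x9F000000 -> 0xD4000000.
	 */
	printf("0x%llx\n", (unsigned long long)scramble_addr(0x9C000000ULL));
	printf("0x%llx\n", (unsigned long long)scramble_addr(0x9F000000ULL));

	return 0;
}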
9833 u32 base = 0, dcore_id, dec_id; in gaudi2_get_dec_base_addr()
9860 for (i = 0 ; i < NUM_USER_MAPPED_BLOCKS ; i++) { in gaudi2_get_hw_block_id()
9865 return 0; in gaudi2_get_hw_block_id()
9978 return 0; in gaudi2_get_mmu_base()
10002 for (i = 0 ; i < num_of_hmmus ; i++) { in gaudi2_ack_mmu_page_fault_or_access_error()
10013 return 0; in gaudi2_ack_mmu_page_fault_or_access_error()
10043 return 0; in gaudi2_gen_sync_to_engine_map()
10049 return 0; in gaudi2_monitor_valid()
10056 return 0; in gaudi2_print_single_monitor()
10065 return 0; in gaudi2_print_fences_single_engine()
10085 return 0; in gaudi2_get_sob_addr()
10111 return 0; in gaudi2_mmu_get_real_page_size()
10127 return 0; in gaudi2_mmu_get_real_page_size()
10145 return 0; in gaudi2_send_device_activity()