Lines Matching +full:parallel +full:- +full:memories
1 // SPDX-License-Identifier: GPL-2.0
4 * Copyright 2016-2022 HabanaLabs, Ltd.
27 * - Range registers
28 * - MMU
31 * - Range registers (protect the first 512MB)
34 * - Range registers
35 * - Protection bits
40 * - DMA is not secured.
41 * - PQ and CQ are secured.
42 * - CP is secured: The driver needs to parse CB but WREG should be allowed
52 * - Clear SRAM on context switch (happens on context switch when device is
54 * - MMU page tables area clear (happens on init)
56 * QMAN DMA 2-7, TPC, MME, NIC:
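The security notes above hinge on one idea: for secured engines the driver must parse every user command buffer and admit only packets it can prove harmless, while still letting WREG through to an allowed register set. A minimal sketch of that whitelist check, assuming an illustrative packet layout and range table (neither is the driver's real definition):

#include <stdbool.h>
#include <stdint.h>

/* Illustrative packet and whitelist shapes - not the driver's real ones. */
struct wreg32_pkt {
	uint32_t reg_offset;		/* offset from the config space base */
	uint32_t value;
};

struct reg_range {
	uint32_t start;
	uint32_t end;			/* inclusive */
};

static bool wreg32_allowed(const struct reg_range *wl, unsigned int n,
			   const struct wreg32_pkt *pkt)
{
	unsigned int i;

	for (i = 0; i < n; i++)
		if (pkt->reg_offset >= wl[i].start &&
		    pkt->reg_offset <= wl[i].end)
			return true;

	return false;			/* reject anything off-whitelist */
}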
62 #define GAUDI_BOOT_FIT_FILE "habanalabs/gaudi/gaudi-boot-fit.itb"
63 #define GAUDI_LINUX_FW_FILE "habanalabs/gaudi/gaudi-fit.itb"
411 [SP_NEXT_TPC] = mmTPC1_QM_GLBL_CFG0 - mmTPC0_QM_GLBL_CFG0,
414 [SP_NEXT_MME] = mmMME2_QM_GLBL_CFG0 - mmMME0_QM_GLBL_CFG0,
417 [SP_DMA_QUEUES_OFFSET] = mmDMA1_QM_GLBL_CFG0 - mmDMA0_QM_GLBL_CFG0,
426 mmDMA0_QM_CP_FENCE0_CNT_0 - mmDMA0_QM_GLBL_CFG0,
428 mmDMA0_QM_CP_FENCE0_RDATA_0 - mmDMA0_QM_GLBL_CFG0,
429 [SP_CP_STS_OFFSET] = mmDMA0_QM_CP_STS_0 - mmDMA0_QM_GLBL_CFG0,
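These table entries are strides: sibling engines' register blocks sit at fixed distances, so engine n's register is reachable as base + n * stride, exactly what the mmDMA1_QM_GLBL_CFG0 - mmDMA0_QM_GLBL_CFG0 subtraction captures. A toy illustration (the constants here are made up):

#include <stdint.h>

#define QM_GLBL_CFG0_BASE	0x1000000u	/* hypothetical engine-0 block */
#define QM_BLOCK_STRIDE		0x20000u	/* hypothetical engine1 - engine0 */

static inline uint32_t qm_glbl_cfg0(unsigned int engine)
{
	/* same pattern as mmDMA0_QM_GLBL_CFG0 + n * (mmDMA1 - mmDMA0) */
	return QM_GLBL_CFG0_BASE + engine * QM_BLOCK_STRIDE;
}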
523 struct asic_fixed_properties *prop = &hdev->asic_prop; in set_default_power_values()
525 if (hdev->card_type == cpucp_card_type_pmc) { in set_default_power_values()
526 prop->max_power_default = MAX_POWER_DEFAULT_PMC; in set_default_power_values()
528 if (prop->fw_security_enabled) in set_default_power_values()
529 prop->dc_power_default = DC_POWER_DEFAULT_PMC_SEC; in set_default_power_values()
531 prop->dc_power_default = DC_POWER_DEFAULT_PMC; in set_default_power_values()
533 prop->max_power_default = MAX_POWER_DEFAULT_PCI; in set_default_power_values()
534 prop->dc_power_default = DC_POWER_DEFAULT_PCI; in set_default_power_values()
540 struct asic_fixed_properties *prop = &hdev->asic_prop; in gaudi_set_fixed_properties()
544 prop->max_queues = GAUDI_QUEUE_ID_SIZE; in gaudi_set_fixed_properties()
545 prop->hw_queues_props = kcalloc(prop->max_queues, in gaudi_set_fixed_properties()
549 if (!prop->hw_queues_props) in gaudi_set_fixed_properties()
550 return -ENOMEM; in gaudi_set_fixed_properties()
552 for (i = 0 ; i < prop->max_queues ; i++) { in gaudi_set_fixed_properties()
554 prop->hw_queues_props[i].type = QUEUE_TYPE_EXT; in gaudi_set_fixed_properties()
555 prop->hw_queues_props[i].driver_only = 0; in gaudi_set_fixed_properties()
556 prop->hw_queues_props[i].supports_sync_stream = 1; in gaudi_set_fixed_properties()
557 prop->hw_queues_props[i].cb_alloc_flags = in gaudi_set_fixed_properties()
561 prop->hw_queues_props[i].type = QUEUE_TYPE_CPU; in gaudi_set_fixed_properties()
562 prop->hw_queues_props[i].driver_only = 1; in gaudi_set_fixed_properties()
563 prop->hw_queues_props[i].supports_sync_stream = 0; in gaudi_set_fixed_properties()
564 prop->hw_queues_props[i].cb_alloc_flags = in gaudi_set_fixed_properties()
567 prop->hw_queues_props[i].type = QUEUE_TYPE_INT; in gaudi_set_fixed_properties()
568 prop->hw_queues_props[i].driver_only = 0; in gaudi_set_fixed_properties()
569 prop->hw_queues_props[i].supports_sync_stream = 0; in gaudi_set_fixed_properties()
570 prop->hw_queues_props[i].cb_alloc_flags = in gaudi_set_fixed_properties()
574 prop->hw_queues_props[i].collective_mode = in gaudi_set_fixed_properties()
578 prop->cache_line_size = DEVICE_CACHE_LINE_SIZE; in gaudi_set_fixed_properties()
579 prop->cfg_base_address = CFG_BASE; in gaudi_set_fixed_properties()
580 prop->device_dma_offset_for_host_access = HOST_PHYS_BASE; in gaudi_set_fixed_properties()
581 prop->host_base_address = HOST_PHYS_BASE; in gaudi_set_fixed_properties()
582 prop->host_end_address = prop->host_base_address + HOST_PHYS_SIZE; in gaudi_set_fixed_properties()
583 prop->completion_queues_count = NUMBER_OF_CMPLT_QUEUES; in gaudi_set_fixed_properties()
584 prop->completion_mode = HL_COMPLETION_MODE_JOB; in gaudi_set_fixed_properties()
585 prop->collective_first_sob = 0; in gaudi_set_fixed_properties()
586 prop->collective_first_mon = 0; in gaudi_set_fixed_properties()
589 prop->sync_stream_first_sob = in gaudi_set_fixed_properties()
596 prop->sync_stream_first_mon = in gaudi_set_fixed_properties()
600 prop->dram_base_address = DRAM_PHYS_BASE; in gaudi_set_fixed_properties()
601 prop->dram_size = GAUDI_HBM_SIZE_32GB; in gaudi_set_fixed_properties()
602 prop->dram_end_address = prop->dram_base_address + prop->dram_size; in gaudi_set_fixed_properties()
603 prop->dram_user_base_address = DRAM_BASE_ADDR_USER; in gaudi_set_fixed_properties()
605 prop->sram_base_address = SRAM_BASE_ADDR; in gaudi_set_fixed_properties()
606 prop->sram_size = SRAM_SIZE; in gaudi_set_fixed_properties()
607 prop->sram_end_address = prop->sram_base_address + prop->sram_size; in gaudi_set_fixed_properties()
608 prop->sram_user_base_address = in gaudi_set_fixed_properties()
609 prop->sram_base_address + SRAM_USER_BASE_OFFSET; in gaudi_set_fixed_properties()
611 prop->mmu_cache_mng_addr = MMU_CACHE_MNG_ADDR; in gaudi_set_fixed_properties()
612 prop->mmu_cache_mng_size = MMU_CACHE_MNG_SIZE; in gaudi_set_fixed_properties()
614 prop->mmu_pgt_addr = MMU_PAGE_TABLES_ADDR; in gaudi_set_fixed_properties()
615 if (hdev->pldm) in gaudi_set_fixed_properties()
616 prop->mmu_pgt_size = 0x800000; /* 8MB */ in gaudi_set_fixed_properties()
618 prop->mmu_pgt_size = MMU_PAGE_TABLES_SIZE; in gaudi_set_fixed_properties()
619 prop->mmu_pte_size = HL_PTE_SIZE; in gaudi_set_fixed_properties()
620 prop->mmu_hop_table_size = HOP_TABLE_SIZE_512_PTE; in gaudi_set_fixed_properties()
621 prop->mmu_hop0_tables_total_size = HOP0_512_PTE_TABLES_TOTAL_SIZE; in gaudi_set_fixed_properties()
622 prop->dram_page_size = PAGE_SIZE_2MB; in gaudi_set_fixed_properties()
623 prop->device_mem_alloc_default_page_size = prop->dram_page_size; in gaudi_set_fixed_properties()
624 prop->dram_supports_virtual_memory = false; in gaudi_set_fixed_properties()
626 prop->pmmu.hop_shifts[MMU_HOP0] = MMU_V1_1_HOP0_SHIFT; in gaudi_set_fixed_properties()
627 prop->pmmu.hop_shifts[MMU_HOP1] = MMU_V1_1_HOP1_SHIFT; in gaudi_set_fixed_properties()
628 prop->pmmu.hop_shifts[MMU_HOP2] = MMU_V1_1_HOP2_SHIFT; in gaudi_set_fixed_properties()
629 prop->pmmu.hop_shifts[MMU_HOP3] = MMU_V1_1_HOP3_SHIFT; in gaudi_set_fixed_properties()
630 prop->pmmu.hop_shifts[MMU_HOP4] = MMU_V1_1_HOP4_SHIFT; in gaudi_set_fixed_properties()
631 prop->pmmu.hop_masks[MMU_HOP0] = MMU_V1_1_HOP0_MASK; in gaudi_set_fixed_properties()
632 prop->pmmu.hop_masks[MMU_HOP1] = MMU_V1_1_HOP1_MASK; in gaudi_set_fixed_properties()
633 prop->pmmu.hop_masks[MMU_HOP2] = MMU_V1_1_HOP2_MASK; in gaudi_set_fixed_properties()
634 prop->pmmu.hop_masks[MMU_HOP3] = MMU_V1_1_HOP3_MASK; in gaudi_set_fixed_properties()
635 prop->pmmu.hop_masks[MMU_HOP4] = MMU_V1_1_HOP4_MASK; in gaudi_set_fixed_properties()
636 prop->pmmu.start_addr = VA_HOST_SPACE_START; in gaudi_set_fixed_properties()
637 prop->pmmu.end_addr = in gaudi_set_fixed_properties()
638 (VA_HOST_SPACE_START + VA_HOST_SPACE_SIZE / 2) - 1; in gaudi_set_fixed_properties()
639 prop->pmmu.page_size = PAGE_SIZE_4KB; in gaudi_set_fixed_properties()
640 prop->pmmu.num_hops = MMU_ARCH_5_HOPS; in gaudi_set_fixed_properties()
641 prop->pmmu.last_mask = LAST_MASK; in gaudi_set_fixed_properties()
642 /* TODO: will be duplicated until implementing per-MMU props */ in gaudi_set_fixed_properties()
643 prop->pmmu.hop_table_size = prop->mmu_hop_table_size; in gaudi_set_fixed_properties()
644 prop->pmmu.hop0_tables_total_size = prop->mmu_hop0_tables_total_size; in gaudi_set_fixed_properties()
647 memcpy(&prop->pmmu_huge, &prop->pmmu, sizeof(prop->pmmu)); in gaudi_set_fixed_properties()
648 prop->pmmu_huge.page_size = PAGE_SIZE_2MB; in gaudi_set_fixed_properties()
651 memcpy(&prop->dmmu, &prop->pmmu, sizeof(prop->pmmu)); in gaudi_set_fixed_properties()
652 prop->dmmu.start_addr = (VA_HOST_SPACE_START + VA_HOST_SPACE_SIZE / 2); in gaudi_set_fixed_properties()
653 prop->dmmu.end_addr = VA_HOST_SPACE_END; in gaudi_set_fixed_properties()
654 prop->dmmu.page_size = PAGE_SIZE_2MB; in gaudi_set_fixed_properties()
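The hop_shifts/hop_masks pairs fully describe how a virtual address is chopped into per-hop PTE indices during the MMU_ARCH_5_HOPS walk: index_i = (va & hop_masks[i]) >> hop_shifts[i]. A standalone sketch of that decomposition (the concrete MMU_V1_1 values are not reproduced here):

#include <stdint.h>

#define MMU_HOPS	5

struct mmu_layout {
	uint64_t hop_masks[MMU_HOPS];
	unsigned int hop_shifts[MMU_HOPS];
};

/* PTE index used to walk hop 'hop' of the page-table tree */
static inline uint64_t hop_pte_idx(const struct mmu_layout *m,
				   uint64_t va, unsigned int hop)
{
	return (va & m->hop_masks[hop]) >> m->hop_shifts[hop];
}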
656 prop->cfg_size = CFG_SIZE; in gaudi_set_fixed_properties()
657 prop->max_asid = MAX_ASID; in gaudi_set_fixed_properties()
658 prop->num_of_events = GAUDI_EVENT_SIZE; in gaudi_set_fixed_properties()
659 prop->tpc_enabled_mask = TPC_ENABLED_MASK; in gaudi_set_fixed_properties()
663 prop->cb_pool_cb_cnt = GAUDI_CB_POOL_CB_CNT; in gaudi_set_fixed_properties()
664 prop->cb_pool_cb_size = GAUDI_CB_POOL_CB_SIZE; in gaudi_set_fixed_properties()
666 prop->pcie_dbi_base_address = mmPCIE_DBI_BASE; in gaudi_set_fixed_properties()
667 prop->pcie_aux_dbi_reg_addr = CFG_BASE + mmPCIE_AUX_DBI; in gaudi_set_fixed_properties()
669 strncpy(prop->cpucp_info.card_name, GAUDI_DEFAULT_CARD_NAME, in gaudi_set_fixed_properties()
672 prop->max_pending_cs = GAUDI_MAX_PENDING_CS; in gaudi_set_fixed_properties()
674 prop->first_available_user_sob[HL_GAUDI_WS_DCORE] = in gaudi_set_fixed_properties()
675 prop->sync_stream_first_sob + in gaudi_set_fixed_properties()
677 prop->first_available_user_mon[HL_GAUDI_WS_DCORE] = in gaudi_set_fixed_properties()
678 prop->sync_stream_first_mon + in gaudi_set_fixed_properties()
681 prop->first_available_user_interrupt = USHRT_MAX; in gaudi_set_fixed_properties()
684 prop->first_available_cq[i] = USHRT_MAX; in gaudi_set_fixed_properties()
686 prop->fw_cpu_boot_dev_sts0_valid = false; in gaudi_set_fixed_properties()
687 prop->fw_cpu_boot_dev_sts1_valid = false; in gaudi_set_fixed_properties()
688 prop->hard_reset_done_by_fw = false; in gaudi_set_fixed_properties()
689 prop->gic_interrupts_enable = true; in gaudi_set_fixed_properties()
691 prop->server_type = HL_SERVER_TYPE_UNKNOWN; in gaudi_set_fixed_properties()
693 prop->clk_pll_index = HL_GAUDI_MME_PLL; in gaudi_set_fixed_properties()
694 prop->max_freq_value = GAUDI_MAX_CLK_FREQ; in gaudi_set_fixed_properties()
696 prop->use_get_power_for_reset_history = true; in gaudi_set_fixed_properties()
698 prop->configurable_stop_on_err = true; in gaudi_set_fixed_properties()
700 prop->set_max_power_on_device_init = true; in gaudi_set_fixed_properties()
702 prop->dma_mask = 48; in gaudi_set_fixed_properties()
717 hdev->rmmio = hdev->pcie_bar[CFG_BAR_ID] + in gaudi_pci_bars_map()
718 (CFG_BASE - SPI_FLASH_BASE_ADDR); in gaudi_pci_bars_map()
725 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_set_hbm_bar_base()
730 if ((gaudi) && (gaudi->hbm_bar_cur_addr == addr)) in gaudi_set_hbm_bar_base()
733 if (hdev->asic_prop.iatu_done_by_fw) in gaudi_set_hbm_bar_base()
736 /* Inbound Region 2 - Bar 4 - Point to HBM */ in gaudi_set_hbm_bar_base()
745 old_addr = gaudi->hbm_bar_cur_addr; in gaudi_set_hbm_bar_base()
746 gaudi->hbm_bar_cur_addr = addr; in gaudi_set_hbm_bar_base()
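gaudi_set_hbm_bar_base() is one half of a sliding-window scheme: BAR 4 is far smaller than HBM, so the driver retargets inbound iATU region 2 at a new base and then accesses device memory at (addr - window_base) inside the BAR. A hedged sketch of a read helper built on such a window (the struct and hbm_window_move() are invented for illustration; the size is assumed to be a power of two):

#include <stdint.h>

struct hbm_window {
	void *bar_vaddr;	/* kernel mapping of the HBM BAR */
	uint64_t cur_base;	/* device address the window points at */
	uint64_t size;		/* BAR size */
};

/* assumed helper that reprograms the iATU, as the driver does */
int hbm_window_move(struct hbm_window *w, uint64_t new_base);

static int hbm_read32(struct hbm_window *w, uint64_t addr, uint32_t *val)
{
	if (addr < w->cur_base || addr >= w->cur_base + w->size) {
		int rc = hbm_window_move(w, addr & ~(w->size - 1));

		if (rc)
			return rc;
	}

	*val = *(volatile uint32_t *)((char *)w->bar_vaddr +
				      (addr - w->cur_base));
	return 0;
}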
758 if (hdev->asic_prop.iatu_done_by_fw) in gaudi_init_iatu()
761 /* Inbound Region 0 - Bar 0 - Point to SRAM + CFG */ in gaudi_init_iatu()
769 /* Inbound Region 1 - Bar 2 - Point to SPI FLASH */ in gaudi_init_iatu()
777 /* Inbound Region 2 - Bar 4 - Point to HBM */ in gaudi_init_iatu()
785 /* Outbound Region 0 - Point to Host */ in gaudi_init_iatu()
801 struct asic_fixed_properties *prop = &hdev->asic_prop; in gaudi_early_init()
802 struct pci_dev *pdev = hdev->pdev; in gaudi_early_init()
809 dev_err(hdev->dev, "Failed setting fixed properties\n"); in gaudi_early_init()
817 dev_err(hdev->dev, "Not " HL_NAME "? BAR %d size %pa, expecting %llu\n", in gaudi_early_init()
819 rc = -ENODEV; in gaudi_early_init()
826 dev_err(hdev->dev, "Not " HL_NAME "? BAR %d size %pa, expecting %llu\n", in gaudi_early_init()
828 rc = -ENODEV; in gaudi_early_init()
832 prop->dram_pci_bar_size = pci_resource_len(pdev, HBM_BAR_ID); in gaudi_early_init()
833 hdev->dram_pci_bar_start = pci_resource_start(pdev, HBM_BAR_ID); in gaudi_early_init()
836 if (hdev->asic_prop.fw_security_enabled) { in gaudi_early_init()
837 hdev->asic_prop.iatu_done_by_fw = true; in gaudi_early_init()
840 * GIC-security-bit can ONLY be set by CPUCP, so in this stage in gaudi_early_init()
843 hdev->asic_prop.gic_interrupts_enable = false; in gaudi_early_init()
855 hdev->asic_prop.iatu_done_by_fw = true; in gaudi_early_init()
863 * version to determine whether we run with a security-enabled firmware in gaudi_early_init()
867 if (hdev->reset_on_preboot_fail) in gaudi_early_init()
868 hdev->asic_funcs->hw_fini(hdev, true, false); in gaudi_early_init()
873 dev_dbg(hdev->dev, "H/W state is dirty, must reset before initializing\n"); in gaudi_early_init()
874 hdev->asic_funcs->hw_fini(hdev, true, false); in gaudi_early_init()
882 kfree(hdev->asic_prop.hw_queues_props); in gaudi_early_init()
888 kfree(hdev->asic_prop.hw_queues_props); in gaudi_early_fini()
895 * gaudi_fetch_psoc_frequency - Fetch PSOC frequency values
903 struct asic_fixed_properties *prop = &hdev->asic_prop; in gaudi_fetch_psoc_frequency()
907 if ((hdev->fw_components & FW_TYPE_LINUX) && in gaudi_fetch_psoc_frequency()
908 (prop->fw_app_cpu_boot_dev_sts0 & CPU_BOOT_DEV_STS0_PLL_INFO_EN)) { in gaudi_fetch_psoc_frequency()
909 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_fetch_psoc_frequency()
911 if (!(gaudi->hw_cap_initialized & HW_CAP_CPU_Q)) in gaudi_fetch_psoc_frequency()
943 dev_warn(hdev->dev, "Received invalid div select value: %#x", div_sel); in gaudi_fetch_psoc_frequency()
948 prop->psoc_timestamp_frequency = freq; in gaudi_fetch_psoc_frequency()
949 prop->psoc_pci_pll_nr = nr; in gaudi_fetch_psoc_frequency()
950 prop->psoc_pci_pll_nf = nf; in gaudi_fetch_psoc_frequency()
951 prop->psoc_pci_pll_od = od; in gaudi_fetch_psoc_frequency()
952 prop->psoc_pci_pll_div_factor = div_fctr; in gaudi_fetch_psoc_frequency()
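The nr/nf/od/div_fctr values stored above are the classic integer-PLL knobs. Assuming the textbook relation (an assumption, not taken from this extract; the driver's actual div_sel handling has more cases, as the invalid-value warning hints), the derived frequency is ref * (nf + 1) / ((nr + 1) * (od + 1) * (div_fctr + 1)):

#include <stdint.h>

#define PLL_REF_CLK_HZ	50000000ull	/* assumed reference clock */

static uint64_t pll_freq_hz(uint32_t nr, uint32_t nf, uint32_t od,
			    uint32_t div_fctr)
{
	/* freq = ref * (nf+1) / ((nr+1) * (od+1) * (div_fctr+1)) */
	return PLL_REF_CLK_HZ * (nf + 1) /
	       ((uint64_t)(nr + 1) * (od + 1) * (div_fctr + 1));
}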
960 struct asic_fixed_properties *prop = &hdev->asic_prop; in _gaudi_init_tpc_mem()
971 return -EFAULT; in _gaudi_init_tpc_mem()
973 init_tpc_mem_pkt = cb->kernel_address; in _gaudi_init_tpc_mem()
977 init_tpc_mem_pkt->tsize = cpu_to_le32(tpc_kernel_size); in _gaudi_init_tpc_mem()
984 init_tpc_mem_pkt->ctl = cpu_to_le32(ctl); in _gaudi_init_tpc_mem()
986 init_tpc_mem_pkt->src_addr = cpu_to_le64(tpc_kernel_src_addr); in _gaudi_init_tpc_mem()
990 round_up(prop->sram_user_base_address, SZ_8K)); in _gaudi_init_tpc_mem()
991 init_tpc_mem_pkt->dst_addr |= cpu_to_le64(dst_addr); in _gaudi_init_tpc_mem()
995 dev_err(hdev->dev, "Failed to allocate a new job\n"); in _gaudi_init_tpc_mem()
996 rc = -ENOMEM; in _gaudi_init_tpc_mem()
1000 job->id = 0; in _gaudi_init_tpc_mem()
1001 job->user_cb = cb; in _gaudi_init_tpc_mem()
1002 atomic_inc(&job->user_cb->cs_cnt); in _gaudi_init_tpc_mem()
1003 job->user_cb_size = cb_size; in _gaudi_init_tpc_mem()
1004 job->hw_queue_id = GAUDI_QUEUE_ID_DMA_0_0; in _gaudi_init_tpc_mem()
1005 job->patched_cb = job->user_cb; in _gaudi_init_tpc_mem()
1006 job->job_cb_size = job->user_cb_size + sizeof(struct packet_msg_prot); in _gaudi_init_tpc_mem()
1022 hl_userptr_delete_list(hdev, &job->userptr_list); in _gaudi_init_tpc_mem()
1025 atomic_dec(&cb->cs_cnt); in _gaudi_init_tpc_mem()
1029 hl_cb_destroy(&hdev->kernel_mem_mgr, cb->buf->handle); in _gaudi_init_tpc_mem()
1035 * gaudi_init_tpc_mem() - Initialize TPC memories.
1038 * Copy TPC kernel fw from firmware file and run it to initialize TPC memories.
1051 rc = request_firmware(&fw, GAUDI_TPC_FW_FILE, hdev->dev); in gaudi_init_tpc_mem()
1052 if (rc == -EINTR && count-- > 0) { in gaudi_init_tpc_mem()
1058 dev_err(hdev->dev, "Failed to load firmware file %s\n", in gaudi_init_tpc_mem()
1063 fw_size = fw->size; in gaudi_init_tpc_mem()
1066 dev_err(hdev->dev, in gaudi_init_tpc_mem()
1069 rc = -ENOMEM; in gaudi_init_tpc_mem()
1073 memcpy(cpu_addr, fw->data, fw_size); in gaudi_init_tpc_mem()
1077 hl_asic_dma_free_coherent(hdev, fw->size, cpu_addr, dma_handle); in gaudi_init_tpc_mem()
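The `rc == -EINTR && count-- > 0` test above is a bounded retry: request_firmware() can be interrupted by a signal, so the driver sleeps briefly and tries again a limited number of times. The shape of that loop as a minimal kernel-style sketch (helper name is illustrative; the APIs are the real <linux/firmware.h> ones):

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/firmware.h>

static int load_fw_retry(struct device *dev, const char *name,
			 const struct firmware **fw)
{
	int rc, count = 10;	/* bounded number of retries */

again:
	rc = request_firmware(fw, name, dev);
	if (rc == -EINTR && count-- > 0) {
		msleep(50);
		goto again;
	}

	return rc;
}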
1086 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_collective_map_sobs()
1087 struct gaudi_collective_properties *prop = &gaudi->collective_props; in gaudi_collective_map_sobs()
1093 stream * HL_RSVD_SOBS + prop->curr_sob_group_idx[stream]; in gaudi_collective_map_sobs()
1094 sob_id = prop->hw_sob_group[sob_group_id].base_sob_id; in gaudi_collective_map_sobs()
1098 q = &hdev->kernel_queues[queue_id + (4 * i)]; in gaudi_collective_map_sobs()
1099 q->sync_stream_prop.collective_sob_id = sob_id + i; in gaudi_collective_map_sobs()
1106 q = &hdev->kernel_queues[queue_id]; in gaudi_collective_map_sobs()
1107 q->sync_stream_prop.collective_sob_id = in gaudi_collective_map_sobs()
1111 q = &hdev->kernel_queues[queue_id]; in gaudi_collective_map_sobs()
1112 q->sync_stream_prop.collective_sob_id = in gaudi_collective_map_sobs()
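The mapping arithmetic above is worth spelling out: each stream owns HL_RSVD_SOBS groups, the active one is stream * HL_RSVD_SOBS + curr_sob_group_idx[stream], and queue i within that group takes base_sob_id + i. For example, with HL_RSVD_SOBS == 4, stream 2 at current index 1 lands on group 9:

/* e.g. stream 2, curr idx 1, rsvd_sobs 4 -> group 2*4 + 1 == 9 */
static inline unsigned int sob_group(unsigned int stream,
				     unsigned int curr_idx,
				     unsigned int rsvd_sobs)
{
	return stream * rsvd_sobs + curr_idx;
}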
1120 struct hl_device *hdev = hw_sob_group->hdev; in gaudi_sob_group_hw_reset()
1125 (hw_sob_group->base_sob_id * 4) + (i * 4)), 0); in gaudi_sob_group_hw_reset()
1127 kref_init(&hw_sob_group->kref); in gaudi_sob_group_hw_reset()
1134 struct hl_device *hdev = hw_sob_group->hdev; in gaudi_sob_group_reset_error()
1136 dev_crit(hdev->dev, in gaudi_sob_group_reset_error()
1138 hw_sob_group->base_sob_id); in gaudi_sob_group_reset_error()
1146 prop = &gaudi->collective_props; in gaudi_collective_mstr_sob_mask_set()
1148 memset(prop->mstr_sob_mask, 0, sizeof(prop->mstr_sob_mask)); in gaudi_collective_mstr_sob_mask_set()
1151 if (gaudi->hw_cap_initialized & BIT(HW_CAP_NIC_SHIFT + i)) in gaudi_collective_mstr_sob_mask_set()
1152 prop->mstr_sob_mask[i / HL_MAX_SOBS_PER_MONITOR] |= in gaudi_collective_mstr_sob_mask_set()
1155 prop->mstr_sob_mask[i / HL_MAX_SOBS_PER_MONITOR] |= in gaudi_collective_mstr_sob_mask_set()
1165 gaudi = hdev->asic_specific; in gaudi_collective_init()
1166 prop = &gaudi->collective_props; in gaudi_collective_init()
1167 sob_id = hdev->asic_prop.collective_first_sob; in gaudi_collective_init()
1175 prop->hw_sob_group[i].hdev = hdev; in gaudi_collective_init()
1176 prop->hw_sob_group[i].base_sob_id = sob_id; in gaudi_collective_init()
1178 gaudi_sob_group_hw_reset(&prop->hw_sob_group[i].kref); in gaudi_collective_init()
1182 prop->next_sob_group_val[i] = 1; in gaudi_collective_init()
1183 prop->curr_sob_group_idx[i] = 0; in gaudi_collective_init()
1194 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_reset_sob_group()
1195 struct gaudi_collective_properties *cprop = &gaudi->collective_props; in gaudi_reset_sob_group()
1197 kref_put(&cprop->hw_sob_group[sob_group].kref, in gaudi_reset_sob_group()
1210 gaudi = hdev->asic_specific; in gaudi_collective_master_init_job()
1211 cprop = &gaudi->collective_props; in gaudi_collective_master_init_job()
1212 queue_id = job->hw_queue_id; in gaudi_collective_master_init_job()
1213 prop = &hdev->kernel_queues[queue_id].sync_stream_prop; in gaudi_collective_master_init_job()
1216 cprop->hw_sob_group[sob_group_offset].base_sob_id; in gaudi_collective_master_init_job()
1217 master_monitor = prop->collective_mstr_mon_id[0]; in gaudi_collective_master_init_job()
1219 cprop->hw_sob_group[sob_group_offset].queue_id = queue_id; in gaudi_collective_master_init_job()
1221 dev_dbg(hdev->dev, in gaudi_collective_master_init_job()
1223 master_sob_base, cprop->mstr_sob_mask[0], in gaudi_collective_master_init_job()
1224 cprop->next_sob_group_val[stream], in gaudi_collective_master_init_job()
1227 wait_prop.data = (void *) job->patched_cb; in gaudi_collective_master_init_job()
1229 wait_prop.sob_mask = cprop->mstr_sob_mask[0]; in gaudi_collective_master_init_job()
1230 wait_prop.sob_val = cprop->next_sob_group_val[stream]; in gaudi_collective_master_init_job()
1237 master_monitor = prop->collective_mstr_mon_id[1]; in gaudi_collective_master_init_job()
1239 dev_dbg(hdev->dev, in gaudi_collective_master_init_job()
1241 master_sob_base, cprop->mstr_sob_mask[1], in gaudi_collective_master_init_job()
1242 cprop->next_sob_group_val[stream], in gaudi_collective_master_init_job()
1246 wait_prop.sob_mask = cprop->mstr_sob_mask[1]; in gaudi_collective_master_init_job()
1259 queue_id = job->hw_queue_id; in gaudi_collective_slave_init_job()
1260 prop = &hdev->kernel_queues[queue_id].sync_stream_prop; in gaudi_collective_slave_init_job()
1262 if (job->cs->encaps_signals) { in gaudi_collective_slave_init_job()
1267 hl_hw_queue_encaps_sig_set_sob_info(hdev, job->cs, job, in gaudi_collective_slave_init_job()
1270 dev_dbg(hdev->dev, "collective wait: Sequence %llu found, sob_id: %u, wait for sob_val: %u\n", in gaudi_collective_slave_init_job()
1271 job->cs->sequence, in gaudi_collective_slave_init_job()
1272 cs_cmpl->hw_sob->sob_id, in gaudi_collective_slave_init_job()
1273 cs_cmpl->sob_val); in gaudi_collective_slave_init_job()
1277 wait_prop.data = (void *) job->user_cb; in gaudi_collective_slave_init_job()
1278 wait_prop.sob_base = cs_cmpl->hw_sob->sob_id; in gaudi_collective_slave_init_job()
1280 wait_prop.sob_val = cs_cmpl->sob_val; in gaudi_collective_slave_init_job()
1281 wait_prop.mon_id = prop->collective_slave_mon_id; in gaudi_collective_slave_init_job()
1285 dev_dbg(hdev->dev, in gaudi_collective_slave_init_job()
1287 cs_cmpl->hw_sob->sob_id, cs_cmpl->sob_val, in gaudi_collective_slave_init_job()
1288 prop->collective_slave_mon_id, queue_id); in gaudi_collective_slave_init_job()
1292 dev_dbg(hdev->dev, in gaudi_collective_slave_init_job()
1294 prop->collective_sob_id, queue_id); in gaudi_collective_slave_init_job()
1296 cb_size += gaudi_gen_signal_cb(hdev, job->user_cb, in gaudi_collective_slave_init_job()
1297 prop->collective_sob_id, cb_size, false); in gaudi_collective_slave_init_job()
1303 container_of(cs->signal_fence, struct hl_cs_compl, base_fence); in gaudi_collective_wait_init_cs()
1305 container_of(cs->fence, struct hl_cs_compl, base_fence); in gaudi_collective_wait_init_cs()
1306 struct hl_cs_encaps_sig_handle *handle = cs->encaps_sig_hdl; in gaudi_collective_wait_init_cs()
1314 ctx = cs->ctx; in gaudi_collective_wait_init_cs()
1315 hdev = ctx->hdev; in gaudi_collective_wait_init_cs()
1316 gaudi = hdev->asic_specific; in gaudi_collective_wait_init_cs()
1317 cprop = &gaudi->collective_props; in gaudi_collective_wait_init_cs()
1319 if (cs->encaps_signals) { in gaudi_collective_wait_init_cs()
1320 cs_cmpl->hw_sob = handle->hw_sob; in gaudi_collective_wait_init_cs()
1327 cs_cmpl->sob_val = 0; in gaudi_collective_wait_init_cs()
1330 cs_cmpl->hw_sob = signal_cs_cmpl->hw_sob; in gaudi_collective_wait_init_cs()
1331 cs_cmpl->sob_val = signal_cs_cmpl->sob_val; in gaudi_collective_wait_init_cs()
1346 spin_lock(&signal_cs_cmpl->lock); in gaudi_collective_wait_init_cs()
1348 if (completion_done(&cs->signal_fence->completion)) { in gaudi_collective_wait_init_cs()
1349 spin_unlock(&signal_cs_cmpl->lock); in gaudi_collective_wait_init_cs()
1350 return -EINVAL; in gaudi_collective_wait_init_cs()
1353 kref_get(&cs_cmpl->hw_sob->kref); in gaudi_collective_wait_init_cs()
1355 spin_unlock(&signal_cs_cmpl->lock); in gaudi_collective_wait_init_cs()
1358 job = list_first_entry(&cs->job_list, struct hl_cs_job, cs_node); in gaudi_collective_wait_init_cs()
1359 stream = job->hw_queue_id % 4; in gaudi_collective_wait_init_cs()
1361 stream * HL_RSVD_SOBS + cprop->curr_sob_group_idx[stream]; in gaudi_collective_wait_init_cs()
1363 list_for_each_entry(job, &cs->job_list, cs_node) { in gaudi_collective_wait_init_cs()
1364 queue_id = job->hw_queue_id; in gaudi_collective_wait_init_cs()
1366 if (hdev->kernel_queues[queue_id].collective_mode == in gaudi_collective_wait_init_cs()
1374 cs_cmpl->sob_group = sob_group_offset; in gaudi_collective_wait_init_cs()
1377 kref_get(&cprop->hw_sob_group[sob_group_offset].kref); in gaudi_collective_wait_init_cs()
1378 cprop->next_sob_group_val[stream]++; in gaudi_collective_wait_init_cs()
1380 if (cprop->next_sob_group_val[stream] == HL_MAX_SOB_VAL) { in gaudi_collective_wait_init_cs()
1386 kref_put(&cprop->hw_sob_group[sob_group_offset].kref, in gaudi_collective_wait_init_cs()
1388 cprop->next_sob_group_val[stream] = 1; in gaudi_collective_wait_init_cs()
1390 cprop->curr_sob_group_idx[stream] = in gaudi_collective_wait_init_cs()
1391 (cprop->curr_sob_group_idx[stream] + 1) & in gaudi_collective_wait_init_cs()
1392 (HL_RSVD_SOBS - 1); in gaudi_collective_wait_init_cs()
1396 dev_dbg(hdev->dev, "switched to SOB group %d, stream: %d\n", in gaudi_collective_wait_init_cs()
1397 cprop->curr_sob_group_idx[stream], stream); in gaudi_collective_wait_init_cs()
1401 hl_fence_put(cs->signal_fence); in gaudi_collective_wait_init_cs()
1402 cs->signal_fence = NULL; in gaudi_collective_wait_init_cs()
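The tail of this function keeps each stream's SOB groups in a small ring: the group value counts up per collective wait, and once it reaches HL_MAX_SOB_VAL the driver drops its reference (so the exhausted group can be reset once idle), restarts the value at 1, and advances the index modulo HL_RSVD_SOBS. A compact model of that state machine, with simplified types and an assumed counter limit:

struct sob_ring {
	unsigned int next_val;		/* value the next wait will target */
	unsigned int curr_idx;		/* active group within the ring */
};

#define RSVD_SOBS	4		/* power of two, so & works as modulo */
#define MAX_SOB_VAL	(1 << 15)	/* assumed SOB counter limit */

static void sob_ring_advance(struct sob_ring *r)
{
	if (++r->next_val == MAX_SOB_VAL) {
		/* the real driver also kref_put()s the exhausted group here */
		r->next_val = 1;
		r->curr_idx = (r->curr_idx + 1) & (RSVD_SOBS - 1);
	}
}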
1415 return cacheline_end - user_cb_size + additional_commands; in gaudi_get_patched_cb_extra_size()
1432 cntr = &hdev->aggregated_cs_counters; in gaudi_collective_wait_create_job()
1457 hw_queue_prop = &hdev->asic_prop.hw_queues_props[queue_id]; in gaudi_collective_wait_create_job()
1458 job = hl_cs_allocate_job(hdev, hw_queue_prop->type, true); in gaudi_collective_wait_create_job()
1460 atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt); in gaudi_collective_wait_create_job()
1461 atomic64_inc(&cntr->out_of_mem_drop_cnt); in gaudi_collective_wait_create_job()
1462 dev_err(hdev->dev, "Failed to allocate a new job\n"); in gaudi_collective_wait_create_job()
1463 return -ENOMEM; in gaudi_collective_wait_create_job()
1468 hdev->mmu_enable && !patched_cb); in gaudi_collective_wait_create_job()
1470 atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt); in gaudi_collective_wait_create_job()
1471 atomic64_inc(&cntr->out_of_mem_drop_cnt); in gaudi_collective_wait_create_job()
1473 return -EFAULT; in gaudi_collective_wait_create_job()
1476 job->id = 0; in gaudi_collective_wait_create_job()
1477 job->cs = cs; in gaudi_collective_wait_create_job()
1478 job->user_cb = cb; in gaudi_collective_wait_create_job()
1479 atomic_inc(&job->user_cb->cs_cnt); in gaudi_collective_wait_create_job()
1480 job->user_cb_size = cb_size; in gaudi_collective_wait_create_job()
1481 job->hw_queue_id = queue_id; in gaudi_collective_wait_create_job()
1487 if (cs->encaps_signals) in gaudi_collective_wait_create_job()
1488 job->encaps_sig_wait_offset = encaps_signal_offset; in gaudi_collective_wait_create_job()
1492 * We call hl_cb_destroy() for two reasons - we don't need in gaudi_collective_wait_create_job()

1497 job->patched_cb = job->user_cb; in gaudi_collective_wait_create_job()
1499 job->patched_cb = NULL; in gaudi_collective_wait_create_job()
1501 job->job_cb_size = job->user_cb_size; in gaudi_collective_wait_create_job()
1502 hl_cb_destroy(&hdev->kernel_mem_mgr, cb->buf->handle); in gaudi_collective_wait_create_job()
1505 if (hw_queue_prop->type == QUEUE_TYPE_EXT) in gaudi_collective_wait_create_job()
1508 cs->jobs_in_queue_cnt[job->hw_queue_id]++; in gaudi_collective_wait_create_job()
1510 list_add_tail(&job->cs_node, &cs->job_list); in gaudi_collective_wait_create_job()
1522 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_collective_wait_create_jobs()
1530 hw_queue_prop = &hdev->asic_prop.hw_queues_props[wait_queue_id]; in gaudi_collective_wait_create_jobs()
1531 if (!(hw_queue_prop->collective_mode == HL_COLLECTIVE_MASTER)) { in gaudi_collective_wait_create_jobs()
1532 dev_err(hdev->dev, in gaudi_collective_wait_create_jobs()
1535 return -EINVAL; in gaudi_collective_wait_create_jobs()
1541 dev_err(hdev->dev, in gaudi_collective_wait_create_jobs()
1544 return -EINVAL; in gaudi_collective_wait_create_jobs()
1560 * First monitor for NICs 0-7, second monitor for NICs 8-9 and the in gaudi_collective_wait_create_jobs()
1564 * all wait for the user to signal sob 'cs_cmpl->sob_val'. in gaudi_collective_wait_create_jobs()
1574 if (gaudi->hw_cap_initialized & in gaudi_collective_wait_create_jobs()
1604 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_late_init()
1607 rc = gaudi->cpucp_info_get(hdev); in gaudi_late_init()
1609 dev_err(hdev->dev, "Failed to get cpucp info\n"); in gaudi_late_init()
1613 if ((hdev->card_type == cpucp_card_type_pci) && in gaudi_late_init()
1614 (hdev->nic_ports_mask & 0x3)) { in gaudi_late_init()
1615 dev_info(hdev->dev, in gaudi_late_init()
1617 hdev->nic_ports_mask &= ~0x3; in gaudi_late_init()
1631 gaudi->hw_cap_initialized &= ~(HW_CAP_NIC0 | HW_CAP_NIC1); in gaudi_late_init()
1636 dev_err(hdev->dev, "Failed to enable PCI access from CPU\n"); in gaudi_late_init()
1641 rc = hdev->asic_funcs->scrub_device_mem(hdev); in gaudi_late_init()
1647 dev_err(hdev->dev, "Failed to fetch psoc frequency\n"); in gaudi_late_init()
1653 dev_err(hdev->dev, "Failed to clear MMU page tables range\n"); in gaudi_late_init()
1659 dev_err(hdev->dev, "Failed to initialize TPC memories\n"); in gaudi_late_init()
1665 dev_err(hdev->dev, "Failed to init collective\n"); in gaudi_late_init()
1696 * The device CPU works with 40-bit addresses, while bit 39 must be set in gaudi_alloc_cpu_accessible_dma_mem()
1709 rc = -ENOMEM; in gaudi_alloc_cpu_accessible_dma_mem()
1713 end_addr = dma_addr_arr[i] + HL_CPU_ACCESSIBLE_MEM_SIZE - 1; in gaudi_alloc_cpu_accessible_dma_mem()
1720 dev_err(hdev->dev, in gaudi_alloc_cpu_accessible_dma_mem()
1722 rc = -EFAULT; in gaudi_alloc_cpu_accessible_dma_mem()
1726 hdev->cpu_accessible_dma_mem = virt_addr_arr[i]; in gaudi_alloc_cpu_accessible_dma_mem()
1727 hdev->cpu_accessible_dma_address = dma_addr_arr[i]; in gaudi_alloc_cpu_accessible_dma_mem()
1728 hdev->cpu_pci_msb_addr = in gaudi_alloc_cpu_accessible_dma_mem()
1729 GAUDI_CPU_PCI_MSB_ADDR(hdev->cpu_accessible_dma_address); in gaudi_alloc_cpu_accessible_dma_mem()
1731 if (!hdev->asic_prop.fw_security_enabled) in gaudi_alloc_cpu_accessible_dma_mem()
1732 GAUDI_PCI_TO_CPU_ADDR(hdev->cpu_accessible_dma_address); in gaudi_alloc_cpu_accessible_dma_mem()
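Because the device CPU only sees 40-bit addresses, the allocation loop above computes end_addr and keeps retrying until the whole block shares one value of the upper PCI address bits, which GAUDI_CPU_PCI_MSB_ADDR() extracts. A sketch of that acceptance test (the macro body below is an assumption about what "MSB" means here):

#include <stdbool.h>
#include <stdint.h>

#define CPU_ADDR_BITS	40
/* assumed: everything above the CPU-visible 40 bits */
#define PCI_MSB(addr)	((uint64_t)(addr) >> CPU_ADDR_BITS)

static bool block_cpu_addressable(uint64_t dma_addr, uint64_t size)
{
	uint64_t end = dma_addr + size - 1;

	/* accept only if start and end fall in the same 1 TB aperture */
	return PCI_MSB(dma_addr) == PCI_MSB(end);
}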
1744 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_free_internal_qmans_pq_mem()
1749 q = &gaudi->internal_qmans[i]; in gaudi_free_internal_qmans_pq_mem()
1750 if (!q->pq_kernel_addr) in gaudi_free_internal_qmans_pq_mem()
1752 hl_asic_dma_free_coherent(hdev, q->pq_size, q->pq_kernel_addr, q->pq_dma_addr); in gaudi_free_internal_qmans_pq_mem()
1758 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_alloc_internal_qmans_pq_mem()
1766 q = &gaudi->internal_qmans[i]; in gaudi_alloc_internal_qmans_pq_mem()
1770 q->pq_size = HBM_DMA_QMAN_SIZE_IN_BYTES; in gaudi_alloc_internal_qmans_pq_mem()
1773 q->pq_size = MME_QMAN_SIZE_IN_BYTES; in gaudi_alloc_internal_qmans_pq_mem()
1776 q->pq_size = TPC_QMAN_SIZE_IN_BYTES; in gaudi_alloc_internal_qmans_pq_mem()
1779 q->pq_size = NIC_QMAN_SIZE_IN_BYTES; in gaudi_alloc_internal_qmans_pq_mem()
1782 dev_err(hdev->dev, "Bad internal queue index %d", i); in gaudi_alloc_internal_qmans_pq_mem()
1783 rc = -EINVAL; in gaudi_alloc_internal_qmans_pq_mem()
1787 q->pq_kernel_addr = hl_asic_dma_alloc_coherent(hdev, q->pq_size, &q->pq_dma_addr, in gaudi_alloc_internal_qmans_pq_mem()
1789 if (!q->pq_kernel_addr) { in gaudi_alloc_internal_qmans_pq_mem()
1790 rc = -ENOMEM; in gaudi_alloc_internal_qmans_pq_mem()
1804 struct asic_fixed_properties *prop = &hdev->asic_prop; in gaudi_set_pci_memory_regions()
1808 region = &hdev->pci_mem_region[PCI_REGION_CFG]; in gaudi_set_pci_memory_regions()
1809 region->region_base = CFG_BASE; in gaudi_set_pci_memory_regions()
1810 region->region_size = CFG_SIZE; in gaudi_set_pci_memory_regions()
1811 region->offset_in_bar = CFG_BASE - SPI_FLASH_BASE_ADDR; in gaudi_set_pci_memory_regions()
1812 region->bar_size = CFG_BAR_SIZE; in gaudi_set_pci_memory_regions()
1813 region->bar_id = CFG_BAR_ID; in gaudi_set_pci_memory_regions()
1814 region->used = 1; in gaudi_set_pci_memory_regions()
1817 region = &hdev->pci_mem_region[PCI_REGION_SRAM]; in gaudi_set_pci_memory_regions()
1818 region->region_base = SRAM_BASE_ADDR; in gaudi_set_pci_memory_regions()
1819 region->region_size = SRAM_SIZE; in gaudi_set_pci_memory_regions()
1820 region->offset_in_bar = 0; in gaudi_set_pci_memory_regions()
1821 region->bar_size = SRAM_BAR_SIZE; in gaudi_set_pci_memory_regions()
1822 region->bar_id = SRAM_BAR_ID; in gaudi_set_pci_memory_regions()
1823 region->used = 1; in gaudi_set_pci_memory_regions()
1826 region = &hdev->pci_mem_region[PCI_REGION_DRAM]; in gaudi_set_pci_memory_regions()
1827 region->region_base = DRAM_PHYS_BASE; in gaudi_set_pci_memory_regions()
1828 region->region_size = hdev->asic_prop.dram_size; in gaudi_set_pci_memory_regions()
1829 region->offset_in_bar = 0; in gaudi_set_pci_memory_regions()
1830 region->bar_size = prop->dram_pci_bar_size; in gaudi_set_pci_memory_regions()
1831 region->bar_id = HBM_BAR_ID; in gaudi_set_pci_memory_regions()
1832 region->used = 1; in gaudi_set_pci_memory_regions()
1835 region = &hdev->pci_mem_region[PCI_REGION_SP_SRAM]; in gaudi_set_pci_memory_regions()
1836 region->region_base = PSOC_SCRATCHPAD_ADDR; in gaudi_set_pci_memory_regions()
1837 region->region_size = PSOC_SCRATCHPAD_SIZE; in gaudi_set_pci_memory_regions()
1838 region->offset_in_bar = PSOC_SCRATCHPAD_ADDR - SPI_FLASH_BASE_ADDR; in gaudi_set_pci_memory_regions()
1839 region->bar_size = CFG_BAR_SIZE; in gaudi_set_pci_memory_regions()
1840 region->bar_id = CFG_BAR_ID; in gaudi_set_pci_memory_regions()
1841 region->used = 1; in gaudi_set_pci_memory_regions()
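With the table filled in, translating a device address into a spot inside its BAR is a single region lookup: bar_off = (addr - region_base) + offset_in_bar, valid only while the address stays within region_size. A minimal sketch over the same fields:

#include <stdbool.h>
#include <stdint.h>

struct pci_mem_region_ex {		/* mirrors the fields set above */
	uint64_t region_base;
	uint64_t region_size;
	uint64_t offset_in_bar;
	uint8_t bar_id;
	uint8_t used;
};

static bool region_to_bar_off(const struct pci_mem_region_ex *r,
			      uint64_t addr, uint64_t *bar_off)
{
	if (!r->used || addr < r->region_base ||
	    addr - r->region_base >= r->region_size)
		return false;

	*bar_off = (addr - r->region_base) + r->offset_in_bar;
	return true;
}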
1853 return -ENOMEM; in gaudi_sw_init()
1858 dev_err(hdev->dev, in gaudi_sw_init()
1861 rc = -EINVAL; in gaudi_sw_init()
1865 gaudi->events[event_id++] = in gaudi_sw_init()
1870 gaudi->cpucp_info_get = gaudi_cpucp_info_get; in gaudi_sw_init()
1872 hdev->asic_specific = gaudi; in gaudi_sw_init()
1875 hdev->dma_pool = dma_pool_create(dev_name(hdev->dev), in gaudi_sw_init()
1876 &hdev->pdev->dev, GAUDI_DMA_POOL_BLK_SIZE, 8, 0); in gaudi_sw_init()
1877 if (!hdev->dma_pool) { in gaudi_sw_init()
1878 dev_err(hdev->dev, "failed to create DMA pool\n"); in gaudi_sw_init()
1879 rc = -ENOMEM; in gaudi_sw_init()
1887 hdev->cpu_accessible_dma_pool = gen_pool_create(ilog2(32), -1); in gaudi_sw_init()
1888 if (!hdev->cpu_accessible_dma_pool) { in gaudi_sw_init()
1889 dev_err(hdev->dev, in gaudi_sw_init()
1891 rc = -ENOMEM; in gaudi_sw_init()
1895 rc = gen_pool_add(hdev->cpu_accessible_dma_pool, in gaudi_sw_init()
1896 (uintptr_t) hdev->cpu_accessible_dma_mem, in gaudi_sw_init()
1897 HL_CPU_ACCESSIBLE_MEM_SIZE, -1); in gaudi_sw_init()
1899 dev_err(hdev->dev, in gaudi_sw_init()
1901 rc = -EFAULT; in gaudi_sw_init()
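The CPU-accessible pool follows the standard genalloc recipe seen above: create a pool with a minimum allocation order (ilog2(32) gives 32-byte granularity), seed it with one chunk via gen_pool_add(), then hand out sub-allocations. A self-contained usage sketch with the real <linux/genalloc.h> API (the helper name is illustrative):

#include <linux/genalloc.h>
#include <linux/log2.h>

static struct gen_pool *make_pool(void *vaddr, size_t size)
{
	struct gen_pool *pool = gen_pool_create(ilog2(32), -1); /* 32 B min */

	if (!pool)
		return NULL;

	/* seed the pool with the single coherent block allocated earlier */
	if (gen_pool_add(pool, (unsigned long)vaddr, size, -1)) {
		gen_pool_destroy(pool);
		return NULL;
	}

	return pool;
}

Sub-allocations then come from gen_pool_alloc(pool, size) and are returned with gen_pool_free(pool, addr, size).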
1909 spin_lock_init(&gaudi->hw_queues_lock); in gaudi_sw_init()
1911 hdev->supports_sync_stream = true; in gaudi_sw_init()
1912 hdev->supports_coresight = true; in gaudi_sw_init()
1913 hdev->supports_staged_submission = true; in gaudi_sw_init()
1914 hdev->supports_wait_for_multi_cs = true; in gaudi_sw_init()
1916 hdev->asic_funcs->set_pci_memory_regions(hdev); in gaudi_sw_init()
1917 hdev->stream_master_qid_arr = in gaudi_sw_init()
1918 hdev->asic_funcs->get_stream_master_qid_arr(); in gaudi_sw_init()
1919 hdev->stream_master_qid_arr_size = GAUDI_STREAM_MASTER_ARR_SIZE; in gaudi_sw_init()
1924 gen_pool_destroy(hdev->cpu_accessible_dma_pool); in gaudi_sw_init()
1926 if (!hdev->asic_prop.fw_security_enabled) in gaudi_sw_init()
1927 GAUDI_CPU_TO_PCI_ADDR(hdev->cpu_accessible_dma_address, in gaudi_sw_init()
1928 hdev->cpu_pci_msb_addr); in gaudi_sw_init()
1929 hl_asic_dma_free_coherent(hdev, HL_CPU_ACCESSIBLE_MEM_SIZE, hdev->cpu_accessible_dma_mem, in gaudi_sw_init()
1930 hdev->cpu_accessible_dma_address); in gaudi_sw_init()
1932 dma_pool_destroy(hdev->dma_pool); in gaudi_sw_init()
1940 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_sw_fini()
1944 gen_pool_destroy(hdev->cpu_accessible_dma_pool); in gaudi_sw_fini()
1946 if (!hdev->asic_prop.fw_security_enabled) in gaudi_sw_fini()
1947 GAUDI_CPU_TO_PCI_ADDR(hdev->cpu_accessible_dma_address, in gaudi_sw_fini()
1948 hdev->cpu_pci_msb_addr); in gaudi_sw_fini()
1950 hl_asic_dma_free_coherent(hdev, HL_CPU_ACCESSIBLE_MEM_SIZE, hdev->cpu_accessible_dma_mem, in gaudi_sw_fini()
1951 hdev->cpu_accessible_dma_address); in gaudi_sw_fini()
1953 dma_pool_destroy(hdev->dma_pool); in gaudi_sw_fini()
1965 if (hdev->disabled) in gaudi_irq_handler_single()
1968 for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++) in gaudi_irq_handler_single()
1969 hl_irq_handler_cq(irq, &hdev->completion_queue[i]); in gaudi_irq_handler_single()
1971 hl_irq_handler_eq(irq, &hdev->event_queue); in gaudi_irq_handler_single()
1986 dev_crit(hdev->dev, "CPU EQ must use IRQ %d\n", in gaudi_pci_irq_vector()
1992 return pci_irq_vector(hdev->pdev, msi_vec); in gaudi_pci_irq_vector()
1999 dev_dbg(hdev->dev, "Working in single MSI IRQ mode\n"); in gaudi_enable_msi_single()
2005 dev_err(hdev->dev, in gaudi_enable_msi_single()
2013 int cq_cnt = hdev->asic_prop.completion_queues_count; in gaudi_enable_msi_multi()
2019 &hdev->completion_queue[i]); in gaudi_enable_msi_multi()
2021 dev_err(hdev->dev, "Failed to request IRQ %d", irq); in gaudi_enable_msi_multi()
2028 &hdev->event_queue); in gaudi_enable_msi_multi()
2030 dev_err(hdev->dev, "Failed to request IRQ %d", irq); in gaudi_enable_msi_multi()
2039 &hdev->completion_queue[i]); in gaudi_enable_msi_multi()
2045 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_enable_msi()
2048 if (gaudi->hw_cap_initialized & HW_CAP_MSI) in gaudi_enable_msi()
2051 rc = pci_alloc_irq_vectors(hdev->pdev, 1, 1, PCI_IRQ_MSI); in gaudi_enable_msi()
2053 dev_err(hdev->dev, "MSI: Failed to enable support %d\n", rc); in gaudi_enable_msi()
2058 gaudi->multi_msi_mode = false; in gaudi_enable_msi()
2061 gaudi->multi_msi_mode = true; in gaudi_enable_msi()
2068 gaudi->hw_cap_initialized |= HW_CAP_MSI; in gaudi_enable_msi()
2073 pci_free_irq_vectors(hdev->pdev); in gaudi_enable_msi()
2079 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_sync_irqs()
2080 int i, cq_cnt = hdev->asic_prop.completion_queues_count; in gaudi_sync_irqs()
2082 if (!(gaudi->hw_cap_initialized & HW_CAP_MSI)) in gaudi_sync_irqs()
2086 if (gaudi->multi_msi_mode) { in gaudi_sync_irqs()
2100 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_disable_msi()
2101 int i, irq, cq_cnt = hdev->asic_prop.completion_queues_count; in gaudi_disable_msi()
2103 if (!(gaudi->hw_cap_initialized & HW_CAP_MSI)) in gaudi_disable_msi()
2108 if (gaudi->multi_msi_mode) { in gaudi_disable_msi()
2111 free_irq(irq, &hdev->event_queue); in gaudi_disable_msi()
2115 free_irq(irq, &hdev->completion_queue[i]); in gaudi_disable_msi()
2121 pci_free_irq_vectors(hdev->pdev); in gaudi_disable_msi()
2123 gaudi->hw_cap_initialized &= ~HW_CAP_MSI; in gaudi_disable_msi()
2128 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_init_scrambler_sram()
2130 if (hdev->asic_prop.fw_security_enabled) in gaudi_init_scrambler_sram()
2133 if (hdev->asic_prop.fw_app_cpu_boot_dev_sts0 & in gaudi_init_scrambler_sram()
2137 if (gaudi->hw_cap_initialized & HW_CAP_SRAM_SCRAMBLER) in gaudi_init_scrambler_sram()
2191 gaudi->hw_cap_initialized |= HW_CAP_SRAM_SCRAMBLER; in gaudi_init_scrambler_sram()
2196 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_init_scrambler_hbm()
2198 if (hdev->asic_prop.fw_security_enabled) in gaudi_init_scrambler_hbm()
2201 if (hdev->asic_prop.fw_bootfit_cpu_boot_dev_sts0 & in gaudi_init_scrambler_hbm()
2205 if (gaudi->hw_cap_initialized & HW_CAP_HBM_SCRAMBLER) in gaudi_init_scrambler_hbm()
2259 gaudi->hw_cap_initialized |= HW_CAP_HBM_SCRAMBLER; in gaudi_init_scrambler_hbm()
2264 if (hdev->asic_prop.fw_security_enabled) in gaudi_init_e2e()
2267 if (hdev->asic_prop.fw_bootfit_cpu_boot_dev_sts0 & in gaudi_init_e2e()
2516 if (hdev->asic_prop.fw_security_enabled) in gaudi_init_hbm_cred()
2519 if (hdev->asic_prop.fw_bootfit_cpu_boot_dev_sts0 & in gaudi_init_hbm_cred()
2595 writeq(0, hdev->pcie_bar[SRAM_BAR_ID] + i); in gaudi_init_golden_registers()
2607 &hdev->fw_loader.dynamic_loader.comm_desc.cpu_dyn_regs; in gaudi_init_pci_dma_qman()
2660 irq_handler_offset = hdev->asic_prop.gic_interrupts_enable ? in gaudi_init_pci_dma_qman()
2662 le32_to_cpu(dyn_regs->gic_dma_qm_irq_ctrl); in gaudi_init_pci_dma_qman()
2666 if (hdev->stop_on_err) in gaudi_init_pci_dma_qman()
2697 &hdev->fw_loader.dynamic_loader.comm_desc.cpu_dyn_regs; in gaudi_init_dma_core()
2706 /* WA for H/W bug H3-2116 */ in gaudi_init_dma_core()
2710 if (hdev->stop_on_err) in gaudi_init_dma_core()
2715 irq_handler_offset = hdev->asic_prop.gic_interrupts_enable ? in gaudi_init_dma_core()
2717 le32_to_cpu(dyn_regs->gic_dma_core_irq_ctrl); in gaudi_init_dma_core()
2744 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_init_pci_dma_qmans()
2748 if (gaudi->hw_cap_initialized & HW_CAP_PCI_DMA) in gaudi_init_pci_dma_qmans()
2768 q = &hdev->kernel_queues[q_idx]; in gaudi_init_pci_dma_qmans()
2769 q->cq_id = cq_id++; in gaudi_init_pci_dma_qmans()
2770 q->msi_vec = nic_skip + cpu_skip + msi_vec++; in gaudi_init_pci_dma_qmans()
2772 q->bus_address); in gaudi_init_pci_dma_qmans()
2780 gaudi->hw_cap_initialized |= HW_CAP_PCI_DMA; in gaudi_init_pci_dma_qmans()
2787 &hdev->fw_loader.dynamic_loader.comm_desc.cpu_dyn_regs; in gaudi_init_hbm_dma_qman()
2831 irq_handler_offset = hdev->asic_prop.gic_interrupts_enable ? in gaudi_init_hbm_dma_qman()
2833 le32_to_cpu(dyn_regs->gic_dma_qm_irq_ctrl); in gaudi_init_hbm_dma_qman()
2844 if (hdev->stop_on_err) in gaudi_init_hbm_dma_qman()
2890 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_init_hbm_dma_qmans()
2895 if (gaudi->hw_cap_initialized & HW_CAP_HBM_DMA) in gaudi_init_hbm_dma_qmans()
2908 q = &gaudi->internal_qmans[internal_q_index]; in gaudi_init_hbm_dma_qmans()
2909 qman_base_addr = (u64) q->pq_dma_addr; in gaudi_init_hbm_dma_qmans()
2922 gaudi->hw_cap_initialized |= HW_CAP_HBM_DMA; in gaudi_init_hbm_dma_qmans()
2929 &hdev->fw_loader.dynamic_loader.comm_desc.cpu_dyn_regs; in gaudi_init_mme_qman()
2964 irq_handler_offset = hdev->asic_prop.gic_interrupts_enable ? in gaudi_init_mme_qman()
2966 le32_to_cpu(dyn_regs->gic_mme_qm_irq_ctrl); in gaudi_init_mme_qman()
2977 (mmMME1_QM_GLBL_CFG0 - mmMME0_QM_GLBL_CFG0) / 2; in gaudi_init_mme_qman()
2980 if (hdev->stop_on_err) in gaudi_init_mme_qman()
3014 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_init_mme_qmans()
3020 if (gaudi->hw_cap_initialized & HW_CAP_MME) in gaudi_init_mme_qmans()
3028 mme_offset = mmMME2_QM_GLBL_CFG0 - mmMME0_QM_GLBL_CFG0; in gaudi_init_mme_qmans()
3032 q = &gaudi->internal_qmans[internal_q_index]; in gaudi_init_mme_qmans()
3033 qman_base_addr = (u64) q->pq_dma_addr; in gaudi_init_mme_qmans()
3041 mme_offset = mmMME2_QM_GLBL_CFG0 - mmMME0_QM_GLBL_CFG0; in gaudi_init_mme_qmans()
3048 gaudi->hw_cap_initialized |= HW_CAP_MME; in gaudi_init_mme_qmans()
3055 &hdev->fw_loader.dynamic_loader.comm_desc.cpu_dyn_regs; in gaudi_init_tpc_qman()
3081 (mmTPC1_QM_GLBL_CFG0 - mmTPC0_QM_GLBL_CFG0); in gaudi_init_tpc_qman()
3100 irq_handler_offset = hdev->asic_prop.gic_interrupts_enable ? in gaudi_init_tpc_qman()
3102 le32_to_cpu(dyn_regs->gic_tpc_qm_irq_ctrl); in gaudi_init_tpc_qman()
3113 if (hdev->stop_on_err) in gaudi_init_tpc_qman()
3159 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_init_tpc_qmans()
3163 u32 tpc_delta = mmTPC1_CFG_SM_BASE_ADDRESS_HIGH - in gaudi_init_tpc_qmans()
3167 if (gaudi->hw_cap_initialized & HW_CAP_TPC_MASK) in gaudi_init_tpc_qmans()
3177 q = &gaudi->internal_qmans[internal_q_index]; in gaudi_init_tpc_qmans()
3178 qman_base_addr = (u64) q->pq_dma_addr; in gaudi_init_tpc_qmans()
3195 tpc_offset += mmTPC1_QM_GLBL_CFG0 - mmTPC0_QM_GLBL_CFG0; in gaudi_init_tpc_qmans()
3197 gaudi->hw_cap_initialized |= in gaudi_init_tpc_qmans()
3206 &hdev->fw_loader.dynamic_loader.comm_desc.cpu_dyn_regs; in gaudi_init_nic_qman()
3257 irq_handler_offset = hdev->asic_prop.gic_interrupts_enable ? in gaudi_init_nic_qman()
3259 le32_to_cpu(dyn_regs->gic_nic_qm_irq_ctrl); in gaudi_init_nic_qman()
3263 if (hdev->stop_on_err) in gaudi_init_nic_qman()
3292 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_init_nic_qmans()
3297 mmNIC0_QM1_GLBL_CFG0 - mmNIC0_QM0_GLBL_CFG0; in gaudi_init_nic_qmans()
3299 mmNIC1_QM0_GLBL_CFG0 - mmNIC0_QM0_GLBL_CFG0; in gaudi_init_nic_qmans()
3302 if (!hdev->nic_ports_mask) in gaudi_init_nic_qmans()
3305 if (gaudi->hw_cap_initialized & HW_CAP_NIC_MASK) in gaudi_init_nic_qmans()
3308 dev_dbg(hdev->dev, "Initializing NIC QMANs\n"); in gaudi_init_nic_qmans()
3311 if (!(hdev->nic_ports_mask & (1 << nic_id))) { in gaudi_init_nic_qmans()
3314 nic_offset -= (nic_delta_between_qmans * 2); in gaudi_init_nic_qmans()
3323 q = &gaudi->internal_qmans[internal_q_index]; in gaudi_init_nic_qmans()
3324 qman_base_addr = (u64) q->pq_dma_addr; in gaudi_init_nic_qmans()
3334 nic_offset -= (nic_delta_between_qmans * 2); in gaudi_init_nic_qmans()
3338 gaudi->hw_cap_initialized |= 1 << (HW_CAP_NIC_SHIFT + nic_id); in gaudi_init_nic_qmans()
3344 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_disable_pci_dma_qmans()
3346 if (!(gaudi->hw_cap_initialized & HW_CAP_PCI_DMA)) in gaudi_disable_pci_dma_qmans()
3356 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_disable_hbm_dma_qmans()
3358 if (!(gaudi->hw_cap_initialized & HW_CAP_HBM_DMA)) in gaudi_disable_hbm_dma_qmans()
3370 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_disable_mme_qmans()
3372 if (!(gaudi->hw_cap_initialized & HW_CAP_MME)) in gaudi_disable_mme_qmans()
3381 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_disable_tpc_qmans()
3385 if (!(gaudi->hw_cap_initialized & HW_CAP_TPC_MASK)) in gaudi_disable_tpc_qmans()
3390 tpc_offset += mmTPC1_QM_GLBL_CFG0 - mmTPC0_QM_GLBL_CFG0; in gaudi_disable_tpc_qmans()
3396 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_disable_nic_qmans()
3399 mmNIC0_QM1_GLBL_CFG0 - mmNIC0_QM0_GLBL_CFG0; in gaudi_disable_nic_qmans()
3401 mmNIC1_QM0_GLBL_CFG0 - mmNIC0_QM0_GLBL_CFG0; in gaudi_disable_nic_qmans()
3407 if (gaudi->hw_cap_initialized & nic_mask) in gaudi_disable_nic_qmans()
3412 nic_offset -= (nic_delta_between_qmans * 2); in gaudi_disable_nic_qmans()
3420 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_stop_pci_dma_qmans()
3422 if (!(gaudi->hw_cap_initialized & HW_CAP_PCI_DMA)) in gaudi_stop_pci_dma_qmans()
3433 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_stop_hbm_dma_qmans()
3435 if (!(gaudi->hw_cap_initialized & HW_CAP_HBM_DMA)) in gaudi_stop_hbm_dma_qmans()
3449 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_stop_mme_qmans()
3451 if (!(gaudi->hw_cap_initialized & HW_CAP_MME)) in gaudi_stop_mme_qmans()
3461 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_stop_tpc_qmans()
3463 if (!(gaudi->hw_cap_initialized & HW_CAP_TPC_MASK)) in gaudi_stop_tpc_qmans()
3478 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_stop_nic_qmans()
3482 if (gaudi->hw_cap_initialized & HW_CAP_NIC0) in gaudi_stop_nic_qmans()
3488 if (gaudi->hw_cap_initialized & HW_CAP_NIC1) in gaudi_stop_nic_qmans()
3494 if (gaudi->hw_cap_initialized & HW_CAP_NIC2) in gaudi_stop_nic_qmans()
3500 if (gaudi->hw_cap_initialized & HW_CAP_NIC3) in gaudi_stop_nic_qmans()
3506 if (gaudi->hw_cap_initialized & HW_CAP_NIC4) in gaudi_stop_nic_qmans()
3512 if (gaudi->hw_cap_initialized & HW_CAP_NIC5) in gaudi_stop_nic_qmans()
3518 if (gaudi->hw_cap_initialized & HW_CAP_NIC6) in gaudi_stop_nic_qmans()
3524 if (gaudi->hw_cap_initialized & HW_CAP_NIC7) in gaudi_stop_nic_qmans()
3530 if (gaudi->hw_cap_initialized & HW_CAP_NIC8) in gaudi_stop_nic_qmans()
3536 if (gaudi->hw_cap_initialized & HW_CAP_NIC9) in gaudi_stop_nic_qmans()
3545 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_pci_dma_stall()
3547 if (!(gaudi->hw_cap_initialized & HW_CAP_PCI_DMA)) in gaudi_pci_dma_stall()
3557 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_hbm_dma_stall()
3559 if (!(gaudi->hw_cap_initialized & HW_CAP_HBM_DMA)) in gaudi_hbm_dma_stall()
3571 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_mme_stall()
3573 if (!(gaudi->hw_cap_initialized & HW_CAP_MME)) in gaudi_mme_stall()
3576 /* WA for H3-1800 bug: do ACC and SBAB writes twice */ in gaudi_mme_stall()
3597 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_tpc_stall()
3599 if (!(gaudi->hw_cap_initialized & HW_CAP_TPC_MASK)) in gaudi_tpc_stall()
3617 if (hdev->asic_prop.fw_security_enabled) in gaudi_disable_clock_gating()
3624 qman_offset += (mmDMA1_QM_CGM_CFG - mmDMA0_QM_CGM_CFG); in gaudi_disable_clock_gating()
3636 qman_offset += (mmTPC1_QM_CGM_CFG - mmTPC0_QM_CGM_CFG); in gaudi_disable_clock_gating()
3643 WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE, 0); in gaudi_enable_timestamp()
3645 /* Zero the lower/upper parts of the 64-bit counter */ in gaudi_enable_timestamp()
3646 WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE + 0xC, 0); in gaudi_enable_timestamp()
3647 WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE + 0x8, 0); in gaudi_enable_timestamp()
3650 WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE, 1); in gaudi_enable_timestamp()
3656 WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE, 0); in gaudi_disable_timestamp()
3663 if (hdev->pldm) in gaudi_halt_engines()
3700 struct asic_fixed_properties *prop = &hdev->asic_prop; in gaudi_mmu_init()
3701 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_mmu_init()
3705 if (!hdev->mmu_enable) in gaudi_mmu_init()
3708 if (gaudi->hw_cap_initialized & HW_CAP_MMU) in gaudi_mmu_init()
3711 for (i = 0 ; i < prop->max_asid ; i++) { in gaudi_mmu_init()
3712 hop0_addr = prop->mmu_pgt_addr + in gaudi_mmu_init()
3713 (i * prop->mmu_hop_table_size); in gaudi_mmu_init()
3717 dev_err(hdev->dev, in gaudi_mmu_init()
3724 WREG32(mmSTLB_CACHE_INV_BASE_39_8, prop->mmu_cache_mng_addr >> 8); in gaudi_mmu_init()
3725 WREG32(mmSTLB_CACHE_INV_BASE_49_40, prop->mmu_cache_mng_addr >> 40); in gaudi_mmu_init()
3741 gaudi->mmu_cache_inv_pi = 1; in gaudi_mmu_init()
3743 gaudi->hw_cap_initialized |= HW_CAP_MMU; in gaudi_mmu_init()
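The per-ASID hop-0 placement in the loop above is plain arithmetic on the fixed page-table region: ASID i's first-hop table lives at mmu_pgt_addr + i * mmu_hop_table_size. Assuming HOP_TABLE_SIZE_512_PTE means 512 PTEs of 8 bytes, every hop table is 4 KB, so e.g. ASID 3 sits 12 KB into the region:

#include <stdint.h>

#define HOP_TABLE_SIZE	(512 * 8)	/* 512 PTEs, assumed 8 B each = 4 KB */

static inline uint64_t hop0_addr(uint64_t pgt_base, uint32_t asid)
{
	return pgt_base + (uint64_t)asid * HOP_TABLE_SIZE;
}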
3755 dst = hdev->pcie_bar[HBM_BAR_ID] + LINUX_FW_OFFSET; in gaudi_load_firmware_to_device()
3764 dst = hdev->pcie_bar[SRAM_BAR_ID] + BOOT_FIT_SRAM_OFFSET; in gaudi_load_boot_fit_to_device()
3774 dynamic_loader = &hdev->fw_loader.dynamic_loader; in gaudi_init_dynamic_firmware_loader()
3779 * hard-coded) in later stages of the protocol those values will be in gaudi_init_dynamic_firmware_loader()
3781 * will always be up-to-date in gaudi_init_dynamic_firmware_loader()
3783 dyn_regs = &dynamic_loader->comm_desc.cpu_dyn_regs; in gaudi_init_dynamic_firmware_loader()
3784 dyn_regs->kmd_msg_to_cpu = in gaudi_init_dynamic_firmware_loader()
3786 dyn_regs->cpu_cmd_status_to_host = in gaudi_init_dynamic_firmware_loader()
3789 dynamic_loader->wait_for_bl_timeout = GAUDI_WAIT_FOR_BL_TIMEOUT_USEC; in gaudi_init_dynamic_firmware_loader()
3796 static_loader = &hdev->fw_loader.static_loader; in gaudi_init_static_firmware_loader()
3798 static_loader->preboot_version_max_off = SRAM_SIZE - VERSION_MAX_LEN; in gaudi_init_static_firmware_loader()
3799 static_loader->boot_fit_version_max_off = SRAM_SIZE - VERSION_MAX_LEN; in gaudi_init_static_firmware_loader()
3800 static_loader->kmd_msg_to_cpu_reg = mmPSOC_GLOBAL_CONF_KMD_MSG_TO_CPU; in gaudi_init_static_firmware_loader()
3801 static_loader->cpu_cmd_status_to_host_reg = mmCPU_CMD_STATUS_TO_HOST; in gaudi_init_static_firmware_loader()
3802 static_loader->cpu_boot_status_reg = mmPSOC_GLOBAL_CONF_CPU_BOOT_STATUS; in gaudi_init_static_firmware_loader()
3803 static_loader->cpu_boot_dev_status0_reg = mmCPU_BOOT_DEV_STS0; in gaudi_init_static_firmware_loader()
3804 static_loader->cpu_boot_dev_status1_reg = mmCPU_BOOT_DEV_STS1; in gaudi_init_static_firmware_loader()
3805 static_loader->boot_err0_reg = mmCPU_BOOT_ERR0; in gaudi_init_static_firmware_loader()
3806 static_loader->boot_err1_reg = mmCPU_BOOT_ERR1; in gaudi_init_static_firmware_loader()
3807 static_loader->preboot_version_offset_reg = mmPREBOOT_VER_OFFSET; in gaudi_init_static_firmware_loader()
3808 static_loader->boot_fit_version_offset_reg = mmUBOOT_VER_OFFSET; in gaudi_init_static_firmware_loader()
3809 static_loader->sram_offset_mask = ~(lower_32_bits(SRAM_BASE_ADDR)); in gaudi_init_static_firmware_loader()
3810 static_loader->cpu_reset_wait_msec = hdev->pldm ? in gaudi_init_static_firmware_loader()
3817 struct pre_fw_load_props *pre_fw_load = &hdev->fw_loader.pre_fw_load; in gaudi_init_firmware_preload_params()
3819 pre_fw_load->cpu_boot_status_reg = mmPSOC_GLOBAL_CONF_CPU_BOOT_STATUS; in gaudi_init_firmware_preload_params()
3820 pre_fw_load->sts_boot_dev_sts0_reg = mmCPU_BOOT_DEV_STS0; in gaudi_init_firmware_preload_params()
3821 pre_fw_load->sts_boot_dev_sts1_reg = mmCPU_BOOT_DEV_STS1; in gaudi_init_firmware_preload_params()
3822 pre_fw_load->boot_err0_reg = mmCPU_BOOT_ERR0; in gaudi_init_firmware_preload_params()
3823 pre_fw_load->boot_err1_reg = mmCPU_BOOT_ERR1; in gaudi_init_firmware_preload_params()
3824 pre_fw_load->wait_for_preboot_timeout = GAUDI_BOOT_FIT_REQ_TIMEOUT_USEC; in gaudi_init_firmware_preload_params()
3829 struct asic_fixed_properties *prop = &hdev->asic_prop; in gaudi_init_firmware_loader()
3830 struct fw_load_mgr *fw_loader = &hdev->fw_loader; in gaudi_init_firmware_loader()
3833 fw_loader->fw_comp_loaded = FW_TYPE_NONE; in gaudi_init_firmware_loader()
3834 fw_loader->boot_fit_img.image_name = GAUDI_BOOT_FIT_FILE; in gaudi_init_firmware_loader()
3835 fw_loader->linux_img.image_name = GAUDI_LINUX_FW_FILE; in gaudi_init_firmware_loader()
3836 fw_loader->cpu_timeout = GAUDI_CPU_TIMEOUT_USEC; in gaudi_init_firmware_loader()
3837 fw_loader->boot_fit_timeout = GAUDI_BOOT_FIT_REQ_TIMEOUT_USEC; in gaudi_init_firmware_loader()
3838 fw_loader->skip_bmc = !hdev->bmc_enable; in gaudi_init_firmware_loader()
3839 fw_loader->sram_bar_id = SRAM_BAR_ID; in gaudi_init_firmware_loader()
3840 fw_loader->dram_bar_id = HBM_BAR_ID; in gaudi_init_firmware_loader()
3842 if (prop->dynamic_fw_load) in gaudi_init_firmware_loader()
3850 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_init_cpu()
3853 if (!(hdev->fw_components & FW_TYPE_PREBOOT_CPU)) in gaudi_init_cpu()
3856 if (gaudi->hw_cap_initialized & HW_CAP_CPU) in gaudi_init_cpu()
3863 if (!hdev->asic_prop.fw_security_enabled) in gaudi_init_cpu()
3864 WREG32(mmCPU_IF_CPU_MSB_ADDR, hdev->cpu_pci_msb_addr); in gaudi_init_cpu()
3871 gaudi->hw_cap_initialized |= HW_CAP_CPU; in gaudi_init_cpu()
3879 &hdev->fw_loader.dynamic_loader.comm_desc.cpu_dyn_regs; in gaudi_init_cpu_queues()
3880 struct asic_fixed_properties *prop = &hdev->asic_prop; in gaudi_init_cpu_queues()
3881 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_init_cpu_queues()
3885 &hdev->kernel_queues[GAUDI_QUEUE_ID_CPU_PQ]; in gaudi_init_cpu_queues()
3888 if (!hdev->cpu_queues_enable) in gaudi_init_cpu_queues()
3891 if (gaudi->hw_cap_initialized & HW_CAP_CPU_Q) in gaudi_init_cpu_queues()
3894 eq = &hdev->event_queue; in gaudi_init_cpu_queues()
3896 WREG32(mmCPU_IF_PQ_BASE_ADDR_LOW, lower_32_bits(cpu_pq->bus_address)); in gaudi_init_cpu_queues()
3897 WREG32(mmCPU_IF_PQ_BASE_ADDR_HIGH, upper_32_bits(cpu_pq->bus_address)); in gaudi_init_cpu_queues()
3899 WREG32(mmCPU_IF_EQ_BASE_ADDR_LOW, lower_32_bits(eq->bus_address)); in gaudi_init_cpu_queues()
3900 WREG32(mmCPU_IF_EQ_BASE_ADDR_HIGH, upper_32_bits(eq->bus_address)); in gaudi_init_cpu_queues()
3903 lower_32_bits(hdev->cpu_accessible_dma_address)); in gaudi_init_cpu_queues()
3905 upper_32_bits(hdev->cpu_accessible_dma_address)); in gaudi_init_cpu_queues()
3916 if (gaudi->multi_msi_mode) in gaudi_init_cpu_queues()
3922 irq_handler_offset = prop->gic_interrupts_enable ? in gaudi_init_cpu_queues()
3924 le32_to_cpu(dyn_regs->gic_host_pi_upd_irq); in gaudi_init_cpu_queues()
3938 dev_err(hdev->dev, in gaudi_init_cpu_queues()
3939 "Failed to communicate with Device CPU (CPU-CP timeout)\n"); in gaudi_init_cpu_queues()
3940 return -EIO; in gaudi_init_cpu_queues()
3944 if (prop->fw_cpu_boot_dev_sts0_valid) in gaudi_init_cpu_queues()
3945 prop->fw_app_cpu_boot_dev_sts0 = RREG32(mmCPU_BOOT_DEV_STS0); in gaudi_init_cpu_queues()
3946 if (prop->fw_cpu_boot_dev_sts1_valid) in gaudi_init_cpu_queues()
3947 prop->fw_app_cpu_boot_dev_sts1 = RREG32(mmCPU_BOOT_DEV_STS1); in gaudi_init_cpu_queues()
3949 gaudi->hw_cap_initialized |= HW_CAP_CPU_Q; in gaudi_init_cpu_queues()
3958 if (!hdev->asic_prop.fw_security_enabled) { in gaudi_pre_hw_init()
3983 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_hw_init()
3992 if (hdev->asic_prop.iatu_done_by_fw) in gaudi_hw_init()
3993 gaudi->hbm_bar_cur_addr = DRAM_PHYS_BASE; in gaudi_hw_init()
3996 * Before pushing u-boot/linux to device, need to set the hbm bar to in gaudi_hw_init()
4000 dev_err(hdev->dev, in gaudi_hw_init()
4002 return -EIO; in gaudi_hw_init()
4007 dev_err(hdev->dev, "failed to initialize CPU\n"); in gaudi_hw_init()
4050 dev_err(hdev->dev, "failed to initialize CPU H/W queues %d\n", in gaudi_hw_init()
4072 &hdev->fw_loader.dynamic_loader.comm_desc.cpu_dyn_regs; in gaudi_hw_fini()
4074 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_hw_fini()
4078 dev_err(hdev->dev, "GAUDI doesn't support soft-reset\n"); in gaudi_hw_fini()
4082 if (hdev->pldm) { in gaudi_hw_fini()
4091 dev_dbg(hdev->dev, in gaudi_hw_fini()
4098 driver_performs_reset = !!(!hdev->asic_prop.fw_security_enabled && in gaudi_hw_fini()
4099 !hdev->asic_prop.hard_reset_done_by_fw); in gaudi_hw_fini()
4112 if (hdev->fw_loader.fw_comp_loaded & FW_TYPE_LINUX) { in gaudi_hw_fini()
4113 irq_handler_offset = hdev->asic_prop.gic_interrupts_enable ? in gaudi_hw_fini()
4115 le32_to_cpu(dyn_regs->gic_host_halt_irq); in gaudi_hw_fini()
4120 /* This is a hail-mary attempt to revive the card in the small chance that the in gaudi_hw_fini()
4131 if (hdev->reset_info.curr_reset_cause == HL_RESET_CAUSE_HEARTBEAT) { in gaudi_hw_fini()
4132 if (hdev->asic_prop.hard_reset_done_by_fw) in gaudi_hw_fini()
4138 if (hdev->asic_prop.hard_reset_done_by_fw) in gaudi_hw_fini()
4174 /* Tell ASIC not to re-initialize PCIe */ in gaudi_hw_fini()
4177 /* Restart BTL/BLR upon hard-reset */ in gaudi_hw_fini()
4183 dev_dbg(hdev->dev, in gaudi_hw_fini()
4187 dev_dbg(hdev->dev, in gaudi_hw_fini()
4201 dev_err(hdev->dev, in gaudi_hw_fini()
4206 gaudi->hw_cap_initialized &= ~(HW_CAP_CPU | HW_CAP_CPU_Q | HW_CAP_HBM | in gaudi_hw_fini()
4212 memset(gaudi->events_stat, 0, sizeof(gaudi->events_stat)); in gaudi_hw_fini()
4214 hdev->device_cpu_is_halted = false; in gaudi_hw_fini()
4224 dev_err(hdev->dev, "Failed to disable PCI access from CPU\n"); in gaudi_suspend()
4239 vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP | in gaudi_mmap()
4242 rc = dma_mmap_coherent(hdev->dev, vma, cpu_addr, in gaudi_mmap()
4243 (dma_addr - HOST_PHYS_BASE), size); in gaudi_mmap()
4245 dev_err(hdev->dev, "dma_mmap_coherent error %d", rc); in gaudi_mmap()
4253 &hdev->fw_loader.dynamic_loader.comm_desc.cpu_dyn_regs; in gaudi_ring_doorbell()
4255 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_ring_doorbell()
4277 q_off = dma_qm_offset + ((hw_queue_id - 1) & 0x3) * 4; in gaudi_ring_doorbell()
4284 q_off = dma_qm_offset + ((hw_queue_id - 1) & 0x3) * 4; in gaudi_ring_doorbell()
4291 q_off = dma_qm_offset + ((hw_queue_id - 1) & 0x3) * 4; in gaudi_ring_doorbell()
4298 q_off = dma_qm_offset + ((hw_queue_id - 1) & 0x3) * 4; in gaudi_ring_doorbell()
4305 q_off = dma_qm_offset + ((hw_queue_id - 1) & 0x3) * 4; in gaudi_ring_doorbell()
4312 q_off = dma_qm_offset + ((hw_queue_id - 1) & 0x3) * 4; in gaudi_ring_doorbell()
4317 if (gaudi->hw_cap_initialized & HW_CAP_CPU_Q) in gaudi_ring_doorbell()
4484 if (!(gaudi->hw_cap_initialized & HW_CAP_NIC0)) in gaudi_ring_doorbell()
4487 q_off = ((hw_queue_id - 1) & 0x3) * 4; in gaudi_ring_doorbell()
4492 if (!(gaudi->hw_cap_initialized & HW_CAP_NIC1)) in gaudi_ring_doorbell()
4495 q_off = ((hw_queue_id - 1) & 0x3) * 4; in gaudi_ring_doorbell()
4500 if (!(gaudi->hw_cap_initialized & HW_CAP_NIC2)) in gaudi_ring_doorbell()
4503 q_off = ((hw_queue_id - 1) & 0x3) * 4; in gaudi_ring_doorbell()
4508 if (!(gaudi->hw_cap_initialized & HW_CAP_NIC3)) in gaudi_ring_doorbell()
4511 q_off = ((hw_queue_id - 1) & 0x3) * 4; in gaudi_ring_doorbell()
4516 if (!(gaudi->hw_cap_initialized & HW_CAP_NIC4)) in gaudi_ring_doorbell()
4519 q_off = ((hw_queue_id - 1) & 0x3) * 4; in gaudi_ring_doorbell()
4524 if (!(gaudi->hw_cap_initialized & HW_CAP_NIC5)) in gaudi_ring_doorbell()
4527 q_off = ((hw_queue_id - 1) & 0x3) * 4; in gaudi_ring_doorbell()
4532 if (!(gaudi->hw_cap_initialized & HW_CAP_NIC6)) in gaudi_ring_doorbell()
4535 q_off = ((hw_queue_id - 1) & 0x3) * 4; in gaudi_ring_doorbell()
4540 if (!(gaudi->hw_cap_initialized & HW_CAP_NIC7)) in gaudi_ring_doorbell()
4543 q_off = ((hw_queue_id - 1) & 0x3) * 4; in gaudi_ring_doorbell()
4548 if (!(gaudi->hw_cap_initialized & HW_CAP_NIC8)) in gaudi_ring_doorbell()
4551 q_off = ((hw_queue_id - 1) & 0x3) * 4; in gaudi_ring_doorbell()
4556 if (!(gaudi->hw_cap_initialized & HW_CAP_NIC9)) in gaudi_ring_doorbell()
4559 q_off = ((hw_queue_id - 1) & 0x3) * 4; in gaudi_ring_doorbell()
4569 dev_err(hdev->dev, "h/w queue %d is invalid. Can't set pi\n", in gaudi_ring_doorbell()
4583 irq_handler_offset = hdev->asic_prop.gic_interrupts_enable ? in gaudi_ring_doorbell()
4585 le32_to_cpu(dyn_regs->gic_host_pi_upd_irq); in gaudi_ring_doorbell()
4605 void *kernel_addr = dma_alloc_coherent(&hdev->pdev->dev, size, in gaudi_dma_alloc_coherent()
4619 dma_addr_t fixed_dma_handle = dma_handle - HOST_PHYS_BASE; in gaudi_dma_free_coherent()
4621 dma_free_coherent(&hdev->pdev->dev, size, cpu_addr, fixed_dma_handle); in gaudi_dma_free_coherent()
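/*
 * A minimal sketch of the HOST_PHYS_BASE convention visible in the coherent
 * alloc/free (and mmap) paths above: DMA addresses handed to the rest of the
 * driver are biased by HOST_PHYS_BASE so they land in the device's view of
 * host memory, and the bias is stripped again before any call back into the
 * generic DMA API. The helper names are illustrative.
 */
static void *sketch_dma_alloc_coherent(struct hl_device *hdev, size_t size,
				       dma_addr_t *dma_handle, gfp_t flags)
{
	void *cpu_addr = dma_alloc_coherent(&hdev->pdev->dev, size,
					    dma_handle, flags);

	if (cpu_addr)
		*dma_handle += HOST_PHYS_BASE;	/* device-visible address */

	return cpu_addr;
}

static void sketch_dma_free_coherent(struct hl_device *hdev, size_t size,
				     void *cpu_addr, dma_addr_t dma_handle)
{
	/* undo the bias before returning the buffer to the DMA API */
	dma_free_coherent(&hdev->pdev->dev, size, cpu_addr,
			  dma_handle - HOST_PHYS_BASE);
}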
4626 struct asic_fixed_properties *prop = &hdev->asic_prop; in gaudi_scrub_device_dram()
4627 u64 cur_addr = prop->dram_user_base_address; in gaudi_scrub_device_dram()
4631 while (cur_addr < prop->dram_end_address) { in gaudi_scrub_device_dram()
4636 min((u64)SZ_2G, prop->dram_end_address - cur_addr); in gaudi_scrub_device_dram()
4638 dev_dbg(hdev->dev, in gaudi_scrub_device_dram()
4639 "Doing HBM scrubbing for 0x%09llx - 0x%09llx\n", in gaudi_scrub_device_dram()
4658 if (cur_addr == prop->dram_end_address) in gaudi_scrub_device_dram()
4674 dev_err(hdev->dev, in gaudi_scrub_device_dram()
4677 return -EIO; in gaudi_scrub_device_dram()
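/*
 * A minimal sketch of the chunked scrub loop above: walk the DRAM range in
 * at-most-2GB chunks, clamping the final chunk to the end address.
 * sketch_dma_fill() is a hypothetical stand-in for the driver's DMA-engine
 * memset.
 */
static int sketch_scrub_range(struct hl_device *hdev, u64 addr, u64 end, u64 val)
{
	while (addr < end) {
		u64 chunk = min_t(u64, SZ_2G, end - addr);
		int rc = sketch_dma_fill(hdev, addr, chunk, val); /* hypothetical */

		if (rc)
			return rc;

		addr += chunk;
	}

	return 0;
}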
4687 struct asic_fixed_properties *prop = &hdev->asic_prop; in gaudi_scrub_device_mem()
4688 u64 wait_to_idle_time = hdev->pdev ? HBM_SCRUBBING_TIMEOUT_US : in gaudi_scrub_device_mem()
4690 u64 addr, size, val = hdev->memory_scrub_val; in gaudi_scrub_device_mem()
4694 if (!hdev->memory_scrub) in gaudi_scrub_device_mem()
4698 while (!hdev->asic_funcs->is_device_idle(hdev, NULL, 0, NULL)) { in gaudi_scrub_device_mem()
4700 dev_err(hdev->dev, "waiting for idle timeout\n"); in gaudi_scrub_device_mem()
4701 return -ETIMEDOUT; in gaudi_scrub_device_mem()
4707 addr = prop->sram_user_base_address; in gaudi_scrub_device_mem()
4708 size = hdev->pldm ? 0x10000 : prop->sram_size - SRAM_USER_BASE_OFFSET; in gaudi_scrub_device_mem()
4710 dev_dbg(hdev->dev, "Scrubbing SRAM: 0x%09llx - 0x%09llx val: 0x%llx\n", in gaudi_scrub_device_mem()
4714 dev_err(hdev->dev, "Failed to clear SRAM (%d)\n", rc); in gaudi_scrub_device_mem()
4718 /* Scrub HBM using all DMA channels in parallel */ in gaudi_scrub_device_mem()
4721 dev_err(hdev->dev, "Failed to clear HBM (%d)\n", rc); in gaudi_scrub_device_mem()
4732 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_get_int_queue_base()
4737 dev_err(hdev->dev, "Got invalid queue id %d\n", queue_id); in gaudi_get_int_queue_base()
4741 q = &gaudi->internal_qmans[queue_id]; in gaudi_get_int_queue_base()
4742 *dma_handle = q->pq_dma_addr; in gaudi_get_int_queue_base()
4743 *queue_len = q->pq_size / QMAN_PQ_ENTRY_SIZE; in gaudi_get_int_queue_base()
4745 return q->pq_kernel_addr; in gaudi_get_int_queue_base()
4751 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_send_cpu_message()
4753 if (!(gaudi->hw_cap_initialized & HW_CAP_CPU_Q)) { in gaudi_send_cpu_message()
4775 if (hdev->pldm) in gaudi_test_queue()
4784 dev_err(hdev->dev, in gaudi_test_queue()
4787 return -ENOMEM; in gaudi_test_queue()
4795 dev_err(hdev->dev, in gaudi_test_queue()
4798 rc = -ENOMEM; in gaudi_test_queue()
4806 fence_pkt->ctl = cpu_to_le32(tmp); in gaudi_test_queue()
4807 fence_pkt->value = cpu_to_le32(fence_val); in gaudi_test_queue()
4808 fence_pkt->addr = cpu_to_le64(fence_dma_addr); in gaudi_test_queue()
4814 dev_err(hdev->dev, in gaudi_test_queue()
4825 if (rc == -ETIMEDOUT) { in gaudi_test_queue()
4826 dev_err(hdev->dev, in gaudi_test_queue()
4829 rc = -EIO; in gaudi_test_queue()
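/*
 * A minimal sketch of the self-test pattern above: a MSG_PROT packet is
 * pushed to the queue under test and writes a known value into a host-side
 * fence buffer, which the driver then polls. Only the packet fill is shown;
 * the ctl encoding mirrors the FIELD_PREP() usage elsewhere in this file,
 * and the EB/MB mask names are assumed from that context.
 */
static void sketch_fill_fence_pkt(struct packet_msg_prot *pkt, u32 fence_val,
				  dma_addr_t fence_dma_addr)
{
	u32 tmp = FIELD_PREP(GAUDI_PKT_CTL_OPCODE_MASK, PACKET_MSG_PROT) |
		  FIELD_PREP(GAUDI_PKT_CTL_EB_MASK, 1) |
		  FIELD_PREP(GAUDI_PKT_CTL_MB_MASK, 1);

	pkt->ctl = cpu_to_le32(tmp);
	pkt->value = cpu_to_le32(fence_val);
	pkt->addr = cpu_to_le64(fence_dma_addr);
}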
4841 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_test_cpu_queue()
4847 if (!(gaudi->hw_cap_initialized & HW_CAP_CPU_Q)) in gaudi_test_cpu_queue()
4857 for (i = 0 ; i < hdev->asic_prop.max_queues ; i++) { in gaudi_test_queues()
4858 if (hdev->asic_prop.hw_queues_props[i].type == QUEUE_TYPE_EXT) { in gaudi_test_queues()
4861 ret_val = -EINVAL; in gaudi_test_queues()
4867 ret_val = -EINVAL; in gaudi_test_queues()
4880 kernel_addr = dma_pool_zalloc(hdev->dma_pool, mem_flags, dma_handle); in gaudi_dma_pool_zalloc()
4893 dma_addr_t fixed_dma_addr = dma_addr - HOST_PHYS_BASE; in gaudi_dma_pool_free()
4895 dma_pool_free(hdev->dma_pool, vaddr, fixed_dma_addr); in gaudi_dma_pool_free()
4926 while ((count + 1) < sgt->nents) { in gaudi_get_dma_desc_list_size()
4958 if (hl_userptr_is_pinned(hdev, addr, le32_to_cpu(user_dma_pkt->tsize), in gaudi_pin_memory_before_cs()
4959 parser->job_userptr_list, &userptr)) in gaudi_pin_memory_before_cs()
4964 return -ENOMEM; in gaudi_pin_memory_before_cs()
4966 rc = hl_pin_host_memory(hdev, addr, le32_to_cpu(user_dma_pkt->tsize), in gaudi_pin_memory_before_cs()
4971 list_add_tail(&userptr->job_node, parser->job_userptr_list); in gaudi_pin_memory_before_cs()
4973 rc = hdev->asic_funcs->asic_dma_map_sgtable(hdev, userptr->sgt, dir); in gaudi_pin_memory_before_cs()
4975 dev_err(hdev->dev, "failed to map sgt with DMA region\n"); in gaudi_pin_memory_before_cs()
4979 userptr->dma_mapped = true; in gaudi_pin_memory_before_cs()
4980 userptr->dir = dir; in gaudi_pin_memory_before_cs()
4983 parser->patched_cb_size += in gaudi_pin_memory_before_cs()
4984 gaudi_get_dma_desc_list_size(hdev, userptr->sgt); in gaudi_pin_memory_before_cs()
4989 list_del(&userptr->job_node); in gaudi_pin_memory_before_cs()
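/*
 * A minimal sketch of the pin-then-map ordering above: pin the user pages
 * first, publish the userptr on the job list, then create the DMA mapping,
 * with the error path unwinding in reverse. dma_map_sgtable() is the generic
 * kernel API; the driver goes through an asic_funcs wrapper instead. The
 * helper name is illustrative.
 */
static int sketch_pin_and_map(struct hl_device *hdev, u64 addr, u32 size,
			      struct list_head *list,
			      struct hl_userptr *userptr,
			      enum dma_data_direction dir)
{
	int rc = hl_pin_host_memory(hdev, addr, size, userptr);

	if (rc)
		return rc;

	list_add_tail(&userptr->job_node, list);

	rc = dma_map_sgtable(&hdev->pdev->dev, userptr->sgt, dir, 0);
	if (rc) {
		list_del(&userptr->job_node);
		hl_unpin_host_memory(hdev, userptr);
	}

	return rc;
}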
5006 user_memset = (le32_to_cpu(user_dma_pkt->ctl) & in gaudi_validate_dma_pkt_host()
5014 dev_dbg(hdev->dev, "DMA direction is HOST --> DEVICE\n"); in gaudi_validate_dma_pkt_host()
5016 addr = le64_to_cpu(user_dma_pkt->src_addr); in gaudi_validate_dma_pkt_host()
5018 dev_dbg(hdev->dev, "DMA direction is DEVICE --> HOST\n"); in gaudi_validate_dma_pkt_host()
5020 addr = (le64_to_cpu(user_dma_pkt->dst_addr) & in gaudi_validate_dma_pkt_host()
5026 parser->patched_cb_size += sizeof(*user_dma_pkt); in gaudi_validate_dma_pkt_host()
5039 u64 dst_addr = (le64_to_cpu(user_dma_pkt->dst_addr) & in gaudi_validate_dma_pkt_no_mmu()
5043 dev_dbg(hdev->dev, "DMA packet details:\n"); in gaudi_validate_dma_pkt_no_mmu()
5044 dev_dbg(hdev->dev, "source == 0x%llx\n", in gaudi_validate_dma_pkt_no_mmu()
5045 le64_to_cpu(user_dma_pkt->src_addr)); in gaudi_validate_dma_pkt_no_mmu()
5046 dev_dbg(hdev->dev, "destination == 0x%llx\n", dst_addr); in gaudi_validate_dma_pkt_no_mmu()
5047 dev_dbg(hdev->dev, "size == %u\n", le32_to_cpu(user_dma_pkt->tsize)); in gaudi_validate_dma_pkt_no_mmu()
5054 if (!le32_to_cpu(user_dma_pkt->tsize)) { in gaudi_validate_dma_pkt_no_mmu()
5055 parser->patched_cb_size += sizeof(*user_dma_pkt); in gaudi_validate_dma_pkt_no_mmu()
5059 if (parser->hw_queue_id <= GAUDI_QUEUE_ID_DMA_0_3) in gaudi_validate_dma_pkt_no_mmu()
5072 cfg = le32_to_cpu(user_pkt->cfg); in gaudi_validate_load_and_exe_pkt()
5075 dev_err(hdev->dev, in gaudi_validate_load_and_exe_pkt()
5077 return -EPERM; in gaudi_validate_load_and_exe_pkt()
5080 parser->patched_cb_size += sizeof(struct packet_load_and_exe); in gaudi_validate_load_and_exe_pkt()
5091 parser->patched_cb_size = 0; in gaudi_validate_cb()
5094 while (cb_parsed_length < parser->user_cb_size) { in gaudi_validate_cb()
5099 user_pkt = parser->user_cb->kernel_address + cb_parsed_length; in gaudi_validate_cb()
5102 (le64_to_cpu(user_pkt->header) & in gaudi_validate_cb()
5107 dev_err(hdev->dev, "Invalid packet id %u\n", pkt_id); in gaudi_validate_cb()
5108 rc = -EINVAL; in gaudi_validate_cb()
5114 if (cb_parsed_length > parser->user_cb_size) { in gaudi_validate_cb()
5115 dev_err(hdev->dev, in gaudi_validate_cb()
5117 rc = -EINVAL; in gaudi_validate_cb()
5123 dev_err(hdev->dev, in gaudi_validate_cb()
5125 rc = -EPERM; in gaudi_validate_cb()
5129 dev_err(hdev->dev, "User not allowed to use CP_DMA\n"); in gaudi_validate_cb()
5130 rc = -EPERM; in gaudi_validate_cb()
5134 dev_err(hdev->dev, "User not allowed to use STOP\n"); in gaudi_validate_cb()
5135 rc = -EPERM; in gaudi_validate_cb()
5139 dev_err(hdev->dev, in gaudi_validate_cb()
5141 rc = -EPERM; in gaudi_validate_cb()
5150 parser->contains_dma_pkt = true; in gaudi_validate_cb()
5152 parser->patched_cb_size += pkt_size; in gaudi_validate_cb()
5165 parser->patched_cb_size += pkt_size; in gaudi_validate_cb()
5169 dev_err(hdev->dev, "Invalid packet header 0x%x\n", in gaudi_validate_cb()
5171 rc = -EINVAL; in gaudi_validate_cb()
5185 if (parser->completion) in gaudi_validate_cb()
5186 parser->patched_cb_size += gaudi_get_patched_cb_extra_size( in gaudi_validate_cb()
5187 parser->patched_cb_size); in gaudi_validate_cb()
5210 ctl = le32_to_cpu(user_dma_pkt->ctl); in gaudi_patch_dma_packet()
5212 if (parser->hw_queue_id <= GAUDI_QUEUE_ID_DMA_0_3) in gaudi_patch_dma_packet()
5219 addr = le64_to_cpu(user_dma_pkt->src_addr); in gaudi_patch_dma_packet()
5220 device_memory_addr = le64_to_cpu(user_dma_pkt->dst_addr); in gaudi_patch_dma_packet()
5225 addr = le64_to_cpu(user_dma_pkt->dst_addr); in gaudi_patch_dma_packet()
5226 device_memory_addr = le64_to_cpu(user_dma_pkt->src_addr); in gaudi_patch_dma_packet()
5232 le32_to_cpu(user_dma_pkt->tsize), in gaudi_patch_dma_packet()
5233 parser->job_userptr_list, &userptr))) { in gaudi_patch_dma_packet()
5234 dev_err(hdev->dev, "Userptr 0x%llx + 0x%x NOT mapped\n", in gaudi_patch_dma_packet()
5235 addr, le32_to_cpu(user_dma_pkt->tsize)); in gaudi_patch_dma_packet()
5236 return -EFAULT; in gaudi_patch_dma_packet()
5247 sgt = userptr->sgt; in gaudi_patch_dma_packet()
5257 while ((count + 1) < sgt->nents) { in gaudi_patch_dma_packet()
5275 ctl = le32_to_cpu(user_dma_pkt->ctl); in gaudi_patch_dma_packet()
5279 new_dma_pkt->ctl = cpu_to_le32(ctl); in gaudi_patch_dma_packet()
5280 new_dma_pkt->tsize = cpu_to_le32(len); in gaudi_patch_dma_packet()
5283 new_dma_pkt->src_addr = cpu_to_le64(dma_addr); in gaudi_patch_dma_packet()
5284 new_dma_pkt->dst_addr = cpu_to_le64(device_memory_addr); in gaudi_patch_dma_packet()
5286 new_dma_pkt->src_addr = cpu_to_le64(device_memory_addr); in gaudi_patch_dma_packet()
5287 new_dma_pkt->dst_addr = cpu_to_le64(dma_addr); in gaudi_patch_dma_packet()
5297 dev_err(hdev->dev, in gaudi_patch_dma_packet()
5299 return -EFAULT; in gaudi_patch_dma_packet()
5302 /* Fix the last DMA packet - wrcomp must be as the user set it */ in gaudi_patch_dma_packet()
5303 new_dma_pkt--; in gaudi_patch_dma_packet()
5304 new_dma_pkt->ctl |= cpu_to_le32(user_wrcomp_en_mask); in gaudi_patch_dma_packet()
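/*
 * A minimal sketch of the packet expansion above: one user LIN_DMA packet is
 * rewritten as one packet per mapped scatter-gather segment, and only the
 * last emitted packet keeps the user's write-completion bit so a single
 * completion fires for the whole transfer. Field names follow the
 * surrounding code; this assumes a host-to-device direction and at least one
 * mapped segment.
 */
static struct packet_lin_dma *
sketch_expand_dma_pkt(struct packet_lin_dma *out, struct sg_table *sgt,
		      u64 dev_addr, u32 ctl_no_wrcomp, u32 user_wrcomp_mask)
{
	struct scatterlist *sg;
	int i;

	for_each_sgtable_dma_sg(sgt, sg, i) {
		out->ctl = cpu_to_le32(ctl_no_wrcomp);
		out->tsize = cpu_to_le32(sg_dma_len(sg));
		out->src_addr = cpu_to_le64(sg_dma_address(sg));
		out->dst_addr = cpu_to_le64(dev_addr);
		dev_addr += sg_dma_len(sg);
		out++;
	}

	/* restore the user's wrcomp setting on the final packet only */
	out[-1].ctl |= cpu_to_le32(user_wrcomp_mask);

	return out;
}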
5319 while (cb_parsed_length < parser->user_cb_size) { in gaudi_patch_cb()
5325 user_pkt = parser->user_cb->kernel_address + cb_parsed_length; in gaudi_patch_cb()
5326 kernel_pkt = parser->patched_cb->kernel_address + in gaudi_patch_cb()
5330 (le64_to_cpu(user_pkt->header) & in gaudi_patch_cb()
5335 dev_err(hdev->dev, "Invalid packet id %u\n", pkt_id); in gaudi_patch_cb()
5336 rc = -EINVAL; in gaudi_patch_cb()
5342 if (cb_parsed_length > parser->user_cb_size) { in gaudi_patch_cb()
5343 dev_err(hdev->dev, in gaudi_patch_cb()
5345 rc = -EINVAL; in gaudi_patch_cb()
5359 dev_err(hdev->dev, in gaudi_patch_cb()
5361 rc = -EPERM; in gaudi_patch_cb()
5365 dev_err(hdev->dev, "User not allowed to use CP_DMA\n"); in gaudi_patch_cb()
5366 rc = -EPERM; in gaudi_patch_cb()
5370 dev_err(hdev->dev, "User not allowed to use STOP\n"); in gaudi_patch_cb()
5371 rc = -EPERM; in gaudi_patch_cb()
5388 dev_err(hdev->dev, "Invalid packet header 0x%x\n", in gaudi_patch_cb()
5390 rc = -EINVAL; in gaudi_patch_cb()
5415 if (parser->completion) in gaudi_parse_cb_mmu()
5416 parser->patched_cb_size = parser->user_cb_size + in gaudi_parse_cb_mmu()
5417 gaudi_get_patched_cb_extra_size(parser->user_cb_size); in gaudi_parse_cb_mmu()
5419 parser->patched_cb_size = parser->user_cb_size; in gaudi_parse_cb_mmu()
5421 rc = hl_cb_create(hdev, &hdev->kernel_mem_mgr, hdev->kernel_ctx, in gaudi_parse_cb_mmu()
5422 parser->patched_cb_size, false, false, in gaudi_parse_cb_mmu()
5426 dev_err(hdev->dev, in gaudi_parse_cb_mmu()
5432 parser->patched_cb = hl_cb_get(&hdev->kernel_mem_mgr, handle); in gaudi_parse_cb_mmu()
5434 if (!parser->patched_cb) { in gaudi_parse_cb_mmu()
5435 dev_crit(hdev->dev, "DMA CB handle invalid 0x%llx\n", handle); in gaudi_parse_cb_mmu()
5436 rc = -EFAULT; in gaudi_parse_cb_mmu()
5442 * "parser->user_cb_size <= parser->user_cb->size" was done in get_cb_from_cs_chunk() in gaudi_parse_cb_mmu()
5450 memcpy(parser->patched_cb->kernel_address, in gaudi_parse_cb_mmu()
5451 parser->user_cb->kernel_address, in gaudi_parse_cb_mmu()
5452 parser->user_cb_size); in gaudi_parse_cb_mmu()
5454 patched_cb_size = parser->patched_cb_size; in gaudi_parse_cb_mmu()
5457 user_cb = parser->user_cb; in gaudi_parse_cb_mmu()
5458 parser->user_cb = parser->patched_cb; in gaudi_parse_cb_mmu()
5460 parser->user_cb = user_cb; in gaudi_parse_cb_mmu()
5463 hl_cb_put(parser->patched_cb); in gaudi_parse_cb_mmu()
5467 if (patched_cb_size != parser->patched_cb_size) { in gaudi_parse_cb_mmu()
5468 dev_err(hdev->dev, "user CB size mismatch\n"); in gaudi_parse_cb_mmu()
5469 hl_cb_put(parser->patched_cb); in gaudi_parse_cb_mmu()
5470 rc = -EINVAL; in gaudi_parse_cb_mmu()
5481 hl_cb_destroy(&hdev->kernel_mem_mgr, handle); in gaudi_parse_cb_mmu()
5497 rc = hl_cb_create(hdev, &hdev->kernel_mem_mgr, hdev->kernel_ctx, in gaudi_parse_cb_no_mmu()
5498 parser->patched_cb_size, false, false, in gaudi_parse_cb_no_mmu()
5501 dev_err(hdev->dev, in gaudi_parse_cb_no_mmu()
5506 parser->patched_cb = hl_cb_get(&hdev->kernel_mem_mgr, handle); in gaudi_parse_cb_no_mmu()
5508 if (!parser->patched_cb) { in gaudi_parse_cb_no_mmu()
5509 dev_crit(hdev->dev, "DMA CB handle invalid 0x%llx\n", handle); in gaudi_parse_cb_no_mmu()
5510 rc = -EFAULT; in gaudi_parse_cb_no_mmu()
5517 hl_cb_put(parser->patched_cb); in gaudi_parse_cb_no_mmu()
5526 hl_cb_destroy(&hdev->kernel_mem_mgr, handle); in gaudi_parse_cb_no_mmu()
5530 hl_userptr_delete_list(hdev, parser->job_userptr_list); in gaudi_parse_cb_no_mmu()
5537 struct asic_fixed_properties *asic_prop = &hdev->asic_prop; in gaudi_parse_cb_no_ext_queue()
5538 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_parse_cb_no_ext_queue()
5541 if ((parser->hw_queue_id >= GAUDI_QUEUE_ID_NIC_0_0) && in gaudi_parse_cb_no_ext_queue()
5542 (parser->hw_queue_id <= GAUDI_QUEUE_ID_NIC_9_3)) { in gaudi_parse_cb_no_ext_queue()
5543 nic_queue_offset = parser->hw_queue_id - GAUDI_QUEUE_ID_NIC_0_0; in gaudi_parse_cb_no_ext_queue()
5546 if (!(gaudi->hw_cap_initialized & nic_mask_q_id)) { in gaudi_parse_cb_no_ext_queue()
5547 dev_err(hdev->dev, "h/w queue %d is disabled\n", parser->hw_queue_id); in gaudi_parse_cb_no_ext_queue()
5548 return -EINVAL; in gaudi_parse_cb_no_ext_queue()
5553 if (hl_mem_area_inside_range((u64) (uintptr_t) parser->user_cb, in gaudi_parse_cb_no_ext_queue()
5554 parser->user_cb_size, in gaudi_parse_cb_no_ext_queue()
5555 asic_prop->sram_user_base_address, in gaudi_parse_cb_no_ext_queue()
5556 asic_prop->sram_end_address)) in gaudi_parse_cb_no_ext_queue()
5559 if (hl_mem_area_inside_range((u64) (uintptr_t) parser->user_cb, in gaudi_parse_cb_no_ext_queue()
5560 parser->user_cb_size, in gaudi_parse_cb_no_ext_queue()
5561 asic_prop->dram_user_base_address, in gaudi_parse_cb_no_ext_queue()
5562 asic_prop->dram_end_address)) in gaudi_parse_cb_no_ext_queue()
5566 if (hl_mem_area_inside_range((u64) (uintptr_t) parser->user_cb, in gaudi_parse_cb_no_ext_queue()
5567 parser->user_cb_size, in gaudi_parse_cb_no_ext_queue()
5568 asic_prop->pmmu.start_addr, in gaudi_parse_cb_no_ext_queue()
5569 asic_prop->pmmu.end_addr)) in gaudi_parse_cb_no_ext_queue()
5572 dev_err(hdev->dev, in gaudi_parse_cb_no_ext_queue()
5574 parser->user_cb, parser->user_cb_size); in gaudi_parse_cb_no_ext_queue()
5576 return -EFAULT; in gaudi_parse_cb_no_ext_queue()
5581 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_cs_parser()
5583 if (parser->queue_type == QUEUE_TYPE_INT) in gaudi_cs_parser()
5586 if (gaudi->hw_cap_initialized & HW_CAP_MMU) in gaudi_cs_parser()
5596 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_add_end_of_cb_packets()
5603 cq_pkt = kernel_address + len - (sizeof(struct packet_msg_prot) * 2); in gaudi_add_end_of_cb_packets()
5606 cq_padding->ctl = cpu_to_le32(FIELD_PREP(GAUDI_PKT_CTL_OPCODE_MASK, PACKET_NOP)); in gaudi_add_end_of_cb_packets()
5616 cq_pkt->ctl = cpu_to_le32(tmp); in gaudi_add_end_of_cb_packets()
5617 cq_pkt->value = cpu_to_le32(cq_val); in gaudi_add_end_of_cb_packets()
5618 cq_pkt->addr = cpu_to_le64(cq_addr); in gaudi_add_end_of_cb_packets()
5624 cq_pkt->ctl = cpu_to_le32(tmp); in gaudi_add_end_of_cb_packets()
5625 cq_pkt->value = cpu_to_le32(1); in gaudi_add_end_of_cb_packets()
5627 if (gaudi->multi_msi_mode) in gaudi_add_end_of_cb_packets()
5632 cq_pkt->addr = cpu_to_le64(CFG_BASE + msi_addr); in gaudi_add_end_of_cb_packets()
5651 return -EFAULT; in gaudi_memset_device_memory()
5653 lin_dma_pkt = cb->kernel_address; in gaudi_memset_device_memory()
5663 lin_dma_pkt->ctl = cpu_to_le32(ctl); in gaudi_memset_device_memory()
5664 lin_dma_pkt->src_addr = cpu_to_le64(val); in gaudi_memset_device_memory()
5665 lin_dma_pkt->dst_addr |= cpu_to_le64(addr); in gaudi_memset_device_memory()
5666 lin_dma_pkt->tsize = cpu_to_le32(size); in gaudi_memset_device_memory()
5670 dev_err(hdev->dev, "Failed to allocate a new job\n"); in gaudi_memset_device_memory()
5671 rc = -ENOMEM; in gaudi_memset_device_memory()
5677 if (err_cause && !hdev->init_done) { in gaudi_memset_device_memory()
5678 dev_dbg(hdev->dev, in gaudi_memset_device_memory()
5684 job->id = 0; in gaudi_memset_device_memory()
5685 job->user_cb = cb; in gaudi_memset_device_memory()
5686 atomic_inc(&job->user_cb->cs_cnt); in gaudi_memset_device_memory()
5687 job->user_cb_size = cb_size; in gaudi_memset_device_memory()
5688 job->hw_queue_id = GAUDI_QUEUE_ID_DMA_0_0; in gaudi_memset_device_memory()
5689 job->patched_cb = job->user_cb; in gaudi_memset_device_memory()
5690 job->job_cb_size = job->user_cb_size + sizeof(struct packet_msg_prot); in gaudi_memset_device_memory()
5697 atomic_dec(&cb->cs_cnt); in gaudi_memset_device_memory()
5702 dev_err(hdev->dev, "DMA Failed, cause 0x%x\n", err_cause); in gaudi_memset_device_memory()
5703 rc = -EIO; in gaudi_memset_device_memory()
5704 if (!hdev->init_done) { in gaudi_memset_device_memory()
5705 dev_dbg(hdev->dev, in gaudi_memset_device_memory()
5714 hl_cb_destroy(&hdev->kernel_mem_mgr, cb->buf->handle); in gaudi_memset_device_memory()
5731 dev_err(hdev->dev, "CB size must be smaller than %uMB", SZ_2M); in gaudi_memset_registers()
5732 return -ENOMEM; in gaudi_memset_registers()
5737 return -EFAULT; in gaudi_memset_registers()
5739 pkt = cb->kernel_address; in gaudi_memset_registers()
5748 pkt->ctl = cpu_to_le32(ctl); in gaudi_memset_registers()
5749 pkt->value = cpu_to_le32(val); in gaudi_memset_registers()
5750 pkt->addr = cpu_to_le64(reg_base + (i * 4)); in gaudi_memset_registers()
5755 dev_err(hdev->dev, "Failed to allocate a new job\n"); in gaudi_memset_registers()
5756 rc = -ENOMEM; in gaudi_memset_registers()
5760 job->id = 0; in gaudi_memset_registers()
5761 job->user_cb = cb; in gaudi_memset_registers()
5762 atomic_inc(&job->user_cb->cs_cnt); in gaudi_memset_registers()
5763 job->user_cb_size = cb_size; in gaudi_memset_registers()
5764 job->hw_queue_id = GAUDI_QUEUE_ID_DMA_0_0; in gaudi_memset_registers()
5765 job->patched_cb = job->user_cb; in gaudi_memset_registers()
5766 job->job_cb_size = cb_size; in gaudi_memset_registers()
5773 atomic_dec(&cb->cs_cnt); in gaudi_memset_registers()
5777 hl_cb_destroy(&hdev->kernel_mem_mgr, cb->buf->handle); in gaudi_memset_registers()
5792 dev_err(hdev->dev, "failed resetting SM registers"); in gaudi_restore_sm_registers()
5793 return -ENOMEM; in gaudi_restore_sm_registers()
5800 dev_err(hdev->dev, "failed resetting SM registers"); in gaudi_restore_sm_registers()
5801 return -ENOMEM; in gaudi_restore_sm_registers()
5808 dev_err(hdev->dev, "failed resetting SM registers"); in gaudi_restore_sm_registers()
5809 return -ENOMEM; in gaudi_restore_sm_registers()
5816 dev_err(hdev->dev, "failed resetting SM registers"); in gaudi_restore_sm_registers()
5817 return -ENOMEM; in gaudi_restore_sm_registers()
5824 dev_err(hdev->dev, "failed resetting SM registers"); in gaudi_restore_sm_registers()
5825 return -ENOMEM; in gaudi_restore_sm_registers()
5832 dev_err(hdev->dev, "failed resetting SM registers"); in gaudi_restore_sm_registers()
5833 return -ENOMEM; in gaudi_restore_sm_registers()
5838 num_regs = NUM_OF_SOB_IN_BLOCK - GAUDI_FIRST_AVAILABLE_W_S_SYNC_OBJECT; in gaudi_restore_sm_registers()
5841 dev_err(hdev->dev, "failed resetting SM registers"); in gaudi_restore_sm_registers()
5842 return -ENOMEM; in gaudi_restore_sm_registers()
5847 num_regs = NUM_OF_MONITORS_IN_BLOCK - GAUDI_FIRST_AVAILABLE_W_S_MONITOR; in gaudi_restore_sm_registers()
5850 dev_err(hdev->dev, "failed resetting SM registers"); in gaudi_restore_sm_registers()
5851 return -ENOMEM; in gaudi_restore_sm_registers()
5859 u32 sob_delta = mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_1 - in gaudi_restore_dma_registers()
5875 /* For DMAs 2-7, need to restore WR_AWUSER_31_11 as it can be in gaudi_restore_dma_registers()
5895 qman_offset = i * (mmMME2_QM_BASE - mmMME0_QM_BASE); in gaudi_restore_qm_registers()
5932 u32 size = hdev->asic_prop.mmu_pgt_size + in gaudi_mmu_clear_pgt_range()
5933 hdev->asic_prop.mmu_cache_mng_size; in gaudi_mmu_clear_pgt_range()
5934 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_mmu_clear_pgt_range()
5935 u64 addr = hdev->asic_prop.mmu_pgt_addr; in gaudi_mmu_clear_pgt_range()
5937 if (!(gaudi->hw_cap_initialized & HW_CAP_MMU)) in gaudi_mmu_clear_pgt_range()
5974 dev_err(hdev->dev, in gaudi_dma_core_transfer()
5975 "DMA %d timed-out during reading of 0x%llx\n", in gaudi_dma_core_transfer()
5977 return -EIO; in gaudi_dma_core_transfer()
5983 dev_err(hdev->dev, "DMA Failed, cause 0x%x\n", err_cause); in gaudi_dma_core_transfer()
5984 dev_dbg(hdev->dev, in gaudi_dma_core_transfer()
5989 return -EIO; in gaudi_dma_core_transfer()
6009 return -ENOMEM; in gaudi_debugfs_read_dma()
6011 hdev->asic_funcs->hw_queues_lock(hdev); in gaudi_debugfs_read_dma()
6033 dev_err_ratelimited(hdev->dev, in gaudi_debugfs_read_dma()
6035 rc = -EAGAIN; in gaudi_debugfs_read_dma()
6053 dev_dbg(hdev->dev, in gaudi_debugfs_read_dma()
6080 size_left -= SZ_2M; in gaudi_debugfs_read_dma()
6093 hdev->asic_funcs->hw_queues_unlock(hdev); in gaudi_debugfs_read_dma()
6102 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_read_pte()
6104 if (hdev->reset_info.hard_reset_pending) in gaudi_read_pte()
6107 return readq(hdev->pcie_bar[HBM_BAR_ID] + in gaudi_read_pte()
6108 (addr - gaudi->hbm_bar_cur_addr)); in gaudi_read_pte()
6113 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_write_pte()
6115 if (hdev->reset_info.hard_reset_pending) in gaudi_write_pte()
6118 writeq(val, hdev->pcie_bar[HBM_BAR_ID] + in gaudi_write_pte()
6119 (addr - gaudi->hbm_bar_cur_addr)); in gaudi_write_pte()
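/*
 * A minimal sketch of the sliding-BAR access above: the driver tracks the
 * DRAM address currently mapped at the HBM BAR (hbm_bar_cur_addr) and turns
 * a device address into an MMIO offset inside that window. Moving the window
 * when the address falls outside it is elided here; helper names are
 * illustrative.
 */
static u64 sketch_read_pte(struct hl_device *hdev, u64 addr, u64 bar_base)
{
	return readq(hdev->pcie_bar[HBM_BAR_ID] + (addr - bar_base));
}

static void sketch_write_pte(struct hl_device *hdev, u64 addr, u64 bar_base,
			     u64 val)
{
	writeq(val, hdev->pcie_bar[HBM_BAR_ID] + (addr - bar_base));
}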
6131 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_mmu_prepare()
6133 if (!(gaudi->hw_cap_initialized & HW_CAP_MMU)) in gaudi_mmu_prepare()
6137 dev_crit(hdev->dev, "asid %u is too big\n", asid); in gaudi_mmu_prepare()
6286 if (gaudi->hw_cap_initialized & HW_CAP_NIC0) { in gaudi_mmu_prepare()
6299 if (gaudi->hw_cap_initialized & HW_CAP_NIC1) { in gaudi_mmu_prepare()
6312 if (gaudi->hw_cap_initialized & HW_CAP_NIC2) { in gaudi_mmu_prepare()
6325 if (gaudi->hw_cap_initialized & HW_CAP_NIC3) { in gaudi_mmu_prepare()
6338 if (gaudi->hw_cap_initialized & HW_CAP_NIC4) { in gaudi_mmu_prepare()
6351 if (gaudi->hw_cap_initialized & HW_CAP_NIC5) { in gaudi_mmu_prepare()
6364 if (gaudi->hw_cap_initialized & HW_CAP_NIC6) { in gaudi_mmu_prepare()
6377 if (gaudi->hw_cap_initialized & HW_CAP_NIC7) { in gaudi_mmu_prepare()
6390 if (gaudi->hw_cap_initialized & HW_CAP_NIC8) { in gaudi_mmu_prepare()
6403 if (gaudi->hw_cap_initialized & HW_CAP_NIC9) { in gaudi_mmu_prepare()
6430 if (hdev->pldm) in gaudi_send_job_on_qman0()
6435 if (!hdev->asic_funcs->is_device_idle(hdev, NULL, 0, NULL)) { in gaudi_send_job_on_qman0()
6436 dev_err_ratelimited(hdev->dev, in gaudi_send_job_on_qman0()
6438 return -EBUSY; in gaudi_send_job_on_qman0()
6443 dev_err(hdev->dev, in gaudi_send_job_on_qman0()
6445 return -ENOMEM; in gaudi_send_job_on_qman0()
6448 cb = job->patched_cb; in gaudi_send_job_on_qman0()
6450 fence_pkt = cb->kernel_address + in gaudi_send_job_on_qman0()
6451 job->job_cb_size - sizeof(struct packet_msg_prot); in gaudi_send_job_on_qman0()
6457 fence_pkt->ctl = cpu_to_le32(tmp); in gaudi_send_job_on_qman0()
6458 fence_pkt->value = cpu_to_le32(GAUDI_QMAN0_FENCE_VAL); in gaudi_send_job_on_qman0()
6459 fence_pkt->addr = cpu_to_le64(fence_dma_addr); in gaudi_send_job_on_qman0()
6467 job->job_cb_size, cb->bus_address); in gaudi_send_job_on_qman0()
6469 dev_err(hdev->dev, "Failed to send CB on QMAN0, %d\n", rc); in gaudi_send_job_on_qman0()
6479 if (rc == -ETIMEDOUT) { in gaudi_send_job_on_qman0()
6480 dev_err(hdev->dev, "QMAN0 Job timeout (0x%x)\n", tmp); in gaudi_send_job_on_qman0()
6711 dev_err(hdev->dev, in gaudi_get_razwi_initiator_name()
6727 dev_err_ratelimited(hdev->dev, in gaudi_print_and_get_razwi_info()
6734 dev_err_ratelimited(hdev->dev, in gaudi_print_and_get_razwi_info()
6743 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_print_and_get_mmu_error_info()
6746 if (!(gaudi->hw_cap_initialized & HW_CAP_MMU)) in gaudi_print_and_get_mmu_error_info()
6755 dev_err_ratelimited(hdev->dev, "MMU page fault on va 0x%llx\n", *addr); in gaudi_print_and_get_mmu_error_info()
6767 dev_err_ratelimited(hdev->dev, "MMU access error on va 0x%llx\n", *addr); in gaudi_print_and_get_mmu_error_info()
6775 * +-------------------+------------------------------------------------------+
6778 * +-------------------+------------------------------------------------------+
6779 * |  0xF30 - 0xF3F    |ECC single error indication (1 bit per memory wrapper)|
6784 * +-------------------+------------------------------------------------------+
6785 * |  0xF40 - 0xF4F    |ECC double error indication (1 bit per memory wrapper)|
6790 * +-------------------+------------------------------------------------------+
6799 num_mem_regs = params->num_memories / 32 + in gaudi_extract_ecc_info()
6800 ((params->num_memories % 32) ? 1 : 0); in gaudi_extract_ecc_info()
6802 if (params->block_address >= CFG_BASE) in gaudi_extract_ecc_info()
6803 params->block_address -= CFG_BASE; in gaudi_extract_ecc_info()
6805 if (params->derr) in gaudi_extract_ecc_info()
6806 err_addr = params->block_address + GAUDI_ECC_DERR0_OFFSET; in gaudi_extract_ecc_info()
6808 err_addr = params->block_address + GAUDI_ECC_SERR0_OFFSET; in gaudi_extract_ecc_info()
6825 dev_err(hdev->dev, "ECC error information cannot be found\n"); in gaudi_extract_ecc_info()
6826 return -EINVAL; in gaudi_extract_ecc_info()
6829 WREG32(params->block_address + GAUDI_ECC_MEM_SEL_OFFSET, in gaudi_extract_ecc_info()
6833 RREG32(params->block_address + GAUDI_ECC_ADDRESS_OFFSET); in gaudi_extract_ecc_info()
6835 RREG32(params->block_address + GAUDI_ECC_SYNDROME_OFFSET); in gaudi_extract_ecc_info()
6838 reg = RREG32(params->block_address + GAUDI_ECC_MEM_INFO_CLR_OFFSET); in gaudi_extract_ecc_info()
6839 if (params->derr) in gaudi_extract_ecc_info()
6844 WREG32(params->block_address + GAUDI_ECC_MEM_INFO_CLR_OFFSET, reg); in gaudi_extract_ecc_info()
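/*
 * A minimal sketch of the indication-register scan above: num_memories
 * wrappers are covered by ceil(num_memories / 32) 32-bit registers (the
 * num_mem_regs computation above is exactly DIV_ROUND_UP), and the failing
 * wrapper index is the register index times 32 plus the set bit. RREG32() is
 * the driver's MMIO read macro; the bit-to-wrapper ordering is simplified
 * here and the helper name is illustrative.
 */
static int sketch_find_ecc_wrapper(u64 err_addr, u32 num_memories)
{
	u32 num_regs = DIV_ROUND_UP(num_memories, 32);
	u32 i, reg;

	for (i = 0 ; i < num_regs ; i++) {
		reg = RREG32(err_addr + i * 4);
		if (reg)
			return i * 32 + __ffs(reg);
	}

	return -EINVAL;	/* no error indication found */
}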
6850 * gaudi_queue_idx_dec - decrement queue index (pi/ci) and handle wrap
6859 u32 mask = q_len - 1; in gaudi_queue_idx_dec()
6862 * modular decrement is equivalent to adding (q_len - 1) in gaudi_queue_idx_dec()
6864 * range [0, q_len - 1] in gaudi_queue_idx_dec()
6866 return (idx + q_len - 1) & mask; in gaudi_queue_idx_dec()
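/*
 * The decrement above relies on q_len being a power of two: adding
 * (q_len - 1) modulo q_len equals subtracting one, and the modulo reduces to
 * an AND with the mask. The wrap case worked out:
 *
 *   idx = 0, q_len = 8:  (0 + 8 - 1) & 7 = 7   (wraps to the last slot)
 *   idx = 5, q_len = 8:  (5 + 8 - 1) & 7 = 4
 *
 * A generic form of the same helper (name illustrative):
 */
static inline u32 sketch_queue_idx_dec(u32 idx, u32 q_len)
{
	return (idx + q_len - 1) & (q_len - 1);
}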
6870 * gaudi_handle_sw_config_stream_data - print SW config stream data
6883 cq_ptr_lo_off = mmTPC0_QM_CQ_PTR_LO_1 - mmTPC0_QM_CQ_PTR_LO_0; in gaudi_handle_sw_config_stream_data()
6885 cq_ptr_lo = qman_base + (mmTPC0_QM_CQ_PTR_LO_0 - mmTPC0_QM_BASE) + in gaudi_handle_sw_config_stream_data()
6888 (mmTPC0_QM_CQ_PTR_HI_0 - mmTPC0_QM_CQ_PTR_LO_0); in gaudi_handle_sw_config_stream_data()
6890 (mmTPC0_QM_CQ_TSIZE_0 - mmTPC0_QM_CQ_PTR_LO_0); in gaudi_handle_sw_config_stream_data()
6894 dev_info(hdev->dev, "stop on err: stream: %u, addr: %#llx, size: %u\n", in gaudi_handle_sw_config_stream_data()
6898 hdev->captured_err_info.undef_opcode.cq_addr = cq_ptr; in gaudi_handle_sw_config_stream_data()
6899 hdev->captured_err_info.undef_opcode.cq_size = size; in gaudi_handle_sw_config_stream_data()
6900 hdev->captured_err_info.undef_opcode.stream_id = stream; in gaudi_handle_sw_config_stream_data()
6905 * gaudi_handle_last_pqes_on_err - print last PQEs on error
6924 q = &hdev->kernel_queues[qid_base + stream]; in gaudi_handle_last_pqes_on_err()
6926 qm_ci_stream_off = mmTPC0_QM_PQ_CI_1 - mmTPC0_QM_PQ_CI_0; in gaudi_handle_last_pqes_on_err()
6927 pq_ci = qman_base + (mmTPC0_QM_PQ_CI_0 - mmTPC0_QM_BASE) + in gaudi_handle_last_pqes_on_err()
6930 queue_len = (q->queue_type == QUEUE_TYPE_INT) ? in gaudi_handle_last_pqes_on_err()
6931 q->int_queue_len : HL_QUEUE_LENGTH; in gaudi_handle_last_pqes_on_err()
6933 hdev->asic_funcs->hw_queues_lock(hdev); in gaudi_handle_last_pqes_on_err()
6940 /* we should start printing from ci - 1 */ in gaudi_handle_last_pqes_on_err()
6948 bd = q->kernel_address; in gaudi_handle_last_pqes_on_err()
6951 len = le32_to_cpu(bd->len); in gaudi_handle_last_pqes_on_err()
6952 /* len 0 means uninitialized entry - break */ in gaudi_handle_last_pqes_on_err()
6956 addr[i] = le64_to_cpu(bd->ptr); in gaudi_handle_last_pqes_on_err()
6958 dev_info(hdev->dev, "stop on err PQE(stream %u): ci: %u, addr: %#llx, size: %u\n", in gaudi_handle_last_pqes_on_err()
6966 struct undefined_opcode_info *undef_opcode = &hdev->captured_err_info.undef_opcode; in gaudi_handle_last_pqes_on_err()
6967 u32 arr_idx = undef_opcode->cb_addr_streams_len; in gaudi_handle_last_pqes_on_err()
6970 undef_opcode->timestamp = ktime_get(); in gaudi_handle_last_pqes_on_err()
6971 undef_opcode->engine_id = gaudi_queue_id_to_engine_id[qid_base]; in gaudi_handle_last_pqes_on_err()
6974 memcpy(undef_opcode->cb_addr_streams[arr_idx], addr, sizeof(addr)); in gaudi_handle_last_pqes_on_err()
6975 undef_opcode->cb_addr_streams_len++; in gaudi_handle_last_pqes_on_err()
6978 hdev->asic_funcs->hw_queues_unlock(hdev); in gaudi_handle_last_pqes_on_err()
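/*
 * A minimal sketch of the PQE walk above: start one slot before the current
 * CI and step backwards with the power-of-two modular decrement, stopping at
 * the first uninitialized (len == 0) descriptor. The hl_bd layout (ptr/len)
 * follows the code above; the print is simplified and the helper name is
 * illustrative.
 */
static void sketch_print_last_pqes(struct hl_hw_queue *q, u32 ci, u32 q_len,
				   int count)
{
	struct hl_bd *bd = q->kernel_address;
	int i;

	ci = (ci + q_len - 1) & (q_len - 1);	/* start from ci - 1 */

	for (i = 0 ; i < count ; i++) {
		u32 len = le32_to_cpu(bd[ci].len);

		if (!len)	/* uninitialized entry - stop */
			break;

		pr_info("PQE ci %u: addr %#llx, size %u\n",
			ci, le64_to_cpu(bd[ci].ptr), len);

		ci = (ci + q_len - 1) & (q_len - 1);
	}
}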
6982 * handle_qman_data_on_err - extract QMAN data on error
7005 /* handle Lower-CP */ in handle_qman_data_on_err()
7023 glbl_sts_addr = qman_base + (mmTPC0_QM_GLBL_STS1_0 - mmTPC0_QM_BASE); in gaudi_handle_qman_err_generic()
7024 arb_err_addr = qman_base + (mmTPC0_QM_ARB_ERR_CAUSE - mmTPC0_QM_BASE); in gaudi_handle_qman_err_generic()
7041 dev_err_ratelimited(hdev->dev, in gaudi_handle_qman_err_generic()
7050 hdev->captured_err_info.undef_opcode.write_enable) { in gaudi_handle_qman_err_generic()
7051 memset(&hdev->captured_err_info.undef_opcode, 0, in gaudi_handle_qman_err_generic()
7052 sizeof(hdev->captured_err_info.undef_opcode)); in gaudi_handle_qman_err_generic()
7054 hdev->captured_err_info.undef_opcode.write_enable = false; in gaudi_handle_qman_err_generic()
7059 if (!hdev->stop_on_err) in gaudi_handle_qman_err_generic()
7072 dev_err_ratelimited(hdev->dev, in gaudi_handle_qman_err_generic()
7083 u32 index = event_type - GAUDI_EVENT_DMA_IF_SEI_0; in gaudi_print_sm_sei_info()
7088 switch (sei_data->sei_cause) { in gaudi_print_sm_sei_info()
7090 dev_err_ratelimited(hdev->dev, in gaudi_print_sm_sei_info()
7093 le32_to_cpu(sei_data->sei_log)); in gaudi_print_sm_sei_info()
7096 dev_err_ratelimited(hdev->dev, in gaudi_print_sm_sei_info()
7097 "%s SEI Error: Unaligned 4B LBW access, monitor agent address low - %#x", in gaudi_print_sm_sei_info()
7099 le32_to_cpu(sei_data->sei_log)); in gaudi_print_sm_sei_info()
7102 dev_err_ratelimited(hdev->dev, in gaudi_print_sm_sei_info()
7105 le32_to_cpu(sei_data->sei_log)); in gaudi_print_sm_sei_info()
7108 dev_err_ratelimited(hdev->dev, "Unknown SM SEI cause %u", in gaudi_print_sm_sei_info()
7109 le32_to_cpu(sei_data->sei_log)); in gaudi_print_sm_sei_info()
7123 if (hdev->asic_prop.fw_security_enabled) { in gaudi_handle_ecc_event()
7134 index = event_type - GAUDI_EVENT_TPC0_SERR; in gaudi_handle_ecc_event()
7141 index = event_type - GAUDI_EVENT_TPC0_DERR; in gaudi_handle_ecc_event()
7152 index = (event_type - GAUDI_EVENT_MME0_ACC_SERR) / 4; in gaudi_handle_ecc_event()
7162 index = (event_type - GAUDI_EVENT_MME0_ACC_DERR) / 4; in gaudi_handle_ecc_event()
7172 index = (event_type - GAUDI_EVENT_MME0_SBAB_SERR) / 4; in gaudi_handle_ecc_event()
7183 index = (event_type - GAUDI_EVENT_MME0_SBAB_DERR) / 4; in gaudi_handle_ecc_event()
7196 ecc_address = le64_to_cpu(ecc_data->ecc_address); in gaudi_handle_ecc_event()
7197 ecc_syndrom = le64_to_cpu(ecc_data->ecc_syndrom); in gaudi_handle_ecc_event()
7198 memory_wrapper_idx = ecc_data->memory_wrapper_idx; in gaudi_handle_ecc_event()
7206 dev_err(hdev->dev, in gaudi_handle_ecc_event()
7220 index = event_type - GAUDI_EVENT_TPC0_QM; in gaudi_handle_qman_err()
7237 index = event_type - GAUDI_EVENT_DMA0_QM; in gaudi_handle_qman_err()
7322 dev_err_ratelimited(hdev->dev, "Received H/W interrupt %d [\"%s\"]\n", in gaudi_print_irq_info()
7330 rc = atomic_cmpxchg(&hdev->captured_err_info.razwi.write_enable, 1, 0); in gaudi_print_irq_info()
7332 hdev->captured_err_info.razwi.timestamp = ktime_get(); in gaudi_print_irq_info()
7333 hdev->captured_err_info.razwi.addr = razwi_addr; in gaudi_print_irq_info()
7334 hdev->captured_err_info.razwi.engine_id_1 = engine_id_1; in gaudi_print_irq_info()
7335 hdev->captured_err_info.razwi.engine_id_2 = engine_id_2; in gaudi_print_irq_info()
7340 hdev->captured_err_info.razwi.non_engine_initiator = in gaudi_print_irq_info()
7342 hdev->captured_err_info.razwi.type = razwi_type; in gaudi_print_irq_info()
7351 struct hl_hw_queue *q = &hdev->kernel_queues[GAUDI_QUEUE_ID_CPU_PQ]; in gaudi_print_out_of_sync_info()
7353 dev_err(hdev->dev, "Out of sync with FW, FW: pi=%u, ci=%u, LKD: pi=%u, ci=%u\n", in gaudi_print_out_of_sync_info()
7354 sync_err->pi, sync_err->ci, q->pi, atomic_read(&q->ci)); in gaudi_print_out_of_sync_info()
7360 dev_err(hdev->dev, in gaudi_print_fw_alive_info()
7362 (fw_alive->severity == FW_ALIVE_SEVERITY_MINOR) ? in gaudi_print_fw_alive_info()
7363 "Minor" : "Critical", fw_alive->process_id, in gaudi_print_fw_alive_info()
7364 fw_alive->thread_id, fw_alive->uptime_seconds); in gaudi_print_fw_alive_info()
7372 u16 nic_id = event_type - GAUDI_EVENT_NIC_SEI_0; in gaudi_print_nic_axi_irq_info()
7374 switch (eq_nic_sei->axi_error_cause) { in gaudi_print_nic_axi_irq_info()
7397 dev_err(hdev->dev, "unknown NIC AXI cause %d\n", in gaudi_print_nic_axi_irq_info()
7398 eq_nic_sei->axi_error_cause); in gaudi_print_nic_axi_irq_info()
7404 eq_nic_sei->id); in gaudi_print_nic_axi_irq_info()
7405 dev_err_ratelimited(hdev->dev, "Received H/W interrupt %d [\"%s\"]\n", in gaudi_print_nic_axi_irq_info()
7411 /* GAUDI doesn't support any reset except hard-reset */ in gaudi_compute_reset_late_init()
7412 return -EPERM; in gaudi_compute_reset_late_init()
7421 if (hdev->asic_prop.fw_app_cpu_boot_dev_sts0 & in gaudi_hbm_read_interrupts()
7424 dev_err(hdev->dev, "No FW ECC data"); in gaudi_hbm_read_interrupts()
7429 le32_to_cpu(hbm_ecc_data->hbm_ecc_info)); in gaudi_hbm_read_interrupts()
7431 le32_to_cpu(hbm_ecc_data->hbm_ecc_info)); in gaudi_hbm_read_interrupts()
7433 le32_to_cpu(hbm_ecc_data->hbm_ecc_info)); in gaudi_hbm_read_interrupts()
7435 le32_to_cpu(hbm_ecc_data->hbm_ecc_info)); in gaudi_hbm_read_interrupts()
7437 le32_to_cpu(hbm_ecc_data->hbm_ecc_info)); in gaudi_hbm_read_interrupts()
7439 le32_to_cpu(hbm_ecc_data->hbm_ecc_info)); in gaudi_hbm_read_interrupts()
7441 le32_to_cpu(hbm_ecc_data->hbm_ecc_info)); in gaudi_hbm_read_interrupts()
7443 dev_err(hdev->dev, in gaudi_hbm_read_interrupts()
7446 dev_err(hdev->dev, in gaudi_hbm_read_interrupts()
7448 device, ch, hbm_ecc_data->first_addr, type, in gaudi_hbm_read_interrupts()
7449 hbm_ecc_data->sec_cont_cnt, hbm_ecc_data->sec_cnt, in gaudi_hbm_read_interrupts()
7450 hbm_ecc_data->dec_cnt); in gaudi_hbm_read_interrupts()
7454 if (hdev->asic_prop.fw_security_enabled) { in gaudi_hbm_read_interrupts()
7455 dev_info(hdev->dev, "Cannot access MC regs for ECC data while security is enabled\n"); in gaudi_hbm_read_interrupts()
7464 rc = -EIO; in gaudi_hbm_read_interrupts()
7465 dev_err(hdev->dev, in gaudi_hbm_read_interrupts()
7472 dev_err(hdev->dev, in gaudi_hbm_read_interrupts()
7484 rc = -EIO; in gaudi_hbm_read_interrupts()
7485 dev_err(hdev->dev, in gaudi_hbm_read_interrupts()
7492 dev_err(hdev->dev, in gaudi_hbm_read_interrupts()
7513 rc = -EIO; in gaudi_hbm_read_interrupts()
7514 dev_err(hdev->dev, in gaudi_hbm_read_interrupts()
7521 rc = -EIO; in gaudi_hbm_read_interrupts()
7522 dev_err(hdev->dev, in gaudi_hbm_read_interrupts()
7564 dev_err_ratelimited(hdev->dev, in gaudi_tpc_read_interrupts()
7568 /* If this is a QM error, we need to soft-reset */ in gaudi_tpc_read_interrupts()
7581 return (tpc_dec_event_type - GAUDI_EVENT_TPC0_DEC) >> 1; in tpc_dec_event_to_tpc_id()
7586 return (tpc_dec_event_type - GAUDI_EVENT_TPC0_KRN_ERR) / 6; in tpc_krn_event_to_tpc_id()
7593 mutex_lock(&hdev->clk_throttling.lock); in gaudi_print_clk_change_info()
7597 hdev->clk_throttling.current_reason |= HL_CLK_THROTTLE_POWER; in gaudi_print_clk_change_info()
7598 hdev->clk_throttling.aggregated_reason |= HL_CLK_THROTTLE_POWER; in gaudi_print_clk_change_info()
7599 hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_POWER].start = ktime_get(); in gaudi_print_clk_change_info()
7600 hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_POWER].end = zero_time; in gaudi_print_clk_change_info()
7601 dev_info_ratelimited(hdev->dev, in gaudi_print_clk_change_info()
7606 hdev->clk_throttling.current_reason &= ~HL_CLK_THROTTLE_POWER; in gaudi_print_clk_change_info()
7607 hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_POWER].end = ktime_get(); in gaudi_print_clk_change_info()
7608 dev_info_ratelimited(hdev->dev, in gaudi_print_clk_change_info()
7613 hdev->clk_throttling.current_reason |= HL_CLK_THROTTLE_THERMAL; in gaudi_print_clk_change_info()
7614 hdev->clk_throttling.aggregated_reason |= HL_CLK_THROTTLE_THERMAL; in gaudi_print_clk_change_info()
7615 hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_THERMAL].start = ktime_get(); in gaudi_print_clk_change_info()
7616 hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_THERMAL].end = zero_time; in gaudi_print_clk_change_info()
7617 dev_info_ratelimited(hdev->dev, in gaudi_print_clk_change_info()
7622 hdev->clk_throttling.current_reason &= ~HL_CLK_THROTTLE_THERMAL; in gaudi_print_clk_change_info()
7623 hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_THERMAL].end = ktime_get(); in gaudi_print_clk_change_info()
7624 dev_info_ratelimited(hdev->dev, in gaudi_print_clk_change_info()
7629 dev_err(hdev->dev, "Received invalid clock change event %d\n", in gaudi_print_clk_change_info()
7634 mutex_unlock(&hdev->clk_throttling.lock); in gaudi_print_clk_change_info()
7639 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_handle_eqe()
7640 u64 data = le64_to_cpu(eq_entry->data[0]), event_mask = 0; in gaudi_handle_eqe()
7641 u32 ctl = le32_to_cpu(eq_entry->hdr.ctl); in gaudi_handle_eqe()
7650 dev_err(hdev->dev, "Event type %u exceeds maximum of %u", in gaudi_handle_eqe()
7651 event_type, GAUDI_EVENT_SIZE - 1); in gaudi_handle_eqe()
7655 gaudi->events_stat[event_type]++; in gaudi_handle_eqe()
7656 gaudi->events_stat_aggregate[event_type]++; in gaudi_handle_eqe()
7683 gaudi_handle_ecc_event(hdev, event_type, &eq_entry->ecc_data); in gaudi_handle_eqe()
7704 &eq_entry->hbm_ecc_data); in gaudi_handle_eqe()
7716 &eq_entry->hbm_ecc_data); in gaudi_handle_eqe()
7741 dev_err(hdev->dev, "reset required due to %s\n", in gaudi_handle_eqe()
7766 dev_err(hdev->dev, "reset required due to %s\n", in gaudi_handle_eqe()
7800 gaudi_handle_ecc_event(hdev, event_type, &eq_entry->ecc_data); in gaudi_handle_eqe()
7879 &eq_entry->sm_sei_data); in gaudi_handle_eqe()
7883 dev_err(hdev->dev, in gaudi_handle_eqe()
7898 cause = le64_to_cpu(eq_entry->data[0]) & 0xFF; in gaudi_handle_eqe()
7899 dev_err(hdev->dev, in gaudi_handle_eqe()
7912 gaudi_print_out_of_sync_info(hdev, &eq_entry->pkt_sync_err); in gaudi_handle_eqe()
7918 gaudi_print_fw_alive_info(hdev, &eq_entry->fw_alive); in gaudi_handle_eqe()
7923 dev_err(hdev->dev, "Received invalid H/W interrupt %d\n", in gaudi_handle_eqe()
7936 if (hdev->asic_prop.fw_security_enabled && !reset_direct) { in gaudi_handle_eqe()
7942 } else if (hdev->hard_reset_on_fw_events) { in gaudi_handle_eqe()
7961 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_get_events_stat()
7964 *size = (u32) sizeof(gaudi->events_stat_aggregate); in gaudi_get_events_stat()
7965 return gaudi->events_stat_aggregate; in gaudi_get_events_stat()
7968 *size = (u32) sizeof(gaudi->events_stat); in gaudi_get_events_stat()
7969 return gaudi->events_stat; in gaudi_get_events_stat()
7974 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_mmu_invalidate_cache()
7978 if (!(gaudi->hw_cap_initialized & HW_CAP_MMU) || in gaudi_mmu_invalidate_cache()
7979 hdev->reset_info.hard_reset_pending) in gaudi_mmu_invalidate_cache()
7982 if (hdev->pldm) in gaudi_mmu_invalidate_cache()
7989 WREG32(mmSTLB_CACHE_INV, gaudi->mmu_cache_inv_pi++); in gaudi_mmu_invalidate_cache()
8012 return hdev->asic_funcs->mmu_invalidate_cache(hdev, is_hard, flags); in gaudi_mmu_invalidate_cache_range()
8020 if (hdev->pldm) in gaudi_mmu_update_asid_hop0_addr()
8039 dev_err(hdev->dev, in gaudi_mmu_update_asid_hop0_addr()
8049 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_send_heartbeat()
8051 if (!(gaudi->hw_cap_initialized & HW_CAP_CPU_Q)) in gaudi_send_heartbeat()
8059 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_cpucp_info_get()
8060 struct asic_fixed_properties *prop = &hdev->asic_prop; in gaudi_cpucp_info_get()
8063 if (!(gaudi->hw_cap_initialized & HW_CAP_CPU_Q)) in gaudi_cpucp_info_get()
8072 if (!strlen(prop->cpucp_info.card_name)) in gaudi_cpucp_info_get()
8073 strncpy(prop->cpucp_info.card_name, GAUDI_DEFAULT_CARD_NAME, in gaudi_cpucp_info_get()
8076 hdev->card_type = le32_to_cpu(hdev->asic_prop.cpucp_info.card_type); in gaudi_cpucp_info_get()
8086 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_is_device_idle()
8087 const char *fmt = "%-5d%-9s%#-14x%#-12x%#x\n"; in gaudi_is_device_idle()
8088 const char *mme_slave_fmt = "%-5d%-9s%-14s%-12s%#x\n"; in gaudi_is_device_idle()
8089 const char *nic_fmt = "%-5d%-9s%#-14x%#x\n"; in gaudi_is_device_idle()
8099 "--- ------- ------------ ---------- -------------\n"); in gaudi_is_device_idle()
8123 "--- ------- ------------ ---------- ----------\n"); in gaudi_is_device_idle()
8145 "--- ------- ------------ ---------- -----------\n"); in gaudi_is_device_idle()
8171 is_eng_idle ? "Y" : "N", "-", in gaudi_is_device_idle()
8172 "-", mme_arch_sts); in gaudi_is_device_idle()
8179 "--- ------- ------------ ----------\n"); in gaudi_is_device_idle()
8184 if (gaudi->hw_cap_initialized & BIT(HW_CAP_NIC_SHIFT + port)) { in gaudi_is_device_idle()
8199 if (gaudi->hw_cap_initialized & BIT(HW_CAP_NIC_SHIFT + port)) { in gaudi_is_device_idle()
8221 __acquires(&gaudi->hw_queues_lock) in gaudi_hw_queues_lock()
8223 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_hw_queues_lock()
8225 spin_lock(&gaudi->hw_queues_lock); in gaudi_hw_queues_lock()
8229 __releases(&gaudi->hw_queues_lock) in gaudi_hw_queues_unlock()
8231 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_hw_queues_unlock()
8233 spin_unlock(&gaudi->hw_queues_lock); in gaudi_hw_queues_unlock()
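/*
 * The __acquires()/__releases() annotations above feed sparse's context
 * checker: they declare that each wrapper takes or drops the lock on the
 * caller's behalf, so the apparent imbalance inside the function body is not
 * flagged. A minimal sketch of the same pattern on an illustrative struct:
 */
struct sketch_dev {
	spinlock_t lock;
};

static void sketch_queues_lock(struct sketch_dev *d)
	__acquires(&d->lock)
{
	spin_lock(&d->lock);
}

static void sketch_queues_unlock(struct sketch_dev *d)
	__releases(&d->lock)
{
	spin_unlock(&d->lock);
}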
8238 return hdev->pdev->device; in gaudi_get_pci_id()
8244 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_get_eeprom_data()
8246 if (!(gaudi->hw_cap_initialized & HW_CAP_CPU_Q)) in gaudi_get_eeprom_data()
8254 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_get_monitor_dump()
8256 if (!(gaudi->hw_cap_initialized & HW_CAP_CPU_Q)) in gaudi_get_monitor_dump()
8272 offset = tpc_id * (mmTPC1_CFG_STATUS - mmTPC0_CFG_STATUS); in gaudi_run_tpc_kernel()
8274 if (hdev->pldm) in gaudi_run_tpc_kernel()
8315 dev_err(hdev->dev, in gaudi_run_tpc_kernel()
8318 return -EIO; in gaudi_run_tpc_kernel()
8338 dev_err(hdev->dev, in gaudi_run_tpc_kernel()
8341 return -EIO; in gaudi_run_tpc_kernel()
8353 dev_err(hdev->dev, in gaudi_run_tpc_kernel()
8356 return -EIO; in gaudi_run_tpc_kernel()
8365 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_internal_cb_pool_init()
8368 if (!(gaudi->hw_cap_initialized & HW_CAP_MMU)) in gaudi_internal_cb_pool_init()
8371 hdev->internal_cb_pool_virt_addr = hl_asic_dma_alloc_coherent(hdev, in gaudi_internal_cb_pool_init()
8373 &hdev->internal_cb_pool_dma_addr, in gaudi_internal_cb_pool_init()
8376 if (!hdev->internal_cb_pool_virt_addr) in gaudi_internal_cb_pool_init()
8377 return -ENOMEM; in gaudi_internal_cb_pool_init()
8383 hdev->internal_cb_pool = gen_pool_create(min_alloc_order, -1); in gaudi_internal_cb_pool_init()
8384 if (!hdev->internal_cb_pool) { in gaudi_internal_cb_pool_init()
8385 dev_err(hdev->dev, in gaudi_internal_cb_pool_init()
8387 rc = -ENOMEM; in gaudi_internal_cb_pool_init()
8391 rc = gen_pool_add(hdev->internal_cb_pool, in gaudi_internal_cb_pool_init()
8392 (uintptr_t) hdev->internal_cb_pool_virt_addr, in gaudi_internal_cb_pool_init()
8393 HOST_SPACE_INTERNAL_CB_SZ, -1); in gaudi_internal_cb_pool_init()
8395 dev_err(hdev->dev, in gaudi_internal_cb_pool_init()
8397 rc = -EFAULT; in gaudi_internal_cb_pool_init()
8401 hdev->internal_cb_va_base = hl_reserve_va_block(hdev, ctx, in gaudi_internal_cb_pool_init()
8405 if (!hdev->internal_cb_va_base) { in gaudi_internal_cb_pool_init()
8406 rc = -ENOMEM; in gaudi_internal_cb_pool_init()
8410 mutex_lock(&hdev->mmu_lock); in gaudi_internal_cb_pool_init()
8411 rc = hl_mmu_map_contiguous(ctx, hdev->internal_cb_va_base, in gaudi_internal_cb_pool_init()
8412 hdev->internal_cb_pool_dma_addr, in gaudi_internal_cb_pool_init()
8416 mutex_unlock(&hdev->mmu_lock); in gaudi_internal_cb_pool_init()
8424 hl_unreserve_va_block(hdev, ctx, hdev->internal_cb_va_base, in gaudi_internal_cb_pool_init()
8427 gen_pool_destroy(hdev->internal_cb_pool); in gaudi_internal_cb_pool_init()
8429 hl_asic_dma_free_coherent(hdev, HOST_SPACE_INTERNAL_CB_SZ, hdev->internal_cb_pool_virt_addr, in gaudi_internal_cb_pool_init()
8430 hdev->internal_cb_pool_dma_addr); in gaudi_internal_cb_pool_init()
8438 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_internal_cb_pool_fini()
8440 if (!(gaudi->hw_cap_initialized & HW_CAP_MMU)) in gaudi_internal_cb_pool_fini()
8443 mutex_lock(&hdev->mmu_lock); in gaudi_internal_cb_pool_fini()
8444 hl_mmu_unmap_contiguous(ctx, hdev->internal_cb_va_base, in gaudi_internal_cb_pool_fini()
8446 hl_unreserve_va_block(hdev, ctx, hdev->internal_cb_va_base, in gaudi_internal_cb_pool_fini()
8449 mutex_unlock(&hdev->mmu_lock); in gaudi_internal_cb_pool_fini()
8451 gen_pool_destroy(hdev->internal_cb_pool); in gaudi_internal_cb_pool_fini()
8453 hl_asic_dma_free_coherent(hdev, HOST_SPACE_INTERNAL_CB_SZ, hdev->internal_cb_pool_virt_addr, in gaudi_internal_cb_pool_fini()
8454 hdev->internal_cb_pool_dma_addr); in gaudi_internal_cb_pool_fini()
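/*
 * A minimal sketch of the gen_pool lifecycle used above (<linux/genalloc.h>):
 * create a pool with a minimum allocation order, back it with one contiguous
 * chunk, and destroy it on teardown or error. The nid argument of -1 means
 * "any NUMA node"; the helper name is illustrative.
 */
static struct gen_pool *sketch_cb_pool_create(void *virt_base, size_t size,
					      int min_alloc_order)
{
	struct gen_pool *pool = gen_pool_create(min_alloc_order, -1);

	if (!pool)
		return NULL;

	if (gen_pool_add(pool, (uintptr_t)virt_base, size, -1)) {
		gen_pool_destroy(pool);
		return NULL;
	}

	return pool;
}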
8461 if (ctx->asid == HL_KERNEL_ASID_ID) in gaudi_ctx_init()
8464 rc = gaudi_internal_cb_pool_init(ctx->hdev, ctx); in gaudi_ctx_init()
8468 rc = gaudi_restore_user_registers(ctx->hdev); in gaudi_ctx_init()
8470 gaudi_internal_cb_pool_fini(ctx->hdev, ctx); in gaudi_ctx_init()
8477 if (ctx->asid == HL_KERNEL_ASID_ID) in gaudi_ctx_fini()
8480 gaudi_internal_cb_pool_fini(ctx->hdev, ctx); in gaudi_ctx_fini()
8518 pkt = cb->kernel_address + size; in gaudi_gen_signal_cb()
8533 pkt->value = cpu_to_le32(value); in gaudi_gen_signal_cb()
8534 pkt->ctl = cpu_to_le32(ctl); in gaudi_gen_signal_cb()
8553 pkt->value = cpu_to_le32(value); in gaudi_add_mon_msg_short()
8554 pkt->ctl = cpu_to_le32(ctl); in gaudi_add_mon_msg_short()
8569 dev_err(hdev->dev, in gaudi_add_arm_monitor_pkt()
8582 (mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_ARM_0 + mon_id * 4) - in gaudi_add_arm_monitor_pkt()
8602 pkt->value = cpu_to_le32(value); in gaudi_add_arm_monitor_pkt()
8603 pkt->ctl = cpu_to_le32(ctl); in gaudi_add_arm_monitor_pkt()
8623 pkt->cfg = cpu_to_le32(cfg); in gaudi_add_fence_pkt()
8624 pkt->ctl = cpu_to_le32(ctl); in gaudi_add_fence_pkt()
8692 nic_index = (queue_id - GAUDI_QUEUE_ID_NIC_0_0) >> 2; in gaudi_get_fence_addr()
8707 nic_index = (queue_id - GAUDI_QUEUE_ID_NIC_0_1) >> 2; in gaudi_get_fence_addr()
8722 nic_index = (queue_id - GAUDI_QUEUE_ID_NIC_0_2) >> 2; in gaudi_get_fence_addr()
8737 nic_index = (queue_id - GAUDI_QUEUE_ID_NIC_0_3) >> 2; in gaudi_get_fence_addr()
8743 return -EINVAL; in gaudi_get_fence_addr()
8765 (mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0 + mon_id * 4) - in gaudi_add_mon_pkts()
8773 (mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_PAY_ADDRH_0 + mon_id * 4) - in gaudi_add_mon_pkts()
8784 (mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_PAY_DATA_0 + mon_id * 4) - in gaudi_add_mon_pkts()
8795 struct hl_cb *cb = (struct hl_cb *) prop->data; in gaudi_gen_wait_cb()
8796 void *buf = cb->kernel_address; in gaudi_gen_wait_cb()
8798 u32 size = prop->size; in gaudi_gen_wait_cb()
8800 if (gaudi_get_fence_addr(hdev, prop->q_idx, &fence_addr)) { in gaudi_gen_wait_cb()
8801 dev_crit(hdev->dev, "wrong queue id %d for wait packet\n", in gaudi_gen_wait_cb()
8802 prop->q_idx); in gaudi_gen_wait_cb()
8806 size += gaudi_add_mon_pkts(buf + size, prop->mon_id, fence_addr); in gaudi_gen_wait_cb()
8807 size += gaudi_add_arm_monitor_pkt(hdev, buf + size, prop->sob_base, in gaudi_gen_wait_cb()
8808 prop->sob_mask, prop->sob_val, prop->mon_id); in gaudi_gen_wait_cb()
8818 dev_dbg(hdev->dev, "reset SOB, q_idx: %d, sob_id: %d\n", hw_sob->q_idx, in gaudi_reset_sob()
8819 hw_sob->sob_id); in gaudi_reset_sob()
8822 hw_sob->sob_id * 4, 0); in gaudi_reset_sob()
8824 kref_init(&hw_sob->kref); in gaudi_reset_sob()
8837 return -EPERM; in gaudi_get_hw_block_id()
8844 return -EPERM; in gaudi_block_mmap()
8850 &hdev->fw_loader.dynamic_loader.comm_desc.cpu_dyn_regs; in gaudi_enable_events_from_fw()
8851 u32 irq_handler_offset = hdev->asic_prop.gic_interrupts_enable ? in gaudi_enable_events_from_fw()
8853 le32_to_cpu(dyn_regs->gic_host_ints_irq); in gaudi_enable_events_from_fw()
8861 return -EINVAL; in gaudi_ack_mmu_page_fault_or_access_error()
8877 default: return -EINVAL; in gaudi_map_pll_idx_to_fw_idx()
8893 reg_value -= lower_32_bits(CFG_BASE); in gaudi_add_sync_to_engine_map_entry()
8898 return -ENOMEM; in gaudi_add_sync_to_engine_map_entry()
8899 entry->engine_type = engine_type; in gaudi_add_sync_to_engine_map_entry()
8900 entry->engine_id = engine_id; in gaudi_add_sync_to_engine_map_entry()
8901 entry->sync_id = reg_value; in gaudi_add_sync_to_engine_map_entry()
8902 hash_add(map->tb, &entry->node, reg_value); in gaudi_add_sync_to_engine_map_entry()
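/*
 * A minimal sketch of the <linux/hashtable.h> pattern above: entries embed a
 * hlist_node, hash_add() keys them by sync_id, and lookups use
 * hash_for_each_possible() plus an exact-key check (multiple keys can share
 * a bucket). All names here are illustrative stand-ins for the driver's map
 * types.
 */
struct sketch_sync_map {
	DECLARE_HASHTABLE(tb, 8);
};

struct sketch_sync_entry {
	struct hlist_node node;
	u32 sync_id;
	u32 engine_id;
};

static struct sketch_sync_entry *
sketch_sync_lookup(struct sketch_sync_map *map, u32 sync_id)
{
	struct sketch_sync_entry *entry;

	hash_for_each_possible(map->tb, entry, node, sync_id)
		if (entry->sync_id == sync_id)
			return entry;

	return NULL;
}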
8910 struct hl_state_dump_specs *sds = &hdev->state_dump_specs; in gaudi_gen_sync_to_engine_map()
8915 for (i = 0; i < sds->props[SP_NUM_OF_TPC_ENGINES]; ++i) { in gaudi_gen_sync_to_engine_map()
8917 reg_value = RREG32(sds->props[SP_TPC0_CFG_SO] + in gaudi_gen_sync_to_engine_map()
8918 sds->props[SP_NEXT_TPC] * i); in gaudi_gen_sync_to_engine_map()
8927 for (i = 0; i < sds->props[SP_NUM_OF_MME_ENGINES]; ++i) { in gaudi_gen_sync_to_engine_map()
8928 for (j = 0; j < sds->props[SP_SUB_MME_ENG_NUM]; ++j) { in gaudi_gen_sync_to_engine_map()
8930 reg_value = RREG32(sds->props[SP_MME_CFG_SO] + in gaudi_gen_sync_to_engine_map()
8931 sds->props[SP_NEXT_MME] * i + in gaudi_gen_sync_to_engine_map()
8936 i * sds->props[SP_SUB_MME_ENG_NUM] + j); in gaudi_gen_sync_to_engine_map()
8943 for (i = 0; i < sds->props[SP_NUM_OF_DMA_ENGINES]; ++i) { in gaudi_gen_sync_to_engine_map()
8944 reg_value = RREG32(sds->props[SP_DMA_CFG_SO] + in gaudi_gen_sync_to_engine_map()
8945 sds->props[SP_DMA_QUEUES_OFFSET] * i); in gaudi_gen_sync_to_engine_map()
8964 mon->status); in gaudi_monitor_valid()
8977 mon->arm_data); in gaudi_fill_sobs_from_mon()
8979 mon->arm_data); in gaudi_fill_sobs_from_mon()
8981 for (i = 0, offset = 0; mask && offset < MONITOR_SOB_STRING_SIZE - in gaudi_fill_sobs_from_mon()
9013 mon->id, name, in gaudi_print_single_monitor()
9015 mon->arm_data), in gaudi_print_single_monitor()
9020 mon->arm_data)), in gaudi_print_single_monitor()
9022 mon->arm_data), in gaudi_print_single_monitor()
9023 mon->wr_data, in gaudi_print_single_monitor()
9024 (((u64)mon->wr_addr_high) << 32) | mon->wr_addr_low, in gaudi_print_single_monitor()
9029 mon->status)), in gaudi_print_single_monitor()
9039 struct hl_state_dump_specs *sds = &hdev->state_dump_specs; in gaudi_print_fences_single_engine()
9040 int rc = -ENOMEM, i; in gaudi_print_fences_single_engine()
9043 statuses = kcalloc(sds->props[SP_ENGINE_NUM_OF_QUEUES], in gaudi_print_fences_single_engine()
9048 fences = kcalloc(sds->props[SP_ENGINE_NUM_OF_FENCES] * in gaudi_print_fences_single_engine()
9049 sds->props[SP_ENGINE_NUM_OF_QUEUES], in gaudi_print_fences_single_engine()
9054 for (i = 0; i < sds->props[SP_ENGINE_NUM_OF_FENCES]; ++i) in gaudi_print_fences_single_engine()
9057 for (i = 0; i < sds->props[SP_ENGINE_NUM_OF_FENCES] * in gaudi_print_fences_single_engine()
9058 sds->props[SP_ENGINE_NUM_OF_QUEUES]; ++i) in gaudi_print_fences_single_engine()
9062 for (i = 0; i < sds->props[SP_ENGINE_NUM_OF_QUEUES]; ++i) { in gaudi_print_fences_single_engine()
9075 (i + fence_id * sds->props[SP_ENGINE_NUM_OF_QUEUES]); in gaudi_print_fences_single_engine()
9076 fence_rdata = fence_cnt - sds->props[SP_FENCE0_CNT_OFFSET] + in gaudi_print_fences_single_engine()
9077 sds->props[SP_FENCE0_RDATA_OFFSET]; in gaudi_print_fences_single_engine()
9113 struct hl_state_dump_specs *sds = &hdev->state_dump_specs; in gaudi_state_dump_init()
9117 hash_add(sds->so_id_to_str_tb, in gaudi_state_dump_init()
9122 hash_add(sds->monitor_id_to_str_tb, in gaudi_state_dump_init()
9126 sds->props = gaudi_state_dump_specs_props; in gaudi_state_dump_init()
9128 sds->sync_namager_names = gaudi_sync_manager_names; in gaudi_state_dump_init()
9130 sds->funcs = gaudi_state_dump_funcs; in gaudi_state_dump_init()
9147 cpucp_info = &hdev->asic_prop.cpucp_info; in infineon_ver_show()
9149 return sprintf(buf, "%#04x\n", le32_to_cpu(cpucp_info->infineon_version)); in infineon_ver_show()
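/*
 * A minimal sketch of the read-only sysfs attribute pattern above: a _show
 * callback paired with DEVICE_ATTR_RO(), collected into an attribute array
 * for a device attribute group. sysfs_emit() is the preferred formatter for
 * show callbacks; the attribute name and value here are placeholders.
 */
static ssize_t sketch_ver_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%#04x\n", 0x42);
}

static DEVICE_ATTR_RO(sketch_ver);

static struct attribute *sketch_attrs[] = {
	&dev_attr_sketch_ver.attr,
	NULL,
};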
9163 dev_vrm_attr_grp->attrs = gaudi_vrm_dev_attrs; in gaudi_add_device_attr()
9267 * gaudi_set_asic_funcs - set GAUDI function pointers
9274 hdev->asic_funcs = &gaudi_funcs; in gaudi_set_asic_funcs()