Lines Matching +full:smmu +full:-v1 in drivers/crypto/hisilicon/sec2/sec_main.c
1 // SPDX-License-Identifier: GPL-2.0
87 #define SEC_ADDR(qm, offset) ((qm)->io_base + (offset) + \
172 MODULE_PARM_DESC(pf_q_num, "Number of queues in PF (v1 2-4096, v2 2-1024)");
180 return -EINVAL; in sec_ctx_q_num_set()
184 return -EINVAL; in sec_ctx_q_num_set()
188 return -EINVAL; in sec_ctx_q_num_set()
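The three -EINVAL exits at lines 180/184/188 are the checks of the ctx_q_num module-parameter setter: a NULL value, an unparsable value, and a zero, odd, or out-of-range queue count. A minimal sketch of such a setter; SEC_CTX_Q_NUM_MAX and the error string are assumptions:

static int sec_ctx_q_num_set(const char *val, const struct kernel_param *kp)
{
        u32 ctx_q_num;
        int ret;

        if (!val)
                return -EINVAL;                 /* line 180 */

        ret = kstrtou32(val, 10, &ctx_q_num);
        if (ret)
                return -EINVAL;                 /* line 184 */

        /* reject zero, odd, or out-of-range queue counts (assumed bound) */
        if (!ctx_q_num || ctx_q_num > SEC_CTX_Q_NUM_MAX || ctx_q_num & 0x1) {
                pr_err("ctx queue num[%u] is invalid!\n", ctx_q_num);
                return -EINVAL;                 /* line 188 */
        }

        return param_set_int(val, kp);
}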
209 MODULE_PARM_DESC(vfs_num, "Number of VFs to enable (1-63), 0 (default)");
252 if (qm->pdev->is_virtfn) { in sec_get_endian()
253 dev_err_ratelimited(&qm->pdev->dev, in sec_get_endian()
257 reg = readl_relaxed(qm->io_base + SEC_ENGINE_PF_CFG_OFF + in sec_get_endian()
264 /* BD 32-bit big-endian mode */ in sec_get_endian()
268 /* BD 64-bit big-endian mode */ in sec_get_endian()
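Lines 252-268 belong to the endianness probe: a VF cannot read the engine register, so the PF reads SEC_CONTROL_REG and decodes the BD endian bits. A sketch reassembled around the listed lines; the enum names and bit positions are assumptions:

static enum sec_endian sec_get_endian(struct hisi_qm *qm)
{
        u32 reg;

        /* a VF cannot read the engine register, so assume little endian */
        if (qm->pdev->is_virtfn) {
                dev_err_ratelimited(&qm->pdev->dev,
                                    "cannot access a register in VF!\n");
                return SEC_LE;
        }

        reg = readl_relaxed(qm->io_base + SEC_ENGINE_PF_CFG_OFF +
                            SEC_ACC_COMMON_REG_OFF + SEC_CONTROL_REG);

        /* BD little endian mode */
        if (!(reg & BIT(0)))
                return SEC_LE;

        /* BD 32-bit big-endian mode */
        if (!(reg & BIT(1)))
                return SEC_32BE;

        /* BD 64-bit big-endian mode */
        return SEC_64BE;
}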
289 pci_err(qm->pdev, "failed to init sec mem\n"); in sec_engine_init()
306 qm->io_base + AM_CFG_SINGLE_PORT_MAX_TRANS); in sec_engine_init()
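Lines 289 and 306 bracket the engine bring-up: poll until the internal memory init completes, then cap the per-port outstanding transactions. A sketch, assuming the SEC_MEM_*_REG, SEC_DELAY_10_US, SEC_POLL_TIMEOUT_US and SEC_SINGLE_PORT_MAX_TRANS names; the registers configured in between are not in this listing:

static int sec_engine_init(struct hisi_qm *qm)
{
        u32 reg;
        int ret;

        /* kick off internal memory init, then poll for completion */
        writel_relaxed(0x1, SEC_ADDR(qm, SEC_MEM_START_INIT_REG));
        ret = readl_relaxed_poll_timeout(SEC_ADDR(qm, SEC_MEM_INIT_DONE_REG),
                                         reg, reg & 0x1, SEC_DELAY_10_US,
                                         SEC_POLL_TIMEOUT_US);
        if (ret) {
                pci_err(qm->pdev, "failed to init sec mem\n");
                return ret;
        }

        /* endianness and BD/SGL register setup elided in this listing */

        /* cap outstanding transactions on the single AXI port */
        writel(SEC_SINGLE_PORT_MAX_TRANS,
               qm->io_base + AM_CFG_SINGLE_PORT_MAX_TRANS);

        return 0;
}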
330 writel(AXUSER_BASE, qm->io_base + QM_ARUSER_M_CFG_1); in sec_set_user_domain_and_cache()
331 writel(ARUSER_M_CFG_ENABLE, qm->io_base + QM_ARUSER_M_CFG_ENABLE); in sec_set_user_domain_and_cache()
332 writel(AXUSER_BASE, qm->io_base + QM_AWUSER_M_CFG_1); in sec_set_user_domain_and_cache()
333 writel(AWUSER_M_CFG_ENABLE, qm->io_base + QM_AWUSER_M_CFG_ENABLE); in sec_set_user_domain_and_cache()
334 writel(WUSER_M_CFG_ENABLE, qm->io_base + QM_WUSER_M_CFG_ENABLE); in sec_set_user_domain_and_cache()
337 writel(AXI_M_CFG, qm->io_base + QM_AXI_M_CFG); in sec_set_user_domain_and_cache()
338 writel(AXI_M_CFG_ENABLE, qm->io_base + QM_AXI_M_CFG_ENABLE); in sec_set_user_domain_and_cache()
341 writel(PEH_AXUSER_CFG, qm->io_base + QM_PEH_AXUSER_CFG); in sec_set_user_domain_and_cache()
342 writel(PEH_AXUSER_CFG_ENABLE, qm->io_base + QM_PEH_AXUSER_CFG_ENABLE); in sec_set_user_domain_and_cache()
347 FIELD_PREP(CQC_CACHE_WB_THRD, 1), qm->io_base + QM_CACHE_CTL); in sec_set_user_domain_and_cache()
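Line 347 is the tail of one multi-line writel() that turns on SQC/CQC caching with a write-back threshold of 1; the full statement plausibly reads:

        /* enable sqc/cqc cache and write-back, threshold = 1 */
        writel(SQC_CACHE_ENABLE | CQC_CACHE_ENABLE | SQC_CACHE_WB_ENABLE |
               CQC_CACHE_WB_ENABLE | FIELD_PREP(SQC_CACHE_WB_THRD, 1) |
               FIELD_PREP(CQC_CACHE_WB_THRD, 1), qm->io_base + QM_CACHE_CTL);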
352 /* sec_debug_regs_clear() - clear the sec debug regs */
358 writel(0x0, qm->io_base + QM_DFX_MB_CNT_VF); in sec_debug_regs_clear()
359 writel(0x0, qm->io_base + QM_DFX_DB_CNT_VF); in sec_debug_regs_clear()
362 writel(0x1, qm->io_base + SEC_CTRL_CNT_CLR_CE); in sec_debug_regs_clear()
364 readl(qm->io_base + sec_dfx_regs[i].offset); in sec_debug_regs_clear()
367 writel(0x0, qm->io_base + SEC_CTRL_CNT_CLR_CE); in sec_debug_regs_clear()
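Lines 352-367 assemble into the clear routine: zero the current-qm counters, set rdclr_en so that reading each DFX register clears it, then drop rdclr_en again. A sketch; the final common-qm call is an assumption:

static void sec_debug_regs_clear(struct hisi_qm *qm)
{
        int i;

        /* clear current_qm */
        writel(0x0, qm->io_base + QM_DFX_MB_CNT_VF);
        writel(0x0, qm->io_base + QM_DFX_DB_CNT_VF);

        /* reads clear the DFX counters only while rdclr_en is set */
        writel(0x1, qm->io_base + SEC_CTRL_CNT_CLR_CE);
        for (i = 0; i < ARRAY_SIZE(sec_dfx_regs); i++)
                readl(qm->io_base + sec_dfx_regs[i].offset);

        /* clear rdclr_en */
        writel(0x0, qm->io_base + SEC_CTRL_CNT_CLR_CE);

        hisi_qm_debug_regs_clear(qm);   /* assumed common-qm helper */
}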
376 if (qm->ver == QM_HW_V1) { in sec_hw_error_enable()
377 writel(SEC_CORE_INT_DISABLE, qm->io_base + SEC_CORE_INT_MASK); in sec_hw_error_enable()
378 pci_info(qm->pdev, "V1 does not support hw error handling\n"); in sec_hw_error_enable()
385 writel(SEC_CORE_INT_CLEAR, qm->io_base + SEC_CORE_INT_SOURCE); in sec_hw_error_enable()
388 writel(SEC_CORE_INT_ENABLE, qm->io_base + SEC_CORE_INT_MASK); in sec_hw_error_enable()
391 writel(SEC_RAS_CE_ENB_MSK, qm->io_base + SEC_RAS_CE_REG); in sec_hw_error_enable()
392 writel(SEC_RAS_FE_ENB_MSK, qm->io_base + SEC_RAS_FE_REG); in sec_hw_error_enable()
393 writel(SEC_RAS_NFE_ENB_MSK, qm->io_base + SEC_RAS_NFE_REG); in sec_hw_error_enable()
395 /* enable SEC block master OOO when an m-bit error occurs */ in sec_hw_error_enable()
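Reassembled, the enable path (lines 376-395) is: bail out on V1 hardware, clear stale interrupt sources, unmask the core interrupts, enable the CE/FE/NFE RAS classes, and finally set the AXI shutdown (master OOO) bit via a read-modify-write of SEC_CONTROL_REG, which the sketch below assumes:

static void sec_hw_error_enable(struct hisi_qm *qm)
{
        u32 val;

        if (qm->ver == QM_HW_V1) {
                writel(SEC_CORE_INT_DISABLE, qm->io_base + SEC_CORE_INT_MASK);
                pci_info(qm->pdev, "V1 does not support hw error handling\n");
                return;
        }

        val = readl(qm->io_base + SEC_CONTROL_REG);

        /* clear SEC hw error sources, if any */
        writel(SEC_CORE_INT_CLEAR, qm->io_base + SEC_CORE_INT_SOURCE);

        /* enable SEC hw error interrupts */
        writel(SEC_CORE_INT_ENABLE, qm->io_base + SEC_CORE_INT_MASK);

        /* enable RAS interrupts */
        writel(SEC_RAS_CE_ENB_MSK, qm->io_base + SEC_RAS_CE_REG);
        writel(SEC_RAS_FE_ENB_MSK, qm->io_base + SEC_RAS_FE_REG);
        writel(SEC_RAS_NFE_ENB_MSK, qm->io_base + SEC_RAS_NFE_REG);

        /* enable SEC block master OOO when an m-bit error occurs */
        val |= SEC_AXI_SHUTDOWN_ENABLE;
        writel(val, qm->io_base + SEC_CONTROL_REG);
}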
408 writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_CE_REG); in sec_hw_error_disable()
409 writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_FE_REG); in sec_hw_error_disable()
410 writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_NFE_REG); in sec_hw_error_disable()
413 writel(SEC_CORE_INT_DISABLE, qm->io_base + SEC_CORE_INT_MASK); in sec_hw_error_disable()
415 /* disable SEC block master OOO when an m-bit error occurs */ in sec_hw_error_disable()
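The disable path mirrors it; a sketch assuming SEC_AXI_SHUTDOWN_DISABLE is the complementary AND mask:

static void sec_hw_error_disable(struct hisi_qm *qm)
{
        u32 val = readl(qm->io_base + SEC_CONTROL_REG);

        /* disable RAS interrupts */
        writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_CE_REG);
        writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_FE_REG);
        writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_NFE_REG);

        /* disable SEC hw error interrupts */
        writel(SEC_CORE_INT_DISABLE, qm->io_base + SEC_CORE_INT_MASK);

        /* disable SEC block master OOO when an m-bit error occurs */
        val &= SEC_AXI_SHUTDOWN_DISABLE;        /* assumed AND-mask constant */
        writel(val, qm->io_base + SEC_CONTROL_REG);
}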
423 struct hisi_qm *qm = file->qm; in sec_current_qm_read()
425 return readl(qm->io_base + QM_DFX_MB_CNT_VF); in sec_current_qm_read()
430 struct hisi_qm *qm = file->qm; in sec_current_qm_write()
434 if (val > qm->vfs_num) in sec_current_qm_write()
435 return -EINVAL; in sec_current_qm_write()
439 qm->debug.curr_qm_qp_num = qm->qp_num; in sec_current_qm_write()
441 vfq_num = (qm->ctrl_qp_num - qm->qp_num) / qm->vfs_num; in sec_current_qm_write()
443 if (val == qm->vfs_num) in sec_current_qm_write()
444 qm->debug.curr_qm_qp_num = in sec_current_qm_write()
445 qm->ctrl_qp_num - qm->qp_num - in sec_current_qm_write()
446 (qm->vfs_num - 1) * vfq_num; in sec_current_qm_write()
448 qm->debug.curr_qm_qp_num = vfq_num; in sec_current_qm_write()
451 writel(val, qm->io_base + QM_DFX_MB_CNT_VF); in sec_current_qm_write()
452 writel(val, qm->io_base + QM_DFX_DB_CNT_VF); in sec_current_qm_write()
455 (readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & CURRENT_Q_MASK); in sec_current_qm_write()
456 writel(tmp, qm->io_base + QM_DFX_SQE_CNT_VF_SQN); in sec_current_qm_write()
459 (readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & CURRENT_Q_MASK); in sec_current_qm_write()
460 writel(tmp, qm->io_base + QM_DFX_CQE_CNT_VF_CQN); in sec_current_qm_write()
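Lines 434-460 size the debugfs view per function: the queues left after the PF are split evenly across VFs and the last VF absorbs the remainder. The same arithmetic isolated in a standalone helper (sec_vf_qp_num is hypothetical, written only to show the formula):

/* hypothetical helper: queue count owned by VF 'vf_id' (1-based) */
static u32 sec_vf_qp_num(struct hisi_qm *qm, u32 vf_id)
{
        u32 vfq_num = (qm->ctrl_qp_num - qm->qp_num) / qm->vfs_num;

        if (vf_id == qm->vfs_num)       /* last VF absorbs the remainder */
                return qm->ctrl_qp_num - qm->qp_num -
                       (qm->vfs_num - 1) * vfq_num;

        return vfq_num;
}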
467 struct hisi_qm *qm = file->qm; in sec_clear_enable_read()
469 return readl(qm->io_base + SEC_CTRL_CNT_CLR_CE) & in sec_clear_enable_read()
475 struct hisi_qm *qm = file->qm; in sec_clear_enable_write()
479 return -EINVAL; in sec_clear_enable_write()
481 tmp = (readl(qm->io_base + SEC_CTRL_CNT_CLR_CE) & in sec_clear_enable_write()
483 writel(tmp, qm->io_base + SEC_CTRL_CNT_CLR_CE); in sec_clear_enable_write()
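Lines 475-483 are a one-bit read-modify-write of the rdclr_en field; reassembled, assuming SEC_CTRL_CNT_CLR_CE_BIT is the bit mask:

static int sec_clear_enable_write(struct sec_debug_file *file, u32 val)
{
        struct hisi_qm *qm = file->qm;
        u32 tmp;

        if (val != 1 && val)
                return -EINVAL; /* only 0 and 1 are meaningful */

        tmp = (readl(qm->io_base + SEC_CTRL_CNT_CLR_CE) &
               ~SEC_CTRL_CNT_CLR_CE_BIT) | val;
        writel(tmp, qm->io_base + SEC_CTRL_CNT_CLR_CE);

        return 0;
}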
491 struct sec_debug_file *file = filp->private_data; in sec_debug_read()
496 spin_lock_irq(&file->lock); in sec_debug_read()
498 switch (file->index) { in sec_debug_read()
506 spin_unlock_irq(&file->lock); in sec_debug_read()
507 return -EINVAL; in sec_debug_read()
510 spin_unlock_irq(&file->lock); in sec_debug_read()
519 struct sec_debug_file *file = filp->private_data; in sec_debug_write()
528 return -ENOSPC; in sec_debug_write()
530 len = simple_write_to_buffer(tbuf, SEC_DBGFS_VAL_MAX_LEN - 1, in sec_debug_write()
537 return -EFAULT; in sec_debug_write()
539 spin_lock_irq(&file->lock); in sec_debug_write()
541 switch (file->index) { in sec_debug_write()
553 ret = -EINVAL; in sec_debug_write()
557 spin_unlock_irq(&file->lock); in sec_debug_write()
562 spin_unlock_irq(&file->lock); in sec_debug_write()
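The write handler (lines 519-562) follows the stock debugfs pattern: copy the user buffer, parse it, then dispatch on file->index under the spinlock so reads and writes cannot interleave. A sketch; the index names SEC_CURRENT_QM and SEC_CLEAR_ENABLE are assumptions:

static ssize_t sec_debug_write(struct file *filp, const char __user *buf,
                               size_t count, loff_t *pos)
{
        struct sec_debug_file *file = filp->private_data;
        char tbuf[SEC_DBGFS_VAL_MAX_LEN];
        unsigned long val;
        int len, ret;

        if (*pos != 0)
                return 0;

        if (count >= SEC_DBGFS_VAL_MAX_LEN)
                return -ENOSPC;

        len = simple_write_to_buffer(tbuf, SEC_DBGFS_VAL_MAX_LEN - 1,
                                     pos, buf, count);
        if (len < 0)
                return len;

        tbuf[len] = '\0';
        if (kstrtoul(tbuf, 0, &val))
                return -EFAULT;

        spin_lock_irq(&file->lock);

        switch (file->index) {
        case SEC_CURRENT_QM:
                ret = sec_current_qm_write(file, val);
                if (ret)
                        goto err_input;
                break;
        case SEC_CLEAR_ENABLE:
                ret = sec_clear_enable_write(file, val);
                if (ret)
                        goto err_input;
                break;
        default:
                ret = -EINVAL;
                goto err_input;
        }

        spin_unlock_irq(&file->lock);

        return count;

err_input:
        spin_unlock_irq(&file->lock);
        return ret;
}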
583 return -EINVAL; in sec_debugfs_atomic64_set()
596 struct device *dev = &qm->pdev->dev; in sec_core_debug_init()
597 struct sec_dfx *dfx = &sec->debug.dfx; in sec_core_debug_init()
602 tmp_d = debugfs_create_dir("sec_dfx", qm->debug.debug_root); in sec_core_debug_init()
606 return -ENOMEM; in sec_core_debug_init()
608 regset->regs = sec_dfx_regs; in sec_core_debug_init()
609 regset->nregs = ARRAY_SIZE(sec_dfx_regs); in sec_core_debug_init()
610 regset->base = qm->io_base; in sec_core_debug_init()
612 if (qm->pdev->device == SEC_PF_PCI_DEVICE_ID) in sec_core_debug_init()
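Lines 596-612 create the sec_dfx directory and a debugfs_regset32 over sec_dfx_regs, exposing the raw register dump on the PF only. A sketch of the assembled function; the per-counter loop, sec_dfx_labels and sec_atomic64_ops are assumptions:

static int sec_core_debug_init(struct hisi_qm *qm)
{
        struct sec_dev *sec = container_of(qm, struct sec_dev, qm);
        struct device *dev = &qm->pdev->dev;
        struct sec_dfx *dfx = &sec->debug.dfx;
        struct debugfs_regset32 *regset;
        struct dentry *tmp_d;
        int i;

        tmp_d = debugfs_create_dir("sec_dfx", qm->debug.debug_root);

        regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
        if (!regset)
                return -ENOMEM;

        regset->regs = sec_dfx_regs;
        regset->nregs = ARRAY_SIZE(sec_dfx_regs);
        regset->base = qm->io_base;

        /* the raw register dump only makes sense on the PF */
        if (qm->pdev->device == SEC_PF_PCI_DEVICE_ID)
                debugfs_create_regset32("regs", 0444, tmp_d, regset);

        /* per-counter debugfs files over the dfx atomics (names assumed) */
        for (i = 0; i < ARRAY_SIZE(sec_dfx_labels); i++) {
                atomic64_t *data = (atomic64_t *)((uintptr_t)dfx +
                                        sec_dfx_labels[i].offset);

                debugfs_create_file(sec_dfx_labels[i].name, 0644,
                                    tmp_d, data, &sec_atomic64_ops);
        }

        return 0;
}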
630 if (qm->pdev->device == SEC_PF_PCI_DEVICE_ID) { in sec_debug_init()
632 spin_lock_init(&sec->debug.files[i].lock); in sec_debug_init()
633 sec->debug.files[i].index = i; in sec_debug_init()
634 sec->debug.files[i].qm = qm; in sec_debug_init()
637 qm->debug.debug_root, in sec_debug_init()
638 sec->debug.files + i, in sec_debug_init()
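Lines 630-638 initialize the two PF-only control files (lock, index, back-pointer to the qm) before handing each to debugfs_create_file(); a sketch, with sec_dbg_file_name, SEC_DEBUG_FILE_NUM and sec_dbg_fops assumed:

static int sec_debug_init(struct hisi_qm *qm)
{
        struct sec_dev *sec = container_of(qm, struct sec_dev, qm);
        int i;

        /* current_qm / clear_enable control files exist on the PF only */
        if (qm->pdev->device == SEC_PF_PCI_DEVICE_ID) {
                for (i = SEC_CURRENT_QM; i < SEC_DEBUG_FILE_NUM; i++) {
                        spin_lock_init(&sec->debug.files[i].lock);
                        sec->debug.files[i].index = i;
                        sec->debug.files[i].qm = qm;

                        debugfs_create_file(sec_dbg_file_name[i], 0600,
                                            qm->debug.debug_root,
                                            sec->debug.files + i,
                                            &sec_dbg_fops);
                }
        }

        return sec_core_debug_init(qm);
}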
648 struct device *dev = &qm->pdev->dev; in sec_debugfs_init()
651 qm->debug.debug_root = debugfs_create_dir(dev_name(dev), in sec_debugfs_init()
653 qm->debug.sqe_mask_offset = SEC_SQE_MASK_OFFSET; in sec_debugfs_init()
654 qm->debug.sqe_mask_len = SEC_SQE_MASK_LEN; in sec_debugfs_init()
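Lines 648-654 open the per-device debugfs setup: a directory named after the device under the module root, plus the SQE mask window for the common qm code. A sketch of the whole function; sec_debugfs_root, sec_debug_init()'s exact signature and the unwind path are assumptions:

static int sec_debugfs_init(struct hisi_qm *qm)
{
        struct device *dev = &qm->pdev->dev;
        int ret;

        qm->debug.debug_root = debugfs_create_dir(dev_name(dev),
                                                  sec_debugfs_root);
        qm->debug.sqe_mask_offset = SEC_SQE_MASK_OFFSET;
        qm->debug.sqe_mask_len = SEC_SQE_MASK_LEN;

        ret = hisi_qm_debug_init(qm);   /* common qm debugfs entries */
        if (ret)
                goto debugfs_remove;

        ret = sec_debug_init(qm);       /* SEC-specific files */
        if (ret)
                goto debugfs_remove;

        return 0;

debugfs_remove:
        debugfs_remove_recursive(qm->debug.debug_root);
        return ret;
}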
674 debugfs_remove_recursive(qm->debug.debug_root); in sec_debugfs_exit()
680 struct device *dev = &qm->pdev->dev; in sec_log_hw_error()
683 while (errs->msg) { in sec_log_hw_error()
684 if (errs->int_msk & err_sts) { in sec_log_hw_error()
686 errs->msg, errs->int_msk); in sec_log_hw_error()
688 if (SEC_CORE_INT_STATUS_M_ECC & errs->int_msk) { in sec_log_hw_error()
689 err_val = readl(qm->io_base + in sec_log_hw_error()
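Lines 680-689 walk a NULL-terminated descriptor table, log every error bit present in err_sts, and additionally dump the SRAM ECC info register for the multi-bit ECC error. A sketch; the dev_err wording and SEC_ECC_NUM() are assumptions:

static void sec_log_hw_error(struct hisi_qm *qm, u32 err_sts)
{
        const struct sec_hw_error *errs = sec_hw_errors;
        struct device *dev = &qm->pdev->dev;
        u32 err_val;

        while (errs->msg) {
                if (errs->int_msk & err_sts) {
                        dev_err(dev, "%s [error status=0x%x] found\n",
                                errs->msg, errs->int_msk);

                        /* multi-bit ECC: also report the failing SRAM */
                        if (SEC_CORE_INT_STATUS_M_ECC & errs->int_msk) {
                                err_val = readl(qm->io_base +
                                                SEC_CORE_SRAM_ECC_ERR_INFO);
                                dev_err(dev, "multi ecc sram num=0x%x\n",
                                        SEC_ECC_NUM(err_val));
                        }
                }
                errs++;
        }
}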
701 return readl(qm->io_base + SEC_CORE_INT_STATUS); in sec_get_hw_err_status()
706 writel(err_sts, qm->io_base + SEC_CORE_INT_SOURCE); in sec_clear_hw_err_status()
739 struct hisi_qm *qm = &sec->qm; in sec_pf_probe_init()
742 if (qm->ver == QM_HW_V1) in sec_pf_probe_init()
743 qm->ctrl_qp_num = SEC_QUEUE_NUM_V1; in sec_pf_probe_init()
745 qm->ctrl_qp_num = SEC_QUEUE_NUM_V2; in sec_pf_probe_init()
747 qm->err_ini = &sec_err_ini; in sec_pf_probe_init()
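Lines 739-747 are the head of the PF-only init: pick the queue ceiling by hardware revision and hook up the error-handling ops. A plausible remainder, assuming it then configures the user domain/cache and clears the debug registers as the functions above suggest:

static int sec_pf_probe_init(struct sec_dev *sec)
{
        struct hisi_qm *qm = &sec->qm;
        int ret;

        if (qm->ver == QM_HW_V1)
                qm->ctrl_qp_num = SEC_QUEUE_NUM_V1;
        else
                qm->ctrl_qp_num = SEC_QUEUE_NUM_V2;

        qm->err_ini = &sec_err_ini;

        ret = sec_set_user_domain_and_cache(qm);
        if (ret)
                return ret;

        hisi_qm_dev_err_init(qm);       /* assumed common-qm helper */
        sec_debug_regs_clear(qm);

        return 0;
}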
763 qm->pdev = pdev; in sec_qm_init()
764 qm->ver = pdev->revision; in sec_qm_init()
765 qm->sqe_size = SEC_SQE_SIZE; in sec_qm_init()
766 qm->dev_name = sec_name; in sec_qm_init()
768 qm->fun_type = (pdev->device == SEC_PF_PCI_DEVICE_ID) ? in sec_qm_init()
770 if (qm->fun_type == QM_HW_PF) { in sec_qm_init()
771 qm->qp_base = SEC_PF_DEF_Q_BASE; in sec_qm_init()
772 qm->qp_num = pf_q_num; in sec_qm_init()
773 qm->debug.curr_qm_qp_num = pf_q_num; in sec_qm_init()
774 qm->qm_list = &sec_devices; in sec_qm_init()
775 } else if (qm->fun_type == QM_HW_VF && qm->ver == QM_HW_V1) { in sec_qm_init()
777 * there is no way to read the qm configuration from a VM on v1 hardware, in sec_qm_init()
779 * and only one VF can be enabled on v1 hardware. in sec_qm_init()
782 qm->qp_base = SEC_PF_DEF_Q_NUM; in sec_qm_init()
783 qm->qp_num = SEC_QUEUE_NUM_V1 - SEC_PF_DEF_Q_NUM; in sec_qm_init()
792 qm->wq = alloc_workqueue("%s", WQ_HIGHPRI | WQ_MEM_RECLAIM | in sec_qm_init()
794 pci_name(qm->pdev)); in sec_qm_init()
795 if (!qm->wq) { in sec_qm_init()
796 pci_err(qm->pdev, "fail to alloc workqueue\n"); in sec_qm_init()
797 return -ENOMEM; in sec_qm_init()
802 destroy_workqueue(qm->wq); in sec_qm_init()
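Lines 792-802 are the tail of sec_qm_init(): the per-device workqueue is allocated first and destroyed again if hisi_qm_init() fails. The function shape, with the sizing logic of lines 763-783 elided:

static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
{
        int ret;

        /* identification and queue sizing shown at lines 763-783 above */

        /*
         * WQ_HIGHPRI: SEC requests are latency sensitive.
         * WQ_UNBOUND: SEC tasks can be long-running and CPU intensive.
         */
        qm->wq = alloc_workqueue("%s", WQ_HIGHPRI | WQ_MEM_RECLAIM |
                                 WQ_UNBOUND, num_online_cpus(),
                                 pci_name(qm->pdev));
        if (!qm->wq) {
                pci_err(qm->pdev, "fail to alloc workqueue\n");
                return -ENOMEM;
        }

        ret = hisi_qm_init(qm);
        if (ret)
                destroy_workqueue(qm->wq);

        return ret;
}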
814 struct hisi_qm *qm = &sec->qm; in sec_probe_init()
817 if (qm->fun_type == QM_HW_PF) { in sec_probe_init()
830 destroy_workqueue(qm->wq); in sec_probe_uninit()
836 struct device *dev = &sec->qm.pdev->dev; in sec_iommu_used_check()
841 sec->iommu_used = false; in sec_iommu_used_check()
843 if (domain->type & __IOMMU_DOMAIN_PAGING) in sec_iommu_used_check()
844 sec->iommu_used = true; in sec_iommu_used_check()
845 dev_info(dev, "SMMU enabled, iommu type = %u\n", in sec_iommu_used_check()
846 domain->type); in sec_iommu_used_check()
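Lines 836-846 form a small probe-time check: query the device's IOMMU domain and record whether a paging (translating) domain, i.e. the SMMU, is active. Reassembled:

static void sec_iommu_used_check(struct sec_dev *sec)
{
        struct device *dev = &sec->qm.pdev->dev;
        struct iommu_domain *domain;

        domain = iommu_get_domain_for_dev(dev);

        /* remember whether DMA goes through the SMMU */
        sec->iommu_used = false;
        if (domain) {
                if (domain->type & __IOMMU_DOMAIN_PAGING)
                        sec->iommu_used = true;
                dev_info(dev, "SMMU enabled, iommu type = %u\n",
                         domain->type);
        }
}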
856 sec = devm_kzalloc(&pdev->dev, sizeof(*sec), GFP_KERNEL); in sec_probe()
858 return -ENOMEM; in sec_probe()
860 qm = &sec->qm; in sec_probe()
867 sec->ctx_q_num = ctx_q_num; in sec_probe()
892 if (qm->fun_type == QM_HW_PF && vfs_num) { in sec_probe()
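The probe fragments (lines 856-892) slot into the usual shape: allocate the sec_dev, init the qm, record ctx_q_num, check the SMMU, then start the qm and optionally enable SR-IOV. A sketch; the helpers around crypto registration and the exact unwind order vary by kernel version and are assumptions:

static int sec_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        struct sec_dev *sec;
        struct hisi_qm *qm;
        int ret;

        sec = devm_kzalloc(&pdev->dev, sizeof(*sec), GFP_KERNEL);
        if (!sec)
                return -ENOMEM;

        qm = &sec->qm;
        ret = sec_qm_init(qm, pdev);
        if (ret)
                return ret;

        sec->ctx_q_num = ctx_q_num;
        sec_iommu_used_check(sec);

        ret = sec_probe_init(sec);
        if (ret)
                goto err_qm_uninit;

        ret = hisi_qm_start(qm);
        if (ret)
                goto err_probe_uninit;

        ret = sec_debugfs_init(qm);
        if (ret)
                pci_warn(pdev, "Failed to init debugfs!\n");

        ret = sec_register_to_crypto();         /* assumed helper name */
        if (ret < 0)
                goto err_qm_stop;

        if (qm->fun_type == QM_HW_PF && vfs_num) {
                ret = hisi_qm_sriov_enable(pdev, vfs_num);
                if (ret < 0)
                        goto err_crypto_unregister;
        }

        return 0;

err_crypto_unregister:
        sec_unregister_from_crypto();           /* assumed helper name */
err_qm_stop:
        sec_debugfs_exit(qm);
        hisi_qm_stop(qm, QM_NORMAL);
err_probe_uninit:
        sec_probe_uninit(qm);
err_qm_uninit:
        sec_qm_uninit(qm);                      /* assumed helper name */
        return ret;
}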
922 if (qm->fun_type == QM_HW_PF && qm->vfs_num) in sec_remove()
923 hisi_qm_sriov_disable(pdev, qm->is_frozen); in sec_remove()
929 if (qm->fun_type == QM_HW_PF) in sec_remove()
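Finally, lines 922-929 fix the removal order: disable SR-IOV while the PF is still functional, tear down debugfs, stop the qm, and clear the debug registers on the PF only. A sketch of the whole callback; which common-qm helpers surround these steps varies by kernel version:

static void sec_remove(struct pci_dev *pdev)
{
        struct hisi_qm *qm = pci_get_drvdata(pdev);

        /* VFs must go away before their parent PF is stopped */
        if (qm->fun_type == QM_HW_PF && qm->vfs_num)
                hisi_qm_sriov_disable(pdev, qm->is_frozen);

        sec_debugfs_exit(qm);

        (void)hisi_qm_stop(qm, QM_NORMAL);

        /* only the PF owns the DFX/debug registers */
        if (qm->fun_type == QM_HW_PF)
                sec_debug_regs_clear(qm);

        sec_probe_uninit(qm);
}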