
1 // SPDX-License-Identifier: GPL-2.0
7 #include <linux/dma-mapping.h>
69 #define QM_SQ_TAIL_IDX(sqc) ((le16_to_cpu((sqc)->w11) >> 6) & 0x1)
79 #define QM_CQE_PHASE(cqe) (le16_to_cpu((cqe)->w7) & 0x1)
81 #define QM_CQ_TAIL_IDX(cqc) ((le16_to_cpu((cqc)->w11) >> 6) & 0x1)
87 #define QM_EQE_PHASE(eqe) ((le32_to_cpu((eqe)->dw0) >> 16) & 0x1)
90 #define QM_AEQE_PHASE(aeqe) ((le32_to_cpu((aeqe)->dw0) >> 16) & 0x1)
282 ((QM_Q_DEPTH - 1) | ((cqe_sz) << QM_CQ_CQE_SIZE_SHIFT))
296 ((QM_Q_DEPTH - 1) | ((u32)ilog2(sqe_sz) << QM_SQ_SQE_SIZE_SHIFT))
299 (qc)->head = 0; \
300 (qc)->tail = 0; \
301 (qc)->base_l = cpu_to_le32(lower_32_bits(base)); \
302 (qc)->base_h = cpu_to_le32(upper_32_bits(base)); \
303 (qc)->dw3 = 0; \
304 (qc)->w8 = 0; \
305 (qc)->rsvd0 = 0; \
306 (qc)->pasid = cpu_to_le16(pasid); \
307 (qc)->w11 = 0; \
308 (qc)->rsvd1 = 0; \
509 enum qm_state curr = atomic_read(&qm->status.flags); in qm_avail_state()
529 dev_dbg(&qm->pdev->dev, "change qm state from %s to %s\n", in qm_avail_state()
533 dev_warn(&qm->pdev->dev, "Can not change qm state from %s to %s\n", in qm_avail_state()
542 enum qm_state qm_curr = atomic_read(&qm->status.flags); in qm_qp_avail_state()
547 qp_curr = atomic_read(&qp->qp_status.flags); in qm_qp_avail_state()
575 dev_dbg(&qm->pdev->dev, "change qp state from %s to %s in QM %s\n", in qm_qp_avail_state()
579 dev_warn(&qm->pdev->dev, in qm_qp_avail_state()
589 mailbox->w0 = cpu_to_le16((cmd) | in qm_mb_pre_init()
592 mailbox->queue_num = cpu_to_le16(queue); in qm_mb_pre_init()
593 mailbox->base_l = cpu_to_le32(lower_32_bits(base)); in qm_mb_pre_init()
594 mailbox->base_h = cpu_to_le32(upper_32_bits(base)); in qm_mb_pre_init()
595 mailbox->rsvd = 0; in qm_mb_pre_init()
598 /* return 0 when the mailbox is ready, -ETIMEDOUT on hardware timeout */
603 return readl_relaxed_poll_timeout(qm->io_base + QM_MB_CMD_SEND_BASE, in qm_wait_mb_ready()
611 void __iomem *fun_base = qm->io_base + QM_MB_CMD_SEND_BASE; in qm_mb_write()
633 dev_err(&qm->pdev->dev, "QM mailbox is busy to start!\n"); in qm_mb_nolock()
640 dev_err(&qm->pdev->dev, "QM mailbox operation timeout!\n"); in qm_mb_nolock()
647 atomic64_inc(&qm->debug.dfx.mb_err_cnt); in qm_mb_nolock()
648 return -EBUSY; in qm_mb_nolock()
657 dev_dbg(&qm->pdev->dev, "QM mailbox request to q%u: %u-%llx\n", in qm_mb()
662 mutex_lock(&qm->mailbox_lock); in qm_mb()
664 mutex_unlock(&qm->mailbox_lock); in qm_mb()
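/*
 * Editor's sketch, not part of qm.c: the mailbox call sequence that qm_mb()
 * above wraps -- build the command with qm_mb_pre_init(), then issue it under
 * qm->mailbox_lock via qm_mb_nolock(). The command (QM_MB_CMD_SQC, op = 1 for
 * a read into sqc_dma) and the parameters are illustrative only.
 */
static int example_mb_read_sqc(struct hisi_qm *qm, dma_addr_t sqc_dma, u16 qp_id)
{
	struct qm_mailbox mailbox;
	int ret;

	qm_mb_pre_init(&mailbox, QM_MB_CMD_SQC, sqc_dma, qp_id, 1);

	mutex_lock(&qm->mailbox_lock);
	ret = qm_mb_nolock(qm, &mailbox);
	mutex_unlock(&qm->mailbox_lock);

	return ret;
}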
677 writeq(doorbell, qm->io_base + QM_DOORBELL_BASE_V1); in qm_db_v1()
682 void __iomem *io_base = qm->io_base; in qm_db_v2()
687 io_base = qm->db_io_base + (u64)qn * qm->db_interval + in qm_db_v2()
702 dev_dbg(&qm->pdev->dev, "QM doorbell request: qn=%u, cmd=%u, index=%u\n", in qm_db()
705 qm->ops->qm_db(qm, qn, cmd, index, priority); in qm_db()
712 writel(0x1, qm->io_base + QM_MEM_START_INIT); in qm_dev_mem_reset()
713 return readl_relaxed_poll_timeout(qm->io_base + QM_MEM_INIT_DONE, val, in qm_dev_mem_reset()
725 if (qm->fun_type == QM_HW_PF) in qm_get_irq_num_v2()
733 if (qm->fun_type == QM_HW_PF) in qm_get_irq_num_v3()
741 struct device *dev = &qm->pdev->dev; in qm_pm_get_sync()
744 if (qm->fun_type == QM_HW_VF || qm->ver < QM_HW_V3) in qm_pm_get_sync()
758 struct device *dev = &qm->pdev->dev; in qm_pm_put_sync()
760 if (qm->fun_type == QM_HW_VF || qm->ver < QM_HW_V3) in qm_pm_put_sync()
769 u16 cqn = le32_to_cpu(eqe->dw0) & QM_EQE_CQN_MASK; in qm_to_hisi_qp()
771 return &qm->qp_array[cqn]; in qm_to_hisi_qp()
776 if (qp->qp_status.cq_head == QM_Q_DEPTH - 1) { in qm_cq_head_update()
777 qp->qp_status.cqc_phase = !qp->qp_status.cqc_phase; in qm_cq_head_update()
778 qp->qp_status.cq_head = 0; in qm_cq_head_update()
780 qp->qp_status.cq_head++; in qm_cq_head_update()
786 if (unlikely(atomic_read(&qp->qp_status.flags) == QP_STOP)) in qm_poll_qp()
789 if (qp->event_cb) { in qm_poll_qp()
790 qp->event_cb(qp); in qm_poll_qp()
794 if (qp->req_cb) { in qm_poll_qp()
795 struct qm_cqe *cqe = qp->cqe + qp->qp_status.cq_head; in qm_poll_qp()
797 while (QM_CQE_PHASE(cqe) == qp->qp_status.cqc_phase) { in qm_poll_qp()
799 qp->req_cb(qp, qp->sqe + qm->sqe_size * in qm_poll_qp()
800 le16_to_cpu(cqe->sq_head)); in qm_poll_qp()
802 cqe = qp->cqe + qp->qp_status.cq_head; in qm_poll_qp()
803 qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ, in qm_poll_qp()
804 qp->qp_status.cq_head, 0); in qm_poll_qp()
805 atomic_dec(&qp->qp_status.used); in qm_poll_qp()
809 qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ, in qm_poll_qp()
810 qp->qp_status.cq_head, 1); in qm_poll_qp()
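/*
 * Editor's sketch, not part of qm.c: qm_poll_qp() above hands every completed
 * SQE to the queue's req_cb, so an accelerator driver installs a callback of
 * this shape on the qp it created. 'struct example_sqe' and its tag field are
 * purely illustrative; the real SQE layout is algorithm specific.
 */
struct example_sqe {
	__le16 tag;
	/* ... algorithm-specific descriptor fields ... */
};

static void example_req_cb(struct hisi_qp *qp, void *resp)
{
	struct example_sqe *sqe = resp;

	/* complete the request that was submitted with this tag */
	pr_debug("qp %u: request %u done\n", qp->qp_id, le16_to_cpu(sqe->tag));
}

/* installed after hisi_qm_create_qp(): qp->req_cb = example_req_cb; */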
817 struct qm_eqe *eqe = qm->eqe + qm->status.eq_head; in qm_work_process()
821 while (QM_EQE_PHASE(eqe) == qm->status.eqc_phase) { in qm_work_process()
826 if (qm->status.eq_head == QM_EQ_DEPTH - 1) { in qm_work_process()
827 qm->status.eqc_phase = !qm->status.eqc_phase; in qm_work_process()
828 eqe = qm->eqe; in qm_work_process()
829 qm->status.eq_head = 0; in qm_work_process()
832 qm->status.eq_head++; in qm_work_process()
835 if (eqe_num == QM_EQ_DEPTH / 2 - 1) { in qm_work_process()
837 qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0); in qm_work_process()
841 qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0); in qm_work_process()
849 if (qm->wq) in do_qm_irq()
850 queue_work(qm->wq, &qm->work); in do_qm_irq()
852 schedule_work(&qm->work); in do_qm_irq()
861 if (readl(qm->io_base + QM_VF_EQ_INT_SOURCE)) in qm_irq()
864 atomic64_inc(&qm->debug.dfx.err_irq_cnt); in qm_irq()
865 dev_err(&qm->pdev->dev, "invalid int source\n"); in qm_irq()
866 qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0); in qm_irq()
876 val = readl(qm->io_base + QM_IFC_INT_STATUS); in qm_mb_cmd_irq()
881 schedule_work(&qm->cmd_process); in qm_mb_cmd_irq()
889 struct qm_aeqe *aeqe = qm->aeqe + qm->status.aeq_head; in qm_aeq_irq()
892 atomic64_inc(&qm->debug.dfx.aeq_irq_cnt); in qm_aeq_irq()
893 if (!readl(qm->io_base + QM_VF_AEQ_INT_SOURCE)) in qm_aeq_irq()
896 while (QM_AEQE_PHASE(aeqe) == qm->status.aeqc_phase) { in qm_aeq_irq()
897 type = le32_to_cpu(aeqe->dw0) >> QM_AEQE_TYPE_SHIFT; in qm_aeq_irq()
899 dev_err(&qm->pdev->dev, "%s overflow\n", in qm_aeq_irq()
902 dev_err(&qm->pdev->dev, "unknown error type %u\n", in qm_aeq_irq()
905 if (qm->status.aeq_head == QM_Q_DEPTH - 1) { in qm_aeq_irq()
906 qm->status.aeqc_phase = !qm->status.aeqc_phase; in qm_aeq_irq()
907 aeqe = qm->aeqe; in qm_aeq_irq()
908 qm->status.aeq_head = 0; in qm_aeq_irq()
911 qm->status.aeq_head++; in qm_aeq_irq()
914 qm_db(qm, 0, QM_DOORBELL_CMD_AEQ, qm->status.aeq_head, 0); in qm_aeq_irq()
922 struct pci_dev *pdev = qm->pdev; in qm_irq_unregister()
926 if (qm->ver > QM_HW_V1) { in qm_irq_unregister()
929 if (qm->fun_type == QM_HW_PF) in qm_irq_unregister()
934 if (qm->ver > QM_HW_V2) in qm_irq_unregister()
940 struct hisi_qp_status *qp_status = &qp->qp_status; in qm_init_qp_status()
942 qp_status->sq_tail = 0; in qm_init_qp_status()
943 qp_status->cq_head = 0; in qm_init_qp_status()
944 qp_status->cqc_phase = true; in qm_init_qp_status()
945 atomic_set(&qp_status->used, 0); in qm_init_qp_status()
950 struct device *dev = &qm->pdev->dev; in qm_init_prefetch()
953 if (qm->ver < QM_HW_V3) in qm_init_prefetch()
971 writel(page_type, qm->io_base + QM_PAGE_SIZE); in qm_init_prefetch()
979 * IR(Mbps) * 10 ^ -3 = -------------------------
996 if (ir >= typical_qos_val[i - 1] && ir < typical_qos_val[i]) in acc_shaper_calc_cbs_s()
997 return typical_qos_cbs_s[i - 1]; in acc_shaper_calc_cbs_s()
1000 return typical_qos_cbs_s[QM_QOS_TYPICAL_NUM - 1]; in acc_shaper_calc_cbs_s()
1008 factor->cbs_s = acc_shaper_calc_cbs_s(ir); in qm_get_shaper_para()
1015 * IR(Mbps) = ------------------------- in qm_get_shaper_para()
1020 error_rate = QM_QOS_EXPAND_RATE * (u32)abs(ir_calc - ir) / ir; in qm_get_shaper_para()
1022 factor->cir_b = cir_b; in qm_get_shaper_para()
1023 factor->cir_u = cir_u; in qm_get_shaper_para()
1024 factor->cir_s = cir_s; in qm_get_shaper_para()
1032 return -EINVAL; in qm_get_shaper_para()
1043 if (qm->ver == QM_HW_V1) { in qm_vft_data_cfg()
1052 (u64)(number - 1) << QM_SQC_VFT_SQN_SHIFT; in qm_vft_data_cfg()
1056 if (qm->ver == QM_HW_V1) { in qm_vft_data_cfg()
1066 if (qm->ver >= QM_HW_V3) { in qm_vft_data_cfg()
1067 tmp = factor->cir_b | in qm_vft_data_cfg()
1068 (factor->cir_u << QM_SHAPER_FACTOR_CIR_U_SHIFT) | in qm_vft_data_cfg()
1069 (factor->cir_s << QM_SHAPER_FACTOR_CIR_S_SHIFT) | in qm_vft_data_cfg()
1071 (factor->cbs_s << QM_SHAPER_FACTOR_CBS_S_SHIFT); in qm_vft_data_cfg()
1077 writel(lower_32_bits(tmp), qm->io_base + QM_VFT_CFG_DATA_L); in qm_vft_data_cfg()
1078 writel(upper_32_bits(tmp), qm->io_base + QM_VFT_CFG_DATA_H); in qm_vft_data_cfg()
1084 struct qm_shaper_factor *factor = &qm->factor[fun_num]; in qm_set_vft_common()
1088 ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val, in qm_set_vft_common()
1094 writel(0x0, qm->io_base + QM_VFT_CFG_OP_WR); in qm_set_vft_common()
1095 writel(type, qm->io_base + QM_VFT_CFG_TYPE); in qm_set_vft_common()
1099 writel(fun_num, qm->io_base + QM_VFT_CFG); in qm_set_vft_common()
1103 writel(0x0, qm->io_base + QM_VFT_CFG_RDY); in qm_set_vft_common()
1104 writel(0x1, qm->io_base + QM_VFT_CFG_OP_ENABLE); in qm_set_vft_common()
1106 return readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val, in qm_set_vft_common()
1115 qm->factor[fun_num].func_qos = QM_QOS_MAX_VAL; in qm_shaper_init_vft()
1116 ret = qm_get_shaper_para(QM_QOS_MAX_VAL * QM_QOS_RATE, &qm->factor[fun_num]); in qm_shaper_init_vft()
1118 dev_err(&qm->pdev->dev, "failed to calculate shaper parameter!\n"); in qm_shaper_init_vft()
1121 writel(qm->type_rate, qm->io_base + QM_SHAPER_CFG); in qm_shaper_init_vft()
1145 if (qm->ver >= QM_HW_V3) { in qm_set_sqc_cqc_vft()
1170 sqc_vft = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) | in qm_get_vft_v2()
1171 ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) << 32); in qm_get_vft_v2()
1182 u32 num_vfs = qm->vfs_num; in qm_get_vf_qp_num()
1184 vfq_num = (qm->ctrl_qp_num - qm->qp_num) / num_vfs; in qm_get_vf_qp_num()
1185 if (vfq_num >= qm->max_qp_num) in qm_get_vf_qp_num()
1186 return qm->max_qp_num; in qm_get_vf_qp_num()
1188 remain_q_num = (qm->ctrl_qp_num - qm->qp_num) % num_vfs; in qm_get_vf_qp_num()
1189 if (vfq_num + remain_q_num <= qm->max_qp_num) in qm_get_vf_qp_num()
1201 struct qm_debug *debug = file->debug; in file_to_qm()
1208 return readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) >> QM_DFX_QN_SHIFT; in current_q_read()
1215 if (val >= qm->debug.curr_qm_qp_num) in current_q_write()
1216 return -EINVAL; in current_q_write()
1219 (readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & CURRENT_FUN_MASK); in current_q_write()
1220 writel(tmp, qm->io_base + QM_DFX_SQE_CNT_VF_SQN); in current_q_write()
1223 (readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & CURRENT_FUN_MASK); in current_q_write()
1224 writel(tmp, qm->io_base + QM_DFX_CQE_CNT_VF_CQN); in current_q_write()
1231 return readl(qm->io_base + QM_DFX_CNT_CLR_CE); in clear_enable_read()
1238 return -EINVAL; in clear_enable_write()
1240 writel(rd_clr_ctrl, qm->io_base + QM_DFX_CNT_CLR_CE); in clear_enable_write()
1247 return readl(qm->io_base + QM_DFX_MB_CNT_VF); in current_qm_read()
1254 if (val > qm->vfs_num) in current_qm_write()
1255 return -EINVAL; in current_qm_write()
1259 qm->debug.curr_qm_qp_num = qm->qp_num; in current_qm_write()
1261 qm->debug.curr_qm_qp_num = qm_get_vf_qp_num(qm, val); in current_qm_write()
1263 writel(val, qm->io_base + QM_DFX_MB_CNT_VF); in current_qm_write()
1264 writel(val, qm->io_base + QM_DFX_DB_CNT_VF); in current_qm_write()
1267 (readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & CURRENT_Q_MASK); in current_qm_write()
1268 writel(tmp, qm->io_base + QM_DFX_SQE_CNT_VF_SQN); in current_qm_write()
1271 (readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & CURRENT_Q_MASK); in current_qm_write()
1272 writel(tmp, qm->io_base + QM_DFX_CQE_CNT_VF_CQN); in current_qm_write()
1280 struct debugfs_file *file = filp->private_data; in qm_debug_read()
1281 enum qm_debug_file index = file->index; in qm_debug_read()
1291 mutex_lock(&file->lock); in qm_debug_read()
1305 mutex_unlock(&file->lock); in qm_debug_read()
1312 mutex_unlock(&file->lock); in qm_debug_read()
1314 return -EINVAL; in qm_debug_read()
1320 struct debugfs_file *file = filp->private_data; in qm_debug_write()
1321 enum qm_debug_file index = file->index; in qm_debug_write()
1331 return -ENOSPC; in qm_debug_write()
1333 len = simple_write_to_buffer(tbuf, QM_DBG_TMP_BUF_LEN - 1, pos, buf, in qm_debug_write()
1340 return -EFAULT; in qm_debug_write()
1346 mutex_lock(&file->lock); in qm_debug_write()
1358 ret = -EINVAL; in qm_debug_write()
1360 mutex_unlock(&file->lock); in qm_debug_write()
1411 * hisi_qm_regs_dump() - Dump registers' values.
1419 struct pci_dev *pdev = to_pci_dev(regset->dev); in hisi_qm_regs_dump()
1421 const struct debugfs_reg32 *regs = regset->regs; in hisi_qm_regs_dump()
1422 int regs_len = regset->nregs; in hisi_qm_regs_dump()
1431 val = readl(regset->base + regs[i].offset); in hisi_qm_regs_dump()
1441 struct hisi_qm *qm = s->private; in qm_regs_show()
1444 if (qm->fun_type == QM_HW_PF) { in qm_regs_show()
1452 regset.base = qm->io_base; in qm_regs_show()
1453 regset.dev = &qm->pdev->dev; in qm_regs_show()
1477 struct device *dev = &qm->pdev->dev; in qm_ctx_alloc()
1482 return ERR_PTR(-ENOMEM); in qm_ctx_alloc()
1488 return ERR_PTR(-ENOMEM); in qm_ctx_alloc()
1497 struct device *dev = &qm->pdev->dev; in qm_ctx_free()
1506 struct device *dev = &qm->pdev->dev; in dump_show()
1513 return -ENOMEM; in dump_show()
1521 info_buf[i - 1] = *info_curr; in dump_show()
1523 info_buf[i - 3] = *info_curr; in dump_show()
1550 struct device *dev = &qm->pdev->dev; in qm_sqc_dump()
1557 return -EINVAL; in qm_sqc_dump()
1560 if (ret || qp_id >= qm->qp_num) { in qm_sqc_dump()
1561 dev_err(dev, "Please input qp num (0-%u)", qm->qp_num - 1); in qm_sqc_dump()
1562 return -EINVAL; in qm_sqc_dump()
1571 down_read(&qm->qps_lock); in qm_sqc_dump()
1572 if (qm->sqc) { in qm_sqc_dump()
1573 sqc_curr = qm->sqc + qp_id; in qm_sqc_dump()
1580 up_read(&qm->qps_lock); in qm_sqc_dump()
1596 struct device *dev = &qm->pdev->dev; in qm_cqc_dump()
1603 return -EINVAL; in qm_cqc_dump()
1606 if (ret || qp_id >= qm->qp_num) { in qm_cqc_dump()
1607 dev_err(dev, "Please input qp num (0-%u)", qm->qp_num - 1); in qm_cqc_dump()
1608 return -EINVAL; in qm_cqc_dump()
1617 down_read(&qm->qps_lock); in qm_cqc_dump()
1618 if (qm->cqc) { in qm_cqc_dump()
1619 cqc_curr = qm->cqc + qp_id; in qm_cqc_dump()
1626 up_read(&qm->qps_lock); in qm_cqc_dump()
1643 struct device *dev = &qm->pdev->dev; in qm_eqc_aeqc_dump()
1650 return -EINVAL; in qm_eqc_aeqc_dump()
1673 struct device *dev = &qm->pdev->dev; in q_dump_param_parse()
1674 unsigned int qp_num = qm->qp_num; in q_dump_param_parse()
1681 return -EINVAL; in q_dump_param_parse()
1686 dev_err(dev, "Please input qp num (0-%u)", qp_num - 1); in q_dump_param_parse()
1687 return -EINVAL; in q_dump_param_parse()
1693 return -EINVAL; in q_dump_param_parse()
1698 dev_err(dev, "Please input sqe num (0-%d)", QM_Q_DEPTH - 1); in q_dump_param_parse()
1699 return -EINVAL; in q_dump_param_parse()
1704 return -EINVAL; in q_dump_param_parse()
1712 struct device *dev = &qm->pdev->dev; in qm_sq_dump()
1722 sqe = kzalloc(qm->sqe_size * QM_Q_DEPTH, GFP_KERNEL); in qm_sq_dump()
1724 return -ENOMEM; in qm_sq_dump()
1726 qp = &qm->qp_array[qp_id]; in qm_sq_dump()
1727 memcpy(sqe, qp->sqe, qm->sqe_size * QM_Q_DEPTH); in qm_sq_dump()
1728 sqe_curr = sqe + (u32)(sqe_id * qm->sqe_size); in qm_sq_dump()
1729 memset(sqe_curr + qm->debug.sqe_mask_offset, QM_SQE_ADDR_MASK, in qm_sq_dump()
1730 qm->debug.sqe_mask_len); in qm_sq_dump()
1732 ret = dump_show(qm, sqe_curr, qm->sqe_size, "SQE"); in qm_sq_dump()
1743 struct device *dev = &qm->pdev->dev; in qm_cq_dump()
1753 qp = &qm->qp_array[qp_id]; in qm_cq_dump()
1754 cqe_curr = qp->cqe + cqe_id; in qm_cq_dump()
1765 struct device *dev = &qm->pdev->dev; in qm_eq_aeq_dump()
1771 return -EINVAL; in qm_eq_aeq_dump()
1775 return -EINVAL; in qm_eq_aeq_dump()
1778 dev_err(dev, "Please input eqe num (0-%d)", QM_EQ_DEPTH - 1); in qm_eq_aeq_dump()
1779 return -EINVAL; in qm_eq_aeq_dump()
1781 dev_err(dev, "Please input aeqe num (0-%d)", QM_Q_DEPTH - 1); in qm_eq_aeq_dump()
1782 return -EINVAL; in qm_eq_aeq_dump()
1785 down_read(&qm->qps_lock); in qm_eq_aeq_dump()
1787 if (qm->eqe && !strcmp(name, "EQE")) { in qm_eq_aeq_dump()
1788 xeqe = qm->eqe + xeqe_id; in qm_eq_aeq_dump()
1789 } else if (qm->aeqe && !strcmp(name, "AEQE")) { in qm_eq_aeq_dump()
1790 xeqe = qm->aeqe + xeqe_id; in qm_eq_aeq_dump()
1792 ret = -EINVAL; in qm_eq_aeq_dump()
1801 up_read(&qm->qps_lock); in qm_eq_aeq_dump()
1807 struct device *dev = &qm->pdev->dev; in qm_dbg_help()
1811 return -EINVAL; in qm_dbg_help()
1829 struct device *dev = &qm->pdev->dev; in qm_cmd_write_dump()
1835 return -ENOMEM; in qm_cmd_write_dump()
1840 ret = -EINVAL; in qm_cmd_write_dump()
1865 ret = -EINVAL; in qm_cmd_write_dump()
1879 struct hisi_qm *qm = filp->private_data; in qm_cmd_write()
1891 if (unlikely(atomic_read(&qm->status.flags) == QM_STOP)) in qm_cmd_write()
1895 ret = -ENOSPC; in qm_cmd_write()
1908 count = cmd_buf_tmp - cmd_buf + 1; in qm_cmd_write()
1936 struct debugfs_file *file = qm->debug.files + index; in qm_create_debugfs_file()
1941 file->index = index; in qm_create_debugfs_file()
1942 mutex_init(&file->lock); in qm_create_debugfs_file()
1943 file->debug = &qm->debug; in qm_create_debugfs_file()
1948 writel(QM_ABNORMAL_INT_MASK_VALUE, qm->io_base + QM_ABNORMAL_INT_MASK); in qm_hw_error_init_v1()
1953 qm->error_mask = ce | nfe | fe; in qm_hw_error_cfg()
1956 qm->io_base + QM_ABNORMAL_INT_SOURCE); in qm_hw_error_cfg()
1959 writel(ce, qm->io_base + QM_RAS_CE_ENABLE); in qm_hw_error_cfg()
1960 writel(QM_RAS_CE_TIMES_PER_IRQ, qm->io_base + QM_RAS_CE_THRESHOLD); in qm_hw_error_cfg()
1961 writel(nfe, qm->io_base + QM_RAS_NFE_ENABLE); in qm_hw_error_cfg()
1962 writel(fe, qm->io_base + QM_RAS_FE_ENABLE); in qm_hw_error_cfg()
1972 irq_unmask &= readl(qm->io_base + QM_ABNORMAL_INT_MASK); in qm_hw_error_init_v2()
1973 writel(irq_unmask, qm->io_base + QM_ABNORMAL_INT_MASK); in qm_hw_error_init_v2()
1978 writel(QM_ABNORMAL_INT_MASK_VALUE, qm->io_base + QM_ABNORMAL_INT_MASK); in qm_hw_error_uninit_v2()
1989 writel(nfe & (~QM_DB_RANDOM_INVALID), qm->io_base + QM_OOO_SHUTDOWN_SEL); in qm_hw_error_init_v3()
1991 irq_unmask &= readl(qm->io_base + QM_ABNORMAL_INT_MASK); in qm_hw_error_init_v3()
1992 writel(irq_unmask, qm->io_base + QM_ABNORMAL_INT_MASK); in qm_hw_error_init_v3()
1997 writel(QM_ABNORMAL_INT_MASK_VALUE, qm->io_base + QM_ABNORMAL_INT_MASK); in qm_hw_error_uninit_v3()
2000 writel(0x0, qm->io_base + QM_OOO_SHUTDOWN_SEL); in qm_hw_error_uninit_v3()
2006 struct device *dev = &qm->pdev->dev; in qm_log_hw_error()
2012 if (!(err->int_msk & error_status)) in qm_log_hw_error()
2016 err->msg, err->int_msk); in qm_log_hw_error()
2018 if (err->int_msk & QM_DB_TIMEOUT) { in qm_log_hw_error()
2019 reg_val = readl(qm->io_base + QM_ABNORMAL_INF01); in qm_log_hw_error()
2025 } else if (err->int_msk & QM_OF_FIFO_OF) { in qm_log_hw_error()
2026 reg_val = readl(qm->io_base + QM_ABNORMAL_INF00); in qm_log_hw_error()
2045 tmp = readl(qm->io_base + QM_ABNORMAL_INT_STATUS); in qm_hw_error_handle_v2()
2046 error_status = qm->error_mask & tmp; in qm_hw_error_handle_v2()
2050 qm->err_status.is_qm_ecc_mbit = true; in qm_hw_error_handle_v2()
2056 writel(error_status, qm->io_base + in qm_hw_error_handle_v2()
2058 writel(qm->err_info.nfe, in qm_hw_error_handle_v2()
2059 qm->io_base + QM_RAS_NFE_ENABLE); in qm_hw_error_handle_v2()
2071 return readl(qm->io_base + QM_ABNORMAL_INT_STATUS); in qm_get_hw_error_status()
2076 return qm->err_ini->get_dev_hw_err_status(qm); in qm_get_dev_err_status()
2084 if (qm->fun_type == QM_HW_VF) in qm_check_dev_error()
2090 if (qm->ver < QM_HW_V3) in qm_check_dev_error()
2092 (dev_val & qm->err_info.ecc_2bits_mask); in qm_check_dev_error()
2094 return (val & readl(qm->io_base + QM_OOO_SHUTDOWN_SEL)) || in qm_check_dev_error()
2095 (dev_val & (~qm->err_info.dev_ce_mask)); in qm_check_dev_error()
2104 mutex_lock(&qm->mailbox_lock); in qm_get_mb_cmd()
2109 *msg = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) | in qm_get_mb_cmd()
2110 ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) << 32); in qm_get_mb_cmd()
2113 mutex_unlock(&qm->mailbox_lock); in qm_get_mb_cmd()
2121 if (qm->fun_type == QM_HW_PF) in qm_clear_cmd_interrupt()
2122 writeq(vf_mask, qm->io_base + QM_IFC_INT_SOURCE_P); in qm_clear_cmd_interrupt()
2124 val = readl(qm->io_base + QM_IFC_INT_SOURCE_V); in qm_clear_cmd_interrupt()
2126 writel(val, qm->io_base + QM_IFC_INT_SOURCE_V); in qm_clear_cmd_interrupt()
2131 struct device *dev = &qm->pdev->dev; in qm_handle_vf_msg()
2161 struct device *dev = &qm->pdev->dev; in qm_wait_vf_prepare_finish()
2162 u32 vfs_num = qm->vfs_num; in qm_wait_vf_prepare_finish()
2168 if (!qm->vfs_num || qm->ver < QM_HW_V3) in qm_wait_vf_prepare_finish()
2172 val = readq(qm->io_base + QM_IFC_INT_SOURCE_P); in qm_wait_vf_prepare_finish()
2178 ret = -EBUSY; in qm_wait_vf_prepare_finish()
2203 val = readl(qm->io_base + QM_IFC_INT_CFG); in qm_trigger_vf_interrupt()
2206 writel(val, qm->io_base + QM_IFC_INT_CFG); in qm_trigger_vf_interrupt()
2208 val = readl(qm->io_base + QM_IFC_INT_SET_P); in qm_trigger_vf_interrupt()
2210 writel(val, qm->io_base + QM_IFC_INT_SET_P); in qm_trigger_vf_interrupt()
2217 val = readl(qm->io_base + QM_IFC_INT_SET_V); in qm_trigger_pf_interrupt()
2219 writel(val, qm->io_base + QM_IFC_INT_SET_V); in qm_trigger_pf_interrupt()
2224 struct device *dev = &qm->pdev->dev; in qm_ping_single_vf()
2231 mutex_lock(&qm->mailbox_lock); in qm_ping_single_vf()
2241 val = readq(qm->io_base + QM_IFC_READY_STATUS); in qm_ping_single_vf()
2248 ret = -ETIMEDOUT; in qm_ping_single_vf()
2254 mutex_unlock(&qm->mailbox_lock); in qm_ping_single_vf()
2260 struct device *dev = &qm->pdev->dev; in qm_ping_all_vfs()
2261 u32 vfs_num = qm->vfs_num; in qm_ping_all_vfs()
2269 mutex_lock(&qm->mailbox_lock); in qm_ping_all_vfs()
2274 mutex_unlock(&qm->mailbox_lock); in qm_ping_all_vfs()
2281 val = readq(qm->io_base + QM_IFC_READY_STATUS); in qm_ping_all_vfs()
2284 mutex_unlock(&qm->mailbox_lock); in qm_ping_all_vfs()
2292 mutex_unlock(&qm->mailbox_lock); in qm_ping_all_vfs()
2300 return -ETIMEDOUT; in qm_ping_all_vfs()
2311 mutex_lock(&qm->mailbox_lock); in qm_ping_pf()
2314 dev_err(&qm->pdev->dev, "failed to send command to PF!\n"); in qm_ping_pf()
2322 val = readl(qm->io_base + QM_IFC_INT_SET_V); in qm_ping_pf()
2327 ret = -ETIMEDOUT; in qm_ping_pf()
2333 mutex_unlock(&qm->mailbox_lock); in qm_ping_pf()
2339 return qm_mb(qp->qm, QM_MB_CMD_STOP_QP, 0, qp->qp_id, 0); in qm_stop_qp()
2344 struct pci_dev *pdev = qm->pdev; in qm_set_msi()
2347 pci_write_config_dword(pdev, pdev->msi_cap + PCI_MSI_MASK_64, in qm_set_msi()
2350 pci_write_config_dword(pdev, pdev->msi_cap + PCI_MSI_MASK_64, in qm_set_msi()
2352 if (qm->err_status.is_qm_ecc_mbit || in qm_set_msi()
2353 qm->err_status.is_dev_ecc_mbit) in qm_set_msi()
2357 if (readl(qm->io_base + QM_PEH_DFX_INFO0)) in qm_set_msi()
2358 return -EFAULT; in qm_set_msi()
2366 struct pci_dev *pdev = qm->pdev; in qm_wait_msi_finish()
2373 pci_read_config_dword(pdev, pdev->msi_cap + in qm_wait_msi_finish()
2386 ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_DFX_INFO0, in qm_wait_msi_finish()
2392 ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_DFX_INFO1, in qm_wait_msi_finish()
2401 struct pci_dev *pdev = qm->pdev; in qm_set_msi_v3()
2402 int ret = -ETIMEDOUT; in qm_set_msi_v3()
2405 pci_read_config_dword(pdev, pdev->msi_cap, &cmd); in qm_set_msi_v3()
2411 pci_write_config_dword(pdev, pdev->msi_cap, cmd); in qm_set_msi_v3()
2414 pci_read_config_dword(pdev, pdev->msi_cap, &cmd); in qm_set_msi_v3()
2461 struct hisi_qp_status *qp_status = &qp->qp_status; in qm_get_avail_sqe()
2462 u16 sq_tail = qp_status->sq_tail; in qm_get_avail_sqe()
2464 if (unlikely(atomic_read(&qp->qp_status.used) == QM_Q_DEPTH - 1)) in qm_get_avail_sqe()
2467 return qp->sqe + sq_tail * qp->qm->sqe_size; in qm_get_avail_sqe()
2472 struct device *dev = &qm->pdev->dev; in qm_create_qp_nolock()
2477 return ERR_PTR(-EPERM); in qm_create_qp_nolock()
2479 if (qm->qp_in_used == qm->qp_num) { in qm_create_qp_nolock()
2481 qm->qp_num); in qm_create_qp_nolock()
2482 atomic64_inc(&qm->debug.dfx.create_qp_err_cnt); in qm_create_qp_nolock()
2483 return ERR_PTR(-EBUSY); in qm_create_qp_nolock()
2486 qp_id = idr_alloc_cyclic(&qm->qp_idr, NULL, 0, qm->qp_num, GFP_ATOMIC); in qm_create_qp_nolock()
2489 qm->qp_num); in qm_create_qp_nolock()
2490 atomic64_inc(&qm->debug.dfx.create_qp_err_cnt); in qm_create_qp_nolock()
2491 return ERR_PTR(-EBUSY); in qm_create_qp_nolock()
2494 qp = &qm->qp_array[qp_id]; in qm_create_qp_nolock()
2496 memset(qp->cqe, 0, sizeof(struct qm_cqe) * QM_Q_DEPTH); in qm_create_qp_nolock()
2498 qp->event_cb = NULL; in qm_create_qp_nolock()
2499 qp->req_cb = NULL; in qm_create_qp_nolock()
2500 qp->qp_id = qp_id; in qm_create_qp_nolock()
2501 qp->alg_type = alg_type; in qm_create_qp_nolock()
2502 qp->is_in_kernel = true; in qm_create_qp_nolock()
2503 qm->qp_in_used++; in qm_create_qp_nolock()
2504 atomic_set(&qp->qp_status.flags, QP_INIT); in qm_create_qp_nolock()
2510 * hisi_qm_create_qp() - Create a queue pair from qm.
2514 * return created qp, -EBUSY if all qps in qm allocated, -ENOMEM if allocating
2526 down_write(&qm->qps_lock); in hisi_qm_create_qp()
2528 up_write(&qm->qps_lock); in hisi_qm_create_qp()
2538 * hisi_qm_release_qp() - Release a qp back to its qm.
2545 struct hisi_qm *qm = qp->qm; in hisi_qm_release_qp()
2547 down_write(&qm->qps_lock); in hisi_qm_release_qp()
2550 up_write(&qm->qps_lock); in hisi_qm_release_qp()
2554 qm->qp_in_used--; in hisi_qm_release_qp()
2555 idr_remove(&qm->qp_idr, qp->qp_id); in hisi_qm_release_qp()
2557 up_write(&qm->qps_lock); in hisi_qm_release_qp()
2565 struct hisi_qm *qm = qp->qm; in qm_sq_ctx_cfg()
2566 struct device *dev = &qm->pdev->dev; in qm_sq_ctx_cfg()
2567 enum qm_hw_ver ver = qm->ver; in qm_sq_ctx_cfg()
2574 return -ENOMEM; in qm_sq_ctx_cfg()
2576 INIT_QC_COMMON(sqc, qp->sqe_dma, pasid); in qm_sq_ctx_cfg()
2578 sqc->dw3 = cpu_to_le32(QM_MK_SQC_DW3_V1(0, 0, 0, qm->sqe_size)); in qm_sq_ctx_cfg()
2579 sqc->w8 = cpu_to_le16(QM_Q_DEPTH - 1); in qm_sq_ctx_cfg()
2581 sqc->dw3 = cpu_to_le32(QM_MK_SQC_DW3_V2(qm->sqe_size)); in qm_sq_ctx_cfg()
2582 sqc->w8 = 0; /* rand_qc */ in qm_sq_ctx_cfg()
2584 sqc->cq_num = cpu_to_le16(qp_id); in qm_sq_ctx_cfg()
2585 sqc->w13 = cpu_to_le16(QM_MK_SQC_W13(0, 1, qp->alg_type)); in qm_sq_ctx_cfg()
2587 if (ver >= QM_HW_V3 && qm->use_sva && !qp->is_in_kernel) in qm_sq_ctx_cfg()
2588 sqc->w11 = cpu_to_le16(QM_QC_PASID_ENABLE << in qm_sq_ctx_cfg()
2595 return -ENOMEM; in qm_sq_ctx_cfg()
2607 struct hisi_qm *qm = qp->qm; in qm_cq_ctx_cfg()
2608 struct device *dev = &qm->pdev->dev; in qm_cq_ctx_cfg()
2609 enum qm_hw_ver ver = qm->ver; in qm_cq_ctx_cfg()
2616 return -ENOMEM; in qm_cq_ctx_cfg()
2618 INIT_QC_COMMON(cqc, qp->cqe_dma, pasid); in qm_cq_ctx_cfg()
2620 cqc->dw3 = cpu_to_le32(QM_MK_CQC_DW3_V1(0, 0, 0, in qm_cq_ctx_cfg()
2622 cqc->w8 = cpu_to_le16(QM_Q_DEPTH - 1); in qm_cq_ctx_cfg()
2624 cqc->dw3 = cpu_to_le32(QM_MK_CQC_DW3_V2(QM_QC_CQE_SIZE)); in qm_cq_ctx_cfg()
2625 cqc->w8 = 0; /* rand_qc */ in qm_cq_ctx_cfg()
2627 cqc->dw6 = cpu_to_le32(1 << QM_CQ_PHASE_SHIFT | 1 << QM_CQ_FLAG_SHIFT); in qm_cq_ctx_cfg()
2629 if (ver >= QM_HW_V3 && qm->use_sva && !qp->is_in_kernel) in qm_cq_ctx_cfg()
2630 cqc->w11 = cpu_to_le16(QM_QC_PASID_ENABLE); in qm_cq_ctx_cfg()
2636 return -ENOMEM; in qm_cq_ctx_cfg()
2661 struct hisi_qm *qm = qp->qm; in qm_start_qp_nolock()
2662 struct device *dev = &qm->pdev->dev; in qm_start_qp_nolock()
2663 int qp_id = qp->qp_id; in qm_start_qp_nolock()
2668 return -EPERM; in qm_start_qp_nolock()
2674 atomic_set(&qp->qp_status.flags, QP_START); in qm_start_qp_nolock()
2681 * hisi_qm_start_qp() - Start a qp into running.
2686 * successful, Return -EBUSY if failed.
2690 struct hisi_qm *qm = qp->qm; in hisi_qm_start_qp()
2693 down_write(&qm->qps_lock); in hisi_qm_start_qp()
2695 up_write(&qm->qps_lock); in hisi_qm_start_qp()
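/*
 * Editor's sketch, not part of qm.c: how an in-kernel user obtains and starts
 * a queue pair with the exported helpers above. The pasid argument of
 * hisi_qm_start_qp() only matters for SVA user-space queues, so kernel
 * callers pass 0.
 */
static struct hisi_qp *example_get_started_qp(struct hisi_qm *qm, u8 alg_type)
{
	struct hisi_qp *qp;
	int ret;

	qp = hisi_qm_create_qp(qm, alg_type);
	if (IS_ERR(qp))
		return qp;

	ret = hisi_qm_start_qp(qp, 0);
	if (ret) {
		hisi_qm_release_qp(qp);
		return ERR_PTR(ret);
	}

	return qp;
}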
2702 * qp_stop_fail_cb() - call request cb.
2709 int qp_used = atomic_read(&qp->qp_status.used); in qp_stop_fail_cb()
2710 u16 cur_tail = qp->qp_status.sq_tail; in qp_stop_fail_cb()
2711 u16 cur_head = (cur_tail + QM_Q_DEPTH - qp_used) % QM_Q_DEPTH; in qp_stop_fail_cb()
2712 struct hisi_qm *qm = qp->qm; in qp_stop_fail_cb()
2718 qp->req_cb(qp, qp->sqe + (u32)(qm->sqe_size * pos)); in qp_stop_fail_cb()
2719 atomic_dec(&qp->qp_status.used); in qp_stop_fail_cb()
2724 * qm_drain_qp() - Drain a qp.
2733 struct hisi_qm *qm = qp->qm; in qm_drain_qp()
2734 struct device *dev = &qm->pdev->dev; in qm_drain_qp()
2746 if (qm->ops->stop_qp) { in qm_drain_qp()
2747 ret = qm->ops->stop_qp(qp); in qm_drain_qp()
2749 dev_err(dev, "Failed to stop qp(%u)!\n", qp->qp_id); in qm_drain_qp()
2756 return -ENOMEM; in qm_drain_qp()
2760 ret = qm_dump_sqc_raw(qm, dma_addr, qp->qp_id); in qm_drain_qp()
2768 qp->qp_id); in qm_drain_qp()
2775 if ((sqc->tail == cqc->tail) && in qm_drain_qp()
2780 dev_err(dev, "Fail to empty queue %u!\n", qp->qp_id); in qm_drain_qp()
2781 ret = -EBUSY; in qm_drain_qp()
2795 struct device *dev = &qp->qm->pdev->dev; in qm_stop_qp_nolock()
2804 if (atomic_read(&qp->qp_status.flags) == QP_STOP) { in qm_stop_qp_nolock()
2805 qp->is_resetting = false; in qm_stop_qp_nolock()
2809 if (!qm_qp_avail_state(qp->qm, qp, QP_STOP)) in qm_stop_qp_nolock()
2810 return -EPERM; in qm_stop_qp_nolock()
2812 atomic_set(&qp->qp_status.flags, QP_STOP); in qm_stop_qp_nolock()
2818 if (qp->qm->wq) in qm_stop_qp_nolock()
2819 flush_workqueue(qp->qm->wq); in qm_stop_qp_nolock()
2821 flush_work(&qp->qm->work); in qm_stop_qp_nolock()
2823 if (unlikely(qp->is_resetting && atomic_read(&qp->qp_status.used))) in qm_stop_qp_nolock()
2826 dev_dbg(dev, "stop queue %u!", qp->qp_id); in qm_stop_qp_nolock()
2832 * hisi_qm_stop_qp() - Stop a qp in qm.
2841 down_write(&qp->qm->qps_lock); in hisi_qm_stop_qp()
2843 up_write(&qp->qm->qps_lock); in hisi_qm_stop_qp()
2850 * hisi_qp_send() - Queue up a task in the hardware queue.
2854 * This function will return -EBUSY if qp is currently full, and -EAGAIN
2866 struct hisi_qp_status *qp_status = &qp->qp_status; in hisi_qp_send()
2867 u16 sq_tail = qp_status->sq_tail; in hisi_qp_send()
2871 if (unlikely(atomic_read(&qp->qp_status.flags) == QP_STOP || in hisi_qp_send()
2872 atomic_read(&qp->qm->status.flags) == QM_STOP || in hisi_qp_send()
2873 qp->is_resetting)) { in hisi_qp_send()
2874 dev_info_ratelimited(&qp->qm->pdev->dev, "QP is stopped or resetting\n"); in hisi_qp_send()
2875 return -EAGAIN; in hisi_qp_send()
2879 return -EBUSY; in hisi_qp_send()
2881 memcpy(sqe, msg, qp->qm->sqe_size); in hisi_qp_send()
2883 qm_db(qp->qm, qp->qp_id, QM_DOORBELL_CMD_SQ, sq_tail_next, 0); in hisi_qp_send()
2884 atomic_inc(&qp->qp_status.used); in hisi_qp_send()
2885 qp_status->sq_tail = sq_tail_next; in hisi_qp_send()
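/*
 * Editor's sketch, not part of qm.c: a typical submit path built on
 * hisi_qp_send() above. -EBUSY means the hardware queue is full (wait for
 * completions and retry), -EAGAIN means the qp or qm is stopping or
 * resetting. 'msg' must be exactly sqe_size bytes.
 */
static int example_submit(struct hisi_qp *qp, const void *msg)
{
	int ret;

	do {
		ret = hisi_qp_send(qp, msg);
		if (ret == -EBUSY)
			cpu_relax();	/* hardware queue full */
	} while (ret == -EBUSY);

	return ret;	/* 0 on success, -EAGAIN while stopping/resetting */
}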
2895 if (qm->ver == QM_HW_V1) in hisi_qm_cache_wb()
2898 writel(0x1, qm->io_base + QM_CACHE_WB_START); in hisi_qm_cache_wb()
2899 if (readl_relaxed_poll_timeout(qm->io_base + QM_CACHE_WB_DONE, in hisi_qm_cache_wb()
2902 dev_err(&qm->pdev->dev, "QM writeback sqc cache fail!\n"); in hisi_qm_cache_wb()
2907 wake_up_interruptible(&qp->uacce_q->wait); in qm_qp_event_notifier()
2912 return hisi_qm_get_free_qp_num(uacce->priv); in hisi_qm_get_available_instances()
2919 struct hisi_qm *qm = uacce->priv; in hisi_qm_uacce_get_queue()
2927 q->priv = qp; in hisi_qm_uacce_get_queue()
2928 q->uacce = uacce; in hisi_qm_uacce_get_queue()
2929 qp->uacce_q = q; in hisi_qm_uacce_get_queue()
2930 qp->event_cb = qm_qp_event_notifier; in hisi_qm_uacce_get_queue()
2931 qp->pasid = arg; in hisi_qm_uacce_get_queue()
2932 qp->is_in_kernel = false; in hisi_qm_uacce_get_queue()
2939 struct hisi_qp *qp = q->priv; in hisi_qm_uacce_put_queue()
2941 hisi_qm_cache_wb(qp->qm); in hisi_qm_uacce_put_queue()
2950 struct hisi_qp *qp = q->priv; in hisi_qm_uacce_mmap()
2951 struct hisi_qm *qm = qp->qm; in hisi_qm_uacce_mmap()
2952 resource_size_t phys_base = qm->db_phys_base + in hisi_qm_uacce_mmap()
2953 qp->qp_id * qm->db_interval; in hisi_qm_uacce_mmap()
2954 size_t sz = vma->vm_end - vma->vm_start; in hisi_qm_uacce_mmap()
2955 struct pci_dev *pdev = qm->pdev; in hisi_qm_uacce_mmap()
2956 struct device *dev = &pdev->dev; in hisi_qm_uacce_mmap()
2960 switch (qfr->type) { in hisi_qm_uacce_mmap()
2962 if (qm->ver == QM_HW_V1) { in hisi_qm_uacce_mmap()
2964 return -EINVAL; in hisi_qm_uacce_mmap()
2965 } else if (qm->ver == QM_HW_V2 || !qm->use_db_isolation) { in hisi_qm_uacce_mmap()
2968 return -EINVAL; in hisi_qm_uacce_mmap()
2970 if (sz > qm->db_interval) in hisi_qm_uacce_mmap()
2971 return -EINVAL; in hisi_qm_uacce_mmap()
2974 vma->vm_flags |= VM_IO; in hisi_qm_uacce_mmap()
2976 return remap_pfn_range(vma, vma->vm_start, in hisi_qm_uacce_mmap()
2978 sz, pgprot_noncached(vma->vm_page_prot)); in hisi_qm_uacce_mmap()
2980 if (sz != qp->qdma.size) in hisi_qm_uacce_mmap()
2981 return -EINVAL; in hisi_qm_uacce_mmap()
2987 vm_pgoff = vma->vm_pgoff; in hisi_qm_uacce_mmap()
2988 vma->vm_pgoff = 0; in hisi_qm_uacce_mmap()
2989 ret = dma_mmap_coherent(dev, vma, qp->qdma.va, in hisi_qm_uacce_mmap()
2990 qp->qdma.dma, sz); in hisi_qm_uacce_mmap()
2991 vma->vm_pgoff = vm_pgoff; in hisi_qm_uacce_mmap()
2995 return -EINVAL; in hisi_qm_uacce_mmap()
3001 struct hisi_qp *qp = q->priv; in hisi_qm_uacce_start_queue()
3003 return hisi_qm_start_qp(qp, qp->pasid); in hisi_qm_uacce_start_queue()
3008 hisi_qm_stop_qp(q->priv); in hisi_qm_uacce_stop_queue()
3013 struct hisi_qp *qp = q->priv; in hisi_qm_is_q_updated()
3014 struct qm_cqe *cqe = qp->cqe + qp->qp_status.cq_head; in hisi_qm_is_q_updated()
3017 while (QM_CQE_PHASE(cqe) == qp->qp_status.cqc_phase) { in hisi_qm_is_q_updated()
3021 cqe = qp->cqe + qp->qp_status.cq_head; in hisi_qm_is_q_updated()
3030 struct hisi_qm *qm = q->uacce->priv; in qm_set_sqctype()
3031 struct hisi_qp *qp = q->priv; in qm_set_sqctype()
3033 down_write(&qm->qps_lock); in qm_set_sqctype()
3034 qp->alg_type = type; in qm_set_sqctype()
3035 up_write(&qm->qps_lock); in qm_set_sqctype()
3041 struct hisi_qp *qp = q->priv; in hisi_qm_uacce_ioctl()
3047 return -EFAULT; in hisi_qm_uacce_ioctl()
3050 return -EINVAL; in hisi_qm_uacce_ioctl()
3053 qp_ctx.id = qp->qp_id; in hisi_qm_uacce_ioctl()
3057 return -EFAULT; in hisi_qm_uacce_ioctl()
3059 return -EINVAL; in hisi_qm_uacce_ioctl()
3078 struct pci_dev *pdev = qm->pdev; in qm_alloc_uacce()
3088 ret = strscpy(interface.name, pdev->driver->name, in qm_alloc_uacce()
3091 return -ENAMETOOLONG; in qm_alloc_uacce()
3093 uacce = uacce_alloc(&pdev->dev, &interface); in qm_alloc_uacce()
3097 if (uacce->flags & UACCE_DEV_SVA && qm->mode == UACCE_MODE_SVA) { in qm_alloc_uacce()
3098 qm->use_sva = true; in qm_alloc_uacce()
3102 qm->uacce = NULL; in qm_alloc_uacce()
3103 return -EINVAL; in qm_alloc_uacce()
3106 uacce->is_vf = pdev->is_virtfn; in qm_alloc_uacce()
3107 uacce->priv = qm; in qm_alloc_uacce()
3108 uacce->algs = qm->algs; in qm_alloc_uacce()
3110 if (qm->ver == QM_HW_V1) in qm_alloc_uacce()
3111 uacce->api_ver = HISI_QM_API_VER_BASE; in qm_alloc_uacce()
3112 else if (qm->ver == QM_HW_V2) in qm_alloc_uacce()
3113 uacce->api_ver = HISI_QM_API_VER2_BASE; in qm_alloc_uacce()
3115 uacce->api_ver = HISI_QM_API_VER3_BASE; in qm_alloc_uacce()
3117 if (qm->ver == QM_HW_V1) in qm_alloc_uacce()
3119 else if (qm->ver == QM_HW_V2 || !qm->use_db_isolation) in qm_alloc_uacce()
3123 mmio_page_nr = qm->db_interval / PAGE_SIZE; in qm_alloc_uacce()
3125 dus_page_nr = (PAGE_SIZE - 1 + qm->sqe_size * QM_Q_DEPTH + in qm_alloc_uacce()
3128 uacce->qf_pg_num[UACCE_QFRT_MMIO] = mmio_page_nr; in qm_alloc_uacce()
3129 uacce->qf_pg_num[UACCE_QFRT_DUS] = dus_page_nr; in qm_alloc_uacce()
3131 qm->uacce = uacce; in qm_alloc_uacce()
3137 * qm_frozen() - Try to freeze the QM to cut off continuous queue requests. If
3145 if (test_bit(QM_DRIVER_REMOVING, &qm->misc_ctl)) in qm_frozen()
3148 down_write(&qm->qps_lock); in qm_frozen()
3150 if (!qm->qp_in_used) { in qm_frozen()
3151 qm->qp_in_used = qm->qp_num; in qm_frozen()
3152 up_write(&qm->qps_lock); in qm_frozen()
3153 set_bit(QM_DRIVER_REMOVING, &qm->misc_ctl); in qm_frozen()
3157 up_write(&qm->qps_lock); in qm_frozen()
3159 return -EBUSY; in qm_frozen()
3170 return -EINVAL; in qm_try_frozen_vfs()
3173 mutex_lock(&qm_list->lock); in qm_try_frozen_vfs()
3174 list_for_each_entry(qm, &qm_list->list, list) { in qm_try_frozen_vfs()
3175 dev = qm->pdev; in qm_try_frozen_vfs()
3187 mutex_unlock(&qm_list->lock); in qm_try_frozen_vfs()
3193 * hisi_qm_wait_task_finish() - Wait until the task is finished
3201 ((qm->fun_type == QM_HW_PF) && in hisi_qm_wait_task_finish()
3202 qm_try_frozen_vfs(qm->pdev, qm_list))) { in hisi_qm_wait_task_finish()
3206 while (test_bit(QM_RST_SCHED, &qm->misc_ctl) || in hisi_qm_wait_task_finish()
3207 test_bit(QM_RESETTING, &qm->misc_ctl)) in hisi_qm_wait_task_finish()
3215 * hisi_qm_get_free_qp_num() - Get the number of free qps in a qm.
3224 down_read(&qm->qps_lock); in hisi_qm_get_free_qp_num()
3225 ret = qm->qp_num - qm->qp_in_used; in hisi_qm_get_free_qp_num()
3226 up_read(&qm->qps_lock); in hisi_qm_get_free_qp_num()
3234 struct device *dev = &qm->pdev->dev; in hisi_qp_memory_uninit()
3238 for (i = num - 1; i >= 0; i--) { in hisi_qp_memory_uninit()
3239 qdma = &qm->qp_array[i].qdma; in hisi_qp_memory_uninit()
3240 dma_free_coherent(dev, qdma->size, qdma->va, qdma->dma); in hisi_qp_memory_uninit()
3243 kfree(qm->qp_array); in hisi_qp_memory_uninit()
3248 struct device *dev = &qm->pdev->dev; in hisi_qp_memory_init()
3249 size_t off = qm->sqe_size * QM_Q_DEPTH; in hisi_qp_memory_init()
3252 qp = &qm->qp_array[id]; in hisi_qp_memory_init()
3253 qp->qdma.va = dma_alloc_coherent(dev, dma_size, &qp->qdma.dma, in hisi_qp_memory_init()
3255 if (!qp->qdma.va) in hisi_qp_memory_init()
3256 return -ENOMEM; in hisi_qp_memory_init()
3258 qp->sqe = qp->qdma.va; in hisi_qp_memory_init()
3259 qp->sqe_dma = qp->qdma.dma; in hisi_qp_memory_init()
3260 qp->cqe = qp->qdma.va + off; in hisi_qp_memory_init()
3261 qp->cqe_dma = qp->qdma.dma + off; in hisi_qp_memory_init()
3262 qp->qdma.size = dma_size; in hisi_qp_memory_init()
3263 qp->qm = qm; in hisi_qp_memory_init()
3264 qp->qp_id = id; in hisi_qp_memory_init()
3271 struct pci_dev *pdev = qm->pdev; in hisi_qm_pre_init()
3273 if (qm->ver == QM_HW_V1) in hisi_qm_pre_init()
3274 qm->ops = &qm_hw_ops_v1; in hisi_qm_pre_init()
3275 else if (qm->ver == QM_HW_V2) in hisi_qm_pre_init()
3276 qm->ops = &qm_hw_ops_v2; in hisi_qm_pre_init()
3278 qm->ops = &qm_hw_ops_v3; in hisi_qm_pre_init()
3281 mutex_init(&qm->mailbox_lock); in hisi_qm_pre_init()
3282 init_rwsem(&qm->qps_lock); in hisi_qm_pre_init()
3283 qm->qp_in_used = 0; in hisi_qm_pre_init()
3284 qm->misc_ctl = false; in hisi_qm_pre_init()
3285 if (qm->fun_type == QM_HW_PF && qm->ver > QM_HW_V2) { in hisi_qm_pre_init()
3286 if (!acpi_device_power_manageable(ACPI_COMPANION(&pdev->dev))) in hisi_qm_pre_init()
3287 dev_info(&pdev->dev, "_PS0 and _PR0 are not defined"); in hisi_qm_pre_init()
3295 if (qm->ver < QM_HW_V3) in qm_cmd_uninit()
3298 val = readl(qm->io_base + QM_IFC_INT_MASK); in qm_cmd_uninit()
3300 writel(val, qm->io_base + QM_IFC_INT_MASK); in qm_cmd_uninit()
3307 if (qm->ver < QM_HW_V3) in qm_cmd_init()
3314 val = readl(qm->io_base + QM_IFC_INT_MASK); in qm_cmd_init()
3316 writel(val, qm->io_base + QM_IFC_INT_MASK); in qm_cmd_init()
3321 struct pci_dev *pdev = qm->pdev; in qm_put_pci_res()
3323 if (qm->use_db_isolation) in qm_put_pci_res()
3324 iounmap(qm->db_io_base); in qm_put_pci_res()
3326 iounmap(qm->io_base); in qm_put_pci_res()
3332 struct pci_dev *pdev = qm->pdev; in hisi_qm_pci_uninit()
3340 * hisi_qm_uninit() - Uninitialize qm.
3347 struct pci_dev *pdev = qm->pdev; in hisi_qm_uninit()
3348 struct device *dev = &pdev->dev; in hisi_qm_uninit()
3351 kfree(qm->factor); in hisi_qm_uninit()
3352 down_write(&qm->qps_lock); in hisi_qm_uninit()
3355 up_write(&qm->qps_lock); in hisi_qm_uninit()
3359 hisi_qp_memory_uninit(qm, qm->qp_num); in hisi_qm_uninit()
3360 idr_destroy(&qm->qp_idr); in hisi_qm_uninit()
3362 if (qm->qdma.va) { in hisi_qm_uninit()
3364 dma_free_coherent(dev, qm->qdma.size, in hisi_qm_uninit()
3365 qm->qdma.va, qm->qdma.dma); in hisi_qm_uninit()
3370 uacce_remove(qm->uacce); in hisi_qm_uninit()
3371 qm->uacce = NULL; in hisi_qm_uninit()
3373 up_write(&qm->qps_lock); in hisi_qm_uninit()
3378 * hisi_qm_get_vft() - Get vft from a qm.
3392 return -EINVAL; in hisi_qm_get_vft()
3394 if (!qm->ops->get_vft) { in hisi_qm_get_vft()
3395 dev_err(&qm->pdev->dev, "Don't support vft read!\n"); in hisi_qm_get_vft()
3396 return -EINVAL; in hisi_qm_get_vft()
3399 return qm->ops->get_vft(qm, base, number); in hisi_qm_get_vft()
3404 * hisi_qm_set_vft() - Set vft to a qm.
3413 * Assign queues A~B to PF: hisi_qm_set_vft(qm, 0, A, B - A + 1)
3414 * Assign queues A~B to VF: hisi_qm_set_vft(qm, 2, A, B - A + 1)
3420 u32 max_q_num = qm->ctrl_qp_num; in hisi_qm_set_vft()
3424 return -EINVAL; in hisi_qm_set_vft()
3431 struct hisi_qm_status *status = &qm->status; in qm_init_eq_aeq_status()
3433 status->eq_head = 0; in qm_init_eq_aeq_status()
3434 status->aeq_head = 0; in qm_init_eq_aeq_status()
3435 status->eqc_phase = true; in qm_init_eq_aeq_status()
3436 status->aeqc_phase = true; in qm_init_eq_aeq_status()
3441 struct device *dev = &qm->pdev->dev; in qm_eq_ctx_cfg()
3448 return -ENOMEM; in qm_eq_ctx_cfg()
3450 eqc->base_l = cpu_to_le32(lower_32_bits(qm->eqe_dma)); in qm_eq_ctx_cfg()
3451 eqc->base_h = cpu_to_le32(upper_32_bits(qm->eqe_dma)); in qm_eq_ctx_cfg()
3452 if (qm->ver == QM_HW_V1) in qm_eq_ctx_cfg()
3453 eqc->dw3 = cpu_to_le32(QM_EQE_AEQE_SIZE); in qm_eq_ctx_cfg()
3454 eqc->dw6 = cpu_to_le32((QM_EQ_DEPTH - 1) | (1 << QM_EQC_PHASE_SHIFT)); in qm_eq_ctx_cfg()
3460 return -ENOMEM; in qm_eq_ctx_cfg()
3472 struct device *dev = &qm->pdev->dev; in qm_aeq_ctx_cfg()
3479 return -ENOMEM; in qm_aeq_ctx_cfg()
3481 aeqc->base_l = cpu_to_le32(lower_32_bits(qm->aeqe_dma)); in qm_aeq_ctx_cfg()
3482 aeqc->base_h = cpu_to_le32(upper_32_bits(qm->aeqe_dma)); in qm_aeq_ctx_cfg()
3483 aeqc->dw6 = cpu_to_le32((QM_Q_DEPTH - 1) | (1 << QM_EQC_PHASE_SHIFT)); in qm_aeq_ctx_cfg()
3489 return -ENOMEM; in qm_aeq_ctx_cfg()
3501 struct device *dev = &qm->pdev->dev; in qm_eq_aeq_ctx_cfg()
3519 WARN_ON(!qm->qdma.va); in __hisi_qm_start()
3521 if (qm->fun_type == QM_HW_PF) { in __hisi_qm_start()
3526 ret = hisi_qm_set_vft(qm, 0, qm->qp_base, qm->qp_num); in __hisi_qm_start()
3535 ret = qm_mb(qm, QM_MB_CMD_SQC_BT, qm->sqc_dma, 0, 0); in __hisi_qm_start()
3539 ret = qm_mb(qm, QM_MB_CMD_CQC_BT, qm->cqc_dma, 0, 0); in __hisi_qm_start()
3545 writel(0x0, qm->io_base + QM_VF_EQ_INT_MASK); in __hisi_qm_start()
3546 writel(0x0, qm->io_base + QM_VF_AEQ_INT_MASK); in __hisi_qm_start()
3552 * hisi_qm_start() - start qm
3559 struct device *dev = &qm->pdev->dev; in hisi_qm_start()
3562 down_write(&qm->qps_lock); in hisi_qm_start()
3565 up_write(&qm->qps_lock); in hisi_qm_start()
3566 return -EPERM; in hisi_qm_start()
3569 dev_dbg(dev, "qm start with %u queue pairs\n", qm->qp_num); in hisi_qm_start()
3571 if (!qm->qp_num) { in hisi_qm_start()
3573 ret = -EINVAL; in hisi_qm_start()
3579 atomic_set(&qm->status.flags, QM_START); in hisi_qm_start()
3582 up_write(&qm->qps_lock); in hisi_qm_start()
3589 struct device *dev = &qm->pdev->dev; in qm_restart()
3597 down_write(&qm->qps_lock); in qm_restart()
3598 for (i = 0; i < qm->qp_num; i++) { in qm_restart()
3599 qp = &qm->qp_array[i]; in qm_restart()
3600 if (atomic_read(&qp->qp_status.flags) == QP_STOP && in qm_restart()
3601 qp->is_resetting == true) { in qm_restart()
3606 up_write(&qm->qps_lock); in qm_restart()
3609 qp->is_resetting = false; in qm_restart()
3612 up_write(&qm->qps_lock); in qm_restart()
3620 struct device *dev = &qm->pdev->dev; in qm_stop_started_qp()
3624 for (i = 0; i < qm->qp_num; i++) { in qm_stop_started_qp()
3625 qp = &qm->qp_array[i]; in qm_stop_started_qp()
3626 if (qp && atomic_read(&qp->qp_status.flags) == QP_START) { in qm_stop_started_qp()
3627 qp->is_resetting = true; in qm_stop_started_qp()
3641 * qm_clear_queues() - Clear all queue memory in a qm.
3652 for (i = 0; i < qm->qp_num; i++) { in qm_clear_queues()
3653 qp = &qm->qp_array[i]; in qm_clear_queues()
3654 if (qp->is_resetting) in qm_clear_queues()
3655 memset(qp->qdma.va, 0, qp->qdma.size); in qm_clear_queues()
3658 memset(qm->qdma.va, 0, qm->qdma.size); in qm_clear_queues()
3662 * hisi_qm_stop() - Stop a qm.
3672 struct device *dev = &qm->pdev->dev; in hisi_qm_stop()
3675 down_write(&qm->qps_lock); in hisi_qm_stop()
3677 qm->status.stop_reason = r; in hisi_qm_stop()
3679 ret = -EPERM; in hisi_qm_stop()
3683 if (qm->status.stop_reason == QM_SOFT_RESET || in hisi_qm_stop()
3684 qm->status.stop_reason == QM_FLR) { in hisi_qm_stop()
3693 writel(0x1, qm->io_base + QM_VF_EQ_INT_MASK); in hisi_qm_stop()
3694 writel(0x1, qm->io_base + QM_VF_AEQ_INT_MASK); in hisi_qm_stop()
3696 if (qm->fun_type == QM_HW_PF) { in hisi_qm_stop()
3700 ret = -EBUSY; in hisi_qm_stop()
3706 atomic_set(&qm->status.flags, QM_STOP); in hisi_qm_stop()
3709 up_write(&qm->qps_lock); in hisi_qm_stop()
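/*
 * Editor's sketch, not part of qm.c: how a driver's remove path pairs with
 * hisi_qm_stop() above. QM_NORMAL is the stop reason for an ordinary remove;
 * the reset paths use QM_SOFT_RESET and QM_FLR as seen earlier. 'qm_list' is
 * the driver's global hisi_qm_list.
 */
static void example_remove(struct hisi_qm *qm, struct hisi_qm_list *qm_list)
{
	/* wait for in-flight tasks (and, on a PF, for VFs) to drain */
	hisi_qm_wait_task_finish(qm, qm_list);

	if (hisi_qm_stop(qm, QM_NORMAL))
		dev_warn(&qm->pdev->dev, "failed to stop qm cleanly\n");
}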
3717 struct hisi_qm *qm = filp->private_data; in qm_status_read()
3721 val = atomic_read(&qm->status.flags); in qm_status_read()
3736 return -EINVAL; in qm_debugfs_atomic64_set()
3755 struct hisi_qm_err_info *err_info = &qm->err_info; in qm_hw_error_init()
3757 if (!qm->ops->hw_error_init) { in qm_hw_error_init()
3758 dev_err(&qm->pdev->dev, "QM doesn't support hw error handling!\n"); in qm_hw_error_init()
3762 qm->ops->hw_error_init(qm, err_info->ce, err_info->nfe, err_info->fe); in qm_hw_error_init()
3767 if (!qm->ops->hw_error_uninit) { in qm_hw_error_uninit()
3768 dev_err(&qm->pdev->dev, "Unexpected QM hw error uninit!\n"); in qm_hw_error_uninit()
3772 qm->ops->hw_error_uninit(qm); in qm_hw_error_uninit()
3777 if (!qm->ops->hw_error_handle) { in qm_hw_error_handle()
3778 dev_err(&qm->pdev->dev, "QM doesn't support hw error report!\n"); in qm_hw_error_handle()
3782 return qm->ops->hw_error_handle(qm); in qm_hw_error_handle()
3786 * hisi_qm_dev_err_init() - Initialize device error configuration.
3793 if (qm->fun_type == QM_HW_VF) in hisi_qm_dev_err_init()
3798 if (!qm->err_ini->hw_err_enable) { in hisi_qm_dev_err_init()
3799 dev_err(&qm->pdev->dev, "Device doesn't support hw error init!\n"); in hisi_qm_dev_err_init()
3802 qm->err_ini->hw_err_enable(qm); in hisi_qm_dev_err_init()
3807 * hisi_qm_dev_err_uninit() - Uninitialize device error configuration.
3814 if (qm->fun_type == QM_HW_VF) in hisi_qm_dev_err_uninit()
3819 if (!qm->err_ini->hw_err_disable) { in hisi_qm_dev_err_uninit()
3820 dev_err(&qm->pdev->dev, "Unexpected device hw error uninit!\n"); in hisi_qm_dev_err_uninit()
3823 qm->err_ini->hw_err_disable(qm); in hisi_qm_dev_err_uninit()
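/*
 * Editor's sketch, not part of qm.c: hisi_qm_dev_err_init()/uninit() above
 * only dispatch into qm->err_ini, which each accelerator driver fills in
 * before probing. Only callbacks referenced in this file are shown; the
 * bodies are placeholders.
 */
static void example_hw_err_enable(struct hisi_qm *qm)
{
	/* enable/unmask the device's RAS error reporting here */
}

static void example_hw_err_disable(struct hisi_qm *qm)
{
	/* mask the device's RAS error reporting here */
}

static const struct hisi_qm_err_ini example_err_ini = {
	.hw_err_enable	= example_hw_err_enable,
	.hw_err_disable	= example_hw_err_disable,
};

/* during probe: qm->err_ini = &example_err_ini; hisi_qm_dev_err_init(qm); */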
3828 * hisi_qm_free_qps() - free multiple queue pairs.
3839 for (i = qp_num - 1; i >= 0; i--) in hisi_qm_free_qps()
3849 list_del(&res->list); in free_list()
3863 list_for_each_entry(qm, &qm_list->list, list) { in hisi_qm_sort_devices()
3864 dev = &qm->pdev->dev; in hisi_qm_sort_devices()
3874 return -ENOMEM; in hisi_qm_sort_devices()
3876 res->qm = qm; in hisi_qm_sort_devices()
3877 res->distance = node_distance(dev_node, node); in hisi_qm_sort_devices()
3880 if (res->distance < tmp->distance) { in hisi_qm_sort_devices()
3881 n = &tmp->list; in hisi_qm_sort_devices()
3885 list_add_tail(&res->list, n); in hisi_qm_sort_devices()
3892 * hisi_qm_alloc_qps_node() - Create multiple queue pairs.
3907 int ret = -ENODEV; in hisi_qm_alloc_qps_node()
3912 return -EINVAL; in hisi_qm_alloc_qps_node()
3914 mutex_lock(&qm_list->lock); in hisi_qm_alloc_qps_node()
3916 mutex_unlock(&qm_list->lock); in hisi_qm_alloc_qps_node()
3922 qps[i] = hisi_qm_create_qp(tmp->qm, alg_type); in hisi_qm_alloc_qps_node()
3935 mutex_unlock(&qm_list->lock); in hisi_qm_alloc_qps_node()
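/*
 * Editor's sketch, not part of qm.c: allocating a NUMA-local batch of queue
 * pairs with hisi_qm_alloc_qps_node() above and freeing them again. The count
 * of two and alg_type 0 are illustrative.
 */
static int example_alloc_qps(struct hisi_qm_list *qm_list, int node,
			     struct hisi_qp **qps)
{
	int ret;

	ret = hisi_qm_alloc_qps_node(qm_list, 2, 0, node, qps);
	if (ret)
		return ret;

	/* ... use qps[0] and qps[1] ... */

	hisi_qm_free_qps(qps, 2);
	return 0;
}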
3949 u32 max_qp_num = qm->max_qp_num; in qm_vf_q_assign()
3950 u32 q_base = qm->qp_num; in qm_vf_q_assign()
3954 return -EINVAL; in qm_vf_q_assign()
3956 vfs_q_num = qm->ctrl_qp_num - qm->qp_num; in qm_vf_q_assign()
3960 return -EINVAL; in qm_vf_q_assign()
3965 for (i = num_vfs; i > 0; i--) { in qm_vf_q_assign()
3975 remain_q_num--; in qm_vf_q_assign()
3983 for (j = num_vfs; j > i; j--) in qm_vf_q_assign()
3998 for (i = 1; i <= qm->vfs_num; i++) { in qm_clear_vft_config()
4003 qm->vfs_num = 0; in qm_clear_vft_config()
4010 struct device *dev = &qm->pdev->dev; in qm_func_shaper_enable()
4014 total_vfs = pci_sriov_get_totalvfs(qm->pdev); in qm_func_shaper_enable()
4016 return -EINVAL; in qm_func_shaper_enable()
4018 qm->factor[fun_index].func_qos = qos; in qm_func_shaper_enable()
4020 ret = qm_get_shaper_para(ir, &qm->factor[fun_index]); in qm_func_shaper_enable()
4023 return -EINVAL; in qm_func_shaper_enable()
4031 return -EINVAL; in qm_func_shaper_enable()
4046 ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val, in qm_get_shaper_vft_qos()
4052 writel(0x1, qm->io_base + QM_VFT_CFG_OP_WR); in qm_get_shaper_vft_qos()
4053 writel(SHAPER_VFT, qm->io_base + QM_VFT_CFG_TYPE); in qm_get_shaper_vft_qos()
4054 writel(fun_index, qm->io_base + QM_VFT_CFG); in qm_get_shaper_vft_qos()
4056 writel(0x0, qm->io_base + QM_VFT_CFG_RDY); in qm_get_shaper_vft_qos()
4057 writel(0x1, qm->io_base + QM_VFT_CFG_OP_ENABLE); in qm_get_shaper_vft_qos()
4059 ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val, in qm_get_shaper_vft_qos()
4065 shaper_vft = readl(qm->io_base + QM_VFT_CFG_DATA_L) | in qm_get_shaper_vft_qos()
4066 ((u64)readl(qm->io_base + QM_VFT_CFG_DATA_H) << 32); in qm_get_shaper_vft_qos()
4077 ir = qm->factor[fun_index].func_qos * QM_QOS_RATE; in qm_get_shaper_vft_qos()
4079 error_rate = QM_QOS_EXPAND_RATE * (u32)abs(ir_calc - ir) / ir; in qm_get_shaper_vft_qos()
4081 pci_err(qm->pdev, "error_rate: %u, get function qos is error!\n", error_rate); in qm_get_shaper_vft_qos()
4090 struct device *dev = &qm->pdev->dev; in qm_vf_get_qos()
4113 qm->mb_qos = 0; in qm_vf_read_qos()
4116 if (qm->ops->ping_pf) { in qm_vf_read_qos()
4117 ret = qm->ops->ping_pf(qm, QM_VF_GET_QOS); in qm_vf_read_qos()
4119 pci_err(qm->pdev, "failed to send cmd to PF to get qos!\n"); in qm_vf_read_qos()
4126 if (qm->mb_qos) in qm_vf_read_qos()
4130 pci_err(qm->pdev, "PF ping VF timeout!\n"); in qm_vf_read_qos()
4131 return -ETIMEDOUT; in qm_vf_read_qos()
4141 struct hisi_qm *qm = filp->private_data; in qm_algqos_read()
4151 if (test_and_set_bit(QM_RESETTING, &qm->misc_ctl)) { in qm_algqos_read()
4152 pci_err(qm->pdev, "dev resetting, read alg qos failed!\n"); in qm_algqos_read()
4153 ret = -EAGAIN; in qm_algqos_read()
4157 if (qm->fun_type == QM_HW_PF) { in qm_algqos_read()
4163 ir = qm->mb_qos; in qm_algqos_read()
4172 clear_bit(QM_RESETTING, &qm->misc_ctl); in qm_algqos_read()
4185 return -EINVAL; in qm_qos_value_init()
4190 return -EINVAL; in qm_qos_value_init()
4198 struct hisi_qm *qm = filp->private_data; in qm_algqos_write()
4207 if (qm->fun_type == QM_HW_VF) in qm_algqos_write()
4208 return -EINVAL; in qm_algqos_write()
4211 if (test_and_set_bit(QM_RESETTING, &qm->misc_ctl)) { in qm_algqos_write()
4212 pci_err(qm->pdev, "dev resetting, write alg qos failed!\n"); in qm_algqos_write()
4213 return -EAGAIN; in qm_algqos_write()
4222 ret = -ENOSPC; in qm_algqos_write()
4226 len = simple_write_to_buffer(tbuf, QM_DBG_READ_LEN - 1, pos, buf, count); in qm_algqos_write()
4235 ret = -EINVAL; in qm_algqos_write()
4241 pci_err(qm->pdev, "input qos value is error, please set 1~1000!\n"); in qm_algqos_write()
4242 ret = -EINVAL; in qm_algqos_write()
4248 pci_err(qm->pdev, "input pci bdf value is error!\n"); in qm_algqos_write()
4249 ret = -EINVAL; in qm_algqos_write()
4257 ret = -EINVAL; in qm_algqos_write()
4263 pci_err(qm->pdev, "failed to enable function shaper!\n"); in qm_algqos_write()
4264 ret = -EINVAL; in qm_algqos_write()
4273 clear_bit(QM_RESETTING, &qm->misc_ctl); in qm_algqos_write()
4285 * hisi_qm_set_algqos_init() - Initialize function qos debugfs files.
4292 if (qm->fun_type == QM_HW_PF) in hisi_qm_set_algqos_init()
4293 debugfs_create_file("alg_qos", 0644, qm->debug.debug_root, in hisi_qm_set_algqos_init()
4296 debugfs_create_file("alg_qos", 0444, qm->debug.debug_root, in hisi_qm_set_algqos_init()
4301 * hisi_qm_debug_init() - Initialize qm related debugfs files.
4308 struct qm_dfx *dfx = &qm->debug.dfx; in hisi_qm_debug_init()
4313 qm_d = debugfs_create_dir("qm", qm->debug.debug_root); in hisi_qm_debug_init()
4314 qm->debug.qm_d = qm_d; in hisi_qm_debug_init()
4317 if (qm->fun_type == QM_HW_PF) { in hisi_qm_debug_init()
4318 qm_create_debugfs_file(qm, qm->debug.debug_root, CURRENT_QM); in hisi_qm_debug_init()
4320 qm_create_debugfs_file(qm, qm->debug.qm_d, i); in hisi_qm_debug_init()
4323 debugfs_create_file("regs", 0444, qm->debug.qm_d, qm, &qm_regs_fops); in hisi_qm_debug_init()
4325 debugfs_create_file("cmd", 0600, qm->debug.qm_d, qm, &qm_cmd_fops); in hisi_qm_debug_init()
4327 debugfs_create_file("status", 0444, qm->debug.qm_d, qm, in hisi_qm_debug_init()
4338 if (qm->ver >= QM_HW_V3) in hisi_qm_debug_init()
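/*
 * Editor's sketch, not part of qm.c: a driver creates its own debugfs root,
 * points qm->debug.debug_root at it and then lets the QM populate it via
 * hisi_qm_debug_init() above. 'example_debugfs_root' is a hypothetical dentry
 * owned by the accelerator driver.
 */
static void example_debugfs_init(struct hisi_qm *qm,
				 struct dentry *example_debugfs_root)
{
	qm->debug.debug_root = debugfs_create_dir(dev_name(&qm->pdev->dev),
						  example_debugfs_root);
	hisi_qm_debug_init(qm);
}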
4344 * hisi_qm_debug_regs_clear() - clear qm debug related registers.
4353 writel(0x0, qm->io_base + QM_DFX_MB_CNT_VF); in hisi_qm_debug_regs_clear()
4354 writel(0x0, qm->io_base + QM_DFX_DB_CNT_VF); in hisi_qm_debug_regs_clear()
4357 writel(0x0, qm->io_base + QM_DFX_SQE_CNT_VF_SQN); in hisi_qm_debug_regs_clear()
4358 writel(0x0, qm->io_base + QM_DFX_CQE_CNT_VF_CQN); in hisi_qm_debug_regs_clear()
4364 writel(0x1, qm->io_base + QM_DFX_CNT_CLR_CE); in hisi_qm_debug_regs_clear()
4368 readl(qm->io_base + regs->offset); in hisi_qm_debug_regs_clear()
4373 writel(0x0, qm->io_base + QM_DFX_CNT_CLR_CE); in hisi_qm_debug_regs_clear()
4378 * hisi_qm_sriov_enable() - enable virtual functions
4398 pci_err(pdev, "%d VFs already enabled. Please disable pre-enabled VFs!\n", in hisi_qm_sriov_enable()
4410 qm->vfs_num = num_vfs; in hisi_qm_sriov_enable()
4430 * hisi_qm_sriov_disable - disable virtual functions
4439 int total_vfs = pci_sriov_get_totalvfs(qm->pdev); in hisi_qm_sriov_disable()
4444 return -EPERM; in hisi_qm_sriov_disable()
4448 if (!is_frozen && qm_try_frozen_vfs(pdev, qm->qm_list)) { in hisi_qm_sriov_disable()
4450 return -EBUSY; in hisi_qm_sriov_disable()
4455 memset(qm->factor + 1, 0, sizeof(struct qm_shaper_factor) * total_vfs); in hisi_qm_sriov_disable()
4467 * hisi_qm_sriov_configure - configure the number of VFs
4471 * Enable SR-IOV according to num_vfs, 0 means disable.
4486 if (!qm->err_ini->get_dev_hw_err_status) { in qm_dev_err_handle()
4487 dev_err(&qm->pdev->dev, "Device doesn't support get hw error status!\n"); in qm_dev_err_handle()
4492 err_sts = qm->err_ini->get_dev_hw_err_status(qm); in qm_dev_err_handle()
4494 if (err_sts & qm->err_info.ecc_2bits_mask) in qm_dev_err_handle()
4495 qm->err_status.is_dev_ecc_mbit = true; in qm_dev_err_handle()
4497 if (qm->err_ini->log_dev_hw_err) in qm_dev_err_handle()
4498 qm->err_ini->log_dev_hw_err(qm, err_sts); in qm_dev_err_handle()
4501 if ((err_sts | qm->err_info.dev_ce_mask) == in qm_dev_err_handle()
4502 qm->err_info.dev_ce_mask) { in qm_dev_err_handle()
4503 if (qm->err_ini->clear_dev_hw_err_status) in qm_dev_err_handle()
4504 qm->err_ini->clear_dev_hw_err_status(qm, in qm_dev_err_handle()
4532 * hisi_qm_dev_err_detected() - Get device and qm error status then log it.
4545 if (pdev->is_virtfn) in hisi_qm_dev_err_detected()
4562 struct pci_dev *pdev = qm->pdev; in qm_check_req_recv()
4566 if (qm->ver >= QM_HW_V3) in qm_check_req_recv()
4569 writel(ACC_VENDOR_ID_VALUE, qm->io_base + QM_PEH_VENDOR_ID); in qm_check_req_recv()
4570 ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_VENDOR_ID, val, in qm_check_req_recv()
4574 dev_err(&pdev->dev, "Fails to read QM reg!\n"); in qm_check_req_recv()
4578 writel(PCI_VENDOR_ID_HUAWEI, qm->io_base + QM_PEH_VENDOR_ID); in qm_check_req_recv()
4579 ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_VENDOR_ID, val, in qm_check_req_recv()
4583 dev_err(&pdev->dev, "Fails to read QM reg in the second time!\n"); in qm_check_req_recv()
4590 struct pci_dev *pdev = qm->pdev; in qm_set_pf_mse()
4609 return -ETIMEDOUT; in qm_set_pf_mse()
4614 struct pci_dev *pdev = qm->pdev; in qm_set_vf_mse()
4636 return -ETIMEDOUT; in qm_set_vf_mse()
4642 struct hisi_qm_list *qm_list = qm->qm_list; in qm_vf_reset_prepare()
4643 struct pci_dev *pdev = qm->pdev; in qm_vf_reset_prepare()
4648 mutex_lock(&qm_list->lock); in qm_vf_reset_prepare()
4649 list_for_each_entry(vf_qm, &qm_list->list, list) { in qm_vf_reset_prepare()
4650 virtfn = vf_qm->pdev; in qm_vf_reset_prepare()
4665 mutex_unlock(&qm_list->lock); in qm_vf_reset_prepare()
4672 struct pci_dev *pdev = qm->pdev; in qm_try_stop_vfs()
4675 if (!qm->vfs_num) in qm_try_stop_vfs()
4679 if (qm->ops->ping_all_vfs) { in qm_try_stop_vfs()
4680 ret = qm->ops->ping_all_vfs(qm, cmd); in qm_try_stop_vfs()
4697 while (test_and_set_bit(QM_RESETTING, &qm->misc_ctl)) { in qm_wait_reset_finish()
4700 return -EBUSY; in qm_wait_reset_finish()
4708 struct pci_dev *pdev = qm->pdev; in qm_reset_prepare_ready()
4715 if (qm->ver < QM_HW_V3) in qm_reset_prepare_ready()
4723 struct pci_dev *pdev = qm->pdev; in qm_reset_bit_clear()
4726 if (qm->ver < QM_HW_V3) in qm_reset_bit_clear()
4727 clear_bit(QM_RESETTING, &pf_qm->misc_ctl); in qm_reset_bit_clear()
4729 clear_bit(QM_RESETTING, &qm->misc_ctl); in qm_reset_bit_clear()
4734 struct pci_dev *pdev = qm->pdev; in qm_controller_reset_prepare()
4762 clear_bit(QM_RST_SCHED, &qm->misc_ctl); in qm_controller_reset_prepare()
4772 if (qm->ver >= QM_HW_V3) in qm_dev_ecc_mbit_handle()
4775 if (!qm->err_status.is_dev_ecc_mbit && in qm_dev_ecc_mbit_handle()
4776 qm->err_status.is_qm_ecc_mbit && in qm_dev_ecc_mbit_handle()
4777 qm->err_ini->close_axi_master_ooo) { in qm_dev_ecc_mbit_handle()
4779 qm->err_ini->close_axi_master_ooo(qm); in qm_dev_ecc_mbit_handle()
4781 } else if (qm->err_status.is_dev_ecc_mbit && in qm_dev_ecc_mbit_handle()
4782 !qm->err_status.is_qm_ecc_mbit && in qm_dev_ecc_mbit_handle()
4783 !qm->err_ini->close_axi_master_ooo) { in qm_dev_ecc_mbit_handle()
4785 nfe_enb = readl(qm->io_base + QM_RAS_NFE_ENABLE); in qm_dev_ecc_mbit_handle()
4787 qm->io_base + QM_RAS_NFE_ENABLE); in qm_dev_ecc_mbit_handle()
4788 writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SET); in qm_dev_ecc_mbit_handle()
4794 struct pci_dev *pdev = qm->pdev; in qm_soft_reset()
4798 /* Ensure all doorbells and mailboxes are received by QM */ in qm_soft_reset()
4803 if (qm->vfs_num) { in qm_soft_reset()
4811 ret = qm->ops->set_msi(qm, false); in qm_soft_reset()
4821 qm->io_base + ACC_MASTER_GLOBAL_CTRL); in qm_soft_reset()
4824 ret = readl_relaxed_poll_timeout(qm->io_base + ACC_MASTER_TRANS_RETURN, in qm_soft_reset()
4833 if (qm->err_ini->close_sva_prefetch) in qm_soft_reset()
4834 qm->err_ini->close_sva_prefetch(qm); in qm_soft_reset()
4842 /* The reset-related sub-control registers are not in the PCI BAR */ in qm_soft_reset()
4843 if (ACPI_HANDLE(&pdev->dev)) { in qm_soft_reset()
4847 s = acpi_evaluate_integer(ACPI_HANDLE(&pdev->dev), in qm_soft_reset()
4848 qm->err_info.acpi_rst, in qm_soft_reset()
4852 return -EIO; in qm_soft_reset()
4857 return -EIO; in qm_soft_reset()
4861 return -EINVAL; in qm_soft_reset()
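
When the reset-related sub-control registers are not reachable through a PCI BAR, qm_soft_reset() asks firmware to pull the reset line by evaluating the ACPI method named in qm->err_info.acpi_rst and treating a non-zero result as failure. A hedged sketch of that call, with a hypothetical method name:

#include <linux/acpi.h>
#include <linux/pci.h>

/* Sketch: firmware-assisted reset via an ACPI method, as in the branch
 * above. The method name "RSTE" is a placeholder; the real name comes
 * from qm->err_info.acpi_rst.
 */
static int example_acpi_reset(struct pci_dev *pdev)
{
        unsigned long long value = 0;
        acpi_status s;

        if (!ACPI_HANDLE(&pdev->dev))
                return -EINVAL;

        s = acpi_evaluate_integer(ACPI_HANDLE(&pdev->dev),
                                  "RSTE" /* hypothetical method name */,
                                  NULL, &value);
        if (ACPI_FAILURE(s) || value)
                return -EIO;

        return 0;
}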
4869 struct hisi_qm_list *qm_list = qm->qm_list; in qm_vf_reset_done()
4870 struct pci_dev *pdev = qm->pdev; in qm_vf_reset_done()
4875 mutex_lock(&qm_list->lock); in qm_vf_reset_done()
4876 list_for_each_entry(vf_qm, &qm_list->list, list) { in qm_vf_reset_done()
4877 virtfn = vf_qm->pdev; in qm_vf_reset_done()
4892 mutex_unlock(&qm_list->lock); in qm_vf_reset_done()
4898 struct pci_dev *pdev = qm->pdev; in qm_try_start_vfs()
4901 if (!qm->vfs_num) in qm_try_start_vfs()
4904 ret = qm_vf_q_assign(qm, qm->vfs_num); in qm_try_start_vfs()
4911 if (qm->ops->ping_all_vfs) { in qm_try_start_vfs()
4912 ret = qm->ops->ping_all_vfs(qm, cmd); in qm_try_start_vfs()
4926 return qm->err_ini->hw_init(qm); in qm_dev_hw_init()
4933 if (qm->err_ini->open_sva_prefetch) in qm_restart_prepare()
4934 qm->err_ini->open_sva_prefetch(qm); in qm_restart_prepare()
4936 if (qm->ver >= QM_HW_V3) in qm_restart_prepare()
4939 if (!qm->err_status.is_qm_ecc_mbit && in qm_restart_prepare()
4940 !qm->err_status.is_dev_ecc_mbit) in qm_restart_prepare()
4944 value = readl(qm->io_base + ACC_AM_CFG_PORT_WR_EN); in qm_restart_prepare()
4945 writel(value & ~qm->err_info.msi_wr_port, in qm_restart_prepare()
4946 qm->io_base + ACC_AM_CFG_PORT_WR_EN); in qm_restart_prepare()
4949 value = qm_get_dev_err_status(qm) & qm->err_info.ecc_2bits_mask; in qm_restart_prepare()
4950 if (value && qm->err_ini->clear_dev_hw_err_status) in qm_restart_prepare()
4951 qm->err_ini->clear_dev_hw_err_status(qm, value); in qm_restart_prepare()
4954 writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SOURCE); in qm_restart_prepare()
4957 writel(ACC_ROB_ECC_ERR_MULTPL, qm->io_base + ACC_AM_ROB_ECC_INT_STS); in qm_restart_prepare()
4964 if (qm->ver >= QM_HW_V3) in qm_restart_done()
4967 if (!qm->err_status.is_qm_ecc_mbit && in qm_restart_done()
4968 !qm->err_status.is_dev_ecc_mbit) in qm_restart_done()
4972 value = readl(qm->io_base + ACC_AM_CFG_PORT_WR_EN); in qm_restart_done()
4973 value |= qm->err_info.msi_wr_port; in qm_restart_done()
4974 writel(value, qm->io_base + ACC_AM_CFG_PORT_WR_EN); in qm_restart_done()
4977 qm->err_status.is_qm_ecc_mbit = false; in qm_restart_done()
4978 qm->err_status.is_dev_ecc_mbit = false; in qm_restart_done()
4983 struct pci_dev *pdev = qm->pdev; in qm_controller_reset_done()
4986 ret = qm->ops->set_msi(qm, true); in qm_controller_reset_done()
4998 if (qm->vfs_num) { in qm_controller_reset_done()
5014 if (qm->err_ini->open_axi_master_ooo) in qm_controller_reset_done()
5015 qm->err_ini->open_axi_master_ooo(qm); in qm_controller_reset_done()
5041 struct pci_dev *pdev = qm->pdev; in qm_controller_reset()
5048 clear_bit(QM_RST_SCHED, &qm->misc_ctl); in qm_controller_reset()
5071 * hisi_qm_dev_slot_reset() - slot reset
5082 if (pdev->is_virtfn) in hisi_qm_dev_slot_reset()
5124 if (qm->fun_type == QM_HW_PF) in hisi_qm_reset_prepare()
5151 pci_read_config_dword(qm->pdev, PCI_COMMAND, &id); in qm_flr_reset_complete()
5166 if (qm->fun_type == QM_HW_PF) { in hisi_qm_reset_done()
5191 if (qm->fun_type == QM_HW_PF) in hisi_qm_reset_done()
5206 atomic64_inc(&qm->debug.dfx.abnormal_irq_cnt); in qm_abnormal_irq()
5209 !test_bit(QM_DRIVER_REMOVING, &qm->misc_ctl) && in qm_abnormal_irq()
5210 !test_and_set_bit(QM_RST_SCHED, &qm->misc_ctl)) in qm_abnormal_irq()
5211 schedule_work(&qm->rst_work); in qm_abnormal_irq()
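
The abnormal-interrupt handler only schedules the reset work when the driver is not being removed and no reset is already pending, using a test_and_set_bit() guard so the work is queued at most once. A self-contained sketch of that pattern with hypothetical names:

#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>

/* Sketch of the "schedule reset work at most once" pattern above;
 * example_dev, its flag bits and rst_work stand in for the qm fields.
 */
#define EXAMPLE_DRIVER_REMOVING 0
#define EXAMPLE_RST_SCHED       1

struct example_dev {
        unsigned long misc_ctl;
        struct work_struct rst_work;
};

static irqreturn_t example_abnormal_irq(int irq, void *data)
{
        struct example_dev *edev = data;

        if (!test_bit(EXAMPLE_DRIVER_REMOVING, &edev->misc_ctl) &&
            !test_and_set_bit(EXAMPLE_RST_SCHED, &edev->misc_ctl))
                schedule_work(&edev->rst_work);

        return IRQ_HANDLED;
}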
5218 struct pci_dev *pdev = qm->pdev; in qm_irq_register()
5222 qm_irq, 0, qm->dev_name, qm); in qm_irq_register()
5226 if (qm->ver > QM_HW_V1) { in qm_irq_register()
5228 qm_aeq_irq, 0, qm->dev_name, qm); in qm_irq_register()
5232 if (qm->fun_type == QM_HW_PF) { in qm_irq_register()
5235 qm_abnormal_irq, 0, qm->dev_name, qm); in qm_irq_register()
5241 if (qm->ver > QM_HW_V2) { in qm_irq_register()
5243 qm_mb_cmd_irq, 0, qm->dev_name, qm); in qm_irq_register()
5251 if (qm->fun_type == QM_HW_PF) in qm_irq_register()
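
qm_irq_register() requests one MSI vector per source: the event queue, the asynchronous event queue on HW V2+, and, on the PF, the abnormal interrupt plus (on HW V3+) the mailbox-command interrupt. A small sketch of requesting one such vector; the vector index, handler and name string are placeholders:

#include <linux/interrupt.h>
#include <linux/pci.h>

/* Sketch: map an MSI vector index to a Linux IRQ and request it, as
 * done per interrupt source above.
 */
static int example_request_vector(struct pci_dev *pdev, unsigned int vec,
                                  irq_handler_t handler, void *data)
{
        return request_irq(pci_irq_vector(pdev, vec), handler, 0,
                           "example_qm" /* placeholder name */, data);
}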
5261 * hisi_qm_dev_shutdown() - Shutdown device.
5273 dev_err(&pdev->dev, "Failed to stop qm in shutdown!\n"); in hisi_qm_dev_shutdown()
5284 clear_bit(QM_RST_SCHED, &qm->misc_ctl); in hisi_qm_controller_reset()
5291 dev_err(&qm->pdev->dev, "controller reset failed (%d)\n", ret); in hisi_qm_controller_reset()
5300 struct pci_dev *pdev = qm->pdev; in qm_pf_reset_vf_prepare()
5305 dev_err(&pdev->dev, "reset prepare not ready!\n"); in qm_pf_reset_vf_prepare()
5306 atomic_set(&qm->status.flags, QM_STOP); in qm_pf_reset_vf_prepare()
5313 dev_err(&pdev->dev, "failed to stop QM, ret = %d.\n", ret); in qm_pf_reset_vf_prepare()
5314 atomic_set(&qm->status.flags, QM_STOP); in qm_pf_reset_vf_prepare()
5321 ret = qm->ops->ping_pf(qm, cmd); in qm_pf_reset_vf_prepare()
5323 dev_warn(&pdev->dev, "PF response timed out in reset prepare!\n"); in qm_pf_reset_vf_prepare()
5329 struct pci_dev *pdev = qm->pdev; in qm_pf_reset_vf_done()
5335 dev_err(&pdev->dev, "failed to start QM, ret = %d.\n", ret); in qm_pf_reset_vf_done()
5339 ret = qm->ops->ping_pf(qm, cmd); in qm_pf_reset_vf_done()
5341 dev_warn(&pdev->dev, "PF response timed out in reset done!\n"); in qm_pf_reset_vf_done()
5348 struct device *dev = &qm->pdev->dev; in qm_wait_pf_reset_finish()
5354 ret = readl_relaxed_poll_timeout(qm->io_base + QM_IFC_INT_SOURCE_V, val, in qm_wait_pf_reset_finish()
5360 return -ETIMEDOUT; in qm_wait_pf_reset_finish()
5377 ret = -EINVAL; in qm_wait_pf_reset_finish()
5386 struct device *dev = &qm->pdev->dev; in qm_pf_reset_vf_process()
5413 struct device *dev = &qm->pdev->dev; in qm_handle_cmd_msg()
5441 qm->mb_qos = msg >> QM_MB_CMD_DATA_SHIFT; in qm_handle_cmd_msg()
5453 u32 vfs_num = qm->vfs_num; in qm_cmd_process()
5457 if (qm->fun_type == QM_HW_PF) { in qm_cmd_process()
5458 val = readq(qm->io_base + QM_IFC_INT_SOURCE_P); in qm_cmd_process()
5474 * hisi_qm_alg_register() - Register alg to crypto and add qm to qm_list.
5483 struct device *dev = &qm->pdev->dev; in hisi_qm_alg_register()
5487 mutex_lock(&qm_list->lock); in hisi_qm_alg_register()
5488 if (list_empty(&qm_list->list)) in hisi_qm_alg_register()
5490 list_add_tail(&qm->list, &qm_list->list); in hisi_qm_alg_register()
5491 mutex_unlock(&qm_list->lock); in hisi_qm_alg_register()
5493 if (qm->ver <= QM_HW_V2 && qm->use_sva) { in hisi_qm_alg_register()
5499 ret = qm_list->register_to_crypto(qm); in hisi_qm_alg_register()
5501 mutex_lock(&qm_list->lock); in hisi_qm_alg_register()
5502 list_del(&qm->list); in hisi_qm_alg_register()
5503 mutex_unlock(&qm_list->lock); in hisi_qm_alg_register()
5512 * hisi_qm_alg_unregister() - Unregister alg from crypto and delete qm from
5522 mutex_lock(&qm_list->lock); in hisi_qm_alg_unregister()
5523 list_del(&qm->list); in hisi_qm_alg_unregister()
5524 mutex_unlock(&qm_list->lock); in hisi_qm_alg_unregister()
5526 if (qm->ver <= QM_HW_V2 && qm->use_sva) in hisi_qm_alg_unregister()
5529 if (list_empty(&qm_list->list)) in hisi_qm_alg_unregister()
5530 qm_list->unregister_from_crypto(qm); in hisi_qm_alg_unregister()
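
hisi_qm_alg_register() adds the qm to the accelerator's list and registers the algorithms with the crypto core when the first device appears; hisi_qm_alg_unregister() mirrors that on the way out. A hedged sketch of the probe-side ordering a driver would typically follow (the helper name is hypothetical; the header path and the use of hisi_qm_start()/hisi_qm_stop() are assumptions):

#include "qm.h"   /* header path assumed; provides struct hisi_qm(_list) */

/* Sketch: register algorithms only after the device has started, and
 * roll back cleanly if registration fails.
 */
static int example_probe_tail(struct hisi_qm *qm, struct hisi_qm_list *qm_list)
{
        int ret;

        ret = hisi_qm_start(qm);
        if (ret)
                return ret;

        ret = hisi_qm_alg_register(qm, qm_list);
        if (ret)
                hisi_qm_stop(qm, QM_NORMAL);

        return ret;
}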
5536 if (qm->ver == QM_HW_V1) in qm_get_qp_num()
5537 qm->ctrl_qp_num = QM_QNUM_V1; in qm_get_qp_num()
5538 else if (qm->ver == QM_HW_V2) in qm_get_qp_num()
5539 qm->ctrl_qp_num = QM_QNUM_V2; in qm_get_qp_num()
5541 qm->ctrl_qp_num = readl(qm->io_base + QM_CAPBILITY) & in qm_get_qp_num()
5544 if (qm->use_db_isolation) in qm_get_qp_num()
5545 qm->max_qp_num = (readl(qm->io_base + QM_CAPBILITY) >> in qm_get_qp_num()
5548 qm->max_qp_num = qm->ctrl_qp_num; in qm_get_qp_num()
5551 if (qm->qp_num > qm->max_qp_num) { in qm_get_qp_num()
5552 dev_err(&qm->pdev->dev, "qp num(%u) is more than max qp num(%u)!\n", in qm_get_qp_num()
5553 qm->qp_num, qm->max_qp_num); in qm_get_qp_num()
5554 return -EINVAL; in qm_get_qp_num()
5562 struct pci_dev *pdev = qm->pdev; in qm_get_pci_res()
5563 struct device *dev = &pdev->dev; in qm_get_pci_res()
5566 ret = pci_request_mem_regions(pdev, qm->dev_name); in qm_get_pci_res()
5572 qm->phys_base = pci_resource_start(pdev, PCI_BAR_2); in qm_get_pci_res()
5573 qm->io_base = ioremap(qm->phys_base, pci_resource_len(pdev, PCI_BAR_2)); in qm_get_pci_res()
5574 if (!qm->io_base) { in qm_get_pci_res()
5575 ret = -EIO; in qm_get_pci_res()
5579 if (qm->ver > QM_HW_V2) { in qm_get_pci_res()
5580 if (qm->fun_type == QM_HW_PF) in qm_get_pci_res()
5581 qm->use_db_isolation = readl(qm->io_base + in qm_get_pci_res()
5584 qm->use_db_isolation = readl(qm->io_base + in qm_get_pci_res()
5588 if (qm->use_db_isolation) { in qm_get_pci_res()
5589 qm->db_interval = QM_QP_DB_INTERVAL; in qm_get_pci_res()
5590 qm->db_phys_base = pci_resource_start(pdev, PCI_BAR_4); in qm_get_pci_res()
5591 qm->db_io_base = ioremap(qm->db_phys_base, in qm_get_pci_res()
5593 if (!qm->db_io_base) { in qm_get_pci_res()
5594 ret = -EIO; in qm_get_pci_res()
5598 qm->db_phys_base = qm->phys_base; in qm_get_pci_res()
5599 qm->db_io_base = qm->io_base; in qm_get_pci_res()
5600 qm->db_interval = 0; in qm_get_pci_res()
5603 if (qm->fun_type == QM_HW_PF) { in qm_get_pci_res()
5612 if (qm->use_db_isolation) in qm_get_pci_res()
5613 iounmap(qm->db_io_base); in qm_get_pci_res()
5615 iounmap(qm->io_base); in qm_get_pci_res()
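
qm_get_pci_res() claims the device's memory regions, maps BAR 2 for the main register space and, when doorbell isolation is in use on HW V3, additionally maps BAR 4 for the per-queue doorbell pages, unwinding the mappings on any failure. A reduced sketch of that claim-and-map pattern with a placeholder name and BAR index:

#include <linux/io.h>
#include <linux/pci.h>

/* Sketch: request the regions, map one BAR, and release the regions if
 * the mapping fails. On success the caller owns both the mapping and
 * the regions until it calls iounmap() and pci_release_mem_regions().
 */
static void __iomem *example_map_bar(struct pci_dev *pdev, int bar)
{
        void __iomem *base;
        int ret;

        ret = pci_request_mem_regions(pdev, "example_qm" /* placeholder */);
        if (ret)
                return NULL;

        base = ioremap(pci_resource_start(pdev, bar),
                       pci_resource_len(pdev, bar));
        if (!base)
                pci_release_mem_regions(pdev);

        return base;
}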
5623 struct pci_dev *pdev = qm->pdev; in hisi_qm_pci_init()
5624 struct device *dev = &pdev->dev; in hisi_qm_pci_init()
5643 if (!qm->ops->get_irq_num) { in hisi_qm_pci_init()
5644 ret = -EOPNOTSUPP; in hisi_qm_pci_init()
5647 num_vec = qm->ops->get_irq_num(qm); in hisi_qm_pci_init()
5665 INIT_WORK(&qm->work, qm_work_process); in hisi_qm_init_work()
5666 if (qm->fun_type == QM_HW_PF) in hisi_qm_init_work()
5667 INIT_WORK(&qm->rst_work, hisi_qm_controller_reset); in hisi_qm_init_work()
5669 if (qm->ver > QM_HW_V2) in hisi_qm_init_work()
5670 INIT_WORK(&qm->cmd_process, qm_cmd_process); in hisi_qm_init_work()
5675 struct device *dev = &qm->pdev->dev; in hisi_qp_alloc_memory()
5679 qm->qp_array = kcalloc(qm->qp_num, sizeof(struct hisi_qp), GFP_KERNEL); in hisi_qp_alloc_memory()
5680 if (!qm->qp_array) in hisi_qp_alloc_memory()
5681 return -ENOMEM; in hisi_qp_alloc_memory()
5684 qp_dma_size = qm->sqe_size * QM_Q_DEPTH + in hisi_qp_alloc_memory()
5687 for (i = 0; i < qm->qp_num; i++) { in hisi_qp_alloc_memory()
5704 struct device *dev = &qm->pdev->dev; in hisi_qm_memory_init()
5708 total_vfs = pci_sriov_get_totalvfs(qm->pdev); in hisi_qm_memory_init()
5709 qm->factor = kcalloc(total_vfs + 1, sizeof(struct qm_shaper_factor), GFP_KERNEL); in hisi_qm_memory_init()
5710 if (!qm->factor) in hisi_qm_memory_init()
5711 return -ENOMEM; in hisi_qm_memory_init()
5714 (qm)->type = ((qm)->qdma.va + (off)); \ in hisi_qm_memory_init()
5715 (qm)->type##_dma = (qm)->qdma.dma + (off); \ in hisi_qm_memory_init()
5719 idr_init(&qm->qp_idr); in hisi_qm_memory_init()
5720 qm->qdma.size = QMC_ALIGN(sizeof(struct qm_eqe) * QM_EQ_DEPTH) + in hisi_qm_memory_init()
5722 QMC_ALIGN(sizeof(struct qm_sqc) * qm->qp_num) + in hisi_qm_memory_init()
5723 QMC_ALIGN(sizeof(struct qm_cqc) * qm->qp_num); in hisi_qm_memory_init()
5724 qm->qdma.va = dma_alloc_coherent(dev, qm->qdma.size, &qm->qdma.dma, in hisi_qm_memory_init()
5726 dev_dbg(dev, "allocate qm dma buf size=%zx\n", qm->qdma.size); in hisi_qm_memory_init()
5727 if (!qm->qdma.va) { in hisi_qm_memory_init()
5728 ret = -ENOMEM; in hisi_qm_memory_init()
5734 QM_INIT_BUF(qm, sqc, qm->qp_num); in hisi_qm_memory_init()
5735 QM_INIT_BUF(qm, cqc, qm->qp_num); in hisi_qm_memory_init()
5744 dma_free_coherent(dev, qm->qdma.size, qm->qdma.va, qm->qdma.dma); in hisi_qm_memory_init()
5746 kfree(qm->factor); in hisi_qm_memory_init()
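
hisi_qm_memory_init() makes a single coherent DMA allocation and carves it into the EQE, AEQE, SQC and CQC regions at aligned offsets via the QM_INIT_BUF() macro, so only one buffer has to be freed on teardown. A hedged sketch of that single-allocation carve-up; the two region sizes and the 32-byte alignment are illustrative, not the QM's real layout:

#include <linux/dma-mapping.h>
#include <linux/kernel.h>

/* Sketch: one dma_alloc_coherent() call, two aligned sub-buffers. */
struct example_rings {
        void *va;
        dma_addr_t dma;
        size_t size;
        void *ring_a;
        dma_addr_t ring_a_dma;
        void *ring_b;
        dma_addr_t ring_b_dma;
};

static int example_rings_alloc(struct device *dev, struct example_rings *r,
                               size_t a_size, size_t b_size)
{
        size_t off = ALIGN(a_size, 32);

        r->size = off + ALIGN(b_size, 32);
        r->va = dma_alloc_coherent(dev, r->size, &r->dma, GFP_KERNEL);
        if (!r->va)
                return -ENOMEM;

        r->ring_a = r->va;
        r->ring_a_dma = r->dma;
        r->ring_b = r->va + off;
        r->ring_b_dma = r->dma + off;

        return 0;
}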
5752 * hisi_qm_init() - Initialize the qm configuration.
5759 struct pci_dev *pdev = qm->pdev; in hisi_qm_init()
5760 struct device *dev = &pdev->dev; in hisi_qm_init()
5773 if (qm->fun_type == QM_HW_VF && qm->ver != QM_HW_V1) { in hisi_qm_init()
5775 ret = hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num); in hisi_qm_init()
5790 atomic_set(&qm->status.flags, QM_INIT); in hisi_qm_init()
5795 uacce_remove(qm->uacce); in hisi_qm_init()
5796 qm->uacce = NULL; in hisi_qm_init()
5806 * hisi_qm_get_dfx_access() - Try to get dfx access.
5816 struct device *dev = &qm->pdev->dev; in hisi_qm_get_dfx_access()
5819 dev_info(dev, "cannot read/write - device is suspended.\n"); in hisi_qm_get_dfx_access()
5820 return -EAGAIN; in hisi_qm_get_dfx_access()
5828 * hisi_qm_put_dfx_access() - Put dfx access.
5840 * hisi_qm_pm_init() - Initialize qm runtime PM.
5847 struct device *dev = &qm->pdev->dev; in hisi_qm_pm_init()
5849 if (qm->fun_type == QM_HW_VF || qm->ver < QM_HW_V3) in hisi_qm_pm_init()
5859 * hisi_qm_pm_uninit() - Uninitialize qm runtime PM.
5866 struct device *dev = &qm->pdev->dev; in hisi_qm_pm_uninit()
5868 if (qm->fun_type == QM_HW_VF || qm->ver < QM_HW_V3) in hisi_qm_pm_uninit()
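
The PM init/uninit pair only applies to HW V3 PFs and builds on runtime PM with autosuspend. A hedged sketch of a matching setup/teardown pair; the 5000 ms autosuspend delay is an illustrative value, not necessarily what this driver uses:

#include <linux/pm_runtime.h>

/* Sketch: enable runtime-PM autosuspend at init and undo it at uninit. */
static void example_pm_init(struct device *dev)
{
        pm_runtime_set_autosuspend_delay(dev, 5000 /* illustrative ms */);
        pm_runtime_use_autosuspend(dev);
        pm_runtime_put_noidle(dev);
}

static void example_pm_uninit(struct device *dev)
{
        pm_runtime_get_noresume(dev);
        pm_runtime_dont_use_autosuspend(dev);
}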
5878 struct pci_dev *pdev = qm->pdev; in qm_prepare_for_suspend()
5882 ret = qm->ops->set_msi(qm, false); in qm_prepare_for_suspend()
5890 qm->io_base + ACC_MASTER_GLOBAL_CTRL); in qm_prepare_for_suspend()
5892 ret = readl_relaxed_poll_timeout(qm->io_base + ACC_MASTER_TRANS_RETURN, in qm_prepare_for_suspend()
5910 struct pci_dev *pdev = qm->pdev; in qm_rebuild_for_resume()
5919 ret = qm->ops->set_msi(qm, true); in qm_rebuild_for_resume()
5938 * hisi_qm_suspend() - Runtime suspend of given device.
5966 * hisi_qm_resume() - Runtime resume of given device.
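
hisi_qm_suspend() and hisi_qm_resume() are intended to back a PF driver's runtime-PM callbacks. A hedged sketch of the dev_pm_ops wiring such a driver would typically provide (the ops name is a placeholder):

#include <linux/pm_runtime.h>
#include "qm.h"   /* header path assumed; declares hisi_qm_suspend/resume */

/* Sketch: expose the QM helpers as the driver's runtime-PM ops. */
static const struct dev_pm_ops example_qm_pm_ops = {
        SET_RUNTIME_PM_OPS(hisi_qm_suspend, hisi_qm_resume, NULL)
};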