Lines matching refs:cpt — identifier cross-reference hits, apparently from the Cavium CPT PF driver (cptpf_main.c)
30 static void cpt_disable_cores(struct cpt_device *cpt, u64 coremask, in cpt_disable_cores() argument
36 struct device *dev = &cpt->pdev->dev; in cpt_disable_cores()
39 coremask = (coremask << cpt->max_se_cores); in cpt_disable_cores()
42 grpmask = cpt_read_csr64(cpt->reg_base, CPTX_PF_GX_EN(0, grp)); in cpt_disable_cores()
43 cpt_write_csr64(cpt->reg_base, CPTX_PF_GX_EN(0, grp), in cpt_disable_cores()
46 grp = cpt_read_csr64(cpt->reg_base, CPTX_PF_EXEC_BUSY(0)); in cpt_disable_cores()
49 grp = cpt_read_csr64(cpt->reg_base, in cpt_disable_cores()
58 pf_exe_ctl = cpt_read_csr64(cpt->reg_base, CPTX_PF_EXE_CTL(0)); in cpt_disable_cores()
59 cpt_write_csr64(cpt->reg_base, CPTX_PF_EXE_CTL(0), in cpt_disable_cores()
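Read together with the in-tree Cavium CPT PF driver this listing appears to come from, cpt_disable_cores() looks roughly like the sketch below. The timeout of 100 polls and the CSR_DELAY pause are assumptions taken from that driver, and the busy status is kept in a 64-bit local here (the listing reuses the u8 'grp' argument for it), so treat this as a reconstruction rather than the exact source.

static void cpt_disable_cores(struct cpt_device *cpt, u64 coremask,
			      u8 type, u8 grp)
{
	u64 pf_exe_ctl, grpmask, busy;
	u32 timeout = 100;
	struct device *dev = &cpt->pdev->dev;

	/* AE core bits sit above the SE core bits in these CSRs */
	if (type == AE_TYPES)
		coremask = (coremask << cpt->max_se_cores);

	/* Detach the cores from engine group 'grp' */
	grpmask = cpt_read_csr64(cpt->reg_base, CPTX_PF_GX_EN(0, grp));
	cpt_write_csr64(cpt->reg_base, CPTX_PF_GX_EN(0, grp),
			grpmask & ~coremask);
	udelay(CSR_DELAY);

	/* Poll the 64-bit busy mask, bounded by 'timeout' iterations */
	busy = cpt_read_csr64(cpt->reg_base, CPTX_PF_EXEC_BUSY(0));
	while (busy & coremask) {
		dev_err(dev, "Cores still busy %llx", coremask);
		if (!timeout--)
			break;
		udelay(CSR_DELAY);
		busy = cpt_read_csr64(cpt->reg_base, CPTX_PF_EXEC_BUSY(0));
	}

	/* Finally drop the per-core enable bits */
	pf_exe_ctl = cpt_read_csr64(cpt->reg_base, CPTX_PF_EXE_CTL(0));
	cpt_write_csr64(cpt->reg_base, CPTX_PF_EXE_CTL(0),
			pf_exe_ctl & ~coremask);
	udelay(CSR_DELAY);
}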
67 static void cpt_enable_cores(struct cpt_device *cpt, u64 coremask, in cpt_enable_cores() argument
73 coremask = (coremask << cpt->max_se_cores); in cpt_enable_cores()
75 pf_exe_ctl = cpt_read_csr64(cpt->reg_base, CPTX_PF_EXE_CTL(0)); in cpt_enable_cores()
76 cpt_write_csr64(cpt->reg_base, CPTX_PF_EXE_CTL(0), in cpt_enable_cores()
81 static void cpt_configure_group(struct cpt_device *cpt, u8 grp, in cpt_configure_group() argument
87 coremask = (coremask << cpt->max_se_cores); in cpt_configure_group()
89 pf_gx_en = cpt_read_csr64(cpt->reg_base, CPTX_PF_GX_EN(0, grp)); in cpt_configure_group()
90 cpt_write_csr64(cpt->reg_base, CPTX_PF_GX_EN(0, grp), in cpt_configure_group()
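cpt_enable_cores() and cpt_configure_group() are the mirror image: a read-modify-write that ORs the (possibly AE-shifted) core mask into CPTX_PF_EXE_CTL or the group's CPTX_PF_GX_EN register. A sketch of both, again assuming the driver's CSR_DELAY settle time:

static void cpt_enable_cores(struct cpt_device *cpt, u64 coremask, u8 type)
{
	u64 pf_exe_ctl;

	if (type == AE_TYPES)
		coremask = (coremask << cpt->max_se_cores);

	/* Set the enable bits for the requested cores */
	pf_exe_ctl = cpt_read_csr64(cpt->reg_base, CPTX_PF_EXE_CTL(0));
	cpt_write_csr64(cpt->reg_base, CPTX_PF_EXE_CTL(0),
			pf_exe_ctl | coremask);
	udelay(CSR_DELAY);
}

static void cpt_configure_group(struct cpt_device *cpt, u8 grp,
				u64 coremask, u8 type)
{
	u64 pf_gx_en;

	if (type == AE_TYPES)
		coremask = (coremask << cpt->max_se_cores);

	/* Attach the cores to engine group 'grp' */
	pf_gx_en = cpt_read_csr64(cpt->reg_base, CPTX_PF_GX_EN(0, grp));
	cpt_write_csr64(cpt->reg_base, CPTX_PF_GX_EN(0, grp),
			pf_gx_en | coremask);
	udelay(CSR_DELAY);
}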
95 static void cpt_disable_mbox_interrupts(struct cpt_device *cpt) in cpt_disable_mbox_interrupts() argument
98 cpt_write_csr64(cpt->reg_base, CPTX_PF_MBOX_ENA_W1CX(0, 0), ~0ull); in cpt_disable_mbox_interrupts()
101 static void cpt_disable_ecc_interrupts(struct cpt_device *cpt) in cpt_disable_ecc_interrupts() argument
104 cpt_write_csr64(cpt->reg_base, CPTX_PF_ECC0_ENA_W1C(0), ~0ull); in cpt_disable_ecc_interrupts()
107 static void cpt_disable_exec_interrupts(struct cpt_device *cpt) in cpt_disable_exec_interrupts() argument
110 cpt_write_csr64(cpt->reg_base, CPTX_PF_EXEC_ENA_W1C(0), ~0ull); in cpt_disable_exec_interrupts()
113 static void cpt_disable_all_interrupts(struct cpt_device *cpt) in cpt_disable_all_interrupts() argument
115 cpt_disable_mbox_interrupts(cpt); in cpt_disable_all_interrupts()
116 cpt_disable_ecc_interrupts(cpt); in cpt_disable_all_interrupts()
117 cpt_disable_exec_interrupts(cpt); in cpt_disable_all_interrupts()
120 static void cpt_enable_mbox_interrupts(struct cpt_device *cpt) in cpt_enable_mbox_interrupts() argument
123 cpt_write_csr64(cpt->reg_base, CPTX_PF_MBOX_ENA_W1SX(0, 0), ~0ull); in cpt_enable_mbox_interrupts()
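The interrupt helpers lean on the hardware's paired enable registers: writing 1s to an ENA_W1C register clears enable bits, writing 1s to the matching ENA_W1S register sets them, so masking or unmasking a whole block is a single ~0ull store. A minimal sketch of the mailbox pair:

static void cpt_disable_mbox_interrupts(struct cpt_device *cpt)
{
	/* Write-1-to-clear: drop every mailbox interrupt enable bit */
	cpt_write_csr64(cpt->reg_base, CPTX_PF_MBOX_ENA_W1CX(0, 0), ~0ull);
}

static void cpt_enable_mbox_interrupts(struct cpt_device *cpt)
{
	/* Write-1-to-set: raise every mailbox interrupt enable bit */
	cpt_write_csr64(cpt->reg_base, CPTX_PF_MBOX_ENA_W1SX(0, 0), ~0ull);
}

cpt_disable_all_interrupts() simply calls the mailbox, ECC0 and exec disable variants in turn, as lines 115-117 show.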
126 static int cpt_load_microcode(struct cpt_device *cpt, struct microcode *mcode) in cpt_load_microcode() argument
130 struct device *dev = &cpt->pdev->dev; in cpt_load_microcode()
156 cpt_write_csr64(cpt->reg_base, in cpt_load_microcode()
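cpt_load_microcode() points every engine selected in mcode->core_mask at the DMA address of the image. The CPTX_PF_ENGX_UCODE_BASE register name, the SE-cores-first register layout and the mcode->is_ae / mcode->phys_base fields are taken from the in-tree driver rather than from this listing, so the sketch below is an assumption-laden reconstruction:

static int cpt_load_microcode(struct cpt_device *cpt, struct microcode *mcode)
{
	struct device *dev = &cpt->pdev->dev;
	u32 total_cores;
	int core, shift = 0;

	if (!mcode || !mcode->code || !mcode->code_size) {
		dev_err(dev, "Invalid microcode image\n");
		return -EINVAL;
	}

	/* The UCODE_BASE registers for the SE cores come first,
	 * the AE cores follow after them.
	 */
	if (mcode->is_ae) {
		core = cpt->max_se_cores;
		total_cores = cpt->max_se_cores + cpt->max_ae_cores;
	} else {
		core = 0;
		total_cores = cpt->max_se_cores;
	}

	/* Point every core in the mask at the DMA'd image */
	for (; core < total_cores; core++, shift++) {
		if (mcode->core_mask & (1ULL << shift))
			cpt_write_csr64(cpt->reg_base,
					CPTX_PF_ENGX_UCODE_BASE(0, core),
					(u64)mcode->phys_base);
	}
	return 0;
}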
164 static int do_cpt_init(struct cpt_device *cpt, struct microcode *mcode) in do_cpt_init() argument
167 struct device *dev = &cpt->pdev->dev; in do_cpt_init()
170 cpt->flags &= ~CPT_FLAG_DEVICE_READY; in do_cpt_init()
172 cpt_disable_all_interrupts(cpt); in do_cpt_init()
175 if (mcode->num_cores > cpt->max_ae_cores) { in do_cpt_init()
181 if (cpt->next_group >= CPT_MAX_CORE_GROUPS) { in do_cpt_init()
186 mcode->group = cpt->next_group; in do_cpt_init()
189 cpt_disable_cores(cpt, mcode->core_mask, AE_TYPES, in do_cpt_init()
192 ret = cpt_load_microcode(cpt, mcode); in do_cpt_init()
198 cpt->next_group++; in do_cpt_init()
200 cpt_configure_group(cpt, mcode->group, mcode->core_mask, in do_cpt_init()
203 cpt_enable_cores(cpt, mcode->core_mask, AE_TYPES); in do_cpt_init()
205 if (mcode->num_cores > cpt->max_se_cores) { in do_cpt_init()
210 if (cpt->next_group >= CPT_MAX_CORE_GROUPS) { in do_cpt_init()
215 mcode->group = cpt->next_group; in do_cpt_init()
218 cpt_disable_cores(cpt, mcode->core_mask, SE_TYPES, in do_cpt_init()
221 ret = cpt_load_microcode(cpt, mcode); in do_cpt_init()
227 cpt->next_group++; in do_cpt_init()
229 cpt_configure_group(cpt, mcode->group, mcode->core_mask, in do_cpt_init()
232 cpt_enable_cores(cpt, mcode->core_mask, SE_TYPES); in do_cpt_init()
236 cpt_enable_mbox_interrupts(cpt); in do_cpt_init()
237 cpt->flags |= CPT_FLAG_DEVICE_READY; in do_cpt_init()
243 cpt_enable_mbox_interrupts(cpt); in do_cpt_init()
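do_cpt_init() (lines 164-243) clears CPT_FLAG_DEVICE_READY, masks all interrupts, then runs the same five-step sequence once for the AE image and once for the SE image before re-enabling the mailbox interrupt and setting the ready flag (the mailbox is also re-enabled on the error path, line 243). The helper below is hypothetical and not in the driver; it only condenses that repeated sequence, and the error codes are illustrative.

static int cpt_init_one_engine_type(struct cpt_device *cpt,
				    struct microcode *mcode, u8 type,
				    u32 max_cores)
{
	struct device *dev = &cpt->pdev->dev;
	int ret;

	if (mcode->num_cores > max_cores) {
		dev_err(dev, "Requested more cores than the device has\n");
		return -EINVAL;
	}
	if (cpt->next_group >= CPT_MAX_CORE_GROUPS) {
		dev_err(dev, "No free microcode group left\n");
		return -ENOSPC;
	}
	mcode->group = cpt->next_group;

	/* Park the cores, load the image, claim the group,
	 * attach the cores to it, then let them run.
	 */
	cpt_disable_cores(cpt, mcode->core_mask, type, mcode->group);
	ret = cpt_load_microcode(cpt, mcode);
	if (ret)
		return ret;
	cpt->next_group++;
	cpt_configure_group(cpt, mcode->group, mcode->core_mask, type);
	cpt_enable_cores(cpt, mcode->core_mask, type);
	return 0;
}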
255 static int cpt_ucode_load_fw(struct cpt_device *cpt, const u8 *fw, bool is_ae) in cpt_ucode_load_fw() argument
258 struct device *dev = &cpt->pdev->dev; in cpt_ucode_load_fw()
268 mcode = &cpt->mcode[cpt->next_mc_idx]; in cpt_ucode_load_fw()
281 mcode->code = dma_zalloc_coherent(&cpt->pdev->dev, mcode->code_size, in cpt_ucode_load_fw()
305 ret = do_cpt_init(cpt, mcode); in cpt_ucode_load_fw()
313 cpt->next_mc_idx++; in cpt_ucode_load_fw()
321 static int cpt_ucode_load(struct cpt_device *cpt) in cpt_ucode_load() argument
324 struct device *dev = &cpt->pdev->dev; in cpt_ucode_load()
326 ret = cpt_ucode_load_fw(cpt, "cpt8x-mc-ae.out", true); in cpt_ucode_load()
331 ret = cpt_ucode_load_fw(cpt, "cpt8x-mc-se.out", false); in cpt_ucode_load()
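cpt_ucode_load() (lines 321-331) loads the AE image "cpt8x-mc-ae.out" and then the SE image "cpt8x-mc-se.out" through cpt_ucode_load_fw(), which requests the blob, stages it in a DMA-coherent buffer (the dma_zalloc_coherent call at line 281) and hands it to do_cpt_init(). A compressed sketch follows; the header parsing and byte-swap step are only summarised in comments, and the mcode->is_ae / mcode->phys_base fields are assumptions carried over from the in-tree driver.

static int cpt_ucode_load_fw(struct cpt_device *cpt, const u8 *fw, bool is_ae)
{
	struct device *dev = &cpt->pdev->dev;
	const struct firmware *fw_entry;
	struct microcode *mcode;
	int ret;

	ret = request_firmware(&fw_entry, fw, dev);
	if (ret)
		return ret;

	mcode = &cpt->mcode[cpt->next_mc_idx];
	mcode->is_ae = is_ae;
	/* ... parse the image header here: version string, code_size,
	 * num_cores and the core_mask the image wants ...
	 */

	/* The engines fetch the microcode by DMA, so it must sit in a
	 * coherent buffer (the listing still uses dma_zalloc_coherent).
	 */
	mcode->code = dma_zalloc_coherent(&cpt->pdev->dev, mcode->code_size,
					  &mcode->phys_base, GFP_KERNEL);
	if (!mcode->code) {
		ret = -ENOMEM;
		goto out;
	}
	/* ... copy the payload in and byte-swap it to the engines'
	 * expected 64-bit order ...
	 */

	ret = do_cpt_init(cpt, mcode);
	if (!ret)
		cpt->next_mc_idx++;	/* slot is consumed only on success */

out:
	release_firmware(fw_entry);
	return ret;
}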
342 struct cpt_device *cpt = (struct cpt_device *)cpt_irq; in cpt_mbx0_intr_handler() local
344 cpt_mbox_intr_handler(cpt, 0); in cpt_mbx0_intr_handler()
349 static void cpt_reset(struct cpt_device *cpt) in cpt_reset() argument
351 cpt_write_csr64(cpt->reg_base, CPTX_PF_RESET(0), 1); in cpt_reset()
354 static void cpt_find_max_enabled_cores(struct cpt_device *cpt) in cpt_find_max_enabled_cores() argument
358 pf_cnsts.u = cpt_read_csr64(cpt->reg_base, CPTX_PF_CONSTANTS(0)); in cpt_find_max_enabled_cores()
359 cpt->max_se_cores = pf_cnsts.s.se; in cpt_find_max_enabled_cores()
360 cpt->max_ae_cores = pf_cnsts.s.ae; in cpt_find_max_enabled_cores()
363 static u32 cpt_check_bist_status(struct cpt_device *cpt) in cpt_check_bist_status() argument
367 bist_sts.u = cpt_read_csr64(cpt->reg_base, in cpt_check_bist_status()
373 static u64 cpt_check_exe_bist_status(struct cpt_device *cpt) in cpt_check_exe_bist_status() argument
377 bist_sts.u = cpt_read_csr64(cpt->reg_base, in cpt_check_exe_bist_status()
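cpt_reset(), cpt_find_max_enabled_cores() and the two BIST checks are thin CSR accessors. The union types and the CPTX_PF_BIST_STATUS / CPTX_PF_EXE_BIST_STATUS register names below come from the driver's register header, not from this listing, so read them as assumptions:

static void cpt_reset(struct cpt_device *cpt)
{
	cpt_write_csr64(cpt->reg_base, CPTX_PF_RESET(0), 1);
}

static void cpt_find_max_enabled_cores(struct cpt_device *cpt)
{
	union cptx_pf_constants pf_cnsts = {0};

	/* CPTX_PF_CONSTANTS reports how many SE and AE engines the part
	 * has; cache the counts for the core-mask shifts used above.
	 */
	pf_cnsts.u = cpt_read_csr64(cpt->reg_base, CPTX_PF_CONSTANTS(0));
	cpt->max_se_cores = pf_cnsts.s.se;
	cpt->max_ae_cores = pf_cnsts.s.ae;
}

static u32 cpt_check_bist_status(struct cpt_device *cpt)
{
	union cptx_pf_bist_status bist_sts = {0};

	bist_sts.u = cpt_read_csr64(cpt->reg_base, CPTX_PF_BIST_STATUS(0));
	return (u32)bist_sts.u;	/* non-zero: a RAM failed self-test */
}

static u64 cpt_check_exe_bist_status(struct cpt_device *cpt)
{
	union cptx_pf_exe_bist_status bist_sts = {0};

	bist_sts.u = cpt_read_csr64(cpt->reg_base,
				    CPTX_PF_EXE_BIST_STATUS(0));
	return bist_sts.u;	/* one bit per engine */
}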
383 static void cpt_disable_all_cores(struct cpt_device *cpt) in cpt_disable_all_cores() argument
386 struct device *dev = &cpt->pdev->dev; in cpt_disable_all_cores()
390 cpt_write_csr64(cpt->reg_base, CPTX_PF_GX_EN(0, grp), 0); in cpt_disable_all_cores()
394 grp = cpt_read_csr64(cpt->reg_base, CPTX_PF_EXEC_BUSY(0)); in cpt_disable_all_cores()
397 grp = cpt_read_csr64(cpt->reg_base, in cpt_disable_all_cores()
405 cpt_write_csr64(cpt->reg_base, CPTX_PF_EXE_CTL(0), 0); in cpt_disable_all_cores()
413 static void cpt_unload_microcode(struct cpt_device *cpt) in cpt_unload_microcode() argument
419 struct microcode *mcode = &cpt->mcode[grp]; in cpt_unload_microcode()
421 if (cpt->mcode[grp].code) in cpt_unload_microcode()
422 dma_free_coherent(&cpt->pdev->dev, mcode->code_size, in cpt_unload_microcode()
428 cpt_write_csr64(cpt->reg_base, in cpt_unload_microcode()
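cpt_unload_microcode() frees whatever coherent buffers were allocated for the group slots and then zeroes every engine's microcode base pointer. CPT_MAX_TOTAL_CORES, CPTX_PF_ENGX_UCODE_BASE and mcode->phys_base are again taken from the in-tree driver:

static void cpt_unload_microcode(struct cpt_device *cpt)
{
	int grp, core;

	/* Free the DMA buffers backing each loaded image */
	for (grp = 0; grp < CPT_MAX_CORE_GROUPS; grp++) {
		struct microcode *mcode = &cpt->mcode[grp];

		if (cpt->mcode[grp].code)
			dma_free_coherent(&cpt->pdev->dev, mcode->code_size,
					  mcode->code, mcode->phys_base);
		mcode->code = NULL;
	}

	/* Detach every engine from its (now stale) microcode pointer */
	for (core = 0; core < CPT_MAX_TOTAL_CORES; core++)
		cpt_write_csr64(cpt->reg_base,
				CPTX_PF_ENGX_UCODE_BASE(0, core), 0ull);
}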
432 static int cpt_device_init(struct cpt_device *cpt) in cpt_device_init() argument
435 struct device *dev = &cpt->pdev->dev; in cpt_device_init()
438 cpt_reset(cpt); in cpt_device_init()
442 bist = (u64)cpt_check_bist_status(cpt); in cpt_device_init()
448 bist = cpt_check_exe_bist_status(cpt); in cpt_device_init()
456 cpt_find_max_enabled_cores(cpt); in cpt_device_init()
458 cpt_disable_all_cores(cpt); in cpt_device_init()
460 cpt->next_mc_idx = 0; in cpt_device_init()
461 cpt->next_group = 0; in cpt_device_init()
463 cpt->flags |= CPT_FLAG_DEVICE_READY; in cpt_device_init()
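cpt_device_init() strings the helpers above into the bring-up order visible in lines 438-463: reset, BIST checks, core discovery, then park everything and mark the device ready. The post-reset delay and the exact error codes below are assumptions:

static int cpt_device_init(struct cpt_device *cpt)
{
	u64 bist;
	struct device *dev = &cpt->pdev->dev;

	/* Start from a clean slate */
	cpt_reset(cpt);
	msleep(100);		/* settle time after reset (assumed) */

	/* Check that the RAMs and the engines passed self-test */
	bist = (u64)cpt_check_bist_status(cpt);
	if (bist) {
		dev_err(dev, "RAM BIST failed with code 0x%llx", bist);
		return -ENODEV;
	}

	bist = cpt_check_exe_bist_status(cpt);
	if (bist) {
		dev_err(dev, "Engine BIST failed with code 0x%llx", bist);
		return -ENODEV;
	}

	/* Discover the engine counts, then park everything */
	cpt_find_max_enabled_cores(cpt);
	cpt_disable_all_cores(cpt);

	cpt->next_mc_idx = 0;
	cpt->next_group = 0;
	cpt->flags |= CPT_FLAG_DEVICE_READY;

	return 0;
}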
468 static int cpt_register_interrupts(struct cpt_device *cpt) in cpt_register_interrupts() argument
471 struct device *dev = &cpt->pdev->dev; in cpt_register_interrupts()
474 ret = pci_alloc_irq_vectors(cpt->pdev, CPT_PF_MSIX_VECTORS, in cpt_register_interrupts()
477 dev_err(&cpt->pdev->dev, "Request for #%d msix vectors failed\n", in cpt_register_interrupts()
483 ret = request_irq(pci_irq_vector(cpt->pdev, CPT_PF_INT_VEC_E_MBOXX(0)), in cpt_register_interrupts()
484 cpt_mbx0_intr_handler, 0, "CPT Mbox0", cpt); in cpt_register_interrupts()
489 cpt_enable_mbox_interrupts(cpt); in cpt_register_interrupts()
494 pci_disable_msix(cpt->pdev); in cpt_register_interrupts()
498 static void cpt_unregister_interrupts(struct cpt_device *cpt) in cpt_unregister_interrupts() argument
500 free_irq(pci_irq_vector(cpt->pdev, CPT_PF_INT_VEC_E_MBOXX(0)), cpt); in cpt_unregister_interrupts()
501 pci_disable_msix(cpt->pdev); in cpt_unregister_interrupts()
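cpt_register_interrupts() allocates the PF's MSI-X vectors and hooks only the VF-to-PF mailbox vector before unmasking it. One thing worth noting in the listing: the error path at line 494 (and cpt_unregister_interrupts() at line 501) tears the vectors down with pci_disable_msix() even though they were obtained with pci_alloc_irq_vectors(); pci_free_irq_vectors() is the matching call, and the sketch below uses it:

static int cpt_register_interrupts(struct cpt_device *cpt)
{
	struct device *dev = &cpt->pdev->dev;
	int ret;

	/* One MSI-X vector per PF interrupt source */
	ret = pci_alloc_irq_vectors(cpt->pdev, CPT_PF_MSIX_VECTORS,
				    CPT_PF_MSIX_VECTORS, PCI_IRQ_MSIX);
	if (ret < 0) {
		dev_err(dev, "Request for #%d msix vectors failed\n",
			CPT_PF_MSIX_VECTORS);
		return ret;
	}

	/* Hook the VF->PF mailbox interrupt and unmask it */
	ret = request_irq(pci_irq_vector(cpt->pdev, CPT_PF_INT_VEC_E_MBOXX(0)),
			  cpt_mbx0_intr_handler, 0, "CPT Mbox0", cpt);
	if (ret) {
		dev_err(dev, "Request irq failed\n");
		pci_free_irq_vectors(cpt->pdev);
		return ret;
	}

	cpt_enable_mbox_interrupts(cpt);
	return 0;
}

static void cpt_unregister_interrupts(struct cpt_device *cpt)
{
	free_irq(pci_irq_vector(cpt->pdev, CPT_PF_INT_VEC_E_MBOXX(0)), cpt);
	pci_free_irq_vectors(cpt->pdev);
}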
504 static int cpt_sriov_init(struct cpt_device *cpt, int num_vfs) in cpt_sriov_init() argument
509 struct pci_dev *pdev = cpt->pdev; in cpt_sriov_init()
517 cpt->num_vf_en = num_vfs; /* User requested VFs */ in cpt_sriov_init()
519 if (total_vf_cnt < cpt->num_vf_en) in cpt_sriov_init()
520 cpt->num_vf_en = total_vf_cnt; in cpt_sriov_init()
526 err = pci_enable_sriov(pdev, cpt->num_vf_en); in cpt_sriov_init()
529 cpt->num_vf_en); in cpt_sriov_init()
530 cpt->num_vf_en = 0; in cpt_sriov_init()
537 cpt->num_vf_en); in cpt_sriov_init()
539 cpt->flags |= CPT_FLAG_SRIOV_ENABLED; in cpt_sriov_init()
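cpt_sriov_init() clamps the requested VF count to what the PF actually exposes, enables SR-IOV and records the result in cpt->flags. This sketch reads the total VF count with pci_sriov_get_totalvfs() for brevity; the driver itself may read it straight from the SR-IOV capability in config space, and the message strings are partly guessed from the fragments at lines 529 and 537:

static int cpt_sriov_init(struct cpt_device *cpt, int num_vfs)
{
	struct pci_dev *pdev = cpt->pdev;
	int total_vf_cnt;
	int err;

	cpt->num_vf_en = num_vfs;		/* user requested VFs */
	total_vf_cnt = pci_sriov_get_totalvfs(pdev);
	if (total_vf_cnt < cpt->num_vf_en)
		cpt->num_vf_en = total_vf_cnt;	/* clamp to what the PF offers */
	if (!total_vf_cnt)
		return 0;

	err = pci_enable_sriov(pdev, cpt->num_vf_en);
	if (err) {
		dev_err(&pdev->dev, "SRIOV enable failed, num VF is %d\n",
			cpt->num_vf_en);
		cpt->num_vf_en = 0;
		return err;
	}

	dev_info(&pdev->dev, "SRIOV enabled, number of VF available %d\n",
		 cpt->num_vf_en);
	cpt->flags |= CPT_FLAG_SRIOV_ENABLED;
	return 0;
}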
547 struct cpt_device *cpt; in cpt_probe() local
556 cpt = devm_kzalloc(dev, sizeof(*cpt), GFP_KERNEL); in cpt_probe()
557 if (!cpt) in cpt_probe()
560 pci_set_drvdata(pdev, cpt); in cpt_probe()
561 cpt->pdev = pdev; in cpt_probe()
588 cpt->reg_base = pcim_iomap(pdev, 0, 0); in cpt_probe()
589 if (!cpt->reg_base) { in cpt_probe()
596 cpt_device_init(cpt); in cpt_probe()
599 err = cpt_register_interrupts(cpt); in cpt_probe()
603 err = cpt_ucode_load(cpt); in cpt_probe()
608 err = cpt_sriov_init(cpt, num_vfs); in cpt_probe()
615 cpt_unregister_interrupts(cpt); in cpt_probe()
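cpt_probe() is managed-PCI boilerplate around the helpers above: allocate the per-device state, map BAR0 (line 588), initialise the hardware, register the mailbox IRQ, load both microcode images, then enable VFs. Everything outside the lines shown, including the error labels and the elided enable/region/DMA-mask setup, is assumed; num_vfs is presumed to be the driver's module parameter:

static int cpt_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct device *dev = &pdev->dev;
	struct cpt_device *cpt;
	int err;

	cpt = devm_kzalloc(dev, sizeof(*cpt), GFP_KERNEL);
	if (!cpt)
		return -ENOMEM;

	pci_set_drvdata(pdev, cpt);
	cpt->pdev = pdev;

	/* ... pcim_enable_device(), request regions, set DMA mask ... */

	/* Map BAR0, which holds all the PF CSRs used above */
	cpt->reg_base = pcim_iomap(pdev, 0, 0);
	if (!cpt->reg_base)
		return -ENOMEM;

	/* Reset, BIST, core discovery */
	cpt_device_init(cpt);

	/* Mailbox IRQ first, then microcode, then VFs */
	err = cpt_register_interrupts(cpt);
	if (err)
		return err;

	err = cpt_ucode_load(cpt);
	if (err)
		goto err_unregister;

	err = cpt_sriov_init(cpt, num_vfs);
	if (err)
		goto err_unregister;

	return 0;

err_unregister:
	cpt_unregister_interrupts(cpt);
	/* managed (devm_/pcim_) resources are released automatically */
	return err;
}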
626 struct cpt_device *cpt = pci_get_drvdata(pdev); in cpt_remove() local
629 cpt_disable_all_cores(cpt); in cpt_remove()
631 cpt_unload_microcode(cpt); in cpt_remove()
632 cpt_unregister_interrupts(cpt); in cpt_remove()
641 struct cpt_device *cpt = pci_get_drvdata(pdev); in cpt_shutdown() local
643 if (!cpt) in cpt_shutdown()
649 cpt_unregister_interrupts(cpt); in cpt_shutdown()
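cpt_remove() and cpt_shutdown() undo the probe work in reverse: quiesce the engines, drop the microcode buffers, free the IRQs and disable SR-IOV (remove only). The PCI region/device release calls present in the full driver are left out of this sketch:

static void cpt_remove(struct pci_dev *pdev)
{
	struct cpt_device *cpt = pci_get_drvdata(pdev);

	/* Quiesce the engines before their microcode buffers go away */
	cpt_disable_all_cores(cpt);
	cpt_unload_microcode(cpt);
	cpt_unregister_interrupts(cpt);
	pci_disable_sriov(pdev);
	pci_set_drvdata(pdev, NULL);
}

static void cpt_shutdown(struct pci_dev *pdev)
{
	struct cpt_device *cpt = pci_get_drvdata(pdev);

	if (!cpt)
		return;

	/* Leave the hardware quiet across kexec/reboot */
	cpt_unregister_interrupts(cpt);
	pci_set_drvdata(pdev, NULL);
}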