Lines matching refs: ccp
33 struct ccp_device *ccp; in ccp_lsb_alloc() local
48 ccp = cmd_q->ccp; in ccp_lsb_alloc()
50 mutex_lock(&ccp->sb_mutex); in ccp_lsb_alloc()
52 start = (u32)bitmap_find_next_zero_area(ccp->lsbmap, in ccp_lsb_alloc()
57 bitmap_set(ccp->lsbmap, start, count); in ccp_lsb_alloc()
59 mutex_unlock(&ccp->sb_mutex); in ccp_lsb_alloc()
63 ccp->sb_avail = 0; in ccp_lsb_alloc()
65 mutex_unlock(&ccp->sb_mutex); in ccp_lsb_alloc()
68 if (wait_event_interruptible(ccp->sb_queue, ccp->sb_avail)) in ccp_lsb_alloc()
87 struct ccp_device *ccp = cmd_q->ccp; in ccp_lsb_free() local
89 mutex_lock(&ccp->sb_mutex); in ccp_lsb_free()
90 bitmap_clear(ccp->lsbmap, start, count); in ccp_lsb_free()
91 ccp->sb_avail = 1; in ccp_lsb_free()
92 mutex_unlock(&ccp->sb_mutex); in ccp_lsb_free()
93 wake_up_interruptible_all(&ccp->sb_queue); in ccp_lsb_free()
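Taken together, ccp_lsb_alloc() and ccp_lsb_free() form a blocking slot allocator over the shared LSB bitmap: the allocator searches ccp->lsbmap for a run of free slots under ccp->sb_mutex and, when none is found, clears ccp->sb_avail (line 63) and sleeps on ccp->sb_queue (line 68); the free path clears the bits, sets sb_avail (line 91), and wakes every waiter (line 93) so each one retries. Below is a minimal userspace model of that pattern, a sketch only: a pthread mutex and condition variable stand in for the kernel's sb_mutex and sb_queue, and all names are illustrative, not the driver's API.

#include <pthread.h>
#include <stdbool.h>

#define LSB_SLOTS 128

static bool slot_used[LSB_SLOTS];          /* models ccp->lsbmap */
static bool slots_available = true;        /* models ccp->sb_avail */
static pthread_mutex_t slot_lock = PTHREAD_MUTEX_INITIALIZER;  /* sb_mutex */
static pthread_cond_t  slot_wait = PTHREAD_COND_INITIALIZER;   /* sb_queue */

/* Find a run of 'count' free slots, -1 if none; this plays the role of
 * bitmap_find_next_zero_area() in the driver. */
static int find_zero_run(unsigned int count)
{
    unsigned int run = 0;

    for (int i = 0; i < LSB_SLOTS; i++) {
        run = slot_used[i] ? 0 : run + 1;
        if (run == count)
            return i - (int)count + 1;
    }
    return -1;
}

/* Models ccp_lsb_alloc(): claim slots, or sleep until a free is signaled. */
int lsb_alloc(unsigned int count)
{
    pthread_mutex_lock(&slot_lock);
    for (;;) {
        int start = find_zero_run(count);

        if (start >= 0) {
            for (unsigned int i = 0; i < count; i++)
                slot_used[start + i] = true;   /* bitmap_set() */
            pthread_mutex_unlock(&slot_lock);
            return start;
        }
        slots_available = false;
        while (!slots_available)               /* wait_event_interruptible() */
            pthread_cond_wait(&slot_wait, &slot_lock);
    }
}

/* Models ccp_lsb_free(): release the slots and wake every waiter. */
void lsb_free(int start, unsigned int count)
{
    pthread_mutex_lock(&slot_lock);
    for (unsigned int i = 0; i < count; i++)
        slot_used[start + i] = false;          /* bitmap_clear() */
    slots_available = true;
    pthread_mutex_unlock(&slot_lock);
    pthread_cond_broadcast(&slot_wait);        /* wake_up_interruptible_all() */
}

Waking all sleepers and letting each retry under the lock is what makes the kernel version safe against spurious wakeups: a woken waiter that still finds no run simply clears sb_avail again and goes back to sleep.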
271 ccp_log_error(cmd_q->ccp, in ccp5_do_cmd()
608 dev_dbg(cmd_q->ccp->dev, "Queue %d can access %d LSB regions\n", in ccp_find_lsb_regions()
614 static int ccp_find_and_assign_lsb_to_q(struct ccp_device *ccp, in ccp_find_and_assign_lsb_to_q() argument
633 for (i = 0; i < ccp->cmd_q_count; i++) { in ccp_find_and_assign_lsb_to_q()
634 struct ccp_cmd_queue *cmd_q = &ccp->cmd_q[i]; in ccp_find_and_assign_lsb_to_q()
649 dev_dbg(ccp->dev, in ccp_find_and_assign_lsb_to_q()
671 static int ccp_assign_lsbs(struct ccp_device *ccp) in ccp_assign_lsbs() argument
683 for (i = 0; i < ccp->cmd_q_count; i++) in ccp_assign_lsbs()
685 lsb_pub, ccp->cmd_q[i].lsbmask, in ccp_assign_lsbs()
690 if (n_lsbs >= ccp->cmd_q_count) { in ccp_assign_lsbs()
700 rc = ccp_find_and_assign_lsb_to_q(ccp, lsb_cnt, n_lsbs, in ccp_assign_lsbs()
718 bitmap_set(ccp->lsbmap, bitno * LSB_SIZE, LSB_SIZE); in ccp_assign_lsbs()
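The assignment logic works in two phases. ccp_assign_lsbs() first aggregates every queue's lsbmask into one public bitmap (the call truncated at line 685 is a bitmap_or()) and checks that the total number of LSBs covers at least one per queue (line 690). If so, ccp_find_and_assign_lsb_to_q() hands out private LSBs, starting with the most constrained queues, and every region claimed privately is then marked used in ccp->lsbmap (line 718) so the shared allocator above skips it. A rough sketch of the aggregate-and-check phase, with a plain uint64_t standing in for the kernel bitmaps and __builtin_popcountll() (a GCC/Clang builtin) standing in for bitmap_weight(); the names are mine, not the driver's:

#include <stdint.h>
#include <stdio.h>

/* OR every queue's access mask into one public mask, then check there are
 * at least as many distinct LSBs as queues before trying to give each
 * queue a private one. */
int enough_lsbs_for_private(const uint64_t q_lsbmask[], int q_count,
                            uint64_t *lsb_pub)
{
    *lsb_pub = 0;
    for (int i = 0; i < q_count; i++)
        *lsb_pub |= q_lsbmask[i];             /* bitmap_or() */
    return __builtin_popcountll(*lsb_pub) >= q_count;  /* bitmap_weight() */
}

int main(void)
{
    uint64_t masks[3] = { 0x03, 0x06, 0x38 }, pub;

    if (enough_lsbs_for_private(masks, 3, &pub))
        printf("private LSB assignment possible, public mask 0x%llx\n",
               (unsigned long long)pub);
    return 0;
}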
726 static void ccp5_disable_queue_interrupts(struct ccp_device *ccp) in ccp5_disable_queue_interrupts() argument
730 for (i = 0; i < ccp->cmd_q_count; i++) in ccp5_disable_queue_interrupts()
731 iowrite32(0x0, ccp->cmd_q[i].reg_int_enable); in ccp5_disable_queue_interrupts()
734 static void ccp5_enable_queue_interrupts(struct ccp_device *ccp) in ccp5_enable_queue_interrupts() argument
738 for (i = 0; i < ccp->cmd_q_count; i++) in ccp5_enable_queue_interrupts()
739 iowrite32(SUPPORTED_INTERRUPTS, ccp->cmd_q[i].reg_int_enable); in ccp5_enable_queue_interrupts()
744 struct ccp_device *ccp = (struct ccp_device *)data; in ccp5_irq_bh() local
748 for (i = 0; i < ccp->cmd_q_count; i++) { in ccp5_irq_bh()
749 struct ccp_cmd_queue *cmd_q = &ccp->cmd_q[i]; in ccp5_irq_bh()
769 ccp5_enable_queue_interrupts(ccp); in ccp5_irq_bh()
774 struct ccp_device *ccp = (struct ccp_device *)data; in ccp5_irq_handler() local
776 ccp5_disable_queue_interrupts(ccp); in ccp5_irq_handler()
777 ccp->total_interrupts++; in ccp5_irq_handler()
778 if (ccp->use_tasklet) in ccp5_irq_handler()
779 tasklet_schedule(&ccp->irq_tasklet); in ccp5_irq_handler()
781 ccp5_irq_bh((unsigned long)ccp); in ccp5_irq_handler()
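These four routines implement the classic top-half/bottom-half split. The helper pair just loops over the queues writing 0 or SUPPORTED_INTERRUPTS to each queue's reg_int_enable. The hard-IRQ handler masks all queue interrupts, bumps total_interrupts, and either schedules the tasklet or, when use_tasklet is clear, runs ccp5_irq_bh() inline (line 781); the bottom half services every queue and unmasks interrupts when done (line 769). A userspace model of that deferral shape, with a boolean flag in place of use_tasklet and printouts in place of real work; everything here is illustrative:

#include <stdbool.h>
#include <stdio.h>

#define NUM_QUEUES 4

static bool irq_enabled[NUM_QUEUES];
static unsigned long total_interrupts;
static bool use_deferred;          /* models ccp->use_tasklet */

static void disable_queue_interrupts(void)
{
    for (int i = 0; i < NUM_QUEUES; i++)
        irq_enabled[i] = false;    /* iowrite32(0x0, reg_int_enable) */
}

static void enable_queue_interrupts(void)
{
    for (int i = 0; i < NUM_QUEUES; i++)
        irq_enabled[i] = true;     /* iowrite32(SUPPORTED_INTERRUPTS, ...) */
}

/* Bottom half: service every queue, then unmask (ccp5_irq_bh()). */
static void irq_bottom_half(void)
{
    for (int i = 0; i < NUM_QUEUES; i++)
        printf("servicing queue %d\n", i);
    enable_queue_interrupts();
}

/* Top half: mask, count, then defer or run inline (ccp5_irq_handler()). */
static void irq_top_half(void)
{
    disable_queue_interrupts();
    total_interrupts++;
    if (use_deferred)
        printf("deferring to bottom half\n");  /* tasklet_schedule() */
    else
        irq_bottom_half();
}

int main(void)
{
    irq_top_half();
    printf("total interrupts: %lu\n", total_interrupts);
    return 0;
}

Masking in the top half keeps the hard-IRQ path short; nothing is re-enabled until the bottom half has drained every queue.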
785 static int ccp5_init(struct ccp_device *ccp) in ccp5_init() argument
787 struct device *dev = ccp->dev; in ccp5_init()
797 qmr = ioread32(ccp->io_regs + Q_MASK_REG); in ccp5_init()
805 ccp->name, i); in ccp5_init()
814 cmd_q = &ccp->cmd_q[ccp->cmd_q_count]; in ccp5_init()
815 ccp->cmd_q_count++; in ccp5_init()
817 cmd_q->ccp = ccp; in ccp5_init()
838 cmd_q->reg_control = ccp->io_regs + in ccp5_init()
861 if (ccp->cmd_q_count == 0) { in ccp5_init()
868 ccp5_disable_queue_interrupts(ccp); in ccp5_init()
869 for (i = 0; i < ccp->cmd_q_count; i++) { in ccp5_init()
870 cmd_q = &ccp->cmd_q[i]; in ccp5_init()
884 ret = sp_request_ccp_irq(ccp->sp, ccp5_irq_handler, ccp->name, ccp); in ccp5_init()
890 if (ccp->use_tasklet) in ccp5_init()
891 tasklet_init(&ccp->irq_tasklet, ccp5_irq_bh, in ccp5_init()
892 (unsigned long)ccp); in ccp5_init()
896 status_lo = ioread32(ccp->io_regs + LSB_PRIVATE_MASK_LO_OFFSET); in ccp5_init()
897 status_hi = ioread32(ccp->io_regs + LSB_PRIVATE_MASK_HI_OFFSET); in ccp5_init()
898 iowrite32(status_lo, ccp->io_regs + LSB_PUBLIC_MASK_LO_OFFSET); in ccp5_init()
899 iowrite32(status_hi, ccp->io_regs + LSB_PUBLIC_MASK_HI_OFFSET); in ccp5_init()
904 for (i = 0; i < ccp->cmd_q_count; i++) { in ccp5_init()
908 cmd_q = &ccp->cmd_q[i]; in ccp5_init()
928 ret = ccp_assign_lsbs(ccp); in ccp5_init()
935 for (i = 0; i < ccp->cmd_q_count; i++) { in ccp5_init()
936 ccp->cmd_q[i].sb_key = ccp_lsb_alloc(&ccp->cmd_q[i], 2); in ccp5_init()
937 ccp->cmd_q[i].sb_ctx = ccp_lsb_alloc(&ccp->cmd_q[i], 2); in ccp5_init()
942 for (i = 0; i < ccp->cmd_q_count; i++) { in ccp5_init()
945 cmd_q = &ccp->cmd_q[i]; in ccp5_init()
948 "%s-q%u", ccp->name, cmd_q->id); in ccp5_init()
961 ccp5_enable_queue_interrupts(ccp); in ccp5_init()
965 ccp_add_device(ccp); in ccp5_init()
967 ret = ccp_register_rng(ccp); in ccp5_init()
972 ret = ccp_dmaengine_register(ccp); in ccp5_init()
977 ccp5_debugfs_setup(ccp); in ccp5_init()
982 ccp_unregister_rng(ccp); in ccp5_init()
985 for (i = 0; i < ccp->cmd_q_count; i++) in ccp5_init()
986 if (ccp->cmd_q[i].kthread) in ccp5_init()
987 kthread_stop(ccp->cmd_q[i].kthread); in ccp5_init()
990 sp_free_ccp_irq(ccp->sp, ccp); in ccp5_init()
993 for (i = 0; i < ccp->cmd_q_count; i++) in ccp5_init()
994 dma_pool_destroy(ccp->cmd_q[i].dma_pool); in ccp5_init()
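ccp5_init() discovers the available queues from Q_MASK_REG, sets up each queue (DMA pool, register pointers), requests the IRQ with an optional tasklet bottom half, publishes the private LSB masks to the public mask registers (lines 896-899), runs ccp_assign_lsbs(), preallocates two LSB slots each for key and context per queue via ccp_lsb_alloc() (lines 936-937), spawns a "%s-q%u" kthread per queue, enables interrupts, and registers the hwrng, dmaengine, and debugfs interfaces. The error labels at the bottom undo those steps in reverse: unregister the hwrng, stop the kthreads, free the IRQ, destroy the DMA pools. A compressed model of that goto-based unwind idiom, with fake_setup()/fake_teardown() as placeholders for the real calls named in the comments:

#include <stdio.h>

static int fake_setup(const char *what)
{
    printf("setup: %s\n", what);
    return 0;                      /* make nonzero to simulate a failure */
}

static void fake_teardown(const char *what)
{
    printf("teardown: %s\n", what);
}

int device_init(void)
{
    int ret;

    ret = fake_setup("dma pools");        /* per-queue dma_pool_create() */
    if (ret)
        return ret;
    ret = fake_setup("irq");              /* sp_request_ccp_irq() */
    if (ret)
        goto e_pool;
    ret = fake_setup("kthreads");         /* per-queue kthread_run() */
    if (ret)
        goto e_irq;
    ret = fake_setup("hwrng");            /* ccp_register_rng() */
    if (ret)
        goto e_kthread;
    ret = fake_setup("dmaengine");        /* ccp_dmaengine_register() */
    if (ret)
        goto e_hwrng;
    return 0;

    /* Each label undoes only what succeeded, in reverse order. */
e_hwrng:
    fake_teardown("hwrng");               /* ccp_unregister_rng() */
e_kthread:
    fake_teardown("kthreads");            /* kthread_stop() loop */
e_irq:
    fake_teardown("irq");                 /* sp_free_ccp_irq() */
e_pool:
    fake_teardown("dma pools");           /* dma_pool_destroy() loop */
    return ret;
}

int main(void)
{
    return device_init() ? 1 : 0;
}

A failure at any step falls through every later label, so teardown always mirrors exactly the prefix of setup that completed.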
999 static void ccp5_destroy(struct ccp_device *ccp) in ccp5_destroy() argument
1001 struct device *dev = ccp->dev; in ccp5_destroy()
1007 ccp_dmaengine_unregister(ccp); in ccp5_destroy()
1010 ccp_unregister_rng(ccp); in ccp5_destroy()
1013 ccp_del_device(ccp); in ccp5_destroy()
1022 ccp5_disable_queue_interrupts(ccp); in ccp5_destroy()
1023 for (i = 0; i < ccp->cmd_q_count; i++) { in ccp5_destroy()
1024 cmd_q = &ccp->cmd_q[i]; in ccp5_destroy()
1036 for (i = 0; i < ccp->cmd_q_count; i++) in ccp5_destroy()
1037 if (ccp->cmd_q[i].kthread) in ccp5_destroy()
1038 kthread_stop(ccp->cmd_q[i].kthread); in ccp5_destroy()
1040 sp_free_ccp_irq(ccp->sp, ccp); in ccp5_destroy()
1042 for (i = 0; i < ccp->cmd_q_count; i++) { in ccp5_destroy()
1043 cmd_q = &ccp->cmd_q[i]; in ccp5_destroy()
1049 while (!list_empty(&ccp->cmd)) { in ccp5_destroy()
1051 cmd = list_first_entry(&ccp->cmd, struct ccp_cmd, entry); in ccp5_destroy()
1055 while (!list_empty(&ccp->backlog)) { in ccp5_destroy()
1057 cmd = list_first_entry(&ccp->backlog, struct ccp_cmd, entry); in ccp5_destroy()
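ccp5_destroy() tears down in roughly the reverse of init: unregister the dmaengine and hwrng, remove the device from the unit list, mask all queue interrupts, stop the per-queue kthreads, free the IRQ, release per-queue resources, and finally fail every command still sitting on ccp->cmd or ccp->backlog so no submitter is left blocked on hardware that is going away. A minimal sketch of those final drain loops, with a singly linked list and an illustrative error value replacing the kernel's list_head machinery:

#include <stdio.h>
#include <stdlib.h>

struct fake_cmd {
    int id;
    struct fake_cmd *next;
    void (*callback)(struct fake_cmd *cmd, int err);
};

/* Pop each pending command and complete it with an error. */
static void drain_cmd_list(struct fake_cmd **head, int err)
{
    while (*head) {                       /* while (!list_empty(...)) */
        struct fake_cmd *cmd = *head;     /* list_first_entry() */
        *head = cmd->next;                /* list_del() */
        cmd->callback(cmd, err);
        free(cmd);
    }
}

static void report(struct fake_cmd *cmd, int err)
{
    printf("cmd %d completed with error %d\n", cmd->id, err);
}

int main(void)
{
    struct fake_cmd *pending = NULL;

    for (int i = 0; i < 3; i++) {
        struct fake_cmd *c = malloc(sizeof(*c));
        c->id = i;
        c->callback = report;
        c->next = pending;
        pending = c;
    }
    drain_cmd_list(&pending, -19 /* illustrative, in the spirit of -ENODEV */);
    return 0;
}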
1063 static void ccp5_config(struct ccp_device *ccp) in ccp5_config() argument
1066 iowrite32(0x0, ccp->io_regs + CMD5_REQID_CONFIG_OFFSET); in ccp5_config()
1069 static void ccp5other_config(struct ccp_device *ccp) in ccp5other_config() argument
1076 iowrite32(0x00012D57, ccp->io_regs + CMD5_TRNG_CTL_OFFSET); in ccp5other_config()
1077 iowrite32(0x00000003, ccp->io_regs + CMD5_CONFIG_0_OFFSET); in ccp5other_config()
1079 rnd = ioread32(ccp->io_regs + TRNG_OUT_REG); in ccp5other_config()
1080 iowrite32(rnd, ccp->io_regs + CMD5_AES_MASK_OFFSET); in ccp5other_config()
1083 iowrite32(0x0000001F, ccp->io_regs + CMD5_QUEUE_MASK_OFFSET); in ccp5other_config()
1084 iowrite32(0x00005B6D, ccp->io_regs + CMD5_QUEUE_PRIO_OFFSET); in ccp5other_config()
1085 iowrite32(0x00000000, ccp->io_regs + CMD5_CMD_TIMEOUT_OFFSET); in ccp5other_config()
1087 iowrite32(0x3FFFFFFF, ccp->io_regs + LSB_PRIVATE_MASK_LO_OFFSET); in ccp5other_config()
1088 iowrite32(0x000003FF, ccp->io_regs + LSB_PRIVATE_MASK_HI_OFFSET); in ccp5other_config()
1090 iowrite32(0x00108823, ccp->io_regs + CMD5_CLK_GATE_CTL_OFFSET); in ccp5other_config()
1092 ccp5_config(ccp); in ccp5other_config()
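ccp5other_config() is a fixed sequence of magic register writes: seed the TRNG and feed a sample back into the AES mask register (lines 1076-1080), set the queue mask and priorities, disable the command timeout, open up the private LSB masks, set clock gating, then fall through to the common ccp5_config(), which zeroes the REQID configuration. One tidy way to express the straight-line portion of such a sequence is an offset/value table; treating it that way is my restructuring, not the driver's, and the byte offsets below are placeholders for the CMD5_ and LSB_ register constants named in the listing. The TRNG step is omitted because its read-then-write dependency does not fit a pure table.

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

struct reg_init {
    size_t   offset;   /* byte offset from the MMIO base */
    uint32_t value;
};

static const struct reg_init config_regs[] = {
    { 0x00, 0x0000001F },   /* CMD5_QUEUE_MASK_OFFSET */
    { 0x04, 0x00005B6D },   /* CMD5_QUEUE_PRIO_OFFSET */
    { 0x08, 0x00000000 },   /* CMD5_CMD_TIMEOUT_OFFSET */
    { 0x0C, 0x3FFFFFFF },   /* LSB_PRIVATE_MASK_LO_OFFSET */
    { 0x10, 0x000003FF },   /* LSB_PRIVATE_MASK_HI_OFFSET */
    { 0x14, 0x00108823 },   /* CMD5_CLK_GATE_CTL_OFFSET */
};

/* Replay the table against a register window (iowrite32() per entry). */
static void write_reg_table(volatile uint32_t *base,
                            const struct reg_init *tbl, size_t n)
{
    for (size_t i = 0; i < n; i++)
        base[tbl[i].offset / 4] = tbl[i].value;
}

int main(void)
{
    uint32_t fake_mmio[8] = { 0 };   /* stands in for ccp->io_regs */

    write_reg_table(fake_mmio, config_regs,
                    sizeof(config_regs) / sizeof(config_regs[0]));
    printf("queue mask register now 0x%08X\n", (unsigned)fake_mmio[0]);
    return 0;
}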