Lines matching refs: drvdata

60 void cc_req_mgr_fini(struct cc_drvdata *drvdata)  in cc_req_mgr_fini()  argument
62 struct cc_req_mgr_handle *req_mgr_h = drvdata->request_mgr_handle; in cc_req_mgr_fini()
63 struct device *dev = drvdata_to_dev(drvdata); in cc_req_mgr_fini()
85 drvdata->request_mgr_handle = NULL; in cc_req_mgr_fini()
88 int cc_req_mgr_init(struct cc_drvdata *drvdata) in cc_req_mgr_init() argument
91 struct device *dev = drvdata_to_dev(drvdata); in cc_req_mgr_init()
100 drvdata->request_mgr_handle = req_mgr_h; in cc_req_mgr_init()
118 (unsigned long)drvdata); in cc_req_mgr_init()
120 req_mgr_h->hw_queue_size = cc_ioread(drvdata, in cc_req_mgr_init()
150 set_queue_last_ind(drvdata, &req_mgr_h->compl_desc); in cc_req_mgr_init()
155 cc_req_mgr_fini(drvdata); in cc_req_mgr_init()
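These matches come from the ccree driver's request manager (cc_request_mgr.c). The cc_req_mgr_init()/cc_req_mgr_fini() pair above (lines 60-155) allocates and tears down drvdata->request_mgr_handle: init registers the completion handler with drvdata as its argument (line 118), reads the HW queue size (line 120), prepares the completion descriptor (line 150), and unwinds through cc_req_mgr_fini() on failure (line 155). A minimal sketch of how a probe/remove path might drive the pair, assuming only the signatures visible in the matches; the cc_example_* wrappers are hypothetical:

static int cc_example_probe_step(struct cc_drvdata *drvdata)
{
        int rc;

        /* allocates drvdata->request_mgr_handle and the completion machinery */
        rc = cc_req_mgr_init(drvdata);
        if (rc) {
                dev_err(drvdata_to_dev(drvdata),
                        "request manager init failed: %d\n", rc);
                return rc;
        }
        return 0;
}

static void cc_example_remove_step(struct cc_drvdata *drvdata)
{
        /* frees the handle and NULLs drvdata->request_mgr_handle (line 85) */
        cc_req_mgr_fini(drvdata);
}
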
159 static void enqueue_seq(struct cc_drvdata *drvdata, struct cc_hw_desc seq[], in enqueue_seq() argument
163 void __iomem *reg = drvdata->cc_base + CC_REG(DSCRPTR_QUEUE_WORD0); in enqueue_seq()
164 struct device *dev = drvdata_to_dev(drvdata); in enqueue_seq()
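enqueue_seq() (lines 159-164) resolves the single register that feeds the descriptor queue, DSCRPTR_QUEUE_WORD0. A sketch of the word-by-word push that implies, assuming struct cc_hw_desc carries HW_DESC_SIZE_WORDS 32-bit words (those names come from the driver's queue definitions, not from the matches above):

static void example_enqueue_seq(struct cc_drvdata *drvdata,
                                struct cc_hw_desc seq[], unsigned int seq_len)
{
        void __iomem *reg = drvdata->cc_base + CC_REG(DSCRPTR_QUEUE_WORD0);
        unsigned int i, w;

        /* each descriptor is streamed into the same queue register,
         * one 32-bit word at a time */
        for (i = 0; i < seq_len; i++)
                for (w = 0; w < HW_DESC_SIZE_WORDS; w++)
                        writel_relaxed(seq[i].word[w], reg);

        /* make sure the queue writes are posted before continuing */
        wmb();
}
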
198 static int cc_queues_status(struct cc_drvdata *drvdata, in cc_queues_status() argument
203 struct device *dev = drvdata_to_dev(drvdata); in cc_queues_status()
222 cc_ioread(drvdata, CC_REG(DSCRPTR_QUEUE_CONTENT)); in cc_queues_status()
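cc_queues_status() (lines 198-222) is the gatekeeper every submit path calls first; line 222 shows it sizing the hardware queue by reading DSCRPTR_QUEUE_CONTENT. An illustrative version of that room check, assuming the register reports free descriptor slots and that a small bounded poll (CC_MAX_POLL_ITER-style) is acceptable:

static int example_hw_queue_has_room(struct cc_drvdata *drvdata,
                                     unsigned int total_seq_len)
{
        unsigned int poll;

        for (poll = 0; poll < 4 /* e.g. CC_MAX_POLL_ITER */; poll++) {
                u32 free_slots = cc_ioread(drvdata,
                                           CC_REG(DSCRPTR_QUEUE_CONTENT));

                /* enough room for the whole sequence? */
                if (free_slots >= total_seq_len)
                        return 0;
        }

        /* still no room after polling: let the caller back off or backlog */
        return -ENOSPC;
}
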
253 static int cc_do_send_request(struct cc_drvdata *drvdata, in cc_do_send_request() argument
258 struct cc_req_mgr_handle *req_mgr_h = drvdata->request_mgr_handle; in cc_do_send_request()
263 struct device *dev = drvdata_to_dev(drvdata); in cc_do_send_request()
275 rc = cc_get_iv(drvdata, cc_req->ivgen_dma_addr, in cc_do_send_request()
310 enqueue_seq(drvdata, iv_seq, iv_seq_len); in cc_do_send_request()
312 enqueue_seq(drvdata, desc, len); in cc_do_send_request()
315 enqueue_seq(drvdata, &req_mgr_h->compl_desc, 1); in cc_do_send_request()
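cc_do_send_request() (lines 253-315) fixes the submit order: an optional IV-generation sequence obtained through cc_get_iv() (line 275), then the caller's descriptors, then the manager's single completion descriptor. A condensed sketch of just that ordering; the add_comp flag and the skip-when-empty checks are assumptions, and locking and error handling are omitted:

static void example_push_request(struct cc_drvdata *drvdata,
                                 struct cc_req_mgr_handle *req_mgr_h,
                                 struct cc_hw_desc iv_seq[], unsigned int iv_seq_len,
                                 struct cc_hw_desc desc[], unsigned int len,
                                 bool add_comp)
{
        /* 1) IV-generation descriptors, if cc_get_iv() produced any */
        if (iv_seq_len)
                enqueue_seq(drvdata, iv_seq, iv_seq_len);

        /* 2) the caller's descriptor sequence */
        enqueue_seq(drvdata, desc, len);

        /* 3) the completion descriptor that makes the HW signal us */
        if (add_comp)
                enqueue_seq(drvdata, &req_mgr_h->compl_desc, 1);
}
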
335 static void cc_enqueue_backlog(struct cc_drvdata *drvdata, in cc_enqueue_backlog() argument
338 struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle; in cc_enqueue_backlog()
347 static void cc_proc_backlog(struct cc_drvdata *drvdata) in cc_proc_backlog() argument
349 struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle; in cc_proc_backlog()
355 struct device *dev = drvdata_to_dev(drvdata); in cc_proc_backlog()
381 rc = cc_queues_status(drvdata, mgr, total_len); in cc_proc_backlog()
392 rc = cc_do_send_request(drvdata, &bli->creq, bli->desc, in cc_proc_backlog()
411 int cc_send_request(struct cc_drvdata *drvdata, struct cc_crypto_req *cc_req, in cc_send_request() argument
416 struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle; in cc_send_request()
419 struct device *dev = drvdata_to_dev(drvdata); in cc_send_request()
431 rc = cc_queues_status(drvdata, mgr, total_len); in cc_send_request()
451 cc_enqueue_backlog(drvdata, bli); in cc_send_request()
456 rc = cc_do_send_request(drvdata, cc_req, desc, len, false, in cc_send_request()
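cc_send_request() (lines 411-456) is the asynchronous entry point: it asks cc_queues_status() for room and, when the queues are full but the caller allowed backlogging, parks the request via cc_enqueue_backlog() instead of failing; cc_proc_backlog() re-submits it later from the completion path. A sketch of that decision, where backlog_ok, struct cc_bl_item and the "ivgen" name for the cut-off last argument of line 456 are assumptions, and IV-descriptor accounting (total_len, line 431) is ignored:

static int example_async_submit(struct cc_drvdata *drvdata,
                                struct cc_crypto_req *cc_req,
                                struct cc_hw_desc *desc, unsigned int len,
                                bool backlog_ok, struct cc_bl_item *bli,
                                bool ivgen)
{
        struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;
        int rc;

        rc = cc_queues_status(drvdata, mgr, len);
        if (rc == -ENOSPC && backlog_ok) {
                /* no room, but the caller may backlog: park the request;
                 * cc_proc_backlog() re-submits it once completions free space */
                cc_enqueue_backlog(drvdata, bli);
                return -EBUSY;
        }
        if (rc)
                return rc;

        /* flag pair mirrors line 456; "ivgen" is an assumption */
        return cc_do_send_request(drvdata, cc_req, desc, len, false, ivgen);
}
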
463 int cc_send_sync_request(struct cc_drvdata *drvdata, in cc_send_sync_request() argument
468 struct device *dev = drvdata_to_dev(drvdata); in cc_send_sync_request()
469 struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle; in cc_send_sync_request()
483 rc = cc_queues_status(drvdata, mgr, len + 1); in cc_send_sync_request()
493 wait_for_completion_interruptible(&drvdata->hw_queue_avail); in cc_send_sync_request()
494 reinit_completion(&drvdata->hw_queue_avail); in cc_send_sync_request()
497 rc = cc_do_send_request(drvdata, cc_req, desc, len, true, false); in cc_send_sync_request()
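cc_send_sync_request() (lines 463-497) is the blocking counterpart: it keeps asking for len + 1 slots (the request plus one completion descriptor) and sleeps on drvdata->hw_queue_avail, which complete_request() signals (line 555), until space opens up. A sketch of that retry loop; the hw_lock spinlock name is an assumption, and PM handling, error paths and the final wait for the request's own completion are left out:

static int example_sync_submit(struct cc_drvdata *drvdata,
                               struct cc_crypto_req *cc_req,
                               struct cc_hw_desc *desc, unsigned int len)
{
        struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;
        int rc;

        while (true) {
                spin_lock_bh(&mgr->hw_lock);
                rc = cc_queues_status(drvdata, mgr, len + 1);
                if (!rc)
                        break;  /* room found: submit while still holding the lock */
                spin_unlock_bh(&mgr->hw_lock);

                /* sleep until complete_request() reports freed slots */
                wait_for_completion_interruptible(&drvdata->hw_queue_avail);
                reinit_completion(&drvdata->hw_queue_avail);
        }

        /* flag pair mirrors line 497 */
        rc = cc_do_send_request(drvdata, cc_req, desc, len, true, false);
        spin_unlock_bh(&mgr->hw_lock);
        return rc;
}
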
520 int send_request_init(struct cc_drvdata *drvdata, struct cc_hw_desc *desc, in send_request_init() argument
523 struct cc_req_mgr_handle *req_mgr_h = drvdata->request_mgr_handle; in send_request_init()
529 rc = cc_queues_status(drvdata, req_mgr_h, total_seq_len); in send_request_init()
533 set_queue_last_ind(drvdata, &desc[(len - 1)]); in send_request_init()
541 enqueue_seq(drvdata, desc, len); in send_request_init()
545 cc_ioread(drvdata, CC_REG(DSCRPTR_QUEUE_CONTENT)); in send_request_init()
550 void complete_request(struct cc_drvdata *drvdata) in complete_request() argument
553 drvdata->request_mgr_handle; in complete_request()
555 complete(&drvdata->hw_queue_avail); in complete_request()
567 struct cc_drvdata *drvdata = in comp_work_handler() local
570 comp_handler((unsigned long)drvdata); in comp_work_handler()
574 static void proc_completions(struct cc_drvdata *drvdata) in proc_completions() argument
577 struct device *dev = drvdata_to_dev(drvdata); in proc_completions()
579 drvdata->request_mgr_handle; in proc_completions()
609 static inline u32 cc_axi_comp_count(struct cc_drvdata *drvdata) in cc_axi_comp_count() argument
612 cc_ioread(drvdata, drvdata->axim_mon_offset)); in cc_axi_comp_count()
618 struct cc_drvdata *drvdata = (struct cc_drvdata *)devarg; in comp_handler() local
620 drvdata->request_mgr_handle; in comp_handler()
624 irq = (drvdata->irq & CC_COMP_IRQ_MASK); in comp_handler()
630 cc_iowrite(drvdata, CC_REG(HOST_ICR), CC_COMP_IRQ_MASK); in comp_handler()
636 cc_axi_comp_count(drvdata); in comp_handler()
640 proc_completions(drvdata); in comp_handler()
645 cc_axi_comp_count(drvdata); in comp_handler()
648 cc_iowrite(drvdata, CC_REG(HOST_ICR), in comp_handler()
652 cc_axi_comp_count(drvdata); in comp_handler()
658 cc_iowrite(drvdata, CC_REG(HOST_IMR), in comp_handler()
659 cc_ioread(drvdata, CC_REG(HOST_IMR)) & ~irq); in comp_handler()
661 cc_proc_backlog(drvdata); in comp_handler()
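comp_handler() (lines 618-661) is the completion handler that receives drvdata cast through an unsigned long (registered that way at line 118): it acks the completion interrupt through HOST_ICR, drains completions reported by the AXI monitor via proc_completions(), unmasks the interrupt in HOST_IMR, and finally lets cc_proc_backlog() retry parked requests. A condensed sketch of that shape; the real handler's axi_completed bookkeeping and re-check loop are simplified, and it is assumed the monitor count drops as completions are consumed:

static void example_comp_handler(unsigned long devarg)
{
        struct cc_drvdata *drvdata = (struct cc_drvdata *)devarg;
        u32 irq = drvdata->irq & CC_COMP_IRQ_MASK;

        if (irq & CC_COMP_IRQ_MASK) {
                /* clear the completion interrupt before unmasking it later */
                cc_iowrite(drvdata, CC_REG(HOST_ICR), CC_COMP_IRQ_MASK);

                /* drain while the AXI monitor still reports completions */
                while (cc_axi_comp_count(drvdata))
                        proc_completions(drvdata);
        }

        /* nothing left to do: unmask the completion interrupt (lines 658-659) */
        cc_iowrite(drvdata, CC_REG(HOST_IMR),
                   cc_ioread(drvdata, CC_REG(HOST_IMR)) & ~irq);

        /* queue space may have freed up: re-submit any backlogged requests */
        cc_proc_backlog(drvdata);
}
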
669 int cc_resume_req_queue(struct cc_drvdata *drvdata) in cc_resume_req_queue() argument
672 drvdata->request_mgr_handle; in cc_resume_req_queue()
685 int cc_suspend_req_queue(struct cc_drvdata *drvdata) in cc_suspend_req_queue() argument
688 drvdata->request_mgr_handle; in cc_suspend_req_queue()
703 bool cc_req_queue_suspended(struct cc_drvdata *drvdata) in cc_req_queue_suspended() argument
706 drvdata->request_mgr_handle; in cc_req_queue_suspended()
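The last three helpers (lines 669-706) gate the request queue for runtime PM: cc_suspend_req_queue() marks the queue suspended (returning nonzero when it cannot), cc_resume_req_queue() clears that state, and cc_req_queue_suspended() lets the submit paths check it. An illustrative pair of runtime-PM callbacks built on them; the cc_example_pm_* wrappers are hypothetical, only the three helpers come from the matches:

static int cc_example_pm_suspend(struct device *dev)
{
        struct cc_drvdata *drvdata = dev_get_drvdata(dev);
        int rc;

        /* refuse to power down if the queue cannot be suspended yet */
        rc = cc_suspend_req_queue(drvdata);
        if (rc)
                return rc;

        /* ...stop clocks, save state... */
        return 0;
}

static int cc_example_pm_resume(struct device *dev)
{
        struct cc_drvdata *drvdata = dev_get_drvdata(dev);

        /* ...restore state, start clocks... */
        return cc_resume_req_queue(drvdata);
}
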