Lines Matching +full:bypass +full:- +full:slot +full:- +full:no

1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
78 static inline u32 cc_cpp_int_mask(enum cc_cpp_alg alg, int slot) in cc_cpp_int_mask() argument
81 slot = array_index_nospec(slot, CC_CPP_NUM_SLOTS); in cc_cpp_int_mask()
83 return cc_cpp_int_masks[alg][slot]; in cc_cpp_int_mask()
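
The three lines above are the per-slot interrupt-mask lookup: the slot index is passed through array_index_nospec() before it is used to index cc_cpp_int_masks, so a mispredicted bounds check cannot be turned into an out-of-bounds read under speculation. Below is a minimal user-space sketch of the same bounded-lookup shape; array_index_nospec() is a kernel-only helper, so the table contents, the sizes and the mask-based clamp here are illustrative stand-ins, not the driver's values.

#include <stdint.h>
#include <stdio.h>

#define NUM_ALGS  2
#define NUM_SLOTS 8				/* power of two, for the clamp below */

/* Illustrative mask table; the real driver builds these from HOST_IRR bits. */
static const uint32_t int_masks[NUM_ALGS][NUM_SLOTS] = {
	{ 0x0001, 0x0002, 0x0004, 0x0008, 0x0010, 0x0020, 0x0040, 0x0080 },
	{ 0x0100, 0x0200, 0x0400, 0x0800, 0x1000, 0x2000, 0x4000, 0x8000 },
};

static uint32_t int_mask_lookup(unsigned int alg, unsigned int slot)
{
	if (alg >= NUM_ALGS || slot >= NUM_SLOTS)
		return 0;			/* reject out-of-range input */

	/* Kernel code does: slot = array_index_nospec(slot, NUM_SLOTS);
	 * a crude stand-in is used here purely for illustration. */
	slot &= NUM_SLOTS - 1;

	return int_masks[alg][slot];
}

int main(void)
{
	printf("mask = 0x%08x\n", (unsigned int)int_mask_lookup(1, 3));
	return 0;
}
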
88 struct cc_req_mgr_handle *req_mgr_h = drvdata->request_mgr_handle; in cc_req_mgr_fini()
94 if (req_mgr_h->dummy_comp_buff_dma) { in cc_req_mgr_fini()
95 dma_free_coherent(dev, sizeof(u32), req_mgr_h->dummy_comp_buff, in cc_req_mgr_fini()
96 req_mgr_h->dummy_comp_buff_dma); in cc_req_mgr_fini()
99 dev_dbg(dev, "max_used_hw_slots=%d\n", (req_mgr_h->hw_queue_size - in cc_req_mgr_fini()
100 req_mgr_h->min_free_hw_slots)); in cc_req_mgr_fini()
101 dev_dbg(dev, "max_used_sw_slots=%d\n", req_mgr_h->max_used_sw_slots); in cc_req_mgr_fini()
104 destroy_workqueue(req_mgr_h->workq); in cc_req_mgr_fini()
107 tasklet_kill(&req_mgr_h->comptask); in cc_req_mgr_fini()
110 drvdata->request_mgr_handle = NULL; in cc_req_mgr_fini()
121 rc = -ENOMEM; in cc_req_mgr_init()
125 drvdata->request_mgr_handle = req_mgr_h; in cc_req_mgr_init()
127 spin_lock_init(&req_mgr_h->hw_lock); in cc_req_mgr_init()
128 spin_lock_init(&req_mgr_h->bl_lock); in cc_req_mgr_init()
129 INIT_LIST_HEAD(&req_mgr_h->backlog); in cc_req_mgr_init()
133 req_mgr_h->workq = create_singlethread_workqueue("ccree"); in cc_req_mgr_init()
134 if (!req_mgr_h->workq) { in cc_req_mgr_init()
136 rc = -ENOMEM; in cc_req_mgr_init()
139 INIT_DELAYED_WORK(&req_mgr_h->compwork, comp_work_handler); in cc_req_mgr_init()
142 tasklet_init(&req_mgr_h->comptask, comp_handler, in cc_req_mgr_init()
145 req_mgr_h->hw_queue_size = cc_ioread(drvdata, in cc_req_mgr_init()
147 dev_dbg(dev, "hw_queue_size=0x%08X\n", req_mgr_h->hw_queue_size); in cc_req_mgr_init()
148 if (req_mgr_h->hw_queue_size < MIN_HW_QUEUE_SIZE) { in cc_req_mgr_init()
150 req_mgr_h->hw_queue_size, MIN_HW_QUEUE_SIZE); in cc_req_mgr_init()
151 rc = -ENOMEM; in cc_req_mgr_init()
154 req_mgr_h->min_free_hw_slots = req_mgr_h->hw_queue_size; in cc_req_mgr_init()
155 req_mgr_h->max_used_sw_slots = 0; in cc_req_mgr_init()
158 req_mgr_h->dummy_comp_buff = in cc_req_mgr_init()
160 &req_mgr_h->dummy_comp_buff_dma, in cc_req_mgr_init()
162 if (!req_mgr_h->dummy_comp_buff) { in cc_req_mgr_init()
165 rc = -ENOMEM; in cc_req_mgr_init()
170 hw_desc_init(&req_mgr_h->compl_desc); in cc_req_mgr_init()
171 set_din_const(&req_mgr_h->compl_desc, 0, sizeof(u32)); in cc_req_mgr_init()
172 set_dout_dlli(&req_mgr_h->compl_desc, req_mgr_h->dummy_comp_buff_dma, in cc_req_mgr_init()
174 set_flow_mode(&req_mgr_h->compl_desc, BYPASS); in cc_req_mgr_init()
175 set_queue_last_ind(drvdata, &req_mgr_h->compl_desc); in cc_req_mgr_init()
188 void __iomem *reg = drvdata->cc_base + CC_REG(DSCRPTR_QUEUE_WORD0); in enqueue_seq()
209 * request_mgr_complete() - Completion will take place if and only if user
235 if (((req_mgr_h->req_queue_head + 1) & (MAX_REQUEST_QUEUE_SIZE - 1)) == in cc_queues_status()
236 req_mgr_h->req_queue_tail) { in cc_queues_status()
238 req_mgr_h->req_queue_head, MAX_REQUEST_QUEUE_SIZE); in cc_queues_status()
239 return -ENOSPC; in cc_queues_status()
242 if (req_mgr_h->q_free_slots >= total_seq_len) in cc_queues_status()
247 req_mgr_h->q_free_slots = in cc_queues_status()
249 if (req_mgr_h->q_free_slots < req_mgr_h->min_free_hw_slots) in cc_queues_status()
250 req_mgr_h->min_free_hw_slots = req_mgr_h->q_free_slots; in cc_queues_status()
252 if (req_mgr_h->q_free_slots >= total_seq_len) { in cc_queues_status()
258 req_mgr_h->q_free_slots, total_seq_len); in cc_queues_status()
260 /* No room in the HW queue try again later */ in cc_queues_status()
262 req_mgr_h->req_queue_head, MAX_REQUEST_QUEUE_SIZE, in cc_queues_status()
263 req_mgr_h->q_free_slots, total_seq_len); in cc_queues_status()
264 return -ENOSPC; in cc_queues_status()
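
cc_queues_status() gates admission on two resources: the software request ring (the masked head/tail test above, which treats the ring as full when advancing head would land on tail) and the hardware descriptor queue (q_free_slots, refreshed from the device when the cached value is too small). The software test relies on MAX_REQUEST_QUEUE_SIZE being a power of two and on leaving one slot unused so that head == tail can mean "empty"; the same masked subtraction reappears below as used_sw_slots in cc_do_send_request(). A standalone sketch of that ring arithmetic follows, with an arbitrary size of 8 and hypothetical names.

#include <stdbool.h>
#include <stdio.h>

#define QUEUE_SIZE 8				/* must be a power of two */

struct ring {
	unsigned int head;			/* next slot to fill */
	unsigned int tail;			/* next slot to complete */
};

/* Full when advancing head would collide with tail: one slot is
 * sacrificed so that head == tail unambiguously means "empty". */
static bool ring_full(const struct ring *r)
{
	return ((r->head + 1) & (QUEUE_SIZE - 1)) == r->tail;
}

/* Occupancy, valid even after head has wrapped past tail. */
static unsigned int ring_used(const struct ring *r)
{
	return (r->head - r->tail) & (QUEUE_SIZE - 1);
}

int main(void)
{
	struct ring r = { .head = 0, .tail = 0 };

	while (!ring_full(&r))
		r.head = (r.head + 1) & (QUEUE_SIZE - 1);

	printf("used=%u full=%d\n", ring_used(&r), ring_full(&r));
	return 0;
}
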
268 * cc_do_send_request() - Enqueue caller request to crypto hardware.
283 struct cc_req_mgr_handle *req_mgr_h = drvdata->request_mgr_handle; in cc_do_send_request()
288 used_sw_slots = ((req_mgr_h->req_queue_head - in cc_do_send_request()
289 req_mgr_h->req_queue_tail) & in cc_do_send_request()
290 (MAX_REQUEST_QUEUE_SIZE - 1)); in cc_do_send_request()
291 if (used_sw_slots > req_mgr_h->max_used_sw_slots) in cc_do_send_request()
292 req_mgr_h->max_used_sw_slots = used_sw_slots; in cc_do_send_request()
294 /* Enqueue request - must be locked with HW lock*/ in cc_do_send_request()
295 req_mgr_h->req_queue[req_mgr_h->req_queue_head] = *cc_req; in cc_do_send_request()
296 req_mgr_h->req_queue_head = (req_mgr_h->req_queue_head + 1) & in cc_do_send_request()
297 (MAX_REQUEST_QUEUE_SIZE - 1); in cc_do_send_request()
299 dev_dbg(dev, "Enqueue request head=%u\n", req_mgr_h->req_queue_head); in cc_do_send_request()
304 * to make sure there are no outstanding memory writes in cc_do_send_request()
313 enqueue_seq(drvdata, &req_mgr_h->compl_desc, 1); in cc_do_send_request()
317 if (req_mgr_h->q_free_slots < total_seq_len) { in cc_do_send_request()
319 * with resuming power. Set the free slot count to 0 and hope in cc_do_send_request()
322 dev_err(dev, "HW free slot count mismatch."); in cc_do_send_request()
323 req_mgr_h->q_free_slots = 0; in cc_do_send_request()
326 req_mgr_h->q_free_slots -= total_seq_len; in cc_do_send_request()
333 struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle; in cc_enqueue_backlog()
336 spin_lock_bh(&mgr->bl_lock); in cc_enqueue_backlog()
337 list_add_tail(&bli->list, &mgr->backlog); in cc_enqueue_backlog()
338 ++mgr->bl_len; in cc_enqueue_backlog()
339 dev_dbg(dev, "+++bl len: %d\n", mgr->bl_len); in cc_enqueue_backlog()
340 spin_unlock_bh(&mgr->bl_lock); in cc_enqueue_backlog()
341 tasklet_schedule(&mgr->comptask); in cc_enqueue_backlog()
346 struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle; in cc_proc_backlog()
353 spin_lock(&mgr->bl_lock); in cc_proc_backlog()
355 while (mgr->bl_len) { in cc_proc_backlog()
356 bli = list_first_entry(&mgr->backlog, struct cc_bl_item, list); in cc_proc_backlog()
357 dev_dbg(dev, "---bl len: %d\n", mgr->bl_len); in cc_proc_backlog()
359 spin_unlock(&mgr->bl_lock); in cc_proc_backlog()
362 creq = &bli->creq; in cc_proc_backlog()
363 req = creq->user_arg; in cc_proc_backlog()
369 if (!bli->notif) { in cc_proc_backlog()
370 creq->user_cb(dev, req, -EINPROGRESS); in cc_proc_backlog()
371 bli->notif = true; in cc_proc_backlog()
374 spin_lock(&mgr->hw_lock); in cc_proc_backlog()
376 rc = cc_queues_status(drvdata, mgr, bli->len); in cc_proc_backlog()
379 * There is still no room in the FIFO for in cc_proc_backlog()
383 spin_unlock(&mgr->hw_lock); in cc_proc_backlog()
387 cc_do_send_request(drvdata, &bli->creq, bli->desc, bli->len, in cc_proc_backlog()
389 spin_unlock(&mgr->hw_lock); in cc_proc_backlog()
392 spin_lock(&mgr->bl_lock); in cc_proc_backlog()
393 list_del(&bli->list); in cc_proc_backlog()
394 --mgr->bl_len; in cc_proc_backlog()
398 spin_unlock(&mgr->bl_lock); in cc_proc_backlog()
406 struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle; in cc_send_request()
408 bool backlog_ok = req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG; in cc_send_request()
418 spin_lock_bh(&mgr->hw_lock); in cc_send_request()
423 rc = -ENOSPC; in cc_send_request()
426 if (rc == -ENOSPC && backlog_ok) { in cc_send_request()
427 spin_unlock_bh(&mgr->hw_lock); in cc_send_request()
432 return -ENOMEM; in cc_send_request()
435 memcpy(&bli->creq, cc_req, sizeof(*cc_req)); in cc_send_request()
436 memcpy(&bli->desc, desc, len * sizeof(*desc)); in cc_send_request()
437 bli->len = len; in cc_send_request()
438 bli->notif = false; in cc_send_request()
440 return -EBUSY; in cc_send_request()
445 rc = -EINPROGRESS; in cc_send_request()
448 spin_unlock_bh(&mgr->hw_lock); in cc_send_request()
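
The fragment above is the asynchronous submit path: the request is tried under the HW lock, and if the queues are full (-ENOSPC) and the caller set CRYPTO_TFM_REQ_MAY_BACKLOG, the request and its descriptors are copied into a cc_bl_item, parked on the backlog list, and -EBUSY is returned; a successful enqueue returns -EINPROGRESS. cc_proc_backlog() later drains the list, notifying the user once with -EINPROGRESS before the real completion fires. The user-space sketch below compresses that handshake; the queue check, the backlog array and the callback are all illustrative stand-ins for the driver's structures.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct request {
	bool may_backlog;			/* caller set the MAY_BACKLOG flag */
	void (*cb)(struct request *req, int err);
};

static bool hw_has_room;			/* stand-in for cc_queues_status() */
static struct request *backlog[16];		/* stand-in for the backlog list */
static unsigned int bl_put, bl_get;

static int try_enqueue(struct request *req)
{
	(void)req;
	if (!hw_has_room)
		return -ENOSPC;
	/* descriptors would be pushed to the HW queue here */
	return -EINPROGRESS;
}

static int submit(struct request *req)
{
	int rc = try_enqueue(req);

	if (rc == -ENOSPC && req->may_backlog) {
		backlog[bl_put++ & 15] = req;	/* park the request */
		return -EBUSY;			/* caller waits for -EINPROGRESS */
	}
	return rc;
}

static void drain_backlog(void)
{
	while (bl_get != bl_put && hw_has_room) {
		struct request *req = backlog[bl_get++ & 15];

		req->cb(req, -EINPROGRESS);	/* notify: queued for real now */
		try_enqueue(req);		/* final status arrives via cb later */
	}
}

static void notify(struct request *req, int err)
{
	(void)req;
	printf("callback: err=%d\n", err);
}

int main(void)
{
	struct request req = { .may_backlog = true, .cb = notify };

	hw_has_room = false;
	printf("submit: rc=%d\n", submit(&req));	/* -EBUSY: backlogged */

	hw_has_room = true;
	drain_backlog();				/* -EINPROGRESS via callback */
	return 0;
}
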
458 struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle; in cc_send_sync_request()
460 init_completion(&cc_req->seq_compl); in cc_send_sync_request()
461 cc_req->user_cb = request_mgr_complete; in cc_send_sync_request()
462 cc_req->user_arg = &cc_req->seq_compl; in cc_send_sync_request()
471 spin_lock_bh(&mgr->hw_lock); in cc_send_sync_request()
477 spin_unlock_bh(&mgr->hw_lock); in cc_send_sync_request()
478 wait_for_completion_interruptible(&drvdata->hw_queue_avail); in cc_send_sync_request()
479 reinit_completion(&drvdata->hw_queue_avail); in cc_send_sync_request()
483 spin_unlock_bh(&mgr->hw_lock); in cc_send_sync_request()
484 wait_for_completion(&cc_req->seq_compl); in cc_send_sync_request()
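
cc_send_sync_request() is the synchronous wrapper: it points user_cb at request_mgr_complete(), which simply signals the completion embedded in the request, then submits and blocks in wait_for_completion() until the interrupt path has run the callback (looping on hw_queue_avail when the queues are full). The pthreads program below is a user-space analogue of that shape, with a hand-rolled completion standing in for the kernel's struct completion; every name in it is illustrative, not the driver's API.

#include <pthread.h>
#include <stdio.h>

struct completion {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	int done;
};

static void init_completion(struct completion *c)
{
	pthread_mutex_init(&c->lock, NULL);
	pthread_cond_init(&c->cond, NULL);
	c->done = 0;
}

static void complete(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	c->done = 1;
	pthread_cond_signal(&c->cond);
	pthread_mutex_unlock(&c->lock);
}

static void wait_for_completion(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	while (!c->done)
		pthread_cond_wait(&c->cond, &c->lock);
	pthread_mutex_unlock(&c->lock);
}

/* Callback the "hardware" invokes when the request finishes, mirroring
 * the role of request_mgr_complete(). */
static void request_done_cb(void *arg)
{
	complete(arg);
}

static void *fake_engine(void *arg)
{
	request_done_cb(arg);			/* pretend the request completed */
	return NULL;
}

int main(void)
{
	struct completion seq_compl;
	pthread_t engine;

	init_completion(&seq_compl);
	pthread_create(&engine, NULL, fake_engine, &seq_compl);

	wait_for_completion(&seq_compl);	/* block until the callback fires */
	pthread_join(engine, NULL);
	puts("request completed");
	return 0;
}

(Build with cc -pthread.)
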
489 * send_request_init() - Enqueue caller request to crypto hardware during init
504 struct cc_req_mgr_handle *req_mgr_h = drvdata->request_mgr_handle; in send_request_init()
514 set_queue_last_ind(drvdata, &desc[(len - 1)]); in send_request_init()
519 * to make sure there are no outstanding memory writes in send_request_init()
525 req_mgr_h->q_free_slots = in send_request_init()
534 drvdata->request_mgr_handle; in complete_request()
536 complete(&drvdata->hw_queue_avail); in complete_request()
538 queue_delayed_work(request_mgr_handle->workq, in complete_request()
539 &request_mgr_handle->compwork, 0); in complete_request()
541 tasklet_schedule(&request_mgr_handle->comptask); in complete_request()
560 drvdata->request_mgr_handle; in proc_completions()
561 unsigned int *tail = &request_mgr_handle->req_queue_tail; in proc_completions()
562 unsigned int *head = &request_mgr_handle->req_queue_head; in proc_completions()
566 while (request_mgr_handle->axi_completed) { in proc_completions()
567 request_mgr_handle->axi_completed--; in proc_completions()
580 cc_req = &request_mgr_handle->req_queue[*tail]; in proc_completions()
582 if (cc_req->cpp.is_cpp) { in proc_completions()
584 dev_dbg(dev, "CPP request completion slot: %d alg:%d\n", in proc_completions()
585 cc_req->cpp.slot, cc_req->cpp.alg); in proc_completions()
586 mask = cc_cpp_int_mask(cc_req->cpp.alg, in proc_completions()
587 cc_req->cpp.slot); in proc_completions()
588 rc = (drvdata->irq & mask ? -EPERM : 0); in proc_completions()
590 drvdata->irq, rc); in proc_completions()
596 if (cc_req->user_cb) in proc_completions()
597 cc_req->user_cb(dev, cc_req->user_arg, rc); in proc_completions()
598 *tail = (*tail + 1) & (MAX_REQUEST_QUEUE_SIZE - 1); in proc_completions()
601 request_mgr_handle->axi_completed); in proc_completions()
609 cc_ioread(drvdata, drvdata->axim_mon_offset)); in cc_axi_comp_count()
612 /* Deferred service handler, run as interrupt-fired tasklet */
617 drvdata->request_mgr_handle; in comp_handler()
622 irq = (drvdata->irq & drvdata->comp_mask); in comp_handler()
631 request_mgr_handle->axi_completed += cc_axi_comp_count(drvdata); in comp_handler()
634 request_mgr_handle->axi_completed); in comp_handler()
636 while (request_mgr_handle->axi_completed) { in comp_handler()
638 drvdata->irq |= cc_ioread(drvdata, CC_REG(HOST_IRR)); in comp_handler()
639 irq = (drvdata->irq & drvdata->comp_mask); in comp_handler()
643 * request_mgr_handle->axi_completed is 0. in comp_handler()
645 request_mgr_handle->axi_completed += in comp_handler()
647 } while (request_mgr_handle->axi_completed > 0); in comp_handler()
651 request_mgr_handle->axi_completed += cc_axi_comp_count(drvdata); in comp_handler()
658 cc_ioread(drvdata, CC_REG(HOST_IMR)) & ~drvdata->comp_mask); in comp_handler()
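
The tasklet above drains completions in a nested loop: after acknowledging the interrupt it keeps folding fresh HOST_IRR bits and new cc_axi_comp_count() readings into axi_completed, and only re-enables the completion source in HOST_IMR once a final re-read still shows nothing pending, so a completion that lands mid-drain is not lost across the unmask. The sketch below collapses that into a single drain-and-recheck loop over a fake counter; it illustrates the idiom, not the driver's exact structure, and all names are stand-ins.

#include <stdio.h>

static unsigned int fake_axi_count;	/* completions seen by the fake AXI monitor */

/* Reading the monitor returns and consumes the pending count, the way
 * cc_axi_comp_count() reads the AXIM monitor register. */
static unsigned int read_axi_comp_count(void)
{
	unsigned int n = fake_axi_count;

	fake_axi_count = 0;
	return n;
}

static void process_one_completion(void)
{
	/* the real code pops the SW queue and runs the user callback here */
}

static void drain_completions(void)
{
	unsigned int completed = read_axi_comp_count();

	do {
		while (completed) {
			process_one_completion();
			completed--;
		}
		/* More work may have arrived while we were processing;
		 * re-read before concluding the queue is empty. */
		completed = read_axi_comp_count();
	} while (completed);

	/* Only at this point would the handler unmask the completion IRQ. */
}

int main(void)
{
	fake_axi_count = 3;
	drain_completions();
	puts("drained; safe to unmask");
	return 0;
}
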