Lines matching refs: req_mgr_h
88 struct cc_req_mgr_handle *req_mgr_h = drvdata->request_mgr_handle; in cc_req_mgr_fini() local
91 if (!req_mgr_h) in cc_req_mgr_fini()
94 if (req_mgr_h->dummy_comp_buff_dma) { in cc_req_mgr_fini()
95 dma_free_coherent(dev, sizeof(u32), req_mgr_h->dummy_comp_buff, in cc_req_mgr_fini()
96 req_mgr_h->dummy_comp_buff_dma); in cc_req_mgr_fini()
99 dev_dbg(dev, "max_used_hw_slots=%d\n", (req_mgr_h->hw_queue_size - in cc_req_mgr_fini()
100 req_mgr_h->min_free_hw_slots)); in cc_req_mgr_fini()
101 dev_dbg(dev, "max_used_sw_slots=%d\n", req_mgr_h->max_used_sw_slots); in cc_req_mgr_fini()
104 flush_workqueue(req_mgr_h->workq); in cc_req_mgr_fini()
105 destroy_workqueue(req_mgr_h->workq); in cc_req_mgr_fini()
108 tasklet_kill(&req_mgr_h->comptask); in cc_req_mgr_fini()
110 kfree_sensitive(req_mgr_h); in cc_req_mgr_fini()
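
The teardown above ends with kfree_sensitive(), which zeroes the request-manager handle before returning it to the allocator, presumably because the embedded SW request queue may still hold request state. A minimal userspace analogue of that last step, using hypothetical names and glibc's explicit_bzero() (not the driver's actual code):

#define _DEFAULT_SOURCE		/* for explicit_bzero() on glibc >= 2.25 */
#include <stdlib.h>
#include <string.h>

struct demo_req_mgr_handle {
	unsigned int hw_queue_size;
	unsigned int min_free_hw_slots;
	unsigned char scratch[64];	/* stand-in for cached request state */
};

static void demo_req_mgr_fini(struct demo_req_mgr_handle *h)
{
	if (!h)
		return;
	/* wipe before free, mirroring what kfree_sensitive() does in-kernel */
	explicit_bzero(h, sizeof(*h));
	free(h);
}

int main(void)
{
	struct demo_req_mgr_handle *h = calloc(1, sizeof(*h));

	demo_req_mgr_fini(h);
	return 0;
}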
116 struct cc_req_mgr_handle *req_mgr_h; in cc_req_mgr_init() local
120 req_mgr_h = kzalloc(sizeof(*req_mgr_h), GFP_KERNEL); in cc_req_mgr_init()
121 if (!req_mgr_h) { in cc_req_mgr_init()
126 drvdata->request_mgr_handle = req_mgr_h; in cc_req_mgr_init()
128 spin_lock_init(&req_mgr_h->hw_lock); in cc_req_mgr_init()
129 spin_lock_init(&req_mgr_h->bl_lock); in cc_req_mgr_init()
130 INIT_LIST_HEAD(&req_mgr_h->backlog); in cc_req_mgr_init()
134 req_mgr_h->workq = create_singlethread_workqueue("ccree"); in cc_req_mgr_init()
135 if (!req_mgr_h->workq) { in cc_req_mgr_init()
140 INIT_DELAYED_WORK(&req_mgr_h->compwork, comp_work_handler); in cc_req_mgr_init()
143 tasklet_init(&req_mgr_h->comptask, comp_handler, in cc_req_mgr_init()
146 req_mgr_h->hw_queue_size = cc_ioread(drvdata, in cc_req_mgr_init()
148 dev_dbg(dev, "hw_queue_size=0x%08X\n", req_mgr_h->hw_queue_size); in cc_req_mgr_init()
149 if (req_mgr_h->hw_queue_size < MIN_HW_QUEUE_SIZE) { in cc_req_mgr_init()
151 req_mgr_h->hw_queue_size, MIN_HW_QUEUE_SIZE); in cc_req_mgr_init()
155 req_mgr_h->min_free_hw_slots = req_mgr_h->hw_queue_size; in cc_req_mgr_init()
156 req_mgr_h->max_used_sw_slots = 0; in cc_req_mgr_init()
159 req_mgr_h->dummy_comp_buff = in cc_req_mgr_init()
161 &req_mgr_h->dummy_comp_buff_dma, in cc_req_mgr_init()
163 if (!req_mgr_h->dummy_comp_buff) { in cc_req_mgr_init()
171 hw_desc_init(&req_mgr_h->compl_desc); in cc_req_mgr_init()
172 set_din_const(&req_mgr_h->compl_desc, 0, sizeof(u32)); in cc_req_mgr_init()
173 set_dout_dlli(&req_mgr_h->compl_desc, req_mgr_h->dummy_comp_buff_dma, in cc_req_mgr_init()
175 set_flow_mode(&req_mgr_h->compl_desc, BYPASS); in cc_req_mgr_init()
176 set_queue_last_ind(drvdata, &req_mgr_h->compl_desc); in cc_req_mgr_init()
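
The last fragments of cc_req_mgr_init() above build the completion descriptor once at init time: it always writes a constant zero word to the dummy completion buffer in BYPASS mode and is flagged as the last descriptor of the queued sequence, so cc_do_send_request() can append it verbatim later. A minimal userspace sketch of that build-once/reuse pattern, with made-up descriptor fields standing in for the real hw_desc_init()/set_*() helpers:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

struct demo_hw_desc {
	uint32_t din_const;	/* constant input data (0 in the driver) */
	uint64_t dout_addr;	/* DMA address of the dummy completion word */
	uint32_t dout_size;
	int flow_mode;		/* BYPASS in the driver */
	int queue_last;		/* marks the end of the HW sequence */
};

struct demo_req_mgr {
	uint64_t dummy_comp_buff_dma;
	struct demo_hw_desc compl_desc;	/* built once at init, reused per request */
};

static void demo_init_compl_desc(struct demo_req_mgr *h)
{
	memset(&h->compl_desc, 0, sizeof(h->compl_desc));
	h->compl_desc.din_const = 0;
	h->compl_desc.dout_addr = h->dummy_comp_buff_dma;
	h->compl_desc.dout_size = sizeof(uint32_t);
	h->compl_desc.flow_mode = 1;	/* pretend 1 == BYPASS */
	h->compl_desc.queue_last = 1;
}

int main(void)
{
	struct demo_req_mgr h = { .dummy_comp_buff_dma = 0x1000 };

	demo_init_compl_desc(&h);
	printf("compl desc targets %#llx\n",
	       (unsigned long long)h.compl_desc.dout_addr);
	return 0;
}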
226 struct cc_req_mgr_handle *req_mgr_h, in cc_queues_status() argument
236 if (((req_mgr_h->req_queue_head + 1) & (MAX_REQUEST_QUEUE_SIZE - 1)) == in cc_queues_status()
237 req_mgr_h->req_queue_tail) { in cc_queues_status()
239 req_mgr_h->req_queue_head, MAX_REQUEST_QUEUE_SIZE); in cc_queues_status()
243 if (req_mgr_h->q_free_slots >= total_seq_len) in cc_queues_status()
248 req_mgr_h->q_free_slots = in cc_queues_status()
250 if (req_mgr_h->q_free_slots < req_mgr_h->min_free_hw_slots) in cc_queues_status()
251 req_mgr_h->min_free_hw_slots = req_mgr_h->q_free_slots; in cc_queues_status()
253 if (req_mgr_h->q_free_slots >= total_seq_len) { in cc_queues_status()
259 req_mgr_h->q_free_slots, total_seq_len); in cc_queues_status()
263 req_mgr_h->req_queue_head, MAX_REQUEST_QUEUE_SIZE, in cc_queues_status()
264 req_mgr_h->q_free_slots, total_seq_len); in cc_queues_status()
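
cc_queues_status() treats the SW request queue as a power-of-two ring: the queue is full when head + 1, masked by MAX_REQUEST_QUEUE_SIZE - 1, lands on tail, and the HW queue register is only re-read when total_seq_len descriptors do not fit in the cached q_free_slots. A self-contained sketch of the full-queue test (illustrative names, deliberately small queue size):

#include <stdbool.h>
#include <stdio.h>

#define DEMO_QUEUE_SIZE 8	/* must be a power of two, like MAX_REQUEST_QUEUE_SIZE */

static bool demo_sw_queue_full(unsigned int head, unsigned int tail)
{
	/* advancing head by one (mod size) would collide with tail */
	return ((head + 1) & (DEMO_QUEUE_SIZE - 1)) == tail;
}

int main(void)
{
	/* head has wrapped and sits one slot behind tail: queue is full */
	printf("full=%d\n", demo_sw_queue_full(2, 3));	/* prints full=1 */
	printf("full=%d\n", demo_sw_queue_full(0, 3));	/* prints full=0 */
	return 0;
}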
284 struct cc_req_mgr_handle *req_mgr_h = drvdata->request_mgr_handle; in cc_do_send_request() local
289 used_sw_slots = ((req_mgr_h->req_queue_head - in cc_do_send_request()
290 req_mgr_h->req_queue_tail) & in cc_do_send_request()
292 if (used_sw_slots > req_mgr_h->max_used_sw_slots) in cc_do_send_request()
293 req_mgr_h->max_used_sw_slots = used_sw_slots; in cc_do_send_request()
296 req_mgr_h->req_queue[req_mgr_h->req_queue_head] = *cc_req; in cc_do_send_request()
297 req_mgr_h->req_queue_head = (req_mgr_h->req_queue_head + 1) & in cc_do_send_request()
300 dev_dbg(dev, "Enqueue request head=%u\n", req_mgr_h->req_queue_head); in cc_do_send_request()
314 enqueue_seq(drvdata, &req_mgr_h->compl_desc, 1); in cc_do_send_request()
318 if (req_mgr_h->q_free_slots < total_seq_len) { in cc_do_send_request()
324 req_mgr_h->q_free_slots = 0; in cc_do_send_request()
327 req_mgr_h->q_free_slots -= total_seq_len; in cc_do_send_request()
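
cc_do_send_request() computes SW queue occupancy as (head - tail) masked by the same power-of-two size, which stays correct after head wraps below tail because the subtraction is done in unsigned arithmetic, and it keeps the high-watermark that cc_req_mgr_fini() later prints. A small sketch of that bookkeeping under the same assumptions:

#include <stdio.h>

#define DEMO_QUEUE_SIZE 8	/* power of two, as above */

static unsigned int demo_used_slots(unsigned int head, unsigned int tail)
{
	/* unsigned wraparound is well defined, so this works even when head < tail */
	return (head - tail) & (DEMO_QUEUE_SIZE - 1);
}

int main(void)
{
	unsigned int max_used = 0, used;

	used = demo_used_slots(2, 6);	/* head wrapped: (2 - 6) & 7 == 4 */
	if (used > max_used)		/* track the high-watermark for debug output */
		max_used = used;
	printf("used=%u max_used=%u\n", used, max_used);
	return 0;
}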
505 struct cc_req_mgr_handle *req_mgr_h = drvdata->request_mgr_handle; in send_request_init() local
511 rc = cc_queues_status(drvdata, req_mgr_h, total_seq_len); in send_request_init()
526 req_mgr_h->q_free_slots = in send_request_init()