
Searched refs:cmd_q (Results 1 – 25 of 29) sorted by relevance


/Linux-v5.10/drivers/crypto/ccp/
ccp-dev-v5.c
25 static u32 ccp_lsb_alloc(struct ccp_cmd_queue *cmd_q, unsigned int count) in ccp_lsb_alloc() argument
31 if (cmd_q->lsb >= 0) { in ccp_lsb_alloc()
32 start = (u32)bitmap_find_next_zero_area(cmd_q->lsbmap, in ccp_lsb_alloc()
36 bitmap_set(cmd_q->lsbmap, start, count); in ccp_lsb_alloc()
37 return start + cmd_q->lsb * LSB_SIZE; in ccp_lsb_alloc()
42 ccp = cmd_q->ccp; in ccp_lsb_alloc()
70 static void ccp_lsb_free(struct ccp_cmd_queue *cmd_q, unsigned int start, in ccp_lsb_free() argument
76 if (cmd_q->lsb == start) { in ccp_lsb_free()
78 bitmap_clear(cmd_q->lsbmap, start, count); in ccp_lsb_free()
81 struct ccp_device *ccp = cmd_q->ccp; in ccp_lsb_free()
[all …]
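
The ccp-dev-v5.c hits show an LSB slot allocator built on the kernel bitmap helpers (bitmap_find_next_zero_area/bitmap_set/bitmap_clear). A minimal sketch of that pattern follows, assuming a single flat bitmap; lsb_alloc/lsb_free and SLOT_COUNT are illustrative names, not the driver's own.

#include <linux/bitmap.h>
#include <linux/errno.h>

#define SLOT_COUNT 128                  /* illustrative number of slots */

static DECLARE_BITMAP(slot_map, SLOT_COUNT);

static int lsb_alloc(unsigned int count)
{
        unsigned long start;

        /* find a free run of 'count' slots, then mark it used */
        start = bitmap_find_next_zero_area(slot_map, SLOT_COUNT, 0, count, 0);
        if (start >= SLOT_COUNT)
                return -ENOSPC;
        bitmap_set(slot_map, start, count);
        return start;
}

static void lsb_free(unsigned int start, unsigned int count)
{
        bitmap_clear(slot_map, start, count);   /* release the run */
}
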
ccp-dev-v3.c
19 static u32 ccp_alloc_ksb(struct ccp_cmd_queue *cmd_q, unsigned int count) in ccp_alloc_ksb() argument
22 struct ccp_device *ccp = cmd_q->ccp; in ccp_alloc_ksb()
50 static void ccp_free_ksb(struct ccp_cmd_queue *cmd_q, unsigned int start, in ccp_free_ksb() argument
53 struct ccp_device *ccp = cmd_q->ccp; in ccp_free_ksb()
69 static unsigned int ccp_get_free_slots(struct ccp_cmd_queue *cmd_q) in ccp_get_free_slots() argument
71 return CMD_Q_DEPTH(ioread32(cmd_q->reg_status)); in ccp_get_free_slots()
76 struct ccp_cmd_queue *cmd_q = op->cmd_q; in ccp_do_cmd() local
77 struct ccp_device *ccp = cmd_q->ccp; in ccp_do_cmd()
87 cmd_q->free_slots--; in ccp_do_cmd()
89 cr0 = (cmd_q->id << REQ0_CMD_Q_SHIFT) in ccp_do_cmd()
[all …]
ccp-debugfs.c
115 struct ccp_cmd_queue *cmd_q = &ccp->cmd_q[i]; in ccp5_debugfs_stats_read() local
117 total_ops += cmd_q->total_ops; in ccp5_debugfs_stats_read()
118 total_aes_ops += cmd_q->total_aes_ops; in ccp5_debugfs_stats_read()
119 total_xts_aes_ops += cmd_q->total_xts_aes_ops; in ccp5_debugfs_stats_read()
120 total_3des_ops += cmd_q->total_3des_ops; in ccp5_debugfs_stats_read()
121 total_sha_ops += cmd_q->total_sha_ops; in ccp5_debugfs_stats_read()
122 total_rsa_ops += cmd_q->total_rsa_ops; in ccp5_debugfs_stats_read()
123 total_pt_ops += cmd_q->total_pt_ops; in ccp5_debugfs_stats_read()
124 total_ecc_ops += cmd_q->total_ecc_ops; in ccp5_debugfs_stats_read()
158 static void ccp5_debugfs_reset_queue_stats(struct ccp_cmd_queue *cmd_q) in ccp5_debugfs_reset_queue_stats() argument
[all …]
ccp-ops.c
153 struct ccp_cmd_queue *cmd_q, in ccp_init_dm_workarea() argument
162 wa->dev = cmd_q->ccp->dev; in ccp_init_dm_workarea()
166 wa->dma_pool = cmd_q->dma_pool; in ccp_init_dm_workarea()
262 static void ccp_free_data(struct ccp_data *data, struct ccp_cmd_queue *cmd_q) in ccp_free_data() argument
268 static int ccp_init_data(struct ccp_data *data, struct ccp_cmd_queue *cmd_q, in ccp_init_data() argument
277 ret = ccp_init_sg_workarea(&data->sg_wa, cmd_q->ccp->dev, sg, sg_len, in ccp_init_data()
282 ret = ccp_init_dm_workarea(&data->dm_wa, cmd_q, dm_len, dir); in ccp_init_data()
289 ccp_free_data(data, cmd_q); in ccp_init_data()
426 static int ccp_copy_to_from_sb(struct ccp_cmd_queue *cmd_q, in ccp_copy_to_from_sb() argument
434 op.cmd_q = cmd_q; in ccp_copy_to_from_sb()
[all …]
ccp-dev.c
327 if (ccp->cmd_q[i].active) in ccp_enqueue_cmd()
339 wake_up_process(ccp->cmd_q[i].kthread); in ccp_enqueue_cmd()
361 if (ccp->cmd_q[i].active) in ccp_do_cmd_backlog()
371 wake_up_process(ccp->cmd_q[i].kthread); in ccp_do_cmd_backlog()
374 static struct ccp_cmd *ccp_dequeue_cmd(struct ccp_cmd_queue *cmd_q) in ccp_dequeue_cmd() argument
376 struct ccp_device *ccp = cmd_q->ccp; in ccp_dequeue_cmd()
383 cmd_q->active = 0; in ccp_dequeue_cmd()
386 cmd_q->suspended = 1; in ccp_dequeue_cmd()
395 cmd_q->active = 1; in ccp_dequeue_cmd()
436 struct ccp_cmd_queue *cmd_q = (struct ccp_cmd_queue *)data; in ccp_cmd_queue_thread() local
[all …]
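
The ccp-dev.c hits (cmd_q->active, wake_up_process(... kthread), ccp_dequeue_cmd) point at a per-queue kernel thread that the enqueuer wakes. A sketch of that hand-off under assumed, simplified names; the real driver tracks more state (backlog, suspend) than shown here.

#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

struct cmd_queue_ctx {
        struct list_head cmds;
        spinlock_t lock;
        struct task_struct *kthread;
};

static int cmd_queue_thread(void *data)
{
        struct cmd_queue_ctx *q = data;

        while (!kthread_should_stop()) {
                struct list_head *cmd = NULL;

                /* set the state before checking, so a wakeup is not lost */
                set_current_state(TASK_INTERRUPTIBLE);

                spin_lock(&q->lock);
                if (!list_empty(&q->cmds)) {
                        cmd = q->cmds.next;
                        list_del(cmd);
                }
                spin_unlock(&q->lock);

                if (!cmd) {
                        schedule();     /* sleep until cmd_enqueue() wakes us */
                        continue;
                }
                __set_current_state(TASK_RUNNING);
                /* process 'cmd' here */
        }
        __set_current_state(TASK_RUNNING);
        return 0;
}

static void cmd_enqueue(struct cmd_queue_ctx *q, struct list_head *cmd)
{
        spin_lock(&q->lock);
        list_add_tail(cmd, &q->cmds);
        spin_unlock(&q->lock);
        wake_up_process(q->kthread);    /* kick the queue's worker */
}
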
ccp-dev.h
380 struct ccp_cmd_queue cmd_q[MAX_HW_QUEUES]; member
532 struct ccp_cmd_queue *cmd_q; member
641 int ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd);
/Linux-v5.10/drivers/scsi/ibmvscsi_tgt/
ibmvscsi_tgt.c
277 bytes = vscsi->cmd_q.size * PAGE_SIZE; in ibmvscsis_free_command_q()
278 memset(vscsi->cmd_q.base_addr, 0, bytes); in ibmvscsis_free_command_q()
279 vscsi->cmd_q.index = 0; in ibmvscsis_free_command_q()
362 crq = ibmvscsis_cmd_q_dequeue(vscsi->cmd_q.mask, &vscsi->cmd_q.index, in ibmvscsis_check_init_msg()
363 vscsi->cmd_q.base_addr); in ibmvscsis_check_init_msg()
376 crq = ibmvscsis_cmd_q_dequeue(vscsi->cmd_q.mask, in ibmvscsis_check_init_msg()
377 &vscsi->cmd_q.index, in ibmvscsis_check_init_msg()
378 vscsi->cmd_q.base_addr); in ibmvscsis_check_init_msg()
895 bytes = vscsi->cmd_q.size * PAGE_SIZE; in ibmvscsis_reset_queue()
897 vscsi->cmd_q.crq_token, bytes); in ibmvscsis_reset_queue()
[all …]
ibmvscsi_tgt.h
268 struct cmd_queue cmd_q; member
/Linux-v5.10/drivers/platform/olpc/
olpc-ec.c
44 struct list_head cmd_q; member
86 if (!list_empty(&ec->cmd_q)) { in olpc_ec_worker()
87 desc = list_first_entry(&ec->cmd_q, struct ec_cmd_desc, node); in olpc_ec_worker()
121 list_add_tail(&desc->node, &ec->cmd_q); in queue_ec_descriptor()
421 INIT_LIST_HEAD(&ec->cmd_q); in olpc_ec_probe()
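
olpc-ec.c keeps cmd_q as a plain struct list_head that a worker drains. A minimal sketch of that producer/worker queue; the struct, lock, and function names here are illustrative, not the driver's.

#include <linux/list.h>
#include <linux/spinlock.h>

struct cmd_desc {
        struct list_head node;
        /* command payload would go here */
};

static LIST_HEAD(cmd_q);
static DEFINE_SPINLOCK(cmd_q_lock);

static void queue_cmd(struct cmd_desc *desc)
{
        unsigned long flags;

        spin_lock_irqsave(&cmd_q_lock, flags);
        list_add_tail(&desc->node, &cmd_q);     /* FIFO: append at the tail */
        spin_unlock_irqrestore(&cmd_q_lock, flags);
}

static struct cmd_desc *dequeue_cmd(void)
{
        struct cmd_desc *desc = NULL;
        unsigned long flags;

        spin_lock_irqsave(&cmd_q_lock, flags);
        if (!list_empty(&cmd_q)) {
                desc = list_first_entry(&cmd_q, struct cmd_desc, node);
                list_del(&desc->node);          /* worker takes the oldest entry */
        }
        spin_unlock_irqrestore(&cmd_q_lock, flags);
        return desc;
}
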
/Linux-v5.10/net/bluetooth/
hci_request.c
39 skb_queue_head_init(&req->cmd_q); in hci_req_init()
46 skb_queue_purge(&req->cmd_q); in hci_req_purge()
61 BT_DBG("length %u", skb_queue_len(&req->cmd_q)); in req_run()
67 skb_queue_purge(&req->cmd_q); in req_run()
72 if (skb_queue_empty(&req->cmd_q)) in req_run()
75 skb = skb_peek_tail(&req->cmd_q); in req_run()
83 spin_lock_irqsave(&hdev->cmd_q.lock, flags); in req_run()
84 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q); in req_run()
85 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags); in req_run()
334 if (skb_queue_empty(&req->cmd_q)) in hci_req_add_ev()
[all …]
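
The hci_request.c hits show commands being collected on a per-request sk_buff_head and then spliced onto hdev->cmd_q under the destination queue's lock. A sketch of that batching step, with illustrative parameter names:

#include <linux/skbuff.h>

static void submit_cmd_batch(struct sk_buff_head *req_q,
                             struct sk_buff_head *dev_q)
{
        unsigned long flags;

        if (skb_queue_empty(req_q))
                return;                 /* nothing was queued for this request */

        spin_lock_irqsave(&dev_q->lock, flags);
        /* move the whole private queue onto the device queue in one step */
        skb_queue_splice_tail(req_q, dev_q);
        spin_unlock_irqrestore(&dev_q->lock, flags);
}
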
hci_core.c
1606 skb_queue_purge(&hdev->cmd_q); in hci_dev_do_open()
1783 skb_queue_purge(&hdev->cmd_q); in hci_dev_do_close()
1797 skb_queue_purge(&hdev->cmd_q); in hci_dev_do_close()
1868 skb_queue_purge(&hdev->cmd_q); in hci_dev_do_reset()
3669 skb_queue_head_init(&hdev->cmd_q); in hci_alloc_dev()
4060 skb_queue_tail(&hdev->cmd_q, skb); in hci_send_cmd()
4789 skb = skb_peek(&hdev->cmd_q); in hci_req_is_complete()
4814 skb_queue_head(&hdev->cmd_q, skb); in hci_resend_last()
4867 spin_lock_irqsave(&hdev->cmd_q.lock, flags); in hci_req_cmd_complete()
4868 while ((skb = __skb_dequeue(&hdev->cmd_q))) { in hci_req_cmd_complete()
[all …]
hci_request.h
30 struct sk_buff_head cmd_q; member
hci_sock.c
1812 skb_queue_tail(&hdev->cmd_q, skb); in hci_sock_sendmsg()
/Linux-v5.10/net/nfc/nci/
core.c
520 skb_queue_purge(&ndev->cmd_q); in nci_open_device()
554 skb_queue_purge(&ndev->cmd_q); in nci_close_device()
1219 skb_queue_head_init(&ndev->cmd_q); in nci_register_device()
1342 skb_queue_tail(&ndev->cmd_q, skb); in nci_send_cmd()
1517 skb = skb_dequeue(&ndev->cmd_q); in nci_cmd_work()
rsp.c
367 if (!skb_queue_empty(&ndev->cmd_q)) in nci_rsp_packet()
/Linux-v5.10/drivers/staging/unisys/include/
iochannel.h
551 struct signal_queue_header cmd_q; member
/Linux-v5.10/drivers/scsi/
sg.c
148 char cmd_q; /* 1 -> allow command queuing, 0 -> don't */ member
726 sfp->cmd_q = 1; /* when sg_io_hdr seen, set command queuing on */ in sg_new_write()
1054 sfp->cmd_q = val ? 1 : 0; in sg_ioctl_common()
1057 return put_user((int) sfp->cmd_q, ip); in sg_ioctl_common()
1217 else if (!sfp->cmd_q) { in sg_poll()
2125 if (!sfp->cmd_q) in sg_add_request()
2184 sfp->cmd_q = SG_DEF_COMMAND_Q; in sg_add_sfp()
2554 (int) fp->cmd_q, (int) fp->force_packid, in sg_proc_debug_helper()
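
In sg.c, cmd_q is a per-file-descriptor flag that userspace can toggle through the SG_SET_COMMAND_Q / SG_GET_COMMAND_Q ioctls from <scsi/sg.h>. A small userspace sketch; the /dev/sg0 node is assumed for illustration.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <scsi/sg.h>

int main(void)
{
        int fd = open("/dev/sg0", O_RDWR);      /* assumed device node */
        int on = 1, cur = 0;

        if (fd < 0) {
                perror("open");
                return 1;
        }
        if (ioctl(fd, SG_SET_COMMAND_Q, &on))   /* allow queued requests on this fd */
                perror("SG_SET_COMMAND_Q");
        if (ioctl(fd, SG_GET_COMMAND_Q, &cur) == 0)
                printf("command queuing: %d\n", cur);
        return 0;
}
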
/Linux-v5.10/drivers/net/ethernet/brocade/bna/
bfa_ioc.c
2120 INIT_LIST_HEAD(&mod->cmd_q); in bfa_ioc_mbox_attach()
2140 if (list_empty(&mod->cmd_q)) in bfa_ioc_mbox_poll()
2153 cmd = list_first_entry(&mod->cmd_q, struct bfa_mbox_cmd, qe); in bfa_ioc_mbox_poll()
2175 while (!list_empty(&mod->cmd_q)) { in bfa_ioc_mbox_flush()
2176 cmd = list_first_entry(&mod->cmd_q, struct bfa_mbox_cmd, qe); in bfa_ioc_mbox_flush()
2662 if (!list_empty(&mod->cmd_q)) { in bfa_nw_ioc_mbox_queue()
2663 list_add_tail(&cmd->qe, &mod->cmd_q); in bfa_nw_ioc_mbox_queue()
2672 list_add_tail(&cmd->qe, &mod->cmd_q); in bfa_nw_ioc_mbox_queue()
bfa_ioc.h
109 struct list_head cmd_q; /*!< pending mbox queue */ member
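
Both the bna and bfa hits keep a pending mailbox list: a command goes to the hardware mailbox immediately when it is free, otherwise it is appended to cmd_q and drained later by a poll routine. A sketch of that pattern; hw_mbox_busy()/hw_mbox_write() are assumed stand-ins for the real register accessors.

#include <linux/list.h>
#include <linux/types.h>

struct mbox_cmd {
        struct list_head qe;
        u32 msg[8];
};

struct mbox_mod {
        struct list_head cmd_q;         /* pending mailbox commands */
};

/* stubs standing in for the doorbell/register accessors */
static bool hw_mbox_busy(void) { return false; }
static void hw_mbox_write(struct mbox_cmd *cmd) { (void)cmd; }

static void mbox_queue(struct mbox_mod *mod, struct mbox_cmd *cmd)
{
        /* keep ordering: if anything is already pending, go to the back */
        if (!list_empty(&mod->cmd_q) || hw_mbox_busy()) {
                list_add_tail(&cmd->qe, &mod->cmd_q);
                return;
        }
        hw_mbox_write(cmd);             /* mailbox free: send right away */
}

static void mbox_poll(struct mbox_mod *mod)
{
        struct mbox_cmd *cmd;

        if (list_empty(&mod->cmd_q) || hw_mbox_busy())
                return;
        cmd = list_first_entry(&mod->cmd_q, struct mbox_cmd, qe);
        list_del(&cmd->qe);
        hw_mbox_write(cmd);             /* drain one pending command */
}
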
/Linux-v5.10/drivers/net/wireless/intel/iwlwifi/mvm/
utils.c
921 bool tdls, bool cmd_q) in iwl_mvm_get_wd_timeout() argument
925 unsigned int default_timeout = cmd_q ? in iwl_mvm_get_wd_timeout()
947 if (cmd_q) in iwl_mvm_get_wd_timeout()
mvm.h
2078 bool tdls, bool cmd_q);
/Linux-v5.10/include/net/nfc/
nci_core.h
224 struct sk_buff_head cmd_q; member
/Linux-v5.10/drivers/scsi/bfa/
bfa_ioc.c
82 (!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
1981 INIT_LIST_HEAD(&mod->cmd_q); in bfa_ioc_mbox_attach()
2001 if (list_empty(&mod->cmd_q)) in bfa_ioc_mbox_poll()
2014 bfa_q_deq(&mod->cmd_q, &cmd); in bfa_ioc_mbox_poll()
2027 while (!list_empty(&mod->cmd_q)) in bfa_ioc_mbox_flush()
2028 bfa_q_deq(&mod->cmd_q, &cmd); in bfa_ioc_mbox_flush()
2568 if (!list_empty(&mod->cmd_q)) { in bfa_ioc_mbox_queue()
2569 list_add_tail(&cmd->qe, &mod->cmd_q); in bfa_ioc_mbox_queue()
2578 list_add_tail(&cmd->qe, &mod->cmd_q); in bfa_ioc_mbox_queue()
bfa_ioc.h
238 struct list_head cmd_q; /* pending mbox queue */ member
/Linux-v5.10/drivers/net/ethernet/qlogic/qlcnic/
qlcnic_83xx_hw.c
3888 struct list_head *head = &mbx->cmd_q; in qlcnic_83xx_flush_mbx_queue()
4010 list_add_tail(&cmd->list, &mbx->cmd_q); in qlcnic_83xx_enqueue_mbx_cmd()
4095 struct list_head *head = &mbx->cmd_q; in qlcnic_83xx_mailbox_worker()
4168 INIT_LIST_HEAD(&mbx->cmd_q); in qlcnic_83xx_init_mailbox_work()
