Searched refs:cmdq (Results 1 – 25 of 30) sorted by relevance

/Linux-v4.19/drivers/mailbox/
mtk-cmdq-mailbox.c
62 struct cmdq *cmdq; member
69 struct cmdq { struct
79 static int cmdq_thread_suspend(struct cmdq *cmdq, struct cmdq_thread *thread) in cmdq_thread_suspend() argument
91 dev_err(cmdq->mbox.dev, "suspend GCE thread 0x%x failed\n", in cmdq_thread_suspend()
92 (u32)(thread->base - cmdq->base)); in cmdq_thread_suspend()
104 static void cmdq_init(struct cmdq *cmdq) in cmdq_init() argument
106 WARN_ON(clk_enable(cmdq->clock) < 0); in cmdq_init()
107 writel(CMDQ_THR_ACTIVE_SLOT_CYCLES, cmdq->base + CMDQ_THR_SLOT_CYCLES); in cmdq_init()
108 clk_disable(cmdq->clock); in cmdq_init()
111 static int cmdq_thread_reset(struct cmdq *cmdq, struct cmdq_thread *thread) in cmdq_thread_reset() argument
[all …]
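
The cmdq_init() hit above enables the GCE clock only for the duration of a single register write. A minimal sketch of that clock-gated write pattern, with invented names (my_cmdq, my_cmdq_init) standing in for the driver's struct cmdq:

#include <linux/clk.h>
#include <linux/io.h>

/* Illustrative stand-in for the driver's private struct. */
struct my_cmdq {
	void __iomem *base;	/* GCE register block */
	struct clk *clock;	/* GCE clock, prepared elsewhere */
};

/* Enable the clock just long enough to program one register, then
 * gate it again, mirroring the cmdq_init() snippet above. */
static void my_cmdq_init(struct my_cmdq *cmdq, u32 value, u32 reg_offset)
{
	WARN_ON(clk_enable(cmdq->clock) < 0);
	writel(value, cmdq->base + reg_offset);
	clk_disable(cmdq->clock);
}
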
Makefile
46 obj-$(CONFIG_MTK_CMDQ_MBOX) += mtk-cmdq-mailbox.o
/Linux-v4.19/drivers/crypto/cavium/nitrox/
nitrox_lib.c
23 static int cmdq_common_init(struct nitrox_cmdq *cmdq) in cmdq_common_init() argument
25 struct nitrox_device *ndev = cmdq->ndev; in cmdq_common_init()
28 qsize = (ndev->qlen) * cmdq->instr_size; in cmdq_common_init()
29 cmdq->head_unaligned = dma_zalloc_coherent(DEV(ndev), in cmdq_common_init()
31 &cmdq->dma_unaligned, in cmdq_common_init()
33 if (!cmdq->head_unaligned) in cmdq_common_init()
36 cmdq->head = PTR_ALIGN(cmdq->head_unaligned, PKT_IN_ALIGN); in cmdq_common_init()
37 cmdq->dma = PTR_ALIGN(cmdq->dma_unaligned, PKT_IN_ALIGN); in cmdq_common_init()
38 cmdq->qsize = (qsize + PKT_IN_ALIGN); in cmdq_common_init()
39 cmdq->write_idx = 0; in cmdq_common_init()
[all …]
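
cmdq_common_init() above over-allocates by the alignment and then aligns both the CPU pointer and the DMA handle. A hedged sketch of the same over-allocate-and-align idiom; MY_ALIGN and my_cmdq_alloc are illustrative, not the nitrox names:

#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/kernel.h>

#define MY_ALIGN 16			/* stand-in for PKT_IN_ALIGN */

/* Allocate qsize + MY_ALIGN bytes of zeroed coherent memory, then align
 * the CPU pointer and the DMA address to the same boundary.  A real
 * driver must also keep the unaligned pointer and handle around for the
 * matching dma_free_coherent(). */
static int my_cmdq_alloc(struct device *dev, size_t qsize,
			 void **head, dma_addr_t *dma)
{
	dma_addr_t dma_unaligned;
	void *unaligned;

	unaligned = dma_zalloc_coherent(dev, qsize + MY_ALIGN,
					&dma_unaligned, GFP_KERNEL);
	if (!unaligned)
		return -ENOMEM;

	*head = PTR_ALIGN(unaligned, MY_ALIGN);
	*dma = ALIGN(dma_unaligned, MY_ALIGN);
	return 0;
}
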
nitrox_reqmgr.c
381 struct nitrox_cmdq *cmdq) in backlog_list_add() argument
385 spin_lock_bh(&cmdq->backlog_lock); in backlog_list_add()
386 list_add_tail(&sr->backlog, &cmdq->backlog_head); in backlog_list_add()
387 atomic_inc(&cmdq->backlog_count); in backlog_list_add()
389 spin_unlock_bh(&cmdq->backlog_lock); in backlog_list_add()
393 struct nitrox_cmdq *cmdq) in response_list_add() argument
397 spin_lock_bh(&cmdq->response_lock); in response_list_add()
398 list_add_tail(&sr->response, &cmdq->response_head); in response_list_add()
399 spin_unlock_bh(&cmdq->response_lock); in response_list_add()
403 struct nitrox_cmdq *cmdq) in response_list_del() argument
[all …]
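
The backlog and response helpers above guard plain linked lists with a bottom-half-safe spinlock and track depth with an atomic counter. A minimal illustration of that pattern with invented my_* names:

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>

struct my_cmdq {
	spinlock_t backlog_lock;
	struct list_head backlog_head;
	atomic_t backlog_count;
};

struct my_request {
	struct list_head backlog;
};

/* Queue a request on the backlog.  The _bh variant is used because the
 * list is also drained from softirq context. */
static void my_backlog_add(struct my_request *req, struct my_cmdq *cmdq)
{
	spin_lock_bh(&cmdq->backlog_lock);
	list_add_tail(&req->backlog, &cmdq->backlog_head);
	atomic_inc(&cmdq->backlog_count);
	spin_unlock_bh(&cmdq->backlog_lock);
}
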
nitrox_dev.h
83 struct nitrox_cmdq *cmdq; member
nitrox_hal.c
120 struct nitrox_cmdq *cmdq = &ndev->pkt_cmdqs[i]; in nitrox_config_pkt_input_rings() local
130 nitrox_write_csr(ndev, offset, cmdq->dma); in nitrox_config_pkt_input_rings()
nitrox_req.h
433 struct nitrox_cmdq *cmdq; member
nitrox_isr.c
337 bh->cmdq = &ndev->pkt_cmdqs[i]; in nitrox_setup_pkt_slc_bh()
/Linux-v4.19/drivers/net/ethernet/brocade/bna/
bfa_msgq.c
39 static void bfa_msgq_cmdq_dbell(struct bfa_msgq_cmdq *cmdq);
40 static void bfa_msgq_cmdq_copy_rsp(struct bfa_msgq_cmdq *cmdq);
51 bfa_fsm_state_decl(cmdq, stopped, struct bfa_msgq_cmdq, enum cmdq_event);
52 bfa_fsm_state_decl(cmdq, init_wait, struct bfa_msgq_cmdq, enum cmdq_event);
53 bfa_fsm_state_decl(cmdq, ready, struct bfa_msgq_cmdq, enum cmdq_event);
54 bfa_fsm_state_decl(cmdq, dbell_wait, struct bfa_msgq_cmdq,
58 cmdq_sm_stopped_entry(struct bfa_msgq_cmdq *cmdq) in cmdq_sm_stopped_entry() argument
62 cmdq->producer_index = 0; in cmdq_sm_stopped_entry()
63 cmdq->consumer_index = 0; in cmdq_sm_stopped_entry()
64 cmdq->flags = 0; in cmdq_sm_stopped_entry()
[all …]
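
The bfa_fsm_state_decl() hits above declare per-state handlers (stopped, init_wait, ready, dbell_wait) for the command-queue state machine. A compact sketch of a function-pointer state machine in the same spirit, with invented states and events:

enum my_cmdq_event { MY_CMDQ_E_START, MY_CMDQ_E_STOP };

struct my_cmdq;
typedef void (*my_cmdq_fsm_t)(struct my_cmdq *cmdq, enum my_cmdq_event event);

struct my_cmdq {
	my_cmdq_fsm_t fsm;		/* current state handler */
	unsigned int producer_index;
	unsigned int consumer_index;
};

static void my_cmdq_sm_stopped(struct my_cmdq *cmdq, enum my_cmdq_event event);
static void my_cmdq_sm_ready(struct my_cmdq *cmdq, enum my_cmdq_event event);

/* Each state is one handler; "stopped" zeroes the ring indices, as
 * cmdq_sm_stopped_entry() does above, and moves to "ready" on start. */
static void my_cmdq_sm_stopped(struct my_cmdq *cmdq, enum my_cmdq_event event)
{
	cmdq->producer_index = 0;
	cmdq->consumer_index = 0;
	if (event == MY_CMDQ_E_START)
		cmdq->fsm = my_cmdq_sm_ready;
}

static void my_cmdq_sm_ready(struct my_cmdq *cmdq, enum my_cmdq_event event)
{
	if (event == MY_CMDQ_E_STOP)
		cmdq->fsm = my_cmdq_sm_stopped;
}
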
bfa_msgq.h
112 struct bfa_msgq_cmdq cmdq; member
bfi.h
428 struct bfi_msgq cmdq; member
/Linux-v4.19/drivers/net/ethernet/huawei/hinic/
hinic_hw_cmdq.c
87 #define cmdq_to_cmdqs(cmdq) container_of((cmdq) - (cmdq)->cmdq_type, \ argument
88 struct hinic_cmdqs, cmdq[0])
334 static void cmdq_set_db(struct hinic_cmdq *cmdq, in cmdq_set_db() argument
346 writel(db_info, CMDQ_DB_ADDR(cmdq->db_base, prod_idx)); in cmdq_set_db()
349 static int cmdq_sync_cmd_direct_resp(struct hinic_cmdq *cmdq, in cmdq_sync_cmd_direct_resp() argument
357 struct hinic_wq *wq = cmdq->wq; in cmdq_sync_cmd_direct_resp()
362 spin_lock_bh(&cmdq->cmdq_lock); in cmdq_sync_cmd_direct_resp()
367 spin_unlock_bh(&cmdq->cmdq_lock); in cmdq_sync_cmd_direct_resp()
373 wrapped = cmdq->wrapped; in cmdq_sync_cmd_direct_resp()
378 cmdq->wrapped = !cmdq->wrapped; in cmdq_sync_cmd_direct_resp()
[all …]
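
The cmdq_to_cmdqs() macro above recovers the parent hinic_cmdqs from a pointer to one element of its embedded cmdq[] array by stepping back cmdq_type elements and applying container_of() to element zero. The same trick with invented types:

#include <linux/kernel.h>

enum my_cmdq_type { MY_CMDQ_SYNC, MY_CMDQ_ASYNC, MY_MAX_CMDQ_TYPES };

struct my_cmdq {
	enum my_cmdq_type cmdq_type;	/* equals this element's index in cmdq[] */
};

struct my_cmdqs {
	struct my_cmdq cmdq[MY_MAX_CMDQ_TYPES];
};

/* Step back to element 0 of the embedded array, then let container_of()
 * subtract the offset of cmdq[0] to reach the enclosing struct. */
#define my_cmdq_to_cmdqs(c) \
	container_of((c) - (c)->cmdq_type, struct my_cmdqs, cmdq[0])
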
hinic_hw_io.c
451 enum hinic_cmdq_type cmdq, type; in hinic_io_init() local
482 for (cmdq = HINIC_CMDQ_SYNC; cmdq < HINIC_MAX_CMDQ_TYPES; cmdq++) { in hinic_io_init()
490 func_to_io->cmdq_db_area[cmdq] = db_area; in hinic_io_init()
504 for (type = HINIC_CMDQ_SYNC; type < cmdq; type++) in hinic_io_init()
523 enum hinic_cmdq_type cmdq; in hinic_io_free() local
527 for (cmdq = HINIC_CMDQ_SYNC; cmdq < HINIC_MAX_CMDQ_TYPES; cmdq++) in hinic_io_free()
528 return_db_area(func_to_io, func_to_io->cmdq_db_area[cmdq]); in hinic_io_free()
hinic_hw_cmdq.h
166 struct hinic_cmdq cmdq[HINIC_MAX_CMDQ_TYPES]; member
/Linux-v4.19/drivers/infiniband/hw/bnxt_re/
qplib_rcfw.c
85 struct bnxt_qplib_hwq *cmdq = &rcfw->cmdq; in __send_message() local
116 spin_lock_irqsave(&cmdq->lock, flags); in __send_message()
117 if (req->cmd_size >= HWQ_FREE_SLOTS(cmdq)) { in __send_message()
119 spin_unlock_irqrestore(&cmdq->lock, flags); in __send_message()
133 spin_unlock_irqrestore(&cmdq->lock, flags); in __send_message()
148 cmdq_ptr = (struct bnxt_qplib_cmdqe **)cmdq->pbl_ptr; in __send_message()
153 sw_prod = HWQ_CMP(cmdq->prod, cmdq); in __send_message()
165 cmdq->prod++; in __send_message()
171 cmdq_prod = cmdq->prod; in __send_message()
189 spin_unlock_irqrestore(&cmdq->lock, flags); in __send_message()
[all …]
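
__send_message() above checks for free slots, masks the producer index into the ring with HWQ_CMP(), and advances prod, all under cmdq->lock. A hedged sketch of that producer-side discipline for a power-of-two ring; my_ring and its fields are invented, not the bnxt_re structures:

#include <linux/spinlock.h>
#include <linux/errno.h>

#define MY_RING_DEPTH 256		/* must be a power of two */

struct my_ring {
	spinlock_t lock;
	u32 prod;			/* only ever incremented */
	u32 cons;			/* advanced by the completion path */
	u64 slots[MY_RING_DEPTH];
};

/* Post one entry: bail out if the ring is full, otherwise write the slot
 * at prod & (depth - 1) and advance prod, all under the lock. */
static int my_ring_post(struct my_ring *ring, u64 entry)
{
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	if (ring->prod - ring->cons >= MY_RING_DEPTH) {
		spin_unlock_irqrestore(&ring->lock, flags);
		return -EAGAIN;
	}
	ring->slots[ring->prod & (MY_RING_DEPTH - 1)] = entry;
	ring->prod++;
	spin_unlock_irqrestore(&ring->lock, flags);
	return 0;
}
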
qplib_rcfw.h
189 struct bnxt_qplib_hwq cmdq; member
/Linux-v4.19/drivers/atm/
fore200e.c
615 struct host_cmdq* cmdq = &fore200e->host_cmdq; in fore200e_pca_prom_read() local
616 struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ]; in fore200e_pca_prom_read()
621 FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD); in fore200e_pca_prom_read()
1317 struct host_cmdq* cmdq = &fore200e->host_cmdq; in fore200e_activate_vcin() local
1318 struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ]; in fore200e_activate_vcin()
1325 FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD); in fore200e_activate_vcin()
1743 struct host_cmdq* cmdq = &fore200e->host_cmdq; in fore200e_getstats() local
1744 struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ]; in fore200e_getstats()
1758 FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD); in fore200e_getstats()
1812 struct host_cmdq* cmdq = &fore200e->host_cmdq;
[all …]
/Linux-v4.19/drivers/iommu/
arm-smmu-v3.c
564 struct arm_smmu_cmdq cmdq; member
860 struct arm_smmu_queue *q = &smmu->cmdq.q; in arm_smmu_cmdq_skip_err()
901 struct arm_smmu_queue *q = &smmu->cmdq.q; in arm_smmu_cmdq_insert_cmd()
922 spin_lock_irqsave(&smmu->cmdq.lock, flags); in arm_smmu_cmdq_issue_cmd()
924 spin_unlock_irqrestore(&smmu->cmdq.lock, flags); in arm_smmu_cmdq_issue_cmd()
958 spin_lock_irqsave(&smmu->cmdq.lock, flags); in __arm_smmu_cmdq_issue_sync_msi()
960 spin_unlock_irqrestore(&smmu->cmdq.lock, flags); in __arm_smmu_cmdq_issue_sync_msi()
975 spin_lock_irqsave(&smmu->cmdq.lock, flags); in __arm_smmu_cmdq_issue_sync()
977 ret = queue_poll_cons(&smmu->cmdq.q, true, wfe); in __arm_smmu_cmdq_issue_sync()
978 spin_unlock_irqrestore(&smmu->cmdq.lock, flags); in __arm_smmu_cmdq_issue_sync()
[all …]
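
The SMMUv3 hits above serialise command insertion with cmdq.lock and, for a sync, poll until the hardware consumer index catches up. A rough sketch of that submit-then-poll shape; my_queue, its registers, and the timeouts are invented, and the real driver's queue_poll_cons() additionally handles index wrap and wfe():

#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/iopoll.h>

struct my_queue {
	spinlock_t lock;
	u32 prod;			/* software producer index */
	void __iomem *prod_reg;		/* doorbell written by the CPU */
	void __iomem *cons_reg;		/* consumer index reported by the device */
};

/* Publish one command under the lock (callers may be in IRQ context),
 * then wait outside the lock for the device to consume up to it. */
static int my_queue_issue_sync(struct my_queue *q)
{
	unsigned long flags;
	u32 prod, cons;

	spin_lock_irqsave(&q->lock, flags);
	prod = ++q->prod;		/* filling the command slot is elided */
	writel_relaxed(prod, q->prod_reg);
	spin_unlock_irqrestore(&q->lock, flags);

	/* Poll every 10 us, give up after one second. */
	return readl_relaxed_poll_timeout(q->cons_reg, cons, cons == prod,
					  10, USEC_PER_SEC);
}
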
/Linux-v4.19/Documentation/devicetree/bindings/iommu/
arm,smmu-v3.txt
27 * "cmdq-sync" - CMD_SYNC complete
73 interrupt-names = "eventq", "priq", "cmdq-sync", "gerror";
/Linux-v4.19/drivers/scsi/aacraid/
dpcsup.c
215 list_add_tail(&fib->fiblink, &q->cmdq); in aac_command_normal()
326 list_add_tail(&fib->fiblink, &q->cmdq); in aac_intr_normal()
comminit.c
278 INIT_LIST_HEAD(&q->cmdq); in aac_queue_init()
/Linux-v4.19/drivers/net/ethernet/chelsio/cxgb/
sge.c
1436 struct cmdQ *cmdq = &sge->cmdQ[0]; in update_tx_info() local
1438 cmdq->processed += pr0; in update_tx_info()
1444 clear_bit(CMDQ_STAT_RUNNING, &cmdq->status); in update_tx_info()
1446 if (cmdq->cleaned + cmdq->in_use != cmdq->processed && in update_tx_info()
1447 !test_and_set_bit(CMDQ_STAT_LAST_PKT_DB, &cmdq->status)) { in update_tx_info()
1448 set_bit(CMDQ_STAT_RUNNING, &cmdq->status); in update_tx_info()
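
update_tx_info() above uses atomic bit flags on cmdq->status to avoid losing a doorbell: it clears the RUNNING bit, rechecks for outstanding work, and re-arms only if it wins the test_and_set_bit() race. A simplified sketch with invented flag names:

#include <linux/bitops.h>

enum { MY_STAT_RUNNING, MY_STAT_LAST_PKT_DB };

struct my_cmdq {
	unsigned long status;		/* atomic flag bits */
	unsigned int cleaned;
	unsigned int in_use;
	unsigned int processed;
};

/* Reclaim path: drop RUNNING, then recheck whether work remains and
 * re-arm exactly once if no other context has already done so. */
static void my_update_tx_info(struct my_cmdq *q)
{
	clear_bit(MY_STAT_RUNNING, &q->status);

	if (q->cleaned + q->in_use != q->processed &&
	    !test_and_set_bit(MY_STAT_LAST_PKT_DB, &q->status))
		set_bit(MY_STAT_RUNNING, &q->status);
}
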
/Linux-v4.19/drivers/net/wireless/intel/iwlwifi/pcie/
trans.c
2952 struct iwl_txq *cmdq = trans_pcie->txq[trans_pcie->cmd_queue]; in iwl_trans_pcie_dump_data() local
2967 cmdq->n_window * (sizeof(*txcmd) + TFD_MAX_PAYLOAD_SIZE); in iwl_trans_pcie_dump_data()
3076 spin_lock_bh(&cmdq->lock); in iwl_trans_pcie_dump_data()
3077 ptr = cmdq->write_ptr; in iwl_trans_pcie_dump_data()
3078 for (i = 0; i < cmdq->n_window; i++) { in iwl_trans_pcie_dump_data()
3079 u8 idx = iwl_pcie_get_cmd_index(cmdq, ptr); in iwl_trans_pcie_dump_data()
3083 cmdq->tfds + in iwl_trans_pcie_dump_data()
3091 memcpy(txcmd->data, cmdq->entries[idx].cmd, in iwl_trans_pcie_dump_data()
3098 spin_unlock_bh(&cmdq->lock); in iwl_trans_pcie_dump_data()
/Linux-v4.19/drivers/scsi/bfa/
bfi.h
600 struct bfi_msgq_s cmdq; member
/Linux-v4.19/drivers/staging/wlan-ng/
hfa384x.h
1269 wait_queue_head_t cmdq; /* wait queue itself */ member
