Searched refs:qid (Results 1 – 25 of 286) sorted by relevance


/Linux-v6.1/drivers/net/ethernet/mellanox/mlx5/core/en/
htb.c
17 u16 qid; member
40 if (node->qid == MLX5E_QOS_QID_INNER) in mlx5e_htb_enumerate_leaves()
42 err = callback(data, node->qid, node->hw_id); in mlx5e_htb_enumerate_leaves()
70 mlx5e_htb_node_create_leaf(struct mlx5e_htb *htb, u16 classid, u16 qid, in mlx5e_htb_node_create_leaf() argument
81 node->qid = qid; in mlx5e_htb_node_create_leaf()
82 __set_bit(qid, htb->qos_used_qids); in mlx5e_htb_node_create_leaf()
100 node->qid = MLX5E_QOS_QID_INNER; in mlx5e_htb_node_create_root()
134 if (node->qid != MLX5E_QOS_QID_INNER) { in mlx5e_htb_node_delete()
135 __clear_bit(node->qid, htb->qos_used_qids); in mlx5e_htb_node_delete()
150 u16 qid; in mlx5e_htb_get_txq_by_classid() local
[all …]
qos.c
38 u16 mlx5e_qid_from_qos(struct mlx5e_channels *chs, u16 qid) in mlx5e_qid_from_qos() argument
49 return (chs->params.num_channels + is_ptp) * mlx5e_get_dcb_num_tc(&chs->params) + qid; in mlx5e_qid_from_qos()
54 static struct mlx5e_txqsq *mlx5e_get_qos_sq(struct mlx5e_priv *priv, int qid) in mlx5e_get_qos_sq() argument
61 ix = qid % params->num_channels; in mlx5e_get_qos_sq()
62 qid /= params->num_channels; in mlx5e_get_qos_sq()
66 return mlx5e_state_dereference(priv, qos_sqs[qid]); in mlx5e_get_qos_sq()
76 int txq_ix, ix, qid, err = 0; in mlx5e_open_qos_sq() local
111 qid = node_qid / params->num_channels; in mlx5e_open_qos_sq()
135 rcu_assign_pointer(qos_sqs[qid], sq); in mlx5e_open_qos_sq()
157 u16 qid; in mlx5e_activate_qos_sq() local
[all …]
qos.h
24 void mlx5e_deactivate_qos_sq(struct mlx5e_priv *priv, u16 qid);
25 void mlx5e_close_qos_sq(struct mlx5e_priv *priv, u16 qid);
26 void mlx5e_reactivate_qos_sq(struct mlx5e_priv *priv, u16 qid, struct netdev_queue *txq);
27 void mlx5e_reset_qdisc(struct net_device *dev, u16 qid);
38 u16 mlx5e_qid_from_qos(struct mlx5e_channels *chs, u16 qid);
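The two qos.c results above show the same mapping in both directions: mlx5e_qid_from_qos() places HTB/QoS send queues after all regular per-channel, per-TC txqs, while mlx5e_get_qos_sq() splits a node qid into a channel index (qid % num_channels) and a per-channel slot (qid / num_channels). A minimal standalone sketch of that arithmetic, with invented parameter values and struct names (an illustration of the indexing, not the driver's code):

```c
#include <stdio.h>
#include <stdint.h>

/* Illustrative parameters, not the driver's structures. */
struct params {
	uint16_t num_channels;	/* regular channels */
	uint16_t num_tc;	/* traffic classes per channel */
	int	 is_ptp;	/* one extra "channel" worth of txqs for PTP */
};

/* Mirrors the mapping in mlx5e_qid_from_qos(): QoS txqs start
 * after all regular (and PTP) per-TC txqs. */
static uint16_t qid_from_qos(const struct params *p, uint16_t qid)
{
	return (p->num_channels + p->is_ptp) * p->num_tc + qid;
}

/* Mirrors the split in mlx5e_get_qos_sq(): a node qid selects a
 * channel (qid % num_channels) and a slot inside that channel's
 * qos_sqs[] array (qid / num_channels). */
static void split_qos_qid(const struct params *p, uint16_t qid,
			  uint16_t *channel, uint16_t *slot)
{
	*channel = qid % p->num_channels;
	*slot = qid / p->num_channels;
}

int main(void)
{
	struct params p = { .num_channels = 8, .num_tc = 1, .is_ptp = 0 };
	uint16_t ch, slot;

	for (uint16_t qid = 0; qid < 10; qid++) {
		split_qos_qid(&p, qid, &ch, &slot);
		printf("node qid %2u -> txq %2u (channel %u, slot %u)\n",
		       qid, qid_from_qos(&p, qid), ch, slot);
	}
	return 0;
}
```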
/Linux-v6.1/drivers/net/ethernet/marvell/prestera/
prestera_pci.c
231 static u32 prestera_fw_evtq_len(struct prestera_fw *fw, u8 qid) in prestera_fw_evtq_len() argument
233 return fw->evt_queue[qid].len; in prestera_fw_evtq_len()
236 static u32 prestera_fw_evtq_avail(struct prestera_fw *fw, u8 qid) in prestera_fw_evtq_avail() argument
238 u32 wr_idx = prestera_fw_read(fw, PRESTERA_EVTQ_WR_IDX_REG(qid)); in prestera_fw_evtq_avail()
239 u32 rd_idx = prestera_fw_read(fw, PRESTERA_EVTQ_RD_IDX_REG(qid)); in prestera_fw_evtq_avail()
241 return CIRC_CNT(wr_idx, rd_idx, prestera_fw_evtq_len(fw, qid)); in prestera_fw_evtq_avail()
245 u8 qid, u32 idx) in prestera_fw_evtq_rd_set() argument
247 u32 rd_idx = idx & (prestera_fw_evtq_len(fw, qid) - 1); in prestera_fw_evtq_rd_set()
249 prestera_fw_write(fw, PRESTERA_EVTQ_RD_IDX_REG(qid), rd_idx); in prestera_fw_evtq_rd_set()
252 static u8 __iomem *prestera_fw_evtq_buf(struct prestera_fw *fw, u8 qid) in prestera_fw_evtq_buf() argument
[all …]
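prestera_pci.c treats each firmware event queue as a power-of-two ring: pending entries are counted with CIRC_CNT(wr_idx, rd_idx, len) and the read index is kept inside the ring by masking with (len - 1). A self-contained sketch of that ring arithmetic, assuming a power-of-two queue length (CIRC_CNT is redefined here with the same formula include/linux/circ_buf.h uses):

```c
#include <stdio.h>
#include <stdint.h>

/* Same formula as include/linux/circ_buf.h: entries available to read. */
#define CIRC_CNT(head, tail, size) (((head) - (tail)) & ((size) - 1))

struct evtq {
	uint32_t len;		/* must be a power of two */
	uint32_t wr_idx;	/* advanced by the producer (firmware) */
	uint32_t rd_idx;	/* advanced by the consumer (driver) */
};

/* Mirrors prestera_fw_evtq_avail(). */
static uint32_t evtq_avail(const struct evtq *q)
{
	return CIRC_CNT(q->wr_idx, q->rd_idx, q->len);
}

/* Mirrors prestera_fw_evtq_rd_set(): the new read index is masked
 * by (len - 1) so it wraps inside the ring. */
static void evtq_rd_set(struct evtq *q, uint32_t idx)
{
	q->rd_idx = idx & (q->len - 1);
}

int main(void)
{
	struct evtq q = { .len = 8, .wr_idx = 10, .rd_idx = 6 };

	printf("pending: %u\n", evtq_avail(&q));	/* 4 */
	evtq_rd_set(&q, q.rd_idx + 3);			/* consume 3 entries */
	printf("rd_idx: %u, pending: %u\n", q.rd_idx, evtq_avail(&q));
	return 0;
}
```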
/Linux-v6.1/arch/s390/include/asm/
ap.h
83 static inline struct ap_queue_status ap_tapq(ap_qid_t qid, unsigned long *info) in ap_tapq() argument
95 : [qid] "d" (qid) in ap_tapq()
110 static inline struct ap_queue_status ap_test_queue(ap_qid_t qid, in ap_test_queue() argument
115 qid |= 1UL << 23; /* set T bit*/ in ap_test_queue()
116 return ap_tapq(qid, info); in ap_test_queue()
125 static inline struct ap_queue_status ap_rapq(ap_qid_t qid) in ap_rapq() argument
127 unsigned long reg0 = qid | (1UL << 24); /* fc 1UL is RAPQ */ in ap_rapq()
146 static inline struct ap_queue_status ap_zapq(ap_qid_t qid) in ap_zapq() argument
148 unsigned long reg0 = qid | (2UL << 24); /* fc 2UL is ZAPQ */ in ap_zapq()
234 static inline struct ap_queue_status ap_aqic(ap_qid_t qid, in ap_aqic() argument
[all …]
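In the s390 AP results, the qid is the low part of the PQAP general register 0; the function code sits at bit 24 (the snippets note fc 1 for RAPQ and fc 2 for ZAPQ) and ap_test_queue() can set a T bit at bit 23 before calling ap_tapq(). A tiny sketch of just that register composition (the qid value and macro names are illustrative; the card/queue layout inside the qid is not shown in these results):

```c
#include <stdio.h>

typedef unsigned long ap_qid_t;

/* Function codes placed at bit 24 of GR0, as seen in ap.h above. */
#define AP_FC_RAPQ	(1UL << 24)	/* reset queue */
#define AP_FC_ZAPQ	(2UL << 24)	/* zeroize queue */
#define AP_TAPQ_T_BIT	(1UL << 23)	/* TAPQ "test facilities" bit */

int main(void)
{
	ap_qid_t qid = 0x0105;	/* illustrative qid value only */

	printf("TAPQ(T) GR0 = 0x%lx\n", qid | AP_TAPQ_T_BIT);
	printf("RAPQ    GR0 = 0x%lx\n", qid | AP_FC_RAPQ);
	printf("ZAPQ    GR0 = 0x%lx\n", qid | AP_FC_ZAPQ);
	return 0;
}
```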
/Linux-v6.1/drivers/infiniband/hw/cxgb4/
resource.c
111 u32 qid; in c4iw_get_cqid() local
119 qid = entry->qid; in c4iw_get_cqid()
122 qid = c4iw_get_resource(&rdev->resource.qid_table); in c4iw_get_cqid()
123 if (!qid) in c4iw_get_cqid()
126 rdev->stats.qid.cur += rdev->qpmask + 1; in c4iw_get_cqid()
128 for (i = qid+1; i & rdev->qpmask; i++) { in c4iw_get_cqid()
132 entry->qid = i; in c4iw_get_cqid()
143 entry->qid = qid; in c4iw_get_cqid()
145 for (i = qid+1; i & rdev->qpmask; i++) { in c4iw_get_cqid()
149 entry->qid = i; in c4iw_get_cqid()
[all …]
/Linux-v6.1/drivers/nvme/host/
auth.c
24 int qid; member
45 #define nvme_auth_flags_from_qid(qid) \ argument
46 (qid == 0) ? 0 : BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_RESERVED
47 #define nvme_auth_queue_from_qid(ctrl, qid) \ argument
48 (qid == 0) ? (ctrl)->fabrics_q : (ctrl)->connect_q
50 static int nvme_auth_submit(struct nvme_ctrl *ctrl, int qid, in nvme_auth_submit() argument
54 blk_mq_req_flags_t flags = nvme_auth_flags_from_qid(qid); in nvme_auth_submit()
55 struct request_queue *q = nvme_auth_queue_from_qid(ctrl, qid); in nvme_auth_submit()
71 qid == 0 ? NVME_QID_ANY : qid, in nvme_auth_submit()
75 "qid %d auth_send failed with status %d\n", qid, ret); in nvme_auth_submit()
[all …]
trace.h
26 #define parse_nvme_cmd(qid, opcode, fctype, cdw10) \ argument
29 ((qid) ? \
53 __field(int, qid)
64 __entry->qid = nvme_req_qid(req);
77 __entry->qid, __entry->cid, __entry->nsid,
79 show_opcode_name(__entry->qid, __entry->opcode,
81 parse_nvme_cmd(__entry->qid, __entry->opcode,
91 __field(int, qid)
100 __entry->qid = nvme_req_qid(req);
110 __entry->qid, __entry->cid, __entry->result,
[all …]
/Linux-v6.1/drivers/scsi/lpfc/
lpfc_debugfs.h
570 lpfc_debug_dump_wq_by_id(struct lpfc_hba *phba, int qid) in lpfc_debug_dump_wq_by_id() argument
575 if (phba->sli4_hba.hdwq[wq_idx].io_wq->queue_id == qid) in lpfc_debug_dump_wq_by_id()
578 pr_err("IO WQ[Idx:%d|Qid:%d]\n", wq_idx, qid); in lpfc_debug_dump_wq_by_id()
583 if (phba->sli4_hba.els_wq->queue_id == qid) { in lpfc_debug_dump_wq_by_id()
584 pr_err("ELS WQ[Qid:%d]\n", qid); in lpfc_debug_dump_wq_by_id()
589 if (phba->sli4_hba.nvmels_wq->queue_id == qid) { in lpfc_debug_dump_wq_by_id()
590 pr_err("NVME LS WQ[Qid:%d]\n", qid); in lpfc_debug_dump_wq_by_id()
604 lpfc_debug_dump_mq_by_id(struct lpfc_hba *phba, int qid) in lpfc_debug_dump_mq_by_id() argument
606 if (phba->sli4_hba.mbx_wq->queue_id == qid) { in lpfc_debug_dump_mq_by_id()
607 printk(KERN_ERR "MBX WQ[Qid:%d]\n", qid); in lpfc_debug_dump_mq_by_id()
[all …]
/Linux-v6.1/drivers/nvme/target/
fabrics-cmd-auth.c
21 __func__, sq->ctrl->cntlid, sq->qid, sq->dhchap_tid); in nvmet_auth_expired_work()
41 __func__, ctrl->cntlid, req->sq->qid, in nvmet_auth_negotiate()
70 __func__, ctrl->cntlid, req->sq->qid); in nvmet_auth_negotiate()
74 __func__, ctrl->cntlid, req->sq->qid, in nvmet_auth_negotiate()
98 __func__, ctrl->cntlid, req->sq->qid); in nvmet_auth_negotiate()
102 __func__, ctrl->cntlid, req->sq->qid, in nvmet_auth_negotiate()
107 __func__, ctrl->cntlid, req->sq->qid, in nvmet_auth_negotiate()
120 __func__, ctrl->cntlid, req->sq->qid, in nvmet_auth_reply()
137 ctrl->cntlid, req->sq->qid); in nvmet_auth_reply()
143 ctrl->cntlid, req->sq->qid); in nvmet_auth_reply()
[all …]
trace.h
28 #define parse_nvme_cmd(qid, opcode, fctype, cdw10) \ argument
31 (qid ? \
68 __field(int, qid)
81 __entry->qid = req->sq->qid;
95 __entry->qid, __entry->cid, __entry->nsid,
97 show_opcode_name(__entry->qid, __entry->opcode,
99 parse_nvme_cmd(__entry->qid, __entry->opcode,
109 __field(int, qid)
116 __entry->qid = req->cq->qid;
125 __entry->qid, __entry->cid, __entry->result, __entry->status)
fabrics-cmd.c
140 u16 qid = le16_to_cpu(c->qid); in nvmet_install_queue() local
154 if (ctrl->sqs[qid] != NULL) { in nvmet_install_queue()
155 pr_warn("qid %u has already been created\n", qid); in nvmet_install_queue()
156 req->error_loc = offsetof(struct nvmf_connect_command, qid); in nvmet_install_queue()
176 nvmet_cq_setup(ctrl, req->cq, qid, sqsize + 1); in nvmet_install_queue()
177 nvmet_sq_setup(ctrl, req->sq, qid, sqsize + 1); in nvmet_install_queue()
188 qid, ctrl->cntlid, ret); in nvmet_install_queue()
189 ctrl->sqs[qid] = NULL; in nvmet_install_queue()
290 u16 qid = le16_to_cpu(c->qid); in nvmet_execute_io_connect() local
323 if (unlikely(qid > ctrl->subsys->max_qid)) { in nvmet_execute_io_connect()
[all …]
/Linux-v6.1/drivers/gpu/drm/amd/amdkfd/
kfd_process_queue_manager.c
33 struct process_queue_manager *pqm, unsigned int qid) in get_queue_by_qid() argument
38 if ((pqn->q && pqn->q->properties.queue_id == qid) || in get_queue_by_qid()
39 (pqn->kq && pqn->kq->queue->properties.queue_id == qid)) in get_queue_by_qid()
47 unsigned int qid) in assign_queue_slot_by_qid() argument
49 if (qid >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS) in assign_queue_slot_by_qid()
52 if (__test_and_set_bit(qid, pqm->queue_slot_bitmap)) { in assign_queue_slot_by_qid()
53 pr_err("Cannot create new queue because requested qid(%u) is in use\n", qid); in assign_queue_slot_by_qid()
61 unsigned int *qid) in find_available_queue_slot() argument
77 *qid = found; in find_available_queue_slot()
93 int pqm_set_gws(struct process_queue_manager *pqm, unsigned int qid, in pqm_set_gws() argument
[all …]
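kfd_process_queue_manager.c hands out qids from a per-process bitmap: a caller-requested qid is claimed with a test-and-set (failing if the bit is already set or the qid is out of range), and find_available_queue_slot() otherwise takes the first clear bit. A hedged userspace sketch of that allocation pattern (the 32-slot bitmap and helper names below are stand-ins, not the KFD constants):

```c
#include <stdio.h>

#define MAX_QUEUES 32	/* illustrative, not KFD_MAX_NUM_OF_QUEUES_PER_PROCESS */

/* One bit per possible qid; a set bit means the slot is taken. */
static unsigned int queue_slot_bitmap;

/* Claim a specific qid, like assign_queue_slot_by_qid(). */
static int assign_slot(unsigned int qid)
{
	if (qid >= MAX_QUEUES)
		return -1;
	if (queue_slot_bitmap & (1u << qid))
		return -1;	/* already in use */
	queue_slot_bitmap |= 1u << qid;
	return 0;
}

/* Find the lowest free qid, like find_available_queue_slot(). */
static int find_free_slot(unsigned int *qid)
{
	for (unsigned int i = 0; i < MAX_QUEUES; i++) {
		if (!(queue_slot_bitmap & (1u << i))) {
			queue_slot_bitmap |= 1u << i;
			*qid = i;
			return 0;
		}
	}
	return -1;	/* all slots taken */
}

int main(void)
{
	unsigned int qid;

	assign_slot(3);
	find_free_slot(&qid);
	printf("first free qid after reserving 3: %u\n", qid);	/* 0 */
	find_free_slot(&qid);
	printf("next free qid: %u\n", qid);			/* 1 */
	return 0;
}
```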
/Linux-v6.1/drivers/s390/crypto/
ap_queue.c
37 status = ap_aqic(aq->qid, qirqctrl, virt_to_phys(ind)); in ap_queue_enable_irq()
47 AP_QID_CARD(aq->qid), in ap_queue_enable_irq()
48 AP_QID_QUEUE(aq->qid)); in ap_queue_enable_irq()
71 __ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length, in __ap_send() argument
75 qid |= 0x400000UL; in __ap_send()
76 return ap_nqap(qid, psmid, msg, length); in __ap_send()
79 int ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length) in ap_send() argument
83 status = __ap_send(qid, psmid, msg, length, 0); in ap_send()
98 int ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length) in ap_recv() argument
104 status = ap_dqap(qid, psmid, msg, length, NULL, NULL); in ap_recv()
[all …]
zcrypt_api.c
683 int cpen, qpen, qid = 0, rc = -ENODEV; in zcrypt_rsa_modexpo() local
742 AP_QID_QUEUE(zq->queue->qid))) in zcrypt_rsa_modexpo()
746 tr->last_qid == zq->queue->qid) ? in zcrypt_rsa_modexpo()
766 qid = pref_zq->queue->qid; in zcrypt_rsa_modexpo()
777 tr->last_qid = qid; in zcrypt_rsa_modexpo()
780 AP_QID_CARD(qid), AP_QID_QUEUE(qid)); in zcrypt_rsa_modexpo()
793 int cpen, qpen, qid = 0, rc = -ENODEV; in zcrypt_rsa_crt() local
852 AP_QID_QUEUE(zq->queue->qid))) in zcrypt_rsa_crt()
856 tr->last_qid == zq->queue->qid) ? in zcrypt_rsa_crt()
876 qid = pref_zq->queue->qid; in zcrypt_rsa_crt()
[all …]
zcrypt_msgtype6.c
240 msg->cprbx.domain = AP_QID_QUEUE(zq->queue->qid); in icamex_msg_to_type6mex_msgx()
310 msg->cprbx.domain = AP_QID_QUEUE(zq->queue->qid); in icacrt_msg_to_type6crt_msgx()
633 __func__, AP_QID_CARD(zq->queue->qid), in convert_type86_ica()
634 AP_QID_QUEUE(zq->queue->qid), in convert_type86_ica()
640 AP_QID_CARD(zq->queue->qid), in convert_type86_ica()
641 AP_QID_QUEUE(zq->queue->qid), in convert_type86_ica()
644 __func__, AP_QID_CARD(zq->queue->qid), in convert_type86_ica()
645 AP_QID_QUEUE(zq->queue->qid), in convert_type86_ica()
805 AP_QID_CARD(zq->queue->qid), in convert_response_ica()
806 AP_QID_QUEUE(zq->queue->qid), in convert_response_ica()
[all …]
/Linux-v6.1/drivers/net/ethernet/intel/ixgbe/
ixgbe_xsk.c
15 int qid = ring->ring_idx; in ixgbe_xsk_pool() local
17 if (!xdp_on || !test_bit(qid, adapter->af_xdp_zc_qps)) in ixgbe_xsk_pool()
20 return xsk_get_pool_from_qid(adapter->netdev, qid); in ixgbe_xsk_pool()
25 u16 qid) in ixgbe_xsk_pool_enable() argument
31 if (qid >= adapter->num_rx_queues) in ixgbe_xsk_pool_enable()
34 if (qid >= netdev->real_num_rx_queues || in ixgbe_xsk_pool_enable()
35 qid >= netdev->real_num_tx_queues) in ixgbe_xsk_pool_enable()
46 ixgbe_txrx_ring_disable(adapter, qid); in ixgbe_xsk_pool_enable()
48 set_bit(qid, adapter->af_xdp_zc_qps); in ixgbe_xsk_pool_enable()
51 ixgbe_txrx_ring_enable(adapter, qid); in ixgbe_xsk_pool_enable()
[all …]
/Linux-v6.1/include/linux/
quota.h
79 extern qid_t from_kqid(struct user_namespace *to, struct kqid qid);
80 extern qid_t from_kqid_munged(struct user_namespace *to, struct kqid qid);
81 extern bool qid_valid(struct kqid qid);
98 enum quota_type type, qid_t qid) in make_kqid() argument
105 kqid.uid = make_kuid(from, qid); in make_kqid()
108 kqid.gid = make_kgid(from, qid); in make_kqid()
111 kqid.projid = make_kprojid(from, qid); in make_kqid()
187 static inline bool qid_has_mapping(struct user_namespace *ns, struct kqid qid) in qid_has_mapping() argument
189 return from_kqid(ns, qid) != (qid_t) -1; in qid_has_mapping()
318 …int (*get_next_id)(struct super_block *sb, struct kqid *qid); /* Get next ID with existing structu…
[all …]
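quota.h treats a kqid as a (quota type, kernel-internal id) pair: make_kqid() converts a userspace qid_t into a kuid, kgid, or kprojid depending on the type, and qid_has_mapping() simply checks that from_kqid() can translate it back without returning (qid_t)-1. A rough userspace analogue of that round trip, with an invented namespace mapping purely for illustration:

```c
#include <stdio.h>
#include <stdbool.h>

typedef unsigned int qid_t;

enum quota_type { USRQUOTA, GRPQUOTA, PRJQUOTA };

/* Toy "namespace": ids are shifted by a fixed offset on the way in. */
struct user_namespace { qid_t base; qid_t count; };

struct kqid { enum quota_type type; qid_t kid; };

/* Analogue of make_kqid(): map a per-namespace id to a kernel-wide one. */
static struct kqid make_kqid(const struct user_namespace *ns,
			     enum quota_type type, qid_t qid)
{
	struct kqid k = { .type = type, .kid = (qid_t)-1 };

	if (qid < ns->count)
		k.kid = ns->base + qid;
	return k;
}

/* Analogue of from_kqid(): map back, or return (qid_t)-1 if unmapped. */
static qid_t from_kqid(const struct user_namespace *ns, struct kqid k)
{
	if (k.kid >= ns->base && k.kid < ns->base + ns->count)
		return k.kid - ns->base;
	return (qid_t)-1;
}

/* Same shape as qid_has_mapping() in the snippet above. */
static bool qid_has_mapping(const struct user_namespace *ns, struct kqid k)
{
	return from_kqid(ns, k) != (qid_t)-1;
}

int main(void)
{
	struct user_namespace ns = { .base = 100000, .count = 65536 };
	struct kqid k = make_kqid(&ns, USRQUOTA, 1000);

	printf("kernel id %u, mapped back to %u, has mapping: %d\n",
	       k.kid, from_kqid(&ns, k), qid_has_mapping(&ns, k));
	return 0;
}
```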
/Linux-v6.1/fs/xfs/
xfs_quotaops.c
216 struct kqid qid, in xfs_fs_get_dqblk() argument
225 id = from_kqid(&init_user_ns, qid); in xfs_fs_get_dqblk()
226 return xfs_qm_scall_getquota(mp, id, xfs_quota_type(qid.type), qdq); in xfs_fs_get_dqblk()
233 struct kqid *qid, in xfs_fs_get_nextdqblk() argument
243 id = from_kqid(&init_user_ns, *qid); in xfs_fs_get_nextdqblk()
244 ret = xfs_qm_scall_getquota_next(mp, &id, xfs_quota_type(qid->type), in xfs_fs_get_nextdqblk()
250 *qid = make_kqid(current_user_ns(), qid->type, id); in xfs_fs_get_nextdqblk()
257 struct kqid qid, in xfs_fs_set_dqblk() argument
267 return xfs_qm_scall_setqlim(mp, from_kqid(&init_user_ns, qid), in xfs_fs_set_dqblk()
268 xfs_quota_type(qid.type), qdq); in xfs_fs_set_dqblk()
/Linux-v6.1/drivers/vdpa/alibaba/
eni_vdpa.c
257 static int eni_vdpa_get_vq_state(struct vdpa_device *vdpa, u16 qid, in eni_vdpa_get_vq_state() argument
263 static int eni_vdpa_set_vq_state(struct vdpa_device *vdpa, u16 qid, in eni_vdpa_set_vq_state() argument
273 if (!vp_legacy_get_queue_enable(ldev, qid) in eni_vdpa_set_vq_state()
281 static void eni_vdpa_set_vq_cb(struct vdpa_device *vdpa, u16 qid, in eni_vdpa_set_vq_cb() argument
286 eni_vdpa->vring[qid].cb = *cb; in eni_vdpa_set_vq_cb()
289 static void eni_vdpa_set_vq_ready(struct vdpa_device *vdpa, u16 qid, in eni_vdpa_set_vq_ready() argument
299 vp_legacy_set_queue_address(ldev, qid, 0); in eni_vdpa_set_vq_ready()
302 static bool eni_vdpa_get_vq_ready(struct vdpa_device *vdpa, u16 qid) in eni_vdpa_get_vq_ready() argument
306 return vp_legacy_get_queue_enable(ldev, qid); in eni_vdpa_get_vq_ready()
309 static void eni_vdpa_set_vq_num(struct vdpa_device *vdpa, u16 qid, in eni_vdpa_set_vq_num() argument
[all …]
/Linux-v6.1/drivers/vdpa/virtio_pci/
vp_vdpa.c
244 static int vp_vdpa_get_vq_state(struct vdpa_device *vdpa, u16 qid, in vp_vdpa_get_vq_state() argument
279 static int vp_vdpa_set_vq_state(struct vdpa_device *vdpa, u16 qid, in vp_vdpa_set_vq_state() argument
289 !vp_modern_get_queue_enable(mdev, qid)) { in vp_vdpa_set_vq_state()
300 static void vp_vdpa_set_vq_cb(struct vdpa_device *vdpa, u16 qid, in vp_vdpa_set_vq_cb() argument
305 vp_vdpa->vring[qid].cb = *cb; in vp_vdpa_set_vq_cb()
309 u16 qid, bool ready) in vp_vdpa_set_vq_ready() argument
313 vp_modern_set_queue_enable(mdev, qid, ready); in vp_vdpa_set_vq_ready()
316 static bool vp_vdpa_get_vq_ready(struct vdpa_device *vdpa, u16 qid) in vp_vdpa_get_vq_ready() argument
320 return vp_modern_get_queue_enable(mdev, qid); in vp_vdpa_get_vq_ready()
323 static void vp_vdpa_set_vq_num(struct vdpa_device *vdpa, u16 qid, in vp_vdpa_set_vq_num() argument
[all …]
/Linux-v6.1/drivers/net/ethernet/intel/i40e/
i40e_xsk.c
87 u16 qid) in i40e_xsk_pool_enable() argument
96 if (qid >= vsi->num_queue_pairs) in i40e_xsk_pool_enable()
99 if (qid >= netdev->real_num_rx_queues || in i40e_xsk_pool_enable()
100 qid >= netdev->real_num_tx_queues) in i40e_xsk_pool_enable()
107 set_bit(qid, vsi->af_xdp_zc_qps); in i40e_xsk_pool_enable()
112 err = i40e_queue_pair_disable(vsi, qid); in i40e_xsk_pool_enable()
116 err = i40e_realloc_rx_xdp_bi(vsi->rx_rings[qid], true); in i40e_xsk_pool_enable()
120 err = i40e_queue_pair_enable(vsi, qid); in i40e_xsk_pool_enable()
125 err = i40e_xsk_wakeup(vsi->netdev, qid, XDP_WAKEUP_RX); in i40e_xsk_pool_enable()
141 static int i40e_xsk_pool_disable(struct i40e_vsi *vsi, u16 qid) in i40e_xsk_pool_disable() argument
[all …]
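The ixgbe and i40e XSK results follow the same shape when binding an AF_XDP pool to a qid: validate the qid against the ring counts, quiesce the ring pair if the interface is running, flip the per-qid zero-copy bit, and bring the pair back up. A condensed, hedged sketch of that ordering (the adapter fields and helpers below are stand-ins, not the Intel drivers' API):

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in adapter state; the real drivers keep this in their private structs. */
struct adapter {
	uint16_t num_rx_queues;
	uint16_t real_num_rx_queues;
	uint16_t real_num_tx_queues;
	uint64_t af_xdp_zc_qps;		/* one bit per qid */
	bool	 running_with_xdp;	/* interface up with an XDP program */
};

static void ring_pair_disable(struct adapter *a, uint16_t qid) { (void)a; (void)qid; }
static void ring_pair_enable(struct adapter *a, uint16_t qid)  { (void)a; (void)qid; }

static int xsk_pool_enable(struct adapter *a, uint16_t qid)
{
	/* Same bounds checks as ixgbe_xsk_pool_enable()/i40e_xsk_pool_enable(). */
	if (qid >= a->num_rx_queues ||
	    qid >= a->real_num_rx_queues || qid >= a->real_num_tx_queues)
		return -22;	/* -EINVAL */

	bool if_running = a->running_with_xdp;

	if (if_running)
		ring_pair_disable(a, qid);	/* quiesce before switching memory model */

	a->af_xdp_zc_qps |= 1ULL << qid;	/* mark qid as zero-copy */

	if (if_running)
		ring_pair_enable(a, qid);
	return 0;
}

int main(void)
{
	struct adapter a = {
		.num_rx_queues = 4, .real_num_rx_queues = 4,
		.real_num_tx_queues = 4, .running_with_xdp = true,
	};

	printf("enable qid 2: %d\n", xsk_pool_enable(&a, 2));	/* 0 */
	printf("enable qid 9: %d\n", xsk_pool_enable(&a, 9));	/* -22 */
	return 0;
}
```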
/Linux-v6.1/fs/quota/
kqid.c
120 bool qid_valid(struct kqid qid) in qid_valid() argument
122 switch (qid.type) { in qid_valid()
124 return uid_valid(qid.uid); in qid_valid()
126 return gid_valid(qid.gid); in qid_valid()
128 return projid_valid(qid.projid); in qid_valid()
/Linux-v6.1/drivers/net/wireless/mediatek/mt76/
tx.c
281 __mt76_tx_queue_skb(struct mt76_phy *phy, int qid, struct sk_buff *skb, in __mt76_tx_queue_skb() argument
286 struct mt76_queue *q = phy->q_tx[qid]; in __mt76_tx_queue_skb()
293 idx = dev->queue_ops->tx_queue_skb(dev, q, qid, skb, wcid, sta); in __mt76_tx_queue_skb()
318 int qid = skb_get_queue_mapping(skb); in mt76_tx() local
325 if (WARN_ON(qid >= MT_TXQ_PSD)) { in mt76_tx()
326 qid = MT_TXQ_BE; in mt76_tx()
327 skb_set_queue_mapping(skb, qid); in mt76_tx()
334 qid = MT_TXQ_PSD; in mt76_tx()
342 q = phy->q_tx[qid]; in mt76_tx()
345 __mt76_tx_queue_skb(phy, qid, skb, wcid, sta, NULL); in mt76_tx()
[all …]
/Linux-v6.1/net/9p/
client.c
1085 struct p9_qid qid; in p9_client_attach() local
1103 err = p9pdu_readf(&req->rc, clnt->proto_version, "Q", &qid); in p9_client_attach()
1111 qid.type, qid.path, qid.version); in p9_client_attach()
1113 memmove(&fid->qid, &qid, sizeof(struct p9_qid)); in p9_client_attach()
1181 memmove(&fid->qid, &wqids[nwqids - 1], sizeof(struct p9_qid)); in p9_client_walk()
1183 memmove(&fid->qid, &oldfid->qid, sizeof(struct p9_qid)); in p9_client_walk()
1206 struct p9_qid qid; in p9_client_open() local
1226 err = p9pdu_readf(&req->rc, clnt->proto_version, "Qd", &qid, &iounit); in p9_client_open()
1233 p9_is_proto_dotl(clnt) ? "RLOPEN" : "ROPEN", qid.type, in p9_client_open()
1234 qid.path, qid.version, iounit); in p9_client_open()
[all …]
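In net/9p the qid is not a queue id but the protocol's per-file identity: client.c reads it from RATTACH/ROPEN replies with the "Q" format and caches it in the fid. On the wire it is 13 bytes, type[1] version[4] path[8], little-endian. A small sketch of decoding that layout (the struct mirrors struct p9_qid; the reply bytes are made up):

```c
#include <stdio.h>
#include <stdint.h>

/* Same fields as struct p9_qid: file type, change version, unique path. */
struct p9_qid {
	uint8_t  type;
	uint32_t version;
	uint64_t path;
};

static uint32_t get_le32(const uint8_t *p)
{
	return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
	       (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

static uint64_t get_le64(const uint8_t *p)
{
	return (uint64_t)get_le32(p) | (uint64_t)get_le32(p + 4) << 32;
}

/* Decode the 13-byte wire form: type[1] version[4] path[8]. */
static void decode_qid(const uint8_t *buf, struct p9_qid *qid)
{
	qid->type = buf[0];
	qid->version = get_le32(buf + 1);
	qid->path = get_le64(buf + 5);
}

int main(void)
{
	/* Made-up reply bytes for illustration. */
	const uint8_t wire[13] = {
		0x80,				/* type: directory */
		0x01, 0x00, 0x00, 0x00,		/* version = 1 */
		0x2a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,	/* path = 42 */
	};
	struct p9_qid qid;

	decode_qid(wire, &qid);
	printf("qid type 0x%x version %u path %llu\n",
	       qid.type, qid.version, (unsigned long long)qid.path);
	return 0;
}
```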
