Searched refs:nreq (Results 1 – 25 of 25) sorted by relevance

/Linux-v4.19/arch/ia64/include/asm/
perfmon.h:87 extern int pfm_mod_read_pmds(struct task_struct *, void *req, unsigned int nreq, struct pt_regs *re…
88 extern int pfm_mod_write_pmcs(struct task_struct *, void *req, unsigned int nreq, struct pt_regs *r…
89 extern int pfm_mod_write_ibrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_reg…
90 extern int pfm_mod_write_dbrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_reg…
/Linux-v4.19/crypto/
echainiv.c:50 SKCIPHER_REQUEST_ON_STACK(nreq, ctx->sknull); in echainiv_encrypt()
52 skcipher_request_set_tfm(nreq, ctx->sknull); in echainiv_encrypt()
53 skcipher_request_set_callback(nreq, req->base.flags, in echainiv_encrypt()
55 skcipher_request_set_crypt(nreq, req->src, req->dst, in echainiv_encrypt()
59 err = crypto_skcipher_encrypt(nreq); in echainiv_encrypt()
seqiv.c:76 SKCIPHER_REQUEST_ON_STACK(nreq, ctx->sknull); in seqiv_aead_encrypt()
78 skcipher_request_set_tfm(nreq, ctx->sknull); in seqiv_aead_encrypt()
79 skcipher_request_set_callback(nreq, req->base.flags, in seqiv_aead_encrypt()
81 skcipher_request_set_crypt(nreq, req->src, req->dst, in seqiv_aead_encrypt()
85 err = crypto_skcipher_encrypt(nreq); in seqiv_aead_encrypt()
gcm.c:1070 SKCIPHER_REQUEST_ON_STACK(nreq, ctx->null); in crypto_rfc4543_copy_src_to_dst()
1072 skcipher_request_set_tfm(nreq, ctx->null); in crypto_rfc4543_copy_src_to_dst()
1073 skcipher_request_set_callback(nreq, req->base.flags, NULL, NULL); in crypto_rfc4543_copy_src_to_dst()
1074 skcipher_request_set_crypt(nreq, req->src, req->dst, nbytes, NULL); in crypto_rfc4543_copy_src_to_dst()
1076 return crypto_skcipher_encrypt(nreq); in crypto_rfc4543_copy_src_to_dst()
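
All three crypto hits share one idiom: nreq is a second skcipher request, built on the stack and bound to a "null" cipher whose "encrypt" merely copies req->src to req->dst. A minimal sketch of the pattern, assuming a hypothetical context struct that carries the null tfm as ctx->sknull (the call sequence is the one visible in the hits above):

#include <crypto/aead.h>
#include <crypto/skcipher.h>

/* Sketch only: my_ctx is hypothetical; the calls mirror echainiv/seqiv/gcm. */
struct my_ctx {
        struct crypto_skcipher *sknull;         /* an ecb(cipher_null) tfm */
};

static int my_null_copy(struct aead_request *req, struct my_ctx *ctx,
                        unsigned int nbytes)
{
        SKCIPHER_REQUEST_ON_STACK(nreq, ctx->sknull);

        skcipher_request_set_tfm(nreq, ctx->sknull);
        /* No completion callback: the null cipher runs synchronously. */
        skcipher_request_set_callback(nreq, req->base.flags, NULL, NULL);
        /* "Encrypting" with cipher_null copies src to dst unchanged. */
        skcipher_request_set_crypt(nreq, req->src, req->dst, nbytes, NULL);

        return crypto_skcipher_encrypt(nreq);
}
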
/Linux-v4.19/drivers/infiniband/hw/mthca/
mthca_qp.c:1557 static inline int mthca_wq_overflow(struct mthca_wq *wq, int nreq, in mthca_wq_overflow() argument
1564 if (likely(cur + nreq < wq->max)) in mthca_wq_overflow()
1572 return cur + nreq >= wq->max; in mthca_wq_overflow()
1623 int nreq; in mthca_tavor_post_send() local
1644 for (nreq = 0; wr; ++nreq, wr = wr->next) { in mthca_tavor_post_send()
1645 if (mthca_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) { in mthca_tavor_post_send()
1649 qp->sq.max, nreq); in mthca_tavor_post_send()
1778 cpu_to_be32((nreq ? 0 : MTHCA_NEXT_DBD) | size | in mthca_tavor_post_send()
1782 if (!nreq) { in mthca_tavor_post_send()
1795 if (likely(nreq)) { in mthca_tavor_post_send()
[all …]
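
The overflow helper guarding this loop, and its near-identical mlx4/mlx5/hns_roce twins below, relies on head and tail being free-running counters: their difference is the number of WQEs still outstanding, and posting is refused only if that plus the nreq built so far in this call would exceed the queue size. A sketch of the shape, with hypothetical struct and field names:

#include <linux/compiler.h>
#include <linux/types.h>

/* Sketch of the shared overflow check; my_wq and its fields are
 * hypothetical stand-ins. head and tail are free-running counters, so
 * their difference is the outstanding-WQE count even across wraparound. */
struct my_wq {
        unsigned int head;
        unsigned int tail;
        unsigned int max;
};

static inline bool my_wq_overflow(struct my_wq *wq, int nreq)
{
        unsigned int cur = wq->head - wq->tail; /* WQEs already queued */

        /* Fast path: room for the nreq WQEs built so far in this post. */
        if (likely(cur + nreq < wq->max))
                return false;

        /* The drivers above re-read the tail under the CQ lock before
         * giving up, since a racing completion may have freed slots. */
        cur = wq->head - READ_ONCE(wq->tail);
        return cur + nreq >= wq->max;
}
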
mthca_srq.c:485 int nreq; in mthca_tavor_post_srq_recv() local
494 for (nreq = 0; wr; wr = wr->next) { in mthca_tavor_post_srq_recv()
535 ++nreq; in mthca_tavor_post_srq_recv()
536 if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) { in mthca_tavor_post_srq_recv()
537 nreq = 0; in mthca_tavor_post_srq_recv()
553 if (likely(nreq)) { in mthca_tavor_post_srq_recv()
560 mthca_write64(first_ind << srq->wqe_shift, (srq->srqn << 8) | nreq, in mthca_tavor_post_srq_recv()
584 int nreq; in mthca_arbel_post_srq_recv() local
590 for (nreq = 0; wr; ++nreq, wr = wr->next) { in mthca_arbel_post_srq_recv()
625 if (likely(nreq)) { in mthca_arbel_post_srq_recv()
[all …]
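
In the Tavor receive path nreq doubles as a doorbell batch counter: the hardware doorbell encodes the WQE count in a narrow field, so the loop rings the doorbell and restarts the count each time a batch fills (MTHCA_TAVOR_MAX_WQES_PER_RECV_DB), then flushes whatever remains. A stand-alone model of that loop, with post_one() and ring_doorbell() standing in for the driver internals:

#include <stdio.h>

#define MAX_WQES_PER_DB 4       /* stands in for MTHCA_TAVOR_MAX_WQES_PER_RECV_DB */

struct wr { struct wr *next; };

static void post_one(struct wr *wr) { (void)wr; }
static void ring_doorbell(int nreq) { printf("doorbell: %d WQEs\n", nreq); }

static void post_batched(struct wr *wr)
{
        int nreq = 0;

        for (; wr; wr = wr->next) {
                post_one(wr);
                if (++nreq == MAX_WQES_PER_DB) {
                        ring_doorbell(nreq);    /* flush a full batch */
                        nreq = 0;
                }
        }
        if (nreq)
                ring_doorbell(nreq);            /* flush the partial tail */
}

int main(void)
{
        struct wr list[10];
        int i;

        for (i = 0; i < 9; i++)
                list[i].next = &list[i + 1];
        list[9].next = NULL;

        post_batched(list);     /* prints two full batches and a tail of 2 */
        return 0;
}
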
/Linux-v4.19/drivers/infiniband/hw/mlx4/
srq.c:318 int nreq; in mlx4_ib_post_srq_recv() local
326 nreq = 0; in mlx4_ib_post_srq_recv()
330 for (nreq = 0; wr; ++nreq, wr = wr->next) { in mlx4_ib_post_srq_recv()
362 if (likely(nreq)) { in mlx4_ib_post_srq_recv()
363 srq->wqe_ctr += nreq; in mlx4_ib_post_srq_recv()
qp.c:3197 static int mlx4_wq_overflow(struct mlx4_ib_wq *wq, int nreq, struct ib_cq *ib_cq) in mlx4_wq_overflow() argument
3203 if (likely(cur + nreq < wq->max_post)) in mlx4_wq_overflow()
3211 return cur + nreq >= wq->max_post; in mlx4_wq_overflow()
3449 int nreq; in _mlx4_ib_post_send() local
3485 nreq = 0; in _mlx4_ib_post_send()
3491 for (nreq = 0; wr; ++nreq, wr = wr->next) { in _mlx4_ib_post_send()
3495 if (mlx4_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) { in _mlx4_ib_post_send()
3508 qp->sq.wrid[(qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1)] = wr->wr_id; in _mlx4_ib_post_send()
3735 if (likely(nreq)) { in _mlx4_ib_post_send()
3736 qp->sq.head += nreq; in _mlx4_ib_post_send()
[all …]
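
Within the mlx4 loop, each wr_id is recorded at (qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1): wqe_cnt is a power of two, so the mask is a cheap modulo on the free-running head, which itself is advanced by nreq only after the whole chain has been written. A stand-alone model:

#include <stdio.h>

/* Model of the wrid bookkeeping in _mlx4_ib_post_send(): a power-of-two
 * ring indexed by masking a free-running counter, committed in one step. */
#define WQE_CNT 8       /* must be a power of two */

int main(void)
{
        unsigned long long wrid[WQE_CNT] = { 0 };
        unsigned int head = 14;         /* free-running, may exceed WQE_CNT */
        unsigned int nreq;

        for (nreq = 0; nreq < 3; nreq++)        /* pretend to post 3 WRs */
                wrid[(head + nreq) & (WQE_CNT - 1)] = 1000 + nreq;

        head += nreq;                   /* commit the batch at the end */
        printf("new head %u, next slot %u\n", head, head & (WQE_CNT - 1));
        return 0;
}
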
/Linux-v4.19/fs/nfs/
pnfs_nfs.c:194 unsigned int nreq = 0; in pnfs_generic_alloc_ds_commits() local
206 nreq++; in pnfs_generic_alloc_ds_commits()
211 return nreq; in pnfs_generic_alloc_ds_commits()
267 unsigned int nreq = 0; in pnfs_generic_commit_pagelist() local
273 nreq++; in pnfs_generic_commit_pagelist()
276 nreq += pnfs_generic_alloc_ds_commits(cinfo, &list); in pnfs_generic_commit_pagelist()
278 if (nreq == 0) in pnfs_generic_commit_pagelist()
281 atomic_add(nreq, &cinfo->mds->rpcs_out); in pnfs_generic_commit_pagelist()
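
In the pNFS commit path nreq is a plain tally of the commit requests actually built (one for the MDS bucket plus one per data server); when the tally is zero the send is skipped, and the whole batch is accounted in cinfo->mds->rpcs_out with a single atomic_add() rather than one atomic operation per request. A stand-alone model of that accounting, with build_*() as stand-ins for the pnfs helpers:

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint rpcs_out;

static unsigned int build_mds_commit(void) { return 1; }
static unsigned int build_ds_commits(void) { return 3; }

int main(void)
{
        unsigned int nreq = 0;

        nreq += build_mds_commit();
        nreq += build_ds_commits();
        if (nreq == 0)
                return 0;       /* nothing to commit, nothing to account */

        /* One atomic update covers the whole batch. */
        atomic_fetch_add(&rpcs_out, nreq);
        printf("rpcs in flight: %u\n", (unsigned int)atomic_load(&rpcs_out));
        return 0;
}
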
/Linux-v4.19/drivers/crypto/inside-secure/
safexcel.c:509 int ret, nreq = 0, cdesc = 0, rdesc = 0, commands, results; in safexcel_dequeue() local
549 nreq++; in safexcel_dequeue()
560 if (!nreq) in safexcel_dequeue()
565 priv->ring[ring].requests += nreq; in safexcel_dequeue()
689 int ret, i, nreq, ndesc, tot_descs, handled = 0; in safexcel_handle_result_descriptor() local
695 nreq = readl(EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT); in safexcel_handle_result_descriptor()
696 nreq >>= EIP197_xDR_PROC_xD_PKT_OFFSET; in safexcel_handle_result_descriptor()
697 nreq &= EIP197_xDR_PROC_xD_PKT_MASK; in safexcel_handle_result_descriptor()
698 if (!nreq) in safexcel_handle_result_descriptor()
701 for (i = 0; i < nreq; i++) { in safexcel_handle_result_descriptor()
[all …]
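
In safexcel_handle_result_descriptor() nreq is decoded from hardware: the driver reads the ring's processed-count register and isolates the packet-count bitfield with a shift and a mask. A stand-alone model of the decode (the offset and mask values below are made up, not the EIP197 ones):

#include <stdint.h>
#include <stdio.h>

#define PKT_OFFSET 24
#define PKT_MASK   0x7f

int main(void)
{
        uint32_t reg = 0x05000010;      /* pretend result of readl() */
        uint32_t nreq = (reg >> PKT_OFFSET) & PKT_MASK;

        printf("%u result descriptors to handle\n", nreq);      /* prints 5 */
        return 0;
}
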
/Linux-v4.19/drivers/infiniband/hw/mlx5/
srq.c:459 int nreq; in mlx5_ib_post_srq_recv() local
470 for (nreq = 0; wr; nreq++, wr = wr->next) { in mlx5_ib_post_srq_recv()
502 if (likely(nreq)) { in mlx5_ib_post_srq_recv()
503 srq->wqe_ctr += nreq; in mlx5_ib_post_srq_recv()
qp.c:3485 static int mlx5_wq_overflow(struct mlx5_ib_wq *wq, int nreq, struct ib_cq *ib_cq) in mlx5_wq_overflow() argument
3491 if (likely(cur + nreq < wq->max_post)) in mlx5_wq_overflow()
3499 return cur + nreq >= wq->max_post; in mlx5_wq_overflow()
4300 int *size, int nreq, bool send_signaled, bool solicited) in __begin_wqe() argument
4302 if (unlikely(mlx5_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq))) in __begin_wqe()
4323 int *size, int nreq) in begin_wqe() argument
4325 return __begin_wqe(qp, seg, ctrl, wr, idx, size, nreq, in begin_wqe()
4333 int nreq, u8 fence, u32 mlx5_opcode) in finish_wqe() argument
4346 qp->sq.wqe_head[idx] = qp->sq.head + nreq; in finish_wqe()
4369 int nreq; in _mlx5_ib_post_send() local
[all …]
/Linux-v4.19/drivers/dma/
bcm-sba-raid.c:298 struct sba_request *nreq; in sba_free_chained_requests() local
304 list_for_each_entry(nreq, &req->next, next) in sba_free_chained_requests()
305 _sba_free_request(sba, nreq); in sba_free_chained_requests()
421 struct sba_request *nreq, *first = req->first; in sba_process_received_request() local
443 list_for_each_entry(nreq, &first->next, next) in sba_process_received_request()
444 _sba_free_request(sba, nreq); in sba_process_received_request()
529 struct sba_request *req, *nreq; in sba_tx_submit() local
541 list_for_each_entry(nreq, &req->next, next) in sba_tx_submit()
542 _sba_pending_request(sba, nreq); in sba_tx_submit()
/Linux-v4.19/fs/nilfs2/
btree.c:1728 union nilfs_bmap_ptr_req *nreq, in nilfs_btree_prepare_convert_and_insert() argument
1751 if (nreq != NULL) { in nilfs_btree_prepare_convert_and_insert()
1752 nreq->bpr_ptr = dreq->bpr_ptr + 1; in nilfs_btree_prepare_convert_and_insert()
1753 ret = nilfs_bmap_prepare_alloc_ptr(btree, nreq, dat); in nilfs_btree_prepare_convert_and_insert()
1757 ret = nilfs_btree_get_new_block(btree, nreq->bpr_ptr, &bh); in nilfs_btree_prepare_convert_and_insert()
1770 nilfs_bmap_abort_alloc_ptr(btree, nreq, dat); in nilfs_btree_prepare_convert_and_insert()
1784 union nilfs_bmap_ptr_req *nreq, in nilfs_btree_commit_convert_and_insert() argument
1802 if (nreq != NULL) { in nilfs_btree_commit_convert_and_insert()
1804 nilfs_bmap_commit_alloc_ptr(btree, nreq, dat); in nilfs_btree_commit_convert_and_insert()
1820 tmpptr = nreq->bpr_ptr; in nilfs_btree_commit_convert_and_insert()
[all …]
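
In nilfs2, nreq is neither a count nor a cursor but a second block-pointer reservation: when converting a direct mapping to a btree, nreq->bpr_ptr is set to the pointer right after dreq's, both reservations are prepared up front, and a failure on the second unwinds the first via the abort helper (the commit half later finalizes both). A stand-alone model of the prepare/abort shape, with prepare_ptr()/abort_ptr() standing in for the nilfs_bmap_* helpers:

#include <stdio.h>

struct ptr_req { unsigned long ptr; };

static int prepare_ptr(struct ptr_req *req) { (void)req; return 0; }
static void abort_ptr(struct ptr_req *req) { (void)req; }

static int prepare_pair(struct ptr_req *dreq, struct ptr_req *nreq)
{
        int ret = prepare_ptr(dreq);

        if (ret < 0)
                return ret;

        if (nreq != NULL) {
                nreq->ptr = dreq->ptr + 1;      /* reserve the ptr after dreq's */
                ret = prepare_ptr(nreq);
                if (ret < 0)
                        abort_ptr(dreq);        /* unwind the first reservation */
        }
        return ret;
}

int main(void)
{
        struct ptr_req d = { .ptr = 42 }, n = { 0 };

        return prepare_pair(&d, &n);
}
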
/Linux-v4.19/drivers/infiniband/hw/hns/
hns_roce_hw_v2.c:195 int nreq; in hns_roce_v2_post_send() local
217 for (nreq = 0; wr; ++nreq, wr = wr->next) { in hns_roce_v2_post_send()
218 if (hns_roce_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) { in hns_roce_v2_post_send()
233 qp->sq.wrid[(qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1)] = in hns_roce_v2_post_send()
237 ~(((qp->sq.head + nreq) >> ilog2(qp->sq.wqe_cnt)) & 0x1); in hns_roce_v2_post_send()
512 if (likely(nreq)) { in hns_roce_v2_post_send()
513 qp->sq.head += nreq; in hns_roce_v2_post_send()
568 int nreq; in hns_roce_v2_post_recv() local
581 for (nreq = 0; wr; ++nreq, wr = wr->next) { in hns_roce_v2_post_recv()
582 if (hns_roce_wq_overflow(&hr_qp->rq, nreq, in hns_roce_v2_post_recv()
[all …]
hns_roce_hw_v1.c:77 int nreq = 0; in hns_roce_v1_post_send() local
92 for (nreq = 0; wr; ++nreq, wr = wr->next) { in hns_roce_v1_post_send()
93 if (hns_roce_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) { in hns_roce_v1_post_send()
108 qp->sq.wrid[(qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1)] = in hns_roce_v1_post_send()
320 if (likely(nreq)) { in hns_roce_v1_post_send()
321 qp->sq.head += nreq; in hns_roce_v1_post_send()
355 int nreq = 0; in hns_roce_v1_post_recv() local
371 for (nreq = 0; wr; ++nreq, wr = wr->next) { in hns_roce_v1_post_recv()
372 if (hns_roce_wq_overflow(&hr_qp->rq, nreq, in hns_roce_v1_post_recv()
405 if (likely(nreq)) { in hns_roce_v1_post_recv()
[all …]
hns_roce_qp.c:1086 bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, int nreq, in hns_roce_wq_overflow() argument
1093 if (likely(cur + nreq < hr_wq->max_post)) in hns_roce_wq_overflow()
1101 return cur + nreq >= hr_wq->max_post; in hns_roce_wq_overflow()
hns_roce_device.h:993 bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, int nreq,
/Linux-v4.19/drivers/net/ethernet/mellanox/mlx4/
main.c:2927 int nreq = min3(dev->caps.num_ports * in mlx4_enable_msi_x() local
2933 nreq = min_t(int, nreq, msi_x); in mlx4_enable_msi_x()
2935 entries = kcalloc(nreq, sizeof(*entries), GFP_KERNEL); in mlx4_enable_msi_x()
2939 for (i = 0; i < nreq; ++i) in mlx4_enable_msi_x()
2942 nreq = pci_enable_msix_range(dev->persist->pdev, entries, 2, in mlx4_enable_msi_x()
2943 nreq); in mlx4_enable_msi_x()
2945 if (nreq < 0 || nreq < MLX4_EQ_ASYNC) { in mlx4_enable_msi_x()
2950 dev->caps.num_comp_vectors = nreq - 1; in mlx4_enable_msi_x()
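
In mlx4_enable_msi_x(), nreq is the negotiated MSI-X vector count: it starts as a min3() of what the ports want, what the device exposes, and a module cap, and pci_enable_msix_range() may grant anything between 2 and nreq, returning the actual number (one vector is then set aside for async events, hence num_comp_vectors = nreq - 1). A minimal sketch of that negotiation, assuming only a bare struct pci_dev; in the real driver "entries" stays allocated while the vectors are in use:

#include <linux/pci.h>
#include <linux/slab.h>

static int my_enable_msix(struct pci_dev *pdev, int nreq)
{
        struct msix_entry *entries;
        int i;

        entries = kcalloc(nreq, sizeof(*entries), GFP_KERNEL);
        if (!entries)
                return -ENOMEM;

        for (i = 0; i < nreq; i++)
                entries[i].entry = i;   /* ask for vectors 0..nreq-1 */

        /* Grants anything in [2, nreq], or returns a negative errno. */
        nreq = pci_enable_msix_range(pdev, entries, 2, nreq);
        if (nreq < 0)
                kfree(entries);

        return nreq;
}
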
/Linux-v4.19/arch/ia64/kernel/
perfmon.c:3380 pfm_mod_write_pmcs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs) in pfm_mod_write_pmcs() argument
3396 return pfm_write_pmcs(ctx, req, nreq, regs); in pfm_mod_write_pmcs()
3401 pfm_mod_read_pmds(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs) in pfm_mod_read_pmds() argument
3417 return pfm_read_pmds(ctx, req, nreq, regs); in pfm_mod_read_pmds()
3881 pfm_mod_write_ibrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs) in pfm_mod_write_ibrs() argument
3897 return pfm_write_ibrs(ctx, req, nreq, regs); in pfm_mod_write_ibrs()
3902 pfm_mod_write_dbrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs) in pfm_mod_write_dbrs() argument
3918 return pfm_write_dbrs(ctx, req, nreq, regs); in pfm_mod_write_dbrs()
/Linux-v4.19/drivers/usb/isp1760/
isp1760-udc.c:772 struct isp1760_request *req, *nreq; in isp1760_ep_disable() local
798 list_for_each_entry_safe(req, nreq, &req_list, queue) { in isp1760_ep_disable()
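
Not every nreq is a count: in the isp1760 hit (and in replay_unsafe_requests() in the ceph hit further down) it is the look-ahead cursor of list_for_each_entry_safe(), holding the next node so the current one can be unlinked mid-walk. A sketch with a hypothetical request type:

#include <linux/list.h>
#include <linux/slab.h>

/* Sketch; my_request is hypothetical, the iteration idiom is the one
 * used in isp1760_ep_disable(). */
struct my_request {
        struct list_head queue;
};

static void drain_requests(struct list_head *req_list)
{
        struct my_request *req, *nreq;

        list_for_each_entry_safe(req, nreq, req_list, queue) {
                /* Safe to delete: nreq was read before req is unlinked. */
                list_del(&req->queue);
                kfree(req);
        }
}
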
/Linux-v4.19/drivers/infiniband/sw/rdmavt/
qp.c:1901 unsigned nreq = 0; in rvt_post_send() local
1928 nreq++; in rvt_post_send()
1932 if (nreq) { in rvt_post_send()
/Linux-v4.19/drivers/net/ethernet/neterion/vxge/
vxge-config.c:2324 u32 nreq = 0, i; in __vxge_hw_blockpool_blocks_add() local
2328 nreq = VXGE_HW_INCR_DMA_BLOCK_POOL_SIZE; in __vxge_hw_blockpool_blocks_add()
2329 blockpool->req_out += nreq; in __vxge_hw_blockpool_blocks_add()
2332 for (i = 0; i < nreq; i++) in __vxge_hw_blockpool_blocks_add()
/Linux-v4.19/fs/ceph/
mds_client.c:2898 struct ceph_mds_request *req, *nreq; in replay_unsafe_requests() local
2905 list_for_each_entry_safe(req, nreq, &session->s_unsafe, r_unsafe_item) { in replay_unsafe_requests()
/Linux-v4.19/drivers/nvme/host/
fc.c:84 struct nvme_request nreq; /* member