
Searched refs:wc (Results 1 – 25 of 204) sorted by relevance


/Linux-v5.4/drivers/md/
dm-writecache.c
98 #define WC_MODE_PMEM(wc) ((wc)->pmem_mode) argument
99 #define WC_MODE_FUA(wc) ((wc)->writeback_fua) argument
101 #define WC_MODE_PMEM(wc) false argument
102 #define WC_MODE_FUA(wc) false argument
104 #define WC_MODE_SORT_FREELIST(wc) (!WC_MODE_PMEM(wc)) argument
190 struct dm_writecache *wc; member
199 struct dm_writecache *wc; member
208 static void wc_lock(struct dm_writecache *wc) in wc_lock() argument
210 mutex_lock(&wc->lock); in wc_lock()
213 static void wc_unlock(struct dm_writecache *wc) in wc_unlock() argument
[all …]
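The wc_lock()/wc_unlock() helpers above funnel every access to the writecache state through a single mutex. Below is a minimal userspace sketch of the same wrapper pattern, assuming pthreads and a hypothetical struct rather than the driver's own types:

#include <pthread.h>

/* Hypothetical stand-in for struct dm_writecache; only the lock matters here. */
struct writecache {
	pthread_mutex_t lock;
	/* ... cache state protected by lock ... */
};

/* Mirrors the wc_lock()/wc_unlock() wrappers: callers never touch the mutex
 * directly, which keeps the locking discipline in one place. */
static void wc_lock(struct writecache *wc)
{
	pthread_mutex_lock(&wc->lock);
}

static void wc_unlock(struct writecache *wc)
{
	pthread_mutex_unlock(&wc->lock);
}
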
/Linux-v5.4/include/math-emu/
op-common.h
27 #define _FP_DECL(wc, X) \ argument
29 _FP_FRAC_DECL_##wc(X)
36 #define _FP_UNPACK_CANONICAL(fs, wc, X) \ argument
42 _FP_FRAC_SLL_##wc(X, _FP_WORKBITS); \
48 if (_FP_FRAC_ZEROP_##wc(X)) \
54 _FP_FRAC_CLZ_##wc(_shift, X); \
56 _FP_FRAC_SLL_##wc(X, (_shift+_FP_WORKBITS)); \
69 if (_FP_FRAC_ZEROP_##wc(X)) \
89 #define _FP_PACK_CANONICAL(fs, wc, X) \ argument
97 _FP_ROUND(wc, X); \
[all …]
soft-fp.h
135 #define _FP_ROUND_NEAREST(wc, X) \ argument
137 if ((_FP_FRAC_LOW_##wc(X) & 15) != _FP_WORK_ROUND) \
138 _FP_FRAC_ADDI_##wc(X, _FP_WORK_ROUND); \
141 #define _FP_ROUND_ZERO(wc, X) (void)0 argument
143 #define _FP_ROUND_PINF(wc, X) \ argument
145 if (!X##_s && (_FP_FRAC_LOW_##wc(X) & 7)) \
146 _FP_FRAC_ADDI_##wc(X, _FP_WORK_LSB); \
149 #define _FP_ROUND_MINF(wc, X) \ argument
151 if (X##_s && (_FP_FRAC_LOW_##wc(X) & 7)) \
152 _FP_FRAC_ADDI_##wc(X, _FP_WORK_LSB); \
[all …]
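The _FP_ROUND_* macros above add a rounding increment to the working fraction based on the rounding mode, the result sign, and the low "work" bits. A standalone sketch of the round-to-nearest-even case on a plain integer fraction, assuming soft-fp's three work bits; the names are illustrative, not soft-fp's:

#include <stdint.h>

#define WORK_BITS   3                        /* guard/round/sticky bits      */
#define WORK_LSB    (1u << WORK_BITS)        /* one ULP of the kept fraction */
#define WORK_ROUND  (1u << (WORK_BITS - 1))  /* exactly halfway              */

/* Analogous to _FP_ROUND_NEAREST: add the half-ULP increment unless the value
 * sits exactly on a tie with an even kept LSB (low four bits == WORK_ROUND).
 * soft-fp drops the work bits later, in _FP_PACK_CANONICAL. */
static uint64_t round_nearest_even(uint64_t frac)
{
	if ((frac & 15) != WORK_ROUND)
		frac += WORK_ROUND;
	return frac;
}
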
/Linux-v5.4/drivers/infiniband/hw/cxgb3/
iwch_cq.c
36 struct iwch_qp *qhp, struct ib_wc *wc) in __iwch_poll_cq_one() argument
59 wc->wr_id = cookie; in __iwch_poll_cq_one()
60 wc->qp = qhp ? &qhp->ibqp : NULL; in __iwch_poll_cq_one()
61 wc->vendor_err = CQE_STATUS(cqe); in __iwch_poll_cq_one()
62 wc->wc_flags = 0; in __iwch_poll_cq_one()
72 wc->byte_len = CQE_LEN(cqe); in __iwch_poll_cq_one()
74 wc->byte_len = 0; in __iwch_poll_cq_one()
75 wc->opcode = IB_WC_RECV; in __iwch_poll_cq_one()
78 wc->ex.invalidate_rkey = CQE_WRID_STAG(cqe); in __iwch_poll_cq_one()
79 wc->wc_flags |= IB_WC_WITH_INVALIDATE; in __iwch_poll_cq_one()
[all …]
/Linux-v5.4/drivers/infiniband/hw/mlx4/
cq.c
511 struct ib_wc *wc) in mlx4_ib_handle_error_cqe() argument
525 wc->status = IB_WC_LOC_LEN_ERR; in mlx4_ib_handle_error_cqe()
528 wc->status = IB_WC_LOC_QP_OP_ERR; in mlx4_ib_handle_error_cqe()
531 wc->status = IB_WC_LOC_PROT_ERR; in mlx4_ib_handle_error_cqe()
534 wc->status = IB_WC_WR_FLUSH_ERR; in mlx4_ib_handle_error_cqe()
537 wc->status = IB_WC_MW_BIND_ERR; in mlx4_ib_handle_error_cqe()
540 wc->status = IB_WC_BAD_RESP_ERR; in mlx4_ib_handle_error_cqe()
543 wc->status = IB_WC_LOC_ACCESS_ERR; in mlx4_ib_handle_error_cqe()
546 wc->status = IB_WC_REM_INV_REQ_ERR; in mlx4_ib_handle_error_cqe()
549 wc->status = IB_WC_REM_ACCESS_ERR; in mlx4_ib_handle_error_cqe()
[all …]
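mlx4_ib_handle_error_cqe() above is essentially a switch that translates the hardware's error syndrome into the generic IB_WC_* status reported in the ib_wc. A self-contained sketch of that translation shape; the syndrome values below are invented for illustration, only the structure follows the driver:

/* Generic completion statuses, loosely modelled on enum ib_wc_status. */
enum wc_status {
	WC_SUCCESS,
	WC_LOC_LEN_ERR,
	WC_LOC_PROT_ERR,
	WC_WR_FLUSH_ERR,
	WC_GENERAL_ERR,
};

/* Hypothetical hardware syndromes, for the sketch only. */
enum hw_syndrome {
	HW_LOCAL_LENGTH_ERR = 1,
	HW_LOCAL_PROT_ERR   = 4,
	HW_WR_FLUSH_ERR     = 5,
};

static enum wc_status map_error_syndrome(unsigned int syndrome)
{
	switch (syndrome) {
	case HW_LOCAL_LENGTH_ERR: return WC_LOC_LEN_ERR;
	case HW_LOCAL_PROT_ERR:   return WC_LOC_PROT_ERR;
	case HW_WR_FLUSH_ERR:     return WC_WR_FLUSH_ERR;
	default:                  return WC_GENERAL_ERR;
	}
}
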
/Linux-v5.4/fs/ocfs2/
aops.c
803 static void ocfs2_unlock_pages(struct ocfs2_write_ctxt *wc) in ocfs2_unlock_pages() argument
812 if (wc->w_target_locked) { in ocfs2_unlock_pages()
813 BUG_ON(!wc->w_target_page); in ocfs2_unlock_pages()
814 for (i = 0; i < wc->w_num_pages; i++) { in ocfs2_unlock_pages()
815 if (wc->w_target_page == wc->w_pages[i]) { in ocfs2_unlock_pages()
816 wc->w_pages[i] = NULL; in ocfs2_unlock_pages()
820 mark_page_accessed(wc->w_target_page); in ocfs2_unlock_pages()
821 put_page(wc->w_target_page); in ocfs2_unlock_pages()
823 ocfs2_unlock_and_free_pages(wc->w_pages, wc->w_num_pages); in ocfs2_unlock_pages()
842 struct ocfs2_write_ctxt *wc) in ocfs2_free_write_ctxt() argument
[all …]
/Linux-v5.4/drivers/net/ethernet/brocade/bna/
bfa_cs.h
73 bfa_wc_up(struct bfa_wc *wc) in bfa_wc_up() argument
75 wc->wc_count++; in bfa_wc_up()
79 bfa_wc_down(struct bfa_wc *wc) in bfa_wc_down() argument
81 wc->wc_count--; in bfa_wc_down()
82 if (wc->wc_count == 0) in bfa_wc_down()
83 wc->wc_resume(wc->wc_cbarg); in bfa_wc_down()
88 bfa_wc_init(struct bfa_wc *wc, bfa_wc_resume_t wc_resume, void *wc_cbarg) in bfa_wc_init() argument
90 wc->wc_resume = wc_resume; in bfa_wc_init()
91 wc->wc_cbarg = wc_cbarg; in bfa_wc_init()
92 wc->wc_count = 0; in bfa_wc_init()
[all …]
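bfa_wc is a small wait counter: each outstanding operation bumps the count with bfa_wc_up(), each completion drops it with bfa_wc_down(), and the resume callback fires once the count returns to zero. A userspace sketch of the same pattern, with illustrative names rather than the driver's:

#include <stdio.h>

typedef void (*wc_resume_t)(void *cbarg);

struct wait_counter {
	wc_resume_t resume;   /* called when the count returns to zero */
	void       *cbarg;
	int         count;
};

static void wc_init(struct wait_counter *wc, wc_resume_t resume, void *cbarg)
{
	wc->resume = resume;
	wc->cbarg  = cbarg;
	wc->count  = 0;
}

static void wc_up(struct wait_counter *wc)
{
	wc->count++;
}

static void wc_down(struct wait_counter *wc)
{
	if (--wc->count == 0)
		wc->resume(wc->cbarg);
}

static void all_done(void *arg)
{
	printf("%s complete\n", (const char *)arg);
}

int main(void)
{
	struct wait_counter wc;

	wc_init(&wc, all_done, "teardown");
	wc_up(&wc);
	wc_up(&wc);     /* two operations outstanding */
	wc_down(&wc);
	wc_down(&wc);   /* count hits zero: prints "teardown complete" */
	return 0;
}

The identical up/down/resume pattern appears again in drivers/scsi/bfa/bfa_cs.h further down this listing.
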
/Linux-v5.4/drivers/infiniband/hw/mlx5/
cq.c
116 static void handle_good_req(struct ib_wc *wc, struct mlx5_cqe64 *cqe, in handle_good_req() argument
119 wc->wc_flags = 0; in handle_good_req()
122 wc->wc_flags |= IB_WC_WITH_IMM; in handle_good_req()
125 wc->opcode = IB_WC_RDMA_WRITE; in handle_good_req()
128 wc->wc_flags |= IB_WC_WITH_IMM; in handle_good_req()
132 wc->opcode = IB_WC_SEND; in handle_good_req()
135 wc->opcode = IB_WC_RDMA_READ; in handle_good_req()
136 wc->byte_len = be32_to_cpu(cqe->byte_cnt); in handle_good_req()
139 wc->opcode = IB_WC_COMP_SWAP; in handle_good_req()
140 wc->byte_len = 8; in handle_good_req()
[all …]
gsi.c
37 struct ib_wc wc; member
88 WARN_ON_ONCE(mlx5_ib_generate_wc(gsi_cq, &wr->wc)); in generate_completions()
96 static void handle_single_completion(struct ib_cq *cq, struct ib_wc *wc) in handle_single_completion() argument
100 container_of(wc->wr_cqe, struct mlx5_ib_gsi_wr, cqe); in handle_single_completion()
106 wr_id = wr->wc.wr_id; in handle_single_completion()
107 wr->wc = *wc; in handle_single_completion()
108 wr->wc.wr_id = wr_id; in handle_single_completion()
109 wr->wc.qp = &gsi->ibqp; in handle_single_completion()
415 struct ib_ud_wr *wr, struct ib_wc *wc) in mlx5_ib_add_outstanding_wr() argument
429 if (!wc) { in mlx5_ib_add_outstanding_wr()
[all …]
/Linux-v5.4/drivers/infiniband/sw/siw/
siw_cq.c
48 int siw_reap_cqe(struct siw_cq *cq, struct ib_wc *wc) in siw_reap_cqe() argument
57 memset(wc, 0, sizeof(*wc)); in siw_reap_cqe()
58 wc->wr_id = cqe->id; in siw_reap_cqe()
59 wc->status = map_cqe_status[cqe->status].ib; in siw_reap_cqe()
60 wc->opcode = map_wc_opcode[cqe->opcode]; in siw_reap_cqe()
61 wc->byte_len = cqe->bytes; in siw_reap_cqe()
70 wc->ex.invalidate_rkey = cqe->inval_stag; in siw_reap_cqe()
71 wc->wc_flags = IB_WC_WITH_INVALIDATE; in siw_reap_cqe()
73 wc->qp = cqe->base_qp; in siw_reap_cqe()
98 struct ib_wc wc; in siw_cq_flush() local
[all …]
/Linux-v5.4/drivers/infiniband/hw/qib/
qib_ud.c
62 struct ib_wc wc; in qib_ud_loopback() local
125 memset(&wc, 0, sizeof(wc)); in qib_ud_loopback()
126 wc.byte_len = length + sizeof(struct ib_grh); in qib_ud_loopback()
129 wc.wc_flags = IB_WC_WITH_IMM; in qib_ud_loopback()
130 wc.ex.imm_data = swqe->wr.ex.imm_data; in qib_ud_loopback()
155 if (unlikely(wc.byte_len > qp->r_len)) { in qib_ud_loopback()
168 wc.wc_flags |= IB_WC_GRH; in qib_ud_loopback()
201 wc.wr_id = qp->r_wr_id; in qib_ud_loopback()
202 wc.status = IB_WC_SUCCESS; in qib_ud_loopback()
203 wc.opcode = IB_WC_RECV; in qib_ud_loopback()
[all …]
qib_uc.c
245 struct ib_wc wc; in qib_uc_rcv() local
367 wc.ex.imm_data = ohdr->u.imm_data; in qib_uc_rcv()
369 wc.wc_flags = IB_WC_WITH_IMM; in qib_uc_rcv()
373 wc.ex.imm_data = 0; in qib_uc_rcv()
374 wc.wc_flags = 0; in qib_uc_rcv()
384 wc.byte_len = tlen + qp->r_rcv_len; in qib_uc_rcv()
385 if (unlikely(wc.byte_len > qp->r_len)) in qib_uc_rcv()
387 wc.opcode = IB_WC_RECV; in qib_uc_rcv()
391 wc.wr_id = qp->r_wr_id; in qib_uc_rcv()
392 wc.status = IB_WC_SUCCESS; in qib_uc_rcv()
[all …]
/Linux-v5.4/drivers/infiniband/sw/rdmavt/
trace_cq.h
112 TP_PROTO(struct rvt_cq *cq, struct ib_wc *wc, u32 idx),
113 TP_ARGS(cq, wc, idx),
127 __entry->wr_id = wc->wr_id;
128 __entry->status = wc->status;
129 __entry->opcode = wc->opcode;
130 __entry->length = wc->byte_len;
131 __entry->qpn = wc->qp->qp_num;
133 __entry->flags = wc->wc_flags;
134 __entry->imm = be32_to_cpu(wc->ex.imm_data);
152 TP_PROTO(struct rvt_cq *cq, struct ib_wc *wc, u32 idx),
[all …]
/Linux-v5.4/drivers/infiniband/hw/vmw_pvrdma/
pvrdma_cq.c
319 struct ib_wc *wc) in pvrdma_poll_one() argument
352 wc->opcode = pvrdma_wc_opcode_to_ib(cqe->opcode); in pvrdma_poll_one()
353 wc->status = pvrdma_wc_status_to_ib(cqe->status); in pvrdma_poll_one()
354 wc->wr_id = cqe->wr_id; in pvrdma_poll_one()
355 wc->qp = &(*cur_qp)->ibqp; in pvrdma_poll_one()
356 wc->byte_len = cqe->byte_len; in pvrdma_poll_one()
357 wc->ex.imm_data = cqe->imm_data; in pvrdma_poll_one()
358 wc->src_qp = cqe->src_qp; in pvrdma_poll_one()
359 wc->wc_flags = pvrdma_wc_flags_to_ib(cqe->wc_flags); in pvrdma_poll_one()
360 wc->pkey_index = cqe->pkey_index; in pvrdma_poll_one()
[all …]
/Linux-v5.4/fs/ntfs/
unistr.c
250 wchar_t wc; in ntfs_nlstoucs() local
259 &wc); in ntfs_nlstoucs()
262 if (likely(wc)) { in ntfs_nlstoucs()
263 ucs[o++] = cpu_to_le16(wc); in ntfs_nlstoucs()
327 int i, o, ns_len, wc; in ntfs_ucstonls() local
334 wc = -ENAMETOOLONG; in ntfs_ucstonls()
344 retry: wc = nls->uni2char(le16_to_cpu(ins[i]), ns + o, in ntfs_ucstonls()
346 if (wc > 0) { in ntfs_ucstonls()
347 o += wc; in ntfs_ucstonls()
349 } else if (!wc) in ntfs_ucstonls()
[all …]
/Linux-v5.4/drivers/scsi/bfa/
bfa_cs.h
253 bfa_wc_up(struct bfa_wc_s *wc) in bfa_wc_up() argument
255 wc->wc_count++; in bfa_wc_up()
259 bfa_wc_down(struct bfa_wc_s *wc) in bfa_wc_down() argument
261 wc->wc_count--; in bfa_wc_down()
262 if (wc->wc_count == 0) in bfa_wc_down()
263 wc->wc_resume(wc->wc_cbarg); in bfa_wc_down()
270 bfa_wc_init(struct bfa_wc_s *wc, bfa_wc_resume_t wc_resume, void *wc_cbarg) in bfa_wc_init() argument
272 wc->wc_resume = wc_resume; in bfa_wc_init()
273 wc->wc_cbarg = wc_cbarg; in bfa_wc_init()
274 wc->wc_count = 0; in bfa_wc_init()
[all …]
/Linux-v5.4/drivers/infiniband/hw/hfi1/
ud.c
83 struct ib_wc wc; in ud_loopback() local
149 memset(&wc, 0, sizeof(wc)); in ud_loopback()
150 wc.byte_len = length + sizeof(struct ib_grh); in ud_loopback()
153 wc.wc_flags = IB_WC_WITH_IMM; in ud_loopback()
154 wc.ex.imm_data = swqe->wr.ex.imm_data; in ud_loopback()
179 if (unlikely(wc.byte_len > qp->r_len)) { in ud_loopback()
216 wc.wc_flags |= IB_WC_GRH; in ud_loopback()
235 wc.wr_id = qp->r_wr_id; in ud_loopback()
236 wc.status = IB_WC_SUCCESS; in ud_loopback()
237 wc.opcode = IB_WC_RECV; in ud_loopback()
[all …]
uc.c
316 struct ib_wc wc; in hfi1_uc_rcv() local
435 wc.ex.imm_data = ohdr->u.imm_data; in hfi1_uc_rcv()
436 wc.wc_flags = IB_WC_WITH_IMM; in hfi1_uc_rcv()
440 wc.ex.imm_data = 0; in hfi1_uc_rcv()
441 wc.wc_flags = 0; in hfi1_uc_rcv()
449 wc.byte_len = tlen + qp->r_rcv_len; in hfi1_uc_rcv()
450 if (unlikely(wc.byte_len > qp->r_len)) in hfi1_uc_rcv()
452 wc.opcode = IB_WC_RECV; in hfi1_uc_rcv()
456 wc.wr_id = qp->r_wr_id; in hfi1_uc_rcv()
457 wc.status = IB_WC_SUCCESS; in hfi1_uc_rcv()
[all …]
/Linux-v5.4/arch/c6x/platforms/
cache.c
138 unsigned int wc = 0; in cache_block_operation() local
140 for (; wcnt; wcnt -= wc, start += wc) { in cache_block_operation()
160 wc = 0xffff; in cache_block_operation()
162 wc = wcnt; in cache_block_operation()
165 imcr_set(wc_reg, wc & 0xffff); in cache_block_operation()
183 unsigned int wc = 0; in cache_block_operation_nowait() local
185 for (; wcnt; wcnt -= wc, start += wc) { in cache_block_operation_nowait()
192 wc = 0xffff; in cache_block_operation_nowait()
194 wc = wcnt; in cache_block_operation_nowait()
197 imcr_set(wc_reg, wc & 0xffff); in cache_block_operation_nowait()
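Both cache_block_operation() variants above issue the work in chunks, evidently because the word-count register written via imcr_set() holds only 16 bits: each pass programs min(remaining, 0xffff) words and advances by the chunk just issued. A minimal sketch of that chunking loop, with a printf stub standing in for the real register write:

#include <stdio.h>

/* Stub for the register write done via imcr_set() in cache.c. */
static void program_block(unsigned int start, unsigned int words)
{
	printf("start=0x%x words=%u\n", start, words);
}

/* Process `wcnt` words in chunks of at most 0xffff, mirroring the loop
 * structure of cache_block_operation(). */
static void block_operation(unsigned int start, unsigned int wcnt)
{
	unsigned int wc = 0;

	for (; wcnt; wcnt -= wc, start += wc) {
		wc = (wcnt > 0xffff) ? 0xffff : wcnt;
		program_block(start, wc & 0xffff);
	}
}
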
/Linux-v5.4/net/smc/
smc_wr.c
64 static inline void smc_wr_tx_process_cqe(struct ib_wc *wc) in smc_wr_tx_process_cqe() argument
71 link = wc->qp->qp_context; in smc_wr_tx_process_cqe()
73 if (wc->opcode == IB_WC_REG_MR) { in smc_wr_tx_process_cqe()
74 if (wc->status) in smc_wr_tx_process_cqe()
82 pnd_snd_idx = smc_wr_tx_find_pending_index(link, wc->wr_id); in smc_wr_tx_process_cqe()
85 link->wr_tx_pends[pnd_snd_idx].wc_status = wc->status; in smc_wr_tx_process_cqe()
94 if (wc->status) { in smc_wr_tx_process_cqe()
107 pnd_snd.handler(&pnd_snd.priv, link, wc->status); in smc_wr_tx_process_cqe()
114 struct ib_wc wc[SMC_WR_MAX_POLL_CQE]; in smc_wr_tx_tasklet_fn() local
121 memset(&wc, 0, sizeof(wc)); in smc_wr_tx_tasklet_fn()
[all …]
/Linux-v5.4/drivers/infiniband/ulp/iser/
iser_initiator.c
558 void iser_login_rsp(struct ib_cq *cq, struct ib_wc *wc) in iser_login_rsp() argument
560 struct ib_conn *ib_conn = wc->qp->qp_context; in iser_login_rsp()
562 struct iser_login_desc *desc = iser_login(wc->wr_cqe); in iser_login_rsp()
567 if (unlikely(wc->status != IB_WC_SUCCESS)) { in iser_login_rsp()
568 iser_err_comp(wc, "login_rsp"); in iser_login_rsp()
578 length = wc->byte_len - ISER_HEADERS_LEN; in iser_login_rsp()
608 struct ib_wc *wc, in iser_check_remote_inv() argument
611 if (wc->wc_flags & IB_WC_WITH_INVALIDATE) { in iser_check_remote_inv()
613 u32 rkey = wc->ex.invalidate_rkey; in iser_check_remote_inv()
650 void iser_task_rsp(struct ib_cq *cq, struct ib_wc *wc) in iser_task_rsp() argument
[all …]
/Linux-v5.4/drivers/infiniband/core/
cq.c
81 struct ib_wc *wc = &wcs[i]; in __ib_process_cq() local
83 if (wc->wr_cqe) in __ib_process_cq()
84 wc->wr_cqe->done(cq, wc); in __ib_process_cq()
86 WARN_ON_ONCE(wc->status == IB_WC_SUCCESS); in __ib_process_cq()
131 completed = __ib_process_cq(cq, budget, cq->wc, IB_POLL_BATCH); in ib_poll_handler()
154 completed = __ib_process_cq(cq, IB_POLL_BUDGET_WORKQUEUE, cq->wc, in ib_cq_poll_work()
204 cq->wc = kmalloc_array(IB_POLL_BATCH, sizeof(*cq->wc), GFP_KERNEL); in __ib_alloc_cq_user()
205 if (!cq->wc) in __ib_alloc_cq_user()
248 kfree(cq->wc); in __ib_alloc_cq_user()
312 kfree(cq->wc); in ib_free_cq_user()
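__ib_process_cq() above shows the dispatch pattern behind the modern CQ API: each polled ib_wc carries a wr_cqe pointer, and the completion is delivered by invoking its done() callback; a completion without one is only expected to be an error or flush, hence the WARN_ON_ONCE. A standalone sketch of that callback-dispatch idea, with illustrative types rather than the kernel's:

#include <stddef.h>
#include <stdio.h>

/* One polled completion: the work-request context knows how to finish itself. */
struct cqe {
	void (*done)(struct cqe *cqe);
	int status;                    /* 0 on success, nonzero on error */
};

static void process_completions(struct cqe **polled, size_t n)
{
	for (size_t i = 0; i < n; i++) {
		struct cqe *cqe = polled[i];

		if (cqe->done)
			cqe->done(cqe);    /* like wc->wr_cqe->done(cq, wc) */
		else if (cqe->status == 0)
			fprintf(stderr, "successful completion without a handler\n");
	}
}
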
/Linux-v5.4/sound/isa/wavefront/
wavefront_synth.c
1501 wavefront_control *wc) in wavefront_synth_control() argument
1509 "cmd 0x%x\n", wc->cmd); in wavefront_synth_control()
1513 switch (wc->cmd) { in wavefront_synth_control()
1528 wc->rbuf[0] = dev->interrupts_are_midi; in wavefront_synth_control()
1532 dev->rom_samples_rdonly = wc->wbuf[0]; in wavefront_synth_control()
1533 wc->status = 0; in wavefront_synth_control()
1537 i = wc->wbuf[0] | (wc->wbuf[1] << 7); in wavefront_synth_control()
1541 wc->status = EINVAL; in wavefront_synth_control()
1544 wc->rbuf[0] = dev->sample_status[i]; in wavefront_synth_control()
1545 wc->status = 0; in wavefront_synth_control()
[all …]
/Linux-v5.4/drivers/infiniband/hw/cxgb4/
cq.c
755 struct ib_wc *wc, struct c4iw_srq *srq) in __c4iw_poll_cq_one() argument
769 wc->wr_id = cookie; in __c4iw_poll_cq_one()
770 wc->qp = qhp ? &qhp->ibqp : NULL; in __c4iw_poll_cq_one()
771 wc->vendor_err = CQE_STATUS(&cqe); in __c4iw_poll_cq_one()
772 wc->wc_flags = 0; in __c4iw_poll_cq_one()
790 wc->byte_len = CQE_LEN(&cqe); in __c4iw_poll_cq_one()
792 wc->byte_len = 0; in __c4iw_poll_cq_one()
796 wc->opcode = IB_WC_RECV; in __c4iw_poll_cq_one()
800 wc->opcode = IB_WC_RECV; in __c4iw_poll_cq_one()
801 wc->ex.invalidate_rkey = CQE_WRID_STAG(&cqe); in __c4iw_poll_cq_one()
[all …]
/Linux-v5.4/fs/btrfs/
extent-tree.c
4578 struct walk_control *wc, in reada_walk_down() argument
4593 if (path->slots[wc->level] < wc->reada_slot) { in reada_walk_down()
4594 wc->reada_count = wc->reada_count * 2 / 3; in reada_walk_down()
4595 wc->reada_count = max(wc->reada_count, 2); in reada_walk_down()
4597 wc->reada_count = wc->reada_count * 3 / 2; in reada_walk_down()
4598 wc->reada_count = min_t(int, wc->reada_count, in reada_walk_down()
4602 eb = path->nodes[wc->level]; in reada_walk_down()
4605 for (slot = path->slots[wc->level]; slot < nritems; slot++) { in reada_walk_down()
4606 if (nread >= wc->reada_count) in reada_walk_down()
4613 if (slot == path->slots[wc->level]) in reada_walk_down()
[all …]
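reada_walk_down() above adapts wc->reada_count on every pass: it backs off to two thirds (never below 2) while the walker is still behind the last readahead slot, and otherwise grows the batch by half up to a per-block cap. A small sketch of that adjustment; the cap parameter stands in for the BTRFS_NODEPTRS_PER_BLOCK() limit used by the real code:

/* Adjust the readahead batch size the way reada_walk_down() does. */
static int adjust_reada_count(int reada_count, int still_behind, int cap)
{
	if (still_behind) {
		reada_count = reada_count * 2 / 3;
		if (reada_count < 2)
			reada_count = 2;
	} else {
		reada_count = reada_count * 3 / 2;
		if (reada_count > cap)
			reada_count = cap;
	}
	return reada_count;
}
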
