/Linux-v5.4/drivers/media/dvb-frontends/dib3000mb.c

    148  wr(DIB3000MB_REG_LOCK1_MASK, DIB3000MB_LOCK1_SEARCH_4);   in dib3000mb_set_frontend()
    153  wr(DIB3000MB_REG_FFT, DIB3000_TRANSMISSION_MODE_2K);   in dib3000mb_set_frontend()
    157  wr(DIB3000MB_REG_FFT, DIB3000_TRANSMISSION_MODE_8K);   in dib3000mb_set_frontend()
    169  wr(DIB3000MB_REG_GUARD_TIME, DIB3000_GUARD_TIME_1_32);   in dib3000mb_set_frontend()
    173  wr(DIB3000MB_REG_GUARD_TIME, DIB3000_GUARD_TIME_1_16);   in dib3000mb_set_frontend()
    177  wr(DIB3000MB_REG_GUARD_TIME, DIB3000_GUARD_TIME_1_8);   in dib3000mb_set_frontend()
    181  wr(DIB3000MB_REG_GUARD_TIME, DIB3000_GUARD_TIME_1_4);   in dib3000mb_set_frontend()
    193  wr(DIB3000MB_REG_DDS_INV, DIB3000_DDS_INVERSION_OFF);   in dib3000mb_set_frontend()
    200  wr(DIB3000MB_REG_DDS_INV, DIB3000_DDS_INVERSION_ON);   in dib3000mb_set_frontend()
    209  wr(DIB3000MB_REG_QAM, DIB3000_CONSTELLATION_QPSK);   in dib3000mb_set_frontend()
    [all …]

/Linux-v5.4/lib/decompress_unlzma.c

    292  static inline size_t INIT get_pos(struct writer *wr)   in get_pos() argument
    295  wr->global_pos + wr->buffer_pos;   in get_pos()
    298  static inline uint8_t INIT peek_old_byte(struct writer *wr,   in peek_old_byte() argument
    301  if (!wr->flush) {   in peek_old_byte()
    303  while (offs > wr->header->dict_size)   in peek_old_byte()
    304  offs -= wr->header->dict_size;   in peek_old_byte()
    305  pos = wr->buffer_pos - offs;   in peek_old_byte()
    306  return wr->buffer[pos];   in peek_old_byte()
    308  uint32_t pos = wr->buffer_pos - offs;   in peek_old_byte()
    309  while (pos >= wr->header->dict_size)   in peek_old_byte()
    [all …]

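The peek_old_byte() hits above show the LZMA decompressor looking back into a circular dictionary: when a back-reference offset reaches behind the current buffer position, the offset (or the resulting index) is reduced modulo dict_size so the lookup wraps around the dictionary. A minimal user-space sketch of that wrap-around lookback; struct dict, dict_peek() and the fixed DICT_SIZE are illustrative stand-ins, not taken from decompress_unlzma.c:

    #include <stdint.h>
    #include <stdio.h>

    #define DICT_SIZE 8u    /* tiny dictionary so the wrap-around is easy to see */

    struct dict {
        uint8_t  buf[DICT_SIZE];   /* circular dictionary of recent output bytes */
        uint32_t pos;              /* next write index within the dictionary */
    };

    /* Look "offs" bytes back from the current position, wrapping inside the
     * dictionary; this mirrors the reduction loop in peek_old_byte(). */
    static uint8_t dict_peek(const struct dict *d, uint32_t offs)
    {
        uint32_t pos = d->pos - offs;   /* may underflow when offs > pos ... */

        while (pos >= DICT_SIZE)        /* ... in which case this wraps it back in range */
            pos += DICT_SIZE;
        return d->buf[pos];
    }

    int main(void)
    {
        /* Pretend the dictionary has already wrapped once, so indices 3..7 hold older output. */
        struct dict d = {
            .buf = { 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H' },
            .pos = 3,
        };

        /* 2 bytes back from position 3 is 'B'; 4 bytes back wraps around to 'H'. */
        printf("%c %c\n", dict_peek(&d, 2), dict_peek(&d, 4));
        return 0;
    }
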
/Linux-v5.4/tools/bpf/bpftool/json_writer.c

    308  json_writer_t *wr = jsonw_new(stdout);   in main() local
    310  jsonw_start_object(wr);   in main()
    311  jsonw_pretty(wr, true);   in main()
    312  jsonw_name(wr, "Vyatta");   in main()
    313  jsonw_start_object(wr);   in main()
    314  jsonw_string_field(wr, "url", "http://vyatta.com");   in main()
    315  jsonw_uint_field(wr, "downloads", 2000000ul);   in main()
    316  jsonw_float_field(wr, "stock", 8.16);   in main()
    318  jsonw_name(wr, "ARGV");   in main()
    319  jsonw_start_array(wr);   in main()
    [all …]

/Linux-v5.4/fs/orangefs/inode.c

    22  struct orangefs_write_range *wr = NULL;   in orangefs_writepage_locked() local
    33  wr = (struct orangefs_write_range *)page_private(page);   in orangefs_writepage_locked()
    34  WARN_ON(wr->pos >= len);   in orangefs_writepage_locked()
    35  off = wr->pos;   in orangefs_writepage_locked()
    36  if (off + wr->len > len)   in orangefs_writepage_locked()
    39  wlen = wr->len;   in orangefs_writepage_locked()
    58  len, wr, NULL);   in orangefs_writepage_locked()
    65  if (wr) {   in orangefs_writepage_locked()
    66  kfree(wr);   in orangefs_writepage_locked()
    98  struct orangefs_write_range *wrp, wr;   in orangefs_writepages_work() local
    [all …]

/Linux-v5.4/include/trace/events/ib_mad.h

    24  TP_PROTO(struct ib_mad_send_wr_private *wr,
    26  TP_ARGS(wr, qp_info),
    56  __entry->dev_index = wr->mad_agent_priv->agent.device->index;
    57  __entry->port_num = wr->mad_agent_priv->agent.port_num;
    58  __entry->qp_num = wr->mad_agent_priv->qp_info->qp->qp_num;
    59  __entry->agent_priv = wr->mad_agent_priv;
    60  __entry->wrtid = wr->tid;
    61  __entry->max_retries = wr->max_retries;
    62  __entry->retries_left = wr->retries_left;
    63  __entry->retry = wr->retry;
    [all …]

/Linux-v5.4/drivers/infiniband/core/rw.c

    75  reg->inv_wr.next = &reg->reg_wr.wr;   in rdma_rw_inv_key()
    106  reg->reg_wr.wr.opcode = IB_WR_REG_MR;   in rdma_rw_init_one_mr()
    146  prev->wr.wr.next = &reg->inv_wr;   in rdma_rw_init_mr_wrs()
    148  prev->wr.wr.next = &reg->reg_wr.wr;   in rdma_rw_init_mr_wrs()
    151  reg->reg_wr.wr.next = &reg->wr.wr;   in rdma_rw_init_mr_wrs()
    153  reg->wr.wr.sg_list = &reg->sge;   in rdma_rw_init_mr_wrs()
    154  reg->wr.wr.num_sge = 1;   in rdma_rw_init_mr_wrs()
    155  reg->wr.remote_addr = remote_addr;   in rdma_rw_init_mr_wrs()
    156  reg->wr.rkey = rkey;   in rdma_rw_init_mr_wrs()
    158  reg->wr.wr.opcode = IB_WR_RDMA_WRITE;   in rdma_rw_init_mr_wrs()
    [all …]

/Linux-v5.4/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c

    594  const struct ib_reg_wr *wr)   in set_reg_seg() argument
    596  struct pvrdma_user_mr *mr = to_vmr(wr->mr);   in set_reg_seg()
    598  wqe_hdr->wr.fast_reg.iova_start = mr->ibmr.iova;   in set_reg_seg()
    599  wqe_hdr->wr.fast_reg.pl_pdir_dma = mr->pdir.dir_dma;   in set_reg_seg()
    600  wqe_hdr->wr.fast_reg.page_shift = mr->page_shift;   in set_reg_seg()
    601  wqe_hdr->wr.fast_reg.page_list_len = mr->npages;   in set_reg_seg()
    602  wqe_hdr->wr.fast_reg.length = mr->ibmr.length;   in set_reg_seg()
    603  wqe_hdr->wr.fast_reg.access_flags = wr->access;   in set_reg_seg()
    604  wqe_hdr->wr.fast_reg.rkey = wr->key;   in set_reg_seg()
    618  int pvrdma_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,   in pvrdma_post_send() argument
    [all …]

/Linux-v5.4/drivers/infiniband/hw/cxgb3/iwch_qp.c

    42  static int build_rdma_send(union t3_wr *wqe, const struct ib_send_wr *wr,   in build_rdma_send() argument
    48  switch (wr->opcode) {   in build_rdma_send()
    50  if (wr->send_flags & IB_SEND_SOLICITED)   in build_rdma_send()
    57  if (wr->send_flags & IB_SEND_SOLICITED)   in build_rdma_send()
    61  wqe->send.rem_stag = cpu_to_be32(wr->ex.invalidate_rkey);   in build_rdma_send()
    66  if (wr->num_sge > T3_MAX_SGE)   in build_rdma_send()
    72  for (i = 0; i < wr->num_sge; i++) {   in build_rdma_send()
    73  if ((plen + wr->sg_list[i].length) < plen)   in build_rdma_send()
    76  plen += wr->sg_list[i].length;   in build_rdma_send()
    77  wqe->send.sgl[i].stag = cpu_to_be32(wr->sg_list[i].lkey);   in build_rdma_send()
    [all …]

/Linux-v5.4/tools/testing/selftests/breakpoints/breakpoint_test_arm64.c

    33  static void child(int size, int wr)   in child() argument
    35  volatile uint8_t *addr = &var[32 + wr];   in child()
    112  static bool run_test(int wr_size, int wp_size, int wr, int wp)   in run_test() argument
    125  child(wr_size, wr);   in run_test()
    204  int wr, wp, size;   in main() local
    215  for (wr = 0; wr <= 32; wr = wr + size) {   in main()
    216  for (wp = wr - size; wp <= wr + size; wp = wp + size) {   in main()
    217  result = run_test(size, MIN(size, 8), wr, wp);   in main()
    218  if ((result && wr == wp) ||   in main()
    219  (!result && wr != wp))   in main()
    [all …]

/Linux-v5.4/include/linux/hdlcdrv.h

    28  unsigned rd, wr;   member
    35  unsigned int wr;   member
    49  buf->buffer[buf->wr] = buf->shreg;   in hdlcdrv_add_bitbuffer()
    50  buf->wr = (buf->wr+1) % sizeof(buf->buffer);   in hdlcdrv_add_bitbuffer()
    58  buf->buffer[buf->wr] = bits & 0xff;   in hdlcdrv_add_bitbuffer_word()
    59  buf->wr = (buf->wr+1) % sizeof(buf->buffer);   in hdlcdrv_add_bitbuffer_word()
    60  buf->buffer[buf->wr] = (bits >> 8) & 0xff;   in hdlcdrv_add_bitbuffer_word()
    61  buf->wr = (buf->wr+1) % sizeof(buf->buffer);   in hdlcdrv_add_bitbuffer_word()
    164  ret = !((HDLCDRV_HDLCBUFFER - 1 + hb->rd - hb->wr) % HDLCDRV_HDLCBUFFER);   in hdlcdrv_hbuf_full()
    177  ret = (hb->rd == hb->wr);   in hdlcdrv_hbuf_empty()
    [all …]

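The hdlcdrv.h hits show a classic two-index ring buffer: hdlcdrv_add_bitbuffer() stores at the wr index and advances it modulo the buffer size, hdlcdrv_hbuf_empty() reports rd == wr, and hdlcdrv_hbuf_full() treats the buffer as full when wr sits one slot behind rd, sacrificing one slot to tell the two states apart. A small self-contained sketch of the same idiom; struct ring, ring_put() and ring_get() are made-up names for illustration, not part of the header:

    #include <stdbool.h>
    #include <stdio.h>

    #define RING_SIZE 8   /* capacity is RING_SIZE - 1 with this full/empty convention */

    struct ring {
        unsigned char buf[RING_SIZE];
        unsigned int rd, wr;          /* read and write indices, as in hdlcdrv's buffers */
    };

    static bool ring_full(const struct ring *r)
    {
        /* Same test as hdlcdrv_hbuf_full(): full when wr is one step behind rd (mod size). */
        return !((RING_SIZE - 1 + r->rd - r->wr) % RING_SIZE);
    }

    static bool ring_empty(const struct ring *r)
    {
        return r->rd == r->wr;        /* same test as hdlcdrv_hbuf_empty() */
    }

    static bool ring_put(struct ring *r, unsigned char c)
    {
        if (ring_full(r))
            return false;
        r->buf[r->wr] = c;
        r->wr = (r->wr + 1) % RING_SIZE;   /* advance exactly like hdlcdrv_add_bitbuffer() */
        return true;
    }

    static bool ring_get(struct ring *r, unsigned char *c)
    {
        if (ring_empty(r))
            return false;
        *c = r->buf[r->rd];
        r->rd = (r->rd + 1) % RING_SIZE;
        return true;
    }

    int main(void)
    {
        struct ring r = { .rd = 0, .wr = 0 };
        unsigned char c;
        int n = 0;

        while (ring_put(&r, 'a' + n))
            n++;
        printf("stored %d bytes before full\n", n);   /* prints 7 for RING_SIZE == 8 */
        while (ring_get(&r, &c))
            printf("%c", c);
        printf("\n");
        return 0;
    }
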
/Linux-v5.4/drivers/infiniband/hw/mlx5/gsi.c

    76  struct mlx5_ib_gsi_wr *wr;   in generate_completions() local
    81  wr = &gsi->outstanding_wrs[index % gsi->cap.max_send_wr];   in generate_completions()
    83  if (!wr->completed)   in generate_completions()
    87  wr->send_flags & IB_SEND_SIGNALED)   in generate_completions()
    88  WARN_ON_ONCE(mlx5_ib_generate_wc(gsi_cq, &wr->wc));   in generate_completions()
    90  wr->completed = false;   in generate_completions()
    99  struct mlx5_ib_gsi_wr *wr =   in handle_single_completion() local
    105  wr->completed = true;   in handle_single_completion()
    106  wr_id = wr->wc.wr_id;   in handle_single_completion()
    107  wr->wc = *wc;   in handle_single_completion()
    [all …]

/Linux-v5.4/drivers/infiniband/sw/rxe/rxe_verbs.c

    387  static int rxe_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,   in rxe_post_srq_recv() argument
    396  while (wr) {   in rxe_post_srq_recv()
    397  err = post_one_recv(&srq->rq, wr);   in rxe_post_srq_recv()
    400  wr = wr->next;   in rxe_post_srq_recv()
    406  *bad_wr = wr;   in rxe_post_srq_recv()
    530  static void init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr,   in init_send_wr() argument
    533  wr->wr_id = ibwr->wr_id;   in init_send_wr()
    534  wr->num_sge = ibwr->num_sge;   in init_send_wr()
    535  wr->opcode = ibwr->opcode;   in init_send_wr()
    536  wr->send_flags = ibwr->send_flags;   in init_send_wr()
    [all …]

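rxe_post_srq_recv() above follows the pattern shared by most of the post_send/post_recv hits in this list: work requests arrive as a singly linked list chained through ->next, the driver posts them one by one, and on the first failure it stops and stores the offending request in *bad_wr so the caller knows where posting stopped. A self-contained model of that contract; struct send_wr, post_one() and post_chain() are simplified stand-ins, not the kernel's ib_recv_wr API:

    #include <stdio.h>

    /* Hypothetical, pared-down work request: only the fields the walk needs. */
    struct send_wr {
        unsigned long   wr_id;
        int             num_sge;
        struct send_wr *next;     /* requests are chained, as in ib_send_wr/ib_recv_wr */
    };

    #define MAX_SGE 4

    /* Stand-in for posting one request to a queue; fails on an oversized SGE list. */
    static int post_one(const struct send_wr *wr)
    {
        if (wr->num_sge > MAX_SGE)
            return -1;
        printf("posted wr_id=%lu\n", wr->wr_id);
        return 0;
    }

    /* Walk the chain; on the first failure stop and report the offender via *bad_wr. */
    static int post_chain(struct send_wr *wr, struct send_wr **bad_wr)
    {
        int err = 0;

        while (wr) {
            err = post_one(wr);
            if (err)
                break;
            wr = wr->next;
        }
        if (err)
            *bad_wr = wr;
        return err;
    }

    int main(void)
    {
        struct send_wr c = { .wr_id = 3, .num_sge = 9, .next = NULL };  /* too many SGEs */
        struct send_wr b = { .wr_id = 2, .num_sge = 1, .next = &c };
        struct send_wr a = { .wr_id = 1, .num_sge = 2, .next = &b };
        struct send_wr *bad = NULL;

        if (post_chain(&a, &bad))
            printf("failed at wr_id=%lu\n", bad->wr_id);   /* reports 3 */
        return 0;
    }
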
/Linux-v5.4/drivers/infiniband/sw/rxe/rxe_req.c

    55  wqe->wr.opcode);   in retry_first_write_send()
    57  if (wqe->wr.send_flags & IB_SEND_INLINE) {   in retry_first_write_send()
    84  mask = wr_opcode_mask(wqe->wr.opcode, qp);   in req_retry()
    93  wqe->wr.wr.atomic.remote_addr :   in req_retry()
    95  wqe->wr.wr.rdma.remote_addr :   in req_retry()
    184  if (unlikely((wqe->wr.send_flags & IB_SEND_FENCE) &&   in req_next_wqe()
    190  wqe->mask = wr_opcode_mask(wqe->wr.opcode, qp);   in req_next_wqe()
    386  struct rxe_send_wr *ibwr = &wqe->wr;   in init_req_packet()
    423  port->pkey_tbl[ibwr->wr.ud.pkey_index] :   in init_req_packet()
    426  qp_num = (pkt->mask & RXE_DETH_MASK) ? ibwr->wr.ud.remote_qpn :   in init_req_packet()
    [all …]

/Linux-v5.4/drivers/infiniband/ulp/iser/iser_memory.c

    390  struct ib_reg_wr *wr = &tx_desc->reg_wr;   in iser_reg_sig_mr() local
    401  iser_inv_rkey(&tx_desc->inv_wr, mr, cqe, &wr->wr);   in iser_reg_sig_mr()
    413  memset(wr, 0, sizeof(*wr));   in iser_reg_sig_mr()
    414  wr->wr.next = &tx_desc->send_wr;   in iser_reg_sig_mr()
    415  wr->wr.opcode = IB_WR_REG_MR_INTEGRITY;   in iser_reg_sig_mr()
    416  wr->wr.wr_cqe = cqe;   in iser_reg_sig_mr()
    417  wr->wr.num_sge = 0;   in iser_reg_sig_mr()
    418  wr->wr.send_flags = 0;   in iser_reg_sig_mr()
    419  wr->mr = mr;   in iser_reg_sig_mr()
    420  wr->key = mr->rkey;   in iser_reg_sig_mr()
    [all …]

/Linux-v5.4/drivers/infiniband/hw/mthca/mthca_qp.c

    1500  int ind, const struct ib_ud_wr *wr,   in build_mlx_header() argument
    1509  mthca_ah_grh_present(to_mah(wr->ah)), 0, 0, 0,   in build_mlx_header()
    1512  err = mthca_read_ah(dev, to_mah(wr->ah), &sqp->ud_header);   in build_mlx_header()
    1523  switch (wr->wr.opcode) {   in build_mlx_header()
    1531  sqp->ud_header.immediate_data = wr->wr.ex.imm_data;   in build_mlx_header()
    1540  sqp->ud_header.bth.solicited_event = !!(wr->wr.send_flags & IB_SEND_SOLICITED);   in build_mlx_header()
    1546  wr->pkey_index, &pkey);   in build_mlx_header()
    1548  sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->remote_qpn);   in build_mlx_header()
    1550  sqp->ud_header.deth.qkey = cpu_to_be32(wr->remote_qkey & 0x80000000 ?   in build_mlx_header()
    1551  sqp->qkey : wr->remote_qkey);   in build_mlx_header()
    [all …]

/Linux-v5.4/arch/mips/mm/tlbex.c

    2047  struct work_registers wr = build_get_work_registers(p);   in build_r4000_tlbchange_handler_head() local
    2050  build_get_pmde64(p, l, r, wr.r1, wr.r2); /* get pmd in ptr */   in build_r4000_tlbchange_handler_head()
    2052  build_get_pgde32(p, wr.r1, wr.r2); /* get pgd in ptr */   in build_r4000_tlbchange_handler_head()
    2061  build_is_huge_pte(p, r, wr.r1, wr.r2, label_tlb_huge_update);   in build_r4000_tlbchange_handler_head()
    2064  UASM_i_MFC0(p, wr.r1, C0_BADVADDR);   in build_r4000_tlbchange_handler_head()
    2065  UASM_i_LW(p, wr.r2, 0, wr.r2);   in build_r4000_tlbchange_handler_head()
    2066  UASM_i_SRL(p, wr.r1, wr.r1, PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2);   in build_r4000_tlbchange_handler_head()
    2067  uasm_i_andi(p, wr.r1, wr.r1, (PTRS_PER_PTE - 1) << PTE_T_LOG2);   in build_r4000_tlbchange_handler_head()
    2068  UASM_i_ADDU(p, wr.r2, wr.r2, wr.r1);   in build_r4000_tlbchange_handler_head()
    2073  iPTE_LW(p, wr.r1, wr.r2); /* get even pte */   in build_r4000_tlbchange_handler_head()
    [all …]

/Linux-v5.4/drivers/infiniband/hw/cxgb4/qp.c

    414  const struct ib_send_wr *wr, int max, u32 *plenp)   in build_immd() argument
    422  for (i = 0; i < wr->num_sge; i++) {   in build_immd()
    423  if ((plen + wr->sg_list[i].length) > max)   in build_immd()
    425  srcp = (u8 *)(unsigned long)wr->sg_list[i].addr;   in build_immd()
    426  plen += wr->sg_list[i].length;   in build_immd()
    427  rem = wr->sg_list[i].length;   in build_immd()
    489  const struct ib_send_wr *wr, u8 *len16)   in build_rdma_send() argument
    495  if (wr->num_sge > T4_MAX_SEND_SGE)   in build_rdma_send()
    497  switch (wr->opcode) {   in build_rdma_send()
    499  if (wr->send_flags & IB_SEND_SOLICITED)   in build_rdma_send()
    [all …]

/Linux-v5.4/arch/sparc/kernel/trampoline_32.S

    47  wr %g1, 0x0, %psr ! traps off though
    52  wr %g1, 0x0, %wim
    56  wr %g3, 0x0, %tbr
    71  wr %g1, PSR_ET, %psr ! traps on
    101  wr %g1, 0x0, %psr ! traps off though
    106  wr %g1, 0x0, %wim
    111  wr %g1, 0x0, %tbr
    132  wr %g1, PSR_ET, %psr ! traps on
    160  wr %g1, 0x0, %psr ! traps off though
    165  wr %g1, 0x0, %wim
    [all …]

/Linux-v5.4/arch/sparc/kernel/entry.S

    159  wr %l0, 0x0, %psr
    181  wr %l4, 0x0, %psr
    183  wr %l4, PSR_ET, %psr
    200  wr %l0, PSR_ET, %psr
    230  wr %g2, 0x0, %psr
    232  wr %g2, PSR_ET, %psr
    239  wr %g2, PSR_ET, %psr ! keep ET up
    249  wr %g2, 0x0, %psr
    251  wr %g2, PSR_ET, %psr
    255  wr %l0, PSR_ET, %psr
    [all …]

/Linux-v5.4/arch/sparc/kernel/rtrap_32.S

    58  wr %t_psr, 0x0, %psr
    88  wr %t_psr, 0x0, %psr
    96  wr %t_psr, PSR_ET, %psr
    130  wr %glob_tmp, 0x0, %wim
    154  wr %t_psr, 0x0, %psr
    165  wr %t_wim, 0x0, %wim ! or else...
    167  wr %t_psr, PSR_ET, %psr
    192  wr %g1, 0x0, %wim
    210  wr %t_psr, 0x0, %psr
    217  wr %t_wim, 0x0, %wim
    [all …]

/Linux-v5.4/drivers/infiniband/sw/siw/siw_verbs.c

    697  int siw_post_send(struct ib_qp *base_qp, const struct ib_send_wr *wr,   in siw_post_send() argument
    711  *bad_wr = wr;   in siw_post_send()
    717  *bad_wr = wr;   in siw_post_send()
    721  if (wr && !qp->kernel_verbs) {   in siw_post_send()
    724  *bad_wr = wr;   in siw_post_send()
    729  while (wr) {   in siw_post_send()
    738  if (wr->num_sge > qp->attrs.sq_max_sges) {   in siw_post_send()
    739  siw_dbg_qp(qp, "too many sge's: %d\n", wr->num_sge);   in siw_post_send()
    743  sqe->id = wr->wr_id;   in siw_post_send()
    745  if ((wr->send_flags & IB_SEND_SIGNALED) ||   in siw_post_send()
    [all …]

/Linux-v5.4/arch/arm/boot/dts/omap3430-sdp.dts

    65  gpmc,cs-wr-off-ns = <186>;
    68  gpmc,adv-wr-off-ns = <48>;
    74  gpmc,wr-cycle-ns = <186>;
    79  gpmc,wr-data-mux-bus-ns = <90>;
    80  gpmc,wr-access-ns = <186>;
    115  gpmc,cs-wr-off-ns = <36>;
    118  gpmc,adv-wr-off-ns = <36>;
    124  gpmc,wr-cycle-ns = <72>;
    126  gpmc,wr-access-ns = <30>;
    161  gpmc,cs-wr-off-ns = <72>;
    [all …]

/Linux-v5.4/drivers/infiniband/sw/rdmavt/qp.c

    661  for (i = 0; i < wqe->wr.num_sge; i++) {   in rvt_swqe_has_lkey()
    1803  int rvt_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,   in rvt_post_recv() argument
    1814  *bad_wr = wr;   in rvt_post_recv()
    1818  for (; wr; wr = wr->next) {   in rvt_post_recv()
    1823  if ((unsigned)wr->num_sge > qp->r_rq.max_sge) {   in rvt_post_recv()
    1824  *bad_wr = wr;   in rvt_post_recv()
    1834  *bad_wr = wr;   in rvt_post_recv()
    1843  wc.wr_id = wr->wr_id;   in rvt_post_recv()
    1848  wqe->wr_id = wr->wr_id;   in rvt_post_recv()
    1849  wqe->num_sge = wr->num_sge;   in rvt_post_recv()
    [all …]

/Linux-v5.4/drivers/infiniband/hw/mlx4/qp.c

    2881  const struct ib_ud_wr *wr,   in build_sriov_qp0_header() argument
    2888  struct mlx4_ib_ah *ah = to_mah(wr->ah);   in build_sriov_qp0_header()
    2896  if (wr->wr.opcode != IB_WR_SEND)   in build_sriov_qp0_header()
    2901  for (i = 0; i < wr->wr.num_sge; ++i)   in build_sriov_qp0_header()
    2902  send_size += wr->wr.sg_list[i].length;   in build_sriov_qp0_header()
    2927  sqp->ud_header.bth.solicited_event = !!(wr->wr.send_flags & IB_SEND_SOLICITED);   in build_sriov_qp0_header()
    2931  sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->remote_qpn);   in build_sriov_qp0_header()
    3029  static int build_mlx_header(struct mlx4_ib_sqp *sqp, const struct ib_ud_wr *wr,   in build_mlx_header() argument
    3037  struct mlx4_ib_ah *ah = to_mah(wr->ah);   in build_mlx_header()
    3053  for (i = 0; i < wr->wr.num_sge; ++i)   in build_mlx_header()
    [all …]

/Linux-v5.4/drivers/infiniband/hw/bnxt_re/ib_verbs.c

    1474  int bnxt_re_post_srq_recv(struct ib_srq *ib_srq, const struct ib_recv_wr *wr,   in bnxt_re_post_srq_recv() argument
    1484  while (wr) {   in bnxt_re_post_srq_recv()
    1486  wqe.num_sge = wr->num_sge;   in bnxt_re_post_srq_recv()
    1487  bnxt_re_build_sgl(wr->sg_list, wqe.sg_list, wr->num_sge);   in bnxt_re_post_srq_recv()
    1488  wqe.wr_id = wr->wr_id;   in bnxt_re_post_srq_recv()
    1493  *bad_wr = wr;   in bnxt_re_post_srq_recv()
    1496  wr = wr->next;   in bnxt_re_post_srq_recv()
    1836  const struct ib_send_wr *wr,   in bnxt_re_build_qp1_send_v2() argument
    1840  struct bnxt_re_ah *ah = container_of(ud_wr(wr)->ah, struct bnxt_re_ah,   in bnxt_re_build_qp1_send_v2()
    1937  if (wr->opcode == IB_WR_SEND_WITH_IMM) {   in bnxt_re_build_qp1_send_v2()
    [all …]