/Linux-v5.15/net/9p/
D | trans_rdma.c
    155  struct p9_trans_rdma *rdma = clnt->trans;  in p9_rdma_show_options() local
    157  if (rdma->port != P9_PORT)  in p9_rdma_show_options()
    158  seq_printf(m, ",port=%u", rdma->port);  in p9_rdma_show_options()
    159  if (rdma->sq_depth != P9_RDMA_SQ_DEPTH)  in p9_rdma_show_options()
    160  seq_printf(m, ",sq=%u", rdma->sq_depth);  in p9_rdma_show_options()
    161  if (rdma->rq_depth != P9_RDMA_RQ_DEPTH)  in p9_rdma_show_options()
    162  seq_printf(m, ",rq=%u", rdma->rq_depth);  in p9_rdma_show_options()
    163  if (rdma->timeout != P9_RDMA_TIMEOUT)  in p9_rdma_show_options()
    164  seq_printf(m, ",timeout=%lu", rdma->timeout);  in p9_rdma_show_options()
    165  if (rdma->privport)  in p9_rdma_show_options()
    [all …]

/Linux-v5.15/drivers/gpu/drm/mediatek/
D | mtk_disp_rdma.c
    49   #define RDMA_FIFO_SIZE(rdma) ((rdma)->data->fifo_size)  argument
    90   struct mtk_disp_rdma *rdma = dev_get_drvdata(dev);  in rdma_update_bits() local
    91   unsigned int tmp = readl(rdma->regs + reg);  in rdma_update_bits()
    94   writel(tmp, rdma->regs + reg);  in rdma_update_bits()
    101  struct mtk_disp_rdma *rdma = dev_get_drvdata(dev);  in mtk_rdma_enable_vblank() local
    103  rdma->vblank_cb = vblank_cb;  in mtk_rdma_enable_vblank()
    104  rdma->vblank_cb_data = vblank_cb_data;  in mtk_rdma_enable_vblank()
    111  struct mtk_disp_rdma *rdma = dev_get_drvdata(dev);  in mtk_rdma_disable_vblank() local
    113  rdma->vblank_cb = NULL;  in mtk_rdma_disable_vblank()
    114  rdma->vblank_cb_data = NULL;  in mtk_rdma_disable_vblank()
    [all …]

/Linux-v5.15/net/sunrpc/xprtrdma/
D | svc_rdma_sendto.c
    116  static void svc_rdma_send_cid_init(struct svcxprt_rdma *rdma,  in svc_rdma_send_cid_init() argument
    119  cid->ci_queue_id = rdma->sc_sq_cq->res.id;  in svc_rdma_send_cid_init()
    120  cid->ci_completion_id = atomic_inc_return(&rdma->sc_completion_ids);  in svc_rdma_send_cid_init()
    124  svc_rdma_send_ctxt_alloc(struct svcxprt_rdma *rdma)  in svc_rdma_send_ctxt_alloc() argument
    133  size += rdma->sc_max_send_sges * sizeof(struct ib_sge);  in svc_rdma_send_ctxt_alloc()
    137  buffer = kmalloc(rdma->sc_max_req_size, GFP_KERNEL);  in svc_rdma_send_ctxt_alloc()
    140  addr = ib_dma_map_single(rdma->sc_pd->device, buffer,  in svc_rdma_send_ctxt_alloc()
    141  rdma->sc_max_req_size, DMA_TO_DEVICE);  in svc_rdma_send_ctxt_alloc()
    142  if (ib_dma_mapping_error(rdma->sc_pd->device, addr))  in svc_rdma_send_ctxt_alloc()
    145  svc_rdma_send_cid_init(rdma, &ctxt->sc_cid);  in svc_rdma_send_ctxt_alloc()
    [all …]

D | svc_rdma_recvfrom.c
    118  static void svc_rdma_recv_cid_init(struct svcxprt_rdma *rdma,  in svc_rdma_recv_cid_init() argument
    121  cid->ci_queue_id = rdma->sc_rq_cq->res.id;  in svc_rdma_recv_cid_init()
    122  cid->ci_completion_id = atomic_inc_return(&rdma->sc_completion_ids);  in svc_rdma_recv_cid_init()
    126  svc_rdma_recv_ctxt_alloc(struct svcxprt_rdma *rdma)  in svc_rdma_recv_ctxt_alloc() argument
    135  buffer = kmalloc(rdma->sc_max_req_size, GFP_KERNEL);  in svc_rdma_recv_ctxt_alloc()
    138  addr = ib_dma_map_single(rdma->sc_pd->device, buffer,  in svc_rdma_recv_ctxt_alloc()
    139  rdma->sc_max_req_size, DMA_FROM_DEVICE);  in svc_rdma_recv_ctxt_alloc()
    140  if (ib_dma_mapping_error(rdma->sc_pd->device, addr))  in svc_rdma_recv_ctxt_alloc()
    143  svc_rdma_recv_cid_init(rdma, &ctxt->rc_cid);  in svc_rdma_recv_ctxt_alloc()
    155  ctxt->rc_recv_sge.length = rdma->sc_max_req_size;  in svc_rdma_recv_ctxt_alloc()
    [all …]

D | svc_rdma_rw.c
    54   svc_rdma_get_rw_ctxt(struct svcxprt_rdma *rdma, unsigned int sges)  in svc_rdma_get_rw_ctxt() argument
    59   spin_lock(&rdma->sc_rw_ctxt_lock);  in svc_rdma_get_rw_ctxt()
    60   node = llist_del_first(&rdma->sc_rw_ctxts);  in svc_rdma_get_rw_ctxt()
    61   spin_unlock(&rdma->sc_rw_ctxt_lock);  in svc_rdma_get_rw_ctxt()
    83   trace_svcrdma_no_rwctx_err(rdma, sges);  in svc_rdma_get_rw_ctxt()
    87   static void __svc_rdma_put_rw_ctxt(struct svcxprt_rdma *rdma,  in __svc_rdma_put_rw_ctxt() argument
    95   static void svc_rdma_put_rw_ctxt(struct svcxprt_rdma *rdma,  in svc_rdma_put_rw_ctxt() argument
    98   __svc_rdma_put_rw_ctxt(rdma, ctxt, &rdma->sc_rw_ctxts);  in svc_rdma_put_rw_ctxt()
    106  void svc_rdma_destroy_rw_ctxts(struct svcxprt_rdma *rdma)  in svc_rdma_destroy_rw_ctxts() argument
    111  while ((node = llist_del_first(&rdma->sc_rw_ctxts)) != NULL) {  in svc_rdma_destroy_rw_ctxts()
    [all …]

D | svc_rdma_transport.c
    274  struct svcxprt_rdma *rdma = cma_id->context;  in svc_rdma_cma_handler() local
    275  struct svc_xprt *xprt = &rdma->sc_xprt;  in svc_rdma_cma_handler()
    279  clear_bit(RDMAXPRT_CONN_PENDING, &rdma->sc_flags);  in svc_rdma_cma_handler()
    538  struct svcxprt_rdma *rdma =  in svc_rdma_detach() local
    541  rdma_disconnect(rdma->sc_cm_id);  in svc_rdma_detach()
    546  struct svcxprt_rdma *rdma =  in __svc_rdma_free() local
    550  if (rdma->sc_qp && !IS_ERR(rdma->sc_qp))  in __svc_rdma_free()
    551  ib_drain_qp(rdma->sc_qp);  in __svc_rdma_free()
    553  svc_rdma_flush_recv_queues(rdma);  in __svc_rdma_free()
    555  svc_rdma_destroy_rw_ctxts(rdma);  in __svc_rdma_free()
    [all …]

D | svc_rdma_backchannel.c
    75   static int svc_rdma_bc_sendto(struct svcxprt_rdma *rdma,  in svc_rdma_bc_sendto() argument
    82   rctxt = svc_rdma_recv_ctxt_get(rdma);  in svc_rdma_bc_sendto()
    86   ret = svc_rdma_map_reply_msg(rdma, sctxt, rctxt, &rqst->rq_snd_buf);  in svc_rdma_bc_sendto()
    87   svc_rdma_recv_ctxt_put(rdma, rctxt);  in svc_rdma_bc_sendto()
    96   ret = svc_rdma_send(rdma, sctxt);  in svc_rdma_bc_sendto()
    101  svc_rdma_send_ctxt_put(rdma, sctxt);  in svc_rdma_bc_sendto()
    145  rpcrdma_bc_send_request(struct svcxprt_rdma *rdma, struct rpc_rqst *rqst)  in rpcrdma_bc_send_request() argument
    153  ctxt = svc_rdma_send_ctxt_get(rdma);  in rpcrdma_bc_send_request()
    169  rc = svc_rdma_bc_sendto(rdma, rqst, ctxt);  in rpcrdma_bc_send_request()
    175  svc_rdma_send_ctxt_put(rdma, ctxt);  in rpcrdma_bc_send_request()
    [all …]

/Linux-v5.15/drivers/gpu/drm/meson/
D | meson_rdma.c
    26   if (!priv->rdma.addr) {  in meson_rdma_init()
    28   priv->rdma.addr =  in meson_rdma_init()
    30   &priv->rdma.addr_dma,  in meson_rdma_init()
    32   if (!priv->rdma.addr)  in meson_rdma_init()
    36   priv->rdma.offset = 0;  in meson_rdma_init()
    50   if (!priv->rdma.addr && !priv->rdma.addr_dma)  in meson_rdma_free()
    56   priv->rdma.addr, priv->rdma.addr_dma);  in meson_rdma_free()
    58   priv->rdma.addr = NULL;  in meson_rdma_free()
    59   priv->rdma.addr_dma = (dma_addr_t)0;  in meson_rdma_free()
    88   priv->rdma.offset = 0;  in meson_rdma_reset()
    [all …]

/Linux-v5.15/include/linux/sunrpc/
D | svc_rdma.h
    172  extern void svc_rdma_recv_ctxts_destroy(struct svcxprt_rdma *rdma);
    173  extern bool svc_rdma_post_recvs(struct svcxprt_rdma *rdma);
    175  svc_rdma_recv_ctxt_get(struct svcxprt_rdma *rdma);
    176  extern void svc_rdma_recv_ctxt_put(struct svcxprt_rdma *rdma,
    178  extern void svc_rdma_flush_recv_queues(struct svcxprt_rdma *rdma);
    183  extern void svc_rdma_destroy_rw_ctxts(struct svcxprt_rdma *rdma);
    184  extern int svc_rdma_send_write_chunk(struct svcxprt_rdma *rdma,
    187  extern int svc_rdma_send_reply_chunk(struct svcxprt_rdma *rdma,
    190  extern int svc_rdma_process_read_list(struct svcxprt_rdma *rdma,
    195  extern void svc_rdma_send_ctxts_destroy(struct svcxprt_rdma *rdma);
    [all …]

/Linux-v5.15/Documentation/admin-guide/cgroup-v1/
D | rdma.rst
    28   Currently user space applications can easily take away all the rdma verb
    31   rdma resources. This can lead to service unavailability.
    34   of processes can be limited. Through this controller different rdma
    43   by rdma cgroup, which can be extended later if required.
    52   of the child processes which shares the address space, rdma resources are
    56   rdma resources. Linking resources around css also ensures that cgroups can be
    60   Whenever RDMA resource charging occurs, owner rdma cgroup is returned to
    61   the caller. Same rdma cgroup should be passed while uncharging the resource.
    81   IB stack honors limits enforced by the rdma controller. When application
    86   Following resources can be accounted by rdma controller.
    [all …]

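The rdma.rst fragments above describe the charge/uncharge contract: when a resource is charged, the owning rdma cgroup is returned to the caller, and the same cgroup must be passed back on uncharge. The following is only a minimal sketch of that pairing, assuming CONFIG_CGROUP_RDMA and the kernel's rdmacg_try_charge()/rdmacg_uncharge() helpers with the RDMACG_RESOURCE_HCA_OBJECT resource type; the demo_* wrappers are hypothetical, not taken from the document.

    #include <linux/cgroup_rdma.h>

    /* Charge one HCA object against the caller's rdma cgroup. */
    static int demo_alloc_hca_object(struct rdmacg_device *cg_dev,
                                     struct rdma_cgroup **owner)
    {
            int ret;

            /* On success, the owning cgroup is returned through @owner. */
            ret = rdmacg_try_charge(owner, cg_dev, RDMACG_RESOURCE_HCA_OBJECT);
            if (ret)
                    return ret;     /* configured limit would be exceeded */
            return 0;
    }

    /* Uncharge must name the same cgroup that was charged. */
    static void demo_free_hca_object(struct rdmacg_device *cg_dev,
                                     struct rdma_cgroup *owner)
    {
            rdmacg_uncharge(owner, cg_dev, RDMACG_RESOURCE_HCA_OBJECT);
    }
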
/Linux-v5.15/include/trace/events/
D | rpcrdma.h
    1335  const struct svcxprt_rdma *rdma,
    1339  TP_ARGS(rdma, status),
    1343  __string(addr, rdma->sc_xprt.xpt_remotebuf)
    1348  __assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
    1359  const struct svcxprt_rdma *rdma, \
    1362  TP_ARGS(rdma, status))
    1634  const struct svcxprt_rdma *rdma,
    1639  TP_ARGS(rdma, dma_addr, length),
    1644  __string(device, rdma->sc_cm_id->device->name)
    1645  __string(addr, rdma->sc_xprt.xpt_remotebuf)
    [all …]

/Linux-v5.15/Documentation/ABI/stable/
D | sysfs-driver-ib_srp
    4    Contact: linux-rdma@vger.kernel.org
    77   Contact: linux-rdma@vger.kernel.org
    83   Contact: linux-rdma@vger.kernel.org
    89   Contact: linux-rdma@vger.kernel.org
    97   Contact: linux-rdma@vger.kernel.org
    104  Contact: linux-rdma@vger.kernel.org
    111  Contact: linux-rdma@vger.kernel.org
    117  Contact: linux-rdma@vger.kernel.org
    124  Contact: linux-rdma@vger.kernel.org
    131  Contact: linux-rdma@vger.kernel.org
    [all …]

D | sysfs-class-infiniband
    9    Contact: linux-rdma@vger.kernel.org
    24   Contact: linux-rdma@vger.kernel.org
    34   Contact: linux-rdma@vger.kernel.org
    49   Contact: linux-rdma@vger.kernel.org
    80   Contact: linux-rdma@vger.kernel.org
    108  Contact: linux-rdma@vger.kernel.org
    203  Contact: linux-rdma@vger.kernel.org
    222  Contact: linux-rdma@vger.kernel.org
    229  Contact: linux-rdma@vger.kernel.org
    241  Contact: linux-rdma@vger.kernel.org
    [all …]

D | sysfs-transport-srp
    4    Contact: linux-scsi@vger.kernel.org, linux-rdma@vger.kernel.org
    11   Contact: linux-scsi@vger.kernel.org, linux-rdma@vger.kernel.org
    20   Contact: linux-scsi@vger.kernel.org, linux-rdma@vger.kernel.org
    36   Contact: linux-scsi@vger.kernel.org, linux-rdma@vger.kernel.org
    50   Contact: linux-scsi@vger.kernel.org, linux-rdma@vger.kernel.org

/Linux-v5.15/Documentation/devicetree/bindings/display/mediatek/
D | mediatek,disp.txt
    32   "mediatek,<chip>-disp-rdma" - read DMA / line buffer
    61   "mediatek,<chip>-disp-rdma"
    71   - mediatek,rdma-fifo-size: rdma fifo size may be different even in same SOC, add this
    72   property to the corresponding rdma
    74   mediatek,rdma-fifo-size of mt8173-rdma0 is 8K
    75   mediatek,rdma-fifo-size of mt8183-rdma0 is 5K
    76   mediatek,rdma-fifo-size of mt8183-rdma1 is 2K
    107  rdma0: rdma@1400e000 {
    108  compatible = "mediatek,mt8173-disp-rdma";
    115  mediatek,rdma-fifosize = <8192>;
    [all …]

/Linux-v5.15/drivers/net/ethernet/seeq/
D | sgiseeq.c
    69   volatile struct hpc_dma_desc rdma;  member
    213  sp->rx_desc[i].rdma.pbuf = dma_addr;  in seeq_init_ring()
    215  sp->rx_desc[i].rdma.cntinfo = RCNTINFO_INIT;  in seeq_init_ring()
    218  sp->rx_desc[i - 1].rdma.cntinfo |= HPCDMA_EOR;  in seeq_init_ring()
    263  i, (&r[i]), r[i].rdma.pbuf, r[i].rdma.cntinfo,  in sgiseeq_dump_rings()
    264  r[i].rdma.pnext);  in sgiseeq_dump_rings()
    267  i, (&r[i]), r[i].rdma.pbuf, r[i].rdma.cntinfo,  in sgiseeq_dump_rings()
    268  r[i].rdma.pnext);  in sgiseeq_dump_rings()
    355  while (!(rd->rdma.cntinfo & HPCDMA_OWN)) {  in sgiseeq_rx()
    356  len = PKT_BUF_SZ - (rd->rdma.cntinfo & HPCDMA_BCNT) - 3;  in sgiseeq_rx()
    [all …]

/Linux-v5.15/Documentation/devicetree/bindings/media/
D | mediatek-mdp.txt
    12   "mediatek,mt8173-mdp-rdma" - read DMA
    24   "mediatek,mt8173-mdp-rdma"
    35   mdp_rdma0: rdma@14001000 {
    36   compatible = "mediatek,mt8173-mdp-rdma";
    47   mdp_rdma1: rdma@14002000 {
    48   compatible = "mediatek,mt8173-mdp-rdma";

/Linux-v5.15/drivers/nvme/host/
D | Makefile
    8    obj-$(CONFIG_NVME_RDMA) += nvme-rdma.o
    23   nvme-rdma-y += rdma.o

/Linux-v5.15/drivers/infiniband/ulp/rtrs/
D | rtrs-clt-stats.c
    37   s->rdma.failover_cnt++;  in rtrs_clt_inc_failover_cnt()
    88   r = &per_cpu_ptr(stats->pcpu_stats, cpu)->rdma;  in rtrs_clt_stats_rdma_to_str()
    119  memset(&s->rdma, 0, sizeof(s->rdma));  in rtrs_clt_reset_rdma_stats()
    170  s->rdma.dir[d].cnt++;  in rtrs_clt_update_rdma_stats()
    171  s->rdma.dir[d].size_total += size;  in rtrs_clt_update_rdma_stats()

D | README
    29   server side for a given client for rdma transfer. A session
    50   invalidate each rdma buffer before we hand it over to RNBD server and
    95   6. Server and client exchange periodically heartbeat messages (empty rdma
    123  on the server side and rdma writes there the user data, user header and the
    129  2. When confirming a write request server sends an "empty" rdma message with
    140  on the server side and rdma writes there the user data, user header and the
    147  2. When confirming a write request server sends an "empty" rdma message with
    163  on the server side and rdma writes there the user header and the
    169  attaches an invalidation message if requested and finally an "empty" rdma
    182  on the server side and rdma writes there the user header and the
    [all …]

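The README fragments above repeatedly mention the server confirming a request with an "empty" rdma message carrying an immediate field. Below is a generic verbs-level sketch of that idea — a zero-payload RDMA WRITE with immediate data posted via ib_post_send() — offered as an illustration of the technique under the standard ib_verbs API, not the actual rtrs implementation; post_empty_write_with_imm() is a hypothetical helper.

    #include <rdma/ib_verbs.h>

    /*
     * Post a zero-length RDMA WRITE that carries only immediate data, so
     * the peer gets a receive completion with @imm but no payload moves.
     */
    static int post_empty_write_with_imm(struct ib_qp *qp, u32 rkey,
                                         u64 remote_addr, __be32 imm)
    {
            struct ib_rdma_wr wr = {
                    .wr = {
                            .opcode      = IB_WR_RDMA_WRITE_WITH_IMM,
                            .send_flags  = IB_SEND_SIGNALED,
                            .ex.imm_data = imm,
                            .num_sge     = 0,   /* "empty": no SGEs */
                    },
                    .remote_addr = remote_addr,
                    .rkey        = rkey,
            };
            const struct ib_send_wr *bad_wr;

            return ib_post_send(qp, &wr.wr, &bad_wr);
    }
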
/Linux-v5.15/drivers/nvme/target/
D | Makefile
    7    obj-$(CONFIG_NVME_TARGET_RDMA) += nvmet-rdma.o
    17   nvmet-rdma-y += rdma.o

/Linux-v5.15/drivers/net/ethernet/chelsio/cxgb3/
D | cxgb3_offload.c
    281  struct rdma_info *rdma = data;  in cxgb_rdma_ctl() local
    284  rdma->udbell_physbase = pci_resource_start(pdev, 2);  in cxgb_rdma_ctl()
    285  rdma->udbell_len = pci_resource_len(pdev, 2);  in cxgb_rdma_ctl()
    286  rdma->tpt_base =  in cxgb_rdma_ctl()
    288  rdma->tpt_top = t3_read_reg(adapter, A_ULPTX_TPT_ULIMIT);  in cxgb_rdma_ctl()
    289  rdma->pbl_base =  in cxgb_rdma_ctl()
    291  rdma->pbl_top = t3_read_reg(adapter, A_ULPTX_PBL_ULIMIT);  in cxgb_rdma_ctl()
    292  rdma->rqt_base = t3_read_reg(adapter, A_ULPRX_RQ_LLIMIT);  in cxgb_rdma_ctl()
    293  rdma->rqt_top = t3_read_reg(adapter, A_ULPRX_RQ_ULIMIT);  in cxgb_rdma_ctl()
    294  rdma->kdb_addr = adapter->regs + A_SG_KDOORBELL;  in cxgb_rdma_ctl()
    [all …]

/Linux-v5.15/drivers/infiniband/
D | Kconfig
    25   need libibumad from rdma-core
    26   <https://github.com/linux-rdma/rdma-core>.
    38   rdma-core <https://github.com/linux-rdma/rdma-core>.

/Linux-v5.15/drivers/macintosh/
D | rack-meter.c
    147  struct rackmeter_dma *rdma = rm->dma_buf_v;  in rackmeter_do_pause() local
    156  memset(rdma->buf1, 0, sizeof(rdma->buf1));  in rackmeter_do_pause()
    157  memset(rdma->buf2, 0, sizeof(rdma->buf2));  in rackmeter_do_pause()
    372  struct resource ri2s, rdma;  in rackmeter_probe() local
    432  of_address_to_resource(i2s, 1, &rdma)) {  in rackmeter_probe()
    442  pr_debug(" dma @0x%08x\n", (unsigned int)rdma.start);  in rackmeter_probe()
    476  rm->dma_regs = ioremap(rdma.start, 0x100);  in rackmeter_probe()

/Linux-v5.15/drivers/infiniband/hw/mlx5/
D | odp.c
    76    } rdma;  member
    1311  u32 rkey = pfault->rdma.r_key;  in mlx5_ib_mr_rdma_pfault_handler()
    1320  pfault->rdma.rdma_va += pfault->bytes_committed;  in mlx5_ib_mr_rdma_pfault_handler()
    1321  pfault->rdma.rdma_op_len -= min(pfault->bytes_committed,  in mlx5_ib_mr_rdma_pfault_handler()
    1322  pfault->rdma.rdma_op_len);  in mlx5_ib_mr_rdma_pfault_handler()
    1325  address = pfault->rdma.rdma_va;  in mlx5_ib_mr_rdma_pfault_handler()
    1326  length = pfault->rdma.rdma_op_len;  in mlx5_ib_mr_rdma_pfault_handler()
    1333  length = pfault->rdma.packet_size;  in mlx5_ib_mr_rdma_pfault_handler()
    1428  be32_to_cpu(pf_eqe->rdma.pftype_token) >> 24;  in mlx5_ib_eq_pf_process()
    1430  be32_to_cpu(pf_eqe->rdma.pftype_token) &  in mlx5_ib_eq_pf_process()
    [all …]