/Linux-v4.19/drivers/net/ethernet/chelsio/cxgb/

sge.c:
    254: struct sge {  [struct]
    282: static void tx_sched_stop(struct sge *sge)  [in tx_sched_stop(), argument]
    284: struct sched *s = sge->tx_sched;  [in tx_sched_stop()]
    297: unsigned int t1_sched_update_parms(struct sge *sge, unsigned int port,  [in t1_sched_update_parms(), argument]
    300: struct sched *s = sge->tx_sched;  [in t1_sched_update_parms()]
    320: if (board_info(sge->adapter)->board == CHBT_BOARD_CHT204) {  [in t1_sched_update_parms()]
    343: void t1_sched_set_max_avail_bytes(struct sge *sge, unsigned int val)
    345: struct sched *s = sge->tx_sched;
    350: t1_sched_update_parms(sge, i, 0, 0);
    357: void t1_sched_set_drain_bits_per_us(struct sge *sge, unsigned int port,
    [all …]

sge.h:
    71: struct sge;
    73: struct sge *t1_sge_create(struct adapter *, struct sge_params *);
    74: int t1_sge_configure(struct sge *, struct sge_params *);
    75: int t1_sge_set_coalesce_params(struct sge *, struct sge_params *);
    76: void t1_sge_destroy(struct sge *);
    82: void t1_sge_start(struct sge *);
    83: void t1_sge_stop(struct sge *);
    84: int t1_sge_intr_error_handler(struct sge *);
    85: void t1_sge_intr_enable(struct sge *);
    86: void t1_sge_intr_disable(struct sge *);
    [all …]

/Linux-v4.19/include/rdma/

rdmavt_mr.h:
    120: struct rvt_sge sge; /* progress state for the current SGE */  [member]
    138: rvt_put_mr(ss->sge.mr);  [in rvt_put_ss()]
    140: ss->sge = *ss->sg_list++;  [in rvt_put_ss()]
    144: static inline u32 rvt_get_sge_length(struct rvt_sge *sge, u32 length)  [in rvt_get_sge_length(), argument]
    146: u32 len = sge->length;  [in rvt_get_sge_length()]
    150: if (len > sge->sge_length)  [in rvt_get_sge_length()]
    151: len = sge->sge_length;  [in rvt_get_sge_length()]
    159: struct rvt_sge *sge = &ss->sge;  [in rvt_update_sge(), local]
    161: sge->vaddr += length;  [in rvt_update_sge()]
    162: sge->length -= length;  [in rvt_update_sge()]
    [all …]
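
The rvt_get_sge_length()/rvt_update_sge() hits above form the generic clamp-and-advance pattern that the qib and hfi1 entries further down reuse: each copy step is limited both by what is left in the current SGE and by what is left in the whole memory region, and progress is recorded by bumping vaddr and decrementing the two lengths. A minimal userspace sketch of that pattern, assuming a simplified stand-in struct rather than the real rvt_sge:

    #include <stdint.h>

    /* Stand-in for the rvt_sge progress state: length is what remains in
     * the current SGE, sge_length what remains in the overall region. */
    struct toy_sge {
        uint8_t  *vaddr;
        uint32_t  length;
        uint32_t  sge_length;
    };

    /* Clamp as in rvt_get_sge_length(): never step past the end of the
     * current SGE or past the end of the memory region. */
    static uint32_t toy_get_sge_length(const struct toy_sge *sge, uint32_t length)
    {
        uint32_t len = sge->length;

        if (len > length)
            len = length;
        if (len > sge->sge_length)
            len = sge->sge_length;
        return len;
    }

    /* Advance as in rvt_update_sge(): record 'length' bytes of progress. */
    static void toy_update_sge(struct toy_sge *sge, uint32_t length)
    {
        sge->vaddr      += length;
        sge->length     -= length;
        sge->sge_length -= length;
    }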

/Linux-v4.19/drivers/infiniband/sw/rdmavt/

trace_mr.h:
    108: TP_PROTO(struct rvt_sge *sge, struct ib_sge *isge),
    109: TP_ARGS(sge, isge),
    111: RDI_DEV_ENTRY(ib_to_rvt(sge->mr->pd->device))
    113: __field(struct rvt_sge *, sge)
    126: RDI_DEV_ASSIGN(ib_to_rvt(sge->mr->pd->device));
    127: __entry->mr = sge->mr;
    128: __entry->sge = sge;
    130: __entry->vaddr = sge->vaddr;
    132: __entry->lkey = sge->mr->lkey;
    133: __entry->sge_length = sge->sge_length;
    [all …]

mr.c:
    537: rval = rvt_mr_has_lkey(ss->sge.mr, lkey);  [in rvt_ss_has_lkey()]
    879: struct ib_sge *sge)  [in rvt_sge_adjacent(), argument]
    881: if (last_sge && sge->lkey == last_sge->mr->lkey &&  [in rvt_sge_adjacent()]
    882: ((uint64_t)(last_sge->vaddr + last_sge->length) == sge->addr)) {  [in rvt_sge_adjacent()]
    883: if (sge->lkey) {  [in rvt_sge_adjacent()]
    884: if (unlikely((sge->addr - last_sge->mr->user_base +  [in rvt_sge_adjacent()]
    885: sge->length > last_sge->mr->length)))  [in rvt_sge_adjacent()]
    888: last_sge->length += sge->length;  [in rvt_sge_adjacent()]
    890: last_sge->sge_length += sge->length;  [in rvt_sge_adjacent()]
    891: trace_rvt_sge_adjacent(last_sge, sge);  [in rvt_sge_adjacent()]
    [all …]

/Linux-v4.19/drivers/scsi/esas2r/

esas2r_io.c:
    222: if (unlikely(sgc->sge.a64.curr > sgc->sge.a64.limit)) {  [in esas2r_build_sg_list_sge()]
    237: sgelen = (u8)((u8 *)sgc->sge.a64.curr  [in esas2r_build_sg_list_sge()]
    238: - (u8 *)sgc->sge.a64.last);  [in esas2r_build_sg_list_sge()]
    244: memcpy(sgl->virt_addr, sgc->sge.a64.last, sgelen);  [in esas2r_build_sg_list_sge()]
    247: sgc->sge.a64.curr =  [in esas2r_build_sg_list_sge()]
    252: sgc->sge.a64.limit =  [in esas2r_build_sg_list_sge()]
    257: sgc->sge.a64.last->length = cpu_to_le32(  [in esas2r_build_sg_list_sge()]
    259: sgc->sge.a64.last->address =  [in esas2r_build_sg_list_sge()]
    268: if (sgc->sge.a64.chain) {  [in esas2r_build_sg_list_sge()]
    269: sgc->sge.a64.chain->length |=  [in esas2r_build_sg_list_sge()]
    [all …]

/Linux-v4.19/drivers/scsi/csiostor/

csio_wr.c:
    58: csio_get_flbuf_size(struct csio_hw *hw, struct csio_sge *sge, uint32_t reg)  [in csio_get_flbuf_size(), argument]
    60: sge->sge_fl_buf_size[reg] = csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE0_A +  [in csio_get_flbuf_size()]
    66: csio_wr_fl_bufsz(struct csio_sge *sge, struct csio_dma_buf *buf)  [in csio_wr_fl_bufsz(), argument]
    68: return sge->sge_fl_buf_size[buf->paddr & 0xF];  [in csio_wr_fl_bufsz()]
    75: return (hw->wrm.sge.sge_control & EGRSTATUSPAGESIZE_F) ? 128 : 64;  [in csio_wr_qstat_pgsz()]
    118: struct csio_sge *sge = &wrm->sge;  [in csio_wr_fill_fl(), local]
    126: buf->len = sge->sge_fl_buf_size[sreg];  [in csio_wr_fill_fl()]
    1047: struct csio_sge *sge = &wrm->sge;  [in csio_wr_process_fl(), local]
    1073: bufsz = csio_wr_fl_bufsz(sge, buf);  [in csio_wr_process_fl()]
    1094: flq->un.fl.offset += ALIGN(lastlen, sge->csio_fl_align);  [in csio_wr_process_fl()]
    [all …]
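
The csio_wr_fl_bufsz() hit above (buf->paddr & 0xF) indexes the sge_fl_buf_size[] table with the low four bits of the buffer's DMA address, i.e. the buffer-size register index rides in address bits that alignment guarantees to be zero. A hypothetical userspace illustration of that low-bit tagging; the helper names and the 4-bit width are assumptions read off the snippet, not Chelsio API:

    #include <stdint.h>
    #include <assert.h>

    #define FL_SIZE_IDX_MASK 0xFULL  /* low 4 bits, cf. buf->paddr & 0xF */

    /* Stash a small table index in the low bits of an aligned address. */
    static uint64_t fl_tag_addr(uint64_t aligned_addr, unsigned int size_idx)
    {
        assert((aligned_addr & FL_SIZE_IDX_MASK) == 0);  /* 16-byte aligned */
        assert(size_idx <= FL_SIZE_IDX_MASK);
        return aligned_addr | size_idx;
    }

    /* Recover the index, as csio_wr_fl_bufsz() does with buf->paddr. */
    static unsigned int fl_addr_to_size_idx(uint64_t tagged_addr)
    {
        return (unsigned int)(tagged_addr & FL_SIZE_IDX_MASK);
    }

    /* Recover the real buffer address. */
    static uint64_t fl_addr_untag(uint64_t tagged_addr)
    {
        return tagged_addr & ~FL_SIZE_IDX_MASK;
    }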

/Linux-v4.19/drivers/net/ethernet/huawei/hinic/

hinic_common.c:
    64: void hinic_set_sge(struct hinic_sge *sge, dma_addr_t addr, int len)  [in hinic_set_sge(), argument]
    66: sge->hi_addr = upper_32_bits(addr);  [in hinic_set_sge()]
    67: sge->lo_addr = lower_32_bits(addr);  [in hinic_set_sge()]
    68: sge->len = len;  [in hinic_set_sge()]
    77: dma_addr_t hinic_sge_to_dma(struct hinic_sge *sge)  [in hinic_sge_to_dma(), argument]
    79: return (dma_addr_t)((((u64)sge->hi_addr) << 32) | sge->lo_addr);  [in hinic_sge_to_dma()]
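
hinic_set_sge() and hinic_sge_to_dma() above are a complete pair: the 64-bit DMA address is stored in the hardware descriptor as two 32-bit halves and reassembled on the way back. A standalone sketch of the same round trip, with a stand-in struct in place of hinic_sge:

    #include <stdint.h>

    /* Mimics the three hinic_sge fields visible in the hits above. */
    struct toy_hinic_sge {
        uint32_t hi_addr;
        uint32_t lo_addr;
        uint32_t len;
    };

    static void toy_set_sge(struct toy_hinic_sge *sge, uint64_t addr, uint32_t len)
    {
        sge->hi_addr = (uint32_t)(addr >> 32);  /* upper_32_bits(addr) */
        sge->lo_addr = (uint32_t)addr;          /* lower_32_bits(addr) */
        sge->len     = len;
    }

    static uint64_t toy_sge_to_dma(const struct toy_hinic_sge *sge)
    {
        /* Round trip: returns exactly the addr passed to toy_set_sge(),
         * for any 64-bit value. */
        return ((uint64_t)sge->hi_addr << 32) | sge->lo_addr;
    }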

hinic_rx.c:
    172: struct hinic_sge sge;  [in rx_alloc_pkts(), local]
    191: hinic_set_sge(&sge, dma_addr, skb->len);  [in rx_alloc_pkts()]
    200: hinic_rq_prepare_wqe(rxq->rq, prod_idx, rq_wqe, &sge);  [in rx_alloc_pkts()]
    224: struct hinic_sge sge;  [in free_all_rx_skbs(), local]
    231: hinic_rq_get_sge(rq, &hw_wqe->rq_wqe, ci, &sge);  [in free_all_rx_skbs()]
    235: rx_free_skb(rxq, rq->saved_skb[ci], hinic_sge_to_dma(&sge));  [in free_all_rx_skbs()]
    265: struct hinic_sge sge;  [in rx_recv_jumbo_pkt(), local]
    274: hinic_rq_get_sge(rxq->rq, rq_wqe, ci, &sge);  [in rx_recv_jumbo_pkt()]
    276: rx_unmap_skb(rxq, hinic_sge_to_dma(&sge));  [in rx_recv_jumbo_pkt()]
    315: struct hinic_sge sge;  [in rxq_recv(), local]
    [all …]

/Linux-v4.19/drivers/infiniband/hw/qib/

qib_ruc.c:
    192: struct rvt_sge *sge;  [in qib_ruc_loopback(), local]
    262: sqp->s_sge.sge = wqe->sg_list[0];  [in qib_ruc_loopback()]
    295: if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, wqe->length,  [in qib_ruc_loopback()]
    308: if (unlikely(!rvt_rkey_ok(qp, &sqp->s_sge.sge, wqe->length,  [in qib_ruc_loopback()]
    316: qp->r_sge.sge = wqe->sg_list[0];  [in qib_ruc_loopback()]
    326: if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),  [in qib_ruc_loopback()]
    332: maddr = (atomic64_t *) qp->r_sge.sge.vaddr;  [in qib_ruc_loopback()]
    334: *(u64 *) sqp->s_sge.sge.vaddr =  [in qib_ruc_loopback()]
    337: (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,  [in qib_ruc_loopback()]
    339: rvt_put_mr(qp->r_sge.sge.mr);  [in qib_ruc_loopback()]
    [all …]
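
The qib_ruc_loopback() hits above emulate IB atomics in software when both QPs sit on the same host: the value previously stored at the responder's r_sge address is written back through the sender's SGE, produced either by an atomic add or by cmpxchg(). A hypothetical C11 userspace analogue of that old-value-returning dispatch (the kernel code itself uses atomic64_add_return() and cmpxchg()):

    #include <stdint.h>
    #include <stdbool.h>
    #include <stdatomic.h>

    /* Returns the value that lived at *target before the operation, which
     * is what loopback writes back into the requester's SGE. */
    static uint64_t toy_loopback_atomic(_Atomic uint64_t *target, bool fetch_add,
                                        uint64_t swap_or_add, uint64_t compare)
    {
        if (fetch_add)
            return atomic_fetch_add(target, swap_or_add);

        /* Compare-and-swap with cmpxchg()-style semantics: the old value
         * is returned whether or not the exchange took place. */
        uint64_t old = compare;
        atomic_compare_exchange_strong(target, &old, swap_or_add);
        return old;
    }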

qib_ud.c:
    60: struct rvt_sge *sge;  [in qib_ud_loopback(), local]
    171: ssge.sge = *swqe->sg_list;  [in qib_ud_loopback()]
    173: sge = &ssge.sge;  [in qib_ud_loopback()]
    175: u32 len = sge->length;  [in qib_ud_loopback()]
    179: if (len > sge->sge_length)  [in qib_ud_loopback()]
    180: len = sge->sge_length;  [in qib_ud_loopback()]
    182: qib_copy_sge(&qp->r_sge, sge->vaddr, len, 1);  [in qib_ud_loopback()]
    183: sge->vaddr += len;  [in qib_ud_loopback()]
    184: sge->length -= len;  [in qib_ud_loopback()]
    185: sge->sge_length -= len;  [in qib_ud_loopback()]
    [all …]

qib_verbs.c:
    142: struct rvt_sge *sge = &ss->sge;  [in qib_copy_sge(), local]
    145: u32 len = rvt_get_sge_length(sge, length);  [in qib_copy_sge()]
    148: memcpy(sge->vaddr, data, len);  [in qib_copy_sge()]
    163: struct rvt_sge sge = ss->sge;  [in qib_count_sge(), local]
    168: u32 len = sge.length;  [in qib_count_sge()]
    172: if (len > sge.sge_length)  [in qib_count_sge()]
    173: len = sge.sge_length;  [in qib_count_sge()]
    175: if (((long) sge.vaddr & (sizeof(u32) - 1)) ||  [in qib_count_sge()]
    181: sge.vaddr += len;  [in qib_count_sge()]
    182: sge.length -= len;  [in qib_count_sge()]
    [all …]

qib_sdma.c:
    516: struct rvt_sge *sge;  [in qib_sdma_verbs_send(), local]
    565: sge = &ss->sge;  [in qib_sdma_verbs_send()]
    571: if (len > sge->length)  [in qib_sdma_verbs_send()]
    572: len = sge->length;  [in qib_sdma_verbs_send()]
    573: if (len > sge->sge_length)  [in qib_sdma_verbs_send()]
    574: len = sge->sge_length;  [in qib_sdma_verbs_send()]
    577: addr = dma_map_single(&ppd->dd->pcidev->dev, sge->vaddr,  [in qib_sdma_verbs_send()]
    596: sge->vaddr += len;  [in qib_sdma_verbs_send()]
    597: sge->length -= len;  [in qib_sdma_verbs_send()]
    598: sge->sge_length -= len;  [in qib_sdma_verbs_send()]
    [all …]

/Linux-v4.19/drivers/infiniband/ulp/iser/

iser_memory.c:
    197: reg->sge.lkey = device->pd->local_dma_lkey;  [in iser_reg_dma()]
    207: reg->sge.addr = ib_sg_dma_address(device->ib_device, &sg[0]);  [in iser_reg_dma()]
    208: reg->sge.length = ib_sg_dma_len(device->ib_device, &sg[0]);  [in iser_reg_dma()]
    211: " length=0x%x\n", reg->sge.lkey, reg->rkey,  [in iser_reg_dma()]
    212: reg->sge.addr, reg->sge.length);  [in iser_reg_dma()]
    259: reg->sge.lkey = fmr->fmr->lkey;  [in iser_fast_reg_fmr()]
    261: reg->sge.addr = page_vec->fake_mr.iova;  [in iser_fast_reg_fmr()]
    262: reg->sge.length = page_vec->fake_mr.length;  [in iser_fast_reg_fmr()]
    266: " length=0x%x\n", reg->sge.lkey, reg->rkey,  [in iser_fast_reg_fmr()]
    267: reg->sge.addr, reg->sge.length);  [in iser_fast_reg_fmr()]
    [all …]

/Linux-v4.19/drivers/net/ethernet/chelsio/cxgb4/

cxgb4_uld.c:
    140: struct sge *s = &adap->sge;  [in alloc_uld_rxqs()]
    195: struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];  [in setup_sge_queues_uld()]
    211: struct sge *s = &adap->sge;  [in setup_sge_queues_uld()]
    240: struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];  [in free_sge_queues_uld()]
    243: struct sge *s = &adap->sge;  [in free_sge_queues_uld()]
    268: struct sge *s = &adap->sge;  [in cfg_queues_uld()]
    336: adap->sge.uld_rxq_info[uld_type] = rxq_info;  [in cfg_queues_uld()]
    343: struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];  [in free_queues_uld()]
    345: adap->sge.uld_rxq_info[uld_type] = NULL;  [in free_queues_uld()]
    354: struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];  [in request_msix_queue_irqs_uld()]
    [all …]

sge.c:
    148: struct sge *s = &adapter->sge;  [in fl_mtu_bufsize()]
    235: const struct sge *s = &adapter->sge;  [in fl_starving()]
    319: for (p = sgl->sge; nfrags >= 2; nfrags -= 2) {  [in unmap_sgl()]
    435: struct sge *s = &adapter->sge;  [in get_buf_size()]
    572: struct sge *s = &adap->sge;  [in refill_fl()]
    581: if (test_bit(q->cntxt_id - adap->sge.egr_start, adap->sge.blocked_fl))  [in refill_fl()]
    659: set_bit(q->cntxt_id - adap->sge.egr_start,  [in refill_fl()]
    660: adap->sge.starving_fl);  [in refill_fl()]
    887: to = (u8 *)end > (u8 *)q->stat ? buf : sgl->sge;  [in cxgb4_write_sgl()]
    901: unsigned int part0 = (u8 *)q->stat - (u8 *)sgl->sge, part1;  [in cxgb4_write_sgl()]
    [all …]
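
The cxgb4_write_sgl() hits above (part0/part1 around q->stat) handle an SGL that would run past the end of the descriptor ring: it is staged in a side buffer and copied in two pieces, the remainder wrapping to the start of the ring. A generic sketch of that two-part copy; all names here are hypothetical, not the driver's:

    #include <string.h>
    #include <stdint.h>
    #include <stddef.h>

    static void ring_write_wrapped(uint8_t *ring_start, uint8_t *ring_end,
                                   uint8_t *pos, const uint8_t *buf, size_t len)
    {
        size_t part0 = (size_t)(ring_end - pos);  /* bytes before the wrap */

        if (len <= part0) {
            memcpy(pos, buf, len);                /* fits without wrapping */
        } else {
            memcpy(pos, buf, part0);              /* up to the end marker */
            memcpy(ring_start, buf + part0, len - part0);  /* wrap around */
        }
    }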

/Linux-v4.19/drivers/infiniband/hw/hfi1/

ruc.c:
    174: struct rvt_sge *sge;  [in ruc_loopback(), local]
    246: sqp->s_sge.sge = wqe->sg_list[0];  [in ruc_loopback()]
    302: if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, wqe->length,  [in ruc_loopback()]
    315: if (unlikely(!rvt_rkey_ok(qp, &sqp->s_sge.sge, wqe->length,  [in ruc_loopback()]
    323: qp->r_sge.sge = wqe->sg_list[0];  [in ruc_loopback()]
    333: if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),  [in ruc_loopback()]
    339: maddr = (atomic64_t *)qp->r_sge.sge.vaddr;  [in ruc_loopback()]
    341: *(u64 *)sqp->s_sge.sge.vaddr =  [in ruc_loopback()]
    344: (u64)cmpxchg((u64 *)qp->r_sge.sge.vaddr,  [in ruc_loopback()]
    346: rvt_put_mr(qp->r_sge.sge.mr);  [in ruc_loopback()]
    [all …]

ud.c:
    81: struct rvt_sge *sge;  [in ud_loopback(), local]
    220: ssge.sge = *swqe->sg_list;  [in ud_loopback()]
    222: sge = &ssge.sge;  [in ud_loopback()]
    224: u32 len = sge->length;  [in ud_loopback()]
    228: if (len > sge->sge_length)  [in ud_loopback()]
    229: len = sge->sge_length;  [in ud_loopback()]
    231: hfi1_copy_sge(&qp->r_sge, sge->vaddr, len, true, false);  [in ud_loopback()]
    232: sge->vaddr += len;  [in ud_loopback()]
    233: sge->length -= len;  [in ud_loopback()]
    234: sge->sge_length -= len;  [in ud_loopback()]
    [all …]

/Linux-v4.19/drivers/infiniband/sw/rxe/

rxe_mr.c:
    446: struct rxe_sge *sge = &dma->sge[dma->cur_sge];  [in copy_data(), local]
    461: if (sge->length && (offset < sge->length)) {  [in copy_data()]
    462: mem = lookup_mem(pd, access, sge->lkey, lookup_local);  [in copy_data()]
    472: if (offset >= sge->length) {  [in copy_data()]
    477: sge++;  [in copy_data()]
    486: if (sge->length) {  [in copy_data()]
    487: mem = lookup_mem(pd, access, sge->lkey,  [in copy_data()]
    498: if (bytes > sge->length - offset)  [in copy_data()]
    499: bytes = sge->length - offset;  [in copy_data()]
    502: iova = sge->addr + offset;  [in copy_data()]
    [all …]
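
copy_data() above resumes a transfer at a saved (cur_sge, offset) position inside the WQE's SGE array, clamps every step to what remains of the current SGE, and steps to the next SGE when the offset reaches its length. A simplified userspace model of that walk, with illustrative structs in place of rxe_dma_info and without the lookup_mem()/MR handling:

    #include <stdint.h>
    #include <stddef.h>
    #include <string.h>

    struct toy_rxe_sge { uint8_t *addr; uint32_t length; };

    struct toy_dma_info {
        struct toy_rxe_sge sge[4];
        int      num_sge;
        int      cur_sge;     /* resume position: which SGE */
        uint32_t sge_offset;  /* resume position: offset within it */
    };

    static int toy_copy_data(struct toy_dma_info *dma, uint8_t *dest, size_t length)
    {
        while (length > 0) {
            struct toy_rxe_sge *sge = &dma->sge[dma->cur_sge];

            if (dma->sge_offset >= sge->length) {  /* current SGE used up */
                if (++dma->cur_sge >= dma->num_sge)
                    return -1;                     /* ran out of SGEs */
                dma->sge_offset = 0;
                continue;
            }

            size_t bytes = length;
            if (bytes > sge->length - dma->sge_offset)
                bytes = sge->length - dma->sge_offset;  /* clamp to this SGE */

            memcpy(dest, sge->addr + dma->sge_offset, bytes);
            dest            += bytes;
            dma->sge_offset += bytes;
            length          -= bytes;
        }
        return 0;
    }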

/Linux-v4.19/drivers/net/ethernet/chelsio/cxgb4vf/

sge.c:
    264: const struct sge *s = &adapter->sge;  [in fl_starving()]
    325: for (p = sgl->sge; nfrags >= 2; nfrags -= 2) {  [in unmap_sgl()]
    457: const struct sge *s = &adapter->sge;  [in get_buf_size()]
    607: struct sge *s = &adapter->sge;  [in refill_fl()]
    715: set_bit(fl->cntxt_id, adapter->sge.starving_fl);  [in refill_fl()]
    930: to = (u8 *)end > (u8 *)tq->stat ? buf : sgl->sge;  [in write_sgl()]
    944: unsigned int part0 = (u8 *)tq->stat - (u8 *)sgl->sge, part1;  [in write_sgl()]
    947: memcpy(sgl->sge, buf, part0);  [in write_sgl()]
    1198: txq = &adapter->sge.ethtxq[pi->first_qset + qidx];  [in t4vf_eth_xmit()]
    1571: struct sge *s = &adapter->sge;  [in do_gro()]
    [all …]

cxgb4vf_main.c:
    328: struct sge *s = &adapter->sge;  [in request_msix_queue_irqs()]
    366: struct sge *s = &adapter->sge;  [in free_msix_queue_irqs()]
    399: struct sge *s = &adapter->sge;  [in enable_rx()]
    422: struct sge *s = &adapter->sge;  [in quiesce_rx()]
    480: struct sge *s = &adapter->sge;  [in fwevtq_handler()]
    536: struct sge *s = &adapter->sge;  [in setup_sge_queues()]
    651: struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[pi->first_qset];  [in setup_rss()]
    966: static int closest_timer(const struct sge *s, int us)  [in closest_timer()]
    982: static int closest_thres(const struct sge *s, int thres)  [in closest_thres()]
    1007: ? adapter->sge.timer_val[timer_idx]  [in qtimer_val()]
    [all …]

/Linux-v4.19/drivers/infiniband/core/

rw.c:
    102: reg->sge.addr = reg->mr->iova;  [in rdma_rw_init_one_mr()]
    103: reg->sge.length = reg->mr->length;  [in rdma_rw_init_one_mr()]
    141: reg->wr.wr.sg_list = &reg->sge;  [in rdma_rw_init_mr_wrs()]
    155: remote_addr += reg->sge.length;  [in rdma_rw_init_mr_wrs()]
    184: struct ib_sge *sge;  [in rdma_rw_init_map_wrs(), local]
    189: ctx->map.sges = sge = kcalloc(sg_cnt, sizeof(*sge), GFP_KERNEL);  [in rdma_rw_init_map_wrs()]
    208: rdma_wr->wr.sg_list = sge;  [in rdma_rw_init_map_wrs()]
    211: sge->addr = ib_sg_dma_address(dev, sg) + offset;  [in rdma_rw_init_map_wrs()]
    212: sge->length = ib_sg_dma_len(dev, sg) - offset;  [in rdma_rw_init_map_wrs()]
    213: sge->lkey = qp->pd->local_dma_lkey;  [in rdma_rw_init_map_wrs()]
    [all …]
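
The rdma_rw_init_map_wrs() hits above build one ib_sge per mapped scatterlist entry, apply the caller's byte offset only to the first entry, and use the PD's local_dma_lkey throughout. A userspace model of that fill loop with stand-in structs; only the addr/length/lkey shape of ib_sge is taken from the source:

    #include <stdint.h>
    #include <stddef.h>

    struct toy_ib_sge { uint64_t addr; uint32_t length; uint32_t lkey; };
    struct toy_frag   { uint64_t dma_addr; uint32_t dma_len; };  /* one mapped SG entry */

    static void toy_fill_sges(struct toy_ib_sge *sge, const struct toy_frag *frag,
                              size_t nfrags, uint32_t offset, uint32_t local_dma_lkey)
    {
        for (size_t i = 0; i < nfrags; i++, sge++, frag++) {
            sge->addr   = frag->dma_addr + offset;
            sge->length = frag->dma_len - offset;
            sge->lkey   = local_dma_lkey;
            offset = 0;  /* the offset only ever applies to the first fragment */
        }
    }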

/Linux-v4.19/net/sunrpc/xprtrdma/

rpc_rdma.c:
    508: struct ib_sge *sge;  [in rpcrdma_unmap_sendctx(), local]
    515: sge = &sc->sc_sges[2];  [in rpcrdma_unmap_sendctx()]
    516: for (count = sc->sc_unmap_count; count; ++sge, --count)  [in rpcrdma_unmap_sendctx()]
    518: sge->addr, sge->length, DMA_TO_DEVICE);  [in rpcrdma_unmap_sendctx()]
    534: struct ib_sge *sge = sc->sc_sges;  [in rpcrdma_prepare_hdr_sge(), local]
    538: sge->addr = rdmab_addr(rb);  [in rpcrdma_prepare_hdr_sge()]
    539: sge->length = len;  [in rpcrdma_prepare_hdr_sge()]
    540: sge->lkey = rdmab_lkey(rb);  [in rpcrdma_prepare_hdr_sge()]
    542: ib_dma_sync_single_for_device(rdmab_device(rb), sge->addr,  [in rpcrdma_prepare_hdr_sge()]
    543: sge->length, DMA_TO_DEVICE);  [in rpcrdma_prepare_hdr_sge()]
    [all …]

/Linux-v4.19/net/rds/

ib_recv.c:
    53: struct ib_sge *sge;  [in rds_ib_recv_init_ring(), local]
    63: sge = &recv->r_sge[0];  [in rds_ib_recv_init_ring()]
    64: sge->addr = ic->i_recv_hdrs_dma + (i * sizeof(struct rds_header));  [in rds_ib_recv_init_ring()]
    65: sge->length = sizeof(struct rds_header);  [in rds_ib_recv_init_ring()]
    66: sge->lkey = ic->i_pd->local_dma_lkey;  [in rds_ib_recv_init_ring()]
    68: sge = &recv->r_sge[1];  [in rds_ib_recv_init_ring()]
    69: sge->addr = 0;  [in rds_ib_recv_init_ring()]
    70: sge->length = RDS_FRAG_SIZE;  [in rds_ib_recv_init_ring()]
    71: sge->lkey = ic->i_pd->local_dma_lkey;  [in rds_ib_recv_init_ring()]
    310: struct ib_sge *sge;  [in rds_ib_recv_refill_one(), local]
    [all …]
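
rds_ib_recv_init_ring() above gives every receive work request exactly two SGEs: r_sge[0] points at that slot's entry in one contiguous DMA allocation of headers, and r_sge[1] describes a fragment-sized data buffer whose address is attached later at refill time (hence addr = 0 here). A sketch of that layout under stand-in sizes and structs:

    #include <stdint.h>

    #define TOY_HDR_SIZE  48    /* stand-in for sizeof(struct rds_header) */
    #define TOY_FRAG_SIZE 4096  /* stand-in for RDS_FRAG_SIZE */

    struct toy_ib_sge { uint64_t addr; uint32_t length; uint32_t lkey; };
    struct toy_recv   { struct toy_ib_sge r_sge[2]; };

    static void toy_recv_init(struct toy_recv *recv, unsigned int i,
                              uint64_t hdrs_dma, uint32_t lkey)
    {
        /* SGE 0: the i-th header slot in the shared header allocation. */
        recv->r_sge[0].addr   = hdrs_dma + (uint64_t)i * TOY_HDR_SIZE;
        recv->r_sge[0].length = TOY_HDR_SIZE;
        recv->r_sge[0].lkey   = lkey;

        /* SGE 1: data fragment; the address is filled in when a page is
         * attached during refill, as in the init-ring hits above. */
        recv->r_sge[1].addr   = 0;
        recv->r_sge[1].length = TOY_FRAG_SIZE;
        recv->r_sge[1].lkey   = lkey;
    }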

/Linux-v4.19/drivers/nvme/target/

rdma.c:
    43: struct ib_sge sge[NVMET_RDMA_MAX_INLINE_SGE + 1];  [member]
    213: struct ib_sge *sge;  [in nvmet_rdma_free_inline_pages(), local]
    220: sge = &c->sge[1];  [in nvmet_rdma_free_inline_pages()]
    222: for (i = 0; i < ndev->inline_page_count; i++, sg++, sge++) {  [in nvmet_rdma_free_inline_pages()]
    223: if (sge->length)  [in nvmet_rdma_free_inline_pages()]
    224: ib_dma_unmap_page(ndev->device, sge->addr,  [in nvmet_rdma_free_inline_pages()]
    225: sge->length, DMA_FROM_DEVICE);  [in nvmet_rdma_free_inline_pages()]
    235: struct ib_sge *sge;  [in nvmet_rdma_alloc_inline_pages(), local]
    245: sge = &c->sge[1];  [in nvmet_rdma_alloc_inline_pages()]
    248: for (i = 0; i < ndev->inline_page_count; i++, sg++, sge++) {  [in nvmet_rdma_alloc_inline_pages()]
    [all …]