Searched refs:scat (Results 1 – 11 of 11) sorted by relevance

/Linux-v4.19/net/rds/
page.c
68 int rds_page_remainder_alloc(struct scatterlist *scat, unsigned long bytes, in rds_page_remainder_alloc() argument
84 sg_set_page(scat, page, PAGE_SIZE, 0); in rds_page_remainder_alloc()
103 sg_set_page(scat, rem->r_page, bytes, rem->r_offset); in rds_page_remainder_alloc()
104 get_page(sg_page(scat)); in rds_page_remainder_alloc()
147 ret ? NULL : sg_page(scat), ret ? 0 : scat->offset, in rds_page_remainder_alloc()
148 ret ? 0 : scat->length); in rds_page_remainder_alloc()
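For context, a minimal sketch of the scatterlist calls matched above: sg_set_page() attaches a page (with a length and offset) to one entry, and sg_page()/scat->offset/scat->length read it back, as the lines 147-148 matches do. The helper name and error handling below are illustrative, not taken from rds_page_remainder_alloc().

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>

/* Illustrative helper: point one scatterlist entry at a freshly
 * allocated page, the way the page.c matches above do. */
static int fill_one_sg_entry(struct scatterlist *scat, unsigned long bytes)
{
	struct page *page = alloc_page(GFP_KERNEL);

	if (!page)
		return -ENOMEM;

	if (bytes > PAGE_SIZE)
		bytes = PAGE_SIZE;

	/* The entry now covers 'bytes' bytes of 'page' starting at offset 0. */
	sg_set_page(scat, page, bytes, 0);

	return 0;
}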
ib_fmr.c
93 struct scatterlist *scat = sg; in rds_ib_map_fmr() local
111 unsigned int dma_len = ib_sg_dma_len(dev, &scat[i]); in rds_ib_map_fmr()
112 u64 dma_addr = ib_sg_dma_address(dev, &scat[i]); in rds_ib_map_fmr()
151 unsigned int dma_len = ib_sg_dma_len(dev, &scat[i]); in rds_ib_map_fmr()
152 u64 dma_addr = ib_sg_dma_address(dev, &scat[i]); in rds_ib_map_fmr()
170 ibmr->sg = scat; in rds_ib_map_fmr()
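The ib_fmr.c matches read each DMA-mapped entry through the v4.19 accessors ib_sg_dma_len()/ib_sg_dma_address(). A hedged sketch of that walk, with a hypothetical helper name:

#include <linux/scatterlist.h>
#include <rdma/ib_verbs.h>

/* Illustrative only: total up the DMA length of a mapped scatterlist,
 * reading entries the way rds_ib_map_fmr() does above. */
static u64 total_dma_len(struct ib_device *dev, struct scatterlist *scat,
			 int sg_cnt)
{
	u64 total = 0;
	int i;

	for (i = 0; i < sg_cnt; i++) {
		/* ib_sg_dma_address(dev, &scat[i]) gives the bus address of
		 * this entry; the FMR mapping code above also checks its
		 * page alignment before programming the page list. */
		total += ib_sg_dma_len(dev, &scat[i]);
	}

	return total;
}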
ib_send.c
496 struct scatterlist *scat; in rds_ib_xmit() local
516 scat = &rm->data.op_sg[sg]; in rds_ib_xmit()
517 ret = max_t(int, RDS_CONG_MAP_BYTES, scat->length); in rds_ib_xmit()
625 scat = &ic->i_data_op->op_sg[rm->data.op_dmasg]; in rds_ib_xmit()
646 && scat != &rm->data.op_sg[rm->data.op_count]) { in rds_ib_xmit()
648 ib_sg_dma_len(dev, scat) - rm->data.op_dmaoff); in rds_ib_xmit()
651 send->s_sge[1].addr = ib_sg_dma_address(dev, scat); in rds_ib_xmit()
657 if (rm->data.op_dmaoff == ib_sg_dma_len(dev, scat)) { in rds_ib_xmit()
658 scat++; in rds_ib_xmit()
699 && scat != &rm->data.op_sg[rm->data.op_count]); in rds_ib_xmit()
[all …]
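In the ib_send.c excerpt, scat walks rm->data.op_sg while op_dmaoff tracks how far into the current entry the send has progressed. A simplified sketch of building one ib_sge from that position (the helper name and the lkey parameter are illustrative, not the RDS implementation):

#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include <rdma/ib_verbs.h>

/* Illustrative: describe up to 'max_bytes' of the current scatterlist
 * entry, starting 'dmaoff' bytes in, as a single ib_sge. */
static unsigned int fill_sge(struct ib_sge *sge, struct ib_device *dev,
			     struct scatterlist *scat, unsigned int dmaoff,
			     unsigned int max_bytes, u32 lkey)
{
	unsigned int len = min(max_bytes, ib_sg_dma_len(dev, scat) - dmaoff);

	sge->addr   = ib_sg_dma_address(dev, scat) + dmaoff;
	sge->length = len;
	sge->lkey   = lkey;

	/* The caller advances to the next entry once dmaoff reaches
	 * ib_sg_dma_len(dev, scat), as lines 657-658 above do. */
	return len;
}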
rds.h
862 int rds_page_remainder_alloc(struct scatterlist *scat, unsigned long bytes,
/Linux-v4.19/drivers/infiniband/hw/mlx4/
srq.c
315 struct mlx4_wqe_data_seg *scat; in mlx4_ib_post_srq_recv() local
347 scat = (struct mlx4_wqe_data_seg *) (next + 1); in mlx4_ib_post_srq_recv()
350 scat[i].byte_count = cpu_to_be32(wr->sg_list[i].length); in mlx4_ib_post_srq_recv()
351 scat[i].lkey = cpu_to_be32(wr->sg_list[i].lkey); in mlx4_ib_post_srq_recv()
352 scat[i].addr = cpu_to_be64(wr->sg_list[i].addr); in mlx4_ib_post_srq_recv()
356 scat[i].byte_count = 0; in mlx4_ib_post_srq_recv()
357 scat[i].lkey = cpu_to_be32(MLX4_INVALID_LKEY); in mlx4_ib_post_srq_recv()
358 scat[i].addr = 0; in mlx4_ib_post_srq_recv()
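Both mlx4 excerpts, and the mlx5 and hns_roce matches further down, follow the same receive-WQE pattern: copy each ib_sge of the work request into a hardware data segment in big-endian form, then terminate the list with an invalid-lkey sentinel. A sketch of that loop, assuming scat points at max_gs free segments (the helper name is illustrative):

#include <linux/mlx4/device.h>
#include <linux/mlx4/qp.h>
#include <rdma/ib_verbs.h>

/* Illustrative: fill receive data segments from a work request. */
static void fill_recv_data_segs(struct mlx4_wqe_data_seg *scat,
				const struct ib_recv_wr *wr, int max_gs)
{
	int i;

	for (i = 0; i < wr->num_sge; i++) {
		scat[i].byte_count = cpu_to_be32(wr->sg_list[i].length);
		scat[i].lkey       = cpu_to_be32(wr->sg_list[i].lkey);
		scat[i].addr       = cpu_to_be64(wr->sg_list[i].addr);
	}

	if (i < max_gs) {
		/* Sentinel segment marks the end of the scatter list. */
		scat[i].byte_count = 0;
		scat[i].lkey       = cpu_to_be32(MLX4_INVALID_LKEY);
		scat[i].addr       = 0;
	}
}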
qp.c
3773 struct mlx4_wqe_data_seg *scat; in _mlx4_ib_post_recv() local
3808 scat = get_recv_wqe(qp, ind); in _mlx4_ib_post_recv()
3816 scat->byte_count = in _mlx4_ib_post_recv()
3819 scat->lkey = cpu_to_be32(wr->sg_list->lkey); in _mlx4_ib_post_recv()
3820 scat->addr = cpu_to_be64(qp->sqp_proxy_rcv[ind].map); in _mlx4_ib_post_recv()
3821 scat++; in _mlx4_ib_post_recv()
3826 __set_data_seg(scat + i, wr->sg_list + i); in _mlx4_ib_post_recv()
3829 scat[i].byte_count = 0; in _mlx4_ib_post_recv()
3830 scat[i].lkey = cpu_to_be32(MLX4_INVALID_LKEY); in _mlx4_ib_post_recv()
3831 scat[i].addr = 0; in _mlx4_ib_post_recv()
/Linux-v4.19/drivers/infiniband/hw/mlx5/
srq.c
454 struct mlx5_wqe_data_seg *scat; in mlx5_ib_post_srq_recv() local
487 scat = (struct mlx5_wqe_data_seg *)(next + 1); in mlx5_ib_post_srq_recv()
490 scat[i].byte_count = cpu_to_be32(wr->sg_list[i].length); in mlx5_ib_post_srq_recv()
491 scat[i].lkey = cpu_to_be32(wr->sg_list[i].lkey); in mlx5_ib_post_srq_recv()
492 scat[i].addr = cpu_to_be64(wr->sg_list[i].addr); in mlx5_ib_post_srq_recv()
496 scat[i].byte_count = 0; in mlx5_ib_post_srq_recv()
497 scat[i].lkey = cpu_to_be32(MLX5_INVALID_LKEY); in mlx5_ib_post_srq_recv()
498 scat[i].addr = 0; in mlx5_ib_post_srq_recv()
qp.c
4693 struct mlx5_wqe_data_seg *scat; in _mlx5_ib_post_recv() local
4730 scat = get_recv_wqe(qp, ind); in _mlx5_ib_post_recv()
4732 scat++; in _mlx5_ib_post_recv()
4735 set_data_ptr_seg(scat + i, wr->sg_list + i); in _mlx5_ib_post_recv()
4738 scat[i].byte_count = 0; in _mlx5_ib_post_recv()
4739 scat[i].lkey = cpu_to_be32(MLX5_INVALID_LKEY); in _mlx5_ib_post_recv()
4740 scat[i].addr = 0; in _mlx5_ib_post_recv()
4744 sig = (struct mlx5_rwqe_sig *)scat; in _mlx5_ib_post_recv()
/Linux-v4.19/drivers/infiniband/ulp/srp/
ib_srp.c
1621 struct srp_request *req, struct scatterlist *scat, in srp_map_sg_fmr() argument
1631 for_each_sg(scat, sg, count, i) { in srp_map_sg_fmr()
1645 struct srp_request *req, struct scatterlist *scat, in srp_map_sg_fr() argument
1652 state->sg = scat; in srp_map_sg_fr()
1673 struct srp_request *req, struct scatterlist *scat, in srp_map_sg_dma() argument
1681 for_each_sg(scat, sg, count, i) { in srp_map_sg_dma()
1747 struct scatterlist *scat, int count) in srp_check_mapping() argument
1782 struct scatterlist *scat; in srp_map_data() local
1805 scat = scsi_sglist(scmnd); in srp_map_data()
1810 count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction); in srp_map_data()
[all …]
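The ib_srp.c matches show the usual SRP mapping flow: take the SCSI command's scatterlist with scsi_sglist(), DMA-map it with ib_dma_map_sg(), then walk the mapped entries with for_each_sg(). A minimal sketch of that flow with a hypothetical helper name (srp_map_data() itself goes on to register memory with FMR or FR):

#include <linux/scatterlist.h>
#include <rdma/ib_verbs.h>
#include <scsi/scsi_cmnd.h>

/* Illustrative: map a command's scatterlist and sum the mapped lengths. */
static int sum_mapped_bytes(struct ib_device *ibdev, struct scsi_cmnd *scmnd,
			    unsigned int *total)
{
	struct scatterlist *scat = scsi_sglist(scmnd);
	int nents = scsi_sg_count(scmnd);
	struct scatterlist *sg;
	int count, i;

	count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
	if (count == 0)
		return -EIO;

	*total = 0;
	for_each_sg(scat, sg, count, i)
		*total += sg_dma_len(sg);	/* length of each mapped entry */

	ib_dma_unmap_sg(ibdev, scat, nents, scmnd->sc_data_direction);
	return 0;
}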
/Linux-v4.19/Documentation/m68k/
kernel-options.txt
572 Syntax: atascsi=<can_queue>[,<cmd_per_lun>[,<scat-gat>[,<host-id>[,<tagged>]]]]
606 <scat-gat>:
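For orientation only (the numbers are arbitrary placeholders, not recommended values), a kernel command line using this positional syntax might look like:

	atascsi=16,4,8

which fills the first three positions, <can_queue>=16, <cmd_per_lun>=4 and <scat-gat>=8, and omits the trailing optional fields.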
/Linux-v4.19/drivers/infiniband/hw/hns/
hns_roce_hw_v1.c
361 struct hns_roce_wqe_data_seg *scat = NULL; in hns_roce_v1_post_recv() local
394 scat = (struct hns_roce_wqe_data_seg *)(ctrl + 1); in hns_roce_v1_post_recv()
397 set_data_seg(scat + i, wr->sg_list + i); in hns_roce_v1_post_recv()