
Searched refs: nr_segs (Results 1 – 25 of 39), sorted by relevance


/Linux-v5.4/include/linux/
uio.h
46 unsigned long nr_segs; member
96 static inline size_t iov_length(const struct iovec *iov, unsigned long nr_segs) in iov_length() argument
101 for (seg = 0; seg < nr_segs; seg++) in iov_length()
217 unsigned long nr_segs, size_t count);
219 unsigned long nr_segs, size_t count);
221 unsigned long nr_segs, size_t count);
271 unsigned nr_segs, unsigned fast_segs,
277 unsigned nr_segs, unsigned fast_segs,
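A note on the uio.h hits: iov_length() (line 96 above) is simply the sum of iov_len over the first nr_segs entries. A minimal sketch of that loop, matching the snippet above:

        static inline size_t iov_length(const struct iovec *iov,
                                        unsigned long nr_segs)
        {
                unsigned long seg;
                size_t ret = 0;

                /* Total byte count of the vector: add up each of the
                 * nr_segs segment lengths. */
                for (seg = 0; seg < nr_segs; seg++)
                        ret += iov[seg].iov_len;
                return ret;
        }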
blk-mq.h
316 struct bio *bio, unsigned int nr_segs);
compat.h
487 unsigned long nr_segs,
642 unsigned int nr_segs, unsigned int flags);
/Linux-v5.4/block/
blk-mq-sched.c
227 unsigned int nr_segs, struct request **merged_request) in blk_mq_sched_try_merge() argument
235 if (!bio_attempt_back_merge(rq, bio, nr_segs)) in blk_mq_sched_try_merge()
244 if (!bio_attempt_front_merge(rq, bio, nr_segs)) in blk_mq_sched_try_merge()
263 struct bio *bio, unsigned int nr_segs) in blk_mq_bio_list_merge() argument
281 nr_segs); in blk_mq_bio_list_merge()
286 nr_segs); in blk_mq_bio_list_merge()
310 unsigned int nr_segs) in blk_mq_attempt_merge() argument
316 if (blk_mq_bio_list_merge(q, &ctx->rq_lists[type], bio, nr_segs)) { in blk_mq_attempt_merge()
325 unsigned int nr_segs) in __blk_mq_sched_bio_merge() argument
334 return e->type->ops.bio_merge(hctx, bio, nr_segs); in __blk_mq_sched_bio_merge()
[all …]
blk.h
110 unsigned int nr_segs) in blk_rq_bio_prep() argument
112 rq->nr_phys_segments = nr_segs; in blk_rq_bio_prep()
175 unsigned int nr_segs);
177 unsigned int nr_segs);
181 unsigned int nr_segs, struct request **same_queue_rq);
225 unsigned int *nr_segs);
227 unsigned int nr_segs);
229 unsigned int nr_segs);
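The blk.h hit at line 110 is where a freshly split bio's segment count gets cached on the request. A trimmed sketch of that pattern, assuming the v5.4 struct request fields (other bookkeeping such as ioprio is omitted):

        static inline void blk_rq_bio_prep(struct request *rq, struct bio *bio,
                                           unsigned int nr_segs)
        {
                /* Record the segment count computed during the split so
                 * later merge checks need not recount the bvecs. */
                rq->nr_phys_segments = nr_segs;
                rq->__data_len = bio->bi_iter.bi_size;
                rq->bio = rq->biotail = bio;
        }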
blk-mq-sched.h
15 unsigned int nr_segs, struct request **merged_request);
17 unsigned int nr_segs);
36 unsigned int nr_segs) in blk_mq_sched_bio_merge() argument
41 return __blk_mq_sched_bio_merge(q, bio, nr_segs); in blk_mq_sched_bio_merge()
blk-merge.c
294 unsigned int *nr_segs) in __blk_queue_split() argument
301 split = blk_bio_discard_split(q, *bio, &q->bio_split, nr_segs); in __blk_queue_split()
305 nr_segs); in __blk_queue_split()
309 nr_segs); in __blk_queue_split()
312 split = blk_bio_segment_split(q, *bio, &q->bio_split, nr_segs); in __blk_queue_split()
350 unsigned int nr_segs; in blk_queue_split() local
352 __blk_queue_split(q, bio, &nr_segs); in blk_queue_split()
572 int ll_back_merge_fn(struct request *req, struct bio *bio, unsigned int nr_segs) in ll_back_merge_fn() argument
585 return ll_new_hw_segment(req, bio, nr_segs); in ll_back_merge_fn()
588 int ll_front_merge_fn(struct request *req, struct bio *bio, unsigned int nr_segs) in ll_front_merge_fn() argument
[all …]
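The __blk_queue_split() hits above are the dispatch that produces nr_segs in the first place. A sketch of that switch per the v5.4 sources (the REQ_OP_WRITE_SAME arm and error handling are omitted):

        switch (bio_op(*bio)) {
        case REQ_OP_DISCARD:
        case REQ_OP_SECURE_ERASE:
                split = blk_bio_discard_split(q, *bio, &q->bio_split, nr_segs);
                break;
        case REQ_OP_WRITE_ZEROES:
                split = blk_bio_write_zeroes_split(q, *bio, &q->bio_split,
                                                   nr_segs);
                break;
        default:
                /* Regular data bio: split on segment/size limits and
                 * report the resulting segment count via *nr_segs. */
                split = blk_bio_segment_split(q, *bio, &q->bio_split, nr_segs);
                break;
        }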
blk-map.c
23 unsigned int nr_segs = 0; in blk_rq_append_bio() local
28 nr_segs++; in blk_rq_append_bio()
31 blk_rq_bio_prep(rq, *bio, nr_segs); in blk_rq_append_bio()
33 if (!ll_back_merge_fn(rq, *bio, nr_segs)) { in blk_rq_append_bio()
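In the blk-map.c hit, nr_segs is recomputed from scratch before the bio is attached or merged; a sketch of that counting loop, as the line-23 and line-28 hits suggest:

        unsigned int nr_segs = 0;
        struct bio_vec bv;
        struct bvec_iter iter;

        /* Count the bio's bvecs so the prep/merge paths below get an
         * accurate physical segment count. */
        bio_for_each_bvec(bv, *bio, iter)
                nr_segs++;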
blk-core.c
600 unsigned int nr_segs) in bio_attempt_back_merge() argument
604 if (!ll_back_merge_fn(req, bio, nr_segs)) in bio_attempt_back_merge()
622 unsigned int nr_segs) in bio_attempt_front_merge() argument
626 if (!ll_front_merge_fn(req, bio, nr_segs)) in bio_attempt_front_merge()
693 unsigned int nr_segs, struct request **same_queue_rq) in blk_attempt_plug_merge() argument
722 merged = bio_attempt_back_merge(rq, bio, nr_segs); in blk_attempt_plug_merge()
725 merged = bio_attempt_front_merge(rq, bio, nr_segs); in blk_attempt_plug_merge()
blk-mq.c
1778 unsigned int nr_segs) in blk_mq_bio_to_request() argument
1785 blk_rq_bio_prep(rq, bio, nr_segs); in blk_mq_bio_to_request()
1955 unsigned int nr_segs; in blk_mq_make_request() local
1959 __blk_queue_split(q, &bio, &nr_segs); in blk_mq_make_request()
1965 blk_attempt_plug_merge(q, bio, nr_segs, &same_queue_rq)) in blk_mq_make_request()
1968 if (blk_mq_sched_bio_merge(q, bio, nr_segs)) in blk_mq_make_request()
1988 blk_mq_bio_to_request(rq, bio, nr_segs); in blk_mq_make_request()
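Read together, the blk-mq.c hits outline the submission order: split (which yields nr_segs), then try the plug and scheduler merges, and only then build a request. A condensed sketch of that flow, with locking and error paths omitted:

        unsigned int nr_segs;

        __blk_queue_split(q, &bio, &nr_segs);

        /* A successful merge consumes the bio; bail out before
         * allocating a request. */
        if (blk_attempt_plug_merge(q, bio, nr_segs, &same_queue_rq))
                return BLK_QC_T_NONE;
        if (blk_mq_sched_bio_merge(q, bio, nr_segs))
                return BLK_QC_T_NONE;

        blk_mq_bio_to_request(rq, bio, nr_segs);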
mq-deadline.c
463 unsigned int nr_segs) in dd_bio_merge() argument
471 ret = blk_mq_sched_try_merge(q, bio, nr_segs, &free); in dd_bio_merge()
kyber-iosched.c
566 unsigned int nr_segs) in kyber_bio_merge() argument
576 merged = blk_mq_bio_list_merge(hctx->queue, rq_list, bio, nr_segs); in kyber_bio_merge()
bio.c
1094 if (data->nr_segs > UIO_MAXIOV) in bio_alloc_map_data()
1097 bmd = kmalloc(struct_size(bmd, iov, data->nr_segs), gfp_mask); in bio_alloc_map_data()
1100 memcpy(bmd->iov, data->iov, sizeof(struct iovec) * data->nr_segs); in bio_alloc_map_data()
/Linux-v5.4/lib/
iov_iter.c
108 i->nr_segs -= i->bvec - bvec; \
118 i->nr_segs -= kvec - i->kvec; \
130 i->nr_segs -= iov - i->iov; \
234 i->nr_segs -= iov - i->iov; in copy_page_to_iter_iovec()
318 i->nr_segs -= iov - i->iov; in copy_page_from_iter_iovec()
437 const struct iovec *iov, unsigned long nr_segs, in iov_iter_init() argument
451 i->nr_segs = nr_segs; in iov_iter_init()
1089 i->nr_segs++; in iov_iter_revert()
1101 i->nr_segs++; in iov_iter_revert()
1120 if (i->nr_segs == 1) in iov_iter_single_seg_count()
[all …]
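For the iov_iter.c hits: iov_iter_init() (line 437) records the segment array and nr_segs alongside the total byte count. A minimal caller sketch; the hdr/body buffers and their lengths are hypothetical:

        struct iovec iov[2] = {
                { .iov_base = hdr,  .iov_len = hdr_len },
                { .iov_base = body, .iov_len = body_len },
        };
        struct iov_iter iter;

        /* Direction READ means data lands in these buffers;
         * nr_segs is 2 and count is the summed segment length. */
        iov_iter_init(&iter, READ, iov, 2, hdr_len + body_len);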
/Linux-v5.4/drivers/scsi/
xen-scsifront.c
1058 unsigned int sg_grant, nr_segs; in scsifront_read_backend_params() local
1062 nr_segs = min_t(unsigned int, sg_grant, SG_ALL); in scsifront_read_backend_params()
1063 nr_segs = max_t(unsigned int, nr_segs, VSCSIIF_SG_TABLESIZE); in scsifront_read_backend_params()
1064 nr_segs = min_t(unsigned int, nr_segs, in scsifront_read_backend_params()
1069 dev_info(&dev->dev, "using up to %d SG entries\n", nr_segs); in scsifront_read_backend_params()
1070 else if (info->pause && nr_segs < host->sg_tablesize) in scsifront_read_backend_params()
1073 host->sg_tablesize, nr_segs); in scsifront_read_backend_params()
1075 host->sg_tablesize = nr_segs; in scsifront_read_backend_params()
1076 host->max_sectors = (nr_segs - 1) * PAGE_SIZE / 512; in scsifront_read_backend_params()
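A note on the max_sectors arithmetic at line 1076: each scatter-gather entry maps one page, so with 4 KiB pages and nr_segs = 128 the limit works out to (128 - 1) * 4096 / 512 = 1016 sectors; one entry appears to be held back, presumably so a transfer whose buffer is not page-aligned still fits.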
/Linux-v5.4/drivers/hwtracing/intel_th/
msu.c
73 unsigned int nr_segs; member
326 for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) { in msc_win_total_sz()
421 for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) { in msc_win_oldest_sg()
660 for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) { in msc_buffer_clear_hw_header()
989 unsigned int nr_segs) in __msc_buffer_win_alloc() argument
995 ret = sg_alloc_table(win->sgt, nr_segs, GFP_KERNEL); in __msc_buffer_win_alloc()
999 for_each_sg(win->sgt->sgl, sg_ptr, nr_segs, i) { in __msc_buffer_win_alloc()
1009 return nr_segs; in __msc_buffer_win_alloc()
1022 static void msc_buffer_set_uc(struct msc_window *win, unsigned int nr_segs) in msc_buffer_set_uc() argument
1027 for_each_sg(win->sgt->sgl, sg_ptr, nr_segs, i) { in msc_buffer_set_uc()
[all …]
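The msu.c hits show nr_segs sizing a scatterlist window. A trimmed sketch of the allocation loop in __msc_buffer_win_alloc(), with the device pointer and cleanup path assumed:

        ret = sg_alloc_table(win->sgt, nr_segs, GFP_KERNEL);
        if (ret)
                return ret;

        /* Back each of the nr_segs scatterlist entries with one
         * page-sized DMA-coherent block. */
        for_each_sg(win->sgt->sgl, sg_ptr, nr_segs, i) {
                block = dma_alloc_coherent(dev, PAGE_SIZE, &addr, GFP_KERNEL);
                if (!block)
                        goto err_free;
                sg_set_buf(sg_ptr, block, PAGE_SIZE);
        }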
/Linux-v5.4/arch/powerpc/mm/
dma-noncoherent.c
65 int nr_segs = 1 + ((size - seg_size) + PAGE_SIZE - 1)/PAGE_SIZE; in __dma_sync_page_highmem() local
84 } while (seg_nr < nr_segs); in __dma_sync_page_highmem()
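The line-65 expression is a ceiling division: one segment for the initial seg_size chunk plus ceil((size - seg_size) / PAGE_SIZE) full-page passes, using the usual add-(PAGE_SIZE - 1)-then-divide trick. For example, with 4 KiB pages, size = 10000 and seg_size = 1904 give nr_segs = 1 + (8096 + 4095) / 4096 = 3.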
/Linux-v5.4/fs/
read_write.c
765 unsigned long nr_segs, unsigned long fast_segs, in rw_copy_check_uvector() argument
778 if (nr_segs == 0) { in rw_copy_check_uvector()
787 if (nr_segs > UIO_MAXIOV) { in rw_copy_check_uvector()
791 if (nr_segs > fast_segs) { in rw_copy_check_uvector()
792 iov = kmalloc_array(nr_segs, sizeof(struct iovec), GFP_KERNEL); in rw_copy_check_uvector()
798 if (copy_from_user(iov, uvector, nr_segs*sizeof(*uvector))) { in rw_copy_check_uvector()
813 for (seg = 0; seg < nr_segs; seg++) { in rw_copy_check_uvector()
841 const struct compat_iovec __user *uvector, unsigned long nr_segs, in compat_rw_copy_check_uvector() argument
855 if (nr_segs == 0) in compat_rw_copy_check_uvector()
859 if (nr_segs > UIO_MAXIOV) in compat_rw_copy_check_uvector()
[all …]
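The rw_copy_check_uvector() hits sketch the standard readv/writev vector validation: reject oversized counts, spill to the heap when the caller's fast array is too small, then copy the whole vector in at once. A condensed sketch, with the error labels assumed:

        struct iovec *iov = fast_pointer;

        if (nr_segs == 0) {
                ret = 0;
                goto out;
        }
        if (nr_segs > UIO_MAXIOV) {
                ret = -EINVAL;
                goto out;
        }
        if (nr_segs > fast_segs) {
                /* Too many segments for the on-stack array. */
                iov = kmalloc_array(nr_segs, sizeof(struct iovec), GFP_KERNEL);
                if (!iov) {
                        ret = -ENOMEM;
                        goto out;
                }
        }
        if (copy_from_user(iov, uvector, nr_segs * sizeof(*uvector))) {
                ret = -EFAULT;
                goto out;
        }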
splice.c
1354 unsigned long, nr_segs, unsigned int, flags) in SYSCALL_DEFINE4() argument
1368 error = import_iovec(type, uiov, nr_segs, in SYSCALL_DEFINE4()
1380 unsigned int, nr_segs, unsigned int, flags) in COMPAT_SYSCALL_DEFINE4() argument
1394 error = compat_import_iovec(type, iov32, nr_segs, in COMPAT_SYSCALL_DEFINE4()
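The vmsplice syscall hits show the modern entry path: the raw (uiov, nr_segs) pair from userspace goes straight into import_iovec(), which validates it and builds an iov_iter. A sketch, with the on-stack fast array as in fs/splice.c:

        struct iovec iovstack[UIO_FASTIOV];
        struct iovec *iov = iovstack;
        struct iov_iter iter;

        /* Validates nr_segs, copies the vector in (allocating if it
         * outgrows iovstack) and initializes iter over it. */
        error = import_iovec(type, uiov, nr_segs, ARRAY_SIZE(iovstack),
                             &iov, &iter);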
/Linux-v5.4/fs/fuse/
dev.c
644 unsigned long nr_segs; member
700 BUG_ON(!cs->nr_segs); in fuse_copy_fill()
706 cs->nr_segs--; in fuse_copy_fill()
708 if (cs->nr_segs == cs->pipe->buffers) in fuse_copy_fill()
724 cs->nr_segs++; in fuse_copy_fill()
799 BUG_ON(!cs->nr_segs); in fuse_try_move_page()
803 cs->nr_segs--; in fuse_try_move_page()
884 if (cs->nr_segs == cs->pipe->buffers) in fuse_ref_page()
900 cs->nr_segs++; in fuse_ref_page()
1358 if (pipe->nrbufs + cs.nr_segs > pipe->buffers) { in fuse_dev_splice_read()
[all …]
/Linux-v5.4/drivers/nvme/target/
io-cmd-file.c
91 unsigned long nr_segs, size_t count, int ki_flags) in nvmet_file_submit_bvec() argument
108 iov_iter_bvec(&iter, rw, req->f.bvec, nr_segs, count); in nvmet_file_submit_bvec()
/Linux-v5.4/drivers/block/xen-blkback/
blkback.c
759 invcount = xen_blkbk_unmap_prepare(ring, pages, req->nr_segs, in xen_blkbk_unmap_and_respond()
947 pending_req->nr_segs, in xen_blkbk_map_seg()
963 nseg = pending_req->nr_segs; in xen_blkbk_parse_indirect()
1293 pending_req->nr_segs = nseg; in dispatch_rw_block_io()
1416 pending_req->nr_segs); in dispatch_rw_block_io()
common.h
345 int nr_segs; member
/Linux-v5.4/mm/
swap.c
151 int get_kernel_pages(const struct kvec *kiov, int nr_segs, int write, in get_kernel_pages() argument
156 for (seg = 0; seg < nr_segs; seg++) { in get_kernel_pages()
/Linux-v5.4/sound/core/
pcm_native.c
3108 if (to->nr_segs > 1024 || to->nr_segs != runtime->channels) in snd_pcm_readv()
3113 bufs = kmalloc_array(to->nr_segs, sizeof(void *), GFP_KERNEL); in snd_pcm_readv()
3116 for (i = 0; i < to->nr_segs; ++i) in snd_pcm_readv()
3144 if (from->nr_segs > 128 || from->nr_segs != runtime->channels || in snd_pcm_writev()
3148 bufs = kmalloc_array(from->nr_segs, sizeof(void *), GFP_KERNEL); in snd_pcm_writev()
3151 for (i = 0; i < from->nr_segs; ++i) in snd_pcm_writev()
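The pcm_native.c checks tie nr_segs to the stream layout: non-interleaved readv()/writev() needs exactly one iovec per channel. A hypothetical stereo-capture caller (pcm_fd, left, right, and bytes are made-up names):

        /* Two channels, so exactly two segments; anything else is
         * rejected by the nr_segs != runtime->channels check above. */
        struct iovec vec[2] = {
                { .iov_base = left,  .iov_len = bytes },
                { .iov_base = right, .iov_len = bytes },
        };
        readv(pcm_fd, vec, 2);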
