
Searched refs:bv (Results 1 – 25 of 85) sorted by relevance

/Linux-v5.10/drivers/gpu/drm/i915/gt/
gen7_renderclear.c:50 batch_get_defaults(struct drm_i915_private *i915, struct batch_vals *bv) in batch_get_defaults() argument
53 bv->max_primitives = 280; in batch_get_defaults()
54 bv->max_urb_entries = MAX_URB_ENTRIES; in batch_get_defaults()
55 bv->surface_height = 16 * 16; in batch_get_defaults()
56 bv->surface_width = 32 * 2 * 16; in batch_get_defaults()
58 bv->max_primitives = 128; in batch_get_defaults()
59 bv->max_urb_entries = MAX_URB_ENTRIES / 2; in batch_get_defaults()
60 bv->surface_height = 16 * 8; in batch_get_defaults()
61 bv->surface_width = 32 * 16; in batch_get_defaults()
63 bv->cmd_size = bv->max_primitives * 4096; in batch_get_defaults()
[all …]
/Linux-v5.10/include/linux/
bvec.h:49 struct bio_vec bv; member
100 static inline bool bvec_iter_advance(const struct bio_vec *bv, in bvec_iter_advance() argument
114 while (bytes && bytes >= bv[idx].bv_len) { in bvec_iter_advance()
115 bytes -= bv[idx].bv_len; in bvec_iter_advance()
151 return &iter_all->bv; in bvec_init_iter_all()
157 struct bio_vec *bv = &iter_all->bv; in bvec_advance() local
160 bv->bv_page++; in bvec_advance()
161 bv->bv_offset = 0; in bvec_advance()
163 bv->bv_page = bvec->bv_page + (bvec->bv_offset >> PAGE_SHIFT); in bvec_advance()
164 bv->bv_offset = bvec->bv_offset & ~PAGE_MASK; in bvec_advance()
[all …]
bio.h:183 struct bio_vec bv; in bio_segments() local
202 bio_for_each_segment(bv, bio, iter) in bio_segments()
253 static inline void bio_get_first_bvec(struct bio *bio, struct bio_vec *bv) in bio_get_first_bvec() argument
255 *bv = bio_iovec(bio); in bio_get_first_bvec()
258 static inline void bio_get_last_bvec(struct bio *bio, struct bio_vec *bv) in bio_get_last_bvec() argument
264 *bv = bio_iovec(bio); in bio_get_last_bvec()
275 *bv = bio->bi_io_vec[idx]; in bio_get_last_bvec()
282 bv->bv_len = iter.bi_bvec_done; in bio_get_last_bvec()
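
Most of the hits in this listing use the block layer's struct bio_vec, conventionally named bv, together with the bvec_iter cursor declared above in bvec.h and walked by the bio.h iterators. As a point of reference, here is a minimal sketch of that per-segment iteration pattern; the helper name is illustrative and not taken from any file listed here.

#include <linux/bio.h>

/* Count the payload bytes of a bio one segment at a time. */
static unsigned int sketch_bio_bytes(struct bio *bio)
{
        struct bio_vec bv;      /* current segment: page + offset + length */
        struct bvec_iter iter;  /* cursor advanced by the iterator macro */
        unsigned int bytes = 0;

        bio_for_each_segment(bv, bio, iter)
                bytes += bv.bv_len;

        return bytes;
}
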
/Linux-v5.10/drivers/md/bcache/
util.c:234 struct bio_vec *bv = bio->bi_io_vec; in bch_bio_map() local
239 bv->bv_offset = base ? offset_in_page(base) : 0; in bch_bio_map()
242 for (; size; bio->bi_vcnt++, bv++) { in bch_bio_map()
243 bv->bv_offset = 0; in bch_bio_map()
244 start: bv->bv_len = min_t(size_t, PAGE_SIZE - bv->bv_offset, in bch_bio_map()
247 bv->bv_page = is_vmalloc_addr(base) in bch_bio_map()
251 base += bv->bv_len; in bch_bio_map()
254 size -= bv->bv_len; in bch_bio_map()
271 struct bio_vec *bv; in bch_bio_alloc_pages() local
277 for (i = 0, bv = bio->bi_io_vec; i < bio->bi_vcnt; bv++, i++) { in bch_bio_alloc_pages()
[all …]
debug.c:111 struct bio_vec bv, cbv; in bch_data_verify() local
129 bio_for_each_segment(bv, bio, iter) { in bch_data_verify()
130 void *p1 = kmap_atomic(bv.bv_page); in bch_data_verify()
136 cache_set_err_on(memcmp(p1 + bv.bv_offset, in bch_data_verify()
137 p2 + bv.bv_offset, in bch_data_verify()
138 bv.bv_len), in bch_data_verify()
145 bio_advance_iter(check, &citer, bv.bv_len); in bch_data_verify()
/Linux-v5.10/block/
bio.c:154 void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned int idx) in bvec_free() argument
163 mempool_free(bv, pool); in bvec_free()
167 kmem_cache_free(bvs->slab, bv); in bvec_free()
535 struct bio_vec bv; in zero_fill_bio_iter() local
538 __bio_for_each_segment(bv, bio, iter, start) { in zero_fill_bio_iter()
539 char *data = bvec_kmap_irq(&bv, &flags); in zero_fill_bio_iter()
540 memset(data, 0, bv.bv_len); in zero_fill_bio_iter()
541 flush_dcache_page(bv.bv_page); in zero_fill_bio_iter()
559 struct bio_vec bv; in bio_truncate() local
570 bio_for_each_segment(bv, bio, iter) { in bio_truncate()
[all …]
blk-merge.c:198 const struct bio_vec *bv, unsigned *nsegs, in bvec_split_segs() argument
203 unsigned len = min(bv->bv_len, max_len); in bvec_split_segs()
208 seg_size = get_max_segment_size(q, bv->bv_page, in bvec_split_segs()
209 bv->bv_offset + total_len); in bvec_split_segs()
216 if ((bv->bv_offset + total_len) & queue_virt_boundary(q)) in bvec_split_segs()
223 return len > 0 || bv->bv_len > max_len; in bvec_split_segs()
250 struct bio_vec bv, bvprv, *bvprvp = NULL; in blk_bio_segment_split() local
256 bio_for_each_bvec(bv, bio, iter) { in blk_bio_segment_split()
261 if (bvprvp && bvec_gap_to_prev(q, bvprvp, bv.bv_offset)) in blk_bio_segment_split()
265 sectors + (bv.bv_len >> 9) <= max_sectors && in blk_bio_segment_split()
[all …]
blk-crypto-fallback.c:164 struct bio_vec bv; in blk_crypto_clone_bio() local
177 bio_for_each_segment(bv, bio_src, iter) in blk_crypto_clone_bio()
178 bio->bi_io_vec[bio->bi_vcnt++] = bv; in blk_crypto_clone_bio()
214 struct bio_vec bv; in blk_crypto_split_bio_if_needed() local
217 bio_for_each_segment(bv, bio, iter) { in blk_crypto_split_bio_if_needed()
218 num_sectors += bv.bv_len >> SECTOR_SHIFT; in blk_crypto_split_bio_if_needed()
385 struct bio_vec bv; in blk_crypto_fallback_decrypt_bio() local
413 __bio_for_each_segment(bv, bio, iter, f_ctx->crypt_iter) { in blk_crypto_fallback_decrypt_bio()
414 struct page *page = bv.bv_page; in blk_crypto_fallback_decrypt_bio()
416 sg_set_page(&sg, page, data_unit_size, bv.bv_offset); in blk_crypto_fallback_decrypt_bio()
[all …]
blk-crypto.c:209 struct bio_vec bv; in bio_crypt_check_alignment() local
211 bio_for_each_segment(bv, bio, iter) { in bio_crypt_check_alignment()
212 if (!IS_ALIGNED(bv.bv_len | bv.bv_offset, data_unit_size)) in bio_crypt_check_alignment()
bio-integrity.c:168 struct bio_vec bv; in bio_integrity_process() local
179 __bio_for_each_segment(bv, bio, bviter, *proc_iter) { in bio_integrity_process()
180 void *kaddr = kmap_atomic(bv.bv_page); in bio_integrity_process()
182 iter.data_buf = kaddr + bv.bv_offset; in bio_integrity_process()
183 iter.data_size = bv.bv_len; in bio_integrity_process()
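
The block/ hits above are fragments; the underlying pattern in zero_fill_bio_iter() and bio_integrity_process() is to map each bio_vec, touch the data, and unmap it again. Below is a self-contained sketch of that map-and-fill loop with an illustrative helper name; the bvec_kmap_irq()/bvec_kunmap_irq() helpers are the ones declared in v5.10 bio.h.

#include <linux/bio.h>
#include <linux/highmem.h>
#include <linux/string.h>

/* Zero every data segment of a bio, mapping one bio_vec at a time. */
static void sketch_zero_bio(struct bio *bio)
{
        struct bio_vec bv;
        struct bvec_iter iter;
        unsigned long flags;

        bio_for_each_segment(bv, bio, iter) {
                /* on HIGHMEM configs this is kmap_atomic() with interrupts
                 * saved; otherwise it reduces to page_address() + offset */
                char *data = bvec_kmap_irq(&bv, &flags);

                memset(data, 0, bv.bv_len);
                flush_dcache_page(bv.bv_page);
                bvec_kunmap_irq(data, &flags);
        }
}
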
/Linux-v5.10/arch/parisc/kernel/
entry.S:170 bv,n 0(%r3)
806 bv %r0(%r2)
815 bv %r0(%r2)
975 bv %r0(%r20)
1782 bv %r0(%r2)
1979 bv %r0(%r19) /* jumps to schedule() */
2010 bv %r0(%rp)
2078 bv,n (%r1)
2174 bv,n (%r1)
2205 bv %r0(%ret0)
[all …]
real2.S:92 bv 0(%r31)
114 bv 0(%rp)
134 bv 0(%r2)
149 bv 0(%r2)
190 bv 0(%r2)
227 bv 0(%r2)
274 bv 0(%r31)
287 bv 0(%rp)
302 bv %r0(%r2)
hpmc.S:156 bv (r3) /* call pdce_proc */
171 bv (%r3) /* call pdce_proc */
201 bv (%r3) /* call pdce_proc */
223 bv (%r5)
271 bv (%r3) /* call pdce_proc */
pacache.S:175 2: bv %r0(%r2)
184 bv,n %r0(%r2)
245 bv %r0(%r2)
307 bv %r0(%r2)
370 bv %r0(%r2)
481 bv %r0(%r2)
682 bv %r0(%r2)
760 bv %r0(%r2)
819 bv %r0(%r2)
878 bv %r0(%r2)
[all …]
/Linux-v5.10/drivers/net/ethernet/netronome/nfp/bpf/
main.c:66 struct nfp_bpf_vnic *bv; in nfp_bpf_vnic_alloc() local
79 bv = kzalloc(sizeof(*bv), GFP_KERNEL); in nfp_bpf_vnic_alloc()
80 if (!bv) in nfp_bpf_vnic_alloc()
82 nn->app_priv = bv; in nfp_bpf_vnic_alloc()
88 bv->start_off = nn_readw(nn, NFP_NET_CFG_BPF_START); in nfp_bpf_vnic_alloc()
89 bv->tgt_done = nn_readw(nn, NFP_NET_CFG_BPF_DONE); in nfp_bpf_vnic_alloc()
99 struct nfp_bpf_vnic *bv = nn->app_priv; in nfp_bpf_vnic_free() local
101 WARN_ON(bv->tc_prog); in nfp_bpf_vnic_free()
102 kfree(bv); in nfp_bpf_vnic_free()
111 struct nfp_bpf_vnic *bv; in nfp_bpf_setup_tc_block_cb() local
[all …]
/Linux-v5.10/drivers/s390/block/
dasd_fba.c:449 struct bio_vec bv; in dasd_fba_build_cp_regular() local
470 rq_for_each_segment(bv, req, iter) { in dasd_fba_build_cp_regular()
471 if (bv.bv_len & (blksize - 1)) in dasd_fba_build_cp_regular()
474 count += bv.bv_len >> (block->s2b_shift + 9); in dasd_fba_build_cp_regular()
475 if (idal_is_needed (page_address(bv.bv_page), bv.bv_len)) in dasd_fba_build_cp_regular()
476 cidaw += bv.bv_len / blksize; in dasd_fba_build_cp_regular()
512 rq_for_each_segment(bv, req, iter) { in dasd_fba_build_cp_regular()
513 dst = page_address(bv.bv_page) + bv.bv_offset; in dasd_fba_build_cp_regular()
518 memcpy(copy + bv.bv_offset, dst, bv.bv_len); in dasd_fba_build_cp_regular()
520 dst = copy + bv.bv_offset; in dasd_fba_build_cp_regular()
[all …]
dasd_diag.c:516 struct bio_vec bv; in dasd_diag_build_cp() local
536 rq_for_each_segment(bv, req, iter) { in dasd_diag_build_cp()
537 if (bv.bv_len & (blksize - 1)) in dasd_diag_build_cp()
540 count += bv.bv_len >> (block->s2b_shift + 9); in dasd_diag_build_cp()
555 rq_for_each_segment(bv, req, iter) { in dasd_diag_build_cp()
556 dst = page_address(bv.bv_page) + bv.bv_offset; in dasd_diag_build_cp()
557 for (off = 0; off < bv.bv_len; off += blksize) { in dasd_diag_build_cp()
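
The s390 dasd drivers iterate a whole struct request rather than a single bio, using rq_for_each_segment() from blkdev.h, and reject segments that are not a multiple of the device block size. A hedged sketch of that check follows; the function name and blksize handling are illustrative.

#include <linux/blkdev.h>

/* Reject a request whose segments are not multiples of the device block size. */
static int sketch_check_req_alignment(struct request *req, unsigned int blksize)
{
        struct req_iterator iter;
        struct bio_vec bv;

        rq_for_each_segment(bv, req, iter) {
                if (bv.bv_len & (blksize - 1))  /* assumes power-of-two blksize */
                        return -EINVAL;
        }

        return 0;
}
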
/Linux-v5.10/fs/crypto/
bio.c:31 struct bio_vec *bv; in fscrypt_decrypt_bio() local
34 bio_for_each_segment_all(bv, bio, iter_all) { in fscrypt_decrypt_bio()
35 struct page *page = bv->bv_page; in fscrypt_decrypt_bio()
36 int ret = fscrypt_decrypt_pagecache_blocks(page, bv->bv_len, in fscrypt_decrypt_bio()
37 bv->bv_offset); in fscrypt_decrypt_bio()
/Linux-v5.10/arch/parisc/lib/
string.S:45 bv r0(rp)
48 bv,n r0(rp)
64 2: bv,n r0(rp)
82 2: bv,n r0(rp)
100 bv,n r0(rp)
133 4: bv,n r0(rp)
/Linux-v5.10/fs/cifs/
misc.c:815 if (ctx->bv) { in cifs_aio_ctx_release()
820 set_page_dirty(ctx->bv[i].bv_page); in cifs_aio_ctx_release()
821 put_page(ctx->bv[i].bv_page); in cifs_aio_ctx_release()
823 kvfree(ctx->bv); in cifs_aio_ctx_release()
844 struct bio_vec *bv = NULL; in setup_aio_ctx_iter() local
853 if (array_size(max_pages, sizeof(*bv)) <= CIFS_AIO_KMALLOC_LIMIT) in setup_aio_ctx_iter()
854 bv = kmalloc_array(max_pages, sizeof(*bv), GFP_KERNEL); in setup_aio_ctx_iter()
856 if (!bv) { in setup_aio_ctx_iter()
857 bv = vmalloc(array_size(max_pages, sizeof(*bv))); in setup_aio_ctx_iter()
858 if (!bv) in setup_aio_ctx_iter()
[all …]
/Linux-v5.10/drivers/md/
dm-ebs-target.c:64 static int __ebs_rw_bvec(struct ebs_c *ec, int rw, struct bio_vec *bv, struct bvec_iter *iter) in __ebs_rw_bvec() argument
69 unsigned int bv_len = bv->bv_len; in __ebs_rw_bvec()
74 if (unlikely(!bv->bv_page || !bv_len)) in __ebs_rw_bvec()
77 pa = page_address(bv->bv_page) + bv->bv_offset; in __ebs_rw_bvec()
100 flush_dcache_page(bv->bv_page); in __ebs_rw_bvec()
102 flush_dcache_page(bv->bv_page); in __ebs_rw_bvec()
123 struct bio_vec bv; in __ebs_rw_bio() local
126 bio_for_each_bvec(bv, bio, iter) { in __ebs_rw_bio()
127 rr = __ebs_rw_bvec(ec, rw, &bv, &iter); in __ebs_rw_bio()
/Linux-v5.10/fs/orangefs/
inode.c:24 struct bio_vec bv; in orangefs_writepage_locked() local
51 bv.bv_page = page; in orangefs_writepage_locked()
52 bv.bv_len = wlen; in orangefs_writepage_locked()
53 bv.bv_offset = off % PAGE_SIZE; in orangefs_writepage_locked()
55 iov_iter_bvec(&iter, WRITE, &bv, 1, wlen); in orangefs_writepage_locked()
86 struct bio_vec *bv; member
104 ow->bv[i].bv_page = ow->pages[i]; in orangefs_writepages_work()
105 ow->bv[i].bv_len = min(page_offset(ow->pages[i]) + PAGE_SIZE, in orangefs_writepages_work()
109 ow->bv[i].bv_offset = ow->off - in orangefs_writepages_work()
112 ow->bv[i].bv_offset = 0; in orangefs_writepages_work()
[all …]
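
Outside the block layer, orangefs (like cifs above) fills bio_vec entries by hand and passes them to the iov_iter machinery with iov_iter_bvec(). A minimal single-page sketch of that use follows; the helper name and parameters are illustrative.

#include <linux/bvec.h>
#include <linux/uio.h>

/*
 * Describe one page region as a single-entry bio_vec and wrap it in an
 * iov_iter for a write. The bio_vec must stay alive for as long as the
 * iov_iter is used, since iov_iter_bvec() only stores a pointer to it.
 */
static void sketch_page_to_iter(struct iov_iter *iter, struct bio_vec *bv,
                                struct page *page, unsigned int offset,
                                unsigned int len)
{
        bv->bv_page = page;
        bv->bv_offset = offset;
        bv->bv_len = len;

        iov_iter_bvec(iter, WRITE, bv, 1, len);
}
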
/Linux-v5.10/fs/verity/
verify.c:229 struct bio_vec *bv; in fsverity_verify_bio() local
246 bio_for_each_segment_all(bv, bio, iter_all) in fsverity_verify_bio()
251 bio_for_each_segment_all(bv, bio, iter_all) { in fsverity_verify_bio()
252 struct page *page = bv->bv_page; in fsverity_verify_bio()
/Linux-v5.10/drivers/nvdimm/
blk.c:81 struct bio_vec bv; in nd_blk_rw_integrity() local
84 bv = bvec_iter_bvec(bip->bip_vec, bip->bip_iter); in nd_blk_rw_integrity()
91 cur_len = min(len, bv.bv_len); in nd_blk_rw_integrity()
92 iobuf = kmap_atomic(bv.bv_page); in nd_blk_rw_integrity()
93 err = ndbr->do_io(ndbr, dev_offset, iobuf + bv.bv_offset, in nd_blk_rw_integrity()
/Linux-v5.10/arch/parisc/boot/compressed/
head.S:71 bv,n 0(%r3)
84 bv,n 0(%ret0)
