Lines matching refs:bvec in drivers/block/zram/zram_drv.c

57 static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
147 static inline bool is_partial_io(struct bio_vec *bvec) in is_partial_io() argument
149 return bvec->bv_len != PAGE_SIZE; in is_partial_io()
152 static inline bool is_partial_io(struct bio_vec *bvec) in is_partial_io() argument
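
The two definitions at 147 and 152 are the two arms of a preprocessor conditional. A minimal sketch of that pattern, assuming the guard is on PAGE_SIZE (the #if condition itself does not appear in the matches above):

#if PAGE_SIZE != 4096
/* A segment is "partial" when it covers less than one zram page. */
static inline bool is_partial_io(struct bio_vec *bvec)
{
        return bvec->bv_len != PAGE_SIZE;
}
#else
/* With 4K pages the block layer never hands zram a sub-page segment. */
static inline bool is_partial_io(struct bio_vec *bvec)
{
        return false;
}
#endif
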
182 static void update_position(u32 *index, int *offset, struct bio_vec *bvec) in update_position() argument
184 *index += (*offset + bvec->bv_len) / PAGE_SIZE; in update_position()
185 *offset = (*offset + bvec->bv_len) % PAGE_SIZE; in update_position()
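
To make the arithmetic at 184-185 concrete (hypothetical values, assuming 4 KiB pages): with *index == 3, *offset == 3584 and bvec->bv_len == 1024, the helper computes *index = 3 + (3584 + 1024) / 4096 = 4 and *offset = (3584 + 1024) % 4096 = 512, i.e. the segment has just crossed into the next zram page.
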
605 static int read_from_bdev_async(struct zram *zram, struct bio_vec *bvec, in read_from_bdev_async() argument
616 if (!bio_add_page(bio, bvec->bv_page, bvec->bv_len, bvec->bv_offset)) { in read_from_bdev_async()
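
A minimal sketch of what the check at 616 is guarding, assuming the usual pattern of pointing a freshly allocated bio at the backing device (the sector computation and error returns below are my reading of that pattern, not lines from the matches):

/* entry is the page-sized slot number on the backing device. */
bio->bi_iter.bi_sector = entry * (PAGE_SIZE >> SECTOR_SHIFT);
if (!bio_add_page(bio, bvec->bv_page, bvec->bv_len, bvec->bv_offset)) {
        bio_put(bio);           /* segment did not fit: fail the async read */
        return -EIO;
}
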
686 struct bio_vec bvec; in writeback_store() local
688 bvec.bv_page = page; in writeback_store()
689 bvec.bv_len = PAGE_SIZE; in writeback_store()
690 bvec.bv_offset = 0; in writeback_store()
731 if (zram_bvec_read(zram, &bvec, index, 0, NULL)) { in writeback_store()
743 bio_add_page(&bio, bvec.bv_page, bvec.bv_len, in writeback_store()
744 bvec.bv_offset); in writeback_store()
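
The writeback_store() fragments at 686-744 share one idiom: wrap a whole page in an on-stack bio_vec so the ordinary zram_bvec_read() path can decompress into it before the page is queued for the backing device. A minimal sketch (loop control and error handling trimmed):

struct bio_vec bvec;

bvec.bv_page   = page;          /* one full page...                  */
bvec.bv_len    = PAGE_SIZE;     /* ...covering the whole segment...  */
bvec.bv_offset = 0;             /* ...starting at offset 0           */

/* Decompress the slot into the page via the normal read path. */
if (zram_bvec_read(zram, &bvec, index, 0, NULL))
        goto next;              /* slot went away under us: skip it */

/* Hand the now-uncompressed page to the writeback bio. */
bio_add_page(&bio, bvec.bv_page, bvec.bv_len, bvec.bv_offset);
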
809 struct bio_vec bvec; member
820 read_from_bdev_async(zram, &zw->bvec, entry, bio); in zram_sync_read()
828 static int read_from_bdev_sync(struct zram *zram, struct bio_vec *bvec, in read_from_bdev_sync() argument
833 work.bvec = *bvec; in read_from_bdev_sync()
846 static int read_from_bdev_sync(struct zram *zram, struct bio_vec *bvec, in read_from_bdev_sync() argument
854 static int read_from_bdev(struct zram *zram, struct bio_vec *bvec, in read_from_bdev() argument
859 return read_from_bdev_sync(zram, bvec, entry, parent); in read_from_bdev()
861 return read_from_bdev_async(zram, bvec, entry, parent); in read_from_bdev()
865 static int read_from_bdev(struct zram *zram, struct bio_vec *bvec, in read_from_bdev() argument
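
The fragments at 809-865 are the synchronous fallback and its dispatcher: read_from_bdev() picks between the sync and async paths (the condition is not shown in the matches), and the second read_from_bdev() at 865 is presumably the stub for builds without backing-device support. On the sync path the caller's bio_vec is copied by value into an on-stack work item so a worker can perform the read while the caller waits. A minimal sketch of that pattern (field names other than bvec, and the workqueue details, are assumptions based on the matches above):

struct zram_work {
        struct work_struct work;
        struct zram *zram;
        unsigned long entry;
        struct bio *bio;
        struct bio_vec bvec;    /* by-value copy of the caller's segment */
};

static int read_from_bdev_sync(struct zram *zram, struct bio_vec *bvec,
                               unsigned long entry, struct bio *parent)
{
        struct zram_work work;

        work.bvec  = *bvec;     /* copy: the worker must not rely on the caller's pointer */
        work.zram  = zram;
        work.entry = entry;
        work.bio   = parent;

        INIT_WORK_ONSTACK(&work.work, zram_sync_read);
        queue_work(system_unbound_wq, &work.work);
        flush_work(&work.work); /* block until the backing-device read is done */
        destroy_work_on_stack(&work.work);

        return 1;
}
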
1256 struct bio_vec bvec; in __zram_bvec_read() local
1263 bvec.bv_page = page; in __zram_bvec_read()
1264 bvec.bv_len = PAGE_SIZE; in __zram_bvec_read()
1265 bvec.bv_offset = 0; in __zram_bvec_read()
1266 return read_from_bdev(zram, &bvec, in __zram_bvec_read()
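
A minimal sketch of what the __zram_bvec_read() fragments at 1256-1266 do together: when the slot has been written back to the backing device, the destination page is wrapped in a one-segment bio_vec and the read is delegated to read_from_bdev(). The surrounding flag test and helper names below are my paraphrase of the context, not lines from the matches:

if (zram_test_flag(zram, index, ZRAM_WB)) {
        struct bio_vec bvec;

        bvec.bv_page   = page;          /* read straight into the destination page */
        bvec.bv_len    = PAGE_SIZE;
        bvec.bv_offset = 0;

        zram_slot_unlock(zram, index);
        return read_from_bdev(zram, &bvec,
                              zram_get_element(zram, index),
                              bio, partial_io);
}
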
1311 static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec, in zram_bvec_read() argument
1317 page = bvec->bv_page; in zram_bvec_read()
1318 if (is_partial_io(bvec)) { in zram_bvec_read()
1325 ret = __zram_bvec_read(zram, page, index, bio, is_partial_io(bvec)); in zram_bvec_read()
1329 if (is_partial_io(bvec)) { in zram_bvec_read()
1332 memcpy_to_bvec(bvec, src + offset); in zram_bvec_read()
1336 if (is_partial_io(bvec)) in zram_bvec_read()
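
A minimal sketch of the partial-read path the zram_bvec_read() fragments at 1311-1336 outline: decompress the whole slot into a bounce page, then copy only the requested slice into the caller's segment with memcpy_to_bvec() (error paths trimmed):

struct page *page = bvec->bv_page;
int ret;

if (is_partial_io(bvec)) {
        /* Sub-page read: decompress into a private bounce page first. */
        page = alloc_page(GFP_NOIO | __GFP_HIGHMEM);
        if (!page)
                return -ENOMEM;
}

ret = __zram_bvec_read(zram, page, index, bio, is_partial_io(bvec));

if (!ret && is_partial_io(bvec)) {
        void *src = kmap_atomic(page);

        /* Copy just the requested slice into the caller's segment. */
        memcpy_to_bvec(bvec, src + offset);
        kunmap_atomic(src);
}

if (is_partial_io(bvec))
        __free_page(page);
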
1342 static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec, in __zram_bvec_write() argument
1351 struct page *page = bvec->bv_page; in __zram_bvec_write()
1469 static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, in zram_bvec_write() argument
1476 vec = *bvec; in zram_bvec_write()
1477 if (is_partial_io(bvec)) { in zram_bvec_write()
1492 memcpy_from_bvec(dst + offset, bvec); in zram_bvec_write()
1502 if (is_partial_io(bvec)) in zram_bvec_write()
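
The write side at 1469-1502 mirrors that as a read-modify-write. A minimal sketch: fetch the slot's current contents into a bounce page, overlay the caller's slice with memcpy_from_bvec(), and store the resulting full page; vec is the full-page bio_vec that actually gets written (error paths trimmed):

struct bio_vec vec = *bvec;
struct page *page;
int ret;

if (is_partial_io(bvec)) {
        void *dst;

        /* Start from the slot's current contents... */
        page = alloc_page(GFP_NOIO | __GFP_HIGHMEM);
        if (!page)
                return -ENOMEM;

        ret = __zram_bvec_read(zram, page, index, bio, true);
        if (ret)
                goto out;

        /* ...and overlay only the slice the caller is writing. */
        dst = kmap_atomic(page);
        memcpy_from_bvec(dst + offset, bvec);
        kunmap_atomic(dst);

        vec.bv_page   = page;
        vec.bv_len    = PAGE_SIZE;
        vec.bv_offset = 0;
}

ret = __zram_bvec_write(zram, &vec, index, bio);        /* compress and store the full page */
out:
        if (is_partial_io(bvec))
                __free_page(page);
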
1550 static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index, in zram_bvec_rw() argument
1557 ret = zram_bvec_read(zram, bvec, index, offset, bio); in zram_bvec_rw()
1558 flush_dcache_page(bvec->bv_page); in zram_bvec_rw()
1561 ret = zram_bvec_write(zram, bvec, index, offset, bio); in zram_bvec_rw()
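
A minimal sketch of the dispatcher the zram_bvec_rw() fragments at 1550-1561 come from: reads and writes funnel through the two helpers above, and the page's dcache is flushed after a read so mappings see the freshly decompressed data (the op_is_write() test is my reading of the usual block-layer idiom, not shown in the matches):

static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
                        int offset, unsigned int op, struct bio *bio)
{
        int ret;

        if (!op_is_write(op)) {
                ret = zram_bvec_read(zram, bvec, index, offset, bio);
                flush_dcache_page(bvec->bv_page);       /* make decompressed data visible */
        } else {
                ret = zram_bvec_write(zram, bvec, index, offset, bio);
        }

        return ret;
}
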
1582 struct bio_vec bvec; in __zram_make_request() local
1601 bio_for_each_segment(bvec, bio, iter) { in __zram_make_request()
1602 struct bio_vec bv = bvec; in __zram_make_request()
1603 unsigned int unwritten = bvec.bv_len; in __zram_make_request()
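
Finally, a minimal sketch of the splitting loop the __zram_make_request() fragments at 1582-1603 belong to: each bio segment is consumed in chunks that never cross a zram page boundary, with update_position() advancing (index, offset) after every chunk (error handling trimmed; everything inside the do/while below is my reading of the usual pattern, not lines from the matches):

bio_for_each_segment(bvec, bio, iter) {
        struct bio_vec bv = bvec;
        unsigned int unwritten = bvec.bv_len;

        do {
                /* Clamp the chunk so it stays inside one zram page. */
                bv.bv_len = min_t(unsigned int, PAGE_SIZE - offset,
                                  unwritten);
                if (zram_bvec_rw(zram, &bv, index, offset,
                                 bio_op(bio), bio) < 0)
                        goto out;

                bv.bv_offset += bv.bv_len;
                unwritten    -= bv.bv_len;

                update_position(&index, &offset, &bv);
        } while (unwritten);
}
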