/Linux-v6.1/block/

D | blk-core.c |
   491  if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))  in bio_check_ro()
   515  unsigned int nr_sectors = bio_sectors(bio);  in bio_check_eod()
   538  if (bio_sectors(bio)) {  in blk_partition_remap()
   554  int nr_sectors = bio_sectors(bio);  in blk_check_zone_append()
   735  if (!bio_sectors(bio)) {  in submit_bio_noacct()
   821  count_vm_events(PGPGIN, bio_sectors(bio));  in submit_bio()
   823  count_vm_events(PGPGOUT, bio_sectors(bio));  in submit_bio()
   954  bdev_start_io_acct(bio->bi_bdev, bio_sectors(bio),  in bio_start_io_acct_time()
   967  return bdev_start_io_acct(bio->bi_bdev, bio_sectors(bio),  in bio_start_io_acct()

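Note: the test at blk-core.c:491, op_is_flush(bio->bi_opf) && !bio_sectors(bio), is the block layer's idiom for an "empty flush": a bio with flush semantics but no data payload, so its sector count is zero. A minimal sketch of the pattern, using only helpers that appear in this listing (the wrapper name bio_is_empty_flush is hypothetical, not in-tree):

#include <linux/bio.h>
#include <linux/blk_types.h>

/*
 * Sketch only: an empty flush has REQ_PREFLUSH or REQ_FUA set in
 * bi_opf but carries zero payload sectors.  bio_check_ro() above
 * uses the same test to let such bios through to read-only
 * devices, since they write no data.
 */
static inline bool bio_is_empty_flush(const struct bio *bio)
{
	return op_is_flush(bio->bi_opf) && !bio_sectors(bio);
}
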
D | blk-merge.c |
   129  if (bio_sectors(bio) <= max_discard_sectors)  in bio_split_discard()
   154  if (bio_sectors(bio) <= lim->max_write_zeroes_sectors)  in bio_split_write_zeroes()
   629  if (blk_rq_sectors(req) + bio_sectors(bio) >  in ll_back_merge_fn()
   648  if (blk_rq_sectors(req) + bio_sectors(bio) >  in ll_front_merge_fn()
   664  if (blk_rq_sectors(req) + bio_sectors(next->bio) >  in req_attempt_discard_merge()
   911  else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)  in blk_try_merge()
   989  if (blk_rq_sectors(req) + bio_sectors(bio) >  in bio_attempt_discard_merge()

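Note: the repeated pattern at blk-merge.c:629/648 gates a merge on the combined size blk_rq_sectors(req) + bio_sectors(bio) staying within the request size limit. A hedged sketch of just that size check; the helper name and the way the limit is passed in are illustrative (the real ll_back_merge_fn()/ll_front_merge_fn() obtain the limit via blk_rq_get_max_sectors() and also check segment and integrity constraints):

#include <linux/bio.h>
#include <linux/blk-mq.h>

/*
 * Illustrative only: a bio may be merged into an existing request
 * only if the merged request would not exceed @max_sectors.
 */
static bool merge_within_limit(struct request *req, struct bio *bio,
			       unsigned int max_sectors)
{
	return blk_rq_sectors(req) + bio_sectors(bio) <= max_sectors;
}
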
D | bio-integrity.c |
   216  if (!bio_sectors(bio))  in bio_integrity_prep()
   232  intervals = bio_integrity_intervals(bi, bio_sectors(bio));  in bio_integrity_prep()
   392  bip->bip_iter.bi_size = bio_integrity_bytes(bi, bio_sectors(bio));  in bio_integrity_trim()

D | bounce.c |
   228  if (sectors < bio_sectors(bio_orig)) {  in __blk_queue_bounce()

D | blk-cgroup.h |
   380  bio_issue_init(&bio->bi_issue, bio_sectors(bio));  in blkcg_bio_issue_init()

D | blk-crypto-fallback.c |
   223  if (num_sectors < bio_sectors(bio)) {  in blk_crypto_fallback_split_bio_if_needed()

D | bio.c |
  1580  BUG_ON(sectors >= bio_sectors(bio));  in bio_split()
  1616  offset + size > bio_sectors(bio)))  in bio_trim()

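Note: bio_split() (bio.c:1580) enforces a strict split point, BUG_ON(sectors >= bio_sectors(bio)), so both halves of the split are non-empty; bio_next_split() in bio.h (line 392, further down in this listing) wraps the same bound check. A hedged usage sketch with hypothetical names; @max must also be non-zero, since bio_split() rejects a zero-sector front half:

#include <linux/bio.h>

/*
 * Sketch: cap a bio's front half at @max sectors.  If the bio
 * already fits, return it unchanged; otherwise split strictly
 * inside it, which satisfies bio_split()'s
 * BUG_ON(sectors >= bio_sectors(bio)).
 */
static struct bio *split_front(struct bio *bio, unsigned int max,
			       struct bio_set *bs)
{
	if (bio_sectors(bio) <= max)
		return bio;
	return bio_split(bio, max, GFP_NOIO, bs);
}
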
/Linux-v6.1/drivers/md/bcache/

D | request.c |
   114  bio_sectors(bio), (uint64_t) bio->bi_iter.bi_sector);  in bch_data_invalidate()
   116  while (bio_sectors(bio)) {  in bch_data_invalidate()
   117  unsigned int sectors = min(bio_sectors(bio),  in bch_data_invalidate()
   195  if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0)  in bch_data_insert_start()
   222  if (!bch_alloc_sectors(op->c, k, bio_sectors(bio),  in bch_data_insert_start()
   398  bio_sectors(bio) & (c->cache->sb.block_size - 1)) {  in check_should_bypass()
   454  bch_rescale_priorities(c, bio_sectors(bio));  in check_should_bypass()
   457  bch_mark_sectors_bypassed(c, dc, bio_sectors(bio));  in check_should_bypass()
   525  unsigned int bio_sectors = bio_sectors(bio);  in cache_lookup_fn()  local
   536  BUG_ON(bio_sectors <= sectors);  in cache_lookup_fn()
  [all …]

D | writeback.h |
   117  bio_sectors(bio)))  in should_writeback()

/Linux-v6.1/drivers/md/

D | dm-zone.c |
   134  return !op_is_flush(bio->bi_opf) && bio_sectors(bio);  in dm_is_zone_write()
   496  if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))  in dm_need_zone_wp_tracking()
   535  orig_bio_details.nr_sectors = bio_sectors(clone);  in dm_zone_map_bio()
   639  if (WARN_ON_ONCE(zwp_offset < bio_sectors(orig_bio)))  in dm_zone_endio()
   644  zwp_offset - bio_sectors(orig_bio);  in dm_zone_endio()

D | dm.c |
   509  sectors = bio_sectors(bio);  in dm_io_acct()
  1334  unsigned bio_sectors = bio_sectors(bio);  in dm_accept_partial_bio()  local
  1339  BUG_ON(bio_sectors > *tio->len_ptr);  in dm_accept_partial_bio()
  1340  BUG_ON(n_sectors > bio_sectors);  in dm_accept_partial_bio()
  1342  *tio->len_ptr -= bio_sectors - n_sectors;  in dm_accept_partial_bio()
  1351  io->sector_offset = bio_sectors(io->orig_bio);  in dm_accept_partial_bio()
  1474  io->sector_offset = bio_sectors(ci->bio);  in setup_split_accounting()
  1732  ci->sector_count = bio_sectors(bio);  in init_clone_info()

D | dm-log-writes.c |
   669  if (!bio_sectors(bio) && !flush_bio)  in log_writes_map()
   703  block->nr_sectors = bio_to_dev_sectors(lc, bio_sectors(bio));  in log_writes_map()
   715  if (flush_bio && !bio_sectors(bio)) {  in log_writes_map()

D | dm-zoned.h |
    46  #define dmz_bio_blocks(bio) dmz_sect2blk(bio_sectors(bio))

D | dm-ebs-target.c |
    49  sector_t end_sector = __block_mod(bio->bi_iter.bi_sector, ec->u_bs) + bio_sectors(bio);  in __nr_blocks()

D | raid10.c |
  1234  if (max_sectors < bio_sectors(bio)) {  in raid10_read_request()
  1538  if (r10_bio->sectors < bio_sectors(bio)) {  in raid10_write_request()
  1701  if (bio_sectors(bio) < stripe_size*2)  in raid10_handle_discard()
  1719  split_size = bio_sectors(bio) - remainder;  in raid10_handle_discard()
  1901  int sectors = bio_sectors(bio);  in raid10_make_request()
  2481  md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(tbio));  in sync_request_write()
  2504  bio_sectors(tbio));  in sync_request_write()
  2635  md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(wbio));  in recovery_request_write()
  2641  bio_sectors(wbio2));  in recovery_request_write()

D | dm-crypt.c |
  1144  if (!bio_sectors(bio) || !io->cc->on_disk_tag_size)  in dm_crypt_integrity_io_alloc()
  1151  tag_len = io->cc->on_disk_tag_size * (bio_sectors(bio) >> io->cc->sector_shift);  in dm_crypt_integrity_io_alloc()
  2062  sector += bio_sectors(clone);  in kcryptd_crypt_write_convert()
  3389  if (bio_sectors(bio))  in crypt_map()
  3416  unsigned tag_len = cc->on_disk_tag_size * (bio_sectors(bio) >> cc->sector_shift);  in crypt_map()
  3421  if (bio_sectors(bio) > cc->tag_pool_max_sectors)  in crypt_map()

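Note: dm-crypt sizes its integrity-tag buffer (lines 1151 and 3416) as on_disk_tag_size * (bio_sectors(bio) >> sector_shift): the bio length in 512-byte sectors is converted to crypto-sector units by the shift, then multiplied by the per-sector tag size. A standalone worked example of that arithmetic; the concrete values are illustrative, and sector_shift = 3 assumes 4096-byte crypto sectors (8 × 512):

#include <stdio.h>

int main(void)
{
	unsigned int on_disk_tag_size = 4; /* bytes of tag per crypto sector */
	unsigned int sector_shift = 3;     /* 4096-byte crypto sectors       */
	unsigned int bio_sectors = 256;    /* 128 KiB bio in 512-byte units  */

	/* Same formula as dm_crypt_integrity_io_alloc()/crypt_map() above. */
	unsigned int tag_len = on_disk_tag_size * (bio_sectors >> sector_shift);

	printf("tag_len = %u bytes\n", tag_len); /* 4 * 32 = 128 */
	return 0;
}
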
D | raid1.c |
  1197  r1_bio->sectors = bio_sectors(bio);  in init_r1bio()
  1301  if (max_sectors < bio_sectors(bio)) {  in raid1_read_request()
  1501  if (max_sectors < bio_sectors(bio)) {  in raid1_write_request()
  1614  bio->bi_iter.bi_sector, bio_sectors(bio));  in raid1_make_request()
  2263  md_sync_acct(conf->mirrors[i].rdev->bdev, bio_sectors(wbio));  in sync_request_write()

D | dm-integrity.c |
  1624  if (likely(!bio->bi_status) && unlikely(bio_sectors(bio) != dio->range.n_sectors)) {  in dec_in_flight()
  1872  sector_t end_boundary = (sec + bio_sectors(bio) - 1) >> log2_max_io_len;  in dm_integrity_map()
  1894  if (unlikely(dio->range.logical_sector + bio_sectors(bio) > ic->provided_data_sectors)) {  in dm_integrity_map()
  1896  dio->range.logical_sector, bio_sectors(bio),  in dm_integrity_map()
  1900  …if (unlikely((dio->range.logical_sector | bio_sectors(bio)) & (unsigned)(ic->sectors_per_block - 1…  in dm_integrity_map()
  1903  dio->range.logical_sector, bio_sectors(bio));  in dm_integrity_map()
  1922  unsigned wanted_tag_size = bio_sectors(bio) >> ic->sb->log2_sectors_per_block;  in dm_integrity_map()
  2130  dio->range.n_sectors = bio_sectors(bio);  in dm_integrity_map_continue()

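Note: the (truncated) check at dm-integrity.c:1900, (dio->range.logical_sector | bio_sectors(bio)) & (ic->sectors_per_block - 1), tests offset and length for block alignment with a single mask: OR-ing the two values leaves a low bit set iff either one is misaligned. This only works because sectors_per_block is a power of two, as line 1922's use of log2_sectors_per_block confirms. A standalone demonstration of the trick:

#include <stdbool.h>
#include <stdio.h>

/* Alignment test in the style of dm_integrity_map(): valid only
 * when sectors_per_block is a power of two. */
static bool range_aligned(unsigned long sector, unsigned long nr_sectors,
			  unsigned long sectors_per_block)
{
	return ((sector | nr_sectors) & (sectors_per_block - 1)) == 0;
}

int main(void)
{
	printf("%d\n", range_aligned(8, 16, 8)); /* 1: both multiples of 8 */
	printf("%d\n", range_aligned(8, 12, 8)); /* 0: length misaligned   */
	printf("%d\n", range_aligned(3, 16, 8)); /* 0: offset misaligned   */
	return 0;
}
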
D | raid0.c |
   547  if (sectors < bio_sectors(bio)) {  in raid0_make_request()

/Linux-v6.1/include/trace/events/

D | block.h |
   273  __entry->nr_sector = bio_sectors(bio);
   301  __entry->nr_sector = bio_sectors(bio);
   497  __entry->nr_sector = bio_sectors(bio);

/Linux-v6.1/fs/btrfs/

D | raid56.h |
   119  struct sector_ptr *bio_sectors;  member

D | raid56.c |
   149  if (!rbio->bio_sectors[i].page)  in cache_rbio_pages()
   155  rbio->bio_sectors[i].page,  in cache_rbio_pages()
   156  rbio->bio_sectors[i].pgoff,  in cache_rbio_pages()
   895  sector = &rbio->bio_sectors[index];  in sector_in_rbio()
   934  sizeof(*rbio->bio_sectors) * num_sectors +  in alloc_rbio()
   969  CONSUME_ALLOC(rbio->bio_sectors, num_sectors);  in alloc_rbio()
  1105  struct sector_ptr *sector = &rbio->bio_sectors[index];  in index_one_bio()
  2329  rbio->bio_sectors[index].page = page;  in raid56_add_scrub_pages()
  2330  rbio->bio_sectors[index].pgoff = pgoff;  in raid56_add_scrub_pages()

/Linux-v6.1/include/linux/

D | bio.h |
    40  #define bio_sectors(bio) bvec_iter_sectors((bio)->bi_iter)  macro
   392  if (sectors >= bio_sectors(bio))  in bio_next_split()

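Note: the definition at bio.h:40 forwards to bvec_iter_sectors() on the bio's iterator, which in v6.1 reduces to bi_iter.bi_size >> 9, the residual byte count of the I/O converted to 512-byte sectors. That is why every call site above treats bio_sectors() as "sectors remaining", shrinking as the iterator advances. A minimal standalone model of the arithmetic; the struct mirrors the two relevant bvec_iter fields but is a sketch, not the in-tree type:

#include <stdio.h>

/* Toy model of the two bvec_iter fields bio_sectors() depends on. */
struct iter_model {
	unsigned long long bi_sector; /* device offset in 512-byte sectors */
	unsigned int bi_size;         /* residual I/O length in bytes      */
};

/* bio_sectors(bio) -> bvec_iter_sectors((bio)->bi_iter) -> bi_size >> 9 */
static unsigned int model_bio_sectors(const struct iter_model *it)
{
	return it->bi_size >> 9;
}

int main(void)
{
	struct iter_model it = { .bi_sector = 2048, .bi_size = 65536 };

	printf("%u\n", model_bio_sectors(&it)); /* 64 KiB -> 128 sectors */
	return 0;
}
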
/Linux-v6.1/drivers/nvme/target/

D | io-cmd-bdev.c |
   209  bip->bip_iter.bi_size = bio_integrity_bytes(bi, bio_sectors(bio));  in nvmet_bdev_alloc_bip()

/Linux-v6.1/fs/ext4/

D | page-io.c |
   332  (unsigned) bio_sectors(bio),  in ext4_end_bio()