/Linux-v6.1/block/ |
D | blk-core.c |
     323  if (bio->bi_opf & REQ_NOWAIT) {  in __bio_queue_enter()
     491  if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))  in bio_check_ro()
     522  current->comm, bio->bi_bdev, bio->bi_opf,  in bio_check_eod()
     577  bio->bi_opf |= REQ_NOMERGE;  in blk_check_zone_append()
     709  bio->bi_opf |= REQ_NOWAIT;  in submit_bio_noacct()
     715  if ((bio->bi_opf & REQ_NOWAIT) && !bdev_nowait(bdev))  in submit_bio_noacct()
     732  if (op_is_flush(bio->bi_opf) &&  in submit_bio_noacct()
     734  bio->bi_opf &= ~(REQ_PREFLUSH | REQ_FUA);  in submit_bio_noacct()
    1185  sizeof_field(struct bio, bi_opf));  in blk_dev_init()
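The submit_bio_noacct() hits at 732 and 734 are the classic flush normalization: when a bio carries REQ_PREFLUSH or REQ_FUA but the target device has no volatile write cache, the flags are simply cleared, because the guarantee they request already holds. A minimal userspace sketch of that test, with stand-in flag values and a hypothetical normalize_flush() helper (the real constants live in include/linux/blk_types.h):

    /* Sketch only: flag values are illustrative stand-ins. */
    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    typedef uint32_t blk_opf_t;

    #define REQ_PREFLUSH (1u << 16)
    #define REQ_FUA      (1u << 17)

    static blk_opf_t normalize_flush(blk_opf_t opf, bool has_volatile_cache)
    {
        /* No cache to flush: the flush semantics are already satisfied. */
        if ((opf & (REQ_PREFLUSH | REQ_FUA)) && !has_volatile_cache)
            opf &= ~(REQ_PREFLUSH | REQ_FUA);
        return opf;
    }

    int main(void)
    {
        assert(normalize_flush(REQ_PREFLUSH | REQ_FUA, false) == 0);
        assert(normalize_flush(REQ_PREFLUSH, true) == REQ_PREFLUSH);
        return 0;
    }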
|
D | blk-merge.c |
     356  split->bi_opf |= REQ_NOMERGE;  in __bio_split_to_limits()
     731  WARN_ON_ONCE((bio->bi_opf & REQ_FAILFAST_MASK) &&  in blk_rq_set_mixed_merge()
     732  (bio->bi_opf & REQ_FAILFAST_MASK) != ff);  in blk_rq_set_mixed_merge()
     733  bio->bi_opf |= ff;  in blk_rq_set_mixed_merge()
     935  const blk_opf_t ff = bio->bi_opf & REQ_FAILFAST_MASK;  in bio_attempt_back_merge()
     959  const blk_opf_t ff = bio->bi_opf & REQ_FAILFAST_MASK;  in bio_attempt_front_merge()
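The blk_rq_set_mixed_merge() hits (731-733) show failfast handling for mixed merges: every bio in the merged request is given the same REQ_FAILFAST_MASK bits so error handling stays uniform across the request. A sketch of that propagation with stand-in bit values and a plain array instead of a bio list:

    /* Sketch: propagate one REQ_FAILFAST_* setting across merged bios.
     * Flag values are stand-ins for the kernel's REQ_FAILFAST_MASK bits. */
    #include <assert.h>
    #include <stdint.h>

    typedef uint32_t blk_opf_t;

    #define REQ_FAILFAST_DEV       (1u << 8)
    #define REQ_FAILFAST_TRANSPORT (1u << 9)
    #define REQ_FAILFAST_DRIVER    (1u << 10)
    #define REQ_FAILFAST_MASK \
        (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)

    int main(void)
    {
        blk_opf_t bios[2] = { 0, REQ_FAILFAST_TRANSPORT };
        /* Adopt one bio's failfast setting for the whole merge. */
        blk_opf_t ff = bios[1] & REQ_FAILFAST_MASK;

        for (int i = 0; i < 2; i++)
            bios[i] |= ff;    /* make the merged request uniform */

        assert((bios[0] & REQ_FAILFAST_MASK) == (bios[1] & REQ_FAILFAST_MASK));
        return 0;
    }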
|
D | fops.c |
      95  bio.bi_opf |= REQ_NOWAIT;  in __blkdev_direct_IO_simple()
     232  bio->bi_opf |= REQ_NOWAIT;  in __blkdev_direct_IO()
     347  bio->bi_opf |= REQ_POLLED | REQ_NOWAIT;  in __blkdev_direct_IO_async()
     352  bio->bi_opf |= REQ_NOWAIT;  in __blkdev_direct_IO_async()
|
D | blk-cgroup.h |
     224  return (bio->bi_opf & (REQ_META | REQ_SWAP)) != 0;  in bio_issue_as_root_blkg()
     372  if (bio->bi_opf & REQ_CGROUP_PUNT)  in blkcg_punt_bio_submit()
|
D | bio.c |
     246  bio->bi_opf = opf;  in bio_init()
     303  bio->bi_opf = opf;  in bio_reset()
     744  if ((bio->bi_opf & REQ_ALLOC_CACHE) && !WARN_ON_ONCE(in_interrupt())) {  in bio_put()
     798  bio = bio_alloc_bioset(bdev, 0, bio_src->bi_opf, gfp, bs);  in bio_alloc_clone()
     829  bio_init(bio, bdev, bio_src->bi_io_vec, 0, bio_src->bi_opf);  in bio_init_clone()
    1319  bio->bi_opf |= REQ_SYNC;  in submit_bio_wait()
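The hit at 1319 shows submit_bio_wait() ORing REQ_SYNC into bi_opf before submitting, since the caller is about to block on completion anyway. A tiny sketch of that pattern, with a stand-in REQ_SYNC value and a hypothetical prepare_sync_submit() helper:

    /* Sketch: mark an I/O synchronous before a blocking submit. */
    #include <assert.h>
    #include <stdint.h>

    typedef uint32_t blk_opf_t;

    #define REQ_SYNC (1u << 11)   /* stand-in value */

    static blk_opf_t prepare_sync_submit(blk_opf_t opf)
    {
        return opf | REQ_SYNC;    /* mirrors the submit_bio_wait() OR */
    }

    int main(void)
    {
        blk_opf_t opf = 0x1u;     /* pretend this is REQ_OP_WRITE */
        assert(prepare_sync_submit(opf) & REQ_SYNC);
        return 0;
    }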
|
D | blk-mq-sched.h |
      40  return !(bio->bi_opf & REQ_NOMERGE_FLAGS);  in bio_mergeable()
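bio_mergeable() is a single mask test: a bio may merge only if none of the no-merge bits are set. A sketch with stand-in values; the kernel's actual REQ_NOMERGE_FLAGS set may differ from the one assumed here:

    /* Sketch: merge eligibility is one mask test. */
    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    typedef uint32_t blk_opf_t;

    #define REQ_NOMERGE  (1u << 14)
    #define REQ_PREFLUSH (1u << 16)
    #define REQ_FUA      (1u << 17)
    #define REQ_NOMERGE_FLAGS (REQ_NOMERGE | REQ_PREFLUSH | REQ_FUA)

    static bool bio_mergeable(blk_opf_t opf)
    {
        return !(opf & REQ_NOMERGE_FLAGS);
    }

    int main(void)
    {
        assert(bio_mergeable(0));
        assert(!bio_mergeable(REQ_FUA));
        return 0;
    }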
|
D | bio-integrity.c |
      85  bio->bi_opf |= REQ_INTEGRITY;  in bio_integrity_alloc()
     111  bio->bi_opf &= ~REQ_INTEGRITY;  in bio_integrity_free()
|
D | blk-map.c |
     236  if (bio->bi_opf & REQ_ALLOC_CACHE) {  in blk_mq_map_bio_put()
     795  bio->bi_opf &= ~REQ_OP_MASK;  in blk_rq_map_kern()
     796  bio->bi_opf |= req_op(rq);  in blk_rq_map_kern()
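Lines 795-796 retarget a mapped bio to the request's operation while leaving the flag bits alone: clear the low REQ_OP_MASK bits, then OR in the new op. A sketch of that two-step swap with stand-in values and a hypothetical set_op() helper:

    /* Sketch: replace the operation in the low bits, keep the flags. */
    #include <assert.h>
    #include <stdint.h>

    typedef uint32_t blk_opf_t;

    #define REQ_OP_MASK  0xffu
    #define REQ_OP_READ  0u
    #define REQ_OP_WRITE 1u
    #define REQ_SYNC     (1u << 11)

    static blk_opf_t set_op(blk_opf_t opf, blk_opf_t op)
    {
        opf &= ~REQ_OP_MASK;   /* drop the old operation */
        opf |= op;             /* install the new one */
        return opf;
    }

    int main(void)
    {
        blk_opf_t opf = REQ_OP_READ | REQ_SYNC;

        opf = set_op(opf, REQ_OP_WRITE);
        assert((opf & REQ_OP_MASK) == REQ_OP_WRITE && (opf & REQ_SYNC));
        return 0;
    }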
|
/Linux-v6.1/drivers/md/bcache/ |
D | writeback.h |
     123  return (op_is_sync(bio->bi_opf) ||  in should_writeback()
     124  bio->bi_opf & (REQ_META|REQ_PRIO) ||  in should_writeback()
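should_writeback() treats synchronous, metadata, and high-priority bios as writeback candidates. A sketch of the flag side of that predicate; op_is_sync() is simplified here (the kernel version also counts reads and a few other cases), and favor_writeback() is a hypothetical name:

    /* Sketch with stand-in flag values. */
    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    typedef uint32_t blk_opf_t;

    #define REQ_SYNC (1u << 11)
    #define REQ_META (1u << 12)
    #define REQ_PRIO (1u << 13)

    static bool op_is_sync(blk_opf_t opf)
    {
        return opf & REQ_SYNC;   /* simplified */
    }

    static bool favor_writeback(blk_opf_t opf)
    {
        return op_is_sync(opf) || (opf & (REQ_META | REQ_PRIO));
    }

    int main(void)
    {
        assert(favor_writeback(REQ_META));
        assert(!favor_writeback(0));
        return 0;
    }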
|
D | request.c |
     202  bio->bi_opf &= ~(REQ_PREFLUSH|REQ_FUA);  in bch_data_insert_start()
     391  if ((bio->bi_opf & (REQ_RAHEAD|REQ_BACKGROUND))) {  in check_should_bypass()
     392  if (!(bio->bi_opf & (REQ_META|REQ_PRIO)) &&  in check_should_bypass()
     653  bio->bi_opf & REQ_PREFLUSH)) {  in backing_request_endio()
     744  s->iop.flush_journal = op_is_flush(bio->bi_opf);  in search_alloc()
    1019  if (bio->bi_opf & REQ_PREFLUSH) {  in cached_dev_write()
|
/Linux-v6.1/include/trace/events/ |
D | block.h |
     275  blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
     302  blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
     459  blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
     500  blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
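Each of these tracepoints decodes bi_opf into the short "rwbs" string familiar from blktrace output. A hedged sketch of that decoding, with a hypothetical fill_rwbs() covering only a few of the cases the real kernel/trace/blktrace.c helper handles, and stand-in flag values:

    /* Sketch: turn opf into a short "rwbs" string such as "WS". */
    #include <stdio.h>
    #include <stdint.h>

    typedef uint32_t blk_opf_t;

    #define REQ_OP_MASK  0xffu
    #define REQ_OP_READ  0u
    #define REQ_OP_WRITE 1u
    #define REQ_SYNC     (1u << 11)
    #define REQ_FUA      (1u << 17)

    static void fill_rwbs(char *rwbs, blk_opf_t opf)
    {
        int i = 0;

        if ((opf & REQ_OP_MASK) == REQ_OP_WRITE)
            rwbs[i++] = 'W';
        else
            rwbs[i++] = 'R';
        if (opf & REQ_FUA)
            rwbs[i++] = 'F';
        if (opf & REQ_SYNC)
            rwbs[i++] = 'S';
        rwbs[i] = '\0';
    }

    int main(void)
    {
        char rwbs[8];

        fill_rwbs(rwbs, REQ_OP_WRITE | REQ_SYNC);
        printf("%s\n", rwbs);   /* prints "WS" */
        return 0;
    }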
|
D | bcache.h |
      31  blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
     105  blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
     140  blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
     171  blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
     241  blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
|
/Linux-v6.1/drivers/md/ |
D | md-multipath.c |
      86  else if (!(bio->bi_opf & REQ_RAHEAD)) {  in multipath_end_request()
     106  if (unlikely(bio->bi_opf & REQ_PREFLUSH)  in multipath_make_request()
     126  mp_bh->bio.bi_opf |= REQ_FAILFAST_TRANSPORT;  in multipath_make_request()
     319  bio->bi_opf |= REQ_FAILFAST_TRANSPORT;  in multipathd()
|
D | dm-zone.c |
     134  return !op_is_flush(bio->bi_opf) && bio_sectors(bio);  in dm_is_zone_write()
     402  clone->bi_opf = REQ_OP_WRITE | REQ_NOMERGE |  in dm_zone_map_bio_begin()
     403  (clone->bi_opf & (~REQ_OP_MASK));  in dm_zone_map_bio_begin()
     496  if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))  in dm_need_zone_wp_tracking()
|
D | dm-raid1.c |
     263  .bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC,  in mirror_flush()
     537  .bi_opf = REQ_OP_READ,  in read_async_bio()
     649  blk_opf_t op_flags = bio->bi_opf & (REQ_FUA | REQ_PREFLUSH);  in do_write()
     651  .bi_opf = REQ_OP_WRITE | op_flags,  in do_write()
     660  io_req.bi_opf = REQ_OP_DISCARD | op_flags;  in do_write()
     698  if ((bio->bi_opf & REQ_PREFLUSH) ||  in do_writes()
    1208  if (bio->bi_opf & REQ_RAHEAD)  in mirror_map()
    1245  if (!(bio->bi_opf & REQ_PREFLUSH) &&  in mirror_end_io()
    1254  if (bio->bi_opf & REQ_RAHEAD)  in mirror_end_io()
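The do_write() hits (649-660) carry only the cache-integrity flags, REQ_FUA and REQ_PREFLUSH, from the incoming bio into the dm-io request built with designated initializers; everything else (for example REQ_SYNC) is deliberately dropped. A sketch of that filtering with a stand-in io_request struct in place of the real struct dm_io_request:

    /* Sketch: forward only FUA/PREFLUSH into a new write request. */
    #include <assert.h>
    #include <stdint.h>

    typedef uint32_t blk_opf_t;

    #define REQ_OP_WRITE 1u
    #define REQ_SYNC     (1u << 11)
    #define REQ_PREFLUSH (1u << 16)
    #define REQ_FUA      (1u << 17)

    struct io_request {          /* stand-in for struct dm_io_request */
        blk_opf_t bi_opf;
    };

    int main(void)
    {
        blk_opf_t incoming = REQ_SYNC | REQ_FUA;   /* bio->bi_opf */
        blk_opf_t op_flags = incoming & (REQ_FUA | REQ_PREFLUSH);
        struct io_request io_req = {
            .bi_opf = REQ_OP_WRITE | op_flags,     /* REQ_SYNC not copied */
        };

        assert(io_req.bi_opf == (REQ_OP_WRITE | REQ_FUA));
        return 0;
    }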
|
D | dm.c |
     491  return ((bio->bi_opf & REQ_PREFLUSH) && bio->bi_iter.bi_size);  in bio_is_flush_with_data()
     625  clone->bi_opf &= ~REQ_DM_POLL_LIST;  in alloc_tio()
     912  (bio->bi_opf & REQ_POLLED));  in dm_handle_requeue()
     919  if (bio->bi_opf & REQ_POLLED) {  in dm_handle_requeue()
     993  bio->bi_opf &= ~REQ_PREFLUSH;  in __dm_io_complete()
    1104  return unlikely((bio->bi_opf & REQ_SWAP) != 0) && unlikely(ti->limit_swap_bios);  in swap_bios_limit()
    1662  if (!(bio->bi_opf & REQ_DM_POLL_LIST)) {  in dm_queue_poll_io()
    1663  bio->bi_opf |= REQ_DM_POLL_LIST;  in dm_queue_poll_io()
    1699  if (unlikely((ci->bio->bi_opf & REQ_NOWAIT) != 0) &&  in __split_and_process_bio()
    1710  ci->submit_as_polled = !!(ci->bio->bi_opf & REQ_POLLED);  in __split_and_process_bio()
    [all …]
|
D | dm-zero.c |
      40  if (bio->bi_opf & REQ_RAHEAD) {  in zero_map()
|
D | dm-io.c |
     493  if ((io_req->bi_opf & REQ_OP_MASK) == REQ_OP_READ) {  in dp_init()
     523  io_req->bi_opf, &dp, sync_error_bits);  in dm_io()
     526  io_req->bi_opf, &dp, io_req->notify.fn,  in dm_io()
|
D | raid1.c |
     471  (bio->bi_opf & MD_FAILFAST) &&  in raid1_end_write_request()
    1224  const blk_opf_t do_sync = bio->bi_opf & REQ_SYNC;  in raid1_read_request()
    1254  bio->bi_opf & REQ_NOWAIT)) {  in raid1_read_request()
    1327  read_bio->bi_opf |= MD_FAILFAST;  in raid1_read_request()
    1357  if (bio->bi_opf & REQ_NOWAIT) {  in raid1_write_request()
    1379  bio->bi_opf & REQ_NOWAIT)) {  in raid1_write_request()
    1482  if (bio->bi_opf & REQ_NOWAIT) {  in raid1_write_request()
    1562  mbio->bi_opf = bio_op(bio) | (bio->bi_opf & (REQ_SYNC | REQ_FUA));  in raid1_write_request()
    1566  mbio->bi_opf |= MD_FAILFAST;  in raid1_write_request()
    1602  if (unlikely(bio->bi_opf & REQ_PREFLUSH)  in raid1_make_request()
    [all …]
|
D | dm-flakey.c |
      19  (((bio)->bi_opf & (fc)->corrupt_bio_flags) == (fc)->corrupt_bio_flags)
     312  (bio_data_dir(bio) == WRITE) ? 'w' : 'r', bio->bi_opf,  in corrupt_bio_data()
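The macro at line 19 is an all-of test, not an any-of test: corruption triggers only when every configured corrupt_bio_flags bit is present in bi_opf. A sketch of the difference with stand-in values and a hypothetical all_flags_match() helper:

    /* Sketch: (opf & mask) == mask is true only when every mask bit is set. */
    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    typedef uint32_t blk_opf_t;

    #define REQ_SYNC (1u << 11)
    #define REQ_FUA  (1u << 17)

    static bool all_flags_match(blk_opf_t opf, blk_opf_t mask)
    {
        return (opf & mask) == mask;
    }

    int main(void)
    {
        blk_opf_t mask = REQ_SYNC | REQ_FUA;

        assert(all_flags_match(REQ_SYNC | REQ_FUA, mask));
        assert(!all_flags_match(REQ_SYNC, mask));   /* an any-of test would pass */
        return 0;
    }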
|
/Linux-v6.1/include/linux/ |
D | blk_types.h |
     255  blk_opf_t bi_opf;  /* bottom bits REQ_OP, top bits req_flags */  (member)
     472  return bio->bi_opf & REQ_OP_MASK;  in bio_op()
     479  bio->bi_opf = op | op_flags;  in bio_set_op_attrs()
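These three hits summarize the whole encoding: the bottom REQ_OP_BITS of bi_opf name the operation (bio_op() masks them out with REQ_OP_MASK) and the remaining high bits carry REQ_* flags, so bio_set_op_attrs() can build the field with a single OR. A compilable userspace sketch of that split, using stand-in constants rather than the real values from include/linux/blk_types.h:

    /* Sketch only: flag values are illustrative stand-ins. */
    #include <stdio.h>
    #include <stdint.h>

    typedef uint32_t blk_opf_t;

    #define REQ_OP_BITS  8
    #define REQ_OP_MASK  ((blk_opf_t)((1 << REQ_OP_BITS) - 1))
    #define REQ_OP_READ  0u
    #define REQ_OP_WRITE 1u
    #define REQ_SYNC     (1u << (REQ_OP_BITS + 3))   /* arbitrary high bits */
    #define REQ_FUA      (1u << (REQ_OP_BITS + 5))

    /* Mirrors bio_op(): the bottom bits are the operation. */
    static blk_opf_t bio_op(blk_opf_t opf)
    {
        return opf & REQ_OP_MASK;
    }

    int main(void)
    {
        /* Mirrors bio_set_op_attrs(): op | flags in one word. */
        blk_opf_t opf = REQ_OP_WRITE | REQ_SYNC | REQ_FUA;

        printf("op=%u sync=%d fua=%d\n", bio_op(opf),
               !!(opf & REQ_SYNC), !!(opf & REQ_FUA));
        return 0;
    }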
|
D | dm-io.h |
      61  blk_opf_t bi_opf;  /* Request type and flags */  (member)
|
D | bio.h |
     346  if (bio->bi_opf & REQ_INTEGRITY)  in bio_integrity()
     780  bio->bi_opf |= REQ_POLLED;  in bio_set_polled()
     782  bio->bi_opf |= REQ_NOWAIT;  in bio_set_polled()
     788  bio->bi_opf &= ~(REQ_POLLED | REQ_ALLOC_CACHE);  in bio_clear_polled()
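bio_set_polled() at 780-782 sets REQ_POLLED and, for async submitters that must never sleep, REQ_NOWAIT as well; bio_clear_polled() undoes the polled state when falling back to interrupt-driven completion. A sketch of the pairing with stand-in flag values (the is_sync parameter stands in for the kernel's iocb test):

    /* Sketch of the set/clear pairing for polled I/O. */
    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    typedef uint32_t blk_opf_t;

    #define REQ_NOWAIT      (1u << 21)
    #define REQ_POLLED      (1u << 22)
    #define REQ_ALLOC_CACHE (1u << 23)

    static blk_opf_t set_polled(blk_opf_t opf, bool is_sync)
    {
        opf |= REQ_POLLED;
        if (!is_sync)
            opf |= REQ_NOWAIT;   /* async pollers must not block */
        return opf;
    }

    static blk_opf_t clear_polled(blk_opf_t opf)
    {
        return opf & ~(REQ_POLLED | REQ_ALLOC_CACHE);
    }

    int main(void)
    {
        blk_opf_t opf = set_polled(0, false);

        assert((opf & REQ_POLLED) && (opf & REQ_NOWAIT));
        assert(!(clear_polled(opf) & REQ_POLLED));
        return 0;
    }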
|
/Linux-v6.1/fs/xfs/ |
D | xfs_bio_io.c |
      42  prev->bi_opf, GFP_KERNEL);  in xfs_rw_bdev()
|
/Linux-v6.1/Documentation/admin-guide/device-mapper/ |
D | dm-flakey.rst |
      63  Perform the replacement only if bio->bi_opf has all the selected flags set.
|