/Linux-v5.10/block/

elevator.c
    54  #define rq_hash_key(rq) (blk_rq_pos(rq) + blk_rq_sectors(rq))
   264  if (blk_rq_pos(rq) < blk_rq_pos(__rq))  in elv_rb_add()
   266  else if (blk_rq_pos(rq) >= blk_rq_pos(__rq))  in elv_rb_add()
   291  if (sector < blk_rq_pos(rq))  in elv_rb_find()
   293  else if (sector > blk_rq_pos(rq))  in elv_rb_find()
   377  __rq = elv_rqhash_find(q, blk_rq_pos(rq));  in elv_attempt_insert_merge()

blk-merge.c
   575  blk_rq_get_max_sectors(req, blk_rq_pos(req))) {  in ll_back_merge_fn()
   610  blk_rq_get_max_sectors(req, blk_rq_pos(req)))  in req_attempt_discard_merge()
   632  blk_rq_get_max_sectors(req, blk_rq_pos(req)))  in ll_merge_requests_fn()
   712  else if (blk_rq_pos(req) + blk_rq_sectors(req) == blk_rq_pos(next))  in blk_try_req_merge()
   896  else if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)  in blk_try_merge()
   898  else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)  in blk_try_merge()
   977  blk_rq_get_max_sectors(req, blk_rq_pos(req)))  in bio_attempt_discard_merge()
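The adjacency tests in blk_try_merge() / blk_try_req_merge() above are pure sector arithmetic: an existing request can take a back merge when it ends exactly where the new I/O starts, and a front merge when the new I/O ends exactly at the request's start sector. A minimal standalone sketch of that geometry (plain C, not kernel code; the toy struct and helper names are illustrative, only the arithmetic mirrors the lines listed above):

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t sector_t;

    /* Toy model of a request and a bio: start sector + length in 512-byte sectors. */
    struct toy_rq  { sector_t pos;    unsigned int sectors; };
    struct toy_bio { sector_t sector; unsigned int sectors; };

    /* Back merge if the request ends where the bio starts, front merge if the
     * bio ends where the request starts, otherwise no merge. */
    static const char *try_merge(const struct toy_rq *rq, const struct toy_bio *bio)
    {
        if (rq->pos + rq->sectors == bio->sector)
            return "back merge";
        if (rq->pos - bio->sectors == bio->sector)
            return "front merge";
        return "no merge";
    }

    int main(void)
    {
        struct toy_rq  rq    = { .pos = 1000, .sectors = 8 };
        struct toy_bio back  = { .sector = 1008, .sectors = 8 };
        struct toy_bio front = { .sector = 992,  .sectors = 8 };

        printf("%s\n", try_merge(&rq, &back));   /* back merge  */
        printf("%s\n", try_merge(&rq, &front));  /* front merge */
        return 0;
    }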

bfq-iosched.c
   464  s1 = blk_rq_pos(rq1);  in bfq_choose_req()
   465  s2 = blk_rq_pos(rq2);  in bfq_choose_req()
   570  if (sector > blk_rq_pos(bfqq->next_rq))  in bfq_rq_pos_tree_lookup()
   572  else if (sector < blk_rq_pos(bfqq->next_rq))  in bfq_rq_pos_tree_lookup()
   636  blk_rq_pos(bfqq->next_rq), &parent, &p);  in bfq_pos_tree_add_move()
   914  return bfq_choose_req(bfqd, next, prev, blk_rq_pos(last));  in bfq_find_next_rq()
  2131  return abs(blk_rq_pos(rq) - last_pos);  in get_sdist()
  2268  blk_rq_pos(req) <  in bfq_request_merged()
  2269  blk_rq_pos(container_of(rb_prev(&req->rb_node),  in bfq_request_merged()
  2398  return blk_rq_pos(io_struct);  in bfq_io_struct_pos()
  [all …]

blk-core.c
   233  blk_rq_pos(req), req_op(req), blk_op_str(req_op(req)),  in print_req_error()
   273  (unsigned long long)blk_rq_pos(rq),  in blk_dump_rq_flags()
  1320  rq->part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));  in blk_account_io_start()
  1630  rq->__sector = blk_rq_pos(rq_src);  in blk_rq_prep_clone()

/Linux-v5.10/include/linux/

blktrace_api.h
   131  if (blk_rq_is_passthrough(rq) || blk_rq_pos(rq) == (sector_t)-1)  in blk_rq_trace_sector()
   133  return blk_rq_pos(rq);  in blk_rq_trace_sector()

t10-pi.h
    48  return blk_rq_pos(rq) >> (shift - SECTOR_SHIFT) & 0xffffffff;  in t10_pi_ref_tag()
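t10_pi_ref_tag() above turns the request's 512-byte start sector into a count of protection intervals and keeps only the low 32 bits for the T10 PI reference tag. A hedged worked example of that expression, assuming a 4096-byte integrity interval so that shift = 12 (the interval size is an assumption for the illustration, not something the header fixes):

    #include <stdint.h>
    #include <stdio.h>

    #define SECTOR_SHIFT 9   /* 512-byte sectors */

    /* Model of the expression at t10-pi.h:48: sectors -> intervals -> low 32 bits. */
    static uint32_t ref_tag(uint64_t pos, unsigned int interval_shift)
    {
        return (pos >> (interval_shift - SECTOR_SHIFT)) & 0xffffffff;
    }

    int main(void)
    {
        /* 4096-byte intervals hold 8 sectors, so sector 80 is interval 10. */
        printf("%u\n", ref_tag(80, 12));                  /* prints 10 */
        /* Very large positions wrap, since only 32 bits fit in the tag. */
        printf("%u\n", ref_tag(0x800000000ULL, 12));      /* prints 0  */
        return 0;
    }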

elevator.h
   163  #define rq_end_sector(rq) (blk_rq_pos(rq) + blk_rq_sectors(rq))

blkdev.h
   983  static inline sector_t blk_rq_pos(const struct request *rq)  in blk_rq_pos() (function definition)
  1022  return blk_queue_zone_no(rq->q, blk_rq_pos(rq));  in blk_rq_zone_no()
  1027  return blk_queue_zone_is_seq(rq->q, blk_rq_pos(rq));  in blk_rq_zone_is_seq()
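blkdev.h:983 is the definition everything else in this listing resolves to: in v5.10, blk_rq_pos() returns the request's starting sector (rq->__sector, in 512-byte units), which blk-core.c:1630 above also assigns directly when cloning a request. The zone helpers at 1022/1027 then map that sector to a zone index. A minimal standalone model of the relationship (plain C; the toy struct and the fixed zone size are assumptions for illustration, not the kernel's actual layout):

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t sector_t;

    /* Toy request: only the field blk_rq_pos() exposes. */
    struct toy_request { sector_t __sector; };   /* start sector, 512-byte units */

    /* Mirrors blk_rq_pos(): just returns the start sector. */
    static sector_t toy_rq_pos(const struct toy_request *rq)
    {
        return rq->__sector;
    }

    /* Zoned devices use a fixed zone size, so a zone index is a simple divide. */
    static unsigned int toy_rq_zone_no(const struct toy_request *rq, sector_t zone_sectors)
    {
        return (unsigned int)(toy_rq_pos(rq) / zone_sectors);
    }

    int main(void)
    {
        struct toy_request rq = { .__sector = 524288 };   /* 256 MiB into the disk */

        printf("pos=%llu zone=%u\n",
               (unsigned long long)toy_rq_pos(&rq),
               toy_rq_zone_no(&rq, 524288));              /* 256 MiB zones -> zone 1 */
        return 0;
    }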

/Linux-v5.10/drivers/block/

z2ram.c
    73  unsigned long start = blk_rq_pos(req) << 9;  in z2_queue_rq()
    81  (unsigned long long)blk_rq_pos(req),  in z2_queue_rq()

swim3.c
   348  fs->req_cyl = ((long)blk_rq_pos(req)) / fs->secpercyl;  in swim3_queue_rq()
   349  x = ((long)blk_rq_pos(req)) % fs->secpercyl;  in swim3_queue_rq()
   632  (long)blk_rq_pos(fs->cur_req));  in xfer_timeout()
   750  (long)blk_rq_pos(req), err);  in swim3_interrupt()

virtio_blk.c
   138  range[0].sector = cpu_to_le64(blk_rq_pos(req));  in virtblk_setup_discard_write_zeroes()
   258  0 : cpu_to_virtio64(vblk->vdev, blk_rq_pos(req));  in virtio_queue_rq()

xsysace.c
   670  (unsigned long long)blk_rq_pos(req),  in ace_fsm_dostate()
   677  ace_out32(ace, ACE_MPULBA, blk_rq_pos(req) & 0x0FFFFFFF);  in ace_fsm_dostate()

/Linux-v5.10/drivers/s390/block/

dasd_fba.c
   347  first_rec = blk_rq_pos(req) >> block->s2b_shift;  in dasd_fba_build_cp_discard()
   349  (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;  in dasd_fba_build_cp_discard()
   464  first_rec = blk_rq_pos(req) >> block->s2b_shift;  in dasd_fba_build_cp_regular()
   466  (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;  in dasd_fba_build_cp_regular()
   502  block->bp_block, blk_rq_pos(req), blk_rq_sectors(req));  in dasd_fba_build_cp_regular()

dasd_diag.c
   531  first_rec = blk_rq_pos(req) >> block->s2b_shift;  in dasd_diag_build_cp()
   533  (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;  in dasd_diag_build_cp()

dasd_eckd.c
  3099  first_trk = blk_rq_pos(req) >> block->s2b_shift;  in dasd_eckd_ese_format()
  3102  (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;  in dasd_eckd_ese_format()
  3182  first_trk = first_blk = blk_rq_pos(req) >> block->s2b_shift;  in dasd_eckd_ese_read()
  3185  (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;  in dasd_eckd_ese_read()
  4550  first_rec = first_trk = blk_rq_pos(req) >> block->s2b_shift;  in dasd_eckd_build_cp()
  4553  (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;  in dasd_eckd_build_cp()
  4632  start_padding_sectors = blk_rq_pos(req) % DASD_RAW_SECTORS_PER_TRACK;  in dasd_eckd_build_cp_raw()
  4633  end_sector_offset = (blk_rq_pos(req) + blk_rq_sectors(req)) %  in dasd_eckd_build_cp_raw()
  4646  first_trk = blk_rq_pos(req) / DASD_RAW_SECTORS_PER_TRACK;  in dasd_eckd_build_cp_raw()
  4647  last_trk = (blk_rq_pos(req) + blk_rq_sectors(req) - 1) /  in dasd_eckd_build_cp_raw()
  [all …]
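All three DASD drivers above use the same idiom: block->s2b_shift is log2 of the number of 512-byte sectors per device block, so the first and last records touched by a request fall out of shifting the start sector and the inclusive end sector. A hedged worked example, assuming 4096-byte device blocks (s2b_shift = 3; the concrete numbers are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t pos = 24, nr_sectors = 16;   /* request covers sectors 24..39 */
        unsigned int s2b_shift = 3;           /* 4096-byte blocks = 8 sectors per block */

        /* Same expressions as the dasd_*_build_cp() sites listed above. */
        uint64_t first_rec = pos >> s2b_shift;                      /* 24 / 8 = 3 */
        uint64_t last_rec  = (pos + nr_sectors - 1) >> s2b_shift;   /* 39 / 8 = 4 */

        printf("records %llu..%llu\n",
               (unsigned long long)first_rec, (unsigned long long)last_rec);
        return 0;
    }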

/Linux-v5.10/drivers/md/

dm-rq.c
   134  blk_rq_pos(orig), tio->n_sectors, true,  in rq_end_stats()
   401  blk_rq_pos(rq));  in map_request()
   451  blk_rq_pos(orig), tio->n_sectors, false, 0,  in dm_start_request()

/Linux-v5.10/drivers/ide/

ide-lib.c
    75  (unsigned long long)blk_rq_pos(rq));  in ide_dump_ata_error()

/Linux-v5.10/include/scsi/

scsi_cmnd.h
   285  return blk_rq_pos(scmd->request);  in scsi_get_lba()

/Linux-v5.10/drivers/scsi/

sd.c
   878  u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));  in sd_setup_unmap_cmnd()
   914  u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));  in sd_setup_write_same16_cmnd()
   946  u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));  in sd_setup_write_same10_cmnd()
   977  u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));  in sd_setup_write_zeroes_cmnd()
  1073  u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));  in sd_setup_write_same_cmnd()
  1219  sector_t lba = sectors_to_logical(sdp, blk_rq_pos(rq));  in sd_setup_read_write_cmnd()
  1239  if (blk_rq_pos(rq) + blk_rq_sectors(rq) > get_capacity(rq->rq_disk)) {  in sd_setup_read_write_cmnd()
  1244  if ((blk_rq_pos(rq) & mask) || (blk_rq_sectors(rq) & mask)) {  in sd_setup_read_write_cmnd()
  1311  (unsigned long long)blk_rq_pos(rq),  in sd_setup_read_write_cmnd()
  1993  start_lba = sectors_to_logical(sdev, blk_rq_pos(req));  in sd_completed_bytes()
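Every sd.c call site above first converts the 512-byte start sector into the device's logical block address with sectors_to_logical(), a right shift by (log2 of the logical block size minus 9); lines 1239/1244 also use the raw sector values for capacity and alignment checks. A small hedged model of the conversion (the 4096-byte logical block size is an assumption for the example):

    #include <stdint.h>
    #include <stdio.h>

    /* Model of sectors_to_logical(): 512-byte sectors -> device logical blocks. */
    static uint64_t sectors_to_logical(unsigned int logical_block_size, uint64_t sector)
    {
        unsigned int shift = 0;

        while ((512u << shift) < logical_block_size)   /* shift = log2(lbs) - 9 */
            shift++;
        return sector >> shift;
    }

    int main(void)
    {
        /* A request starting at sector 80 on a 4096-byte-block disk is LBA 10. */
        printf("LBA %llu\n", (unsigned long long)sectors_to_logical(4096, 80));
        return 0;
    }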

sr.c
   360  blk_rq_pos(SCpnt->request)) << 9;  in sr_done()
   471  if (((unsigned int)blk_rq_pos(rq) % (s_size >> 9)) ||  in sr_init_command()
   487  block = (unsigned int)blk_rq_pos(rq) / (s_size >> 9);  in sr_init_command()
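sr.c works in CD frames rather than 512-byte sectors: s_size is the frame size in bytes (commonly 2048), sr_init_command() rejects requests whose position or length is not frame-aligned (line 471), and line 487 divides to get the frame number. A brief hedged illustration of that check (the 2048-byte frame size is the common case, not guaranteed):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int s_size = 2048;                      /* CD frame size in bytes */
        unsigned int sectors_per_frame = s_size >> 9;    /* 4 x 512-byte sectors */
        uint64_t pos = 400;                              /* request start, 512-byte sectors */

        if (pos % sectors_per_frame) {                   /* mirrors the check at sr.c:471 */
            printf("unaligned request\n");
            return 1;
        }
        printf("CD frame %llu\n", (unsigned long long)(pos / sectors_per_frame)); /* 100 */
        return 0;
    }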

sd_zbc.c
   249  sector_t sector = blk_rq_pos(rq);  in sd_zbc_cmnd_checks()
   389  sector_t sector = blk_rq_pos(rq);  in sd_zbc_setup_zone_mgmt_cmnd()

/Linux-v5.10/include/trace/events/

block.h
   133  __entry->sector = blk_rq_pos(rq);
   636  __entry->sector = blk_rq_pos(rq);

/Linux-v5.10/drivers/mtd/

mtd_blkdevs.c
    72  block = blk_rq_pos(req) << 9 >> tr->blkshift;  in do_blktrans_request()
    81  if (blk_rq_pos(req) + blk_rq_cur_sectors(req) >  in do_blktrans_request()
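do_blktrans_request() converts the request position to a byte offset (<< 9) and then to a translation-layer block (>> tr->blkshift); line 81 uses the raw position plus the current segment length for a bounds check. A hedged sketch of the block computation, assuming a 4096-byte translation-layer block size (blkshift = 12 is an assumption for the example):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t pos = 64;            /* request start in 512-byte sectors */
        unsigned int blkshift = 12;   /* translation layer uses 4096-byte blocks */

        /* Same shape as mtd_blkdevs.c:72: sectors -> bytes -> tr blocks. */
        uint64_t block = (pos << 9) >> blkshift;

        printf("tr block %llu\n", (unsigned long long)block);   /* 64*512/4096 = 8 */
        return 0;
    }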

/Linux-v5.10/drivers/nvme/host/

zns.c
   250  c->zms.slba = cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));  in nvme_setup_zone_mgmt_send()

/Linux-v5.10/drivers/mmc/core/

block.c
  1049  from = blk_rq_pos(req);  in mmc_blk_issue_discard_rq()
  1087  from = blk_rq_pos(req);  in mmc_blk_issue_secdiscard_rq()
  1166  if (!IS_ALIGNED(blk_rq_pos(req), card->ext_csd.rel_sectors))  in mmc_apply_rel_rw()
  1266  brq->data.blk_addr = blk_rq_pos(req);  in mmc_blk_data_prep()
  1290  (blk_rq_pos(req) + blk_rq_sectors(req) ==  in mmc_blk_data_prep()
  1531  brq->cmd.arg = blk_rq_pos(req);  in mmc_blk_rw_rq_prep()