/Linux-v4.19/block/ |
D | elevator.c |
    52    #define rq_hash_key(rq) (blk_rq_pos(rq) + blk_rq_sectors(rq))
    319   if (blk_rq_pos(rq) < blk_rq_pos(__rq))  in elv_rb_add()
    321   else if (blk_rq_pos(rq) >= blk_rq_pos(__rq))  in elv_rb_add()
    346   if (sector < blk_rq_pos(rq))  in elv_rb_find()
    348   else if (sector > blk_rq_pos(rq))  in elv_rb_find()
    385   if (blk_rq_pos(rq) >= boundary) {  in elv_dispatch_sort()
    386   if (blk_rq_pos(pos) < boundary)  in elv_dispatch_sort()
    389   if (blk_rq_pos(pos) >= boundary)  in elv_dispatch_sort()
    392   if (blk_rq_pos(rq) >= blk_rq_pos(pos))  in elv_dispatch_sort()
    496   __rq = elv_rqhash_find(q, blk_rq_pos(rq));  in elv_attempt_insert_merge()
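Note: the hits above show the two indexes the legacy elevator keeps over queued requests, both keyed on blk_rq_pos(): an rbtree ordered by start sector (elv_rb_add/elv_rb_find) and a hash keyed on the end sector (rq_hash_key), so a bio that begins exactly where a queued request ends can be found for a back merge. A minimal userspace sketch of that end-sector key follows; struct req and its fields are hypothetical stand-ins for struct request, not kernel definitions.

#include <stdio.h>

typedef unsigned long long sector_t;

/* hypothetical stand-in for struct request */
struct req {
	sector_t pos;          /* models blk_rq_pos(rq)     */
	unsigned int sectors;  /* models blk_rq_sectors(rq) */
};

/* end sector = start + length, as in elevator.c line 52 */
static sector_t rq_hash_key(const struct req *rq)
{
	return rq->pos + rq->sectors;
}

int main(void)
{
	struct req rq = { .pos = 2048, .sectors = 8 };
	sector_t bio_start = 2056;

	/* a bio starting at the request's end sector is a back-merge candidate */
	printf("back-mergeable: %d\n", rq_hash_key(&rq) == bio_start);
	return 0;
}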
|
D | blk-merge.c |
    518   blk_rq_get_max_sectors(req, blk_rq_pos(req))) {  in ll_back_merge_fn()
    571   blk_rq_get_max_sectors(req, blk_rq_pos(req)))  in req_attempt_discard_merge()
    602   blk_rq_get_max_sectors(req, blk_rq_pos(req)))  in ll_merge_requests_fn()
    691   if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next))  in attempt_merge()
    848   else if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)  in blk_try_merge()
    850   else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)  in blk_try_merge()
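blk_try_merge() (hits 848 and 850) decides the merge direction from sector arithmetic alone: back merge when the bio starts where the request ends, front merge when the bio ends where the request starts. A hedged userspace sketch of those two tests; rq_model and bio_model are illustrative stand-ins for the kernel structs.

#include <stdbool.h>
#include <stdio.h>

typedef unsigned long long sector_t;

struct rq_model  { sector_t pos;    unsigned int sectors; }; /* blk_rq_pos()/blk_rq_sectors()     */
struct bio_model { sector_t sector; unsigned int sectors; }; /* bi_iter.bi_sector / bio_sectors() */

/* bio starts exactly at the request's end sector (blk-merge.c:848) */
bool can_back_merge(const struct rq_model *rq, const struct bio_model *bio)
{
	return rq->pos + rq->sectors == bio->sector;
}

/* bio ends exactly at the request's start sector (blk-merge.c:850) */
bool can_front_merge(const struct rq_model *rq, const struct bio_model *bio)
{
	return rq->pos - bio->sectors == bio->sector;
}

int main(void)
{
	struct rq_model  rq    = { .pos = 100, .sectors = 8 };
	struct bio_model back  = { .sector = 108, .sectors = 4 };
	struct bio_model front = { .sector = 96,  .sectors = 4 };

	printf("back=%d front=%d\n", can_back_merge(&rq, &back),
				     can_front_merge(&rq, &front));
	return 0;
}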
|
D | bfq-iosched.c |
    459   s1 = blk_rq_pos(rq1);  in bfq_choose_req()
    460   s2 = blk_rq_pos(rq2);  in bfq_choose_req()
    565   if (sector > blk_rq_pos(bfqq->next_rq))  in bfq_rq_pos_tree_lookup()
    567   else if (sector < blk_rq_pos(bfqq->next_rq))  in bfq_rq_pos_tree_lookup()
    618   blk_rq_pos(bfqq->next_rq), &parent, &p);  in bfq_pos_tree_add_move()
    860   return bfq_choose_req(bfqd, next, prev, blk_rq_pos(last));  in bfq_find_next_rq()
    1751  return abs(blk_rq_pos(rq) - last_pos);  in get_sdist()
    1885  blk_rq_pos(req) <  in bfq_request_merged()
    1886  blk_rq_pos(container_of(rb_prev(&req->rb_node),  in bfq_request_merged()
    2002  return blk_rq_pos(io_struct);  in bfq_io_struct_pos()
    [all …]
|
D | cfq-iosched.c |
    1107  s1 = blk_rq_pos(rq1);  in cfq_choose_req()
    1108  s2 = blk_rq_pos(rq2);  in cfq_choose_req()
    1221  return cfq_choose_req(cfqd, next, prev, blk_rq_pos(last));  in cfq_find_next_rq()
    2297  if (sector > blk_rq_pos(cfqq->next_rq))  in cfq_prio_tree_lookup()
    2299  else if (sector < blk_rq_pos(cfqq->next_rq))  in cfq_prio_tree_lookup()
    2330  blk_rq_pos(cfqq->next_rq), &parent, &p);  in cfq_prio_tree_add()
    2484  cfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);  in cfq_activate_request()
    2760  if (blk_rq_pos(rq) >= cfqd->last_position)  in cfq_dist_from_last()
    2761  return blk_rq_pos(rq) - cfqd->last_position;  in cfq_dist_from_last()
    2763  return cfqd->last_position - blk_rq_pos(rq);  in cfq_dist_from_last()
    [all …]
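Both CFQ and BFQ use blk_rq_pos() as a seek-distance heuristic: cfq_dist_from_last() (hits 2760-2763) and BFQ's get_sdist() (hit 1751) take the absolute sector gap between a request's start and the last served position, which cfq_activate_request() (hit 2484) advances to the end of each dispatched request. A small userspace model of the distance calculation, with illustrative names only.

#include <stdio.h>

typedef unsigned long long sector_t;

/* absolute sector gap between a request's start and the last served position,
 * mirroring cfq_dist_from_last() and bfq's get_sdist() */
static sector_t dist_from_last(sector_t rq_pos, sector_t last_position)
{
	return rq_pos >= last_position ? rq_pos - last_position
				       : last_position - rq_pos;
}

int main(void)
{
	sector_t last = 5000;   /* models cfqd->last_position */

	/* all else being equal, the closer request wins the tie-break */
	printf("%llu vs %llu\n", dist_from_last(5128, last), dist_from_last(3000, last));
	return 0;
}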
|
D | blk-core.c |
    261   (unsigned long long)blk_rq_pos(req));  in print_req_error()
    287   (unsigned long long)blk_rq_pos(rq),  in blk_dump_rq_flags()
    1852  blk_rq_get_max_sectors(req, blk_rq_pos(req)))  in bio_attempt_discard_merge()
    2792  part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));  in blk_account_io_start()
    3491  dst->__sector = blk_rq_pos(src);  in __blk_rq_prep_clone()
    3619  (rqa->q == rqb->q && blk_rq_pos(rqa) < blk_rq_pos(rqb)));  in plug_rq_cmp()
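plug_rq_cmp() (hit 3619) sorts a task's plugged requests by queue first and then by blk_rq_pos(), so requests for the same device are flushed in ascending sector order. A userspace sketch of an equivalent comparator driven through qsort(); the struct and field names are assumptions, not the kernel's.

#include <stdio.h>
#include <stdlib.h>

typedef unsigned long long sector_t;

struct rq_model {
	int q;           /* models rq->q: which queue the request belongs to */
	sector_t pos;    /* models blk_rq_pos(rq) */
};

/* group by queue first, then ascending start sector, like plug_rq_cmp() */
static int plug_rq_cmp_model(const void *a, const void *b)
{
	const struct rq_model *ra = a, *rb = b;

	if (ra->q != rb->q)
		return ra->q < rb->q ? -1 : 1;
	if (ra->pos != rb->pos)
		return ra->pos < rb->pos ? -1 : 1;
	return 0;
}

int main(void)
{
	struct rq_model plug[] = { {1, 900}, {0, 80}, {1, 100}, {0, 16} };
	size_t i;

	qsort(plug, 4, sizeof(plug[0]), plug_rq_cmp_model);
	for (i = 0; i < 4; i++)
		printf("q=%d pos=%llu\n", plug[i].q, plug[i].pos);
	return 0;
}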
|
D | deadline-iosched.c | 141 BUG_ON(sector != blk_rq_pos(__rq)); in deadline_merge()
|
/Linux-v4.19/include/linux/ |
D | t10-pi.h | 43 return blk_rq_pos(rq) >> in t10_pi_ref_tag()
|
D | blktrace_api.h | 119 return blk_rq_is_passthrough(rq) ? 0 : blk_rq_pos(rq); in blk_rq_trace_sector()
|
D | elevator.h | 257 #define rq_end_sector(rq) (blk_rq_pos(rq) + blk_rq_sectors(rq))
|
D | blkdev.h |
    1041  static inline sector_t blk_rq_pos(const struct request *rq)  in blk_rq_pos() function
    1071  return blk_queue_zone_no(rq->q, blk_rq_pos(rq));  in blk_rq_zone_no()
    1076  return blk_queue_zone_is_seq(rq->q, blk_rq_pos(rq));  in blk_rq_zone_is_seq()
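blkdev.h line 1041 is the definition every caller above relies on: blk_rq_pos() reports the request's start position in 512-byte sector units (it simply returns rq->__sector in v4.19), independent of the device's logical block size, which is why drivers shift or divide it (see the SCSI, DASD and MTD entries below). A minimal userspace model of that contract; request_model and the helper name are illustrative.

#include <stdio.h>

typedef unsigned long long sector_t;

/* illustrative stand-in for struct request: the start position is kept in
 * 512-byte sectors regardless of the device's logical block size */
struct request_model {
	sector_t __sector;
};

sector_t blk_rq_pos_model(const struct request_model *rq)
{
	return rq->__sector;
}

int main(void)
{
	struct request_model rq = { .__sector = 1234 };

	/* sectors are 512 bytes, so <<9 gives a byte offset (cf. z2ram.c, mtd_blkdevs.c) */
	printf("byte offset = %llu\n", blk_rq_pos_model(&rq) << 9);
	return 0;
}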
|
/Linux-v4.19/drivers/block/ |
D | z2ram.c |
    75    unsigned long start = blk_rq_pos(req) << 9;  in do_z2_request()
    82    (unsigned long long)blk_rq_pos(req),  in do_z2_request()
|
D | swim3.c |
    344   (long)blk_rq_pos(req), blk_rq_sectors(req),  in start_request()
    350   if (blk_rq_pos(req) >= fs->total_secs) {  in start_request()
    352   (long)blk_rq_pos(req), (long)fs->total_secs);  in start_request()
    378   fs->req_cyl = ((long)blk_rq_pos(req)) / fs->secpercyl;  in start_request()
    379   x = ((long)blk_rq_pos(req)) % fs->secpercyl;  in start_request()
    667   (long)blk_rq_pos(fs->cur_req));  in xfer_timeout()
    787   (long)blk_rq_pos(req), err);  in swim3_interrupt()
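In the swim3 floppy driver (hits 378-379), the flat sector position is split into a cylinder number and an offset within the cylinder by dividing and taking the remainder against sectors-per-cylinder. A small worked model of that split; the values are made up for illustration.

#include <stdio.h>

/* split a flat sector number into (cylinder, offset within cylinder),
 * mirroring start_request() in swim3.c lines 378-379 */
int main(void)
{
	long pos = 1443;        /* blk_rq_pos(req), in sectors (made-up value) */
	long secpercyl = 36;    /* fs->secpercyl: sectors per cylinder */

	long cyl = pos / secpercyl;   /* fs->req_cyl */
	long off = pos % secpercyl;   /* x: sector offset inside that cylinder */

	printf("cylinder %ld, offset %ld\n", cyl, off);
	return 0;
}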
|
D | swim.c |
    559   if (blk_rq_pos(req) >= fs->total_secs)  in do_fd_request()
    571   err = floppy_read_sectors(fs, blk_rq_pos(req),  in do_fd_request()
|
D | ataflop.c |
    1430  fd_request ? blk_rq_pos(fd_request) : 0 ));  in redo_fd_request()
    1478  if (blk_rq_pos(fd_request) + 1 > UDT->blocks) {  in redo_fd_request()
    1488  ReqBlock = blk_rq_pos(fd_request);  in redo_fd_request()
|
/Linux-v4.19/drivers/s390/block/ |
D | dasd_fba.c |
    346   first_rec = blk_rq_pos(req) >> block->s2b_shift;  in dasd_fba_build_cp_discard()
    348   (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;  in dasd_fba_build_cp_discard()
    463   first_rec = blk_rq_pos(req) >> block->s2b_shift;  in dasd_fba_build_cp_regular()
    465   (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;  in dasd_fba_build_cp_regular()
    501   block->bp_block, blk_rq_pos(req), blk_rq_sectors(req));  in dasd_fba_build_cp_regular()
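The DASD drivers here and in dasd_diag.c below convert the request's 512-byte sector range into device records by shifting with s2b_shift, the log2 ratio between the device block size and 512 bytes: the first record comes from the start sector, the last from the inclusive end sector. A hedged userspace sketch of that range conversion with illustrative names.

#include <stdio.h>

typedef unsigned long long sector_t;

/* first/last device record covered by a request, as in dasd_fba_build_cp_*():
 * s2b_shift is log2(device block size / 512) */
void request_to_records(sector_t pos, unsigned int sectors, unsigned int s2b_shift,
			sector_t *first_rec, sector_t *last_rec)
{
	*first_rec = pos >> s2b_shift;
	*last_rec  = (pos + sectors - 1) >> s2b_shift;
}

int main(void)
{
	sector_t first, last;

	/* 4 KiB device blocks -> s2b_shift = 3; request covers sectors 8..23 */
	request_to_records(8, 16, 3, &first, &last);
	printf("records %llu..%llu\n", first, last);   /* 1..2 */
	return 0;
}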
|
D | dasd_diag.c |
    522   first_rec = blk_rq_pos(req) >> block->s2b_shift;  in dasd_diag_build_cp()
    524   (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;  in dasd_diag_build_cp()
|
/Linux-v4.19/drivers/md/ |
D | dm-rq.c |
    170   blk_rq_pos(orig), tio->n_sectors, true,  in rq_end_stats()
    501   blk_rq_pos(rq));  in map_request()
    552   blk_rq_pos(orig), tio->n_sectors, false, 0,  in dm_start_request()
    674   pos = blk_rq_pos(rq);  in dm_old_request_fn()
|
/Linux-v4.19/drivers/ide/ |
D | ide-lib.c | 74 (unsigned long long)blk_rq_pos(rq)); in ide_dump_ata_error()
|
/Linux-v4.19/drivers/scsi/ |
D | sr.c |
    358   blk_rq_pos(SCpnt->request)) << 9;  in sr_done()
    474   if (((unsigned int)blk_rq_pos(rq) % (s_size >> 9)) ||  in sr_init_command()
    490   block = (unsigned int)blk_rq_pos(rq) / (s_size >> 9);  in sr_init_command()
|
D | sd_zbc.c |
    122   sector_t lba, sector = blk_rq_pos(rq);  in sd_zbc_setup_report_cmnd()
    236   sector_t sector = blk_rq_pos(rq);  in sd_zbc_setup_reset_cmnd()
|
D | sd.c |
    756   u64 sector = blk_rq_pos(rq) >> (ilog2(sdp->sector_size) - 9);  in sd_setup_unmap_cmnd()
    790   u64 sector = blk_rq_pos(rq) >> (ilog2(sdp->sector_size) - 9);  in sd_setup_write_same16_cmnd()
    820   u64 sector = blk_rq_pos(rq) >> (ilog2(sdp->sector_size) - 9);  in sd_setup_write_same10_cmnd()
    851   u64 sector = blk_rq_pos(rq) >> (ilog2(sdp->sector_size) - 9);  in sd_setup_write_zeroes_cmnd()
    947   sector_t sector = blk_rq_pos(rq);  in sd_setup_write_same_cmnd()
    1016  sector_t block = blk_rq_pos(rq);  in sd_setup_read_write_cmnd()
    1904  start_lba = sectors_to_logical(sdev, blk_rq_pos(req));  in sd_completed_bytes()
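sd.c repeatedly rescales the 512-byte sector position to the device's logical block number with blk_rq_pos(rq) >> (ilog2(sdp->sector_size) - 9): the shift is 0 for 512-byte sectors and 3 for 4096-byte sectors. A small userspace model of that scaling; the helper names are made up for illustration.

#include <stdio.h>
#include <stdint.h>

/* integer log2 of a power-of-two block size, standing in for ilog2() */
static unsigned int ilog2_u32(uint32_t v)
{
	unsigned int n = 0;

	while (v >>= 1)
		n++;
	return n;
}

/* scale a 512-byte sector number to the device's logical block number,
 * mirroring the sd_setup_*_cmnd() hits above */
uint64_t sectors_to_logical_model(uint64_t pos, uint32_t sector_size)
{
	return pos >> (ilog2_u32(sector_size) - 9);
}

int main(void)
{
	printf("%llu\n", (unsigned long long)sectors_to_logical_model(4096, 512));   /* 4096 */
	printf("%llu\n", (unsigned long long)sectors_to_logical_model(4096, 4096));  /* 512  */
	return 0;
}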
|
/Linux-v4.19/include/scsi/ |
D | scsi_cmnd.h | 307 return blk_rq_pos(scmd->request); in scsi_get_lba()
|
/Linux-v4.19/drivers/mtd/ |
D | mtd_blkdevs.c |
    83    block = blk_rq_pos(req) << 9 >> tr->blkshift;  in do_blktrans_request()
    92    if (blk_rq_pos(req) + blk_rq_cur_sectors(req) >  in do_blktrans_request()
|
/Linux-v4.19/include/trace/events/ |
D | block.h |
    133   __entry->sector = blk_rq_pos(rq);
    622   __entry->sector = blk_rq_pos(rq);
|
/Linux-v4.19/drivers/mmc/core/ |
D | block.c |
    1132  from = blk_rq_pos(req);  in mmc_blk_issue_discard_rq()
    1176  from = blk_rq_pos(req);  in mmc_blk_issue_secdiscard_rq()
    1255  if (!IS_ALIGNED(blk_rq_pos(req), card->ext_csd.rel_sectors))  in mmc_apply_rel_rw()
    1355  brq->data.blk_addr = blk_rq_pos(req);  in mmc_blk_data_prep()
    1379  (blk_rq_pos(req) + blk_rq_sectors(req) ==  in mmc_blk_data_prep()
    1598  brq->cmd.arg = blk_rq_pos(req);  in mmc_blk_rw_rq_prep()
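In the MMC block driver, blk_rq_pos() feeds the command argument directly (hit 1598) and the data block address (hit 1355). The sketch below models the common convention that block-addressed (high-capacity) cards take a sector number while older, byte-addressed cards need the position shifted into bytes; that shift is not visible in the hits above, and all names here are illustrative.

#include <stdio.h>

typedef unsigned long long sector_t;

/* model of how the command argument is derived from the request position:
 * block-addressed cards take a sector number, byte-addressed cards take a
 * byte offset, hence the <<9 in the latter case */
unsigned int mmc_cmd_arg_model(sector_t pos, int card_is_blockaddr)
{
	return card_is_blockaddr ? (unsigned int)pos
				 : (unsigned int)(pos << 9);
}

int main(void)
{
	printf("blockaddr card: arg=%u\n", mmc_cmd_arg_model(2048, 1)); /* 2048    */
	printf("byteaddr card:  arg=%u\n", mmc_cmd_arg_model(2048, 0)); /* 1048576 */
	return 0;
}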
|