Lines Matching refs: io_req
480 struct io_thread_req *io_req = (*irq_req_buffer)[count]; in ubd_handler() local
482 if ((io_req->error == BLK_STS_NOTSUPP) && (req_op(io_req->req) == REQ_OP_DISCARD)) { in ubd_handler()
483 blk_queue_max_discard_sectors(io_req->req->q, 0); in ubd_handler()
484 blk_queue_max_write_zeroes_sectors(io_req->req->q, 0); in ubd_handler()
485 blk_queue_flag_clear(QUEUE_FLAG_DISCARD, io_req->req->q); in ubd_handler()
487 blk_mq_end_request(io_req->req, io_req->error); in ubd_handler()
488 kfree(io_req); in ubd_handler()
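
These references appear to come from the UML ubd block driver (arch/um/drivers/ubd_kern.c). Below is a minimal sketch of the completion loop that the ubd_handler() lines belong to; how n completed requests end up in irq_req_buffer is omitted and assumed from context. This and the later sketches reuse the driver's internal types and helpers (struct ubd, struct io_thread_req, the io_desc descriptor array, thread_fd, os_write_file(), cowify_req()) as they appear in the matched lines; anything not shown there is an assumption.

        /* Hedged sketch of ubd_handler()'s completion loop. */
        for (count = 0; count < n; count++) {
                struct io_thread_req *io_req = (*irq_req_buffer)[count];

                /* The host file rejected a discard: stop advertising discard
                 * and write-zeroes support on this queue so it is not
                 * attempted again. */
                if ((io_req->error == BLK_STS_NOTSUPP) &&
                    (req_op(io_req->req) == REQ_OP_DISCARD)) {
                        blk_queue_max_discard_sectors(io_req->req->q, 0);
                        blk_queue_max_write_zeroes_sectors(io_req->req->q, 0);
                        blk_queue_flag_clear(QUEUE_FLAG_DISCARD, io_req->req->q);
                }

                /* Complete the block layer request and free the per-request
                 * io_thread_req allocated in ubd_alloc_req(). */
                blk_mq_end_request(io_req->req, io_req->error);
                kfree(io_req);
        }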
1255 static void ubd_map_req(struct ubd *dev, struct io_thread_req *io_req, in ubd_map_req() argument
1261 unsigned long byte_offset = io_req->offset; in ubd_map_req()
1265 io_req->io_desc[0].buffer = NULL; in ubd_map_req()
1266 io_req->io_desc[0].length = blk_rq_bytes(req); in ubd_map_req()
1269 BUG_ON(i >= io_req->desc_cnt); in ubd_map_req()
1271 io_req->io_desc[i].buffer = bvec_virt(&bvec); in ubd_map_req()
1272 io_req->io_desc[i].length = bvec.bv_len; in ubd_map_req()
1278 for (i = 0; i < io_req->desc_cnt; i++) { in ubd_map_req()
1279 cowify_req(io_req, &io_req->io_desc[i], byte_offset, in ubd_map_req()
1282 byte_offset += io_req->io_desc[i].length; in ubd_map_req()
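
A hedged reconstruction of ubd_map_req() around the matched lines: the discard/write-zeroes branch, the rq_for_each_segment() walk, and the dev->cow.file check are inferred from context, and the trailing cowify_req() arguments (the COW bitmap parameters) are assumptions.

        static void ubd_map_req(struct ubd *dev, struct io_thread_req *io_req,
                                struct request *req)
        {
                struct bio_vec bvec;
                struct req_iterator iter;
                int i = 0;
                unsigned long byte_offset = io_req->offset;

                if (req_op(req) == REQ_OP_DISCARD ||
                    req_op(req) == REQ_OP_WRITE_ZEROES) {
                        /* No data payload: one descriptor covering the range. */
                        io_req->io_desc[0].buffer = NULL;
                        io_req->io_desc[0].length = blk_rq_bytes(req);
                } else {
                        /* One descriptor per physical segment of the request. */
                        rq_for_each_segment(bvec, req, iter) {
                                BUG_ON(i >= io_req->desc_cnt);

                                io_req->io_desc[i].buffer = bvec_virt(&bvec);
                                io_req->io_desc[i].length = bvec.bv_len;
                                i++;
                        }
                }

                if (dev->cow.file) {
                        /* Per-descriptor COW fixups; the bitmap arguments
                         * below are assumed from context. */
                        for (i = 0; i < io_req->desc_cnt; i++) {
                                cowify_req(io_req, &io_req->io_desc[i], byte_offset,
                                           dev->cow.bitmap, dev->cow.bitmap_offset,
                                           dev->cow.bitmap_len);
                                byte_offset += io_req->io_desc[i].length;
                        }
                }
        }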
1291 struct io_thread_req *io_req; in ubd_alloc_req() local
1294 io_req = kmalloc(sizeof(*io_req) + in ubd_alloc_req()
1297 if (!io_req) in ubd_alloc_req()
1300 io_req->req = req; in ubd_alloc_req()
1302 io_req->fds[0] = dev->cow.fd; in ubd_alloc_req()
1304 io_req->fds[0] = dev->fd; in ubd_alloc_req()
1305 io_req->error = 0; in ubd_alloc_req()
1306 io_req->sectorsize = SECTOR_SIZE; in ubd_alloc_req()
1307 io_req->fds[1] = dev->fd; in ubd_alloc_req()
1308 io_req->offset = (u64) blk_rq_pos(req) << SECTOR_SHIFT; in ubd_alloc_req()
1309 io_req->offsets[0] = 0; in ubd_alloc_req()
1310 io_req->offsets[1] = dev->cow.data_offset; in ubd_alloc_req()
1313 io_req->io_desc[i].sector_mask = 0; in ubd_alloc_req()
1314 io_req->io_desc[i].cow_offset = -1; in ubd_alloc_req()
1317 return io_req; in ubd_alloc_req()
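
A hedged reconstruction of ubd_alloc_req(): the continuation of the kmalloc() size expression, the GFP flags, and the if/else on dev->cow.file are assumptions filled in around the matched lines.

        static struct io_thread_req *ubd_alloc_req(struct ubd *dev,
                                                   struct request *req,
                                                   int desc_cnt)
        {
                struct io_thread_req *io_req;
                int i;

                /* Single allocation: the header plus desc_cnt trailing
                 * descriptor slots (GFP_ATOMIC is an assumption, since this
                 * runs in the I/O submission path). */
                io_req = kmalloc(sizeof(*io_req) +
                                 (desc_cnt * sizeof(struct io_desc)),
                                 GFP_ATOMIC);
                if (!io_req)
                        return NULL;

                io_req->req = req;
                /* The dev->cow.file check is inferred: with a COW layer the
                 * two host fds differ, otherwise both refer to the same file. */
                if (dev->cow.file)
                        io_req->fds[0] = dev->cow.fd;
                else
                        io_req->fds[0] = dev->fd;
                io_req->error = 0;
                io_req->sectorsize = SECTOR_SIZE;
                io_req->fds[1] = dev->fd;
                io_req->offset = (u64) blk_rq_pos(req) << SECTOR_SHIFT;
                io_req->offsets[0] = 0;
                io_req->offsets[1] = dev->cow.data_offset;

                /* No COW remapping yet for any descriptor. */
                for (i = 0; i < desc_cnt; i++) {
                        io_req->io_desc[i].sector_mask = 0;
                        io_req->io_desc[i].cow_offset = -1;
                }

                return io_req;
        }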
1323 struct io_thread_req *io_req; in ubd_submit_request() local
1334 io_req = ubd_alloc_req(dev, req, segs); in ubd_submit_request()
1335 if (!io_req) in ubd_submit_request()
1338 io_req->desc_cnt = segs; in ubd_submit_request()
1340 ubd_map_req(dev, io_req, req); in ubd_submit_request()
1342 ret = os_write_file(thread_fd, &io_req, sizeof(io_req)); in ubd_submit_request()
1343 if (ret != sizeof(io_req)) { in ubd_submit_request()
1346 kfree(io_req); in ubd_submit_request()
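
A hedged sketch of ubd_submit_request() built around the matched lines; the segment-count calculation and the error handling after a short write are assumptions.

        static int ubd_submit_request(struct ubd *dev, struct request *req)
        {
                struct io_thread_req *io_req;
                int segs, ret;

                /* Assumed segment accounting: flushes carry no data, discard
                 * and write-zeroes use a single descriptor, reads and writes
                 * use one descriptor per physical segment. */
                if (req_op(req) == REQ_OP_FLUSH)
                        segs = 0;
                else if (req_op(req) == REQ_OP_DISCARD ||
                         req_op(req) == REQ_OP_WRITE_ZEROES)
                        segs = 1;
                else
                        segs = blk_rq_nr_phys_segments(req);

                io_req = ubd_alloc_req(dev, req, segs);
                if (!io_req)
                        return -ENOMEM;

                io_req->desc_cnt = segs;
                if (segs)
                        ubd_map_req(dev, io_req, req);

                /* Hand the request to the helper I/O thread by writing the
                 * pointer down the thread_fd pipe; a short write means it was
                 * not queued, so the io_thread_req is freed here. */
                ret = os_write_file(thread_fd, &io_req, sizeof(io_req));
                if (ret != sizeof(io_req)) {
                        kfree(io_req);
                        return ret < 0 ? ret : -EIO;  /* assumed error path */
                }

                return 0;
        }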