Lines matching full:req
74 void nvmet_execute_identify_cns_cs_ctrl(struct nvmet_req *req) in nvmet_execute_identify_cns_cs_ctrl() argument
76 u8 zasl = req->sq->ctrl->subsys->zasl; in nvmet_execute_identify_cns_cs_ctrl()
77 struct nvmet_ctrl *ctrl = req->sq->ctrl; in nvmet_execute_identify_cns_cs_ctrl()
92 status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id)); in nvmet_execute_identify_cns_cs_ctrl()
96 nvmet_req_complete(req, status); in nvmet_execute_identify_cns_cs_ctrl()
99 void nvmet_execute_identify_cns_cs_ns(struct nvmet_req *req) in nvmet_execute_identify_cns_cs_ns() argument
105 if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) { in nvmet_execute_identify_cns_cs_ns()
106 req->error_loc = offsetof(struct nvme_identify, nsid); in nvmet_execute_identify_cns_cs_ns()
117 status = nvmet_req_find_ns(req); in nvmet_execute_identify_cns_cs_ns()
121 if (!bdev_is_zoned(req->ns->bdev)) { in nvmet_execute_identify_cns_cs_ns()
122 req->error_loc = offsetof(struct nvme_identify, nsid); in nvmet_execute_identify_cns_cs_ns()
126 nvmet_ns_revalidate(req->ns); in nvmet_execute_identify_cns_cs_ns()
127 zsze = (bdev_zone_sectors(req->ns->bdev) << 9) >> in nvmet_execute_identify_cns_cs_ns()
128 req->ns->blksize_shift; in nvmet_execute_identify_cns_cs_ns()
130 id_zns->mor = cpu_to_le32(bdev_max_open_zones(req->ns->bdev)); in nvmet_execute_identify_cns_cs_ns()
131 id_zns->mar = cpu_to_le32(bdev_max_active_zones(req->ns->bdev)); in nvmet_execute_identify_cns_cs_ns()
134 status = nvmet_copy_to_sgl(req, 0, id_zns, sizeof(*id_zns)); in nvmet_execute_identify_cns_cs_ns()
137 nvmet_req_complete(req, status); in nvmet_execute_identify_cns_cs_ns()
140 static u16 nvmet_bdev_validate_zone_mgmt_recv(struct nvmet_req *req) in nvmet_bdev_validate_zone_mgmt_recv() argument
142 sector_t sect = nvmet_lba_to_sect(req->ns, req->cmd->zmr.slba); in nvmet_bdev_validate_zone_mgmt_recv()
143 u32 out_bufsize = (le32_to_cpu(req->cmd->zmr.numd) + 1) << 2; in nvmet_bdev_validate_zone_mgmt_recv()
145 if (sect >= get_capacity(req->ns->bdev->bd_disk)) { in nvmet_bdev_validate_zone_mgmt_recv()
146 req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, slba); in nvmet_bdev_validate_zone_mgmt_recv()
151 req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, numd); in nvmet_bdev_validate_zone_mgmt_recv()
155 if (req->cmd->zmr.zra != NVME_ZRA_ZONE_REPORT) { in nvmet_bdev_validate_zone_mgmt_recv()
156 req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, zra); in nvmet_bdev_validate_zone_mgmt_recv()
160 switch (req->cmd->zmr.pr) { in nvmet_bdev_validate_zone_mgmt_recv()
165 req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, pr); in nvmet_bdev_validate_zone_mgmt_recv()
169 switch (req->cmd->zmr.zrasf) { in nvmet_bdev_validate_zone_mgmt_recv()
180 req->error_loc = in nvmet_bdev_validate_zone_mgmt_recv()
189 struct nvmet_req *req; member
217 zdesc.zcap = nvmet_sect_to_lba(rz->req->ns, z->capacity); in nvmet_bdev_report_zone_cb()
218 zdesc.zslba = nvmet_sect_to_lba(rz->req->ns, z->start); in nvmet_bdev_report_zone_cb()
219 zdesc.wp = nvmet_sect_to_lba(rz->req->ns, z->wp); in nvmet_bdev_report_zone_cb()
224 status = nvmet_copy_to_sgl(rz->req, rz->out_buf_offset, &zdesc, in nvmet_bdev_report_zone_cb()
237 static unsigned long nvmet_req_nr_zones_from_slba(struct nvmet_req *req) in nvmet_req_nr_zones_from_slba() argument
239 unsigned int sect = nvmet_lba_to_sect(req->ns, req->cmd->zmr.slba); in nvmet_req_nr_zones_from_slba()
241 return blkdev_nr_zones(req->ns->bdev->bd_disk) - in nvmet_req_nr_zones_from_slba()
242 (sect >> ilog2(bdev_zone_sectors(req->ns->bdev))); in nvmet_req_nr_zones_from_slba()
245 static unsigned long get_nr_zones_from_buf(struct nvmet_req *req, u32 bufsize) in get_nr_zones_from_buf() argument
256 struct nvmet_req *req = container_of(w, struct nvmet_req, z.zmgmt_work); in nvmet_bdev_zone_zmgmt_recv_work() local
257 sector_t start_sect = nvmet_lba_to_sect(req->ns, req->cmd->zmr.slba); in nvmet_bdev_zone_zmgmt_recv_work()
258 unsigned long req_slba_nr_zones = nvmet_req_nr_zones_from_slba(req); in nvmet_bdev_zone_zmgmt_recv_work()
259 u32 out_bufsize = (le32_to_cpu(req->cmd->zmr.numd) + 1) << 2; in nvmet_bdev_zone_zmgmt_recv_work()
264 .out_nr_zones = get_nr_zones_from_buf(req, out_bufsize), in nvmet_bdev_zone_zmgmt_recv_work()
267 .zrasf = req->cmd->zmr.zrasf, in nvmet_bdev_zone_zmgmt_recv_work()
269 .req = req, in nvmet_bdev_zone_zmgmt_recv_work()
272 status = nvmet_bdev_validate_zone_mgmt_recv(req); in nvmet_bdev_zone_zmgmt_recv_work()
281 ret = blkdev_report_zones(req->ns->bdev, start_sect, req_slba_nr_zones, in nvmet_bdev_zone_zmgmt_recv_work()
292 if (req->cmd->zmr.pr) in nvmet_bdev_zone_zmgmt_recv_work()
296 status = nvmet_copy_to_sgl(req, 0, &nr_zones, sizeof(nr_zones)); in nvmet_bdev_zone_zmgmt_recv_work()
299 nvmet_req_complete(req, status); in nvmet_bdev_zone_zmgmt_recv_work()
302 void nvmet_bdev_execute_zone_mgmt_recv(struct nvmet_req *req) in nvmet_bdev_execute_zone_mgmt_recv() argument
304 INIT_WORK(&req->z.zmgmt_work, nvmet_bdev_zone_zmgmt_recv_work); in nvmet_bdev_execute_zone_mgmt_recv()
305 queue_work(zbd_wq, &req->z.zmgmt_work); in nvmet_bdev_execute_zone_mgmt_recv()
339 struct nvmet_req *req; member
346 switch (zsa_req_op(data->req->cmd->zms.zsa)) { in zmgmt_send_scan_cb()
383 static u16 nvmet_bdev_zone_mgmt_emulate_all(struct nvmet_req *req) in nvmet_bdev_zone_mgmt_emulate_all() argument
385 struct block_device *bdev = req->ns->bdev; in nvmet_bdev_zone_mgmt_emulate_all()
392 .req = req, in nvmet_bdev_zone_mgmt_emulate_all()
416 bio->bi_opf = zsa_req_op(req->cmd->zms.zsa) | REQ_SYNC; in nvmet_bdev_zone_mgmt_emulate_all()
436 static u16 nvmet_bdev_execute_zmgmt_send_all(struct nvmet_req *req) in nvmet_bdev_execute_zmgmt_send_all() argument
440 switch (zsa_req_op(req->cmd->zms.zsa)) { in nvmet_bdev_execute_zmgmt_send_all()
442 ret = blkdev_zone_mgmt(req->ns->bdev, REQ_OP_ZONE_RESET, 0, in nvmet_bdev_execute_zmgmt_send_all()
443 get_capacity(req->ns->bdev->bd_disk), in nvmet_bdev_execute_zmgmt_send_all()
451 return nvmet_bdev_zone_mgmt_emulate_all(req); in nvmet_bdev_execute_zmgmt_send_all()
454 req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, zsa); in nvmet_bdev_execute_zmgmt_send_all()
463 struct nvmet_req *req = container_of(w, struct nvmet_req, z.zmgmt_work); in nvmet_bdev_zmgmt_send_work() local
464 sector_t sect = nvmet_lba_to_sect(req->ns, req->cmd->zms.slba); in nvmet_bdev_zmgmt_send_work()
465 enum req_opf op = zsa_req_op(req->cmd->zms.zsa); in nvmet_bdev_zmgmt_send_work()
466 struct block_device *bdev = req->ns->bdev; in nvmet_bdev_zmgmt_send_work()
472 req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, zsa); in nvmet_bdev_zmgmt_send_work()
478 if (req->cmd->zms.select_all) { in nvmet_bdev_zmgmt_send_work()
479 status = nvmet_bdev_execute_zmgmt_send_all(req); in nvmet_bdev_zmgmt_send_work()
484 req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, slba); in nvmet_bdev_zmgmt_send_work()
490 req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, slba); in nvmet_bdev_zmgmt_send_work()
500 nvmet_req_complete(req, status); in nvmet_bdev_zmgmt_send_work()
503 void nvmet_bdev_execute_zone_mgmt_send(struct nvmet_req *req) in nvmet_bdev_execute_zone_mgmt_send() argument
505 INIT_WORK(&req->z.zmgmt_work, nvmet_bdev_zmgmt_send_work); in nvmet_bdev_execute_zone_mgmt_send()
506 queue_work(zbd_wq, &req->z.zmgmt_work); in nvmet_bdev_execute_zone_mgmt_send()
511 struct nvmet_req *req = bio->bi_private; in nvmet_bdev_zone_append_bio_done() local
514 req->cqe->result.u64 = in nvmet_bdev_zone_append_bio_done()
515 nvmet_sect_to_lba(req->ns, bio->bi_iter.bi_sector); in nvmet_bdev_zone_append_bio_done()
518 nvmet_req_complete(req, blk_to_nvme_status(req, bio->bi_status)); in nvmet_bdev_zone_append_bio_done()
519 nvmet_req_bio_put(req, bio); in nvmet_bdev_zone_append_bio_done()
522 void nvmet_bdev_execute_zone_append(struct nvmet_req *req) in nvmet_bdev_execute_zone_append() argument
524 sector_t sect = nvmet_lba_to_sect(req->ns, req->cmd->rw.slba); in nvmet_bdev_execute_zone_append()
532 if (!nvmet_check_transfer_len(req, nvmet_rw_data_len(req))) in nvmet_bdev_execute_zone_append()
535 if (!req->sg_cnt) { in nvmet_bdev_execute_zone_append()
536 nvmet_req_complete(req, 0); in nvmet_bdev_execute_zone_append()
540 if (sect >= get_capacity(req->ns->bdev->bd_disk)) { in nvmet_bdev_execute_zone_append()
541 req->error_loc = offsetof(struct nvme_rw_command, slba); in nvmet_bdev_execute_zone_append()
546 if (sect & (bdev_zone_sectors(req->ns->bdev) - 1)) { in nvmet_bdev_execute_zone_append()
547 req->error_loc = offsetof(struct nvme_rw_command, slba); in nvmet_bdev_execute_zone_append()
552 if (nvmet_use_inline_bvec(req)) { in nvmet_bdev_execute_zone_append()
553 bio = &req->z.inline_bio; in nvmet_bdev_execute_zone_append()
554 bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec)); in nvmet_bdev_execute_zone_append()
556 bio = bio_alloc(GFP_KERNEL, req->sg_cnt); in nvmet_bdev_execute_zone_append()
561 bio_set_dev(bio, req->ns->bdev); in nvmet_bdev_execute_zone_append()
563 bio->bi_private = req; in nvmet_bdev_execute_zone_append()
564 if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA)) in nvmet_bdev_execute_zone_append()
567 for_each_sg(req->sg, sg, req->sg_cnt, sg_cnt) { in nvmet_bdev_execute_zone_append()
581 if (total_len != nvmet_rw_data_len(req)) { in nvmet_bdev_execute_zone_append()
590 nvmet_req_bio_put(req, bio); in nvmet_bdev_execute_zone_append()
592 nvmet_req_complete(req, status); in nvmet_bdev_execute_zone_append()
595 u16 nvmet_bdev_zns_parse_io_cmd(struct nvmet_req *req) in nvmet_bdev_zns_parse_io_cmd() argument
597 struct nvme_command *cmd = req->cmd; in nvmet_bdev_zns_parse_io_cmd()
601 req->execute = nvmet_bdev_execute_zone_append; in nvmet_bdev_zns_parse_io_cmd()
604 req->execute = nvmet_bdev_execute_zone_mgmt_recv; in nvmet_bdev_zns_parse_io_cmd()
607 req->execute = nvmet_bdev_execute_zone_mgmt_send; in nvmet_bdev_zns_parse_io_cmd()
610 return nvmet_bdev_parse_io_cmd(req); in nvmet_bdev_zns_parse_io_cmd()