Lines matching refs: req (identifier cross-reference for the NVMe target zoned-namespace backend, drivers/nvme/target/zns.c in the Linux kernel; the leading number on each line is that file's own line number)
73 void nvmet_execute_identify_cns_cs_ctrl(struct nvmet_req *req) in nvmet_execute_identify_cns_cs_ctrl() argument
75 u8 zasl = req->sq->ctrl->subsys->zasl; in nvmet_execute_identify_cns_cs_ctrl()
76 struct nvmet_ctrl *ctrl = req->sq->ctrl; in nvmet_execute_identify_cns_cs_ctrl()
91 status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id)); in nvmet_execute_identify_cns_cs_ctrl()
95 nvmet_req_complete(req, status); in nvmet_execute_identify_cns_cs_ctrl()
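The excerpt shows only the req-referencing lines of the ZNS Identify Controller handler. A minimal sketch of the handler's overall shape, assuming a kzalloc'd nvme_id_ctrl_zns buffer and a generic internal-error status; the min_t() cap of zasl by the transport MDTS is an inference from ctrl being fetched on line 76, not something visible above:

    /* Sketch, not the verbatim kernel function: build the ZNS Identify
     * Controller data structure and copy it back through the SGL. */
    void nvmet_execute_identify_cns_cs_ctrl(struct nvmet_req *req)
    {
        u8 zasl = req->sq->ctrl->subsys->zasl;
        struct nvmet_ctrl *ctrl = req->sq->ctrl;
        struct nvme_id_ctrl_zns *id;
        u16 status;

        id = kzalloc(sizeof(*id), GFP_KERNEL);  /* assumption: heap buffer */
        if (!id) {
            status = NVME_SC_INTERNAL;
            goto out;
        }

        /* ZASL is, like MDTS, a power-of-two exponent in units of the
         * minimum memory page size; capping it by the transport's MDTS
         * is assumed here. */
        if (ctrl->ops->get_mdts)
            id->zasl = min_t(u8, ctrl->ops->get_mdts(ctrl), zasl);
        else
            id->zasl = zasl;

        status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
        kfree(id);
    out:
        nvmet_req_complete(req, status);
    }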
98 void nvmet_execute_identify_cns_cs_ns(struct nvmet_req *req) in nvmet_execute_identify_cns_cs_ns() argument
105 if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) { in nvmet_execute_identify_cns_cs_ns()
106 req->error_loc = offsetof(struct nvme_identify, nsid); in nvmet_execute_identify_cns_cs_ns()
117 status = nvmet_req_find_ns(req); in nvmet_execute_identify_cns_cs_ns()
121 if (!bdev_is_zoned(req->ns->bdev)) { in nvmet_execute_identify_cns_cs_ns()
122 req->error_loc = offsetof(struct nvme_identify, nsid); in nvmet_execute_identify_cns_cs_ns()
126 if (nvmet_ns_revalidate(req->ns)) { in nvmet_execute_identify_cns_cs_ns()
127 mutex_lock(&req->ns->subsys->lock); in nvmet_execute_identify_cns_cs_ns()
128 nvmet_ns_changed(req->ns->subsys, req->ns->nsid); in nvmet_execute_identify_cns_cs_ns()
129 mutex_unlock(&req->ns->subsys->lock); in nvmet_execute_identify_cns_cs_ns()
131 zsze = (bdev_zone_sectors(req->ns->bdev) << 9) >> in nvmet_execute_identify_cns_cs_ns()
132 req->ns->blksize_shift; in nvmet_execute_identify_cns_cs_ns()
135 mor = bdev_max_open_zones(req->ns->bdev); in nvmet_execute_identify_cns_cs_ns()
142 mar = bdev_max_active_zones(req->ns->bdev); in nvmet_execute_identify_cns_cs_ns()
150 status = nvmet_copy_to_sgl(req, 0, id_zns, sizeof(*id_zns)); in nvmet_execute_identify_cns_cs_ns()
153 nvmet_req_complete(req, status); in nvmet_execute_identify_cns_cs_ns()
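The ZNS Identify Namespace handler above rejects the broadcast NSID, requires the backing block device to be zoned, revalidates the namespace size (raising a namespace-changed notification if it moved), and fills the ZNS identify fields. zsze converts the block layer's zone size, kept in 512-byte sectors, into namespace logical blocks; mor/mar come from the block-layer limits (the unmatched lines in between presumably translate the block layer's "0 means unlimited" into NVMe's all-ones, 0-based fields). A worked example of the zsze expression on lines 131-132:

    #include <stdio.h>
    #include <stdint.h>

    /* zsze = (zone size in 512 B sectors << 9) >> blksize_shift,
     * i.e. sectors -> bytes -> logical blocks.  Illustrative values. */
    int main(void)
    {
        uint64_t zone_sectors = 524288;   /* 256 MiB zone */
        unsigned int blksize_shift = 12;  /* 4096 B logical blocks */
        uint64_t zsze = (zone_sectors << 9) >> blksize_shift;

        printf("zsze = %llu logical blocks\n", (unsigned long long)zsze);
        return 0;  /* prints zsze = 65536 logical blocks */
    }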
156 static u16 nvmet_bdev_validate_zone_mgmt_recv(struct nvmet_req *req) in nvmet_bdev_validate_zone_mgmt_recv() argument
158 sector_t sect = nvmet_lba_to_sect(req->ns, req->cmd->zmr.slba); in nvmet_bdev_validate_zone_mgmt_recv()
159 u32 out_bufsize = (le32_to_cpu(req->cmd->zmr.numd) + 1) << 2; in nvmet_bdev_validate_zone_mgmt_recv()
161 if (sect >= get_capacity(req->ns->bdev->bd_disk)) { in nvmet_bdev_validate_zone_mgmt_recv()
162 req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, slba); in nvmet_bdev_validate_zone_mgmt_recv()
167 req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, numd); in nvmet_bdev_validate_zone_mgmt_recv()
171 if (req->cmd->zmr.zra != NVME_ZRA_ZONE_REPORT) { in nvmet_bdev_validate_zone_mgmt_recv()
172 req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, zra); in nvmet_bdev_validate_zone_mgmt_recv()
176 switch (req->cmd->zmr.pr) { in nvmet_bdev_validate_zone_mgmt_recv()
181 req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, pr); in nvmet_bdev_validate_zone_mgmt_recv()
185 switch (req->cmd->zmr.zrasf) { in nvmet_bdev_validate_zone_mgmt_recv()
196 req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, zrasf); in nvmet_bdev_validate_zone_mgmt_recv()
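Validation, in order: slba must fall inside the device, the host buffer (numd) must hold at least a report header, only the plain zone-report action (zra) is accepted, and the partial-report flag (pr) and report filter (zrasf) must be values the backend understands; every failure points error_loc at the offending command field. numd is a zero-based dword count, so the host buffer size in bytes is (numd + 1) * 4:

    #include <stdio.h>
    #include <stdint.h>

    /* numd is zero-based and counts dwords; (numd + 1) << 2 is the
     * buffer size in bytes.  Illustrative value. */
    int main(void)
    {
        uint32_t numd = 255;                     /* 256 dwords requested */
        uint32_t out_bufsize = (numd + 1) << 2;

        printf("out_bufsize = %u bytes\n", out_bufsize);  /* 1024 */
        return 0;
    }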
205 struct nvmet_req *req; in struct nvmet_report_zone_data member
233 zdesc.zcap = nvmet_sect_to_lba(rz->req->ns, z->capacity); in nvmet_bdev_report_zone_cb()
234 zdesc.zslba = nvmet_sect_to_lba(rz->req->ns, z->start); in nvmet_bdev_report_zone_cb()
235 zdesc.wp = nvmet_sect_to_lba(rz->req->ns, z->wp); in nvmet_bdev_report_zone_cb()
240 status = nvmet_copy_to_sgl(rz->req, rz->out_buf_offset, &zdesc, in nvmet_bdev_report_zone_cb()
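The report callback runs once per zone and rewrites each blk_zone into a 64-byte NVMe zone descriptor, converting start, capacity and write pointer from 512-byte sectors to namespace LBAs before copying the descriptor out at a running rz->out_buf_offset. The two conversion helpers shift by (blksize_shift - 9), since a logical block is 2^blksize_shift bytes and a sector is 512; a self-contained userspace rendition, assuming those upstream definitions:

    #include <stdio.h>
    #include <stdint.h>

    /* Userspace sketch of nvmet_lba_to_sect()/nvmet_sect_to_lba(),
     * assuming both are a shift by (blksize_shift - 9). */
    static uint64_t lba_to_sect(unsigned int blksize_shift, uint64_t lba)
    {
        return lba << (blksize_shift - 9);
    }

    static uint64_t sect_to_lba(unsigned int blksize_shift, uint64_t sect)
    {
        return sect >> (blksize_shift - 9);
    }

    int main(void)
    {
        /* 4096 B logical blocks: one LBA is eight 512 B sectors */
        printf("%llu %llu\n",
               (unsigned long long)lba_to_sect(12, 100),   /* 800 */
               (unsigned long long)sect_to_lba(12, 800));  /* 100 */
        return 0;
    }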
253 static unsigned long nvmet_req_nr_zones_from_slba(struct nvmet_req *req) in nvmet_req_nr_zones_from_slba() argument
255 unsigned int sect = nvmet_lba_to_sect(req->ns, req->cmd->zmr.slba); in nvmet_req_nr_zones_from_slba()
257 return bdev_nr_zones(req->ns->bdev) - in nvmet_req_nr_zones_from_slba()
258 (sect >> ilog2(bdev_zone_sectors(req->ns->bdev))); in nvmet_req_nr_zones_from_slba()
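Zones remaining from slba to the end of the device: the total zone count minus the index of the zone containing slba; because the block layer guarantees power-of-two zone sizes, the index is a right shift by ilog2(zone_sectors). Note that line 255 declares sect as unsigned int while nvmet_lba_to_sect() yields a sector count (sector_t upstream), which could truncate on devices past 2 TiB of 512-byte sectors. Worked example:

    #include <stdio.h>
    #include <stdint.h>

    /* Zones left from a start sector to device end.  ilog2() is
     * replaced by the known constant for these illustrative values. */
    int main(void)
    {
        uint64_t nr_zones_total = 1000;
        uint64_t zone_sectors = 524288;     /* 2^19 sectors per zone */
        uint64_t sect = 10 * zone_sectors;  /* slba starts zone 10 */

        printf("%llu zones\n",
               (unsigned long long)(nr_zones_total - (sect >> 19)));
        return 0;  /* prints 990 zones */
    }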
261 static unsigned long get_nr_zones_from_buf(struct nvmet_req *req, u32 bufsize) in get_nr_zones_from_buf() argument
272 struct nvmet_req *req = container_of(w, struct nvmet_req, z.zmgmt_work); in nvmet_bdev_zone_zmgmt_recv_work() local
273 sector_t start_sect = nvmet_lba_to_sect(req->ns, req->cmd->zmr.slba); in nvmet_bdev_zone_zmgmt_recv_work()
274 unsigned long req_slba_nr_zones = nvmet_req_nr_zones_from_slba(req); in nvmet_bdev_zone_zmgmt_recv_work()
275 u32 out_bufsize = (le32_to_cpu(req->cmd->zmr.numd) + 1) << 2; in nvmet_bdev_zone_zmgmt_recv_work()
280 .out_nr_zones = get_nr_zones_from_buf(req, out_bufsize), in nvmet_bdev_zone_zmgmt_recv_work()
283 .zrasf = req->cmd->zmr.zrasf, in nvmet_bdev_zone_zmgmt_recv_work()
285 .req = req, in nvmet_bdev_zone_zmgmt_recv_work()
288 status = nvmet_bdev_validate_zone_mgmt_recv(req); in nvmet_bdev_zone_zmgmt_recv_work()
297 ret = blkdev_report_zones(req->ns->bdev, start_sect, req_slba_nr_zones, nvmet_bdev_report_zone_cb, &rz); in nvmet_bdev_zone_zmgmt_recv_work()
308 if (req->cmd->zmr.pr) in nvmet_bdev_zone_zmgmt_recv_work()
312 status = nvmet_copy_to_sgl(req, 0, &nr_zones, sizeof(nr_zones)); in nvmet_bdev_zone_zmgmt_recv_work()
315 nvmet_req_complete(req, status); in nvmet_bdev_zone_zmgmt_recv_work()
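The work item ties the pieces together: validate the command, walk the zones through blkdev_report_zones() with the callback above, then write the 8-byte zone count back at offset 0. The count depends on the partial-report bit: with zmr.pr set it is the number of descriptors actually returned, otherwise the number of zones matching zrasf from slba onward. get_nr_zones_from_buf() on line 261 presumably caps the walk at however many 64-byte descriptors fit after the report header. The payload layout being filled, abridged from the NVMe ZNS specification (a sketch, not the kernel's struct nvme_zone_report verbatim):

    #include <stdint.h>

    /* Zone Management Receive payload: count header, then descriptors. */
    struct zns_zone_report {
        uint64_t nr_zones;   /* little-endian on the wire; written last */
        uint8_t  rsvd8[56];
        /* followed by 64-byte zone descriptors */
    };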
318 void nvmet_bdev_execute_zone_mgmt_recv(struct nvmet_req *req) in nvmet_bdev_execute_zone_mgmt_recv() argument
320 INIT_WORK(&req->z.zmgmt_work, nvmet_bdev_zone_zmgmt_recv_work); in nvmet_bdev_execute_zone_mgmt_recv()
321 queue_work(zbd_wq, &req->z.zmgmt_work); in nvmet_bdev_execute_zone_mgmt_recv()
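Zone reporting can sleep (it does I/O), so the command handler itself only queues: the real work runs in process context on the dedicated zbd_wq workqueue. The skeleton of that pattern, with hypothetical my_* names standing in for the nvmet types:

    #include <linux/workqueue.h>

    /* The fast path only schedules; the work item runs where it may
     * sleep (blkdev_report_zones() and the SGL copies can block). */
    struct my_req {
        struct work_struct work;
    };

    static void my_handler(struct work_struct *w)
    {
        struct my_req *r = container_of(w, struct my_req, work);
        /* ... sleeping work happens here ... */
    }

    static void my_execute(struct my_req *r)
    {
        INIT_WORK(&r->work, my_handler);
        queue_work(system_wq, &r->work);  /* zns.c uses its own zbd_wq */
    }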
355 struct nvmet_req *req; in struct nvmet_zone_mgmt_send_all_data member
362 switch (zsa_req_op(data->req->cmd->zms.zsa)) { in zmgmt_send_scan_cb()
399 static u16 nvmet_bdev_zone_mgmt_emulate_all(struct nvmet_req *req) in nvmet_bdev_zone_mgmt_emulate_all() argument
401 struct block_device *bdev = req->ns->bdev; in nvmet_bdev_zone_mgmt_emulate_all()
407 .req = req, in nvmet_bdev_zone_mgmt_emulate_all()
431 zsa_req_op(req->cmd->zms.zsa) | REQ_SYNC, in nvmet_bdev_zone_mgmt_emulate_all()
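Open, close and finish have no "all zones" primitive in the block layer, so the target emulates them: report all zones with a scan callback that marks, in a bitmap, only the zones the action is meaningful for, then submit one zone-management bio per marked zone, OR'ing in REQ_SYNC so the work function waits for completion. An abridged sketch of the scan callback's job; the exact zone-condition checks are assumptions from the ZNS zone state machine, and keying the bitmap by the callback index is likewise assumed:

    /* Mark only zones on which the "all" action does something, so
     * e.g. opening an already-open zone is skipped. */
    static int zmgmt_send_scan_cb(struct blk_zone *z, unsigned int i, void *d)
    {
        struct nvmet_zone_mgmt_send_all_data *data = d;

        switch (zsa_req_op(data->req->cmd->zms.zsa)) {
        case REQ_OP_ZONE_OPEN:
            if (z->cond != BLK_ZONE_COND_CLOSED)
                return 0;                 /* nothing to open */
            break;
        case REQ_OP_ZONE_CLOSE:
            if (z->cond != BLK_ZONE_COND_IMP_OPEN &&
                z->cond != BLK_ZONE_COND_EXP_OPEN)
                return 0;                 /* nothing to close */
            break;
        case REQ_OP_ZONE_FINISH:
            if (z->cond == BLK_ZONE_COND_EMPTY ||
                z->cond == BLK_ZONE_COND_FULL)
                return 0;                 /* nothing to finish */
            break;
        default:
            return -EINVAL;
        }
        set_bit(i, data->zbitmap);
        return 0;
    }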
451 static u16 nvmet_bdev_execute_zmgmt_send_all(struct nvmet_req *req) in nvmet_bdev_execute_zmgmt_send_all() argument
455 switch (zsa_req_op(req->cmd->zms.zsa)) { in nvmet_bdev_execute_zmgmt_send_all()
457 ret = blkdev_zone_mgmt(req->ns->bdev, REQ_OP_ZONE_RESET, 0, in nvmet_bdev_execute_zmgmt_send_all()
458 get_capacity(req->ns->bdev->bd_disk), GFP_KERNEL); in nvmet_bdev_execute_zmgmt_send_all()
466 return nvmet_bdev_zone_mgmt_emulate_all(req); in nvmet_bdev_execute_zmgmt_send_all()
469 req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, zsa); in nvmet_bdev_execute_zmgmt_send_all()
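Select-all dispatch: Reset All maps directly onto a single blkdev_zone_mgmt(REQ_OP_ZONE_RESET) spanning the whole disk, while Open/Close/Finish All fall through to the bitmap emulation above; anything else is rejected with error_loc pointing at zsa. The zsa_req_op() helper referenced on lines 362, 431, 455 and 480 is presumably a straight action-to-request-op mapping along these lines (the default return is an assumption):

    static inline enum req_op zsa_req_op(u8 zsa)
    {
        switch (zsa) {
        case NVME_ZONE_OPEN:   return REQ_OP_ZONE_OPEN;
        case NVME_ZONE_CLOSE:  return REQ_OP_ZONE_CLOSE;
        case NVME_ZONE_FINISH: return REQ_OP_ZONE_FINISH;
        case NVME_ZONE_RESET:  return REQ_OP_ZONE_RESET;
        default:               return REQ_OP_LAST;  /* rejected by callers */
        }
    }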
478 struct nvmet_req *req = container_of(w, struct nvmet_req, z.zmgmt_work); in nvmet_bdev_zmgmt_send_work() local
479 sector_t sect = nvmet_lba_to_sect(req->ns, req->cmd->zms.slba); in nvmet_bdev_zmgmt_send_work()
480 enum req_op op = zsa_req_op(req->cmd->zms.zsa); in nvmet_bdev_zmgmt_send_work()
481 struct block_device *bdev = req->ns->bdev; in nvmet_bdev_zmgmt_send_work()
487 req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, zsa); in nvmet_bdev_zmgmt_send_work()
493 if (req->cmd->zms.select_all) { in nvmet_bdev_zmgmt_send_work()
494 status = nvmet_bdev_execute_zmgmt_send_all(req); in nvmet_bdev_zmgmt_send_work()
499 req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, slba); in nvmet_bdev_zmgmt_send_work()
505 req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, slba); in nvmet_bdev_zmgmt_send_work()
515 nvmet_req_complete(req, status); in nvmet_bdev_zmgmt_send_work()
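The single-zone send path: an unmappable action errors out on zsa; select_all delegates to the all-zones handler; otherwise slba must lie inside the device and, judging by the second slba error_loc on line 505, must also sit on a zone boundary before the one-zone blkdev_zone_mgmt() call. The boundary test is a mask because zone sizes are powers of two:

    #include <stdio.h>
    #include <stdint.h>

    /* sect & (zone_sectors - 1) is nonzero exactly when sect is not a
     * zone start, valid because zone_sectors is a power of two. */
    int main(void)
    {
        uint64_t zone_sectors = 524288;
        uint64_t a = 3 * zone_sectors;       /* zone-aligned */
        uint64_t b = 3 * zone_sectors + 8;   /* mid-zone */

        printf("a aligned: %d, b aligned: %d\n",
               (a & (zone_sectors - 1)) == 0,
               (b & (zone_sectors - 1)) == 0);  /* 1, 0 */
        return 0;
    }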
518 void nvmet_bdev_execute_zone_mgmt_send(struct nvmet_req *req) in nvmet_bdev_execute_zone_mgmt_send() argument
520 INIT_WORK(&req->z.zmgmt_work, nvmet_bdev_zmgmt_send_work); in nvmet_bdev_execute_zone_mgmt_send()
521 queue_work(zbd_wq, &req->z.zmgmt_work); in nvmet_bdev_execute_zone_mgmt_send()
526 struct nvmet_req *req = bio->bi_private; in nvmet_bdev_zone_append_bio_done() local
529 req->cqe->result.u64 = in nvmet_bdev_zone_append_bio_done()
530 nvmet_sect_to_lba(req->ns, bio->bi_iter.bi_sector); in nvmet_bdev_zone_append_bio_done()
533 nvmet_req_complete(req, blk_to_nvme_status(req, bio->bi_status)); in nvmet_bdev_zone_append_bio_done()
534 nvmet_req_bio_put(req, bio); in nvmet_bdev_zone_append_bio_done()
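This completion callback is what gives Zone Append its defining behavior: the block layer leaves the sector at which the data actually landed in bio->bi_iter.bi_sector, and the target converts that back to a namespace LBA and hands it to the host in the 64-bit CQE result. nvmet_req_bio_put() presumably frees the bio only when it is not the request's embedded inline bio.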
537 void nvmet_bdev_execute_zone_append(struct nvmet_req *req) in nvmet_bdev_execute_zone_append() argument
539 sector_t sect = nvmet_lba_to_sect(req->ns, req->cmd->rw.slba); in nvmet_bdev_execute_zone_append()
548 if (!nvmet_check_transfer_len(req, nvmet_rw_data_len(req))) in nvmet_bdev_execute_zone_append()
551 if (!req->sg_cnt) { in nvmet_bdev_execute_zone_append()
552 nvmet_req_complete(req, 0); in nvmet_bdev_execute_zone_append()
556 if (sect >= get_capacity(req->ns->bdev->bd_disk)) { in nvmet_bdev_execute_zone_append()
557 req->error_loc = offsetof(struct nvme_rw_command, slba); in nvmet_bdev_execute_zone_append()
562 if (sect & (bdev_zone_sectors(req->ns->bdev) - 1)) { in nvmet_bdev_execute_zone_append()
563 req->error_loc = offsetof(struct nvme_rw_command, slba); in nvmet_bdev_execute_zone_append()
568 if (nvmet_use_inline_bvec(req)) { in nvmet_bdev_execute_zone_append()
569 bio = &req->z.inline_bio; in nvmet_bdev_execute_zone_append()
570 bio_init(bio, req->ns->bdev, req->inline_bvec, in nvmet_bdev_execute_zone_append()
571 ARRAY_SIZE(req->inline_bvec), opf); in nvmet_bdev_execute_zone_append()
573 bio = bio_alloc(req->ns->bdev, req->sg_cnt, opf, GFP_KERNEL); in nvmet_bdev_execute_zone_append()
578 bio->bi_private = req; in nvmet_bdev_execute_zone_append()
579 if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA)) in nvmet_bdev_execute_zone_append()
582 for_each_sg(req->sg, sg, req->sg_cnt, sg_cnt) { in nvmet_bdev_execute_zone_append()
596 if (total_len != nvmet_rw_data_len(req)) { in nvmet_bdev_execute_zone_append()
605 nvmet_req_bio_put(req, bio); in nvmet_bdev_execute_zone_append()
607 nvmet_req_complete(req, status); in nvmet_bdev_execute_zone_append()
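The Zone Append submission path, as far as the matched lines show: verify the transfer length, complete zero-length requests immediately, require slba to be in range and zone-aligned (a ZNS append always targets the zone's start LBA and the device, here the block layer, picks the actual write position), then build the bio. Small transfers reuse the bvec array embedded in the request (req->z.inline_bio plus req->inline_bvec) to avoid an allocation, larger ones go through bio_alloc() sized for sg_cnt segments; FUA in the command is propagated to the bio, each scatterlist element is added as a page, and the accumulated total_len is cross-checked against nvmet_rw_data_len(req) before submission, with failures unwinding through nvmet_req_bio_put().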
610 u16 nvmet_bdev_zns_parse_io_cmd(struct nvmet_req *req) in nvmet_bdev_zns_parse_io_cmd() argument
612 struct nvme_command *cmd = req->cmd; in nvmet_bdev_zns_parse_io_cmd()
616 req->execute = nvmet_bdev_execute_zone_append; in nvmet_bdev_zns_parse_io_cmd()
619 req->execute = nvmet_bdev_execute_zone_mgmt_recv; in nvmet_bdev_zns_parse_io_cmd()
622 req->execute = nvmet_bdev_execute_zone_mgmt_send; in nvmet_bdev_zns_parse_io_cmd()
625 return nvmet_bdev_parse_io_cmd(req); in nvmet_bdev_zns_parse_io_cmd()
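Finally the parser: the three ZNS-specific opcodes get their execute handlers wired up, and everything else falls back to the regular block-device command parser, so reads and writes to a zoned namespace reuse the normal bdev I/O path. Inferred from the visible assignments, with the case labels being the standard NVMe ZNS opcodes:

    u16 nvmet_bdev_zns_parse_io_cmd(struct nvmet_req *req)
    {
        struct nvme_command *cmd = req->cmd;

        switch (cmd->common.opcode) {
        case nvme_cmd_zone_append:
            req->execute = nvmet_bdev_execute_zone_append;
            return 0;
        case nvme_cmd_zone_mgmt_recv:
            req->execute = nvmet_bdev_execute_zone_mgmt_recv;
            return 0;
        case nvme_cmd_zone_mgmt_send:
            req->execute = nvmet_bdev_execute_zone_mgmt_send;
            return 0;
        default:
            return nvmet_bdev_parse_io_cmd(req);
        }
    }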