// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe ZNS-ZBD command implementation.
 */
/* in nvmet_zasl() */
	/* Zone Append Size Limit (zasl) is expressed as a power of 2 value. */
	return ilog2(zone_append_sects >> (NVMET_MPSMIN_SHIFT - 9));
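/*
 * Worked example (not part of the original file; assumes NVMET_MPSMIN_SHIFT
 * is 12, i.e. a 4 KiB minimum memory page size): a device zone-append limit
 * of 1024 512-byte sectors gives ilog2(1024 >> 3) = ilog2(128) = 7, so the
 * advertised limit corresponds to 2^7 * 4 KiB = 512 KiB per Zone Append.
 */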
static int validate_conv_zones_cb(struct blk_zone *z, unsigned int i, void *data)
{
	if (z->type == BLK_ZONE_TYPE_CONVENTIONAL)
		return -EOPNOTSUPP;
	return 0;
}
/* in nvmet_bdev_zns_enable() */
	struct request_queue *q = ns->bdev->bd_disk->queue;
	struct gendisk *bd_disk = ns->bdev->bd_disk;

	if (ns->subsys->zasl) {
		if (ns->subsys->zasl > zasl)
			return false;
	}
	ns->subsys->zasl = zasl;

	/*
	 * Generic zoned block devices may have a smaller last zone which is
	 * not supported by ZNS.
	 */
	if (get_capacity(bd_disk) & (bdev_zone_sectors(ns->bdev) - 1))
		return false;

	/*
	 * ZNS does not define a conventional zone type. If the underlying
	 * device has a bitmap set indicating the existence of conventional
	 * zones, reject the device.
	 */
	if (ns->bdev->bd_disk->queue->conv_zones_bitmap)
		return false;

	ret = blkdev_report_zones(ns->bdev, 0, blkdev_nr_zones(bd_disk),
				  validate_conv_zones_cb, NULL);

	ns->blksize_shift = blksize_bits(bdev_logical_block_size(ns->bdev));
/* in nvmet_execute_identify_cns_cs_ctrl() */
	u8 zasl = req->sq->ctrl->subsys->zasl;
	struct nvmet_ctrl *ctrl = req->sq->ctrl;

	if (ctrl->ops->get_mdts)
		id->zasl = min_t(u8, ctrl->ops->get_mdts(ctrl), zasl);
	else
		id->zasl = zasl;
/* in nvmet_execute_identify_cns_cs_ns() */
	if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) {
		req->error_loc = offsetof(struct nvme_identify, nsid);

	if (!bdev_is_zoned(req->ns->bdev)) {
		req->error_loc = offsetof(struct nvme_identify, nsid);

	nvmet_ns_revalidate(req->ns);
	zsze = (bdev_zone_sectors(req->ns->bdev) << 9) >>
					req->ns->blksize_shift;
	id_zns->lbafe[0].zsze = cpu_to_le64(zsze);
	id_zns->mor = cpu_to_le32(bdev_max_open_zones(req->ns->bdev));
	id_zns->mar = cpu_to_le32(bdev_max_active_zones(req->ns->bdev));
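/*
 * Worked example (not part of the original file): with 256 MiB zones
 * (524288 512-byte sectors) and a 4096-byte logical block size
 * (blksize_shift = 12), zsze = (524288 << 9) >> 12 = 65536 logical blocks
 * per zone, which is the value reported in lbafe[0].zsze.
 */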
/* in nvmet_bdev_validate_zone_mgmt_recv() */
	sector_t sect = nvmet_lba_to_sect(req->ns, req->cmd->zmr.slba);
	u32 out_bufsize = (le32_to_cpu(req->cmd->zmr.numd) + 1) << 2;

	if (sect >= get_capacity(req->ns->bdev->bd_disk)) {
		req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, slba);

		req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, numd);

	if (req->cmd->zmr.zra != NVME_ZRA_ZONE_REPORT) {
		req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, zra);

	switch (req->cmd->zmr.pr) {
		req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, pr);

	switch (req->cmd->zmr.zrasf) {
		req->error_loc =
			offsetof(struct nvme_zone_mgmt_recv_cmd, zrasf);
static int nvmet_bdev_report_zone_cb(struct blk_zone *z, unsigned i, void *d)
{
	if (rz->zrasf != NVME_ZRASF_ZONE_REPORT_ALL &&
	    z->cond != nvme_zrasf_to_blk_zcond[rz->zrasf])
		return 0;

	if (rz->nr_zones < rz->out_nr_zones) {
		zdesc.zcap = nvmet_sect_to_lba(rz->req->ns, z->capacity);
		zdesc.zslba = nvmet_sect_to_lba(rz->req->ns, z->start);
		zdesc.wp = nvmet_sect_to_lba(rz->req->ns, z->wp);
		zdesc.za = z->reset ? 1 << 2 : 0;
		zdesc.zs = z->cond << 4;
		zdesc.zt = z->type;

		status = nvmet_copy_to_sgl(rz->req, rz->out_buf_offset, &zdesc,
					   sizeof(zdesc));
		if (status)
			return -EINVAL;

		rz->out_buf_offset += sizeof(zdesc);
	}

	rz->nr_zones++;
/* in nvmet_req_nr_zones_from_slba() */
	unsigned int sect = nvmet_lba_to_sect(req->ns, req->cmd->zmr.slba);

	return blkdev_nr_zones(req->ns->bdev->bd_disk) -
			(sect >> ilog2(bdev_zone_sectors(req->ns->bdev)));
/* in get_nr_zones_from_buf() */
	return (bufsize - sizeof(struct nvme_zone_report)) /
			sizeof(struct nvme_zone_descriptor);
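/*
 * Worked example (not part of the original file; assumes the standard
 * 64-byte struct nvme_zone_report header and 64-byte zone descriptors):
 * a host that sets numd = 63 asks for (63 + 1) * 4 = 256 bytes, which
 * holds the report header plus (256 - 64) / 64 = 3 zone descriptors.
 */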
/* in nvmet_bdev_zone_zmgmt_recv_work() */
	struct nvmet_req *req = container_of(w, struct nvmet_req, z.zmgmt_work);
	sector_t start_sect = nvmet_lba_to_sect(req->ns, req->cmd->zmr.slba);
	u32 out_bufsize = (le32_to_cpu(req->cmd->zmr.numd) + 1) << 2;
		.zrasf = req->cmd->zmr.zrasf,

	ret = blkdev_report_zones(req->ns->bdev, start_sect, req_slba_nr_zones,

	if (req->cmd->zmr.pr)
/* in nvmet_bdev_execute_zone_mgmt_recv() */
	INIT_WORK(&req->z.zmgmt_work, nvmet_bdev_zone_zmgmt_recv_work);
	queue_work(zbd_wq, &req->z.zmgmt_work);
/* in blkdev_zone_mgmt_errno_to_nvme_status() */
	case -EINVAL:
	case -EIO:
static int zmgmt_send_scan_cb(struct blk_zone *z, unsigned i, void *d)
{
	switch (zsa_req_op(data->req->cmd->zms.zsa)) {
		switch (z->cond) {

		switch (z->cond) {

		switch (z->cond) {

		return -EINVAL;

	set_bit(i, data->zbitmap);
/* in nvmet_bdev_zone_mgmt_emulate_all() */
	struct block_device *bdev = req->ns->bdev;
	unsigned int nr_zones = blkdev_nr_zones(bdev->bd_disk);
				GFP_NOIO, q->node);

		ret = -ENOMEM;

		ret = -EIO;

	while (sector < get_capacity(bdev->bd_disk)) {
		bio->bi_opf = zsa_req_op(req->cmd->zms.zsa) | REQ_SYNC;
		bio->bi_iter.bi_sector = sector;

		/* This may take a while, so be nice to others */
		cond_resched();
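/*
 * The two fragments above implement the Zone Send "select all" path by first
 * scanning every zone and setting a bit for each zone the requested action
 * applies to, then walking the device and issuing one zone-management bio per
 * flagged zone. Below is a minimal, self-contained user-space sketch of that
 * two-pass pattern (not kernel code; the zone array, condition names and
 * do_zone_op() are illustrative stand-ins).
 */
#include <stdbool.h>
#include <stdio.h>

enum zone_cond { ZONE_EMPTY, ZONE_IMP_OPEN, ZONE_EXP_OPEN, ZONE_CLOSED, ZONE_FULL };

/* Illustrative stand-in for submitting one zone-management bio ("finish"). */
static void do_zone_op(unsigned int zone_idx)
{
	printf("finish zone %u\n", zone_idx);
}

int main(void)
{
	/* Per-zone conditions as a report-zones pass would return them. */
	enum zone_cond zones[] = {
		ZONE_EMPTY, ZONE_IMP_OPEN, ZONE_FULL, ZONE_CLOSED, ZONE_EXP_OPEN
	};
	unsigned int nr_zones = sizeof(zones) / sizeof(zones[0]);
	bool bitmap[sizeof(zones) / sizeof(zones[0])] = { false };
	unsigned int i;

	/* Pass 1: the scan callback marks zones the operation applies to;
	 * "finish" only makes sense for open or closed zones. */
	for (i = 0; i < nr_zones; i++) {
		if (zones[i] == ZONE_IMP_OPEN || zones[i] == ZONE_EXP_OPEN ||
		    zones[i] == ZONE_CLOSED)
			bitmap[i] = true;
	}

	/* Pass 2: walk all zones and act only on the flagged ones. */
	for (i = 0; i < nr_zones; i++) {
		if (bitmap[i])
			do_zone_op(i);
	}

	return 0;
}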
/* in nvmet_bdev_execute_zmgmt_send_all() */
	switch (zsa_req_op(req->cmd->zms.zsa)) {
		ret = blkdev_zone_mgmt(req->ns->bdev, REQ_OP_ZONE_RESET, 0,
				       get_capacity(req->ns->bdev->bd_disk),
				       GFP_KERNEL);

		req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, zsa);
/* in nvmet_bdev_zmgmt_send_work() */
	struct nvmet_req *req = container_of(w, struct nvmet_req, z.zmgmt_work);
	sector_t sect = nvmet_lba_to_sect(req->ns, req->cmd->zms.slba);
	enum req_opf op = zsa_req_op(req->cmd->zms.zsa);
	struct block_device *bdev = req->ns->bdev;

		req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, zsa);

	if (req->cmd->zms.select_all) {

	if (sect >= get_capacity(bdev->bd_disk)) {
		req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, slba);

	if (sect & (zone_sectors - 1)) {
		req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, slba);
/* in nvmet_bdev_execute_zone_mgmt_send() */
	INIT_WORK(&req->z.zmgmt_work, nvmet_bdev_zmgmt_send_work);
	queue_work(zbd_wq, &req->z.zmgmt_work);
/* in nvmet_bdev_zone_append_bio_done() */
	struct nvmet_req *req = bio->bi_private;

	if (bio->bi_status == BLK_STS_OK) {
		req->cqe->result.u64 =
			nvmet_sect_to_lba(req->ns, bio->bi_iter.bi_sector);
	}

	nvmet_req_complete(req, blk_to_nvme_status(req, bio->bi_status));
/* in nvmet_bdev_execute_zone_append() */
	sector_t sect = nvmet_lba_to_sect(req->ns, req->cmd->rw.slba);

	if (!req->sg_cnt) {

	if (sect >= get_capacity(req->ns->bdev->bd_disk)) {
		req->error_loc = offsetof(struct nvme_rw_command, slba);

	if (sect & (bdev_zone_sectors(req->ns->bdev) - 1)) {
		req->error_loc = offsetof(struct nvme_rw_command, slba);

		bio = &req->z.inline_bio;
		bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));

		bio = bio_alloc(GFP_KERNEL, req->sg_cnt);

	bio->bi_opf = REQ_OP_ZONE_APPEND | REQ_SYNC | REQ_IDLE;
	bio->bi_end_io = nvmet_bdev_zone_append_bio_done;
	bio_set_dev(bio, req->ns->bdev);
	bio->bi_iter.bi_sector = sect;
	bio->bi_private = req;
	if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA))
		bio->bi_opf |= REQ_FUA;

	for_each_sg(req->sg, sg, req->sg_cnt, sg_cnt) {
		unsigned int l = sg->length;
		unsigned int o = sg->offset;

		if (ret != sg->length) {

		total_len += sg->length;
/* in nvmet_bdev_zns_parse_io_cmd() */
	struct nvme_command *cmd = req->cmd;

	switch (cmd->common.opcode) {
		req->execute = nvmet_bdev_execute_zone_append;

		req->execute = nvmet_bdev_execute_zone_mgmt_recv;

		req->execute = nvmet_bdev_execute_zone_mgmt_send;