/*
 * Lines matching: +full:cmd +full:-timeout-ms
 * (fragmented code-search output; unmatched lines are elided below as
 * comments, and each fragment is headed by the function it belongs to)
 */
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2011-2014, Intel Corporation.
 * Copyright (c) 2017-2021 Christoph Hellwig.
 */
/*
 * Convert integer values from ioctl structures to user pointers, silently
 * ignoring the upper bits in the compat case to match behaviour of 32-bit
 * kernels.
 */
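/*
 * Hedged sketch (assumed shape, not matched lines from this listing): the
 * helper the comment above documents converts a u64 from an ioctl struct
 * into a user pointer, truncating to 32 bits for compat callers.
 */
static void __user *nvme_to_user_ptr(uintptr_t ptrval)
{
	if (in_compat_syscall())
		ptrval = (compat_uptr_t)ptrval;	/* drop the upper 32 bits */
	return (void __user *)ptrval;
}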
/* nvme_add_user_metadata() */
	int ret = -ENOMEM;
	/* ... */
	struct bio *bio = req->bio;
	/* ... */
	ret = -EFAULT;
	/* ... */
	bip->bip_iter.bi_size = len;
	bip->bip_iter.bi_sector = seed;
	/* ... */
	ret = -ENOMEM;
	/* ... */
	req->cmd_flags |= REQ_INTEGRITY;
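/*
 * Note: the fragment above wires a user metadata buffer into the request
 * as a bio integrity payload (bip) and tags the request REQ_INTEGRITY, so
 * the separate metadata buffer rides along with the data transfer.
 */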
/* nvme_finish_user_metadata(): copies metadata back to userspace on reads */
	ret = -EFAULT;
static struct request *nvme_alloc_user_request(struct request_queue *q,
		struct nvme_command *cmd, blk_opf_t rq_flags,
		blk_mq_req_flags_t blk_flags)
{
	/* ... */
	req = blk_mq_alloc_request(q, nvme_req_op(cmd) | rq_flags, blk_flags);
	/* ... */
	nvme_init_request(req, cmd);
	nvme_req(req)->flags |= NVME_REQ_USERCMD;
/* nvme_map_user_request() */
	struct request_queue *q = req->q;
	struct nvme_ns *ns = q->queuedata;
	struct block_device *bdev = ns ? ns->disk->part0 : NULL;
	/* ... */
	if (ioucmd && (ioucmd->flags & IORING_URING_CMD_FIXED)) {
		/* ... */
		/* fixedbufs is only for non-vectored io */
		/* ... vectored requests bail out here: */
			return -EINVAL;
	/* ... */
	bio = req->bio;
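/*
 * Note: nvme_map_user_request() maps the user data buffer onto the
 * request; IORING_URING_CMD_FIXED selects io_uring pre-registered (fixed)
 * buffers, which, as the check above shows, cannot be combined with
 * vectored I/O.
 */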
static int nvme_submit_user_cmd(struct request_queue *q,
		struct nvme_command *cmd, u64 ubuffer,
		unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
		u32 meta_seed, u64 *result, unsigned timeout, bool vec)
{
	/* ... */
	req = nvme_alloc_user_request(q, cmd, 0, 0);
	/* ... */
	req->timeout = timeout;
	/* ... */
	bio = req->bio;
	ctrl = nvme_req(req)->ctrl;
	/* ... */
	if (result)
		*result = le64_to_cpu(nvme_req(req)->result.u64);
	/* ... */
	if (effects)
		nvme_passthru_end(ctrl, effects, cmd, ret);
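/*
 * Note: nvme_passthru_end() runs only when the executed command reported
 * effects that demand follow-up work (e.g. rescanning namespaces after a
 * command that changes controller or namespace state).
 */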
/* nvme_submit_io() */
	if (copy_from_user(&io, uio, sizeof(io)))
		return -EFAULT;
	if (io.flags)
		return -EINVAL;
	/* ... opcode other than read/write/compare: */
		return -EINVAL;
	/* ... */
	length = (io.nblocks + 1) << ns->lba_shift;

	if ((io.control & NVME_RW_PRINFO_PRACT) &&
	    ns->ms == sizeof(struct t10_pi_tuple)) {
		/* ... PI handled by the controller; a metadata pointer is invalid: */
			return -EINVAL;
	} else {
		meta_len = (io.nblocks + 1) * ns->ms;
	}

	if (ns->features & NVME_NS_EXT_LBAS) {
		/* ... */
	} else if (meta_len) {
		/* ... unaligned or NULL user address: */
			return -EINVAL;
	}
	/* ... */
	c.rw.nsid = cpu_to_le32(ns->head->ns_id);
	/* ... */
	return nvme_submit_user_cmd(ns->queue, &c,
			/* ... buffer/metadata/seed arguments elided ... */);
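/*
 * Userspace sketch (not part of this listing): driving nvme_submit_io()
 * above through the legacy NVME_IOCTL_SUBMIT_IO ioctl. Reads one logical
 * block (nblocks is 0-based) into buf; sizing the buffer for the device's
 * LBA format is the caller's responsibility.
 */
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/nvme_ioctl.h>

static int read_first_lba(const char *dev, void *buf)
{
	struct nvme_user_io io;
	int fd = open(dev, O_RDWR), ret;

	if (fd < 0)
		return -1;
	memset(&io, 0, sizeof(io));
	io.opcode = 0x02;		/* nvme_cmd_read */
	io.slba = 0;			/* starting LBA */
	io.nblocks = 0;			/* 0-based: one block */
	io.addr = (unsigned long)buf;	/* io.flags must stay 0 (-EINVAL above) */
	ret = ioctl(fd, NVME_IOCTL_SUBMIT_IO, &io);
	close(fd);
	return ret;
}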
/* nvme_validate_passthru_nsid() */
	if (ns && nsid != ns->head->ns_id) {
		dev_err(ctrl->device,
			"%s: nsid (%u) in cmd does not match nsid (%u) of namespace\n",
			current->comm, nsid, ns->head->ns_id);
		return false;
	}
	return true;
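/*
 * Rule enforced above: on a namespace handle the command's nsid must match
 * that namespace; controller handles (ns == NULL) may target any nsid.
 */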
static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
			struct nvme_passthru_cmd __user *ucmd)
{
	struct nvme_passthru_cmd cmd;
	struct nvme_command c;
	unsigned timeout = 0;
	u64 result;
	int status;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
		return -EFAULT;
	if (cmd.flags)
		return -EINVAL;
	if (!nvme_validate_passthru_nsid(ctrl, ns, cmd.nsid))
		return -EINVAL;

	memset(&c, 0, sizeof(c));
	c.common.opcode = cmd.opcode;
	c.common.flags = cmd.flags;
	c.common.nsid = cpu_to_le32(cmd.nsid);
	c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
	c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
	c.common.cdw10 = cpu_to_le32(cmd.cdw10);
	c.common.cdw11 = cpu_to_le32(cmd.cdw11);
	c.common.cdw12 = cpu_to_le32(cmd.cdw12);
	c.common.cdw13 = cpu_to_le32(cmd.cdw13);
	c.common.cdw14 = cpu_to_le32(cmd.cdw14);
	c.common.cdw15 = cpu_to_le32(cmd.cdw15);

	if (cmd.timeout_ms)
		timeout = msecs_to_jiffies(cmd.timeout_ms);

	status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
			cmd.addr, cmd.data_len,
			nvme_to_user_ptr(cmd.metadata), cmd.metadata_len,
			0, &result, timeout, false);

	if (status >= 0) {
		if (put_user(result, &ucmd->result))
			return -EFAULT;
	}
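/*
 * Userspace sketch (not part of this listing): an Identify Controller
 * admin command through NVME_IOCTL_ADMIN_CMD, the path served by
 * nvme_user_cmd() above. Needs CAP_SYS_ADMIN, matching the -EACCES check;
 * opcode 0x06 with CNS=1 in cdw10 is the spec-defined Identify Controller.
 */
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/nvme_ioctl.h>

static int identify_ctrl(const char *dev, void *buf4k)
{
	struct nvme_admin_cmd cmd;
	int fd = open(dev, O_RDWR), ret;

	if (fd < 0)
		return -1;
	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = 0x06;			/* Identify */
	cmd.addr = (unsigned long)buf4k;	/* 4096-byte buffer */
	cmd.data_len = 4096;
	cmd.cdw10 = 1;				/* CNS 1: controller data */
	cmd.timeout_ms = 1000;			/* becomes msecs_to_jiffies() above */
	ret = ioctl(fd, NVME_IOCTL_ADMIN_CMD, &cmd);
	close(fd);
	return ret;
}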
static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
			struct nvme_passthru_cmd64 __user *ucmd, bool vec)
{
	struct nvme_passthru_cmd64 cmd;
	struct nvme_command c;
	unsigned timeout = 0;
	int status;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
		return -EFAULT;
	if (cmd.flags)
		return -EINVAL;
	if (!nvme_validate_passthru_nsid(ctrl, ns, cmd.nsid))
		return -EINVAL;

	memset(&c, 0, sizeof(c));
	c.common.opcode = cmd.opcode;
	c.common.flags = cmd.flags;
	c.common.nsid = cpu_to_le32(cmd.nsid);
	c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
	c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
	c.common.cdw10 = cpu_to_le32(cmd.cdw10);
	c.common.cdw11 = cpu_to_le32(cmd.cdw11);
	c.common.cdw12 = cpu_to_le32(cmd.cdw12);
	c.common.cdw13 = cpu_to_le32(cmd.cdw13);
	c.common.cdw14 = cpu_to_le32(cmd.cdw14);
	c.common.cdw15 = cpu_to_le32(cmd.cdw15);

	if (cmd.timeout_ms)
		timeout = msecs_to_jiffies(cmd.timeout_ms);

	status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
			cmd.addr, cmd.data_len,
			nvme_to_user_ptr(cmd.metadata), cmd.metadata_len,
			0, &cmd.result, timeout, vec);

	if (status >= 0) {
		if (put_user(cmd.result, &ucmd->result))
			return -EFAULT;
	}
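/*
 * nvme_user_cmd64() differs from nvme_user_cmd() in two ways: the result
 * returned to userspace is 64 bits wide, and the vec flag enables the
 * iovec-based variants (see NVME_IOCTL_IO64_CMD_VEC below).
 */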
/* from struct nvme_uring_cmd_pdu: */
	void *meta; /* kernel-resident buffer */
/* nvme_uring_cmd_pdu() */
	return (struct nvme_uring_cmd_pdu *)&ioucmd->pdu;
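/*
 * The per-command PDU is stored inline in the io_uring command's pdu
 * area; the BUILD_BUG_ON() in nvme_ns_uring_cmd() below guards that it
 * actually fits.
 */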
/* nvme_uring_task_meta_cb() */
	struct request *req = pdu->req;
	/* ... */
	if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
		status = -EINTR;
	else
		status = nvme_req(req)->status;

	result = le64_to_cpu(nvme_req(req)->result.u64);

	if (pdu->meta_len)
		status = nvme_finish_user_metadata(req, pdu->u.meta_buffer,
				pdu->u.meta, pdu->meta_len, status);
	if (req->bio)
		blk_rq_unmap_user(req->bio);
/* nvme_uring_task_cb() */
	if (pdu->bio)
		blk_rq_unmap_user(pdu->bio);

	io_uring_cmd_done(ioucmd, pdu->nvme_status, pdu->u.result);
/* nvme_uring_cmd_end_io() */
	struct io_uring_cmd *ioucmd = req->end_io_data;
	/* ... */
	void *cookie = READ_ONCE(ioucmd->cookie);

	req->bio = pdu->bio;
	if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
		pdu->nvme_status = -EINTR;
	else
		pdu->nvme_status = nvme_req(req)->status;
	pdu->u.result = le64_to_cpu(nvme_req(req)->result.u64);
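/*
 * This end_io handler can run in interrupt context, so it only snapshots
 * status and result into the pdu (and reads the poll cookie); unmapping
 * buffers and completing the io_uring command are deferred to task
 * context via the nvme_uring_task*_cb() callbacks above.
 */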
/* nvme_uring_cmd_end_io_meta() */
	struct io_uring_cmd *ioucmd = req->end_io_data;
	/* ... */
	void *cookie = READ_ONCE(ioucmd->cookie);

	req->bio = pdu->bio;
	pdu->req = req;
/* nvme_uring_cmd_io() */
	const struct nvme_uring_cmd *cmd = ioucmd->cmd;
	struct request_queue *q = ns ? ns->queue : ctrl->admin_q;
	/* ... */
	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	c.common.opcode = READ_ONCE(cmd->opcode);
	c.common.flags = READ_ONCE(cmd->flags);
	if (c.common.flags)
		return -EINVAL;
	/* ... */
	c.common.nsid = cpu_to_le32(cmd->nsid);
	if (!nvme_validate_passthru_nsid(ctrl, ns, le32_to_cpu(c.common.nsid)))
		return -EINVAL;

	c.common.cdw2[0] = cpu_to_le32(READ_ONCE(cmd->cdw2));
	c.common.cdw2[1] = cpu_to_le32(READ_ONCE(cmd->cdw3));
	/* ... */
	c.common.cdw10 = cpu_to_le32(READ_ONCE(cmd->cdw10));
	c.common.cdw11 = cpu_to_le32(READ_ONCE(cmd->cdw11));
	c.common.cdw12 = cpu_to_le32(READ_ONCE(cmd->cdw12));
	c.common.cdw13 = cpu_to_le32(READ_ONCE(cmd->cdw13));
	c.common.cdw14 = cpu_to_le32(READ_ONCE(cmd->cdw14));
	c.common.cdw15 = cpu_to_le32(READ_ONCE(cmd->cdw15));

	d.metadata = READ_ONCE(cmd->metadata);
	d.addr = READ_ONCE(cmd->addr);
	d.data_len = READ_ONCE(cmd->data_len);
	d.metadata_len = READ_ONCE(cmd->metadata_len);
	d.timeout_ms = READ_ONCE(cmd->timeout_ms);
	/* ... */
	req->timeout = d.timeout_ms ? msecs_to_jiffies(d.timeout_ms) : 0;
	/* ... */
		if (unlikely(!req->bio)) {
			/* ... cannot poll without a bio; retried unpolled ... */
		} else {
			WRITE_ONCE(ioucmd->cookie, req->bio);
			req->bio->bi_opf |= REQ_POLLED;
		}
	/* to free bio on completion, as req->bio will be null at that time */
	pdu->bio = req->bio;
	pdu->meta_len = d.metadata_len;
	req->end_io_data = ioucmd;
	if (pdu->meta_len) {
		pdu->u.meta = meta;
		pdu->u.meta_buffer = nvme_to_user_ptr(d.metadata);
		req->end_io = nvme_uring_cmd_end_io_meta;
	} else {
		req->end_io = nvme_uring_cmd_end_io;
	}
	/* ... */
	return -EIOCBQUEUED;
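/*
 * Userspace sketch (not part of this listing): the io_uring passthrough
 * path served by nvme_uring_cmd_io() above, via liburing. Big SQEs/CQEs
 * are mandatory because struct nvme_uring_cmd is carried in the SQE's cmd
 * area and the 64-bit result needs a 32-byte CQE. Error handling elided.
 */
#include <liburing.h>
#include <string.h>
#include <linux/nvme_ioctl.h>

static int uring_read_lba(int ngfd, __u32 nsid, void *buf, __u32 len)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	struct nvme_uring_cmd *cmd;
	int ret;

	io_uring_queue_init(4, &ring, IORING_SETUP_SQE128 | IORING_SETUP_CQE32);
	sqe = io_uring_get_sqe(&ring);
	memset(sqe, 0, 2 * sizeof(*sqe));	/* one 128-byte SQE slot */
	sqe->opcode = IORING_OP_URING_CMD;
	sqe->fd = ngfd;				/* /dev/ngXnY char device fd */
	sqe->cmd_op = NVME_URING_CMD_IO;
	cmd = (struct nvme_uring_cmd *)sqe->cmd;
	cmd->opcode = 0x02;			/* nvme_cmd_read */
	cmd->nsid = nsid;
	cmd->addr = (__u64)(uintptr_t)buf;
	cmd->data_len = len;
	cmd->cdw10 = 0;				/* SLBA, low 32 bits */
	cmd->cdw12 = 0;				/* number of LBAs, 0-based */
	io_uring_submit(&ring);
	io_uring_wait_cqe(&ring, &cqe);
	ret = cqe->res;
	io_uring_cqe_seen(&ring, cqe);
	io_uring_queue_exit(&ring);
	return ret;
}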
static bool is_ctrl_ioctl(unsigned int cmd)
{
	if (cmd == NVME_IOCTL_ADMIN_CMD || cmd == NVME_IOCTL_ADMIN64_CMD)
		return true;
	if (is_sed_ioctl(cmd))
		return true;
static int nvme_ctrl_ioctl(struct nvme_ctrl *ctrl, unsigned int cmd,
		void __user *argp)
{
	switch (cmd) {
	/* ... NVME_IOCTL_ADMIN_CMD and NVME_IOCTL_ADMIN64_CMD cases ... */
	default:
		/* Opal (SED) ioctls fall through to the security layer */
		return sed_ioctl(ctrl->opal_dev, cmd, argp);
	}
static int nvme_ns_ioctl(struct nvme_ns *ns, unsigned int cmd,
		void __user *argp)
{
	switch (cmd) {
	case NVME_IOCTL_ID:
		force_successful_syscall_return();
		return ns->head->ns_id;
	case NVME_IOCTL_IO_CMD:
		return nvme_user_cmd(ns->ctrl, ns, argp);
	/*
	 * struct nvme_user_io can have different padding on some 32-bit ABIs.
	 * (compat SUBMIT_IO handling and the nvme_submit_io() call elided)
	 */
	case NVME_IOCTL_IO64_CMD:
		return nvme_user_cmd64(ns->ctrl, ns, argp, false);
	case NVME_IOCTL_IO64_CMD_VEC:
		return nvme_user_cmd64(ns->ctrl, ns, argp, true);
	default:
		return -ENOTTY;
	}
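/*
 * Userspace counterpart of the NVME_IOCTL_ID case above (sketch; assumes
 * <sys/ioctl.h> and <linux/nvme_ioctl.h>): the namespace ID is the ioctl
 * return value itself, which is why the full source marks the syscall
 * successful via force_successful_syscall_return().
 */
static int get_nsid(int nsfd)
{
	return ioctl(nsfd, NVME_IOCTL_ID);	/* > 0: the nsid */
}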
static int __nvme_ioctl(struct nvme_ns *ns, unsigned int cmd, void __user *arg)
{
	if (is_ctrl_ioctl(cmd))
		return nvme_ctrl_ioctl(ns->ctrl, cmd, arg);
	return nvme_ns_ioctl(ns, cmd, arg);
}
int nvme_ioctl(struct block_device *bdev, fmode_t mode,
		unsigned int cmd, unsigned long arg)
{
	struct nvme_ns *ns = bdev->bd_disk->private_data;

	return __nvme_ioctl(ns, cmd, (void __user *)arg);
}
long nvme_ns_chr_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct nvme_ns *ns =
		container_of(file_inode(file)->i_cdev, struct nvme_ns, cdev);

	return __nvme_ioctl(ns, cmd, (void __user *)arg);
}
/* nvme_uring_cmd_checks(): NVMe passthrough needs big SQE/CQE rings */
		return -EOPNOTSUPP;
/* nvme_ns_uring_cmd() */
	struct nvme_ctrl *ctrl = ns->ctrl;
	/* ... */
	BUILD_BUG_ON(sizeof(struct nvme_uring_cmd_pdu) > sizeof(ioucmd->pdu));
	/* ... */
	switch (ioucmd->cmd_op) {
	/* ... NVME_URING_CMD_IO / NVME_URING_CMD_IO_VEC cases ... */
	default:
		ret = -ENOTTY;
	}
/* nvme_ns_chr_uring_cmd() */
	struct nvme_ns *ns = container_of(file_inode(ioucmd->file)->i_cdev,
			struct nvme_ns, cdev);
/* nvme_ns_chr_uring_cmd_iopoll() */
	bio = READ_ONCE(ioucmd->cookie);
	ns = container_of(file_inode(ioucmd->file)->i_cdev,
			struct nvme_ns, cdev);
	q = ns->queue;
	if (test_bit(QUEUE_FLAG_POLL, &q->queue_flags) && bio && bio->bi_bdev)
		ret = bio_poll(bio, iob, poll_flags);
static int nvme_ns_head_ctrl_ioctl(struct nvme_ns *ns, unsigned int cmd,
		void __user *argp, struct nvme_ns_head *head, int srcu_idx)
	__releases(&head->srcu)
{
	struct nvme_ctrl *ctrl = ns->ctrl;
	int ret;

	nvme_get_ctrl(ns->ctrl);
	srcu_read_unlock(&head->srcu, srcu_idx);
	ret = nvme_ctrl_ioctl(ns->ctrl, cmd, argp);

	nvme_put_ctrl(ctrl);
	return ret;
}
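/*
 * nvme_ctrl_ioctl() may block for a long time, so the code above takes a
 * controller reference and drops the SRCU read lock first; the
 * __releases(&head->srcu) annotation documents that handoff.
 */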
int nvme_ns_head_ioctl(struct block_device *bdev, fmode_t mode,
		unsigned int cmd, unsigned long arg)
{
	struct nvme_ns_head *head = bdev->bd_disk->private_data;
	/* ... */
	int srcu_idx, ret = -EWOULDBLOCK;

	srcu_idx = srcu_read_lock(&head->srcu);
	/* ... find a live path (ns) under SRCU ... */
	if (is_ctrl_ioctl(cmd))
		return nvme_ns_head_ctrl_ioctl(ns, cmd, argp, head, srcu_idx);

	ret = nvme_ns_ioctl(ns, cmd, argp);
	/* ... */
	srcu_read_unlock(&head->srcu, srcu_idx);
long nvme_ns_head_chr_ioctl(struct file *file, unsigned int cmd,
		unsigned long arg)
{
	struct cdev *cdev = file_inode(file)->i_cdev;
	/* ... */
	int srcu_idx, ret = -EWOULDBLOCK;

	srcu_idx = srcu_read_lock(&head->srcu);
	/* ... */
	if (is_ctrl_ioctl(cmd))
		return nvme_ns_head_ctrl_ioctl(ns, cmd, argp, head, srcu_idx);

	ret = nvme_ns_ioctl(ns, cmd, argp);
	/* ... */
	srcu_read_unlock(&head->srcu, srcu_idx);
/* nvme_ns_head_chr_uring_cmd() */
	struct cdev *cdev = file_inode(ioucmd->file)->i_cdev;
	/* ... */
	int srcu_idx = srcu_read_lock(&head->srcu);
	/* ... */
	int ret = -EINVAL;

	if (ns)
		ret = nvme_ns_uring_cmd(ns, ioucmd, issue_flags);
	srcu_read_unlock(&head->srcu, srcu_idx);
/* nvme_ns_head_chr_uring_cmd_iopoll() */
	struct cdev *cdev = file_inode(ioucmd->file)->i_cdev;
	/* ... */
	int srcu_idx = srcu_read_lock(&head->srcu);
	/* ... */
	bio = READ_ONCE(ioucmd->cookie);
	q = ns->queue;
	if (test_bit(QUEUE_FLAG_POLL, &q->queue_flags) && bio &&
	    bio->bi_bdev)
		ret = bio_poll(bio, iob, poll_flags);
	/* ... */
	srcu_read_unlock(&head->srcu, srcu_idx);
/* nvme_dev_uring_cmd() */
	struct nvme_ctrl *ctrl = ioucmd->file->private_data;
	/* ... IOPOLL is not supported on this path: */
		return -EOPNOTSUPP;
	/* ... */
	switch (ioucmd->cmd_op) {
	/* ... NVME_URING_CMD_ADMIN / NVME_URING_CMD_ADMIN_VEC cases ... */
	default:
		ret = -ENOTTY;
	}
/* nvme_dev_user_cmd() */
	down_read(&ctrl->namespaces_rwsem);
	if (list_empty(&ctrl->namespaces)) {
		ret = -ENOTTY;
		goto out_unlock;
	}

	ns = list_first_entry(&ctrl->namespaces, struct nvme_ns, list);
	if (ns != list_last_entry(&ctrl->namespaces, struct nvme_ns, list)) {
		dev_warn(ctrl->device,
			"NVME_IOCTL_IO_CMD not supported when multiple namespaces present!\n");
		ret = -EINVAL;
		goto out_unlock;
	}

	dev_warn(ctrl->device,
		"using deprecated NVME_IOCTL_IO_CMD ioctl on the char device!\n");
	kref_get(&ns->kref);
	up_read(&ctrl->namespaces_rwsem);
	/* ... */
out_unlock:
	up_read(&ctrl->namespaces_rwsem);
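/*
 * Legacy path: NVME_IOCTL_IO_CMD on the controller device is only
 * meaningful with exactly one namespace, hence the first-entry ==
 * last-entry check and the deprecation warning above.
 */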
long nvme_dev_ioctl(struct file *file, unsigned int cmd,
		unsigned long arg)
{
	struct nvme_ctrl *ctrl = file->private_data;
	/* ... */
	switch (cmd) {
	/* ... admin/IO passthru cases, then privileged controller ops: */
	case NVME_IOCTL_RESET:
		if (!capable(CAP_SYS_ADMIN))
			return -EACCES;
		dev_warn(ctrl->device, "resetting controller\n");
	/* ... NVME_IOCTL_SUBSYS_RESET, likewise CAP_SYS_ADMIN-gated: */
			return -EACCES;
	/* ... NVME_IOCTL_RESCAN, likewise CAP_SYS_ADMIN-gated: */
			return -EACCES;
	/* ... */
	default:
		return -ENOTTY;
	}