Lines matching "+full:ctrl +full:-len" in drivers/nvme/host/tcp.c (Linux NVMe over TCP host driver)

1 // SPDX-License-Identifier: GPL-2.0
11 #include <linux/nvme-tcp.h>
14 #include <linux/blk-mq.h>
25 * A non-zero value being sufficient to indicate general consideration of any
99 struct nvme_tcp_ctrl *ctrl; member
127 struct nvme_ctrl ctrl; member
142 static inline struct nvme_tcp_ctrl *to_tcp_ctrl(struct nvme_ctrl *ctrl) in to_tcp_ctrl() argument
144 return container_of(ctrl, struct nvme_tcp_ctrl, ctrl); in to_tcp_ctrl()
149 return queue - queue->ctrl->queues; in nvme_tcp_queue_id()
157 return queue->ctrl->admin_tag_set.tags[queue_idx]; in nvme_tcp_tagset()
158 return queue->ctrl->tag_set.tags[queue_idx - 1]; in nvme_tcp_tagset()
163 return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0; in nvme_tcp_hdgst_len()
168 return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0; in nvme_tcp_ddgst_len()
173 return queue->cmnd_capsule_len - sizeof(struct nvme_command); in nvme_tcp_inline_data_size()
178 return req == &req->queue->ctrl->async_req; in nvme_tcp_async_req()
190 return rq_data_dir(rq) == WRITE && req->data_len && in nvme_tcp_has_inline_data()
191 req->data_len <= nvme_tcp_inline_data_size(req->queue); in nvme_tcp_has_inline_data()
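The two helpers above decide whether a write can ride inline in the command capsule: the in-capsule data area is whatever remains after the 64-byte struct nvme_command, and cmnd_capsule_len is derived from the controller's ioccsz (nvme_tcp_alloc_queue, further down, sets it to ioccsz * 16). A minimal standalone sketch of that threshold logic, with illustrative names and an example ioccsz:

#include <stdbool.h>
#include <stdio.h>

#define NVME_CMD_SIZE 64u       /* sizeof(struct nvme_command) */

static unsigned inline_data_size(unsigned ioccsz)
{
        return ioccsz * 16 - NVME_CMD_SIZE;     /* capsule minus the command */
}

static bool has_inline_data(bool is_write, unsigned data_len, unsigned ioccsz)
{
        return is_write && data_len && data_len <= inline_data_size(ioccsz);
}

int main(void)
{
        printf("%u\n", inline_data_size(8));            /* 128B capsule: 64 */
        printf("%d\n", has_inline_data(true, 64, 8));   /* 1: fits inline */
        printf("%d\n", has_inline_data(true, 65, 8));   /* 0: needs R2T */
        return 0;
}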
196 return req->iter.bvec->bv_page; in nvme_tcp_req_cur_page()
201 return req->iter.bvec->bv_offset + req->iter.iov_offset; in nvme_tcp_req_cur_offset()
206 return min_t(size_t, iov_iter_single_seg_count(&req->iter), in nvme_tcp_req_cur_length()
207 req->pdu_len - req->pdu_sent); in nvme_tcp_req_cur_length()
213 req->pdu_len - req->pdu_sent : 0; in nvme_tcp_pdu_data_left()
217 int len) in nvme_tcp_pdu_last_send() argument
219 return nvme_tcp_pdu_data_left(req) <= len; in nvme_tcp_pdu_last_send()
231 if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) { in nvme_tcp_init_iter()
232 vec = &rq->special_vec; in nvme_tcp_init_iter()
237 struct bio *bio = req->curr_bio; in nvme_tcp_init_iter()
241 vec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter); in nvme_tcp_init_iter()
246 size = bio->bi_iter.bi_size; in nvme_tcp_init_iter()
247 offset = bio->bi_iter.bi_bvec_done; in nvme_tcp_init_iter()
250 iov_iter_bvec(&req->iter, dir, vec, nr_bvec, size); in nvme_tcp_init_iter()
251 req->iter.iov_offset = offset; in nvme_tcp_init_iter()
255 int len) in nvme_tcp_advance_req() argument
257 req->data_sent += len; in nvme_tcp_advance_req()
258 req->pdu_sent += len; in nvme_tcp_advance_req()
259 iov_iter_advance(&req->iter, len); in nvme_tcp_advance_req()
260 if (!iov_iter_count(&req->iter) && in nvme_tcp_advance_req()
261 req->data_sent < req->data_len) { in nvme_tcp_advance_req()
262 req->curr_bio = req->curr_bio->bi_next; in nvme_tcp_advance_req()
279 return !list_empty(&queue->send_list) || in nvme_tcp_queue_more()
280 !llist_empty(&queue->req_list) || queue->more_requests; in nvme_tcp_queue_more()
286 struct nvme_tcp_queue *queue = req->queue; in nvme_tcp_queue_request()
289 empty = llist_add(&req->lentry, &queue->req_list) && in nvme_tcp_queue_request()
290 list_empty(&queue->send_list) && !queue->request; in nvme_tcp_queue_request()
297 if (queue->io_cpu == raw_smp_processor_id() && in nvme_tcp_queue_request()
298 sync && empty && mutex_trylock(&queue->send_mutex)) { in nvme_tcp_queue_request()
299 queue->more_requests = !last; in nvme_tcp_queue_request()
301 queue->more_requests = false; in nvme_tcp_queue_request()
302 mutex_unlock(&queue->send_mutex); in nvme_tcp_queue_request()
306 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work); in nvme_tcp_queue_request()
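nvme_tcp_queue_request() and the helpers after it implement a classic lock-free handoff: producers push onto queue->req_list with llist_add (LIFO, no lock taken), and the single consumer splices the whole chain off with llist_del_all, reversing it into send_list to restore FIFO order. A userspace model of the same pattern using C11 atomics; the kernel llist/list primitives, not this code, are what the driver actually uses:

#include <stdatomic.h>
#include <stddef.h>

struct node { struct node *next; };

static _Atomic(struct node *) req_list;         /* models queue->req_list */

int push(struct node *n)        /* returns nonzero if the list was empty */
{
        struct node *old = atomic_load(&req_list);

        do {
                n->next = old;  /* failed CAS refreshes 'old' for us */
        } while (!atomic_compare_exchange_weak(&req_list, &old, n));
        return old == NULL;
}

struct node *pop_all_fifo(void) /* single consumer, like process_req_list */
{
        struct node *n = atomic_exchange(&req_list, NULL), *fifo = NULL;

        while (n) {             /* pushes were LIFO: reverse into FIFO */
                struct node *next = n->next;

                n->next = fifo;
                fifo = n;
                n = next;
        }
        return fifo;
}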
314 for (node = llist_del_all(&queue->req_list); node; node = node->next) { in nvme_tcp_process_req_list()
316 list_add(&req->entry, &queue->send_list); in nvme_tcp_process_req_list()
325 req = list_first_entry_or_null(&queue->send_list, in nvme_tcp_fetch_request()
329 req = list_first_entry_or_null(&queue->send_list, in nvme_tcp_fetch_request()
335 list_del(&req->entry); in nvme_tcp_fetch_request()
347 struct page *page, off_t off, size_t len) in nvme_tcp_ddgst_update() argument
352 sg_set_page(&sg, page, len, off); in nvme_tcp_ddgst_update()
353 ahash_request_set_crypt(hash, &sg, NULL, len); in nvme_tcp_ddgst_update()
358 void *pdu, size_t len) in nvme_tcp_hdgst() argument
362 sg_init_one(&sg, pdu, len); in nvme_tcp_hdgst()
363 ahash_request_set_crypt(hash, &sg, pdu + len, len); in nvme_tcp_hdgst()
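Both digest helpers drive a crc32c transform: NVMe/TCP's HDGST is a CRC32C computed over the PDU header and stored immediately after it, which is why nvme_tcp_hdgst() points the ahash result at pdu + len. A self-contained userspace sketch of that step (bitwise CRC32C; the append assumes a little-endian host for brevity):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

static uint32_t crc32c(const uint8_t *p, size_t len)
{
        uint32_t crc = 0xffffffff;      /* CRC32C: reflected, init all-ones */

        while (len--) {
                crc ^= *p++;
                for (int k = 0; k < 8; k++)
                        crc = (crc >> 1) ^ (0x82f63b78 & -(crc & 1));
        }
        return ~crc;                    /* final inversion */
}

void append_hdgst(uint8_t *pdu, size_t hlen)
{
        uint32_t d = crc32c(pdu, hlen);

        memcpy(pdu + hlen, &d, sizeof(d));      /* digest sits at pdu + hlen;
                                                   little-endian host assumed */
}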
374 if (unlikely(!(hdr->flags & NVME_TCP_F_HDGST))) { in nvme_tcp_verify_hdgst()
375 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_verify_hdgst()
378 return -EPROTO; in nvme_tcp_verify_hdgst()
381 recv_digest = *(__le32 *)(pdu + hdr->hlen); in nvme_tcp_verify_hdgst()
382 nvme_tcp_hdgst(queue->rcv_hash, pdu, pdu_len); in nvme_tcp_verify_hdgst()
383 exp_digest = *(__le32 *)(pdu + hdr->hlen); in nvme_tcp_verify_hdgst()
385 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_verify_hdgst()
388 return -EIO; in nvme_tcp_verify_hdgst()
398 u32 len; in nvme_tcp_check_ddgst() local
400 len = le32_to_cpu(hdr->plen) - hdr->hlen - in nvme_tcp_check_ddgst()
401 ((hdr->flags & NVME_TCP_F_HDGST) ? digest_len : 0); in nvme_tcp_check_ddgst()
403 if (unlikely(len && !(hdr->flags & NVME_TCP_F_DDGST))) { in nvme_tcp_check_ddgst()
404 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_check_ddgst()
407 return -EPROTO; in nvme_tcp_check_ddgst()
409 crypto_ahash_init(queue->rcv_hash); in nvme_tcp_check_ddgst()
419 page_frag_free(req->pdu); in nvme_tcp_exit_request()
426 struct nvme_tcp_ctrl *ctrl = set->driver_data; in nvme_tcp_init_request() local
429 int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0; in nvme_tcp_init_request()
430 struct nvme_tcp_queue *queue = &ctrl->queues[queue_idx]; in nvme_tcp_init_request()
433 req->pdu = page_frag_alloc(&queue->pf_cache, in nvme_tcp_init_request()
436 if (!req->pdu) in nvme_tcp_init_request()
437 return -ENOMEM; in nvme_tcp_init_request()
439 pdu = req->pdu; in nvme_tcp_init_request()
440 req->queue = queue; in nvme_tcp_init_request()
441 nvme_req(rq)->ctrl = &ctrl->ctrl; in nvme_tcp_init_request()
442 nvme_req(rq)->cmd = &pdu->cmd; in nvme_tcp_init_request()
450 struct nvme_tcp_ctrl *ctrl = data; in nvme_tcp_init_hctx() local
451 struct nvme_tcp_queue *queue = &ctrl->queues[hctx_idx + 1]; in nvme_tcp_init_hctx()
453 hctx->driver_data = queue; in nvme_tcp_init_hctx()
460 struct nvme_tcp_ctrl *ctrl = data; in nvme_tcp_init_admin_hctx() local
461 struct nvme_tcp_queue *queue = &ctrl->queues[0]; in nvme_tcp_init_admin_hctx()
463 hctx->driver_data = queue; in nvme_tcp_init_admin_hctx()
470 return (queue->pdu_remaining) ? NVME_TCP_RECV_PDU : in nvme_tcp_recv_state()
471 (queue->ddgst_remaining) ? NVME_TCP_RECV_DDGST : in nvme_tcp_recv_state()
477 queue->pdu_remaining = sizeof(struct nvme_tcp_rsp_pdu) + in nvme_tcp_init_recv_ctx()
479 queue->pdu_offset = 0; in nvme_tcp_init_recv_ctx()
480 queue->data_remaining = -1; in nvme_tcp_init_recv_ctx()
481 queue->ddgst_remaining = 0; in nvme_tcp_init_recv_ctx()
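nvme_tcp_recv_state() shows that the receive path stores no explicit state variable at all: the state is derived from whichever countdown is still pending, and nvme_tcp_init_recv_ctx() re-arms those countdowns for the next PDU. A compact sketch of the derivation, with illustrative names:

enum recv_state { RECV_PDU, RECV_DATA, RECV_DDGST };

struct recv_ctx {
        int pdu_remaining;      /* PDU header bytes still expected */
        int data_remaining;     /* payload bytes still expected */
        int ddgst_remaining;    /* data-digest bytes still expected */
};

enum recv_state recv_state(const struct recv_ctx *c)
{
        /* no stored state: whichever countdown is pending wins */
        return c->pdu_remaining ? RECV_PDU :
               c->ddgst_remaining ? RECV_DDGST : RECV_DATA;
}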
484 static void nvme_tcp_error_recovery(struct nvme_ctrl *ctrl) in nvme_tcp_error_recovery() argument
486 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING)) in nvme_tcp_error_recovery()
489 dev_warn(ctrl->device, "starting error recovery\n"); in nvme_tcp_error_recovery()
490 queue_work(nvme_reset_wq, &to_tcp_ctrl(ctrl)->err_work); in nvme_tcp_error_recovery()
499 rq = nvme_find_rq(nvme_tcp_tagset(queue), cqe->command_id); in nvme_tcp_process_nvme_cqe()
501 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_process_nvme_cqe()
503 cqe->command_id, nvme_tcp_queue_id(queue)); in nvme_tcp_process_nvme_cqe()
504 nvme_tcp_error_recovery(&queue->ctrl->ctrl); in nvme_tcp_process_nvme_cqe()
505 return -EINVAL; in nvme_tcp_process_nvme_cqe()
509 if (req->status == cpu_to_le16(NVME_SC_SUCCESS)) in nvme_tcp_process_nvme_cqe()
510 req->status = cqe->status; in nvme_tcp_process_nvme_cqe()
512 if (!nvme_try_complete_req(rq, req->status, cqe->result)) in nvme_tcp_process_nvme_cqe()
514 queue->nr_cqe++; in nvme_tcp_process_nvme_cqe()
524 rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id); in nvme_tcp_handle_c2h_data()
526 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_handle_c2h_data()
528 pdu->command_id, nvme_tcp_queue_id(queue)); in nvme_tcp_handle_c2h_data()
529 return -ENOENT; in nvme_tcp_handle_c2h_data()
533 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_handle_c2h_data()
535 nvme_tcp_queue_id(queue), rq->tag); in nvme_tcp_handle_c2h_data()
536 return -EIO; in nvme_tcp_handle_c2h_data()
539 queue->data_remaining = le32_to_cpu(pdu->data_length); in nvme_tcp_handle_c2h_data()
541 if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS && in nvme_tcp_handle_c2h_data()
542 unlikely(!(pdu->hdr.flags & NVME_TCP_F_DATA_LAST))) { in nvme_tcp_handle_c2h_data()
543 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_handle_c2h_data()
545 nvme_tcp_queue_id(queue), rq->tag); in nvme_tcp_handle_c2h_data()
546 nvme_tcp_error_recovery(&queue->ctrl->ctrl); in nvme_tcp_handle_c2h_data()
547 return -EPROTO; in nvme_tcp_handle_c2h_data()
556 struct nvme_completion *cqe = &pdu->cqe; in nvme_tcp_handle_comp()
566 cqe->command_id))) in nvme_tcp_handle_comp()
567 nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status, in nvme_tcp_handle_comp()
568 &cqe->result); in nvme_tcp_handle_comp()
578 struct nvme_tcp_data_pdu *data = req->pdu; in nvme_tcp_setup_h2c_data_pdu()
579 struct nvme_tcp_queue *queue = req->queue; in nvme_tcp_setup_h2c_data_pdu()
584 req->pdu_len = le32_to_cpu(pdu->r2t_length); in nvme_tcp_setup_h2c_data_pdu()
585 req->pdu_sent = 0; in nvme_tcp_setup_h2c_data_pdu()
587 if (unlikely(!req->pdu_len)) { in nvme_tcp_setup_h2c_data_pdu()
588 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_setup_h2c_data_pdu()
589 "req %d r2t len is %u, probably a bug...\n", in nvme_tcp_setup_h2c_data_pdu()
590 rq->tag, req->pdu_len); in nvme_tcp_setup_h2c_data_pdu()
591 return -EPROTO; in nvme_tcp_setup_h2c_data_pdu()
594 if (unlikely(req->data_sent + req->pdu_len > req->data_len)) { in nvme_tcp_setup_h2c_data_pdu()
595 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_setup_h2c_data_pdu()
596 "req %d r2t len %u exceeded data len %u (%zu sent)\n", in nvme_tcp_setup_h2c_data_pdu()
597 rq->tag, req->pdu_len, req->data_len, in nvme_tcp_setup_h2c_data_pdu()
598 req->data_sent); in nvme_tcp_setup_h2c_data_pdu()
599 return -EPROTO; in nvme_tcp_setup_h2c_data_pdu()
602 if (unlikely(le32_to_cpu(pdu->r2t_offset) < req->data_sent)) { in nvme_tcp_setup_h2c_data_pdu()
603 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_setup_h2c_data_pdu()
605 rq->tag, le32_to_cpu(pdu->r2t_offset), in nvme_tcp_setup_h2c_data_pdu()
606 req->data_sent); in nvme_tcp_setup_h2c_data_pdu()
607 return -EPROTO; in nvme_tcp_setup_h2c_data_pdu()
611 data->hdr.type = nvme_tcp_h2c_data; in nvme_tcp_setup_h2c_data_pdu()
612 data->hdr.flags = NVME_TCP_F_DATA_LAST; in nvme_tcp_setup_h2c_data_pdu()
613 if (queue->hdr_digest) in nvme_tcp_setup_h2c_data_pdu()
614 data->hdr.flags |= NVME_TCP_F_HDGST; in nvme_tcp_setup_h2c_data_pdu()
615 if (queue->data_digest) in nvme_tcp_setup_h2c_data_pdu()
616 data->hdr.flags |= NVME_TCP_F_DDGST; in nvme_tcp_setup_h2c_data_pdu()
617 data->hdr.hlen = sizeof(*data); in nvme_tcp_setup_h2c_data_pdu()
618 data->hdr.pdo = data->hdr.hlen + hdgst; in nvme_tcp_setup_h2c_data_pdu()
619 data->hdr.plen = in nvme_tcp_setup_h2c_data_pdu()
620 cpu_to_le32(data->hdr.hlen + hdgst + req->pdu_len + ddgst); in nvme_tcp_setup_h2c_data_pdu()
621 data->ttag = pdu->ttag; in nvme_tcp_setup_h2c_data_pdu()
622 data->command_id = nvme_cid(rq); in nvme_tcp_setup_h2c_data_pdu()
623 data->data_offset = pdu->r2t_offset; in nvme_tcp_setup_h2c_data_pdu()
624 data->data_length = cpu_to_le32(req->pdu_len); in nvme_tcp_setup_h2c_data_pdu()
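The length fields set at the bottom of nvme_tcp_setup_h2c_data_pdu() follow the PDU framing rule plen = hlen + hdgst + payload + ddgst, with pdo (payload data offset) landing right after the header digest. A worked example, assuming the 24-byte nvme_tcp_data_pdu header and both 4-byte digests negotiated:

#include <stdio.h>

int main(void)
{
        unsigned hlen  = 24;    /* sizeof(struct nvme_tcp_data_pdu) */
        unsigned hdgst = 4;     /* header digest length, if negotiated */
        unsigned ddgst = 4;     /* data digest length, if negotiated */
        unsigned r2t_len = 8192; /* payload span the target asked for */

        unsigned pdo  = hlen + hdgst;                   /* data offset */
        unsigned plen = hlen + hdgst + r2t_len + ddgst; /* whole PDU */

        printf("pdo=%u plen=%u\n", pdo, plen);  /* pdo=28 plen=8224 */
        return 0;
}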
635 rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id); in nvme_tcp_handle_r2t()
637 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_handle_r2t()
639 pdu->command_id, nvme_tcp_queue_id(queue)); in nvme_tcp_handle_r2t()
640 return -ENOENT; in nvme_tcp_handle_r2t()
648 req->state = NVME_TCP_SEND_H2C_PDU; in nvme_tcp_handle_r2t()
649 req->offset = 0; in nvme_tcp_handle_r2t()
657 unsigned int *offset, size_t *len) in nvme_tcp_recv_pdu() argument
660 char *pdu = queue->pdu; in nvme_tcp_recv_pdu()
661 size_t rcv_len = min_t(size_t, *len, queue->pdu_remaining); in nvme_tcp_recv_pdu()
665 &pdu[queue->pdu_offset], rcv_len); in nvme_tcp_recv_pdu()
669 queue->pdu_remaining -= rcv_len; in nvme_tcp_recv_pdu()
670 queue->pdu_offset += rcv_len; in nvme_tcp_recv_pdu()
672 *len -= rcv_len; in nvme_tcp_recv_pdu()
673 if (queue->pdu_remaining) in nvme_tcp_recv_pdu()
676 hdr = queue->pdu; in nvme_tcp_recv_pdu()
677 if (queue->hdr_digest) { in nvme_tcp_recv_pdu()
678 ret = nvme_tcp_verify_hdgst(queue, queue->pdu, hdr->hlen); in nvme_tcp_recv_pdu()
684 if (queue->data_digest) { in nvme_tcp_recv_pdu()
685 ret = nvme_tcp_check_ddgst(queue, queue->pdu); in nvme_tcp_recv_pdu()
690 switch (hdr->type) { in nvme_tcp_recv_pdu()
692 return nvme_tcp_handle_c2h_data(queue, (void *)queue->pdu); in nvme_tcp_recv_pdu()
695 return nvme_tcp_handle_comp(queue, (void *)queue->pdu); in nvme_tcp_recv_pdu()
698 return nvme_tcp_handle_r2t(queue, (void *)queue->pdu); in nvme_tcp_recv_pdu()
700 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_recv_pdu()
701 "unsupported pdu type (%d)\n", hdr->type); in nvme_tcp_recv_pdu()
702 return -EINVAL; in nvme_tcp_recv_pdu()
715 unsigned int *offset, size_t *len) in nvme_tcp_recv_data() argument
717 struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu; in nvme_tcp_recv_data()
719 nvme_cid_to_rq(nvme_tcp_tagset(queue), pdu->command_id); in nvme_tcp_recv_data()
725 recv_len = min_t(size_t, *len, queue->data_remaining); in nvme_tcp_recv_data()
729 if (!iov_iter_count(&req->iter)) { in nvme_tcp_recv_data()
730 req->curr_bio = req->curr_bio->bi_next; in nvme_tcp_recv_data()
736 if (!req->curr_bio) { in nvme_tcp_recv_data()
737 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_recv_data()
739 nvme_tcp_queue_id(queue), rq->tag); in nvme_tcp_recv_data()
741 return -EIO; in nvme_tcp_recv_data()
748 iov_iter_count(&req->iter)); in nvme_tcp_recv_data()
750 if (queue->data_digest) in nvme_tcp_recv_data()
752 &req->iter, recv_len, queue->rcv_hash); in nvme_tcp_recv_data()
755 &req->iter, recv_len); in nvme_tcp_recv_data()
757 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_recv_data()
759 nvme_tcp_queue_id(queue), rq->tag); in nvme_tcp_recv_data()
763 *len -= recv_len; in nvme_tcp_recv_data()
765 queue->data_remaining -= recv_len; in nvme_tcp_recv_data()
768 if (!queue->data_remaining) { in nvme_tcp_recv_data()
769 if (queue->data_digest) { in nvme_tcp_recv_data()
770 nvme_tcp_ddgst_final(queue->rcv_hash, &queue->exp_ddgst); in nvme_tcp_recv_data()
771 queue->ddgst_remaining = NVME_TCP_DIGEST_LENGTH; in nvme_tcp_recv_data()
773 if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) { in nvme_tcp_recv_data()
775 le16_to_cpu(req->status)); in nvme_tcp_recv_data()
776 queue->nr_cqe++; in nvme_tcp_recv_data()
786 struct sk_buff *skb, unsigned int *offset, size_t *len) in nvme_tcp_recv_ddgst() argument
788 struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu; in nvme_tcp_recv_ddgst()
789 char *ddgst = (char *)&queue->recv_ddgst; in nvme_tcp_recv_ddgst()
790 size_t recv_len = min_t(size_t, *len, queue->ddgst_remaining); in nvme_tcp_recv_ddgst()
791 off_t off = NVME_TCP_DIGEST_LENGTH - queue->ddgst_remaining; in nvme_tcp_recv_ddgst()
798 queue->ddgst_remaining -= recv_len; in nvme_tcp_recv_ddgst()
800 *len -= recv_len; in nvme_tcp_recv_ddgst()
801 if (queue->ddgst_remaining) in nvme_tcp_recv_ddgst()
804 if (queue->recv_ddgst != queue->exp_ddgst) { in nvme_tcp_recv_ddgst()
806 pdu->command_id); in nvme_tcp_recv_ddgst()
809 req->status = cpu_to_le16(NVME_SC_DATA_XFER_ERROR); in nvme_tcp_recv_ddgst()
811 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_recv_ddgst()
813 le32_to_cpu(queue->recv_ddgst), in nvme_tcp_recv_ddgst()
814 le32_to_cpu(queue->exp_ddgst)); in nvme_tcp_recv_ddgst()
817 if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) { in nvme_tcp_recv_ddgst()
819 pdu->command_id); in nvme_tcp_recv_ddgst()
822 nvme_tcp_end_request(rq, le16_to_cpu(req->status)); in nvme_tcp_recv_ddgst()
823 queue->nr_cqe++; in nvme_tcp_recv_ddgst()
831 unsigned int offset, size_t len) in nvme_tcp_recv_skb() argument
833 struct nvme_tcp_queue *queue = desc->arg.data; in nvme_tcp_recv_skb()
834 size_t consumed = len; in nvme_tcp_recv_skb()
837 while (len) { in nvme_tcp_recv_skb()
840 result = nvme_tcp_recv_pdu(queue, skb, &offset, &len); in nvme_tcp_recv_skb()
843 result = nvme_tcp_recv_data(queue, skb, &offset, &len); in nvme_tcp_recv_skb()
846 result = nvme_tcp_recv_ddgst(queue, skb, &offset, &len); in nvme_tcp_recv_skb()
849 result = -EFAULT; in nvme_tcp_recv_skb()
852 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_recv_skb()
854 queue->rd_enabled = false; in nvme_tcp_recv_skb()
855 nvme_tcp_error_recovery(&queue->ctrl->ctrl); in nvme_tcp_recv_skb()
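nvme_tcp_recv_skb() is the read_sock callback: it walks one skb in a single pass, handing the remaining bytes to whichever stage nvme_tcp_recv_state() selects until the chunk is consumed, and any negative stage return disables reads and kicks error recovery. A schematic userspace model of that dispatch loop (the stage function type and names here are illustrative, not the driver's):

#include <stddef.h>

enum recv_state { RECV_PDU, RECV_DATA, RECV_DDGST };

/* Each stage consumes up to *len bytes, advancing *off; a negative return
 * is fatal, as with nvme_tcp_recv_pdu()/_data()/_ddgst(). */
typedef int (*stage_fn)(const char *buf, size_t *off, size_t *len);

int recv_bytes(enum recv_state (*cur_state)(void), stage_fn stage[3],
               const char *buf, size_t len)
{
        size_t off = 0, consumed = len;

        while (len) {
                int ret = stage[cur_state()](buf, &off, &len);

                if (ret < 0)
                        return ret;     /* caller tears the queue down */
        }
        return (int)consumed;           /* tell TCP how much was taken */
}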
867 read_lock_bh(&sk->sk_callback_lock); in nvme_tcp_data_ready()
868 queue = sk->sk_user_data; in nvme_tcp_data_ready()
869 if (likely(queue && queue->rd_enabled) && in nvme_tcp_data_ready()
870 !test_bit(NVME_TCP_Q_POLLING, &queue->flags)) in nvme_tcp_data_ready()
871 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work); in nvme_tcp_data_ready()
872 read_unlock_bh(&sk->sk_callback_lock); in nvme_tcp_data_ready()
879 read_lock_bh(&sk->sk_callback_lock); in nvme_tcp_write_space()
880 queue = sk->sk_user_data; in nvme_tcp_write_space()
882 clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags); in nvme_tcp_write_space()
883 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work); in nvme_tcp_write_space()
885 read_unlock_bh(&sk->sk_callback_lock); in nvme_tcp_write_space()
892 read_lock_bh(&sk->sk_callback_lock); in nvme_tcp_state_change()
893 queue = sk->sk_user_data; in nvme_tcp_state_change()
897 switch (sk->sk_state) { in nvme_tcp_state_change()
903 nvme_tcp_error_recovery(&queue->ctrl->ctrl); in nvme_tcp_state_change()
906 dev_info(queue->ctrl->ctrl.device, in nvme_tcp_state_change()
908 nvme_tcp_queue_id(queue), sk->sk_state); in nvme_tcp_state_change()
911 queue->state_change(sk); in nvme_tcp_state_change()
913 read_unlock_bh(&sk->sk_callback_lock); in nvme_tcp_state_change()
918 queue->request = NULL; in nvme_tcp_done_send_req()
928 struct nvme_tcp_queue *queue = req->queue; in nvme_tcp_try_send_data()
929 int req_data_len = req->data_len; in nvme_tcp_try_send_data()
934 size_t len = nvme_tcp_req_cur_length(req); in nvme_tcp_try_send_data() local
935 bool last = nvme_tcp_pdu_last_send(req, len); in nvme_tcp_try_send_data()
936 int req_data_sent = req->data_sent; in nvme_tcp_try_send_data()
939 if (last && !queue->data_digest && !nvme_tcp_queue_more(queue)) in nvme_tcp_try_send_data()
945 ret = kernel_sendpage(queue->sock, page, offset, len, in nvme_tcp_try_send_data()
948 ret = sock_no_sendpage(queue->sock, page, offset, len, in nvme_tcp_try_send_data()
954 if (queue->data_digest) in nvme_tcp_try_send_data()
955 nvme_tcp_ddgst_update(queue->snd_hash, page, in nvme_tcp_try_send_data()
967 if (last && ret == len) { in nvme_tcp_try_send_data()
968 if (queue->data_digest) { in nvme_tcp_try_send_data()
969 nvme_tcp_ddgst_final(queue->snd_hash, in nvme_tcp_try_send_data()
970 &req->ddgst); in nvme_tcp_try_send_data()
971 req->state = NVME_TCP_SEND_DDGST; in nvme_tcp_try_send_data()
972 req->offset = 0; in nvme_tcp_try_send_data()
979 return -EAGAIN; in nvme_tcp_try_send_data()
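The test at the top of nvme_tcp_try_send_data()'s loop (last chunk, no data digest pending, nothing else queued) decides whether the stream should be pushed immediately or batched with what follows. A hedged sketch of that decision using the standard MSG_* flags; the driver's exact flag combination is elided from this listing, so treat this as an approximation:

#include <sys/socket.h>

int send_flags(int last, int data_digest, int queue_more)
{
        int flags = MSG_DONTWAIT;       /* never block the io_work context */

        if (last && !data_digest && !queue_more)
                flags |= MSG_EOR;       /* nothing follows: push the segment */
        else
                flags |= MSG_MORE;      /* more coming: let TCP coalesce */
        return flags;
}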
984 struct nvme_tcp_queue *queue = req->queue; in nvme_tcp_try_send_cmd_pdu()
985 struct nvme_tcp_cmd_pdu *pdu = req->pdu; in nvme_tcp_try_send_cmd_pdu()
988 int len = sizeof(*pdu) + hdgst - req->offset; in nvme_tcp_try_send_cmd_pdu() local
997 if (queue->hdr_digest && !req->offset) in nvme_tcp_try_send_cmd_pdu()
998 nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu)); in nvme_tcp_try_send_cmd_pdu()
1000 ret = kernel_sendpage(queue->sock, virt_to_page(pdu), in nvme_tcp_try_send_cmd_pdu()
1001 offset_in_page(pdu) + req->offset, len, flags); in nvme_tcp_try_send_cmd_pdu()
1005 len -= ret; in nvme_tcp_try_send_cmd_pdu()
1006 if (!len) { in nvme_tcp_try_send_cmd_pdu()
1008 req->state = NVME_TCP_SEND_DATA; in nvme_tcp_try_send_cmd_pdu()
1009 if (queue->data_digest) in nvme_tcp_try_send_cmd_pdu()
1010 crypto_ahash_init(queue->snd_hash); in nvme_tcp_try_send_cmd_pdu()
1016 req->offset += ret; in nvme_tcp_try_send_cmd_pdu()
1018 return -EAGAIN; in nvme_tcp_try_send_cmd_pdu()
1023 struct nvme_tcp_queue *queue = req->queue; in nvme_tcp_try_send_data_pdu()
1024 struct nvme_tcp_data_pdu *pdu = req->pdu; in nvme_tcp_try_send_data_pdu()
1026 int len = sizeof(*pdu) - req->offset + hdgst; in nvme_tcp_try_send_data_pdu() local
1029 if (queue->hdr_digest && !req->offset) in nvme_tcp_try_send_data_pdu()
1030 nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu)); in nvme_tcp_try_send_data_pdu()
1032 ret = kernel_sendpage(queue->sock, virt_to_page(pdu), in nvme_tcp_try_send_data_pdu()
1033 offset_in_page(pdu) + req->offset, len, in nvme_tcp_try_send_data_pdu()
1038 len -= ret; in nvme_tcp_try_send_data_pdu()
1039 if (!len) { in nvme_tcp_try_send_data_pdu()
1040 req->state = NVME_TCP_SEND_DATA; in nvme_tcp_try_send_data_pdu()
1041 if (queue->data_digest) in nvme_tcp_try_send_data_pdu()
1042 crypto_ahash_init(queue->snd_hash); in nvme_tcp_try_send_data_pdu()
1045 req->offset += ret; in nvme_tcp_try_send_data_pdu()
1047 return -EAGAIN; in nvme_tcp_try_send_data_pdu()
1052 struct nvme_tcp_queue *queue = req->queue; in nvme_tcp_try_send_ddgst()
1053 size_t offset = req->offset; in nvme_tcp_try_send_ddgst()
1057 .iov_base = (u8 *)&req->ddgst + req->offset, in nvme_tcp_try_send_ddgst()
1058 .iov_len = NVME_TCP_DIGEST_LENGTH - req->offset in nvme_tcp_try_send_ddgst()
1066 ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len); in nvme_tcp_try_send_ddgst()
1075 req->offset += ret; in nvme_tcp_try_send_ddgst()
1076 return -EAGAIN; in nvme_tcp_try_send_ddgst()
1084 if (!queue->request) { in nvme_tcp_try_send()
1085 queue->request = nvme_tcp_fetch_request(queue); in nvme_tcp_try_send()
1086 if (!queue->request) in nvme_tcp_try_send()
1089 req = queue->request; in nvme_tcp_try_send()
1091 if (req->state == NVME_TCP_SEND_CMD_PDU) { in nvme_tcp_try_send()
1099 if (req->state == NVME_TCP_SEND_H2C_PDU) { in nvme_tcp_try_send()
1105 if (req->state == NVME_TCP_SEND_DATA) { in nvme_tcp_try_send()
1111 if (req->state == NVME_TCP_SEND_DDGST) in nvme_tcp_try_send()
1114 if (ret == -EAGAIN) { in nvme_tcp_try_send()
1117 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_try_send()
1119 if (ret != -EPIPE && ret != -ECONNRESET) in nvme_tcp_try_send()
1120 nvme_tcp_fail_request(queue->request); in nvme_tcp_try_send()
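nvme_tcp_try_send() advances one request through its per-request state machine, returning -EAGAIN on short writes so the caller retries, and failing the request on hard socket errors other than -EPIPE/-ECONNRESET (those are left for the receive path to observe). A simplified sketch of the state progression, with error and partial-send handling omitted:

enum send_state {
        SEND_CMD_PDU,   /* command capsule (plus inline data for writes) */
        SEND_H2C_PDU,   /* data PDU answering a target R2T */
        SEND_DATA,      /* payload pages */
        SEND_DDGST,     /* trailing data digest, if negotiated */
        SEND_DONE,
};

enum send_state send_next(enum send_state s, int has_data, int data_digest)
{
        switch (s) {
        case SEND_CMD_PDU:
                return has_data ? SEND_DATA : SEND_DONE; /* reads end here */
        case SEND_H2C_PDU:
                return SEND_DATA;
        case SEND_DATA:
                return data_digest ? SEND_DDGST : SEND_DONE;
        default:
                return SEND_DONE;
        }
}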
1128 struct socket *sock = queue->sock; in nvme_tcp_try_recv()
1129 struct sock *sk = sock->sk; in nvme_tcp_try_recv()
1136 queue->nr_cqe = 0; in nvme_tcp_try_recv()
1137 consumed = sock->ops->read_sock(sk, &rd_desc, nvme_tcp_recv_skb); in nvme_tcp_try_recv()
1152 if (mutex_trylock(&queue->send_mutex)) { in nvme_tcp_io_work()
1154 mutex_unlock(&queue->send_mutex); in nvme_tcp_io_work()
1172 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work); in nvme_tcp_io_work()
1177 struct crypto_ahash *tfm = crypto_ahash_reqtfm(queue->rcv_hash); in nvme_tcp_free_crypto()
1179 ahash_request_free(queue->rcv_hash); in nvme_tcp_free_crypto()
1180 ahash_request_free(queue->snd_hash); in nvme_tcp_free_crypto()
1192 queue->snd_hash = ahash_request_alloc(tfm, GFP_KERNEL); in nvme_tcp_alloc_crypto()
1193 if (!queue->snd_hash) in nvme_tcp_alloc_crypto()
1195 ahash_request_set_callback(queue->snd_hash, 0, NULL, NULL); in nvme_tcp_alloc_crypto()
1197 queue->rcv_hash = ahash_request_alloc(tfm, GFP_KERNEL); in nvme_tcp_alloc_crypto()
1198 if (!queue->rcv_hash) in nvme_tcp_alloc_crypto()
1200 ahash_request_set_callback(queue->rcv_hash, 0, NULL, NULL); in nvme_tcp_alloc_crypto()
1204 ahash_request_free(queue->snd_hash); in nvme_tcp_alloc_crypto()
1207 return -ENOMEM; in nvme_tcp_alloc_crypto()
1210 static void nvme_tcp_free_async_req(struct nvme_tcp_ctrl *ctrl) in nvme_tcp_free_async_req() argument
1212 struct nvme_tcp_request *async = &ctrl->async_req; in nvme_tcp_free_async_req()
1214 page_frag_free(async->pdu); in nvme_tcp_free_async_req()
1217 static int nvme_tcp_alloc_async_req(struct nvme_tcp_ctrl *ctrl) in nvme_tcp_alloc_async_req() argument
1219 struct nvme_tcp_queue *queue = &ctrl->queues[0]; in nvme_tcp_alloc_async_req()
1220 struct nvme_tcp_request *async = &ctrl->async_req; in nvme_tcp_alloc_async_req()
1223 async->pdu = page_frag_alloc(&queue->pf_cache, in nvme_tcp_alloc_async_req()
1226 if (!async->pdu) in nvme_tcp_alloc_async_req()
1227 return -ENOMEM; in nvme_tcp_alloc_async_req()
1229 async->queue = &ctrl->queues[0]; in nvme_tcp_alloc_async_req()
1235 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl); in nvme_tcp_free_queue() local
1236 struct nvme_tcp_queue *queue = &ctrl->queues[qid]; in nvme_tcp_free_queue()
1238 if (!test_and_clear_bit(NVME_TCP_Q_ALLOCATED, &queue->flags)) in nvme_tcp_free_queue()
1241 if (queue->hdr_digest || queue->data_digest) in nvme_tcp_free_queue()
1244 sock_release(queue->sock); in nvme_tcp_free_queue()
1245 kfree(queue->pdu); in nvme_tcp_free_queue()
1246 mutex_destroy(&queue->send_mutex); in nvme_tcp_free_queue()
1247 mutex_destroy(&queue->queue_lock); in nvme_tcp_free_queue()
1261 return -ENOMEM; in nvme_tcp_init_connection()
1265 ret = -ENOMEM; in nvme_tcp_init_connection()
1269 icreq->hdr.type = nvme_tcp_icreq; in nvme_tcp_init_connection()
1270 icreq->hdr.hlen = sizeof(*icreq); in nvme_tcp_init_connection()
1271 icreq->hdr.pdo = 0; in nvme_tcp_init_connection()
1272 icreq->hdr.plen = cpu_to_le32(icreq->hdr.hlen); in nvme_tcp_init_connection()
1273 icreq->pfv = cpu_to_le16(NVME_TCP_PFV_1_0); in nvme_tcp_init_connection()
1274 icreq->maxr2t = 0; /* single inflight r2t supported */ in nvme_tcp_init_connection()
1275 icreq->hpda = 0; /* no alignment constraint */ in nvme_tcp_init_connection()
1276 if (queue->hdr_digest) in nvme_tcp_init_connection()
1277 icreq->digest |= NVME_TCP_HDR_DIGEST_ENABLE; in nvme_tcp_init_connection()
1278 if (queue->data_digest) in nvme_tcp_init_connection()
1279 icreq->digest |= NVME_TCP_DATA_DIGEST_ENABLE; in nvme_tcp_init_connection()
1283 ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len); in nvme_tcp_init_connection()
1290 ret = kernel_recvmsg(queue->sock, &msg, &iov, 1, in nvme_tcp_init_connection()
1295 ret = -EINVAL; in nvme_tcp_init_connection()
1296 if (icresp->hdr.type != nvme_tcp_icresp) { in nvme_tcp_init_connection()
1298 nvme_tcp_queue_id(queue), icresp->hdr.type); in nvme_tcp_init_connection()
1302 if (le32_to_cpu(icresp->hdr.plen) != sizeof(*icresp)) { in nvme_tcp_init_connection()
1304 nvme_tcp_queue_id(queue), icresp->hdr.plen); in nvme_tcp_init_connection()
1308 if (icresp->pfv != NVME_TCP_PFV_1_0) { in nvme_tcp_init_connection()
1310 nvme_tcp_queue_id(queue), icresp->pfv); in nvme_tcp_init_connection()
1314 ctrl_ddgst = !!(icresp->digest & NVME_TCP_DATA_DIGEST_ENABLE); in nvme_tcp_init_connection()
1315 if ((queue->data_digest && !ctrl_ddgst) || in nvme_tcp_init_connection()
1316 (!queue->data_digest && ctrl_ddgst)) { in nvme_tcp_init_connection()
1317 pr_err("queue %d: data digest mismatch host: %s ctrl: %s\n", in nvme_tcp_init_connection()
1319 queue->data_digest ? "enabled" : "disabled", in nvme_tcp_init_connection()
1324 ctrl_hdgst = !!(icresp->digest & NVME_TCP_HDR_DIGEST_ENABLE); in nvme_tcp_init_connection()
1325 if ((queue->hdr_digest && !ctrl_hdgst) || in nvme_tcp_init_connection()
1326 (!queue->hdr_digest && ctrl_hdgst)) { in nvme_tcp_init_connection()
1327 pr_err("queue %d: header digest mismatch host: %s ctrl: %s\n", in nvme_tcp_init_connection()
1329 queue->hdr_digest ? "enabled" : "disabled", in nvme_tcp_init_connection()
1334 if (icresp->cpda != 0) { in nvme_tcp_init_connection()
1336 nvme_tcp_queue_id(queue), icresp->cpda); in nvme_tcp_init_connection()
1355 struct nvme_tcp_ctrl *ctrl = queue->ctrl; in nvme_tcp_default_queue() local
1359 qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT]; in nvme_tcp_default_queue()
1364 struct nvme_tcp_ctrl *ctrl = queue->ctrl; in nvme_tcp_read_queue() local
1369 qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT] + in nvme_tcp_read_queue()
1370 ctrl->io_queues[HCTX_TYPE_READ]; in nvme_tcp_read_queue()
1375 struct nvme_tcp_ctrl *ctrl = queue->ctrl; in nvme_tcp_poll_queue() local
1381 qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT] + in nvme_tcp_poll_queue()
1382 ctrl->io_queues[HCTX_TYPE_READ] + in nvme_tcp_poll_queue()
1383 ctrl->io_queues[HCTX_TYPE_POLL]; in nvme_tcp_poll_queue()
1388 struct nvme_tcp_ctrl *ctrl = queue->ctrl; in nvme_tcp_set_queue_io_cpu() local
1393 n = qid - 1; in nvme_tcp_set_queue_io_cpu()
1395 n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] - 1; in nvme_tcp_set_queue_io_cpu()
1397 n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] - in nvme_tcp_set_queue_io_cpu()
1398 ctrl->io_queues[HCTX_TYPE_READ] - 1; in nvme_tcp_set_queue_io_cpu()
1399 queue->io_cpu = cpumask_next_wrap(n - 1, cpu_online_mask, -1, false); in nvme_tcp_set_queue_io_cpu()
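nvme_tcp_set_queue_io_cpu() maps each queue type onto the online CPUs independently: the queue's index is rebased to 0 within its type (default, read, poll), then wrapped round-robin over cpu_online_mask. A worked example with 4 default, 2 read, and 1 poll queue; CPUs are assumed contiguous from 0, so the wrap reduces to a modulo:

#include <stdio.h>

int main(void)
{
        unsigned ndef = 4, nread = 2, ncpus = 4;

        for (unsigned qid = 1; qid <= 7; qid++) {
                unsigned n;

                if (qid <= ndef)                /* default (write) queues */
                        n = qid - 1;
                else if (qid <= ndef + nread)   /* read queues */
                        n = qid - ndef - 1;
                else                            /* poll queues */
                        n = qid - ndef - nread - 1;
                /* cpumask_next_wrap() picks the n-th online CPU, wrapping */
                printf("qid %u -> io_cpu %u\n", qid, n % ncpus);
        }
        return 0;
}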
1405 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl); in nvme_tcp_alloc_queue() local
1406 struct nvme_tcp_queue *queue = &ctrl->queues[qid]; in nvme_tcp_alloc_queue()
1409 mutex_init(&queue->queue_lock); in nvme_tcp_alloc_queue()
1410 queue->ctrl = ctrl; in nvme_tcp_alloc_queue()
1411 init_llist_head(&queue->req_list); in nvme_tcp_alloc_queue()
1412 INIT_LIST_HEAD(&queue->send_list); in nvme_tcp_alloc_queue()
1413 mutex_init(&queue->send_mutex); in nvme_tcp_alloc_queue()
1414 INIT_WORK(&queue->io_work, nvme_tcp_io_work); in nvme_tcp_alloc_queue()
1415 queue->queue_size = queue_size; in nvme_tcp_alloc_queue()
1418 queue->cmnd_capsule_len = nctrl->ioccsz * 16; in nvme_tcp_alloc_queue()
1420 queue->cmnd_capsule_len = sizeof(struct nvme_command) + in nvme_tcp_alloc_queue()
1423 ret = sock_create(ctrl->addr.ss_family, SOCK_STREAM, in nvme_tcp_alloc_queue()
1424 IPPROTO_TCP, &queue->sock); in nvme_tcp_alloc_queue()
1426 dev_err(nctrl->device, in nvme_tcp_alloc_queue()
1432 tcp_sock_set_syncnt(queue->sock->sk, 1); in nvme_tcp_alloc_queue()
1435 tcp_sock_set_nodelay(queue->sock->sk); in nvme_tcp_alloc_queue()
1442 sock_no_linger(queue->sock->sk); in nvme_tcp_alloc_queue()
1445 sock_set_priority(queue->sock->sk, so_priority); in nvme_tcp_alloc_queue()
1448 if (nctrl->opts->tos >= 0) in nvme_tcp_alloc_queue()
1449 ip_sock_set_tos(queue->sock->sk, nctrl->opts->tos); in nvme_tcp_alloc_queue()
1452 queue->sock->sk->sk_rcvtimeo = 10 * HZ; in nvme_tcp_alloc_queue()
1454 queue->sock->sk->sk_allocation = GFP_ATOMIC; in nvme_tcp_alloc_queue()
1456 queue->request = NULL; in nvme_tcp_alloc_queue()
1457 queue->data_remaining = 0; in nvme_tcp_alloc_queue()
1458 queue->ddgst_remaining = 0; in nvme_tcp_alloc_queue()
1459 queue->pdu_remaining = 0; in nvme_tcp_alloc_queue()
1460 queue->pdu_offset = 0; in nvme_tcp_alloc_queue()
1461 sk_set_memalloc(queue->sock->sk); in nvme_tcp_alloc_queue()
1463 if (nctrl->opts->mask & NVMF_OPT_HOST_TRADDR) { in nvme_tcp_alloc_queue()
1464 ret = kernel_bind(queue->sock, (struct sockaddr *)&ctrl->src_addr, in nvme_tcp_alloc_queue()
1465 sizeof(ctrl->src_addr)); in nvme_tcp_alloc_queue()
1467 dev_err(nctrl->device, in nvme_tcp_alloc_queue()
1474 if (nctrl->opts->mask & NVMF_OPT_HOST_IFACE) { in nvme_tcp_alloc_queue()
1475 char *iface = nctrl->opts->host_iface; in nvme_tcp_alloc_queue()
1478 ret = sock_setsockopt(queue->sock, SOL_SOCKET, SO_BINDTODEVICE, in nvme_tcp_alloc_queue()
1481 dev_err(nctrl->device, in nvme_tcp_alloc_queue()
1488 queue->hdr_digest = nctrl->opts->hdr_digest; in nvme_tcp_alloc_queue()
1489 queue->data_digest = nctrl->opts->data_digest; in nvme_tcp_alloc_queue()
1490 if (queue->hdr_digest || queue->data_digest) { in nvme_tcp_alloc_queue()
1493 dev_err(nctrl->device, in nvme_tcp_alloc_queue()
1501 queue->pdu = kmalloc(rcv_pdu_size, GFP_KERNEL); in nvme_tcp_alloc_queue()
1502 if (!queue->pdu) { in nvme_tcp_alloc_queue()
1503 ret = -ENOMEM; in nvme_tcp_alloc_queue()
1507 dev_dbg(nctrl->device, "connecting queue %d\n", in nvme_tcp_alloc_queue()
1510 ret = kernel_connect(queue->sock, (struct sockaddr *)&ctrl->addr, in nvme_tcp_alloc_queue()
1511 sizeof(ctrl->addr), 0); in nvme_tcp_alloc_queue()
1513 dev_err(nctrl->device, in nvme_tcp_alloc_queue()
1522 queue->rd_enabled = true; in nvme_tcp_alloc_queue()
1523 set_bit(NVME_TCP_Q_ALLOCATED, &queue->flags); in nvme_tcp_alloc_queue()
1526 write_lock_bh(&queue->sock->sk->sk_callback_lock); in nvme_tcp_alloc_queue()
1527 queue->sock->sk->sk_user_data = queue; in nvme_tcp_alloc_queue()
1528 queue->state_change = queue->sock->sk->sk_state_change; in nvme_tcp_alloc_queue()
1529 queue->data_ready = queue->sock->sk->sk_data_ready; in nvme_tcp_alloc_queue()
1530 queue->write_space = queue->sock->sk->sk_write_space; in nvme_tcp_alloc_queue()
1531 queue->sock->sk->sk_data_ready = nvme_tcp_data_ready; in nvme_tcp_alloc_queue()
1532 queue->sock->sk->sk_state_change = nvme_tcp_state_change; in nvme_tcp_alloc_queue()
1533 queue->sock->sk->sk_write_space = nvme_tcp_write_space; in nvme_tcp_alloc_queue()
1535 queue->sock->sk->sk_ll_usec = 1; in nvme_tcp_alloc_queue()
1537 write_unlock_bh(&queue->sock->sk->sk_callback_lock); in nvme_tcp_alloc_queue()
1542 kernel_sock_shutdown(queue->sock, SHUT_RDWR); in nvme_tcp_alloc_queue()
1544 kfree(queue->pdu); in nvme_tcp_alloc_queue()
1546 if (queue->hdr_digest || queue->data_digest) in nvme_tcp_alloc_queue()
1549 sock_release(queue->sock); in nvme_tcp_alloc_queue()
1550 queue->sock = NULL; in nvme_tcp_alloc_queue()
1552 mutex_destroy(&queue->send_mutex); in nvme_tcp_alloc_queue()
1553 mutex_destroy(&queue->queue_lock); in nvme_tcp_alloc_queue()
1559 struct socket *sock = queue->sock; in nvme_tcp_restore_sock_calls()
1561 write_lock_bh(&sock->sk->sk_callback_lock); in nvme_tcp_restore_sock_calls()
1562 sock->sk->sk_user_data = NULL; in nvme_tcp_restore_sock_calls()
1563 sock->sk->sk_data_ready = queue->data_ready; in nvme_tcp_restore_sock_calls()
1564 sock->sk->sk_state_change = queue->state_change; in nvme_tcp_restore_sock_calls()
1565 sock->sk->sk_write_space = queue->write_space; in nvme_tcp_restore_sock_calls()
1566 write_unlock_bh(&sock->sk->sk_callback_lock); in nvme_tcp_restore_sock_calls()
1571 kernel_sock_shutdown(queue->sock, SHUT_RDWR); in __nvme_tcp_stop_queue()
1573 cancel_work_sync(&queue->io_work); in __nvme_tcp_stop_queue()
1578 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl); in nvme_tcp_stop_queue() local
1579 struct nvme_tcp_queue *queue = &ctrl->queues[qid]; in nvme_tcp_stop_queue()
1581 mutex_lock(&queue->queue_lock); in nvme_tcp_stop_queue()
1582 if (test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags)) in nvme_tcp_stop_queue()
1584 mutex_unlock(&queue->queue_lock); in nvme_tcp_stop_queue()
1589 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl); in nvme_tcp_start_queue() local
1598 set_bit(NVME_TCP_Q_LIVE, &ctrl->queues[idx].flags); in nvme_tcp_start_queue()
1600 if (test_bit(NVME_TCP_Q_ALLOCATED, &ctrl->queues[idx].flags)) in nvme_tcp_start_queue()
1601 __nvme_tcp_stop_queue(&ctrl->queues[idx]); in nvme_tcp_start_queue()
1602 dev_err(nctrl->device, in nvme_tcp_start_queue()
1611 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl); in nvme_tcp_alloc_tagset() local
1616 set = &ctrl->admin_tag_set; in nvme_tcp_alloc_tagset()
1618 set->ops = &nvme_tcp_admin_mq_ops; in nvme_tcp_alloc_tagset()
1619 set->queue_depth = NVME_AQ_MQ_TAG_DEPTH; in nvme_tcp_alloc_tagset()
1620 set->reserved_tags = NVMF_RESERVED_TAGS; in nvme_tcp_alloc_tagset()
1621 set->numa_node = nctrl->numa_node; in nvme_tcp_alloc_tagset()
1622 set->flags = BLK_MQ_F_BLOCKING; in nvme_tcp_alloc_tagset()
1623 set->cmd_size = sizeof(struct nvme_tcp_request); in nvme_tcp_alloc_tagset()
1624 set->driver_data = ctrl; in nvme_tcp_alloc_tagset()
1625 set->nr_hw_queues = 1; in nvme_tcp_alloc_tagset()
1626 set->timeout = NVME_ADMIN_TIMEOUT; in nvme_tcp_alloc_tagset()
1628 set = &ctrl->tag_set; in nvme_tcp_alloc_tagset()
1630 set->ops = &nvme_tcp_mq_ops; in nvme_tcp_alloc_tagset()
1631 set->queue_depth = nctrl->sqsize + 1; in nvme_tcp_alloc_tagset()
1632 set->reserved_tags = NVMF_RESERVED_TAGS; in nvme_tcp_alloc_tagset()
1633 set->numa_node = nctrl->numa_node; in nvme_tcp_alloc_tagset()
1634 set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING; in nvme_tcp_alloc_tagset()
1635 set->cmd_size = sizeof(struct nvme_tcp_request); in nvme_tcp_alloc_tagset()
1636 set->driver_data = ctrl; in nvme_tcp_alloc_tagset()
1637 set->nr_hw_queues = nctrl->queue_count - 1; in nvme_tcp_alloc_tagset()
1638 set->timeout = NVME_IO_TIMEOUT; in nvme_tcp_alloc_tagset()
1639 set->nr_maps = nctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2; in nvme_tcp_alloc_tagset()
1649 static void nvme_tcp_free_admin_queue(struct nvme_ctrl *ctrl) in nvme_tcp_free_admin_queue() argument
1651 if (to_tcp_ctrl(ctrl)->async_req.pdu) { in nvme_tcp_free_admin_queue()
1652 cancel_work_sync(&ctrl->async_event_work); in nvme_tcp_free_admin_queue()
1653 nvme_tcp_free_async_req(to_tcp_ctrl(ctrl)); in nvme_tcp_free_admin_queue()
1654 to_tcp_ctrl(ctrl)->async_req.pdu = NULL; in nvme_tcp_free_admin_queue()
1657 nvme_tcp_free_queue(ctrl, 0); in nvme_tcp_free_admin_queue()
1660 static void nvme_tcp_free_io_queues(struct nvme_ctrl *ctrl) in nvme_tcp_free_io_queues() argument
1664 for (i = 1; i < ctrl->queue_count; i++) in nvme_tcp_free_io_queues()
1665 nvme_tcp_free_queue(ctrl, i); in nvme_tcp_free_io_queues()
1668 static void nvme_tcp_stop_io_queues(struct nvme_ctrl *ctrl) in nvme_tcp_stop_io_queues() argument
1672 for (i = 1; i < ctrl->queue_count; i++) in nvme_tcp_stop_io_queues()
1673 nvme_tcp_stop_queue(ctrl, i); in nvme_tcp_stop_io_queues()
1676 static int nvme_tcp_start_io_queues(struct nvme_ctrl *ctrl) in nvme_tcp_start_io_queues() argument
1680 for (i = 1; i < ctrl->queue_count; i++) { in nvme_tcp_start_io_queues()
1681 ret = nvme_tcp_start_queue(ctrl, i); in nvme_tcp_start_io_queues()
1689 for (i--; i >= 1; i--) in nvme_tcp_start_io_queues()
1690 nvme_tcp_stop_queue(ctrl, i); in nvme_tcp_start_io_queues()
1694 static int nvme_tcp_alloc_admin_queue(struct nvme_ctrl *ctrl) in nvme_tcp_alloc_admin_queue() argument
1698 ret = nvme_tcp_alloc_queue(ctrl, 0, NVME_AQ_DEPTH); in nvme_tcp_alloc_admin_queue()
1702 ret = nvme_tcp_alloc_async_req(to_tcp_ctrl(ctrl)); in nvme_tcp_alloc_admin_queue()
1709 nvme_tcp_free_queue(ctrl, 0); in nvme_tcp_alloc_admin_queue()
1713 static int __nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl) in __nvme_tcp_alloc_io_queues() argument
1717 for (i = 1; i < ctrl->queue_count; i++) { in __nvme_tcp_alloc_io_queues()
1718 ret = nvme_tcp_alloc_queue(ctrl, i, in __nvme_tcp_alloc_io_queues()
1719 ctrl->sqsize + 1); in __nvme_tcp_alloc_io_queues()
1727 for (i--; i >= 1; i--) in __nvme_tcp_alloc_io_queues()
1728 nvme_tcp_free_queue(ctrl, i); in __nvme_tcp_alloc_io_queues()
1733 static unsigned int nvme_tcp_nr_io_queues(struct nvme_ctrl *ctrl) in nvme_tcp_nr_io_queues() argument
1737 nr_io_queues = min(ctrl->opts->nr_io_queues, num_online_cpus()); in nvme_tcp_nr_io_queues()
1738 nr_io_queues += min(ctrl->opts->nr_write_queues, num_online_cpus()); in nvme_tcp_nr_io_queues()
1739 nr_io_queues += min(ctrl->opts->nr_poll_queues, num_online_cpus()); in nvme_tcp_nr_io_queues()
1747 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl); in nvme_tcp_set_io_queues() local
1748 struct nvmf_ctrl_options *opts = nctrl->opts; in nvme_tcp_set_io_queues()
1750 if (opts->nr_write_queues && opts->nr_io_queues < nr_io_queues) { in nvme_tcp_set_io_queues()
1756 ctrl->io_queues[HCTX_TYPE_READ] = opts->nr_io_queues; in nvme_tcp_set_io_queues()
1757 nr_io_queues -= ctrl->io_queues[HCTX_TYPE_READ]; in nvme_tcp_set_io_queues()
1758 ctrl->io_queues[HCTX_TYPE_DEFAULT] = in nvme_tcp_set_io_queues()
1759 min(opts->nr_write_queues, nr_io_queues); in nvme_tcp_set_io_queues()
1760 nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT]; in nvme_tcp_set_io_queues()
1767 ctrl->io_queues[HCTX_TYPE_DEFAULT] = in nvme_tcp_set_io_queues()
1768 min(opts->nr_io_queues, nr_io_queues); in nvme_tcp_set_io_queues()
1769 nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT]; in nvme_tcp_set_io_queues()
1772 if (opts->nr_poll_queues && nr_io_queues) { in nvme_tcp_set_io_queues()
1774 ctrl->io_queues[HCTX_TYPE_POLL] = in nvme_tcp_set_io_queues()
1775 min(opts->nr_poll_queues, nr_io_queues); in nvme_tcp_set_io_queues()
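nvme_tcp_set_io_queues() carves the granted queue count up in priority order: when dedicated write queues were requested, reads are allocated first, then defaults (which carry the writes), and poll queues take what is left. A worked example mirroring that logic, with illustrative option values:

#include <stdio.h>

static unsigned min_u(unsigned a, unsigned b) { return a < b ? a : b; }

int main(void)
{
        unsigned nr = 8;                /* I/O queues granted by the ctrl */
        unsigned nr_io = 4, nr_write = 2, nr_poll = 2; /* connect options */
        unsigned q_read = 0, q_default, q_poll = 0;

        if (nr_write && nr_io < nr) {
                q_read = nr_io;         /* reads get their own queues */
                nr -= q_read;
                q_default = min_u(nr_write, nr); /* defaults carry writes */
        } else {
                q_default = min_u(nr_io, nr);    /* shared read/write */
        }
        nr -= q_default;
        if (nr_poll && nr)
                q_poll = min_u(nr_poll, nr);     /* poll takes the rest */

        printf("read=%u default=%u poll=%u\n", q_read, q_default, q_poll);
        return 0;                       /* prints: read=4 default=2 poll=2 */
}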
1779 static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl) in nvme_tcp_alloc_io_queues() argument
1784 nr_io_queues = nvme_tcp_nr_io_queues(ctrl); in nvme_tcp_alloc_io_queues()
1785 ret = nvme_set_queue_count(ctrl, &nr_io_queues); in nvme_tcp_alloc_io_queues()
1790 dev_err(ctrl->device, in nvme_tcp_alloc_io_queues()
1792 return -ENOMEM; in nvme_tcp_alloc_io_queues()
1795 ctrl->queue_count = nr_io_queues + 1; in nvme_tcp_alloc_io_queues()
1796 dev_info(ctrl->device, in nvme_tcp_alloc_io_queues()
1799 nvme_tcp_set_io_queues(ctrl, nr_io_queues); in nvme_tcp_alloc_io_queues()
1801 return __nvme_tcp_alloc_io_queues(ctrl); in nvme_tcp_alloc_io_queues()
1804 static void nvme_tcp_destroy_io_queues(struct nvme_ctrl *ctrl, bool remove) in nvme_tcp_destroy_io_queues() argument
1806 nvme_tcp_stop_io_queues(ctrl); in nvme_tcp_destroy_io_queues()
1808 blk_cleanup_queue(ctrl->connect_q); in nvme_tcp_destroy_io_queues()
1809 blk_mq_free_tag_set(ctrl->tagset); in nvme_tcp_destroy_io_queues()
1811 nvme_tcp_free_io_queues(ctrl); in nvme_tcp_destroy_io_queues()
1814 static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new) in nvme_tcp_configure_io_queues() argument
1818 ret = nvme_tcp_alloc_io_queues(ctrl); in nvme_tcp_configure_io_queues()
1823 ctrl->tagset = nvme_tcp_alloc_tagset(ctrl, false); in nvme_tcp_configure_io_queues()
1824 if (IS_ERR(ctrl->tagset)) { in nvme_tcp_configure_io_queues()
1825 ret = PTR_ERR(ctrl->tagset); in nvme_tcp_configure_io_queues()
1829 ctrl->connect_q = blk_mq_init_queue(ctrl->tagset); in nvme_tcp_configure_io_queues()
1830 if (IS_ERR(ctrl->connect_q)) { in nvme_tcp_configure_io_queues()
1831 ret = PTR_ERR(ctrl->connect_q); in nvme_tcp_configure_io_queues()
1836 ret = nvme_tcp_start_io_queues(ctrl); in nvme_tcp_configure_io_queues()
1841 nvme_start_queues(ctrl); in nvme_tcp_configure_io_queues()
1842 if (!nvme_wait_freeze_timeout(ctrl, NVME_IO_TIMEOUT)) { in nvme_tcp_configure_io_queues()
1848 ret = -ENODEV; in nvme_tcp_configure_io_queues()
1851 blk_mq_update_nr_hw_queues(ctrl->tagset, in nvme_tcp_configure_io_queues()
1852 ctrl->queue_count - 1); in nvme_tcp_configure_io_queues()
1853 nvme_unfreeze(ctrl); in nvme_tcp_configure_io_queues()
1859 nvme_stop_queues(ctrl); in nvme_tcp_configure_io_queues()
1860 nvme_sync_io_queues(ctrl); in nvme_tcp_configure_io_queues()
1861 nvme_tcp_stop_io_queues(ctrl); in nvme_tcp_configure_io_queues()
1863 nvme_cancel_tagset(ctrl); in nvme_tcp_configure_io_queues()
1865 blk_cleanup_queue(ctrl->connect_q); in nvme_tcp_configure_io_queues()
1868 blk_mq_free_tag_set(ctrl->tagset); in nvme_tcp_configure_io_queues()
1870 nvme_tcp_free_io_queues(ctrl); in nvme_tcp_configure_io_queues()
1874 static void nvme_tcp_destroy_admin_queue(struct nvme_ctrl *ctrl, bool remove) in nvme_tcp_destroy_admin_queue() argument
1876 nvme_tcp_stop_queue(ctrl, 0); in nvme_tcp_destroy_admin_queue()
1878 blk_cleanup_queue(ctrl->admin_q); in nvme_tcp_destroy_admin_queue()
1879 blk_cleanup_queue(ctrl->fabrics_q); in nvme_tcp_destroy_admin_queue()
1880 blk_mq_free_tag_set(ctrl->admin_tagset); in nvme_tcp_destroy_admin_queue()
1882 nvme_tcp_free_admin_queue(ctrl); in nvme_tcp_destroy_admin_queue()
1885 static int nvme_tcp_configure_admin_queue(struct nvme_ctrl *ctrl, bool new) in nvme_tcp_configure_admin_queue() argument
1889 error = nvme_tcp_alloc_admin_queue(ctrl); in nvme_tcp_configure_admin_queue()
1894 ctrl->admin_tagset = nvme_tcp_alloc_tagset(ctrl, true); in nvme_tcp_configure_admin_queue()
1895 if (IS_ERR(ctrl->admin_tagset)) { in nvme_tcp_configure_admin_queue()
1896 error = PTR_ERR(ctrl->admin_tagset); in nvme_tcp_configure_admin_queue()
1900 ctrl->fabrics_q = blk_mq_init_queue(ctrl->admin_tagset); in nvme_tcp_configure_admin_queue()
1901 if (IS_ERR(ctrl->fabrics_q)) { in nvme_tcp_configure_admin_queue()
1902 error = PTR_ERR(ctrl->fabrics_q); in nvme_tcp_configure_admin_queue()
1906 ctrl->admin_q = blk_mq_init_queue(ctrl->admin_tagset); in nvme_tcp_configure_admin_queue()
1907 if (IS_ERR(ctrl->admin_q)) { in nvme_tcp_configure_admin_queue()
1908 error = PTR_ERR(ctrl->admin_q); in nvme_tcp_configure_admin_queue()
1913 error = nvme_tcp_start_queue(ctrl, 0); in nvme_tcp_configure_admin_queue()
1917 error = nvme_enable_ctrl(ctrl); in nvme_tcp_configure_admin_queue()
1921 blk_mq_unquiesce_queue(ctrl->admin_q); in nvme_tcp_configure_admin_queue()
1923 error = nvme_init_ctrl_finish(ctrl); in nvme_tcp_configure_admin_queue()
1930 blk_mq_quiesce_queue(ctrl->admin_q); in nvme_tcp_configure_admin_queue()
1931 blk_sync_queue(ctrl->admin_q); in nvme_tcp_configure_admin_queue()
1933 nvme_tcp_stop_queue(ctrl, 0); in nvme_tcp_configure_admin_queue()
1934 nvme_cancel_admin_tagset(ctrl); in nvme_tcp_configure_admin_queue()
1937 blk_cleanup_queue(ctrl->admin_q); in nvme_tcp_configure_admin_queue()
1940 blk_cleanup_queue(ctrl->fabrics_q); in nvme_tcp_configure_admin_queue()
1943 blk_mq_free_tag_set(ctrl->admin_tagset); in nvme_tcp_configure_admin_queue()
1945 nvme_tcp_free_admin_queue(ctrl); in nvme_tcp_configure_admin_queue()
1949 static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl, in nvme_tcp_teardown_admin_queue() argument
1952 blk_mq_quiesce_queue(ctrl->admin_q); in nvme_tcp_teardown_admin_queue()
1953 blk_sync_queue(ctrl->admin_q); in nvme_tcp_teardown_admin_queue()
1954 nvme_tcp_stop_queue(ctrl, 0); in nvme_tcp_teardown_admin_queue()
1955 nvme_cancel_admin_tagset(ctrl); in nvme_tcp_teardown_admin_queue()
1957 blk_mq_unquiesce_queue(ctrl->admin_q); in nvme_tcp_teardown_admin_queue()
1958 nvme_tcp_destroy_admin_queue(ctrl, remove); in nvme_tcp_teardown_admin_queue()
1961 static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl, in nvme_tcp_teardown_io_queues() argument
1964 if (ctrl->queue_count <= 1) in nvme_tcp_teardown_io_queues()
1966 blk_mq_quiesce_queue(ctrl->admin_q); in nvme_tcp_teardown_io_queues()
1967 nvme_start_freeze(ctrl); in nvme_tcp_teardown_io_queues()
1968 nvme_stop_queues(ctrl); in nvme_tcp_teardown_io_queues()
1969 nvme_sync_io_queues(ctrl); in nvme_tcp_teardown_io_queues()
1970 nvme_tcp_stop_io_queues(ctrl); in nvme_tcp_teardown_io_queues()
1971 nvme_cancel_tagset(ctrl); in nvme_tcp_teardown_io_queues()
1973 nvme_start_queues(ctrl); in nvme_tcp_teardown_io_queues()
1974 nvme_tcp_destroy_io_queues(ctrl, remove); in nvme_tcp_teardown_io_queues()
1977 static void nvme_tcp_reconnect_or_remove(struct nvme_ctrl *ctrl) in nvme_tcp_reconnect_or_remove() argument
1980 if (ctrl->state != NVME_CTRL_CONNECTING) { in nvme_tcp_reconnect_or_remove()
1981 WARN_ON_ONCE(ctrl->state == NVME_CTRL_NEW || in nvme_tcp_reconnect_or_remove()
1982 ctrl->state == NVME_CTRL_LIVE); in nvme_tcp_reconnect_or_remove()
1986 if (nvmf_should_reconnect(ctrl)) { in nvme_tcp_reconnect_or_remove()
1987 dev_info(ctrl->device, "Reconnecting in %d seconds...\n", in nvme_tcp_reconnect_or_remove()
1988 ctrl->opts->reconnect_delay); in nvme_tcp_reconnect_or_remove()
1989 queue_delayed_work(nvme_wq, &to_tcp_ctrl(ctrl)->connect_work, in nvme_tcp_reconnect_or_remove()
1990 ctrl->opts->reconnect_delay * HZ); in nvme_tcp_reconnect_or_remove()
1992 dev_info(ctrl->device, "Removing controller...\n"); in nvme_tcp_reconnect_or_remove()
1993 nvme_delete_ctrl(ctrl); in nvme_tcp_reconnect_or_remove()
1997 static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new) in nvme_tcp_setup_ctrl() argument
1999 struct nvmf_ctrl_options *opts = ctrl->opts; in nvme_tcp_setup_ctrl()
2002 ret = nvme_tcp_configure_admin_queue(ctrl, new); in nvme_tcp_setup_ctrl()
2006 if (ctrl->icdoff) { in nvme_tcp_setup_ctrl()
2007 ret = -EOPNOTSUPP; in nvme_tcp_setup_ctrl()
2008 dev_err(ctrl->device, "icdoff is not supported!\n"); in nvme_tcp_setup_ctrl()
2012 if (!nvme_ctrl_sgl_supported(ctrl)) { in nvme_tcp_setup_ctrl()
2013 ret = -EOPNOTSUPP; in nvme_tcp_setup_ctrl()
2014 dev_err(ctrl->device, "Mandatory sgls are not supported!\n"); in nvme_tcp_setup_ctrl()
2018 if (opts->queue_size > ctrl->sqsize + 1) in nvme_tcp_setup_ctrl()
2019 dev_warn(ctrl->device, in nvme_tcp_setup_ctrl()
2020 "queue_size %zu > ctrl sqsize %u, clamping down\n", in nvme_tcp_setup_ctrl()
2021 opts->queue_size, ctrl->sqsize + 1); in nvme_tcp_setup_ctrl()
2023 if (ctrl->sqsize + 1 > ctrl->maxcmd) { in nvme_tcp_setup_ctrl()
2024 dev_warn(ctrl->device, in nvme_tcp_setup_ctrl()
2025 "sqsize %u > ctrl maxcmd %u, clamping down\n", in nvme_tcp_setup_ctrl()
2026 ctrl->sqsize + 1, ctrl->maxcmd); in nvme_tcp_setup_ctrl()
2027 ctrl->sqsize = ctrl->maxcmd - 1; in nvme_tcp_setup_ctrl()
2030 if (ctrl->queue_count > 1) { in nvme_tcp_setup_ctrl()
2031 ret = nvme_tcp_configure_io_queues(ctrl, new); in nvme_tcp_setup_ctrl()
2036 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE)) { in nvme_tcp_setup_ctrl()
2038 * state change failure is ok if we started ctrl delete, in nvme_tcp_setup_ctrl()
2042 WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING && in nvme_tcp_setup_ctrl()
2043 ctrl->state != NVME_CTRL_DELETING_NOIO); in nvme_tcp_setup_ctrl()
2045 ret = -EINVAL; in nvme_tcp_setup_ctrl()
2049 nvme_start_ctrl(ctrl); in nvme_tcp_setup_ctrl()
2053 if (ctrl->queue_count > 1) { in nvme_tcp_setup_ctrl()
2054 nvme_stop_queues(ctrl); in nvme_tcp_setup_ctrl()
2055 nvme_sync_io_queues(ctrl); in nvme_tcp_setup_ctrl()
2056 nvme_tcp_stop_io_queues(ctrl); in nvme_tcp_setup_ctrl()
2057 nvme_cancel_tagset(ctrl); in nvme_tcp_setup_ctrl()
2058 nvme_tcp_destroy_io_queues(ctrl, new); in nvme_tcp_setup_ctrl()
2061 blk_mq_quiesce_queue(ctrl->admin_q); in nvme_tcp_setup_ctrl()
2062 blk_sync_queue(ctrl->admin_q); in nvme_tcp_setup_ctrl()
2063 nvme_tcp_stop_queue(ctrl, 0); in nvme_tcp_setup_ctrl()
2064 nvme_cancel_admin_tagset(ctrl); in nvme_tcp_setup_ctrl()
2065 nvme_tcp_destroy_admin_queue(ctrl, new); in nvme_tcp_setup_ctrl()
2073 struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl; in nvme_tcp_reconnect_ctrl_work() local
2075 ++ctrl->nr_reconnects; in nvme_tcp_reconnect_ctrl_work()
2077 if (nvme_tcp_setup_ctrl(ctrl, false)) in nvme_tcp_reconnect_ctrl_work()
2080 dev_info(ctrl->device, "Successfully reconnected (%d attempt)\n", in nvme_tcp_reconnect_ctrl_work()
2081 ctrl->nr_reconnects); in nvme_tcp_reconnect_ctrl_work()
2083 ctrl->nr_reconnects = 0; in nvme_tcp_reconnect_ctrl_work()
2088 dev_info(ctrl->device, "Failed reconnect attempt %d\n", in nvme_tcp_reconnect_ctrl_work()
2089 ctrl->nr_reconnects); in nvme_tcp_reconnect_ctrl_work()
2090 nvme_tcp_reconnect_or_remove(ctrl); in nvme_tcp_reconnect_ctrl_work()
2097 struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl; in nvme_tcp_error_recovery_work() local
2099 nvme_stop_keep_alive(ctrl); in nvme_tcp_error_recovery_work()
2100 nvme_tcp_teardown_io_queues(ctrl, false); in nvme_tcp_error_recovery_work()
2102 nvme_start_queues(ctrl); in nvme_tcp_error_recovery_work()
2103 nvme_tcp_teardown_admin_queue(ctrl, false); in nvme_tcp_error_recovery_work()
2104 blk_mq_unquiesce_queue(ctrl->admin_q); in nvme_tcp_error_recovery_work()
2106 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) { in nvme_tcp_error_recovery_work()
2107 /* state change failure is ok if we started ctrl delete */ in nvme_tcp_error_recovery_work()
2108 WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING && in nvme_tcp_error_recovery_work()
2109 ctrl->state != NVME_CTRL_DELETING_NOIO); in nvme_tcp_error_recovery_work()
2113 nvme_tcp_reconnect_or_remove(ctrl); in nvme_tcp_error_recovery_work()
2116 static void nvme_tcp_teardown_ctrl(struct nvme_ctrl *ctrl, bool shutdown) in nvme_tcp_teardown_ctrl() argument
2118 cancel_work_sync(&to_tcp_ctrl(ctrl)->err_work); in nvme_tcp_teardown_ctrl()
2119 cancel_delayed_work_sync(&to_tcp_ctrl(ctrl)->connect_work); in nvme_tcp_teardown_ctrl()
2121 nvme_tcp_teardown_io_queues(ctrl, shutdown); in nvme_tcp_teardown_ctrl()
2122 blk_mq_quiesce_queue(ctrl->admin_q); in nvme_tcp_teardown_ctrl()
2124 nvme_shutdown_ctrl(ctrl); in nvme_tcp_teardown_ctrl()
2126 nvme_disable_ctrl(ctrl); in nvme_tcp_teardown_ctrl()
2127 nvme_tcp_teardown_admin_queue(ctrl, shutdown); in nvme_tcp_teardown_ctrl()
2130 static void nvme_tcp_delete_ctrl(struct nvme_ctrl *ctrl) in nvme_tcp_delete_ctrl() argument
2132 nvme_tcp_teardown_ctrl(ctrl, true); in nvme_tcp_delete_ctrl()
2137 struct nvme_ctrl *ctrl = in nvme_reset_ctrl_work() local
2140 nvme_stop_ctrl(ctrl); in nvme_reset_ctrl_work()
2141 nvme_tcp_teardown_ctrl(ctrl, false); in nvme_reset_ctrl_work()
2143 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) { in nvme_reset_ctrl_work()
2144 /* state change failure is ok if we started ctrl delete */ in nvme_reset_ctrl_work()
2145 WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING && in nvme_reset_ctrl_work()
2146 ctrl->state != NVME_CTRL_DELETING_NOIO); in nvme_reset_ctrl_work()
2150 if (nvme_tcp_setup_ctrl(ctrl, false)) in nvme_reset_ctrl_work()
2156 ++ctrl->nr_reconnects; in nvme_reset_ctrl_work()
2157 nvme_tcp_reconnect_or_remove(ctrl); in nvme_reset_ctrl_work()
2162 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl); in nvme_tcp_free_ctrl() local
2164 if (list_empty(&ctrl->list)) in nvme_tcp_free_ctrl()
2168 list_del(&ctrl->list); in nvme_tcp_free_ctrl()
2171 nvmf_free_options(nctrl->opts); in nvme_tcp_free_ctrl()
2173 kfree(ctrl->queues); in nvme_tcp_free_ctrl()
2174 kfree(ctrl); in nvme_tcp_free_ctrl()
2179 struct nvme_sgl_desc *sg = &c->common.dptr.sgl; in nvme_tcp_set_sg_null()
2181 sg->addr = 0; in nvme_tcp_set_sg_null()
2182 sg->length = 0; in nvme_tcp_set_sg_null()
2183 sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) | in nvme_tcp_set_sg_null()
2190 struct nvme_sgl_desc *sg = &c->common.dptr.sgl; in nvme_tcp_set_sg_inline()
2192 sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff); in nvme_tcp_set_sg_inline()
2193 sg->length = cpu_to_le32(data_len); in nvme_tcp_set_sg_inline()
2194 sg->type = (NVME_SGL_FMT_DATA_DESC << 4) | NVME_SGL_FMT_OFFSET; in nvme_tcp_set_sg_inline()
2200 struct nvme_sgl_desc *sg = &c->common.dptr.sgl; in nvme_tcp_set_sg_host_data()
2202 sg->addr = 0; in nvme_tcp_set_sg_host_data()
2203 sg->length = cpu_to_le32(data_len); in nvme_tcp_set_sg_host_data()
2204 sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) | in nvme_tcp_set_sg_host_data()
2210 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(arg); in nvme_tcp_submit_async_event() local
2211 struct nvme_tcp_queue *queue = &ctrl->queues[0]; in nvme_tcp_submit_async_event()
2212 struct nvme_tcp_cmd_pdu *pdu = ctrl->async_req.pdu; in nvme_tcp_submit_async_event()
2213 struct nvme_command *cmd = &pdu->cmd; in nvme_tcp_submit_async_event()
2217 pdu->hdr.type = nvme_tcp_cmd; in nvme_tcp_submit_async_event()
2218 if (queue->hdr_digest) in nvme_tcp_submit_async_event()
2219 pdu->hdr.flags |= NVME_TCP_F_HDGST; in nvme_tcp_submit_async_event()
2220 pdu->hdr.hlen = sizeof(*pdu); in nvme_tcp_submit_async_event()
2221 pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst); in nvme_tcp_submit_async_event()
2223 cmd->common.opcode = nvme_admin_async_event; in nvme_tcp_submit_async_event()
2224 cmd->common.command_id = NVME_AQ_BLK_MQ_DEPTH; in nvme_tcp_submit_async_event()
2225 cmd->common.flags |= NVME_CMD_SGL_METABUF; in nvme_tcp_submit_async_event()
2228 ctrl->async_req.state = NVME_TCP_SEND_CMD_PDU; in nvme_tcp_submit_async_event()
2229 ctrl->async_req.offset = 0; in nvme_tcp_submit_async_event()
2230 ctrl->async_req.curr_bio = NULL; in nvme_tcp_submit_async_event()
2231 ctrl->async_req.data_len = 0; in nvme_tcp_submit_async_event()
2233 nvme_tcp_queue_request(&ctrl->async_req, true, true); in nvme_tcp_submit_async_event()
2239 struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl; in nvme_tcp_complete_timed_out() local
2241 nvme_tcp_stop_queue(ctrl, nvme_tcp_queue_id(req->queue)); in nvme_tcp_complete_timed_out()
2243 nvme_req(rq)->status = NVME_SC_HOST_ABORTED_CMD; in nvme_tcp_complete_timed_out()
2252 struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl; in nvme_tcp_timeout() local
2253 struct nvme_tcp_cmd_pdu *pdu = req->pdu; in nvme_tcp_timeout()
2255 dev_warn(ctrl->device, in nvme_tcp_timeout()
2257 nvme_tcp_queue_id(req->queue), rq->tag, pdu->hdr.type); in nvme_tcp_timeout()
2259 if (ctrl->state != NVME_CTRL_LIVE) { in nvme_tcp_timeout()
2264 * - ctrl disable/shutdown fabrics requests in nvme_tcp_timeout()
2265 * - connect requests in nvme_tcp_timeout()
2266 * - initialization admin requests in nvme_tcp_timeout()
2267 * - I/O requests that entered after unquiescing and in nvme_tcp_timeout()
2281 nvme_tcp_error_recovery(ctrl); in nvme_tcp_timeout()
2289 struct nvme_tcp_cmd_pdu *pdu = req->pdu; in nvme_tcp_map_data()
2290 struct nvme_command *c = &pdu->cmd; in nvme_tcp_map_data()
2292 c->common.flags |= NVME_CMD_SGL_METABUF; in nvme_tcp_map_data()
2297 req->data_len <= nvme_tcp_inline_data_size(queue)) in nvme_tcp_map_data()
2298 nvme_tcp_set_sg_inline(queue, c, req->data_len); in nvme_tcp_map_data()
2300 nvme_tcp_set_sg_host_data(c, req->data_len); in nvme_tcp_map_data()
static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
		struct request *rq)
{
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
	struct nvme_tcp_cmd_pdu *pdu = req->pdu;
	struct nvme_tcp_queue *queue = req->queue;
	u8 hdgst = nvme_tcp_hdgst_len(queue), ddgst = 0;
	blk_status_t ret;

	ret = nvme_setup_cmd(ns, rq);
	if (ret)
		return ret;

	req->state = NVME_TCP_SEND_CMD_PDU;
	req->status = cpu_to_le16(NVME_SC_SUCCESS);
	req->offset = 0;
	req->data_sent = 0;
	req->pdu_len = 0;
	req->pdu_sent = 0;
	req->data_len = blk_rq_nr_phys_segments(rq) ?
				blk_rq_payload_bytes(rq) : 0;
	req->curr_bio = rq->bio;
	if (req->curr_bio && req->data_len)
		nvme_tcp_init_iter(req, rq_data_dir(rq));

	if (rq_data_dir(rq) == WRITE &&
	    req->data_len <= nvme_tcp_inline_data_size(queue))
		req->pdu_len = req->data_len;

	pdu->hdr.type = nvme_tcp_cmd;
	pdu->hdr.flags = 0;
	if (queue->hdr_digest)
		pdu->hdr.flags |= NVME_TCP_F_HDGST;
	if (queue->data_digest && req->pdu_len) {
		pdu->hdr.flags |= NVME_TCP_F_DDGST;
		ddgst = nvme_tcp_ddgst_len(queue);
	}
	pdu->hdr.hlen = sizeof(*pdu);
	pdu->hdr.pdo = req->pdu_len ? pdu->hdr.hlen + hdgst : 0;
	pdu->hdr.plen =
		cpu_to_le32(pdu->hdr.hlen + hdgst + req->pdu_len + ddgst);

	ret = nvme_tcp_map_data(queue, rq);
	if (unlikely(ret)) {
		nvme_cleanup_cmd(rq);
		dev_err(queue->ctrl->ctrl.device,
			"Failed to map data (%d)\n", ret);
		return ret;
	}

	return 0;
}
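
/*
 * Worked example (editorial sketch, not driver code): the pdo/plen
 * arithmetic above for a hypothetical 4KiB inline write with both
 * digests enabled, assuming the 72-byte command PDU and 4-byte CRC32C
 * digests:
 *
 *	pdo  = 72 + 4            = 76	(payload starts after HDGST)
 *	plen = 72 + 4 + 4096 + 4 = 4176
 *
 * A standalone model of that header math (names are hypothetical):
 */
#include <stdint.h>

struct wire_len { uint8_t pdo; uint32_t plen; };

static struct wire_len wire_lengths(uint8_t hlen, uint8_t hdgst,
				    uint32_t pdu_data_len, uint8_t ddgst)
{
	struct wire_len l;

	l.pdo = pdu_data_len ? hlen + hdgst : 0;  /* 0 when no in-capsule data */
	l.plen = hlen + hdgst + pdu_data_len + ddgst;
	return l;
}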
static void nvme_tcp_commit_rqs(struct blk_mq_hw_ctx *hctx)
{
	struct nvme_tcp_queue *queue = hctx->driver_data;

	if (!llist_empty(&queue->req_list))
		queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
}
static blk_status_t nvme_tcp_queue_rq(struct blk_mq_hw_ctx *hctx,
		const struct blk_mq_queue_data *bd)
{
	struct nvme_ns *ns = hctx->queue->queuedata;
	struct nvme_tcp_queue *queue = hctx->driver_data;
	struct request *rq = bd->rq;
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
	bool queue_ready = test_bit(NVME_TCP_Q_LIVE, &queue->flags);
	blk_status_t ret;

	if (!nvme_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
		return nvme_fail_nonready_command(&queue->ctrl->ctrl, rq);

	ret = nvme_tcp_setup_cmd_pdu(ns, rq);
	if (unlikely(ret))
		return ret;

	blk_mq_start_request(rq);

	nvme_tcp_queue_request(req, true, bd->last);

	return BLK_STS_OK;
}
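
/*
 * Editorial note: bd->last carries blk-mq's plugging hint, so only the
 * final request of a batch forces an immediate send attempt inside
 * nvme_tcp_queue_request(); nvme_tcp_commit_rqs() above is the backstop
 * that kicks io_work when blk-mq ends a batch without dispatching a
 * request marked last.
 */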
static int nvme_tcp_map_queues(struct blk_mq_tag_set *set)
{
	struct nvme_tcp_ctrl *ctrl = set->driver_data;
	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;

	if (opts->nr_write_queues && ctrl->io_queues[HCTX_TYPE_READ]) {
		/* separate read/write queues */
		set->map[HCTX_TYPE_DEFAULT].nr_queues =
			ctrl->io_queues[HCTX_TYPE_DEFAULT];
		set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
		set->map[HCTX_TYPE_READ].nr_queues =
			ctrl->io_queues[HCTX_TYPE_READ];
		set->map[HCTX_TYPE_READ].queue_offset =
			ctrl->io_queues[HCTX_TYPE_DEFAULT];
	} else {
		/* shared read/write queues */
		set->map[HCTX_TYPE_DEFAULT].nr_queues =
			ctrl->io_queues[HCTX_TYPE_DEFAULT];
		set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
		set->map[HCTX_TYPE_READ].nr_queues =
			ctrl->io_queues[HCTX_TYPE_DEFAULT];
		set->map[HCTX_TYPE_READ].queue_offset = 0;
	}
	blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
	blk_mq_map_queues(&set->map[HCTX_TYPE_READ]);

	if (opts->nr_poll_queues && ctrl->io_queues[HCTX_TYPE_POLL]) {
		/* map dedicated poll queues only if we have queues left */
		set->map[HCTX_TYPE_POLL].nr_queues =
			ctrl->io_queues[HCTX_TYPE_POLL];
		set->map[HCTX_TYPE_POLL].queue_offset =
			ctrl->io_queues[HCTX_TYPE_DEFAULT] +
			ctrl->io_queues[HCTX_TYPE_READ];
		blk_mq_map_queues(&set->map[HCTX_TYPE_POLL]);
	}

	dev_info(ctrl->ctrl.device,
		"mapped %d/%d/%d default/read/poll queues.\n",
		ctrl->io_queues[HCTX_TYPE_DEFAULT],
		ctrl->io_queues[HCTX_TYPE_READ],
		ctrl->io_queues[HCTX_TYPE_POLL]);

	return 0;
}
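
/*
 * Worked example (editorial sketch): with hypothetical counts
 * io_queues[DEFAULT] = 2, io_queues[READ] = 4, io_queues[POLL] = 2, the
 * separate read/write branch lays the hardware contexts out as:
 *
 *	HCTX_TYPE_DEFAULT: nr_queues = 2, offset = 0  ->  hw queues 0-1
 *	HCTX_TYPE_READ:    nr_queues = 4, offset = 2  ->  hw queues 2-5
 *	HCTX_TYPE_POLL:    nr_queues = 2, offset = 6  ->  hw queues 6-7
 */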
static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx)
{
	struct nvme_tcp_queue *queue = hctx->driver_data;
	struct sock *sk = queue->sock->sk;

	if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags))
		return 0;

	set_bit(NVME_TCP_Q_POLLING, &queue->flags);
	if (sk_can_busy_loop(sk) && skb_queue_empty_lockless(&sk->sk_receive_queue))
		sk_busy_loop(sk, true);
	nvme_tcp_try_recv(queue);
	clear_bit(NVME_TCP_Q_POLLING, &queue->flags);
	return queue->nr_cqe;
}
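
/*
 * Editorial note: NVME_TCP_Q_POLLING is what keeps the softirq
 * data_ready callback from scheduling io_work while a poller is active;
 * the sk_busy_loop() spin is only entered while the receive queue is
 * still empty, otherwise nvme_tcp_try_recv() immediately reaps the
 * completions that already arrived.
 */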
static bool nvme_tcp_existing_controller(struct nvmf_ctrl_options *opts)
{
	struct nvme_tcp_ctrl *ctrl;
	bool found = false;

	mutex_lock(&nvme_tcp_ctrl_mutex);
	list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list) {
		found = nvmf_ip_options_match(&ctrl->ctrl, opts);
		if (found)
			break;
	}
	mutex_unlock(&nvme_tcp_ctrl_mutex);

	return found;
}
static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev,
		struct nvmf_ctrl_options *opts)
{
	struct nvme_tcp_ctrl *ctrl;
	int ret;

	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&ctrl->list);
	ctrl->ctrl.opts = opts;
	ctrl->ctrl.queue_count = opts->nr_io_queues + opts->nr_write_queues +
				opts->nr_poll_queues + 1;
	ctrl->ctrl.sqsize = opts->queue_size - 1;
	ctrl->ctrl.kato = opts->kato;

	INIT_DELAYED_WORK(&ctrl->connect_work,
			nvme_tcp_reconnect_ctrl_work);
	INIT_WORK(&ctrl->err_work, nvme_tcp_error_recovery_work);
	INIT_WORK(&ctrl->ctrl.reset_work, nvme_reset_ctrl_work);

	if (!(opts->mask & NVMF_OPT_TRSVCID)) {
		opts->trsvcid =
			kstrdup(__stringify(NVME_TCP_DISC_PORT), GFP_KERNEL);
		if (!opts->trsvcid) {
			ret = -ENOMEM;
			goto out_free_ctrl;
		}
		opts->mask |= NVMF_OPT_TRSVCID;
	}

	ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
			opts->traddr, opts->trsvcid, &ctrl->addr);
	if (ret) {
		pr_err("malformed address passed: %s:%s\n",
			opts->traddr, opts->trsvcid);
		goto out_free_ctrl;
	}

	if (opts->mask & NVMF_OPT_HOST_TRADDR) {
		ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
			opts->host_traddr, NULL, &ctrl->src_addr);
		if (ret) {
			pr_err("malformed src address passed: %s\n",
			       opts->host_traddr);
			goto out_free_ctrl;
		}
	}

	if (opts->mask & NVMF_OPT_HOST_IFACE) {
		if (!__dev_get_by_name(&init_net, opts->host_iface)) {
			pr_err("invalid interface passed: %s\n",
			       opts->host_iface);
			ret = -ENODEV;
			goto out_free_ctrl;
		}
	}

	if (!opts->duplicate_connect && nvme_tcp_existing_controller(opts)) {
		ret = -EALREADY;
		goto out_free_ctrl;
	}

	ctrl->queues = kcalloc(ctrl->ctrl.queue_count, sizeof(*ctrl->queues),
				GFP_KERNEL);
	if (!ctrl->queues) {
		ret = -ENOMEM;
		goto out_free_ctrl;
	}

	ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_tcp_ctrl_ops, 0);
	if (ret)
		goto out_kfree_queues;

	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
		WARN_ON_ONCE(1);
		ret = -EINTR;
		goto out_uninit_ctrl;
	}

	ret = nvme_tcp_setup_ctrl(&ctrl->ctrl, true);
	if (ret)
		goto out_uninit_ctrl;

	dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISp\n",
		ctrl->ctrl.opts->subsysnqn, &ctrl->addr);

	mutex_lock(&nvme_tcp_ctrl_mutex);
	list_add_tail(&ctrl->list, &nvme_tcp_ctrl_list);
	mutex_unlock(&nvme_tcp_ctrl_mutex);

	return &ctrl->ctrl;

out_uninit_ctrl:
	nvme_uninit_ctrl(&ctrl->ctrl);
	nvme_put_ctrl(&ctrl->ctrl);
	if (ret > 0)
		ret = -EIO;
	return ERR_PTR(ret);
out_kfree_queues:
	kfree(ctrl->queues);
out_free_ctrl:
	kfree(ctrl);
	return ERR_PTR(ret);
}
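
/*
 * Worked example (editorial sketch): queue_count reserves index 0 for
 * the admin queue. With hypothetical options nr_io_queues=4,
 * nr_write_queues=2, nr_poll_queues=2:
 *
 *	queue_count = 4 + 2 + 2 + 1 = 9
 *	sqsize      = queue_size - 1	(NVMe queue depths are zero-based)
 */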
static int __init nvme_tcp_init_module(void)
{
	nvme_tcp_wq = alloc_workqueue("nvme_tcp_wq",
			WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
	if (!nvme_tcp_wq)
		return -ENOMEM;

	nvmf_register_transport(&nvme_tcp_transport);
	return 0;
}
static void __exit nvme_tcp_cleanup_module(void)
{
	struct nvme_tcp_ctrl *ctrl;

	nvmf_unregister_transport(&nvme_tcp_transport);

	mutex_lock(&nvme_tcp_ctrl_mutex);
	list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list)
		nvme_delete_ctrl(&ctrl->ctrl);
	mutex_unlock(&nvme_tcp_ctrl_mutex);
	flush_workqueue(nvme_delete_wq);
}
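
/*
 * Editorial note: teardown order matters here. Unregistering the
 * transport first stops new controllers from being created, deleting
 * the survivors under nvme_tcp_ctrl_mutex queues their teardown, and
 * flushing nvme_delete_wq guarantees every controller is gone before
 * the module text is.
 */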