Lines Matching +full:ctrl +full:- +full:len
All matches below come from the NVMe over TCP host driver (the nvme_tcp_* functions live in drivers/nvme/host/tcp.c); each entry shows the source line number, the matched line, and the enclosing function or declaration kind.

1 // SPDX-License-Identifier: GPL-2.0
11 #include <linux/nvme-tcp.h>
14 #include <linux/blk-mq.h>
83 struct nvme_tcp_ctrl *ctrl; member
111 struct nvme_ctrl ctrl; member
125 static inline struct nvme_tcp_ctrl *to_tcp_ctrl(struct nvme_ctrl *ctrl) in to_tcp_ctrl() argument
127 return container_of(ctrl, struct nvme_tcp_ctrl, ctrl); in to_tcp_ctrl()
132 return queue - queue->ctrl->queues; in nvme_tcp_queue_id()
140 return queue->ctrl->admin_tag_set.tags[queue_idx]; in nvme_tcp_tagset()
141 return queue->ctrl->tag_set.tags[queue_idx - 1]; in nvme_tcp_tagset()
146 return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0; in nvme_tcp_hdgst_len()
151 return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0; in nvme_tcp_ddgst_len()
156 return queue->cmnd_capsule_len - sizeof(struct nvme_command); in nvme_tcp_inline_data_size()
161 return req == &req->queue->ctrl->async_req; in nvme_tcp_async_req()
176 bytes <= nvme_tcp_inline_data_size(req->queue); in nvme_tcp_has_inline_data()
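
The two digest helpers above return the fixed 4-byte NVME_TCP_DIGEST_LENGTH only when the respective digest was negotiated, and the inline-data helpers derive how much write data fits inside the command capsule itself: ioccsz * 16 bytes minus the 64-byte SQE at its head. A minimal userspace sketch of that arithmetic (the ioccsz value is invented):

#include <stdio.h>

int main(void)
{
	unsigned ioccsz = 8;                    /* hypothetical value from Identify */
	unsigned capsule_len = ioccsz * 16;     /* 128-byte command capsule */
	unsigned sqe = 64;                      /* sizeof(struct nvme_command) */
	unsigned inline_size = capsule_len - sqe;

	printf("capsule %u bytes, inline data %u bytes\n", capsule_len, inline_size);
	return 0;
}
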
181 return req->iter.bvec->bv_page; in nvme_tcp_req_cur_page()
186 return req->iter.bvec->bv_offset + req->iter.iov_offset; in nvme_tcp_req_cur_offset()
191 return min_t(size_t, req->iter.bvec->bv_len - req->iter.iov_offset, in nvme_tcp_req_cur_length()
192 req->pdu_len - req->pdu_sent); in nvme_tcp_req_cur_length()
197 return req->iter.iov_offset; in nvme_tcp_req_offset()
203 req->pdu_len - req->pdu_sent : 0; in nvme_tcp_pdu_data_left()
207 int len) in nvme_tcp_pdu_last_send() argument
209 return nvme_tcp_pdu_data_left(req) <= len; in nvme_tcp_pdu_last_send()
221 if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) { in nvme_tcp_init_iter()
222 vec = &rq->special_vec; in nvme_tcp_init_iter()
227 struct bio *bio = req->curr_bio; in nvme_tcp_init_iter()
229 vec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter); in nvme_tcp_init_iter()
231 size = bio->bi_iter.bi_size; in nvme_tcp_init_iter()
232 offset = bio->bi_iter.bi_bvec_done; in nvme_tcp_init_iter()
235 iov_iter_bvec(&req->iter, dir, vec, nsegs, size); in nvme_tcp_init_iter()
236 req->iter.iov_offset = offset; in nvme_tcp_init_iter()
240 int len) in nvme_tcp_advance_req() argument
242 req->data_sent += len; in nvme_tcp_advance_req()
243 req->pdu_sent += len; in nvme_tcp_advance_req()
244 iov_iter_advance(&req->iter, len); in nvme_tcp_advance_req()
245 if (!iov_iter_count(&req->iter) && in nvme_tcp_advance_req()
246 req->data_sent < req->data_len) { in nvme_tcp_advance_req()
247 req->curr_bio = req->curr_bio->bi_next; in nvme_tcp_advance_req()
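
nvme_tcp_advance_req() advances two counters at once: data_sent tracks progress through the whole request, pdu_sent through the current PDU, and nvme_tcp_pdu_last_send() flags the chunk that completes the PDU. A toy model of that accounting (chunk size and lengths are invented, not driver values):

#include <stdio.h>

struct req {
	unsigned data_len, data_sent;   /* whole-request progress */
	unsigned pdu_len, pdu_sent;     /* current-PDU progress */
};

static unsigned pdu_data_left(const struct req *r)
{
	return r->pdu_len - r->pdu_sent;
}

int main(void)
{
	struct req r = { .data_len = 8192, .pdu_len = 4096 };
	unsigned chunk = 1024;

	while (r.data_sent < r.data_len) {
		unsigned len = chunk < pdu_data_left(&r) ? chunk : pdu_data_left(&r);
		int last = pdu_data_left(&r) <= len;

		r.data_sent += len;
		r.pdu_sent += len;
		printf("sent %u/%u (pdu %u/%u)%s\n", r.data_sent, r.data_len,
		       r.pdu_sent, r.pdu_len, last ? " [last in pdu]" : "");
		if (last)
			r.pdu_sent = 0;   /* the next PDU starts fresh */
	}
	return 0;
}
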
254 struct nvme_tcp_queue *queue = req->queue; in nvme_tcp_queue_request()
256 spin_lock(&queue->lock); in nvme_tcp_queue_request()
257 list_add_tail(&req->entry, &queue->send_list); in nvme_tcp_queue_request()
258 spin_unlock(&queue->lock); in nvme_tcp_queue_request()
260 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work); in nvme_tcp_queue_request()
268 spin_lock(&queue->lock); in nvme_tcp_fetch_request()
269 req = list_first_entry_or_null(&queue->send_list, in nvme_tcp_fetch_request()
272 list_del(&req->entry); in nvme_tcp_fetch_request()
273 spin_unlock(&queue->lock); in nvme_tcp_fetch_request()
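
The send list is a plain lock-protected FIFO: nvme_tcp_queue_request() appends under queue->lock and kicks io_work, which drains it through nvme_tcp_fetch_request(). A userspace analogue of the same shape, with a pthread mutex standing in for the spinlock (this is an illustration, not the driver code):

#include <pthread.h>
#include <stdio.h>

struct node { struct node *next; int tag; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *head, *tail;

static void queue_request(struct node *n)
{
	pthread_mutex_lock(&lock);
	n->next = NULL;
	if (tail)
		tail->next = n;
	else
		head = n;
	tail = n;
	pthread_mutex_unlock(&lock);
	/* the driver would queue_work_on() the io_work here */
}

static struct node *fetch_request(void)
{
	pthread_mutex_lock(&lock);
	struct node *n = head;
	if (n) {
		head = n->next;
		if (!head)
			tail = NULL;
	}
	pthread_mutex_unlock(&lock);
	return n;
}

int main(void)
{
	struct node a = { .tag = 1 }, b = { .tag = 2 };
	queue_request(&a);
	queue_request(&b);
	for (struct node *n; (n = fetch_request()); )
		printf("req %d\n", n->tag);
	return 0;
}
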
286 struct page *page, off_t off, size_t len) in nvme_tcp_ddgst_update() argument
291 sg_set_page(&sg, page, len, off); in nvme_tcp_ddgst_update()
292 ahash_request_set_crypt(hash, &sg, NULL, len); in nvme_tcp_ddgst_update()
297 void *pdu, size_t len) in nvme_tcp_hdgst() argument
301 sg_init_one(&sg, pdu, len); in nvme_tcp_hdgst()
302 ahash_request_set_crypt(hash, &sg, pdu + len, len); in nvme_tcp_hdgst()
313 if (unlikely(!(hdr->flags & NVME_TCP_F_HDGST))) { in nvme_tcp_verify_hdgst()
314 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_verify_hdgst()
317 return -EPROTO; in nvme_tcp_verify_hdgst()
320 recv_digest = *(__le32 *)(pdu + hdr->hlen); in nvme_tcp_verify_hdgst()
321 nvme_tcp_hdgst(queue->rcv_hash, pdu, pdu_len); in nvme_tcp_verify_hdgst()
322 exp_digest = *(__le32 *)(pdu + hdr->hlen); in nvme_tcp_verify_hdgst()
324 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_verify_hdgst()
327 return -EIO; in nvme_tcp_verify_hdgst()
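
nvme_tcp_verify_hdgst() saves the received digest, recomputes the CRC32C in place over the header (nvme_tcp_hdgst() writes its result right after the hlen bytes), and compares the two. A self-contained userspace sketch of the same check, with a bitwise CRC32C standing in for the kernel crypto API (byte-order handling omitted for brevity):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t crc32c(const uint8_t *buf, size_t len)
{
	uint32_t crc = ~0u;
	while (len--) {
		crc ^= *buf++;
		for (int k = 0; k < 8; k++)
			crc = (crc >> 1) ^ (0x82F63B78 & -(crc & 1));
	}
	return ~crc;
}

int main(void)
{
	uint8_t pdu[28] = { 0x05, 0x00, 24 };   /* fake 24-byte header + digest */
	size_t hlen = 24;
	uint32_t d = crc32c(pdu, hlen);

	memcpy(pdu + hlen, &d, 4);              /* sender appends HDGST */

	uint32_t recv, exp = crc32c(pdu, hlen); /* receiver re-derives ... */
	memcpy(&recv, pdu + hlen, 4);           /* ... and compares */
	printf("%s\n", recv == exp ? "hdgst ok" : "hdgst mismatch");
	return 0;
}
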
337 u32 len; in nvme_tcp_check_ddgst() local
339 len = le32_to_cpu(hdr->plen) - hdr->hlen - in nvme_tcp_check_ddgst()
340 ((hdr->flags & NVME_TCP_F_HDGST) ? digest_len : 0); in nvme_tcp_check_ddgst()
342 if (unlikely(len && !(hdr->flags & NVME_TCP_F_DDGST))) { in nvme_tcp_check_ddgst()
343 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_check_ddgst()
346 return -EPROTO; in nvme_tcp_check_ddgst()
348 crypto_ahash_init(queue->rcv_hash); in nvme_tcp_check_ddgst()
358 page_frag_free(req->pdu); in nvme_tcp_exit_request()
365 struct nvme_tcp_ctrl *ctrl = set->driver_data; in nvme_tcp_init_request() local
367 int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0; in nvme_tcp_init_request()
368 struct nvme_tcp_queue *queue = &ctrl->queues[queue_idx]; in nvme_tcp_init_request()
371 req->pdu = page_frag_alloc(&queue->pf_cache, in nvme_tcp_init_request()
374 if (!req->pdu) in nvme_tcp_init_request()
375 return -ENOMEM; in nvme_tcp_init_request()
377 req->queue = queue; in nvme_tcp_init_request()
378 nvme_req(rq)->ctrl = &ctrl->ctrl; in nvme_tcp_init_request()
386 struct nvme_tcp_ctrl *ctrl = data; in nvme_tcp_init_hctx() local
387 struct nvme_tcp_queue *queue = &ctrl->queues[hctx_idx + 1]; in nvme_tcp_init_hctx()
389 hctx->driver_data = queue; in nvme_tcp_init_hctx()
396 struct nvme_tcp_ctrl *ctrl = data; in nvme_tcp_init_admin_hctx() local
397 struct nvme_tcp_queue *queue = &ctrl->queues[0]; in nvme_tcp_init_admin_hctx()
399 hctx->driver_data = queue; in nvme_tcp_init_admin_hctx()
406 return (queue->pdu_remaining) ? NVME_TCP_RECV_PDU : in nvme_tcp_recv_state()
407 (queue->ddgst_remaining) ? NVME_TCP_RECV_DDGST : in nvme_tcp_recv_state()
413 queue->pdu_remaining = sizeof(struct nvme_tcp_rsp_pdu) + in nvme_tcp_init_recv_ctx()
415 queue->pdu_offset = 0; in nvme_tcp_init_recv_ctx()
416 queue->data_remaining = -1; in nvme_tcp_init_recv_ctx()
417 queue->ddgst_remaining = 0; in nvme_tcp_init_recv_ctx()
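
The receive side keeps no explicit state variable: nvme_tcp_recv_state() derives RECV_PDU / RECV_DATA / RECV_DDGST from whichever *_remaining counter is still non-zero, and nvme_tcp_init_recv_ctx() re-arms pdu_remaining for the next header. A condensed model of that derivation:

#include <stdio.h>

enum recv_state { RECV_PDU, RECV_DATA, RECV_DDGST };

struct rx {
	int pdu_remaining;
	int data_remaining;
	int ddgst_remaining;
};

static enum recv_state recv_state(const struct rx *q)
{
	return q->pdu_remaining ? RECV_PDU :
	       q->ddgst_remaining ? RECV_DDGST : RECV_DATA;
}

int main(void)
{
	struct rx q = { .pdu_remaining = 24 };
	printf("state %d\n", recv_state(&q));   /* RECV_PDU until header consumed */
	q.pdu_remaining = 0; q.data_remaining = 4096;
	printf("state %d\n", recv_state(&q));   /* RECV_DATA */
	q.data_remaining = 0; q.ddgst_remaining = 4;
	printf("state %d\n", recv_state(&q));   /* RECV_DDGST */
	return 0;
}
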
420 static void nvme_tcp_error_recovery(struct nvme_ctrl *ctrl) in nvme_tcp_error_recovery() argument
422 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING)) in nvme_tcp_error_recovery()
425 queue_work(nvme_wq, &to_tcp_ctrl(ctrl)->err_work); in nvme_tcp_error_recovery()
433 rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), cqe->command_id); in nvme_tcp_process_nvme_cqe()
435 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_process_nvme_cqe()
437 nvme_tcp_queue_id(queue), cqe->command_id); in nvme_tcp_process_nvme_cqe()
438 nvme_tcp_error_recovery(&queue->ctrl->ctrl); in nvme_tcp_process_nvme_cqe()
439 return -EINVAL; in nvme_tcp_process_nvme_cqe()
442 nvme_end_request(rq, cqe->status, cqe->result); in nvme_tcp_process_nvme_cqe()
443 queue->nr_cqe++; in nvme_tcp_process_nvme_cqe()
453 rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), pdu->command_id); in nvme_tcp_handle_c2h_data()
455 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_handle_c2h_data()
457 nvme_tcp_queue_id(queue), pdu->command_id); in nvme_tcp_handle_c2h_data()
458 return -ENOENT; in nvme_tcp_handle_c2h_data()
462 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_handle_c2h_data()
464 nvme_tcp_queue_id(queue), rq->tag); in nvme_tcp_handle_c2h_data()
465 return -EIO; in nvme_tcp_handle_c2h_data()
468 queue->data_remaining = le32_to_cpu(pdu->data_length); in nvme_tcp_handle_c2h_data()
470 if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS && in nvme_tcp_handle_c2h_data()
471 unlikely(!(pdu->hdr.flags & NVME_TCP_F_DATA_LAST))) { in nvme_tcp_handle_c2h_data()
472 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_handle_c2h_data()
474 nvme_tcp_queue_id(queue), rq->tag); in nvme_tcp_handle_c2h_data()
475 nvme_tcp_error_recovery(&queue->ctrl->ctrl); in nvme_tcp_handle_c2h_data()
476 return -EPROTO; in nvme_tcp_handle_c2h_data()
485 struct nvme_completion *cqe = &pdu->cqe; in nvme_tcp_handle_comp()
495 cqe->command_id >= NVME_AQ_BLK_MQ_DEPTH)) in nvme_tcp_handle_comp()
496 nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status, in nvme_tcp_handle_comp()
497 &cqe->result); in nvme_tcp_handle_comp()
507 struct nvme_tcp_data_pdu *data = req->pdu; in nvme_tcp_setup_h2c_data_pdu()
508 struct nvme_tcp_queue *queue = req->queue; in nvme_tcp_setup_h2c_data_pdu()
513 req->pdu_len = le32_to_cpu(pdu->r2t_length); in nvme_tcp_setup_h2c_data_pdu()
514 req->pdu_sent = 0; in nvme_tcp_setup_h2c_data_pdu()
516 if (unlikely(req->data_sent + req->pdu_len > req->data_len)) { in nvme_tcp_setup_h2c_data_pdu()
517 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_setup_h2c_data_pdu()
518 "req %d r2t len %u exceeded data len %u (%zu sent)\n", in nvme_tcp_setup_h2c_data_pdu()
519 rq->tag, req->pdu_len, req->data_len, in nvme_tcp_setup_h2c_data_pdu()
520 req->data_sent); in nvme_tcp_setup_h2c_data_pdu()
521 return -EPROTO; in nvme_tcp_setup_h2c_data_pdu()
524 if (unlikely(le32_to_cpu(pdu->r2t_offset) < req->data_sent)) { in nvme_tcp_setup_h2c_data_pdu()
525 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_setup_h2c_data_pdu()
527 rq->tag, le32_to_cpu(pdu->r2t_offset), in nvme_tcp_setup_h2c_data_pdu()
528 req->data_sent); in nvme_tcp_setup_h2c_data_pdu()
529 return -EPROTO; in nvme_tcp_setup_h2c_data_pdu()
533 data->hdr.type = nvme_tcp_h2c_data; in nvme_tcp_setup_h2c_data_pdu()
534 data->hdr.flags = NVME_TCP_F_DATA_LAST; in nvme_tcp_setup_h2c_data_pdu()
535 if (queue->hdr_digest) in nvme_tcp_setup_h2c_data_pdu()
536 data->hdr.flags |= NVME_TCP_F_HDGST; in nvme_tcp_setup_h2c_data_pdu()
537 if (queue->data_digest) in nvme_tcp_setup_h2c_data_pdu()
538 data->hdr.flags |= NVME_TCP_F_DDGST; in nvme_tcp_setup_h2c_data_pdu()
539 data->hdr.hlen = sizeof(*data); in nvme_tcp_setup_h2c_data_pdu()
540 data->hdr.pdo = data->hdr.hlen + hdgst; in nvme_tcp_setup_h2c_data_pdu()
541 data->hdr.plen = in nvme_tcp_setup_h2c_data_pdu()
542 cpu_to_le32(data->hdr.hlen + hdgst + req->pdu_len + ddgst); in nvme_tcp_setup_h2c_data_pdu()
543 data->ttag = pdu->ttag; in nvme_tcp_setup_h2c_data_pdu()
544 data->command_id = rq->tag; in nvme_tcp_setup_h2c_data_pdu()
545 data->data_offset = cpu_to_le32(req->data_sent); in nvme_tcp_setup_h2c_data_pdu()
546 data->data_length = cpu_to_le32(req->pdu_len); in nvme_tcp_setup_h2c_data_pdu()
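
nvme_tcp_setup_h2c_data_pdu() fills in the wire lengths: pdo (the data offset) is the header plus its digest, and plen is header + header digest + payload + data digest. A worked example with the 24-byte H2CData header (the digest terms apply only when the corresponding digest was negotiated):

#include <stdio.h>

int main(void)
{
	unsigned hlen = 24;      /* H2CData PDU header */
	unsigned hdgst = 4;      /* header digest, if negotiated */
	unsigned ddgst = 4;      /* data digest, if negotiated */
	unsigned pdu_len = 4096; /* r2t_length granted by the target */

	unsigned pdo = hlen + hdgst;                     /* where data starts */
	unsigned plen = hlen + hdgst + pdu_len + ddgst;  /* total PDU bytes */

	printf("pdo %u, plen %u\n", pdo, plen);
	return 0;
}
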
557 rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), pdu->command_id); in nvme_tcp_handle_r2t()
559 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_handle_r2t()
561 nvme_tcp_queue_id(queue), pdu->command_id); in nvme_tcp_handle_r2t()
562 return -ENOENT; in nvme_tcp_handle_r2t()
570 req->state = NVME_TCP_SEND_H2C_PDU; in nvme_tcp_handle_r2t()
571 req->offset = 0; in nvme_tcp_handle_r2t()
579 unsigned int *offset, size_t *len) in nvme_tcp_recv_pdu() argument
582 char *pdu = queue->pdu; in nvme_tcp_recv_pdu()
583 size_t rcv_len = min_t(size_t, *len, queue->pdu_remaining); in nvme_tcp_recv_pdu()
587 &pdu[queue->pdu_offset], rcv_len); in nvme_tcp_recv_pdu()
591 queue->pdu_remaining -= rcv_len; in nvme_tcp_recv_pdu()
592 queue->pdu_offset += rcv_len; in nvme_tcp_recv_pdu()
594 *len -= rcv_len; in nvme_tcp_recv_pdu()
595 if (queue->pdu_remaining) in nvme_tcp_recv_pdu()
598 hdr = queue->pdu; in nvme_tcp_recv_pdu()
599 if (queue->hdr_digest) { in nvme_tcp_recv_pdu()
600 ret = nvme_tcp_verify_hdgst(queue, queue->pdu, hdr->hlen); in nvme_tcp_recv_pdu()
606 if (queue->data_digest) { in nvme_tcp_recv_pdu()
607 ret = nvme_tcp_check_ddgst(queue, queue->pdu); in nvme_tcp_recv_pdu()
612 switch (hdr->type) { in nvme_tcp_recv_pdu()
614 return nvme_tcp_handle_c2h_data(queue, (void *)queue->pdu); in nvme_tcp_recv_pdu()
617 return nvme_tcp_handle_comp(queue, (void *)queue->pdu); in nvme_tcp_recv_pdu()
620 return nvme_tcp_handle_r2t(queue, (void *)queue->pdu); in nvme_tcp_recv_pdu()
622 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_recv_pdu()
623 "unsupported pdu type (%d)\n", hdr->type); in nvme_tcp_recv_pdu()
624 return -EINVAL; in nvme_tcp_recv_pdu()
636 unsigned int *offset, size_t *len) in nvme_tcp_recv_data() argument
638 struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu; in nvme_tcp_recv_data()
642 rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), pdu->command_id); in nvme_tcp_recv_data()
644 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_recv_data()
646 nvme_tcp_queue_id(queue), pdu->command_id); in nvme_tcp_recv_data()
647 return -ENOENT; in nvme_tcp_recv_data()
654 recv_len = min_t(size_t, *len, queue->data_remaining); in nvme_tcp_recv_data()
658 if (!iov_iter_count(&req->iter)) { in nvme_tcp_recv_data()
659 req->curr_bio = req->curr_bio->bi_next; in nvme_tcp_recv_data()
665 if (!req->curr_bio) { in nvme_tcp_recv_data()
666 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_recv_data()
668 nvme_tcp_queue_id(queue), rq->tag); in nvme_tcp_recv_data()
670 return -EIO; in nvme_tcp_recv_data()
677 iov_iter_count(&req->iter)); in nvme_tcp_recv_data()
679 if (queue->data_digest) in nvme_tcp_recv_data()
681 &req->iter, recv_len, queue->rcv_hash); in nvme_tcp_recv_data()
684 &req->iter, recv_len); in nvme_tcp_recv_data()
686 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_recv_data()
688 nvme_tcp_queue_id(queue), rq->tag); in nvme_tcp_recv_data()
692 *len -= recv_len; in nvme_tcp_recv_data()
694 queue->data_remaining -= recv_len; in nvme_tcp_recv_data()
697 if (!queue->data_remaining) { in nvme_tcp_recv_data()
698 if (queue->data_digest) { in nvme_tcp_recv_data()
699 nvme_tcp_ddgst_final(queue->rcv_hash, &queue->exp_ddgst); in nvme_tcp_recv_data()
700 queue->ddgst_remaining = NVME_TCP_DIGEST_LENGTH; in nvme_tcp_recv_data()
702 if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) { in nvme_tcp_recv_data()
704 queue->nr_cqe++; in nvme_tcp_recv_data()
714 struct sk_buff *skb, unsigned int *offset, size_t *len) in nvme_tcp_recv_ddgst() argument
716 struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu; in nvme_tcp_recv_ddgst()
717 char *ddgst = (char *)&queue->recv_ddgst; in nvme_tcp_recv_ddgst()
718 size_t recv_len = min_t(size_t, *len, queue->ddgst_remaining); in nvme_tcp_recv_ddgst()
719 off_t off = NVME_TCP_DIGEST_LENGTH - queue->ddgst_remaining; in nvme_tcp_recv_ddgst()
726 queue->ddgst_remaining -= recv_len; in nvme_tcp_recv_ddgst()
728 *len -= recv_len; in nvme_tcp_recv_ddgst()
729 if (queue->ddgst_remaining) in nvme_tcp_recv_ddgst()
732 if (queue->recv_ddgst != queue->exp_ddgst) { in nvme_tcp_recv_ddgst()
733 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_recv_ddgst()
735 le32_to_cpu(queue->recv_ddgst), in nvme_tcp_recv_ddgst()
736 le32_to_cpu(queue->exp_ddgst)); in nvme_tcp_recv_ddgst()
737 return -EIO; in nvme_tcp_recv_ddgst()
740 if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) { in nvme_tcp_recv_ddgst()
742 pdu->command_id); in nvme_tcp_recv_ddgst()
745 queue->nr_cqe++; in nvme_tcp_recv_ddgst()
753 unsigned int offset, size_t len) in nvme_tcp_recv_skb() argument
755 struct nvme_tcp_queue *queue = desc->arg.data; in nvme_tcp_recv_skb()
756 size_t consumed = len; in nvme_tcp_recv_skb()
759 while (len) { in nvme_tcp_recv_skb()
762 result = nvme_tcp_recv_pdu(queue, skb, &offset, &len); in nvme_tcp_recv_skb()
765 result = nvme_tcp_recv_data(queue, skb, &offset, &len); in nvme_tcp_recv_skb()
768 result = nvme_tcp_recv_ddgst(queue, skb, &offset, &len); in nvme_tcp_recv_skb()
771 result = -EFAULT; in nvme_tcp_recv_skb()
774 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_recv_skb()
776 queue->rd_enabled = false; in nvme_tcp_recv_skb()
777 nvme_tcp_error_recovery(&queue->ctrl->ctrl); in nvme_tcp_recv_skb()
789 read_lock(&sk->sk_callback_lock); in nvme_tcp_data_ready()
790 queue = sk->sk_user_data; in nvme_tcp_data_ready()
791 if (likely(queue && queue->rd_enabled)) in nvme_tcp_data_ready()
792 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work); in nvme_tcp_data_ready()
793 read_unlock(&sk->sk_callback_lock); in nvme_tcp_data_ready()
800 read_lock_bh(&sk->sk_callback_lock); in nvme_tcp_write_space()
801 queue = sk->sk_user_data; in nvme_tcp_write_space()
803 clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags); in nvme_tcp_write_space()
804 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work); in nvme_tcp_write_space()
806 read_unlock_bh(&sk->sk_callback_lock); in nvme_tcp_write_space()
813 read_lock(&sk->sk_callback_lock); in nvme_tcp_state_change()
814 queue = sk->sk_user_data; in nvme_tcp_state_change()
818 switch (sk->sk_state) { in nvme_tcp_state_change()
825 nvme_tcp_error_recovery(&queue->ctrl->ctrl); in nvme_tcp_state_change()
828 dev_info(queue->ctrl->ctrl.device, in nvme_tcp_state_change()
830 nvme_tcp_queue_id(queue), sk->sk_state); in nvme_tcp_state_change()
833 queue->state_change(sk); in nvme_tcp_state_change()
835 read_unlock(&sk->sk_callback_lock); in nvme_tcp_state_change()
840 queue->request = NULL; in nvme_tcp_done_send_req()
850 struct nvme_tcp_queue *queue = req->queue; in nvme_tcp_try_send_data()
855 size_t len = nvme_tcp_req_cur_length(req); in nvme_tcp_try_send_data() local
856 bool last = nvme_tcp_pdu_last_send(req, len); in nvme_tcp_try_send_data()
859 if (last && !queue->data_digest) in nvme_tcp_try_send_data()
866 ret = sock_no_sendpage(queue->sock, page, offset, len, in nvme_tcp_try_send_data()
869 ret = kernel_sendpage(queue->sock, page, offset, len, in nvme_tcp_try_send_data()
876 if (queue->data_digest) in nvme_tcp_try_send_data()
877 nvme_tcp_ddgst_update(queue->snd_hash, page, in nvme_tcp_try_send_data()
881 if (last && ret == len) { in nvme_tcp_try_send_data()
882 if (queue->data_digest) { in nvme_tcp_try_send_data()
883 nvme_tcp_ddgst_final(queue->snd_hash, in nvme_tcp_try_send_data()
884 &req->ddgst); in nvme_tcp_try_send_data()
885 req->state = NVME_TCP_SEND_DDGST; in nvme_tcp_try_send_data()
886 req->offset = 0; in nvme_tcp_try_send_data()
893 return -EAGAIN; in nvme_tcp_try_send_data()
898 struct nvme_tcp_queue *queue = req->queue; in nvme_tcp_try_send_cmd_pdu()
899 struct nvme_tcp_cmd_pdu *pdu = req->pdu; in nvme_tcp_try_send_cmd_pdu()
903 int len = sizeof(*pdu) + hdgst - req->offset; in nvme_tcp_try_send_cmd_pdu() local
906 if (queue->hdr_digest && !req->offset) in nvme_tcp_try_send_cmd_pdu()
907 nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu)); in nvme_tcp_try_send_cmd_pdu()
909 ret = kernel_sendpage(queue->sock, virt_to_page(pdu), in nvme_tcp_try_send_cmd_pdu()
910 offset_in_page(pdu) + req->offset, len, flags); in nvme_tcp_try_send_cmd_pdu()
914 len -= ret; in nvme_tcp_try_send_cmd_pdu()
915 if (!len) { in nvme_tcp_try_send_cmd_pdu()
917 req->state = NVME_TCP_SEND_DATA; in nvme_tcp_try_send_cmd_pdu()
918 if (queue->data_digest) in nvme_tcp_try_send_cmd_pdu()
919 crypto_ahash_init(queue->snd_hash); in nvme_tcp_try_send_cmd_pdu()
926 req->offset += ret; in nvme_tcp_try_send_cmd_pdu()
928 return -EAGAIN; in nvme_tcp_try_send_cmd_pdu()
933 struct nvme_tcp_queue *queue = req->queue; in nvme_tcp_try_send_data_pdu()
934 struct nvme_tcp_data_pdu *pdu = req->pdu; in nvme_tcp_try_send_data_pdu()
936 int len = sizeof(*pdu) - req->offset + hdgst; in nvme_tcp_try_send_data_pdu() local
939 if (queue->hdr_digest && !req->offset) in nvme_tcp_try_send_data_pdu()
940 nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu)); in nvme_tcp_try_send_data_pdu()
942 ret = kernel_sendpage(queue->sock, virt_to_page(pdu), in nvme_tcp_try_send_data_pdu()
943 offset_in_page(pdu) + req->offset, len, in nvme_tcp_try_send_data_pdu()
948 len -= ret; in nvme_tcp_try_send_data_pdu()
949 if (!len) { in nvme_tcp_try_send_data_pdu()
950 req->state = NVME_TCP_SEND_DATA; in nvme_tcp_try_send_data_pdu()
951 if (queue->data_digest) in nvme_tcp_try_send_data_pdu()
952 crypto_ahash_init(queue->snd_hash); in nvme_tcp_try_send_data_pdu()
953 if (!req->data_sent) in nvme_tcp_try_send_data_pdu()
957 req->offset += ret; in nvme_tcp_try_send_data_pdu()
959 return -EAGAIN; in nvme_tcp_try_send_data_pdu()
964 struct nvme_tcp_queue *queue = req->queue; in nvme_tcp_try_send_ddgst()
968 .iov_base = &req->ddgst + req->offset, in nvme_tcp_try_send_ddgst()
969 .iov_len = NVME_TCP_DIGEST_LENGTH - req->offset in nvme_tcp_try_send_ddgst()
972 ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len); in nvme_tcp_try_send_ddgst()
976 if (req->offset + ret == NVME_TCP_DIGEST_LENGTH) { in nvme_tcp_try_send_ddgst()
981 req->offset += ret; in nvme_tcp_try_send_ddgst()
982 return -EAGAIN; in nvme_tcp_try_send_ddgst()
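
Every try_send_* helper above follows the same pattern: attempt the send, advance req->offset by however many bytes the socket accepted, and report -EAGAIN so the io_work loop retries from that offset later. A userspace analogue of the partial-send handling, using write(2):

#include <errno.h>
#include <string.h>
#include <unistd.h>

static int try_send(int fd, const char *buf, size_t total, size_t *offset)
{
	while (*offset < total) {
		ssize_t ret = write(fd, buf + *offset, total - *offset);
		if (ret < 0) {
			if (errno == EAGAIN || errno == EWOULDBLOCK)
				return -EAGAIN;   /* resume from *offset later */
			return -errno;
		}
		*offset += ret;           /* partial write: keep going */
	}
	return 0;
}

int main(void)
{
	const char *msg = "pdu bytes\n";
	size_t off = 0;

	return try_send(STDOUT_FILENO, msg, strlen(msg), &off) ? 1 : 0;
}
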
990 if (!queue->request) { in nvme_tcp_try_send()
991 queue->request = nvme_tcp_fetch_request(queue); in nvme_tcp_try_send()
992 if (!queue->request) in nvme_tcp_try_send()
995 req = queue->request; in nvme_tcp_try_send()
997 if (req->state == NVME_TCP_SEND_CMD_PDU) { in nvme_tcp_try_send()
1005 if (req->state == NVME_TCP_SEND_H2C_PDU) { in nvme_tcp_try_send()
1011 if (req->state == NVME_TCP_SEND_DATA) { in nvme_tcp_try_send()
1017 if (req->state == NVME_TCP_SEND_DDGST) in nvme_tcp_try_send()
1020 if (ret == -EAGAIN) in nvme_tcp_try_send()
1027 struct socket *sock = queue->sock; in nvme_tcp_try_recv()
1028 struct sock *sk = sock->sk; in nvme_tcp_try_recv()
1035 queue->nr_cqe = 0; in nvme_tcp_try_recv()
1036 consumed = sock->ops->read_sock(sk, &rd_desc, nvme_tcp_recv_skb); in nvme_tcp_try_recv()
1055 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_io_work()
1057 if (result != -EPIPE) in nvme_tcp_io_work()
1058 nvme_tcp_fail_request(queue->request); in nvme_tcp_io_work()
1072 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work); in nvme_tcp_io_work()
1077 struct crypto_ahash *tfm = crypto_ahash_reqtfm(queue->rcv_hash); in nvme_tcp_free_crypto()
1079 ahash_request_free(queue->rcv_hash); in nvme_tcp_free_crypto()
1080 ahash_request_free(queue->snd_hash); in nvme_tcp_free_crypto()
1092 queue->snd_hash = ahash_request_alloc(tfm, GFP_KERNEL); in nvme_tcp_alloc_crypto()
1093 if (!queue->snd_hash) in nvme_tcp_alloc_crypto()
1095 ahash_request_set_callback(queue->snd_hash, 0, NULL, NULL); in nvme_tcp_alloc_crypto()
1097 queue->rcv_hash = ahash_request_alloc(tfm, GFP_KERNEL); in nvme_tcp_alloc_crypto()
1098 if (!queue->rcv_hash) in nvme_tcp_alloc_crypto()
1100 ahash_request_set_callback(queue->rcv_hash, 0, NULL, NULL); in nvme_tcp_alloc_crypto()
1104 ahash_request_free(queue->snd_hash); in nvme_tcp_alloc_crypto()
1107 return -ENOMEM; in nvme_tcp_alloc_crypto()
1110 static void nvme_tcp_free_async_req(struct nvme_tcp_ctrl *ctrl) in nvme_tcp_free_async_req() argument
1112 struct nvme_tcp_request *async = &ctrl->async_req; in nvme_tcp_free_async_req()
1114 page_frag_free(async->pdu); in nvme_tcp_free_async_req()
1117 static int nvme_tcp_alloc_async_req(struct nvme_tcp_ctrl *ctrl) in nvme_tcp_alloc_async_req() argument
1119 struct nvme_tcp_queue *queue = &ctrl->queues[0]; in nvme_tcp_alloc_async_req()
1120 struct nvme_tcp_request *async = &ctrl->async_req; in nvme_tcp_alloc_async_req()
1123 async->pdu = page_frag_alloc(&queue->pf_cache, in nvme_tcp_alloc_async_req()
1126 if (!async->pdu) in nvme_tcp_alloc_async_req()
1127 return -ENOMEM; in nvme_tcp_alloc_async_req()
1129 async->queue = &ctrl->queues[0]; in nvme_tcp_alloc_async_req()
1135 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl); in nvme_tcp_free_queue() local
1136 struct nvme_tcp_queue *queue = &ctrl->queues[qid]; in nvme_tcp_free_queue()
1138 if (!test_and_clear_bit(NVME_TCP_Q_ALLOCATED, &queue->flags)) in nvme_tcp_free_queue()
1141 if (queue->hdr_digest || queue->data_digest) in nvme_tcp_free_queue()
1144 sock_release(queue->sock); in nvme_tcp_free_queue()
1145 kfree(queue->pdu); in nvme_tcp_free_queue()
1159 return -ENOMEM; in nvme_tcp_init_connection()
1163 ret = -ENOMEM; in nvme_tcp_init_connection()
1167 icreq->hdr.type = nvme_tcp_icreq; in nvme_tcp_init_connection()
1168 icreq->hdr.hlen = sizeof(*icreq); in nvme_tcp_init_connection()
1169 icreq->hdr.pdo = 0; in nvme_tcp_init_connection()
1170 icreq->hdr.plen = cpu_to_le32(icreq->hdr.hlen); in nvme_tcp_init_connection()
1171 icreq->pfv = cpu_to_le16(NVME_TCP_PFV_1_0); in nvme_tcp_init_connection()
1172 icreq->maxr2t = 0; /* single inflight r2t supported */ in nvme_tcp_init_connection()
1173 icreq->hpda = 0; /* no alignment constraint */ in nvme_tcp_init_connection()
1174 if (queue->hdr_digest) in nvme_tcp_init_connection()
1175 icreq->digest |= NVME_TCP_HDR_DIGEST_ENABLE; in nvme_tcp_init_connection()
1176 if (queue->data_digest) in nvme_tcp_init_connection()
1177 icreq->digest |= NVME_TCP_DATA_DIGEST_ENABLE; in nvme_tcp_init_connection()
1181 ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len); in nvme_tcp_init_connection()
1188 ret = kernel_recvmsg(queue->sock, &msg, &iov, 1, in nvme_tcp_init_connection()
1193 ret = -EINVAL; in nvme_tcp_init_connection()
1194 if (icresp->hdr.type != nvme_tcp_icresp) { in nvme_tcp_init_connection()
1196 nvme_tcp_queue_id(queue), icresp->hdr.type); in nvme_tcp_init_connection()
1200 if (le32_to_cpu(icresp->hdr.plen) != sizeof(*icresp)) { in nvme_tcp_init_connection()
1202 nvme_tcp_queue_id(queue), icresp->hdr.plen); in nvme_tcp_init_connection()
1206 if (icresp->pfv != NVME_TCP_PFV_1_0) { in nvme_tcp_init_connection()
1208 nvme_tcp_queue_id(queue), icresp->pfv); in nvme_tcp_init_connection()
1212 ctrl_ddgst = !!(icresp->digest & NVME_TCP_DATA_DIGEST_ENABLE); in nvme_tcp_init_connection()
1213 if ((queue->data_digest && !ctrl_ddgst) || in nvme_tcp_init_connection()
1214 (!queue->data_digest && ctrl_ddgst)) { in nvme_tcp_init_connection()
1215 pr_err("queue %d: data digest mismatch host: %s ctrl: %s\n", in nvme_tcp_init_connection()
1217 queue->data_digest ? "enabled" : "disabled", in nvme_tcp_init_connection()
1222 ctrl_hdgst = !!(icresp->digest & NVME_TCP_HDR_DIGEST_ENABLE); in nvme_tcp_init_connection()
1223 if ((queue->hdr_digest && !ctrl_hdgst) || in nvme_tcp_init_connection()
1224 (!queue->hdr_digest && ctrl_hdgst)) { in nvme_tcp_init_connection()
1225 pr_err("queue %d: header digest mismatch host: %s ctrl: %s\n", in nvme_tcp_init_connection()
1227 queue->hdr_digest ? "enabled" : "disabled", in nvme_tcp_init_connection()
1232 if (icresp->cpda != 0) { in nvme_tcp_init_connection()
1234 nvme_tcp_queue_id(queue), icresp->cpda); in nvme_tcp_init_connection()
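
nvme_tcp_init_connection() sends an ICReq and then sanity-checks the ICResp: PDU type and plen, PFV 1.0, that the digest enable bits match what the host asked for, and cpda == 0. A condensed model of those checks (the struct layout is illustrative rather than the exact 128-byte wire format; bit 0 is the header-digest enable and bit 1 the data-digest enable, as in linux/nvme-tcp.h):

#include <stdint.h>
#include <stdio.h>

struct icresp_model {
	uint8_t  type;
	uint32_t plen;
	uint16_t pfv;
	uint8_t  cpda, digest;
};

static int validate(const struct icresp_model *r, int want_hdgst, int want_ddgst)
{
	if (r->type != 0x1)                    /* nvme_tcp_icresp */
		return -1;
	if (r->plen != 128)                    /* ICResp is 128 bytes on the wire */
		return -1;
	if (r->pfv != 0)                       /* NVME_TCP_PFV_1_0 */
		return -1;
	if (!!(r->digest & 1) != want_hdgst)   /* header digest bit */
		return -1;
	if (!!(r->digest & 2) != want_ddgst)   /* data digest bit */
		return -1;
	if (r->cpda != 0)                      /* no data alignment supported */
		return -1;
	return 0;
}

int main(void)
{
	struct icresp_model r = { .type = 1, .plen = 128, .digest = 3 };

	printf("%s\n", validate(&r, 1, 1) ? "reject" : "accept");
	return 0;
}
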
1249 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl); in nvme_tcp_alloc_queue() local
1250 struct nvme_tcp_queue *queue = &ctrl->queues[qid]; in nvme_tcp_alloc_queue()
1254 queue->ctrl = ctrl; in nvme_tcp_alloc_queue()
1255 INIT_LIST_HEAD(&queue->send_list); in nvme_tcp_alloc_queue()
1256 spin_lock_init(&queue->lock); in nvme_tcp_alloc_queue()
1257 INIT_WORK(&queue->io_work, nvme_tcp_io_work); in nvme_tcp_alloc_queue()
1258 queue->queue_size = queue_size; in nvme_tcp_alloc_queue()
1261 queue->cmnd_capsule_len = nctrl->ioccsz * 16; in nvme_tcp_alloc_queue()
1263 queue->cmnd_capsule_len = sizeof(struct nvme_command) + in nvme_tcp_alloc_queue()
1266 ret = sock_create(ctrl->addr.ss_family, SOCK_STREAM, in nvme_tcp_alloc_queue()
1267 IPPROTO_TCP, &queue->sock); in nvme_tcp_alloc_queue()
1269 dev_err(nctrl->device, in nvme_tcp_alloc_queue()
1276 ret = kernel_setsockopt(queue->sock, IPPROTO_TCP, TCP_SYNCNT, in nvme_tcp_alloc_queue()
1279 dev_err(nctrl->device, in nvme_tcp_alloc_queue()
1286 ret = kernel_setsockopt(queue->sock, IPPROTO_TCP, in nvme_tcp_alloc_queue()
1289 dev_err(nctrl->device, in nvme_tcp_alloc_queue()
1299 ret = kernel_setsockopt(queue->sock, SOL_SOCKET, SO_LINGER, in nvme_tcp_alloc_queue()
1302 dev_err(nctrl->device, in nvme_tcp_alloc_queue()
1308 if (nctrl->opts->tos >= 0) { in nvme_tcp_alloc_queue()
1309 opt = nctrl->opts->tos; in nvme_tcp_alloc_queue()
1310 ret = kernel_setsockopt(queue->sock, SOL_IP, IP_TOS, in nvme_tcp_alloc_queue()
1313 dev_err(nctrl->device, in nvme_tcp_alloc_queue()
1319 queue->sock->sk->sk_allocation = GFP_ATOMIC; in nvme_tcp_alloc_queue()
1323 n = (qid - 1) % num_online_cpus(); in nvme_tcp_alloc_queue()
1324 queue->io_cpu = cpumask_next_wrap(n - 1, cpu_online_mask, -1, false); in nvme_tcp_alloc_queue()
1325 queue->request = NULL; in nvme_tcp_alloc_queue()
1326 queue->data_remaining = 0; in nvme_tcp_alloc_queue()
1327 queue->ddgst_remaining = 0; in nvme_tcp_alloc_queue()
1328 queue->pdu_remaining = 0; in nvme_tcp_alloc_queue()
1329 queue->pdu_offset = 0; in nvme_tcp_alloc_queue()
1330 sk_set_memalloc(queue->sock->sk); in nvme_tcp_alloc_queue()
1332 if (nctrl->opts->mask & NVMF_OPT_HOST_TRADDR) { in nvme_tcp_alloc_queue()
1333 ret = kernel_bind(queue->sock, (struct sockaddr *)&ctrl->src_addr, in nvme_tcp_alloc_queue()
1334 sizeof(ctrl->src_addr)); in nvme_tcp_alloc_queue()
1336 dev_err(nctrl->device, in nvme_tcp_alloc_queue()
1343 queue->hdr_digest = nctrl->opts->hdr_digest; in nvme_tcp_alloc_queue()
1344 queue->data_digest = nctrl->opts->data_digest; in nvme_tcp_alloc_queue()
1345 if (queue->hdr_digest || queue->data_digest) { in nvme_tcp_alloc_queue()
1348 dev_err(nctrl->device, in nvme_tcp_alloc_queue()
1356 queue->pdu = kmalloc(rcv_pdu_size, GFP_KERNEL); in nvme_tcp_alloc_queue()
1357 if (!queue->pdu) { in nvme_tcp_alloc_queue()
1358 ret = -ENOMEM; in nvme_tcp_alloc_queue()
1362 dev_dbg(nctrl->device, "connecting queue %d\n", in nvme_tcp_alloc_queue()
1365 ret = kernel_connect(queue->sock, (struct sockaddr *)&ctrl->addr, in nvme_tcp_alloc_queue()
1366 sizeof(ctrl->addr), 0); in nvme_tcp_alloc_queue()
1368 dev_err(nctrl->device, in nvme_tcp_alloc_queue()
1377 queue->rd_enabled = true; in nvme_tcp_alloc_queue()
1378 set_bit(NVME_TCP_Q_ALLOCATED, &queue->flags); in nvme_tcp_alloc_queue()
1381 write_lock_bh(&queue->sock->sk->sk_callback_lock); in nvme_tcp_alloc_queue()
1382 queue->sock->sk->sk_user_data = queue; in nvme_tcp_alloc_queue()
1383 queue->state_change = queue->sock->sk->sk_state_change; in nvme_tcp_alloc_queue()
1384 queue->data_ready = queue->sock->sk->sk_data_ready; in nvme_tcp_alloc_queue()
1385 queue->write_space = queue->sock->sk->sk_write_space; in nvme_tcp_alloc_queue()
1386 queue->sock->sk->sk_data_ready = nvme_tcp_data_ready; in nvme_tcp_alloc_queue()
1387 queue->sock->sk->sk_state_change = nvme_tcp_state_change; in nvme_tcp_alloc_queue()
1388 queue->sock->sk->sk_write_space = nvme_tcp_write_space; in nvme_tcp_alloc_queue()
1390 queue->sock->sk->sk_ll_usec = 1; in nvme_tcp_alloc_queue()
1392 write_unlock_bh(&queue->sock->sk->sk_callback_lock); in nvme_tcp_alloc_queue()
1397 kernel_sock_shutdown(queue->sock, SHUT_RDWR); in nvme_tcp_alloc_queue()
1399 kfree(queue->pdu); in nvme_tcp_alloc_queue()
1401 if (queue->hdr_digest || queue->data_digest) in nvme_tcp_alloc_queue()
1404 sock_release(queue->sock); in nvme_tcp_alloc_queue()
1405 queue->sock = NULL; in nvme_tcp_alloc_queue()
1411 struct socket *sock = queue->sock; in nvme_tcp_restore_sock_calls()
1413 write_lock_bh(&sock->sk->sk_callback_lock); in nvme_tcp_restore_sock_calls()
1414 sock->sk->sk_user_data = NULL; in nvme_tcp_restore_sock_calls()
1415 sock->sk->sk_data_ready = queue->data_ready; in nvme_tcp_restore_sock_calls()
1416 sock->sk->sk_state_change = queue->state_change; in nvme_tcp_restore_sock_calls()
1417 sock->sk->sk_write_space = queue->write_space; in nvme_tcp_restore_sock_calls()
1418 write_unlock_bh(&sock->sk->sk_callback_lock); in nvme_tcp_restore_sock_calls()
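
nvme_tcp_alloc_queue() saves the socket's original sk_data_ready / sk_state_change / sk_write_space callbacks before installing its own (the state-change hook chains to the saved one via queue->state_change(sk) above; data_ready just kicks io_work), and nvme_tcp_restore_sock_calls() puts the originals back at teardown. The same save/hook/restore pattern in a freestanding sketch (all types here are stand-ins, not kernel ones):

#include <stdio.h>

struct sock_stub { void (*data_ready)(struct sock_stub *); };
struct queue_stub { void (*saved_data_ready)(struct sock_stub *); };

static struct queue_stub q;

static void orig_data_ready(struct sock_stub *sk)
{
	(void)sk;
	printf("original handler\n");
}

static void hooked_data_ready(struct sock_stub *sk)
{
	printf("kick io_work, then chain\n");
	q.saved_data_ready(sk);
}

int main(void)
{
	struct sock_stub sk = { .data_ready = orig_data_ready };

	q.saved_data_ready = sk.data_ready;   /* save original */
	sk.data_ready = hooked_data_ready;    /* install hook */
	sk.data_ready(&sk);

	sk.data_ready = q.saved_data_ready;   /* restore on teardown */
	sk.data_ready(&sk);
	return 0;
}
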
1423 kernel_sock_shutdown(queue->sock, SHUT_RDWR); in __nvme_tcp_stop_queue()
1425 cancel_work_sync(&queue->io_work); in __nvme_tcp_stop_queue()
1430 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl); in nvme_tcp_stop_queue() local
1431 struct nvme_tcp_queue *queue = &ctrl->queues[qid]; in nvme_tcp_stop_queue()
1433 if (!test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags)) in nvme_tcp_stop_queue()
1441 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl); in nvme_tcp_start_queue() local
1450 set_bit(NVME_TCP_Q_LIVE, &ctrl->queues[idx].flags); in nvme_tcp_start_queue()
1452 if (test_bit(NVME_TCP_Q_ALLOCATED, &ctrl->queues[idx].flags)) in nvme_tcp_start_queue()
1453 __nvme_tcp_stop_queue(&ctrl->queues[idx]); in nvme_tcp_start_queue()
1454 dev_err(nctrl->device, in nvme_tcp_start_queue()
1463 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl); in nvme_tcp_alloc_tagset() local
1468 set = &ctrl->admin_tag_set; in nvme_tcp_alloc_tagset()
1470 set->ops = &nvme_tcp_admin_mq_ops; in nvme_tcp_alloc_tagset()
1471 set->queue_depth = NVME_AQ_MQ_TAG_DEPTH; in nvme_tcp_alloc_tagset()
1472 set->reserved_tags = 2; /* connect + keep-alive */ in nvme_tcp_alloc_tagset()
1473 set->numa_node = NUMA_NO_NODE; in nvme_tcp_alloc_tagset()
1474 set->cmd_size = sizeof(struct nvme_tcp_request); in nvme_tcp_alloc_tagset()
1475 set->driver_data = ctrl; in nvme_tcp_alloc_tagset()
1476 set->nr_hw_queues = 1; in nvme_tcp_alloc_tagset()
1477 set->timeout = ADMIN_TIMEOUT; in nvme_tcp_alloc_tagset()
1479 set = &ctrl->tag_set; in nvme_tcp_alloc_tagset()
1481 set->ops = &nvme_tcp_mq_ops; in nvme_tcp_alloc_tagset()
1482 set->queue_depth = nctrl->sqsize + 1; in nvme_tcp_alloc_tagset()
1483 set->reserved_tags = 1; /* fabric connect */ in nvme_tcp_alloc_tagset()
1484 set->numa_node = NUMA_NO_NODE; in nvme_tcp_alloc_tagset()
1485 set->flags = BLK_MQ_F_SHOULD_MERGE; in nvme_tcp_alloc_tagset()
1486 set->cmd_size = sizeof(struct nvme_tcp_request); in nvme_tcp_alloc_tagset()
1487 set->driver_data = ctrl; in nvme_tcp_alloc_tagset()
1488 set->nr_hw_queues = nctrl->queue_count - 1; in nvme_tcp_alloc_tagset()
1489 set->timeout = NVME_IO_TIMEOUT; in nvme_tcp_alloc_tagset()
1490 set->nr_maps = nctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2; in nvme_tcp_alloc_tagset()
1500 static void nvme_tcp_free_admin_queue(struct nvme_ctrl *ctrl) in nvme_tcp_free_admin_queue() argument
1502 if (to_tcp_ctrl(ctrl)->async_req.pdu) { in nvme_tcp_free_admin_queue()
1503 nvme_tcp_free_async_req(to_tcp_ctrl(ctrl)); in nvme_tcp_free_admin_queue()
1504 to_tcp_ctrl(ctrl)->async_req.pdu = NULL; in nvme_tcp_free_admin_queue()
1507 nvme_tcp_free_queue(ctrl, 0); in nvme_tcp_free_admin_queue()
1510 static void nvme_tcp_free_io_queues(struct nvme_ctrl *ctrl) in nvme_tcp_free_io_queues() argument
1514 for (i = 1; i < ctrl->queue_count; i++) in nvme_tcp_free_io_queues()
1515 nvme_tcp_free_queue(ctrl, i); in nvme_tcp_free_io_queues()
1518 static void nvme_tcp_stop_io_queues(struct nvme_ctrl *ctrl) in nvme_tcp_stop_io_queues() argument
1522 for (i = 1; i < ctrl->queue_count; i++) in nvme_tcp_stop_io_queues()
1523 nvme_tcp_stop_queue(ctrl, i); in nvme_tcp_stop_io_queues()
1526 static int nvme_tcp_start_io_queues(struct nvme_ctrl *ctrl) in nvme_tcp_start_io_queues() argument
1530 for (i = 1; i < ctrl->queue_count; i++) { in nvme_tcp_start_io_queues()
1531 ret = nvme_tcp_start_queue(ctrl, i); in nvme_tcp_start_io_queues()
1539 for (i--; i >= 1; i--) in nvme_tcp_start_io_queues()
1540 nvme_tcp_stop_queue(ctrl, i); in nvme_tcp_start_io_queues()
1544 static int nvme_tcp_alloc_admin_queue(struct nvme_ctrl *ctrl) in nvme_tcp_alloc_admin_queue() argument
1548 ret = nvme_tcp_alloc_queue(ctrl, 0, NVME_AQ_DEPTH); in nvme_tcp_alloc_admin_queue()
1552 ret = nvme_tcp_alloc_async_req(to_tcp_ctrl(ctrl)); in nvme_tcp_alloc_admin_queue()
1559 nvme_tcp_free_queue(ctrl, 0); in nvme_tcp_alloc_admin_queue()
1563 static int __nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl) in __nvme_tcp_alloc_io_queues() argument
1567 for (i = 1; i < ctrl->queue_count; i++) { in __nvme_tcp_alloc_io_queues()
1568 ret = nvme_tcp_alloc_queue(ctrl, i, in __nvme_tcp_alloc_io_queues()
1569 ctrl->sqsize + 1); in __nvme_tcp_alloc_io_queues()
1577 for (i--; i >= 1; i--) in __nvme_tcp_alloc_io_queues()
1578 nvme_tcp_free_queue(ctrl, i); in __nvme_tcp_alloc_io_queues()
1583 static unsigned int nvme_tcp_nr_io_queues(struct nvme_ctrl *ctrl) in nvme_tcp_nr_io_queues() argument
1587 nr_io_queues = min(ctrl->opts->nr_io_queues, num_online_cpus()); in nvme_tcp_nr_io_queues()
1588 nr_io_queues += min(ctrl->opts->nr_write_queues, num_online_cpus()); in nvme_tcp_nr_io_queues()
1589 nr_io_queues += min(ctrl->opts->nr_poll_queues, num_online_cpus()); in nvme_tcp_nr_io_queues()
1597 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl); in nvme_tcp_set_io_queues() local
1598 struct nvmf_ctrl_options *opts = nctrl->opts; in nvme_tcp_set_io_queues()
1600 if (opts->nr_write_queues && opts->nr_io_queues < nr_io_queues) { in nvme_tcp_set_io_queues()
1606 ctrl->io_queues[HCTX_TYPE_READ] = opts->nr_io_queues; in nvme_tcp_set_io_queues()
1607 nr_io_queues -= ctrl->io_queues[HCTX_TYPE_READ]; in nvme_tcp_set_io_queues()
1608 ctrl->io_queues[HCTX_TYPE_DEFAULT] = in nvme_tcp_set_io_queues()
1609 min(opts->nr_write_queues, nr_io_queues); in nvme_tcp_set_io_queues()
1610 nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT]; in nvme_tcp_set_io_queues()
1617 ctrl->io_queues[HCTX_TYPE_DEFAULT] = in nvme_tcp_set_io_queues()
1618 min(opts->nr_io_queues, nr_io_queues); in nvme_tcp_set_io_queues()
1619 nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT]; in nvme_tcp_set_io_queues()
1622 if (opts->nr_poll_queues && nr_io_queues) { in nvme_tcp_set_io_queues()
1624 ctrl->io_queues[HCTX_TYPE_POLL] = in nvme_tcp_set_io_queues()
1625 min(opts->nr_poll_queues, nr_io_queues); in nvme_tcp_set_io_queues()
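
nvme_tcp_set_io_queues() splits the controller-granted queue count across the HCTX types: with dedicated write queues, reads take their own set first and writes fill the default map; polling queues come out of whatever remains. A simplified worked example (the requested counts are invented):

#include <stdio.h>

int main(void)
{
	unsigned nr_io = 8, nr_write = 4, nr_poll = 2;
	unsigned granted = 12;   /* what nvme_set_queue_count() allowed */
	unsigned read_q = 0, default_q, poll_q = 0;

	if (nr_write) {
		read_q = nr_io < granted ? nr_io : granted;
		granted -= read_q;
		default_q = nr_write < granted ? nr_write : granted;
		granted -= default_q;
	} else {
		default_q = nr_io < granted ? nr_io : granted;
		granted -= default_q;
	}
	if (nr_poll && granted)
		poll_q = nr_poll < granted ? nr_poll : granted;

	printf("default %u, read %u, poll %u\n", default_q, read_q, poll_q);
	return 0;
}
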
1629 static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl) in nvme_tcp_alloc_io_queues() argument
1634 nr_io_queues = nvme_tcp_nr_io_queues(ctrl); in nvme_tcp_alloc_io_queues()
1635 ret = nvme_set_queue_count(ctrl, &nr_io_queues); in nvme_tcp_alloc_io_queues()
1639 ctrl->queue_count = nr_io_queues + 1; in nvme_tcp_alloc_io_queues()
1640 if (ctrl->queue_count < 2) in nvme_tcp_alloc_io_queues()
1643 dev_info(ctrl->device, in nvme_tcp_alloc_io_queues()
1646 nvme_tcp_set_io_queues(ctrl, nr_io_queues); in nvme_tcp_alloc_io_queues()
1648 return __nvme_tcp_alloc_io_queues(ctrl); in nvme_tcp_alloc_io_queues()
1651 static void nvme_tcp_destroy_io_queues(struct nvme_ctrl *ctrl, bool remove) in nvme_tcp_destroy_io_queues() argument
1653 nvme_tcp_stop_io_queues(ctrl); in nvme_tcp_destroy_io_queues()
1655 blk_cleanup_queue(ctrl->connect_q); in nvme_tcp_destroy_io_queues()
1656 blk_mq_free_tag_set(ctrl->tagset); in nvme_tcp_destroy_io_queues()
1658 nvme_tcp_free_io_queues(ctrl); in nvme_tcp_destroy_io_queues()
1661 static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new) in nvme_tcp_configure_io_queues() argument
1665 ret = nvme_tcp_alloc_io_queues(ctrl); in nvme_tcp_configure_io_queues()
1670 ctrl->tagset = nvme_tcp_alloc_tagset(ctrl, false); in nvme_tcp_configure_io_queues()
1671 if (IS_ERR(ctrl->tagset)) { in nvme_tcp_configure_io_queues()
1672 ret = PTR_ERR(ctrl->tagset); in nvme_tcp_configure_io_queues()
1676 ctrl->connect_q = blk_mq_init_queue(ctrl->tagset); in nvme_tcp_configure_io_queues()
1677 if (IS_ERR(ctrl->connect_q)) { in nvme_tcp_configure_io_queues()
1678 ret = PTR_ERR(ctrl->connect_q); in nvme_tcp_configure_io_queues()
1682 blk_mq_update_nr_hw_queues(ctrl->tagset, in nvme_tcp_configure_io_queues()
1683 ctrl->queue_count - 1); in nvme_tcp_configure_io_queues()
1686 ret = nvme_tcp_start_io_queues(ctrl); in nvme_tcp_configure_io_queues()
1694 blk_cleanup_queue(ctrl->connect_q); in nvme_tcp_configure_io_queues()
1697 blk_mq_free_tag_set(ctrl->tagset); in nvme_tcp_configure_io_queues()
1699 nvme_tcp_free_io_queues(ctrl); in nvme_tcp_configure_io_queues()
1703 static void nvme_tcp_destroy_admin_queue(struct nvme_ctrl *ctrl, bool remove) in nvme_tcp_destroy_admin_queue() argument
1705 nvme_tcp_stop_queue(ctrl, 0); in nvme_tcp_destroy_admin_queue()
1707 blk_cleanup_queue(ctrl->admin_q); in nvme_tcp_destroy_admin_queue()
1708 blk_cleanup_queue(ctrl->fabrics_q); in nvme_tcp_destroy_admin_queue()
1709 blk_mq_free_tag_set(ctrl->admin_tagset); in nvme_tcp_destroy_admin_queue()
1711 nvme_tcp_free_admin_queue(ctrl); in nvme_tcp_destroy_admin_queue()
1714 static int nvme_tcp_configure_admin_queue(struct nvme_ctrl *ctrl, bool new) in nvme_tcp_configure_admin_queue() argument
1718 error = nvme_tcp_alloc_admin_queue(ctrl); in nvme_tcp_configure_admin_queue()
1723 ctrl->admin_tagset = nvme_tcp_alloc_tagset(ctrl, true); in nvme_tcp_configure_admin_queue()
1724 if (IS_ERR(ctrl->admin_tagset)) { in nvme_tcp_configure_admin_queue()
1725 error = PTR_ERR(ctrl->admin_tagset); in nvme_tcp_configure_admin_queue()
1729 ctrl->fabrics_q = blk_mq_init_queue(ctrl->admin_tagset); in nvme_tcp_configure_admin_queue()
1730 if (IS_ERR(ctrl->fabrics_q)) { in nvme_tcp_configure_admin_queue()
1731 error = PTR_ERR(ctrl->fabrics_q); in nvme_tcp_configure_admin_queue()
1735 ctrl->admin_q = blk_mq_init_queue(ctrl->admin_tagset); in nvme_tcp_configure_admin_queue()
1736 if (IS_ERR(ctrl->admin_q)) { in nvme_tcp_configure_admin_queue()
1737 error = PTR_ERR(ctrl->admin_q); in nvme_tcp_configure_admin_queue()
1742 error = nvme_tcp_start_queue(ctrl, 0); in nvme_tcp_configure_admin_queue()
1746 error = nvme_enable_ctrl(ctrl); in nvme_tcp_configure_admin_queue()
1750 blk_mq_unquiesce_queue(ctrl->admin_q); in nvme_tcp_configure_admin_queue()
1752 error = nvme_init_identify(ctrl); in nvme_tcp_configure_admin_queue()
1759 nvme_tcp_stop_queue(ctrl, 0); in nvme_tcp_configure_admin_queue()
1762 blk_cleanup_queue(ctrl->admin_q); in nvme_tcp_configure_admin_queue()
1765 blk_cleanup_queue(ctrl->fabrics_q); in nvme_tcp_configure_admin_queue()
1768 blk_mq_free_tag_set(ctrl->admin_tagset); in nvme_tcp_configure_admin_queue()
1770 nvme_tcp_free_admin_queue(ctrl); in nvme_tcp_configure_admin_queue()
1774 static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl, in nvme_tcp_teardown_admin_queue() argument
1777 blk_mq_quiesce_queue(ctrl->admin_q); in nvme_tcp_teardown_admin_queue()
1778 nvme_tcp_stop_queue(ctrl, 0); in nvme_tcp_teardown_admin_queue()
1779 if (ctrl->admin_tagset) { in nvme_tcp_teardown_admin_queue()
1780 blk_mq_tagset_busy_iter(ctrl->admin_tagset, in nvme_tcp_teardown_admin_queue()
1781 nvme_cancel_request, ctrl); in nvme_tcp_teardown_admin_queue()
1782 blk_mq_tagset_wait_completed_request(ctrl->admin_tagset); in nvme_tcp_teardown_admin_queue()
1785 blk_mq_unquiesce_queue(ctrl->admin_q); in nvme_tcp_teardown_admin_queue()
1786 nvme_tcp_destroy_admin_queue(ctrl, remove); in nvme_tcp_teardown_admin_queue()
1789 static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl, in nvme_tcp_teardown_io_queues() argument
1792 if (ctrl->queue_count <= 1) in nvme_tcp_teardown_io_queues()
1794 nvme_stop_queues(ctrl); in nvme_tcp_teardown_io_queues()
1795 nvme_tcp_stop_io_queues(ctrl); in nvme_tcp_teardown_io_queues()
1796 if (ctrl->tagset) { in nvme_tcp_teardown_io_queues()
1797 blk_mq_tagset_busy_iter(ctrl->tagset, in nvme_tcp_teardown_io_queues()
1798 nvme_cancel_request, ctrl); in nvme_tcp_teardown_io_queues()
1799 blk_mq_tagset_wait_completed_request(ctrl->tagset); in nvme_tcp_teardown_io_queues()
1802 nvme_start_queues(ctrl); in nvme_tcp_teardown_io_queues()
1803 nvme_tcp_destroy_io_queues(ctrl, remove); in nvme_tcp_teardown_io_queues()
1806 static void nvme_tcp_reconnect_or_remove(struct nvme_ctrl *ctrl) in nvme_tcp_reconnect_or_remove() argument
1809 if (ctrl->state != NVME_CTRL_CONNECTING) { in nvme_tcp_reconnect_or_remove()
1810 WARN_ON_ONCE(ctrl->state == NVME_CTRL_NEW || in nvme_tcp_reconnect_or_remove()
1811 ctrl->state == NVME_CTRL_LIVE); in nvme_tcp_reconnect_or_remove()
1815 if (nvmf_should_reconnect(ctrl)) { in nvme_tcp_reconnect_or_remove()
1816 dev_info(ctrl->device, "Reconnecting in %d seconds...\n", in nvme_tcp_reconnect_or_remove()
1817 ctrl->opts->reconnect_delay); in nvme_tcp_reconnect_or_remove()
1818 queue_delayed_work(nvme_wq, &to_tcp_ctrl(ctrl)->connect_work, in nvme_tcp_reconnect_or_remove()
1819 ctrl->opts->reconnect_delay * HZ); in nvme_tcp_reconnect_or_remove()
1821 dev_info(ctrl->device, "Removing controller...\n"); in nvme_tcp_reconnect_or_remove()
1822 nvme_delete_ctrl(ctrl); in nvme_tcp_reconnect_or_remove()
1826 static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new) in nvme_tcp_setup_ctrl() argument
1828 struct nvmf_ctrl_options *opts = ctrl->opts; in nvme_tcp_setup_ctrl()
1831 ret = nvme_tcp_configure_admin_queue(ctrl, new); in nvme_tcp_setup_ctrl()
1835 if (ctrl->icdoff) { in nvme_tcp_setup_ctrl()
1836 dev_err(ctrl->device, "icdoff is not supported!\n"); in nvme_tcp_setup_ctrl()
1840 if (opts->queue_size > ctrl->sqsize + 1) in nvme_tcp_setup_ctrl()
1841 dev_warn(ctrl->device, in nvme_tcp_setup_ctrl()
1842 "queue_size %zu > ctrl sqsize %u, clamping down\n", in nvme_tcp_setup_ctrl()
1843 opts->queue_size, ctrl->sqsize + 1); in nvme_tcp_setup_ctrl()
1845 if (ctrl->sqsize + 1 > ctrl->maxcmd) { in nvme_tcp_setup_ctrl()
1846 dev_warn(ctrl->device, in nvme_tcp_setup_ctrl()
1847 "sqsize %u > ctrl maxcmd %u, clamping down\n", in nvme_tcp_setup_ctrl()
1848 ctrl->sqsize + 1, ctrl->maxcmd); in nvme_tcp_setup_ctrl()
1849 ctrl->sqsize = ctrl->maxcmd - 1; in nvme_tcp_setup_ctrl()
1852 if (ctrl->queue_count > 1) { in nvme_tcp_setup_ctrl()
1853 ret = nvme_tcp_configure_io_queues(ctrl, new); in nvme_tcp_setup_ctrl()
1858 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE)) { in nvme_tcp_setup_ctrl()
1860 WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING); in nvme_tcp_setup_ctrl()
1861 ret = -EINVAL; in nvme_tcp_setup_ctrl()
1865 nvme_start_ctrl(ctrl); in nvme_tcp_setup_ctrl()
1869 if (ctrl->queue_count > 1) in nvme_tcp_setup_ctrl()
1870 nvme_tcp_destroy_io_queues(ctrl, new); in nvme_tcp_setup_ctrl()
1872 nvme_tcp_stop_queue(ctrl, 0); in nvme_tcp_setup_ctrl()
1873 nvme_tcp_destroy_admin_queue(ctrl, new); in nvme_tcp_setup_ctrl()
1881 struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl; in nvme_tcp_reconnect_ctrl_work() local
1883 ++ctrl->nr_reconnects; in nvme_tcp_reconnect_ctrl_work()
1885 if (nvme_tcp_setup_ctrl(ctrl, false)) in nvme_tcp_reconnect_ctrl_work()
1888 dev_info(ctrl->device, "Successfully reconnected (%d attempt)\n", in nvme_tcp_reconnect_ctrl_work()
1889 ctrl->nr_reconnects); in nvme_tcp_reconnect_ctrl_work()
1891 ctrl->nr_reconnects = 0; in nvme_tcp_reconnect_ctrl_work()
1896 dev_info(ctrl->device, "Failed reconnect attempt %d\n", in nvme_tcp_reconnect_ctrl_work()
1897 ctrl->nr_reconnects); in nvme_tcp_reconnect_ctrl_work()
1898 nvme_tcp_reconnect_or_remove(ctrl); in nvme_tcp_reconnect_ctrl_work()
1905 struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl; in nvme_tcp_error_recovery_work() local
1907 nvme_stop_keep_alive(ctrl); in nvme_tcp_error_recovery_work()
1908 nvme_tcp_teardown_io_queues(ctrl, false); in nvme_tcp_error_recovery_work()
1910 nvme_start_queues(ctrl); in nvme_tcp_error_recovery_work()
1911 nvme_tcp_teardown_admin_queue(ctrl, false); in nvme_tcp_error_recovery_work()
1912 blk_mq_unquiesce_queue(ctrl->admin_q); in nvme_tcp_error_recovery_work()
1914 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) { in nvme_tcp_error_recovery_work()
1916 WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING); in nvme_tcp_error_recovery_work()
1920 nvme_tcp_reconnect_or_remove(ctrl); in nvme_tcp_error_recovery_work()
1923 static void nvme_tcp_teardown_ctrl(struct nvme_ctrl *ctrl, bool shutdown) in nvme_tcp_teardown_ctrl() argument
1925 cancel_work_sync(&to_tcp_ctrl(ctrl)->err_work); in nvme_tcp_teardown_ctrl()
1926 cancel_delayed_work_sync(&to_tcp_ctrl(ctrl)->connect_work); in nvme_tcp_teardown_ctrl()
1928 nvme_tcp_teardown_io_queues(ctrl, shutdown); in nvme_tcp_teardown_ctrl()
1929 blk_mq_quiesce_queue(ctrl->admin_q); in nvme_tcp_teardown_ctrl()
1931 nvme_shutdown_ctrl(ctrl); in nvme_tcp_teardown_ctrl()
1933 nvme_disable_ctrl(ctrl); in nvme_tcp_teardown_ctrl()
1934 nvme_tcp_teardown_admin_queue(ctrl, shutdown); in nvme_tcp_teardown_ctrl()
1937 static void nvme_tcp_delete_ctrl(struct nvme_ctrl *ctrl) in nvme_tcp_delete_ctrl() argument
1939 nvme_tcp_teardown_ctrl(ctrl, true); in nvme_tcp_delete_ctrl()
1944 struct nvme_ctrl *ctrl = in nvme_reset_ctrl_work() local
1947 nvme_stop_ctrl(ctrl); in nvme_reset_ctrl_work()
1948 nvme_tcp_teardown_ctrl(ctrl, false); in nvme_reset_ctrl_work()
1950 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) { in nvme_reset_ctrl_work()
1952 WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING); in nvme_reset_ctrl_work()
1956 if (nvme_tcp_setup_ctrl(ctrl, false)) in nvme_reset_ctrl_work()
1962 ++ctrl->nr_reconnects; in nvme_reset_ctrl_work()
1963 nvme_tcp_reconnect_or_remove(ctrl); in nvme_reset_ctrl_work()
1968 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl); in nvme_tcp_free_ctrl() local
1970 if (list_empty(&ctrl->list)) in nvme_tcp_free_ctrl()
1974 list_del(&ctrl->list); in nvme_tcp_free_ctrl()
1977 nvmf_free_options(nctrl->opts); in nvme_tcp_free_ctrl()
1979 kfree(ctrl->queues); in nvme_tcp_free_ctrl()
1980 kfree(ctrl); in nvme_tcp_free_ctrl()
1985 struct nvme_sgl_desc *sg = &c->common.dptr.sgl; in nvme_tcp_set_sg_null()
1987 sg->addr = 0; in nvme_tcp_set_sg_null()
1988 sg->length = 0; in nvme_tcp_set_sg_null()
1989 sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) | in nvme_tcp_set_sg_null()
1996 struct nvme_sgl_desc *sg = &c->common.dptr.sgl; in nvme_tcp_set_sg_inline()
1998 sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff); in nvme_tcp_set_sg_inline()
1999 sg->length = cpu_to_le32(data_len); in nvme_tcp_set_sg_inline()
2000 sg->type = (NVME_SGL_FMT_DATA_DESC << 4) | NVME_SGL_FMT_OFFSET; in nvme_tcp_set_sg_inline()
2006 struct nvme_sgl_desc *sg = &c->common.dptr.sgl; in nvme_tcp_set_sg_host_data()
2008 sg->addr = 0; in nvme_tcp_set_sg_host_data()
2009 sg->length = cpu_to_le32(data_len); in nvme_tcp_set_sg_host_data()
2010 sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) | in nvme_tcp_set_sg_host_data()
2016 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(arg); in nvme_tcp_submit_async_event() local
2017 struct nvme_tcp_queue *queue = &ctrl->queues[0]; in nvme_tcp_submit_async_event()
2018 struct nvme_tcp_cmd_pdu *pdu = ctrl->async_req.pdu; in nvme_tcp_submit_async_event()
2019 struct nvme_command *cmd = &pdu->cmd; in nvme_tcp_submit_async_event()
2023 pdu->hdr.type = nvme_tcp_cmd; in nvme_tcp_submit_async_event()
2024 if (queue->hdr_digest) in nvme_tcp_submit_async_event()
2025 pdu->hdr.flags |= NVME_TCP_F_HDGST; in nvme_tcp_submit_async_event()
2026 pdu->hdr.hlen = sizeof(*pdu); in nvme_tcp_submit_async_event()
2027 pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst); in nvme_tcp_submit_async_event()
2029 cmd->common.opcode = nvme_admin_async_event; in nvme_tcp_submit_async_event()
2030 cmd->common.command_id = NVME_AQ_BLK_MQ_DEPTH; in nvme_tcp_submit_async_event()
2031 cmd->common.flags |= NVME_CMD_SGL_METABUF; in nvme_tcp_submit_async_event()
2034 ctrl->async_req.state = NVME_TCP_SEND_CMD_PDU; in nvme_tcp_submit_async_event()
2035 ctrl->async_req.offset = 0; in nvme_tcp_submit_async_event()
2036 ctrl->async_req.curr_bio = NULL; in nvme_tcp_submit_async_event()
2037 ctrl->async_req.data_len = 0; in nvme_tcp_submit_async_event()
2039 nvme_tcp_queue_request(&ctrl->async_req); in nvme_tcp_submit_async_event()
2046 struct nvme_tcp_ctrl *ctrl = req->queue->ctrl; in nvme_tcp_timeout() local
2047 struct nvme_tcp_cmd_pdu *pdu = req->pdu; in nvme_tcp_timeout()
2054 if (ctrl->ctrl.state == NVME_CTRL_RESETTING) in nvme_tcp_timeout()
2057 dev_warn(ctrl->ctrl.device, in nvme_tcp_timeout()
2059 nvme_tcp_queue_id(req->queue), rq->tag, pdu->hdr.type); in nvme_tcp_timeout()
2061 if (ctrl->ctrl.state != NVME_CTRL_LIVE) { in nvme_tcp_timeout()
2067 flush_work(&ctrl->err_work); in nvme_tcp_timeout()
2068 nvme_tcp_teardown_io_queues(&ctrl->ctrl, false); in nvme_tcp_timeout()
2069 nvme_tcp_teardown_admin_queue(&ctrl->ctrl, false); in nvme_tcp_timeout()
2073 dev_warn(ctrl->ctrl.device, "starting error recovery\n"); in nvme_tcp_timeout()
2074 nvme_tcp_error_recovery(&ctrl->ctrl); in nvme_tcp_timeout()
2083 struct nvme_tcp_cmd_pdu *pdu = req->pdu; in nvme_tcp_map_data()
2084 struct nvme_command *c = &pdu->cmd; in nvme_tcp_map_data()
2086 c->common.flags |= NVME_CMD_SGL_METABUF; in nvme_tcp_map_data()
2088 if (rq_data_dir(rq) == WRITE && req->data_len && in nvme_tcp_map_data()
2089 req->data_len <= nvme_tcp_inline_data_size(queue)) in nvme_tcp_map_data()
2090 nvme_tcp_set_sg_inline(queue, c, req->data_len); in nvme_tcp_map_data()
2092 nvme_tcp_set_sg_host_data(c, req->data_len); in nvme_tcp_map_data()
2101 struct nvme_tcp_cmd_pdu *pdu = req->pdu; in nvme_tcp_setup_cmd_pdu()
2102 struct nvme_tcp_queue *queue = req->queue; in nvme_tcp_setup_cmd_pdu()
2106 ret = nvme_setup_cmd(ns, rq, &pdu->cmd); in nvme_tcp_setup_cmd_pdu()
2110 req->state = NVME_TCP_SEND_CMD_PDU; in nvme_tcp_setup_cmd_pdu()
2111 req->offset = 0; in nvme_tcp_setup_cmd_pdu()
2112 req->data_sent = 0; in nvme_tcp_setup_cmd_pdu()
2113 req->pdu_len = 0; in nvme_tcp_setup_cmd_pdu()
2114 req->pdu_sent = 0; in nvme_tcp_setup_cmd_pdu()
2115 req->data_len = blk_rq_payload_bytes(rq); in nvme_tcp_setup_cmd_pdu()
2116 req->curr_bio = rq->bio; in nvme_tcp_setup_cmd_pdu()
2119 req->data_len <= nvme_tcp_inline_data_size(queue)) in nvme_tcp_setup_cmd_pdu()
2120 req->pdu_len = req->data_len; in nvme_tcp_setup_cmd_pdu()
2121 else if (req->curr_bio) in nvme_tcp_setup_cmd_pdu()
2124 pdu->hdr.type = nvme_tcp_cmd; in nvme_tcp_setup_cmd_pdu()
2125 pdu->hdr.flags = 0; in nvme_tcp_setup_cmd_pdu()
2126 if (queue->hdr_digest) in nvme_tcp_setup_cmd_pdu()
2127 pdu->hdr.flags |= NVME_TCP_F_HDGST; in nvme_tcp_setup_cmd_pdu()
2128 if (queue->data_digest && req->pdu_len) { in nvme_tcp_setup_cmd_pdu()
2129 pdu->hdr.flags |= NVME_TCP_F_DDGST; in nvme_tcp_setup_cmd_pdu()
2132 pdu->hdr.hlen = sizeof(*pdu); in nvme_tcp_setup_cmd_pdu()
2133 pdu->hdr.pdo = req->pdu_len ? pdu->hdr.hlen + hdgst : 0; in nvme_tcp_setup_cmd_pdu()
2134 pdu->hdr.plen = in nvme_tcp_setup_cmd_pdu()
2135 cpu_to_le32(pdu->hdr.hlen + hdgst + req->pdu_len + ddgst); in nvme_tcp_setup_cmd_pdu()
2140 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_setup_cmd_pdu()
2151 struct nvme_ns *ns = hctx->queue->queuedata; in nvme_tcp_queue_rq()
2152 struct nvme_tcp_queue *queue = hctx->driver_data; in nvme_tcp_queue_rq()
2153 struct request *rq = bd->rq; in nvme_tcp_queue_rq()
2155 bool queue_ready = test_bit(NVME_TCP_Q_LIVE, &queue->flags); in nvme_tcp_queue_rq()
2158 if (!nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready)) in nvme_tcp_queue_rq()
2159 return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq); in nvme_tcp_queue_rq()
2174 struct nvme_tcp_ctrl *ctrl = set->driver_data; in nvme_tcp_map_queues() local
2175 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts; in nvme_tcp_map_queues()
2177 if (opts->nr_write_queues && ctrl->io_queues[HCTX_TYPE_READ]) { in nvme_tcp_map_queues()
2179 set->map[HCTX_TYPE_DEFAULT].nr_queues = in nvme_tcp_map_queues()
2180 ctrl->io_queues[HCTX_TYPE_DEFAULT]; in nvme_tcp_map_queues()
2181 set->map[HCTX_TYPE_DEFAULT].queue_offset = 0; in nvme_tcp_map_queues()
2182 set->map[HCTX_TYPE_READ].nr_queues = in nvme_tcp_map_queues()
2183 ctrl->io_queues[HCTX_TYPE_READ]; in nvme_tcp_map_queues()
2184 set->map[HCTX_TYPE_READ].queue_offset = in nvme_tcp_map_queues()
2185 ctrl->io_queues[HCTX_TYPE_DEFAULT]; in nvme_tcp_map_queues()
2188 set->map[HCTX_TYPE_DEFAULT].nr_queues = in nvme_tcp_map_queues()
2189 ctrl->io_queues[HCTX_TYPE_DEFAULT]; in nvme_tcp_map_queues()
2190 set->map[HCTX_TYPE_DEFAULT].queue_offset = 0; in nvme_tcp_map_queues()
2191 set->map[HCTX_TYPE_READ].nr_queues = in nvme_tcp_map_queues()
2192 ctrl->io_queues[HCTX_TYPE_DEFAULT]; in nvme_tcp_map_queues()
2193 set->map[HCTX_TYPE_READ].queue_offset = 0; in nvme_tcp_map_queues()
2195 blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]); in nvme_tcp_map_queues()
2196 blk_mq_map_queues(&set->map[HCTX_TYPE_READ]); in nvme_tcp_map_queues()
2198 if (opts->nr_poll_queues && ctrl->io_queues[HCTX_TYPE_POLL]) { in nvme_tcp_map_queues()
2200 set->map[HCTX_TYPE_POLL].nr_queues = in nvme_tcp_map_queues()
2201 ctrl->io_queues[HCTX_TYPE_POLL]; in nvme_tcp_map_queues()
2202 set->map[HCTX_TYPE_POLL].queue_offset = in nvme_tcp_map_queues()
2203 ctrl->io_queues[HCTX_TYPE_DEFAULT] + in nvme_tcp_map_queues()
2204 ctrl->io_queues[HCTX_TYPE_READ]; in nvme_tcp_map_queues()
2205 blk_mq_map_queues(&set->map[HCTX_TYPE_POLL]); in nvme_tcp_map_queues()
2208 dev_info(ctrl->ctrl.device, in nvme_tcp_map_queues()
2210 ctrl->io_queues[HCTX_TYPE_DEFAULT], in nvme_tcp_map_queues()
2211 ctrl->io_queues[HCTX_TYPE_READ], in nvme_tcp_map_queues()
2212 ctrl->io_queues[HCTX_TYPE_POLL]); in nvme_tcp_map_queues()
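
nvme_tcp_map_queues() then lays those sets out contiguously: the default (write) map starts at offset 0, reads follow it when they have dedicated queues, and poll queues sit after both. The offset arithmetic, continuing the invented counts from the sketch above:

#include <stdio.h>

int main(void)
{
	unsigned default_q = 4, read_q = 8, poll_q = 2;

	unsigned default_off = 0;
	unsigned read_off = default_q;            /* reads start after writes */
	unsigned poll_off = default_q + read_q;   /* poll queues after both */

	printf("default@%u read@%u poll@%u (poll count %u)\n",
	       default_off, read_off, poll_off, poll_q);
	return 0;
}
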
2219 struct nvme_tcp_queue *queue = hctx->driver_data; in nvme_tcp_poll()
2220 struct sock *sk = queue->sock->sk; in nvme_tcp_poll()
2222 if (sk_can_busy_loop(sk) && skb_queue_empty_lockless(&sk->sk_receive_queue)) in nvme_tcp_poll()
2225 return queue->nr_cqe; in nvme_tcp_poll()
2264 struct nvme_tcp_ctrl *ctrl; in nvme_tcp_existing_controller() local
2268 list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list) { in nvme_tcp_existing_controller()
2269 found = nvmf_ip_options_match(&ctrl->ctrl, opts); in nvme_tcp_existing_controller()
2281 struct nvme_tcp_ctrl *ctrl; in nvme_tcp_create_ctrl() local
2284 ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL); in nvme_tcp_create_ctrl()
2285 if (!ctrl) in nvme_tcp_create_ctrl()
2286 return ERR_PTR(-ENOMEM); in nvme_tcp_create_ctrl()
2288 INIT_LIST_HEAD(&ctrl->list); in nvme_tcp_create_ctrl()
2289 ctrl->ctrl.opts = opts; in nvme_tcp_create_ctrl()
2290 ctrl->ctrl.queue_count = opts->nr_io_queues + opts->nr_write_queues + in nvme_tcp_create_ctrl()
2291 opts->nr_poll_queues + 1; in nvme_tcp_create_ctrl()
2292 ctrl->ctrl.sqsize = opts->queue_size - 1; in nvme_tcp_create_ctrl()
2293 ctrl->ctrl.kato = opts->kato; in nvme_tcp_create_ctrl()
2295 INIT_DELAYED_WORK(&ctrl->connect_work, in nvme_tcp_create_ctrl()
2297 INIT_WORK(&ctrl->err_work, nvme_tcp_error_recovery_work); in nvme_tcp_create_ctrl()
2298 INIT_WORK(&ctrl->ctrl.reset_work, nvme_reset_ctrl_work); in nvme_tcp_create_ctrl()
2300 if (!(opts->mask & NVMF_OPT_TRSVCID)) { in nvme_tcp_create_ctrl()
2301 opts->trsvcid = in nvme_tcp_create_ctrl()
2303 if (!opts->trsvcid) { in nvme_tcp_create_ctrl()
2304 ret = -ENOMEM; in nvme_tcp_create_ctrl()
2307 opts->mask |= NVMF_OPT_TRSVCID; in nvme_tcp_create_ctrl()
2311 opts->traddr, opts->trsvcid, &ctrl->addr); in nvme_tcp_create_ctrl()
2314 opts->traddr, opts->trsvcid); in nvme_tcp_create_ctrl()
2318 if (opts->mask & NVMF_OPT_HOST_TRADDR) { in nvme_tcp_create_ctrl()
2320 opts->host_traddr, NULL, &ctrl->src_addr); in nvme_tcp_create_ctrl()
2323 opts->host_traddr); in nvme_tcp_create_ctrl()
2328 if (!opts->duplicate_connect && nvme_tcp_existing_controller(opts)) { in nvme_tcp_create_ctrl()
2329 ret = -EALREADY; in nvme_tcp_create_ctrl()
2333 ctrl->queues = kcalloc(ctrl->ctrl.queue_count, sizeof(*ctrl->queues), in nvme_tcp_create_ctrl()
2335 if (!ctrl->queues) { in nvme_tcp_create_ctrl()
2336 ret = -ENOMEM; in nvme_tcp_create_ctrl()
2340 ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_tcp_ctrl_ops, 0); in nvme_tcp_create_ctrl()
2344 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) { in nvme_tcp_create_ctrl()
2346 ret = -EINTR; in nvme_tcp_create_ctrl()
2350 ret = nvme_tcp_setup_ctrl(&ctrl->ctrl, true); in nvme_tcp_create_ctrl()
2354 dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISp\n", in nvme_tcp_create_ctrl()
2355 ctrl->ctrl.opts->subsysnqn, &ctrl->addr); in nvme_tcp_create_ctrl()
2357 nvme_get_ctrl(&ctrl->ctrl); in nvme_tcp_create_ctrl()
2360 list_add_tail(&ctrl->list, &nvme_tcp_ctrl_list); in nvme_tcp_create_ctrl()
2363 return &ctrl->ctrl; in nvme_tcp_create_ctrl()
2366 nvme_uninit_ctrl(&ctrl->ctrl); in nvme_tcp_create_ctrl()
2367 nvme_put_ctrl(&ctrl->ctrl); in nvme_tcp_create_ctrl()
2369 ret = -EIO; in nvme_tcp_create_ctrl()
2372 kfree(ctrl->queues); in nvme_tcp_create_ctrl()
2374 kfree(ctrl); in nvme_tcp_create_ctrl()
2395 return -ENOMEM; in nvme_tcp_init_module()
2403 struct nvme_tcp_ctrl *ctrl; in nvme_tcp_cleanup_module() local
2408 list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list) in nvme_tcp_cleanup_module()
2409 nvme_delete_ctrl(&ctrl->ctrl); in nvme_tcp_cleanup_module()