Lines matching "queue" in the NVMe/TCP host driver (drivers/nvme/host/tcp.c). Each entry below gives the source line number, the matching line of code, and the enclosing function or context.

43 	struct nvme_tcp_queue	*queue;  member
138 static int nvme_tcp_try_send(struct nvme_tcp_queue *queue);
145 static inline int nvme_tcp_queue_id(struct nvme_tcp_queue *queue) in nvme_tcp_queue_id() argument
147 return queue - queue->ctrl->queues; in nvme_tcp_queue_id()
150 static inline struct blk_mq_tags *nvme_tcp_tagset(struct nvme_tcp_queue *queue) in nvme_tcp_tagset() argument
152 u32 queue_idx = nvme_tcp_queue_id(queue); in nvme_tcp_tagset()
155 return queue->ctrl->admin_tag_set.tags[queue_idx]; in nvme_tcp_tagset()
156 return queue->ctrl->tag_set.tags[queue_idx - 1]; in nvme_tcp_tagset()
159 static inline u8 nvme_tcp_hdgst_len(struct nvme_tcp_queue *queue) in nvme_tcp_hdgst_len() argument
161 return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0; in nvme_tcp_hdgst_len()
164 static inline u8 nvme_tcp_ddgst_len(struct nvme_tcp_queue *queue) in nvme_tcp_ddgst_len() argument
166 return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0; in nvme_tcp_ddgst_len()
169 static inline size_t nvme_tcp_inline_data_size(struct nvme_tcp_queue *queue) in nvme_tcp_inline_data_size() argument
171 return queue->cmnd_capsule_len - sizeof(struct nvme_command); in nvme_tcp_inline_data_size()
176 return req == &req->queue->ctrl->async_req; in nvme_tcp_async_req()
189 req->data_len <= nvme_tcp_inline_data_size(req->queue); in nvme_tcp_has_inline_data()
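The helpers above derive everything from pointer arithmetic and capsule sizing: a queue's index is its offset inside the controller's queue array (queue 0 is the admin queue), and the inline-data budget is whatever the command capsule holds beyond the fixed 64-byte SQE. A minimal userspace sketch of that arithmetic, with simplified stand-in structs rather than the kernel layout:

#include <assert.h>
#include <stddef.h>

struct nvme_command { char raw[64]; };      /* fixed 64-byte SQE */

struct fake_ctrl;

struct fake_queue {
	struct fake_ctrl *ctrl;
	size_t cmnd_capsule_len;
};

struct fake_ctrl {
	struct fake_queue queues[8];        /* queues[0] = admin queue */
};

static int queue_id(struct fake_queue *q)
{
	return q - q->ctrl->queues;         /* offset into the array */
}

static size_t inline_data_size(struct fake_queue *q)
{
	return q->cmnd_capsule_len - sizeof(struct nvme_command);
}

int main(void)
{
	static struct fake_ctrl c;

	for (int i = 0; i < 8; i++)
		c.queues[i].ctrl = &c;
	c.queues[3].cmnd_capsule_len = 16 * 16; /* ioccsz=16 -> 256 bytes */

	assert(queue_id(&c.queues[3]) == 3);
	assert(inline_data_size(&c.queues[3]) == 256 - 64);
	return 0;
}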
268 struct nvme_tcp_queue *queue = req->queue; in nvme_tcp_queue_request() local
271 empty = llist_add(&req->lentry, &queue->req_list) && in nvme_tcp_queue_request()
272 list_empty(&queue->send_list) && !queue->request; in nvme_tcp_queue_request()
276 * directly, otherwise queue io_work. Also, only do that if we in nvme_tcp_queue_request()
279 if (queue->io_cpu == smp_processor_id() && in nvme_tcp_queue_request()
280 sync && empty && mutex_trylock(&queue->send_mutex)) { in nvme_tcp_queue_request()
281 queue->more_requests = !last; in nvme_tcp_queue_request()
282 nvme_tcp_try_send(queue); in nvme_tcp_queue_request()
283 queue->more_requests = false; in nvme_tcp_queue_request()
284 mutex_unlock(&queue->send_mutex); in nvme_tcp_queue_request()
286 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work); in nvme_tcp_queue_request()
290 static void nvme_tcp_process_req_list(struct nvme_tcp_queue *queue) in nvme_tcp_process_req_list() argument
295 for (node = llist_del_all(&queue->req_list); node; node = node->next) { in nvme_tcp_process_req_list()
297 list_add(&req->entry, &queue->send_list); in nvme_tcp_process_req_list()
302 nvme_tcp_fetch_request(struct nvme_tcp_queue *queue) in nvme_tcp_fetch_request() argument
306 req = list_first_entry_or_null(&queue->send_list, in nvme_tcp_fetch_request()
309 nvme_tcp_process_req_list(queue); in nvme_tcp_fetch_request()
310 req = list_first_entry_or_null(&queue->send_list, in nvme_tcp_fetch_request()
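Two lists cooperate here: producers push requests onto req_list with the lock-free llist_add(), which is LIFO, and the sender drains the whole batch at once, prepending each node to send_list. Prepending a newest-first batch re-reverses it into submission (FIFO) order. A plain-pointer userspace model of that splice, not the kernel's lockless llist:

#include <stdio.h>
#include <stdlib.h>

struct node { int tag; struct node *next; };

static struct node *llist_head;            /* lock-free in the kernel */
static struct node *send_list;             /* sender-private, ordered */

static void llist_add(struct node *n)      /* producer side: push LIFO */
{
	n->next = llist_head;
	llist_head = n;
}

static void process_req_list(void)         /* sender side: drain batch */
{
	struct node *batch = llist_head;   /* llist_del_all() */
	llist_head = NULL;

	for (struct node *n = batch, *next; n; n = next) {
		next = n->next;
		n->next = send_list;       /* list_add(): prepend */
		send_list = n;
	}
}

int main(void)
{
	for (int i = 1; i <= 3; i++) {
		struct node *n = malloc(sizeof(*n));
		n->tag = i;
		llist_add(n);
	}
	process_req_list();
	for (struct node *n = send_list; n; n = n->next)
		printf("tag %d\n", n->tag); /* prints 1, 2, 3: FIFO restored */
	return 0;
}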
348 static int nvme_tcp_verify_hdgst(struct nvme_tcp_queue *queue, in nvme_tcp_verify_hdgst() argument
356 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_verify_hdgst()
357 "queue %d: header digest flag is cleared\n", in nvme_tcp_verify_hdgst()
358 nvme_tcp_queue_id(queue)); in nvme_tcp_verify_hdgst()
363 nvme_tcp_hdgst(queue->rcv_hash, pdu, pdu_len); in nvme_tcp_verify_hdgst()
366 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_verify_hdgst()
375 static int nvme_tcp_check_ddgst(struct nvme_tcp_queue *queue, void *pdu) in nvme_tcp_check_ddgst() argument
378 u8 digest_len = nvme_tcp_hdgst_len(queue); in nvme_tcp_check_ddgst()
385 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_check_ddgst()
386 "queue %d: data digest flag is cleared\n", in nvme_tcp_check_ddgst()
387 nvme_tcp_queue_id(queue)); in nvme_tcp_check_ddgst()
390 crypto_ahash_init(queue->rcv_hash); in nvme_tcp_check_ddgst()
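Both NVMe/TCP digests are CRC32C; the header digest covers the PDU header and is carried in the 4 bytes immediately after it. The driver computes it through the kernel crypto API (an ahash over crc32c), but the check itself is compute-and-compare. A self-contained userspace sketch with a bitwise CRC32C; the memcpy comparison assumes a little-endian host, matching the wire's little-endian digest:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t crc32c(const void *buf, size_t len)
{
	const uint8_t *p = buf;
	uint32_t crc = ~0u;

	while (len--) {
		crc ^= *p++;
		for (int k = 0; k < 8; k++)
			crc = (crc >> 1) ^ (0x82F63B78u & -(crc & 1));
	}
	return ~crc;
}

static int verify_hdgst(const uint8_t *pdu, size_t hlen)
{
	uint32_t recv, calc = crc32c(pdu, hlen);

	memcpy(&recv, pdu + hlen, sizeof(recv)); /* digest follows header */
	if (recv != calc) {
		fprintf(stderr, "header digest error: recv %#x calc %#x\n",
			recv, calc);
		return -1;                       /* -EIO in the driver */
	}
	return 0;
}

int main(void)
{
	uint8_t pdu[24 + 4] = { 0x05, 0x00, 24, 28 }; /* fake 24-byte header */
	uint32_t d = crc32c(pdu, 24);

	memcpy(pdu + 24, &d, sizeof(d));
	return verify_hdgst(pdu, 24);
}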
410 struct nvme_tcp_queue *queue = &ctrl->queues[queue_idx]; in nvme_tcp_init_request() local
411 u8 hdgst = nvme_tcp_hdgst_len(queue); in nvme_tcp_init_request()
413 req->pdu = page_frag_alloc(&queue->pf_cache, in nvme_tcp_init_request()
419 req->queue = queue; in nvme_tcp_init_request()
429 struct nvme_tcp_queue *queue = &ctrl->queues[hctx_idx + 1]; in nvme_tcp_init_hctx() local
431 hctx->driver_data = queue; in nvme_tcp_init_hctx()
439 struct nvme_tcp_queue *queue = &ctrl->queues[0]; in nvme_tcp_init_admin_hctx() local
441 hctx->driver_data = queue; in nvme_tcp_init_admin_hctx()
446 nvme_tcp_recv_state(struct nvme_tcp_queue *queue) in nvme_tcp_recv_state() argument
448 return (queue->pdu_remaining) ? NVME_TCP_RECV_PDU : in nvme_tcp_recv_state()
449 (queue->ddgst_remaining) ? NVME_TCP_RECV_DDGST : in nvme_tcp_recv_state()
453 static void nvme_tcp_init_recv_ctx(struct nvme_tcp_queue *queue) in nvme_tcp_init_recv_ctx() argument
455 queue->pdu_remaining = sizeof(struct nvme_tcp_rsp_pdu) + in nvme_tcp_init_recv_ctx()
456 nvme_tcp_hdgst_len(queue); in nvme_tcp_init_recv_ctx()
457 queue->pdu_offset = 0; in nvme_tcp_init_recv_ctx()
458 queue->data_remaining = -1; in nvme_tcp_init_recv_ctx()
459 queue->ddgst_remaining = 0; in nvme_tcp_init_recv_ctx()
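There is no explicit state variable: nvme_tcp_recv_state() derives the receive phase from whichever byte counter is still outstanding, and nvme_tcp_init_recv_ctx() re-arms the counters for the next response PDU (24-byte header plus an optional header digest). A compact model of the cycle:

#include <stdio.h>

enum recv_state { RECV_PDU, RECV_DATA, RECV_DDGST };

struct recv_ctx {
	int pdu_remaining, data_remaining, ddgst_remaining;
};

static enum recv_state recv_state(const struct recv_ctx *c)
{
	return c->pdu_remaining ? RECV_PDU :
	       c->ddgst_remaining ? RECV_DDGST : RECV_DATA;
}

int main(void)
{
	/* 24-byte response PDU, no header digest, as in init_recv_ctx() */
	struct recv_ctx c = { .pdu_remaining = 24, .data_remaining = -1 };

	printf("%d\n", recv_state(&c));        /* RECV_PDU (0) */
	c.pdu_remaining = 0;                   /* header fully received */
	c.data_remaining = 4096;
	printf("%d\n", recv_state(&c));        /* RECV_DATA (1) */
	c.data_remaining = 0;
	c.ddgst_remaining = 4;                 /* expect the data digest */
	printf("%d\n", recv_state(&c));        /* RECV_DDGST (2) */
	return 0;
}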
471 static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue, in nvme_tcp_process_nvme_cqe() argument
476 rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), cqe->command_id); in nvme_tcp_process_nvme_cqe()
478 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_process_nvme_cqe()
479 "queue %d tag 0x%x not found\n", in nvme_tcp_process_nvme_cqe()
480 nvme_tcp_queue_id(queue), cqe->command_id); in nvme_tcp_process_nvme_cqe()
481 nvme_tcp_error_recovery(&queue->ctrl->ctrl); in nvme_tcp_process_nvme_cqe()
487 queue->nr_cqe++; in nvme_tcp_process_nvme_cqe()
492 static int nvme_tcp_handle_c2h_data(struct nvme_tcp_queue *queue, in nvme_tcp_handle_c2h_data() argument
497 rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), pdu->command_id); in nvme_tcp_handle_c2h_data()
499 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_handle_c2h_data()
500 "queue %d tag %#x not found\n", in nvme_tcp_handle_c2h_data()
501 nvme_tcp_queue_id(queue), pdu->command_id); in nvme_tcp_handle_c2h_data()
506 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_handle_c2h_data()
507 "queue %d tag %#x unexpected data\n", in nvme_tcp_handle_c2h_data()
508 nvme_tcp_queue_id(queue), rq->tag); in nvme_tcp_handle_c2h_data()
512 queue->data_remaining = le32_to_cpu(pdu->data_length); in nvme_tcp_handle_c2h_data()
516 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_handle_c2h_data()
517 "queue %d tag %#x SUCCESS set but not last PDU\n", in nvme_tcp_handle_c2h_data()
518 nvme_tcp_queue_id(queue), rq->tag); in nvme_tcp_handle_c2h_data()
519 nvme_tcp_error_recovery(&queue->ctrl->ctrl); in nvme_tcp_handle_c2h_data()
526 static int nvme_tcp_handle_comp(struct nvme_tcp_queue *queue, in nvme_tcp_handle_comp() argument
534 * survive any kind of queue freeze and often don't respond to in nvme_tcp_handle_comp()
538 if (unlikely(nvme_is_aen_req(nvme_tcp_queue_id(queue), in nvme_tcp_handle_comp()
540 nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status, in nvme_tcp_handle_comp()
543 ret = nvme_tcp_process_nvme_cqe(queue, cqe); in nvme_tcp_handle_comp()
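AEN (asynchronous event) completions are the special case here: they reuse command IDs at or above the admin blk-mq depth, so they can never collide with a real request tag and are peeled off before the blk_mq_tag_to_rq() lookup. The predicate, following the kernel's nvme_is_aen_req() (NVME_AQ_BLK_MQ_DEPTH is the 32-entry admin queue depth minus one reserved AEN slot):

#include <stdbool.h>

#define NVME_AQ_BLK_MQ_DEPTH	31	/* 32-entry admin queue, 1 AEN slot */

static bool is_aen_req(unsigned int qid, unsigned int command_id)
{
	/* only the admin queue (qid 0) carries AEN completions */
	return qid == 0 && command_id >= NVME_AQ_BLK_MQ_DEPTH;
}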
552 struct nvme_tcp_queue *queue = req->queue; in nvme_tcp_setup_h2c_data_pdu() local
554 u8 hdgst = nvme_tcp_hdgst_len(queue); in nvme_tcp_setup_h2c_data_pdu()
555 u8 ddgst = nvme_tcp_ddgst_len(queue); in nvme_tcp_setup_h2c_data_pdu()
561 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_setup_h2c_data_pdu()
569 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_setup_h2c_data_pdu()
579 if (queue->hdr_digest) in nvme_tcp_setup_h2c_data_pdu()
581 if (queue->data_digest) in nvme_tcp_setup_h2c_data_pdu()
594 static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue, in nvme_tcp_handle_r2t() argument
601 rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), pdu->command_id); in nvme_tcp_handle_r2t()
603 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_handle_r2t()
604 "queue %d tag %#x not found\n", in nvme_tcp_handle_r2t()
605 nvme_tcp_queue_id(queue), pdu->command_id); in nvme_tcp_handle_r2t()
622 static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb, in nvme_tcp_recv_pdu() argument
626 char *pdu = queue->pdu; in nvme_tcp_recv_pdu()
627 size_t rcv_len = min_t(size_t, *len, queue->pdu_remaining); in nvme_tcp_recv_pdu()
631 &pdu[queue->pdu_offset], rcv_len); in nvme_tcp_recv_pdu()
635 queue->pdu_remaining -= rcv_len; in nvme_tcp_recv_pdu()
636 queue->pdu_offset += rcv_len; in nvme_tcp_recv_pdu()
639 if (queue->pdu_remaining) in nvme_tcp_recv_pdu()
642 hdr = queue->pdu; in nvme_tcp_recv_pdu()
643 if (queue->hdr_digest) { in nvme_tcp_recv_pdu()
644 ret = nvme_tcp_verify_hdgst(queue, queue->pdu, hdr->hlen); in nvme_tcp_recv_pdu()
650 if (queue->data_digest) { in nvme_tcp_recv_pdu()
651 ret = nvme_tcp_check_ddgst(queue, queue->pdu); in nvme_tcp_recv_pdu()
658 return nvme_tcp_handle_c2h_data(queue, (void *)queue->pdu); in nvme_tcp_recv_pdu()
660 nvme_tcp_init_recv_ctx(queue); in nvme_tcp_recv_pdu()
661 return nvme_tcp_handle_comp(queue, (void *)queue->pdu); in nvme_tcp_recv_pdu()
663 nvme_tcp_init_recv_ctx(queue); in nvme_tcp_recv_pdu()
664 return nvme_tcp_handle_r2t(queue, (void *)queue->pdu); in nvme_tcp_recv_pdu()
666 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_recv_pdu()
680 static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb, in nvme_tcp_recv_data() argument
683 struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu; in nvme_tcp_recv_data()
687 rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), pdu->command_id); in nvme_tcp_recv_data()
689 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_recv_data()
690 "queue %d tag %#x not found\n", in nvme_tcp_recv_data()
691 nvme_tcp_queue_id(queue), pdu->command_id); in nvme_tcp_recv_data()
699 recv_len = min_t(size_t, *len, queue->data_remaining); in nvme_tcp_recv_data()
711 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_recv_data()
712 "queue %d no space in request %#x", in nvme_tcp_recv_data()
713 nvme_tcp_queue_id(queue), rq->tag); in nvme_tcp_recv_data()
714 nvme_tcp_init_recv_ctx(queue); in nvme_tcp_recv_data()
724 if (queue->data_digest) in nvme_tcp_recv_data()
726 &req->iter, recv_len, queue->rcv_hash); in nvme_tcp_recv_data()
731 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_recv_data()
732 "queue %d failed to copy request %#x data", in nvme_tcp_recv_data()
733 nvme_tcp_queue_id(queue), rq->tag); in nvme_tcp_recv_data()
739 queue->data_remaining -= recv_len; in nvme_tcp_recv_data()
742 if (!queue->data_remaining) { in nvme_tcp_recv_data()
743 if (queue->data_digest) { in nvme_tcp_recv_data()
744 nvme_tcp_ddgst_final(queue->rcv_hash, &queue->exp_ddgst); in nvme_tcp_recv_data()
745 queue->ddgst_remaining = NVME_TCP_DIGEST_LENGTH; in nvme_tcp_recv_data()
749 queue->nr_cqe++; in nvme_tcp_recv_data()
751 nvme_tcp_init_recv_ctx(queue); in nvme_tcp_recv_data()
758 static int nvme_tcp_recv_ddgst(struct nvme_tcp_queue *queue, in nvme_tcp_recv_ddgst() argument
761 struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu; in nvme_tcp_recv_ddgst()
762 char *ddgst = (char *)&queue->recv_ddgst; in nvme_tcp_recv_ddgst()
763 size_t recv_len = min_t(size_t, *len, queue->ddgst_remaining); in nvme_tcp_recv_ddgst()
764 off_t off = NVME_TCP_DIGEST_LENGTH - queue->ddgst_remaining; in nvme_tcp_recv_ddgst()
771 queue->ddgst_remaining -= recv_len; in nvme_tcp_recv_ddgst()
774 if (queue->ddgst_remaining) in nvme_tcp_recv_ddgst()
777 if (queue->recv_ddgst != queue->exp_ddgst) { in nvme_tcp_recv_ddgst()
778 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_recv_ddgst()
780 le32_to_cpu(queue->recv_ddgst), in nvme_tcp_recv_ddgst()
781 le32_to_cpu(queue->exp_ddgst)); in nvme_tcp_recv_ddgst()
786 struct request *rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), in nvme_tcp_recv_ddgst()
790 queue->nr_cqe++; in nvme_tcp_recv_ddgst()
793 nvme_tcp_init_recv_ctx(queue); in nvme_tcp_recv_ddgst()
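The 4-byte data digest can itself straddle TCP segment boundaries, so nvme_tcp_recv_ddgst() accumulates it byte-wise: ddgst_remaining counts down while the write offset into the digest buffer counts up. A self-contained model of the reassembly (the final check assumes a little-endian host):

#include <stdint.h>
#include <string.h>

struct ddgst_ctx {
	uint32_t recv_ddgst;
	int remaining;			/* bytes of digest still expected */
};

static size_t recv_ddgst(struct ddgst_ctx *c, const char *buf, size_t len)
{
	char *d = (char *)&c->recv_ddgst;
	size_t off = sizeof(c->recv_ddgst) - c->remaining;
	size_t n = len < (size_t)c->remaining ? len : (size_t)c->remaining;

	memcpy(d + off, buf, n);
	c->remaining -= n;
	return n;			/* caller advances its buffer by n */
}

int main(void)
{
	struct ddgst_ctx c = { .remaining = 4 };

	recv_ddgst(&c, "\x11\x22", 2);	/* digest split across */
	recv_ddgst(&c, "\x33\x44", 2);	/* two TCP segments */
	return c.recv_ddgst == 0x44332211 ? 0 : 1;
}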
800 struct nvme_tcp_queue *queue = desc->arg.data; in nvme_tcp_recv_skb() local
805 switch (nvme_tcp_recv_state(queue)) { in nvme_tcp_recv_skb()
807 result = nvme_tcp_recv_pdu(queue, skb, &offset, &len); in nvme_tcp_recv_skb()
810 result = nvme_tcp_recv_data(queue, skb, &offset, &len); in nvme_tcp_recv_skb()
813 result = nvme_tcp_recv_ddgst(queue, skb, &offset, &len); in nvme_tcp_recv_skb()
819 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_recv_skb()
821 queue->rd_enabled = false; in nvme_tcp_recv_skb()
822 nvme_tcp_error_recovery(&queue->ctrl->ctrl); in nvme_tcp_recv_skb()
832 struct nvme_tcp_queue *queue; in nvme_tcp_data_ready() local
835 queue = sk->sk_user_data; in nvme_tcp_data_ready()
836 if (likely(queue && queue->rd_enabled) && in nvme_tcp_data_ready()
837 !test_bit(NVME_TCP_Q_POLLING, &queue->flags)) in nvme_tcp_data_ready()
838 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work); in nvme_tcp_data_ready()
844 struct nvme_tcp_queue *queue; in nvme_tcp_write_space() local
847 queue = sk->sk_user_data; in nvme_tcp_write_space()
848 if (likely(queue && sk_stream_is_writeable(sk))) { in nvme_tcp_write_space()
850 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work); in nvme_tcp_write_space()
857 struct nvme_tcp_queue *queue; in nvme_tcp_state_change() local
860 queue = sk->sk_user_data; in nvme_tcp_state_change()
861 if (!queue) in nvme_tcp_state_change()
870 nvme_tcp_error_recovery(&queue->ctrl->ctrl); in nvme_tcp_state_change()
873 dev_info(queue->ctrl->ctrl.device, in nvme_tcp_state_change()
874 "queue %d socket state %d\n", in nvme_tcp_state_change()
875 nvme_tcp_queue_id(queue), sk->sk_state); in nvme_tcp_state_change()
878 queue->state_change(sk); in nvme_tcp_state_change()
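All three socket callbacks follow one pattern: the queue is stashed in sk_user_data under sk_callback_lock, the original callback is saved so nvme_tcp_restore_sock_calls() can undo the hook at teardown, and state_change additionally chains to the saved callback. The shape of the hook, with a stand-in for struct sock rather than the real kernel type:

struct fake_sk {
	void *user_data;			/* sk_user_data */
	void (*state_change)(struct fake_sk *);
};

struct queue_ctx {
	void (*saved_state_change)(struct fake_sk *);
};

static void my_state_change(struct fake_sk *sk)
{
	struct queue_ctx *q = sk->user_data;

	/* ... react to the socket state transition ... */
	q->saved_state_change(sk);		/* chain to the original */
}

static void hook_callbacks(struct queue_ctx *q, struct fake_sk *sk)
{
	/* the driver does this under write_lock_bh(&sk->sk_callback_lock) */
	sk->user_data = q;
	q->saved_state_change = sk->state_change;
	sk->state_change = my_state_change;
}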
883 static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue) in nvme_tcp_queue_more() argument
885 return !list_empty(&queue->send_list) || in nvme_tcp_queue_more()
886 !llist_empty(&queue->req_list) || queue->more_requests; in nvme_tcp_queue_more()
889 static inline void nvme_tcp_done_send_req(struct nvme_tcp_queue *queue) in nvme_tcp_done_send_req() argument
891 queue->request = NULL; in nvme_tcp_done_send_req()
901 struct nvme_tcp_queue *queue = req->queue; in nvme_tcp_try_send_data() local
910 if (last && !queue->data_digest && !nvme_tcp_queue_more(queue)) in nvme_tcp_try_send_data()
916 ret = kernel_sendpage(queue->sock, page, offset, len, in nvme_tcp_try_send_data()
919 ret = sock_no_sendpage(queue->sock, page, offset, len, in nvme_tcp_try_send_data()
926 if (queue->data_digest) in nvme_tcp_try_send_data()
927 nvme_tcp_ddgst_update(queue->snd_hash, page, in nvme_tcp_try_send_data()
932 if (queue->data_digest) { in nvme_tcp_try_send_data()
933 nvme_tcp_ddgst_final(queue->snd_hash, in nvme_tcp_try_send_data()
938 nvme_tcp_done_send_req(queue); in nvme_tcp_try_send_data()
948 struct nvme_tcp_queue *queue = req->queue; in nvme_tcp_try_send_cmd_pdu() local
951 u8 hdgst = nvme_tcp_hdgst_len(queue); in nvme_tcp_try_send_cmd_pdu()
956 if (inline_data || nvme_tcp_queue_more(queue)) in nvme_tcp_try_send_cmd_pdu()
961 if (queue->hdr_digest && !req->offset) in nvme_tcp_try_send_cmd_pdu()
962 nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu)); in nvme_tcp_try_send_cmd_pdu()
964 ret = kernel_sendpage(queue->sock, virt_to_page(pdu), in nvme_tcp_try_send_cmd_pdu()
973 if (queue->data_digest) in nvme_tcp_try_send_cmd_pdu()
974 crypto_ahash_init(queue->snd_hash); in nvme_tcp_try_send_cmd_pdu()
977 nvme_tcp_done_send_req(queue); in nvme_tcp_try_send_cmd_pdu()
988 struct nvme_tcp_queue *queue = req->queue; in nvme_tcp_try_send_data_pdu() local
990 u8 hdgst = nvme_tcp_hdgst_len(queue); in nvme_tcp_try_send_data_pdu()
994 if (queue->hdr_digest && !req->offset) in nvme_tcp_try_send_data_pdu()
995 nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu)); in nvme_tcp_try_send_data_pdu()
997 ret = kernel_sendpage(queue->sock, virt_to_page(pdu), in nvme_tcp_try_send_data_pdu()
1006 if (queue->data_digest) in nvme_tcp_try_send_data_pdu()
1007 crypto_ahash_init(queue->snd_hash); in nvme_tcp_try_send_data_pdu()
1019 struct nvme_tcp_queue *queue = req->queue; in nvme_tcp_try_send_ddgst() local
1027 if (nvme_tcp_queue_more(queue)) in nvme_tcp_try_send_ddgst()
1032 ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len); in nvme_tcp_try_send_ddgst()
1037 nvme_tcp_done_send_req(queue); in nvme_tcp_try_send_ddgst()
1045 static int nvme_tcp_try_send(struct nvme_tcp_queue *queue) in nvme_tcp_try_send() argument
1050 if (!queue->request) { in nvme_tcp_try_send()
1051 queue->request = nvme_tcp_fetch_request(queue); in nvme_tcp_try_send()
1052 if (!queue->request) in nvme_tcp_try_send()
1055 req = queue->request; in nvme_tcp_try_send()
1083 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_try_send()
1086 nvme_tcp_fail_request(queue->request); in nvme_tcp_try_send()
1087 nvme_tcp_done_send_req(queue); in nvme_tcp_try_send()
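nvme_tcp_try_send() drives exactly one request at a time through its send phases: command PDU, then (for R2T-solicited transfers) an H2C data PDU, then data, then the data digest. A condensed walk of that progression; the state names mirror the driver's NVME_TCP_SEND_* constants, but the function is an illustration, not its control flow:

enum send_state {
	SEND_CMD_PDU,	/* command capsule PDU */
	SEND_H2C_PDU,	/* host-to-controller data PDU (after R2T) */
	SEND_DATA,
	SEND_DDGST,
	SEND_DONE,
};

static enum send_state send_step(enum send_state s, int inline_data,
				 int data_digest)
{
	switch (s) {
	case SEND_CMD_PDU:
		return inline_data ? SEND_DATA : SEND_DONE;
	case SEND_H2C_PDU:
		return SEND_DATA;
	case SEND_DATA:
		return data_digest ? SEND_DDGST : SEND_DONE;
	default:
		return SEND_DONE;
	}
}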
1092 static int nvme_tcp_try_recv(struct nvme_tcp_queue *queue) in nvme_tcp_try_recv() argument
1094 struct socket *sock = queue->sock; in nvme_tcp_try_recv()
1099 rd_desc.arg.data = queue; in nvme_tcp_try_recv()
1102 queue->nr_cqe = 0; in nvme_tcp_try_recv()
1110 struct nvme_tcp_queue *queue = in nvme_tcp_io_work() local
1118 if (mutex_trylock(&queue->send_mutex)) { in nvme_tcp_io_work()
1119 result = nvme_tcp_try_send(queue); in nvme_tcp_io_work()
1120 mutex_unlock(&queue->send_mutex); in nvme_tcp_io_work()
1127 result = nvme_tcp_try_recv(queue); in nvme_tcp_io_work()
1138 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work); in nvme_tcp_io_work()
1141 static void nvme_tcp_free_crypto(struct nvme_tcp_queue *queue) in nvme_tcp_free_crypto() argument
1143 struct crypto_ahash *tfm = crypto_ahash_reqtfm(queue->rcv_hash); in nvme_tcp_free_crypto()
1145 ahash_request_free(queue->rcv_hash); in nvme_tcp_free_crypto()
1146 ahash_request_free(queue->snd_hash); in nvme_tcp_free_crypto()
1150 static int nvme_tcp_alloc_crypto(struct nvme_tcp_queue *queue) in nvme_tcp_alloc_crypto() argument
1158 queue->snd_hash = ahash_request_alloc(tfm, GFP_KERNEL); in nvme_tcp_alloc_crypto()
1159 if (!queue->snd_hash) in nvme_tcp_alloc_crypto()
1161 ahash_request_set_callback(queue->snd_hash, 0, NULL, NULL); in nvme_tcp_alloc_crypto()
1163 queue->rcv_hash = ahash_request_alloc(tfm, GFP_KERNEL); in nvme_tcp_alloc_crypto()
1164 if (!queue->rcv_hash) in nvme_tcp_alloc_crypto()
1166 ahash_request_set_callback(queue->rcv_hash, 0, NULL, NULL); in nvme_tcp_alloc_crypto()
1170 ahash_request_free(queue->snd_hash); in nvme_tcp_alloc_crypto()
1185 struct nvme_tcp_queue *queue = &ctrl->queues[0]; in nvme_tcp_alloc_async_req() local
1187 u8 hdgst = nvme_tcp_hdgst_len(queue); in nvme_tcp_alloc_async_req()
1189 async->pdu = page_frag_alloc(&queue->pf_cache, in nvme_tcp_alloc_async_req()
1195 async->queue = &ctrl->queues[0]; in nvme_tcp_alloc_async_req()
1202 struct nvme_tcp_queue *queue = &ctrl->queues[qid]; in nvme_tcp_free_queue() local
1204 if (!test_and_clear_bit(NVME_TCP_Q_ALLOCATED, &queue->flags)) in nvme_tcp_free_queue()
1207 if (queue->hdr_digest || queue->data_digest) in nvme_tcp_free_queue()
1208 nvme_tcp_free_crypto(queue); in nvme_tcp_free_queue()
1210 sock_release(queue->sock); in nvme_tcp_free_queue()
1211 kfree(queue->pdu); in nvme_tcp_free_queue()
1214 static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue) in nvme_tcp_init_connection() argument
1240 if (queue->hdr_digest) in nvme_tcp_init_connection()
1242 if (queue->data_digest) in nvme_tcp_init_connection()
1247 ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len); in nvme_tcp_init_connection()
1254 ret = kernel_recvmsg(queue->sock, &msg, &iov, 1, in nvme_tcp_init_connection()
1261 pr_err("queue %d: bad type returned %d\n", in nvme_tcp_init_connection()
1262 nvme_tcp_queue_id(queue), icresp->hdr.type); in nvme_tcp_init_connection()
1267 pr_err("queue %d: bad pdu length returned %d\n", in nvme_tcp_init_connection()
1268 nvme_tcp_queue_id(queue), icresp->hdr.plen); in nvme_tcp_init_connection()
1273 pr_err("queue %d: bad pfv returned %d\n", in nvme_tcp_init_connection()
1274 nvme_tcp_queue_id(queue), icresp->pfv); in nvme_tcp_init_connection()
1279 if ((queue->data_digest && !ctrl_ddgst) || in nvme_tcp_init_connection()
1280 (!queue->data_digest && ctrl_ddgst)) { in nvme_tcp_init_connection()
1281 pr_err("queue %d: data digest mismatch host: %s ctrl: %s\n", in nvme_tcp_init_connection()
1282 nvme_tcp_queue_id(queue), in nvme_tcp_init_connection()
1283 queue->data_digest ? "enabled" : "disabled", in nvme_tcp_init_connection()
1289 if ((queue->hdr_digest && !ctrl_hdgst) || in nvme_tcp_init_connection()
1290 (!queue->hdr_digest && ctrl_hdgst)) { in nvme_tcp_init_connection()
1291 pr_err("queue %d: header digest mismatch host: %s ctrl: %s\n", in nvme_tcp_init_connection()
1292 nvme_tcp_queue_id(queue), in nvme_tcp_init_connection()
1293 queue->hdr_digest ? "enabled" : "disabled", in nvme_tcp_init_connection()
1299 pr_err("queue %d: unsupported cpda returned %d\n", in nvme_tcp_init_connection()
1300 nvme_tcp_queue_id(queue), icresp->cpda); in nvme_tcp_init_connection()
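Connection setup is a single ICReq/ICResp exchange, after which every negotiated field is checked before the queue is used: PDU type and length, protocol format version, controller PDU alignment, and that each digest came back exactly as requested. A simplified validation sketch (flattened struct, not the on-wire nvme_tcp_icresp_pdu layout):

#include <stdint.h>

struct icresp {
	uint8_t  type;		/* must be ICResp (0x01) */
	uint32_t plen;		/* must be 128 */
	uint16_t pfv;		/* must be protocol version 1.0 (0) */
	uint8_t  cpda;		/* host supports no controller padding */
	uint8_t  digest;	/* bit 0: HDGST enabled, bit 1: DDGST */
};

static int check_icresp(const struct icresp *r, int want_hdgst,
			int want_ddgst)
{
	if (r->type != 0x01 || r->plen != 128 || r->pfv != 0 || r->cpda != 0)
		return -1;
	if (!!(r->digest & 1) != want_hdgst)	/* header digest mismatch */
		return -1;
	if (!!(r->digest & 2) != want_ddgst)	/* data digest mismatch */
		return -1;
	return 0;
}

int main(void)
{
	struct icresp ok = { .type = 0x01, .plen = 128, .digest = 1 | 2 };

	return check_icresp(&ok, 1, 1);		/* 0: all fields accepted */
}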
1312 static bool nvme_tcp_admin_queue(struct nvme_tcp_queue *queue) in nvme_tcp_admin_queue() argument
1314 return nvme_tcp_queue_id(queue) == 0; in nvme_tcp_admin_queue()
1317 static bool nvme_tcp_default_queue(struct nvme_tcp_queue *queue) in nvme_tcp_default_queue() argument
1319 struct nvme_tcp_ctrl *ctrl = queue->ctrl; in nvme_tcp_default_queue()
1320 int qid = nvme_tcp_queue_id(queue); in nvme_tcp_default_queue()
1322 return !nvme_tcp_admin_queue(queue) && in nvme_tcp_default_queue()
1326 static bool nvme_tcp_read_queue(struct nvme_tcp_queue *queue) in nvme_tcp_read_queue() argument
1328 struct nvme_tcp_ctrl *ctrl = queue->ctrl; in nvme_tcp_read_queue()
1329 int qid = nvme_tcp_queue_id(queue); in nvme_tcp_read_queue()
1331 return !nvme_tcp_admin_queue(queue) && in nvme_tcp_read_queue()
1332 !nvme_tcp_default_queue(queue) && in nvme_tcp_read_queue()
1337 static bool nvme_tcp_poll_queue(struct nvme_tcp_queue *queue) in nvme_tcp_poll_queue() argument
1339 struct nvme_tcp_ctrl *ctrl = queue->ctrl; in nvme_tcp_poll_queue()
1340 int qid = nvme_tcp_queue_id(queue); in nvme_tcp_poll_queue()
1342 return !nvme_tcp_admin_queue(queue) && in nvme_tcp_poll_queue()
1343 !nvme_tcp_default_queue(queue) && in nvme_tcp_poll_queue()
1344 !nvme_tcp_read_queue(queue) && in nvme_tcp_poll_queue()
1350 static void nvme_tcp_set_queue_io_cpu(struct nvme_tcp_queue *queue) in nvme_tcp_set_queue_io_cpu() argument
1352 struct nvme_tcp_ctrl *ctrl = queue->ctrl; in nvme_tcp_set_queue_io_cpu()
1353 int qid = nvme_tcp_queue_id(queue); in nvme_tcp_set_queue_io_cpu()
1356 if (nvme_tcp_default_queue(queue)) in nvme_tcp_set_queue_io_cpu()
1358 else if (nvme_tcp_read_queue(queue)) in nvme_tcp_set_queue_io_cpu()
1360 else if (nvme_tcp_poll_queue(queue)) in nvme_tcp_set_queue_io_cpu()
1363 queue->io_cpu = cpumask_next_wrap(n - 1, cpu_online_mask, -1, false); in nvme_tcp_set_queue_io_cpu()
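Each queue class (default, read, poll) restarts its CPU counter at zero before wrapping over the online CPU mask, so the three classes are spread independently rather than stacked on the same cores. A sketch of the spreading with illustrative queue counts, assuming CPUs 0..3 are all online (the driver uses cpumask_next_wrap() rather than a plain modulo):

#include <stdio.h>

int main(void)
{
	int nr_default = 4, nr_read = 2, nr_online = 4;

	for (int qid = 1; qid <= 8; qid++) {	/* qid 0 = admin queue */
		int n;

		if (qid <= nr_default)
			n = qid - 1;			     /* default */
		else if (qid <= nr_default + nr_read)
			n = qid - nr_default - 1;	     /* read */
		else
			n = qid - nr_default - nr_read - 1;  /* poll */
		printf("queue %d -> io_cpu %d\n", qid, n % nr_online);
	}
	return 0;
}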
1370 struct nvme_tcp_queue *queue = &ctrl->queues[qid]; in nvme_tcp_alloc_queue() local
1373 queue->ctrl = ctrl; in nvme_tcp_alloc_queue()
1374 init_llist_head(&queue->req_list); in nvme_tcp_alloc_queue()
1375 INIT_LIST_HEAD(&queue->send_list); in nvme_tcp_alloc_queue()
1376 mutex_init(&queue->send_mutex); in nvme_tcp_alloc_queue()
1377 INIT_WORK(&queue->io_work, nvme_tcp_io_work); in nvme_tcp_alloc_queue()
1378 queue->queue_size = queue_size; in nvme_tcp_alloc_queue()
1381 queue->cmnd_capsule_len = nctrl->ioccsz * 16; in nvme_tcp_alloc_queue()
1383 queue->cmnd_capsule_len = sizeof(struct nvme_command) + in nvme_tcp_alloc_queue()
1387 IPPROTO_TCP, &queue->sock); in nvme_tcp_alloc_queue()
1395 tcp_sock_set_syncnt(queue->sock->sk, 1); in nvme_tcp_alloc_queue()
1398 tcp_sock_set_nodelay(queue->sock->sk); in nvme_tcp_alloc_queue()
1401 * Cleanup whatever is sitting in the TCP transmit queue on socket in nvme_tcp_alloc_queue()
1405 sock_no_linger(queue->sock->sk); in nvme_tcp_alloc_queue()
1408 sock_set_priority(queue->sock->sk, so_priority); in nvme_tcp_alloc_queue()
1412 ip_sock_set_tos(queue->sock->sk, nctrl->opts->tos); in nvme_tcp_alloc_queue()
1415 queue->sock->sk->sk_rcvtimeo = 10 * HZ; in nvme_tcp_alloc_queue()
1417 queue->sock->sk->sk_allocation = GFP_ATOMIC; in nvme_tcp_alloc_queue()
1418 nvme_tcp_set_queue_io_cpu(queue); in nvme_tcp_alloc_queue()
1419 queue->request = NULL; in nvme_tcp_alloc_queue()
1420 queue->data_remaining = 0; in nvme_tcp_alloc_queue()
1421 queue->ddgst_remaining = 0; in nvme_tcp_alloc_queue()
1422 queue->pdu_remaining = 0; in nvme_tcp_alloc_queue()
1423 queue->pdu_offset = 0; in nvme_tcp_alloc_queue()
1424 sk_set_memalloc(queue->sock->sk); in nvme_tcp_alloc_queue()
1427 ret = kernel_bind(queue->sock, (struct sockaddr *)&ctrl->src_addr, in nvme_tcp_alloc_queue()
1431 "failed to bind queue %d socket %d\n", in nvme_tcp_alloc_queue()
1437 queue->hdr_digest = nctrl->opts->hdr_digest; in nvme_tcp_alloc_queue()
1438 queue->data_digest = nctrl->opts->data_digest; in nvme_tcp_alloc_queue()
1439 if (queue->hdr_digest || queue->data_digest) { in nvme_tcp_alloc_queue()
1440 ret = nvme_tcp_alloc_crypto(queue); in nvme_tcp_alloc_queue()
1443 "failed to allocate queue %d crypto\n", qid); in nvme_tcp_alloc_queue()
1449 nvme_tcp_hdgst_len(queue); in nvme_tcp_alloc_queue()
1450 queue->pdu = kmalloc(rcv_pdu_size, GFP_KERNEL); in nvme_tcp_alloc_queue()
1451 if (!queue->pdu) { in nvme_tcp_alloc_queue()
1456 dev_dbg(nctrl->device, "connecting queue %d\n", in nvme_tcp_alloc_queue()
1457 nvme_tcp_queue_id(queue)); in nvme_tcp_alloc_queue()
1459 ret = kernel_connect(queue->sock, (struct sockaddr *)&ctrl->addr, in nvme_tcp_alloc_queue()
1467 ret = nvme_tcp_init_connection(queue); in nvme_tcp_alloc_queue()
1471 queue->rd_enabled = true; in nvme_tcp_alloc_queue()
1472 set_bit(NVME_TCP_Q_ALLOCATED, &queue->flags); in nvme_tcp_alloc_queue()
1473 nvme_tcp_init_recv_ctx(queue); in nvme_tcp_alloc_queue()
1475 write_lock_bh(&queue->sock->sk->sk_callback_lock); in nvme_tcp_alloc_queue()
1476 queue->sock->sk->sk_user_data = queue; in nvme_tcp_alloc_queue()
1477 queue->state_change = queue->sock->sk->sk_state_change; in nvme_tcp_alloc_queue()
1478 queue->data_ready = queue->sock->sk->sk_data_ready; in nvme_tcp_alloc_queue()
1479 queue->write_space = queue->sock->sk->sk_write_space; in nvme_tcp_alloc_queue()
1480 queue->sock->sk->sk_data_ready = nvme_tcp_data_ready; in nvme_tcp_alloc_queue()
1481 queue->sock->sk->sk_state_change = nvme_tcp_state_change; in nvme_tcp_alloc_queue()
1482 queue->sock->sk->sk_write_space = nvme_tcp_write_space; in nvme_tcp_alloc_queue()
1484 queue->sock->sk->sk_ll_usec = 1; in nvme_tcp_alloc_queue()
1486 write_unlock_bh(&queue->sock->sk->sk_callback_lock); in nvme_tcp_alloc_queue()
1491 kernel_sock_shutdown(queue->sock, SHUT_RDWR); in nvme_tcp_alloc_queue()
1493 kfree(queue->pdu); in nvme_tcp_alloc_queue()
1495 if (queue->hdr_digest || queue->data_digest) in nvme_tcp_alloc_queue()
1496 nvme_tcp_free_crypto(queue); in nvme_tcp_alloc_queue()
1498 sock_release(queue->sock); in nvme_tcp_alloc_queue()
1499 queue->sock = NULL; in nvme_tcp_alloc_queue()
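The kernel-socket tuning above maps one-to-one onto ordinary socket options: an aggressive SYN retry count for fast connect failure, Nagle off, abortive close (linger 0) so stale transmit-queue data is dropped on teardown, a receive timeout, plus optional priority and TOS. A userspace equivalent via setsockopt(2); the priority and TOS values here are illustrative stand-ins for the driver's so_priority and opts->tos:

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>
#include <sys/time.h>

static void tune_like_nvme_tcp(int fd)
{
	int one = 1, syncnt = 1, prio = 0, tos = 0;
	struct linger lo = { .l_onoff = 1, .l_linger = 0 }; /* abortive close */
	struct timeval rcvto = { .tv_sec = 10 };   /* sk_rcvtimeo = 10 * HZ */

	setsockopt(fd, IPPROTO_TCP, TCP_SYNCNT, &syncnt, sizeof(syncnt));
	setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &one, sizeof(one));
	setsockopt(fd, SOL_SOCKET, SO_LINGER, &lo, sizeof(lo));
	setsockopt(fd, SOL_SOCKET, SO_PRIORITY, &prio, sizeof(prio));
	setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &rcvto, sizeof(rcvto));
	setsockopt(fd, IPPROTO_IP, IP_TOS, &tos, sizeof(tos));
}

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (fd >= 0)
		tune_like_nvme_tcp(fd);
	return 0;
}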
1503 static void nvme_tcp_restore_sock_calls(struct nvme_tcp_queue *queue) in nvme_tcp_restore_sock_calls() argument
1505 struct socket *sock = queue->sock; in nvme_tcp_restore_sock_calls()
1509 sock->sk->sk_data_ready = queue->data_ready; in nvme_tcp_restore_sock_calls()
1510 sock->sk->sk_state_change = queue->state_change; in nvme_tcp_restore_sock_calls()
1511 sock->sk->sk_write_space = queue->write_space; in nvme_tcp_restore_sock_calls()
1515 static void __nvme_tcp_stop_queue(struct nvme_tcp_queue *queue) in __nvme_tcp_stop_queue() argument
1517 kernel_sock_shutdown(queue->sock, SHUT_RDWR); in __nvme_tcp_stop_queue()
1518 nvme_tcp_restore_sock_calls(queue); in __nvme_tcp_stop_queue()
1519 cancel_work_sync(&queue->io_work); in __nvme_tcp_stop_queue()
1525 struct nvme_tcp_queue *queue = &ctrl->queues[qid]; in nvme_tcp_stop_queue() local
1527 if (!test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags)) in nvme_tcp_stop_queue()
1529 __nvme_tcp_stop_queue(queue); in nvme_tcp_stop_queue()
1548 "failed to connect queue: %d ret=%d\n", idx, ret); in nvme_tcp_start_queue()
1710 * sufficient queue count to have dedicated default queues. in nvme_tcp_set_io_queues()
2116 static void nvme_tcp_set_sg_inline(struct nvme_tcp_queue *queue, in nvme_tcp_set_sg_inline() argument
2121 sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff); in nvme_tcp_set_sg_inline()
2140 struct nvme_tcp_queue *queue = &ctrl->queues[0]; in nvme_tcp_submit_async_event() local
2143 u8 hdgst = nvme_tcp_hdgst_len(queue); in nvme_tcp_submit_async_event()
2147 if (queue->hdr_digest) in nvme_tcp_submit_async_event()
2168 struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl; in nvme_tcp_complete_timed_out()
2170 nvme_tcp_stop_queue(ctrl, nvme_tcp_queue_id(req->queue)); in nvme_tcp_complete_timed_out()
2181 struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl; in nvme_tcp_timeout()
2185 "queue %d: timeout request %#x type %d\n", in nvme_tcp_timeout()
2186 nvme_tcp_queue_id(req->queue), rq->tag, pdu->hdr.type); in nvme_tcp_timeout()
2214 static blk_status_t nvme_tcp_map_data(struct nvme_tcp_queue *queue, in nvme_tcp_map_data() argument
2226 req->data_len <= nvme_tcp_inline_data_size(queue)) in nvme_tcp_map_data()
2227 nvme_tcp_set_sg_inline(queue, c, req->data_len); in nvme_tcp_map_data()
2239 struct nvme_tcp_queue *queue = req->queue; in nvme_tcp_setup_cmd_pdu() local
2240 u8 hdgst = nvme_tcp_hdgst_len(queue), ddgst = 0; in nvme_tcp_setup_cmd_pdu()
2257 req->data_len <= nvme_tcp_inline_data_size(queue)) in nvme_tcp_setup_cmd_pdu()
2264 if (queue->hdr_digest) in nvme_tcp_setup_cmd_pdu()
2266 if (queue->data_digest && req->pdu_len) { in nvme_tcp_setup_cmd_pdu()
2268 ddgst = nvme_tcp_ddgst_len(queue); in nvme_tcp_setup_cmd_pdu()
2275 ret = nvme_tcp_map_data(queue, rq); in nvme_tcp_setup_cmd_pdu()
2278 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_setup_cmd_pdu()
2288 struct nvme_tcp_queue *queue = hctx->driver_data; in nvme_tcp_commit_rqs() local
2290 if (!llist_empty(&queue->req_list)) in nvme_tcp_commit_rqs()
2291 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work); in nvme_tcp_commit_rqs()
2297 struct nvme_ns *ns = hctx->queue->queuedata; in nvme_tcp_queue_rq()
2298 struct nvme_tcp_queue *queue = hctx->driver_data; in nvme_tcp_queue_rq() local
2301 bool queue_ready = test_bit(NVME_TCP_Q_LIVE, &queue->flags); in nvme_tcp_queue_rq()
2304 if (!nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready)) in nvme_tcp_queue_rq()
2305 return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq); in nvme_tcp_queue_rq()
2365 struct nvme_tcp_queue *queue = hctx->driver_data; in nvme_tcp_poll() local
2366 struct sock *sk = queue->sock->sk; in nvme_tcp_poll()
2368 if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags)) in nvme_tcp_poll()
2371 set_bit(NVME_TCP_Q_POLLING, &queue->flags); in nvme_tcp_poll()
2374 nvme_tcp_try_recv(queue); in nvme_tcp_poll()
2375 clear_bit(NVME_TCP_Q_POLLING, &queue->flags); in nvme_tcp_poll()
2376 return queue->nr_cqe; in nvme_tcp_poll()