/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
 * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 ********************************************************************/

#define LPFC_NVMET_DEFAULT_SEGS		(64 + 1)	/* 256K IOs */
#define LPFC_NVMET_RQE_MIN_POST		128
#define LPFC_NVMET_RQE_DEF_POST		512
#define LPFC_NVMET_RQE_DEF_COUNT	2048
#define LPFC_NVMET_SUCCESS_LEN		12

#define LPFC_NVMET_MRQ_AUTO		0
#define LPFC_NVMET_MRQ_MAX		16

#define LPFC_NVMET_WAIT_TMO		(5 * MSEC_PER_SEC)
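
/*
 * Sizing note (illustrative, not taken from the driver source): with the
 * usual 4 KB page-sized data SGEs, 64 data segments cover 64 * 4 KB = 256 KB
 * of payload, which appears to be why LPFC_NVMET_DEFAULT_SEGS is (64 + 1);
 * the extra entry is presumably reserved for the command/response buffer
 * rather than payload data.
 */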

/* Used for NVME Target */
struct lpfc_nvmet_tgtport {
	struct lpfc_hba *phba;
	struct completion *tport_unreg_cmp;

	/* Stats counters - lpfc_nvmet_unsol_ls_buffer */
	atomic_t rcv_ls_req_in;
	atomic_t rcv_ls_req_out;
	atomic_t rcv_ls_req_drop;
	atomic_t xmt_ls_abort;
	atomic_t xmt_ls_abort_cmpl;

	/* Stats counters - lpfc_nvmet_xmt_ls_rsp */
	atomic_t xmt_ls_rsp;
	atomic_t xmt_ls_drop;

	/* Stats counters - lpfc_nvmet_xmt_ls_rsp_cmp */
	atomic_t xmt_ls_rsp_error;
	atomic_t xmt_ls_rsp_aborted;
	atomic_t xmt_ls_rsp_xb_set;
	atomic_t xmt_ls_rsp_cmpl;

	/* Stats counters - lpfc_nvmet_unsol_fcp_buffer */
	atomic_t rcv_fcp_cmd_in;
	atomic_t rcv_fcp_cmd_out;
	atomic_t rcv_fcp_cmd_drop;
	atomic_t rcv_fcp_cmd_defer;
	atomic_t xmt_fcp_release;

	/* Stats counters - lpfc_nvmet_xmt_fcp_op */
	atomic_t xmt_fcp_drop;
	atomic_t xmt_fcp_read_rsp;
	atomic_t xmt_fcp_read;
	atomic_t xmt_fcp_write;
	atomic_t xmt_fcp_rsp;

	/* Stats counters - lpfc_nvmet_xmt_fcp_op_cmp */
	atomic_t xmt_fcp_rsp_xb_set;
	atomic_t xmt_fcp_rsp_cmpl;
	atomic_t xmt_fcp_rsp_error;
	atomic_t xmt_fcp_rsp_aborted;
	atomic_t xmt_fcp_rsp_drop;

	/* Stats counters - lpfc_nvmet_xmt_fcp_abort */
	atomic_t xmt_fcp_xri_abort_cqe;
	atomic_t xmt_fcp_abort;
	atomic_t xmt_fcp_abort_cmpl;
	atomic_t xmt_abort_sol;
	atomic_t xmt_abort_unsol;
	atomic_t xmt_abort_rsp;
	atomic_t xmt_abort_rsp_error;

	/* Stats counters - defer IO */
	atomic_t defer_ctx;
	atomic_t defer_fod;
	atomic_t defer_wqfull;
};
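
/*
 * Illustrative sketch (the helper name is hypothetical, not part of the
 * driver): the fields above are plain atomic_t statistics, so the expected
 * pattern is a lockless atomic_inc() in the hot path when an event occurs
 * and an atomic_read() snapshot when the counters are reported.
 */
static inline void
lpfc_nvmet_example_count_ls_req(struct lpfc_nvmet_tgtport *tgtp)
{
	/* Count one unsolicited LS request as received ... */
	atomic_inc(&tgtp->rcv_ls_req_in);

	/* ... and read it back when reporting, e.g. for a stats dump */
	(void)atomic_read(&tgtp->rcv_ls_req_in);
}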

struct lpfc_nvmet_ctx_info {
	struct list_head nvmet_ctx_list;
	spinlock_t	nvmet_ctx_list_lock; /* lock per CPU */
	struct lpfc_nvmet_ctx_info *nvmet_ctx_next_cpu;
	struct lpfc_nvmet_ctx_info *nvmet_ctx_start_cpu;
	uint16_t	nvmet_ctx_list_cnt;
	char pad[16];  /* pad to a cache-line */
};

/* This retrieves the context info associated with the specified cpu / mrq */
#define lpfc_get_ctx_list(phba, cpu, mrq)  \
	((phba)->sli4_hba.nvmet_ctx_info + ((cpu) * (phba)->cfg_nvmet_mrq) + (mrq))
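
/*
 * Illustrative sketch (the helper name is hypothetical): receive contexts are
 * kept on per-CPU, per-MRQ free lists, so a consumer indexes the array with
 * lpfc_get_ctx_list() and takes an entry under that list's lock. It is
 * assumed here that struct lpfc_nvmet_ctxbuf (defined elsewhere in the
 * driver) is chained through its "list" member.
 */
static inline struct lpfc_nvmet_ctxbuf *
lpfc_nvmet_example_get_ctxbuf(struct lpfc_hba *phba, int cpu, int mrq)
{
	struct lpfc_nvmet_ctx_info *infop = lpfc_get_ctx_list(phba, cpu, mrq);
	struct lpfc_nvmet_ctxbuf *ctx_buf = NULL;
	unsigned long iflag;

	spin_lock_irqsave(&infop->nvmet_ctx_list_lock, iflag);
	if (infop->nvmet_ctx_list_cnt) {
		ctx_buf = list_first_entry(&infop->nvmet_ctx_list,
					   struct lpfc_nvmet_ctxbuf, list);
		list_del_init(&ctx_buf->list);
		infop->nvmet_ctx_list_cnt--;
	}
	spin_unlock_irqrestore(&infop->nvmet_ctx_list_lock, iflag);
	return ctx_buf;
}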

struct lpfc_nvmet_rcv_ctx {
	union {
		struct nvmefc_tgt_ls_req ls_req;
		struct nvmefc_tgt_fcp_req fcp_req;
	} ctx;
	struct list_head list;
	struct lpfc_hba *phba;
	struct lpfc_iocbq *wqeq;
	struct lpfc_iocbq *abort_wqeq;
	dma_addr_t txrdy_phys;
	spinlock_t ctxlock; /* protect flag access */
	uint32_t *txrdy;
	uint32_t sid;		/* S_ID (source N_Port ID) of the command */
	uint32_t offset;
	uint16_t oxid;		/* OX_ID of the exchange */
	uint16_t size;
	uint16_t entry_cnt;
	uint16_t cpu;
	uint16_t idx;
	uint16_t state;
	/* States */
#define LPFC_NVMET_STE_LS_RCV		1
#define LPFC_NVMET_STE_LS_ABORT		2
#define LPFC_NVMET_STE_LS_RSP		3
#define LPFC_NVMET_STE_RCV		4
#define LPFC_NVMET_STE_DATA		5
#define LPFC_NVMET_STE_ABORT		6
#define LPFC_NVMET_STE_DONE		7
#define LPFC_NVMET_STE_FREE		0xff
	uint16_t flag;
#define LPFC_NVMET_IO_INP		0x1  /* IO is in progress on exchange */
#define LPFC_NVMET_ABORT_OP		0x2  /* Abort WQE issued on exchange */
#define LPFC_NVMET_XBUSY		0x4  /* XB bit set on IO cmpl */
#define LPFC_NVMET_CTX_RLS		0x8  /* ctx free requested */
#define LPFC_NVMET_ABTS_RCV		0x10  /* ABTS received on exchange */
#define LPFC_NVMET_CTX_REUSE_WQ		0x20  /* ctx reused via WQ */
#define LPFC_NVMET_DEFER_WQFULL		0x40  /* Waiting on a free WQE */
#define LPFC_NVMET_TNOTIFY		0x80  /* notify transport of abts */
	struct rqb_dmabuf *rqb_buffer;
	struct lpfc_nvmet_ctxbuf *ctxbuf;
	struct lpfc_sli4_hdw_queue *hdwq;

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	/* Per-IO timestamps used for latency statistics in debugfs */
	uint64_t ts_isr_cmd;
	uint64_t ts_cmd_nvme;
	uint64_t ts_nvme_data;
	uint64_t ts_data_wqput;
	uint64_t ts_isr_data;
	uint64_t ts_data_nvme;
	uint64_t ts_nvme_status;
	uint64_t ts_status_wqput;
	uint64_t ts_isr_status;
	uint64_t ts_status_nvme;
#endif
};
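
/*
 * Illustrative sketch (the helper name is hypothetical): the flag bits above
 * are intended to be tested and modified only while holding ctxlock, since
 * the I/O completion and abort paths can race on the same receive context.
 */
static inline void
lpfc_nvmet_example_mark_abort(struct lpfc_nvmet_rcv_ctx *ctxp)
{
	unsigned long iflag;

	spin_lock_irqsave(&ctxp->ctxlock, iflag);
	/* Only flag the abort if the exchange still has an outstanding WQE */
	if (ctxp->flag & LPFC_NVMET_IO_INP)
		ctxp->flag |= LPFC_NVMET_ABORT_OP;
	spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
}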