/*
 * cxgb3i_offload.c: Chelsio S3xx iscsi offloaded tcp connection management
 *
 * Copyright (C) 2003-2015 Chelsio Communications. All rights reserved.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
 * release for licensing terms and conditions.
 *
 * Written by:	Dimitris Michailidis (dm@chelsio.com)
 *		Karen Xie (kxie@chelsio.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <scsi/scsi_host.h>

#include "common.h"
#include "t3_cpl.h"
#include "t3cdev.h"
#include "cxgb3_defs.h"
#include "cxgb3_ctl_defs.h"
#include "cxgb3_offload.h"
#include "firmware_exports.h"
#include "cxgb3i.h"

static unsigned int dbg_level;
#include "../libcxgbi.h"

#define DRV_MODULE_NAME		"cxgb3i"
#define DRV_MODULE_DESC		"Chelsio T3 iSCSI Driver"
#define DRV_MODULE_VERSION	"2.0.1-ko"
#define DRV_MODULE_RELDATE	"Apr. 2015"

static char version[] =
	DRV_MODULE_DESC " " DRV_MODULE_NAME
	" v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Chelsio Communications, Inc.");
MODULE_DESCRIPTION(DRV_MODULE_DESC);
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_LICENSE("GPL");

module_param(dbg_level, uint, 0644);
MODULE_PARM_DESC(dbg_level, "debug flag (default=0)");

static int cxgb3i_rcv_win = 256 * 1024;
module_param(cxgb3i_rcv_win, int, 0644);
MODULE_PARM_DESC(cxgb3i_rcv_win, "TCP receive window in bytes (default=256KB)");

static int cxgb3i_snd_win = 128 * 1024;
module_param(cxgb3i_snd_win, int, 0644);
MODULE_PARM_DESC(cxgb3i_snd_win, "TCP send window in bytes (default=128KB)");

static int cxgb3i_rx_credit_thres = 10 * 1024;
module_param(cxgb3i_rx_credit_thres, int, 0644);
MODULE_PARM_DESC(cxgb3i_rx_credit_thres,
		 "RX credits return threshold in bytes (default=10KB)");

static unsigned int cxgb3i_max_connect = 8 * 1024;
module_param(cxgb3i_max_connect, uint, 0644);
MODULE_PARM_DESC(cxgb3i_max_connect, "Max. # of connections (default=8192)");

static unsigned int cxgb3i_sport_base = 20000;
module_param(cxgb3i_sport_base, uint, 0644);
MODULE_PARM_DESC(cxgb3i_sport_base, "starting port number (default=20000)");
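
/*
 * A load-time tuning sketch (hypothetical values): all of the above are
 * standard module parameters, so they can be set when loading the module,
 * e.g.
 *
 *	modprobe cxgb3i cxgb3i_max_connect=4096 cxgb3i_sport_base=30000
 */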

static void cxgb3i_dev_open(struct t3cdev *);
static void cxgb3i_dev_close(struct t3cdev *);
static void cxgb3i_dev_event_handler(struct t3cdev *, u32, u32);

static struct cxgb3_client t3_client = {
	.name = DRV_MODULE_NAME,
	.handlers = cxgb3i_cpl_handlers,
	.add = cxgb3i_dev_open,
	.remove = cxgb3i_dev_close,
	.event_handler = cxgb3i_dev_event_handler,
};

static struct scsi_host_template cxgb3i_host_template = {
	.module		= THIS_MODULE,
	.name		= DRV_MODULE_NAME,
	.proc_name	= DRV_MODULE_NAME,
	.can_queue	= CXGB3I_SCSI_HOST_QDEPTH,
	.queuecommand	= iscsi_queuecommand,
	.change_queue_depth = scsi_change_queue_depth,
	.sg_tablesize	= SG_ALL,
	.max_sectors	= 0xFFFF,
	.cmd_per_lun	= ISCSI_DEF_CMD_PER_LUN,
	.eh_timed_out	= iscsi_eh_cmd_timed_out,
	.eh_abort_handler = iscsi_eh_abort,
	.eh_device_reset_handler = iscsi_eh_device_reset,
	.eh_target_reset_handler = iscsi_eh_recover_target,
	.target_alloc	= iscsi_target_alloc,
	.dma_boundary	= PAGE_SIZE - 1,
	.this_id	= -1,
	.track_queue_depth = 1,
};

static struct iscsi_transport cxgb3i_iscsi_transport = {
	.owner		= THIS_MODULE,
	.name		= DRV_MODULE_NAME,
	/* owner and name should be set already */
	.caps		= CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST
				| CAP_DATADGST | CAP_DIGEST_OFFLOAD |
				CAP_PADDING_OFFLOAD | CAP_TEXT_NEGO,
	.attr_is_visible = cxgbi_attr_is_visible,
	.get_host_param	= cxgbi_get_host_param,
	.set_host_param	= cxgbi_set_host_param,
	/* session management */
	.create_session	= cxgbi_create_session,
	.destroy_session = cxgbi_destroy_session,
	.get_session_param = iscsi_session_get_param,
	/* connection management */
	.create_conn	= cxgbi_create_conn,
	.bind_conn	= cxgbi_bind_conn,
	.unbind_conn	= iscsi_conn_unbind,
	.destroy_conn	= iscsi_tcp_conn_teardown,
	.start_conn	= iscsi_conn_start,
	.stop_conn	= iscsi_conn_stop,
	.get_conn_param	= iscsi_conn_get_param,
	.set_param	= cxgbi_set_conn_param,
	.get_stats	= cxgbi_get_conn_stats,
	/* pdu xmit req from user space */
	.send_pdu	= iscsi_conn_send_pdu,
	/* task */
	.init_task	= iscsi_tcp_task_init,
	.xmit_task	= iscsi_tcp_task_xmit,
	.cleanup_task	= cxgbi_cleanup_task,
	/* pdu */
	.alloc_pdu	= cxgbi_conn_alloc_pdu,
	.init_pdu	= cxgbi_conn_init_pdu,
	.xmit_pdu	= cxgbi_conn_xmit_pdu,
	.parse_pdu_itt	= cxgbi_parse_pdu_itt,
	/* TCP connect/disconnect */
	.get_ep_param	= cxgbi_get_ep_param,
	.ep_connect	= cxgbi_ep_connect,
	.ep_poll	= cxgbi_ep_poll,
	.ep_disconnect	= cxgbi_ep_disconnect,
	/* Error recovery timeout call */
	.session_recovery_timedout = iscsi_session_recovery_timedout,
};

static struct scsi_transport_template *cxgb3i_stt;

/*
 * CPL (Chelsio Protocol Language) defines a message-passing interface between
 * the host driver and the Chelsio ASIC.
 * The section below implements the CPLs related to iSCSI TCP connection
 * open/close/abort and data send/receive.
 */

static int push_tx_frames(struct cxgbi_sock *csk, int req_completion);

static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
			      const struct l2t_entry *e)
{
	unsigned int wscale = cxgbi_sock_compute_wscale(csk->rcv_win);
	struct cpl_act_open_req *req = (struct cpl_act_open_req *)skb->head;

	skb->priority = CPL_PRIORITY_SETUP;

	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, csk->atid));
	req->local_port = csk->saddr.sin_port;
	req->peer_port = csk->daddr.sin_port;
	req->local_ip = csk->saddr.sin_addr.s_addr;
	req->peer_ip = csk->daddr.sin_addr.s_addr;

	req->opt0h = htonl(V_KEEP_ALIVE(1) | F_TCAM_BYPASS |
			V_WND_SCALE(wscale) | V_MSS_IDX(csk->mss_idx) |
			V_L2T_IDX(e->idx) | V_TX_CHANNEL(e->smt_idx));
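	/*
	 * RCV_BUFSIZ is advertised in 1KB units (rcv_win >> 10), and the
	 * field can only express up to M_RCV_BUFSIZ KB; any excess window
	 * is handed to the HW as RX credits once the connection is
	 * established (see do_act_establish()).
	 */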
	req->opt0l = htonl(V_ULP_MODE(ULP2_MODE_ISCSI) |
			V_RCV_BUFSIZ(csk->rcv_win >> 10));

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx,%u, %pI4:%u-%pI4:%u, %u,%u,%u.\n",
		csk, csk->state, csk->flags, csk->atid,
		&req->local_ip, ntohs(req->local_port),
		&req->peer_ip, ntohs(req->peer_port),
		csk->mss_idx, e->idx, e->smt_idx);

	l2t_send(csk->cdev->lldev, skb, csk->l2t);
}

static inline void act_open_arp_failure(struct t3cdev *dev, struct sk_buff *skb)
{
	cxgbi_sock_act_open_req_arp_failure(NULL, skb);
}

/*
 * CPL connection close request: host ->
 *
 * Close a connection by queueing a CPL_CLOSE_CON_REQ message to the write
 * queue (i.e., after any unsent TX data).
 */
static void send_close_req(struct cxgbi_sock *csk)
{
	struct sk_buff *skb = csk->cpl_close;
	struct cpl_close_con_req *req = (struct cpl_close_con_req *)skb->head;
	unsigned int tid = csk->tid;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx,%u.\n",
		csk, csk->state, csk->flags, csk->tid);

	csk->cpl_close = NULL;
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_CLOSE_CON));
	req->wr.wr_lo = htonl(V_WR_TID(tid));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid));
	req->rsvd = htonl(csk->write_seq);

	cxgbi_sock_skb_entail(csk, skb);
	if (csk->state >= CTP_ESTABLISHED)
		push_tx_frames(csk, 1);
}

/*
 * CPL connection abort request: host ->
 *
 * Send an ABORT_REQ message. Makes sure we do not send multiple ABORT_REQs
 * for the same connection and also that we do not try to send a message
 * after the connection has closed.
 */
static void abort_arp_failure(struct t3cdev *tdev, struct sk_buff *skb)
{
	struct cpl_abort_req *req = cplhdr(skb);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"t3dev 0x%p, tid %u, skb 0x%p.\n",
		tdev, GET_TID(req), skb);
	req->cmd = CPL_ABORT_NO_RST;
	cxgb3_ofld_send(tdev, skb);
}

static void send_abort_req(struct cxgbi_sock *csk)
{
	struct sk_buff *skb = csk->cpl_abort_req;
	struct cpl_abort_req *req;

	if (unlikely(csk->state == CTP_ABORTING || !skb))
		return;
	cxgbi_sock_set_state(csk, CTP_ABORTING);
	cxgbi_sock_set_flag(csk, CTPF_ABORT_RPL_PENDING);
	/* Purge the send queue so we don't send anything after an abort. */
	cxgbi_sock_purge_write_queue(csk);

	csk->cpl_abort_req = NULL;
	req = (struct cpl_abort_req *)skb->head;
	skb->priority = CPL_PRIORITY_DATA;
	set_arp_failure_handler(skb, abort_arp_failure);
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_REQ));
	req->wr.wr_lo = htonl(V_WR_TID(csk->tid));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ABORT_REQ, csk->tid));
	req->rsvd0 = htonl(csk->snd_nxt);
	req->rsvd1 = !cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT);
	req->cmd = CPL_ABORT_SEND_RST;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx,%u, snd_nxt %u, 0x%x.\n",
		csk, csk->state, csk->flags, csk->tid, csk->snd_nxt,
		req->rsvd1);

	l2t_send(csk->cdev->lldev, skb, csk->l2t);
}

/*
 * CPL connection abort reply: host ->
 *
 * Send an ABORT_RPL message in response to a received ABORT_REQ.
 */
static void send_abort_rpl(struct cxgbi_sock *csk, int rst_status)
{
	struct sk_buff *skb = csk->cpl_abort_rpl;
	struct cpl_abort_rpl *rpl = (struct cpl_abort_rpl *)skb->head;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx,%u, status %d.\n",
		csk, csk->state, csk->flags, csk->tid, rst_status);

	csk->cpl_abort_rpl = NULL;
	skb->priority = CPL_PRIORITY_DATA;
	rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL));
	rpl->wr.wr_lo = htonl(V_WR_TID(csk->tid));
	OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, csk->tid));
	rpl->cmd = rst_status;
	cxgb3_ofld_send(csk->cdev->lldev, skb);
}

/*
 * CPL connection rx data ack: host ->
 * Send RX credits through an RX_DATA_ACK CPL message. Returns the number of
 * credits sent.
 */
static u32 send_rx_credits(struct cxgbi_sock *csk, u32 credits)
{
	struct sk_buff *skb;
	struct cpl_rx_data_ack *req;
	u32 dack = F_RX_DACK_CHANGE | V_RX_DACK_MODE(1);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
		"csk 0x%p,%u,0x%lx,%u, credit %u, dack %u.\n",
		csk, csk->state, csk->flags, csk->tid, credits, dack);

	skb = alloc_wr(sizeof(*req), 0, GFP_ATOMIC);
	if (!skb) {
		pr_info("csk 0x%p, credit %u, OOM.\n", csk, credits);
		return 0;
	}
	req = (struct cpl_rx_data_ack *)skb->head;
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RX_DATA_ACK, csk->tid));
	req->credit_dack = htonl(dack | V_RX_CREDITS(credits));
	skb->priority = CPL_PRIORITY_ACK;
	cxgb3_ofld_send(csk->cdev->lldev, skb);
	return credits;
}

/*
 * CPL connection tx data: host ->
 *
 * Send iSCSI PDUs via TX_DATA CPL messages.
 * Each TX_DATA consumes work request credits (WRs), so we need to keep track
 * of how many we have used so far and how many are pending (i.e., not yet
 * acked by T3).
 */

static unsigned int wrlen __read_mostly;
static unsigned int skb_wrs[SKB_WR_LIST_SIZE] __read_mostly;

static void init_wr_tab(unsigned int wr_len)
{
	int i;

	if (skb_wrs[1]) /* already initialized */
		return;
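
	/*
	 * skb_wrs[i] is the number of WRs needed to send an skb with i
	 * fragments.  Assuming the cxgb3 SGE packing, where a pair of
	 * scatter/gather entries occupies 3 flits (8-byte words), i
	 * entries need (3 * i) / 2 flits, rounded up for an odd count,
	 * plus 3 flits for the tx_data_wr header.  Worked example: with
	 * wr_len = 8 flits, a 4-fragment skb needs 6 + 3 = 9 flits and
	 * therefore 2 WRs.  (wrlen below converts wr_len from flits to
	 * bytes.)
	 */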
	for (i = 1; i < SKB_WR_LIST_SIZE; i++) {
		int sgl_len = (3 * i) / 2 + (i & 1);

		sgl_len += 3;
		skb_wrs[i] = (sgl_len <= wr_len
			      ? 1 : 1 + (sgl_len - 2) / (wr_len - 1));
	}
	wrlen = wr_len * 8;
}

static inline void make_tx_data_wr(struct cxgbi_sock *csk, struct sk_buff *skb,
				   int len, int req_completion)
{
	struct tx_data_wr *req;
	struct l2t_entry *l2t = csk->l2t;

	skb_reset_transport_header(skb);
	req = __skb_push(skb, sizeof(*req));
	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA) |
			(req_completion ? F_WR_COMPL : 0));
	req->wr_lo = htonl(V_WR_TID(csk->tid));
	/* len includes the length of any HW ULP additions */
	req->len = htonl(len);
	/* V_TX_ULP_SUBMODE sets both the mode and submode */
	req->flags = htonl(V_TX_ULP_SUBMODE(cxgbi_skcb_tx_ulp_mode(skb)) |
			V_TX_SHOVE((skb_peek(&csk->write_queue) ? 0 : 1)));
	req->sndseq = htonl(csk->snd_nxt);
	req->param = htonl(V_TX_PORT(l2t->smt_idx));

	if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) {
		req->flags |= htonl(V_TX_ACK_PAGES(2) | F_TX_INIT |
				    V_TX_CPU_IDX(csk->rss_qid));
		/* sendbuffer is in units of 32KB. */
		req->param |= htonl(V_TX_SNDBUF(csk->snd_win >> 15));
		cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
	}
}

/*
 * push_tx_frames -- start transmit
 *
 * Prepends TX_DATA_WR or CPL_CLOSE_CON_REQ headers to buffers waiting in a
 * connection's send queue and sends them on to T3. Must be called with the
 * connection's lock held. Returns the amount of send buffer space that was
 * freed as a result of sending queued data to T3.
 */
static void arp_failure_skb_discard(struct t3cdev *dev, struct sk_buff *skb)
{
	kfree_skb(skb);
}

static int push_tx_frames(struct cxgbi_sock *csk, int req_completion)
{
	int total_size = 0;
	struct sk_buff *skb;

	if (unlikely(csk->state < CTP_ESTABLISHED ||
		     csk->state == CTP_CLOSE_WAIT_1 ||
		     csk->state >= CTP_ABORTING)) {
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_TX,
			"csk 0x%p,%u,0x%lx,%u, in closing state.\n",
			csk, csk->state, csk->flags, csk->tid);
		return 0;
	}

	while (csk->wr_cred && (skb = skb_peek(&csk->write_queue)) != NULL) {
		int len = skb->len;	/* length before skb_push */
		int frags = skb_shinfo(skb)->nr_frags + (len != skb->data_len);
		int wrs_needed = skb_wrs[frags];

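		/*
		 * Fast path: if the whole payload plus the tx_data_wr
		 * header fits in a single WR (wrlen here is in bytes),
		 * one WR suffices regardless of the fragment count,
		 * presumably because the data is carried inline in the
		 * WR rather than via an SGL.
		 */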
		if (wrs_needed > 1 && len + sizeof(struct tx_data_wr) <= wrlen)
			wrs_needed = 1;

		WARN_ON(frags >= SKB_WR_LIST_SIZE || wrs_needed < 1);

		if (csk->wr_cred < wrs_needed) {
			log_debug(1 << CXGBI_DBG_PDU_TX,
				"csk 0x%p, skb len %u/%u, frag %u, wr %d<%u.\n",
				csk, skb->len, skb->data_len, frags,
				wrs_needed, csk->wr_cred);
			break;
		}

		__skb_unlink(skb, &csk->write_queue);
		skb->priority = CPL_PRIORITY_DATA;
		skb->csum = wrs_needed;	/* remember this until the WR_ACK */
		csk->wr_cred -= wrs_needed;
		csk->wr_una_cred += wrs_needed;
		cxgbi_sock_enqueue_wr(csk, skb);

		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_TX,
			"csk 0x%p, enqueue, skb len %u/%u, frag %u, wr %d, "
			"left %u, unack %u.\n",
			csk, skb->len, skb->data_len, frags, skb->csum,
			csk->wr_cred, csk->wr_una_cred);

		if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR))) {
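			/*
			 * Request a WR completion either when the caller
			 * asked for one and this skb's WRs are the only
			 * ones outstanding, or once at least half of the
			 * connection's WR credits are unacked, so that
			 * credits flow back before we run out.
			 */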
			if ((req_completion &&
			     csk->wr_una_cred == wrs_needed) ||
			    csk->wr_una_cred >= csk->wr_max_cred / 2) {
				req_completion = 1;
				csk->wr_una_cred = 0;
			}
			len += cxgbi_ulp_extra_len(cxgbi_skcb_tx_ulp_mode(skb));
			make_tx_data_wr(csk, skb, len, req_completion);
			csk->snd_nxt += len;
			cxgbi_skcb_clear_flag(skb, SKCBF_TX_NEED_HDR);
		}
		total_size += skb->truesize;
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_TX,
			"csk 0x%p, tid 0x%x, send skb 0x%p.\n",
			csk, csk->tid, skb);
		set_arp_failure_handler(skb, arp_failure_skb_discard);
		l2t_send(csk->cdev->lldev, skb, csk->l2t);
	}
	return total_size;
}

/*
 * Process a CPL_ACT_ESTABLISH message: -> host
 * Updates connection state from an active establish CPL message. Runs with
 * the connection lock held.
 */

static inline void free_atid(struct cxgbi_sock *csk)
{
	if (cxgbi_sock_flag(csk, CTPF_HAS_ATID)) {
		cxgb3_free_atid(csk->cdev->lldev, csk->atid);
		cxgbi_sock_clear_flag(csk, CTPF_HAS_ATID);
		cxgbi_sock_put(csk);
	}
}

static int do_act_establish(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct cxgbi_sock *csk = ctx;
	struct cpl_act_establish *req = cplhdr(skb);
	unsigned int tid = GET_TID(req);
	unsigned int atid = G_PASS_OPEN_TID(ntohl(req->tos_tid));
	u32 rcv_isn = ntohl(req->rcv_isn);	/* real RCV_ISN + 1 */

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"atid 0x%x,tid 0x%x, csk 0x%p,%u,0x%lx, isn %u.\n",
		atid, tid, csk, csk->state, csk->flags, rcv_isn);

	cxgbi_sock_get(csk);
	cxgbi_sock_set_flag(csk, CTPF_HAS_TID);
	csk->tid = tid;
	cxgb3_insert_tid(csk->cdev->lldev, &t3_client, csk, tid);

	free_atid(csk);

	csk->rss_qid = G_QNUM(ntohs(skb->csum));

	spin_lock_bh(&csk->lock);
	if (csk->retry_timer.function) {
		del_timer(&csk->retry_timer);
		csk->retry_timer.function = NULL;
	}

	if (unlikely(csk->state != CTP_ACTIVE_OPEN))
		pr_info("csk 0x%p,%u,0x%lx,%u, got EST.\n",
			csk, csk->state, csk->flags, csk->tid);

	csk->copied_seq = csk->rcv_wup = csk->rcv_nxt = rcv_isn;
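	/*
	 * The act_open request could only advertise up to M_RCV_BUFSIZ KB
	 * of receive window; if rcv_win is larger, back rcv_wup off by
	 * the excess so it is returned to the HW as RX credits as data
	 * is consumed.
	 */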
	if (csk->rcv_win > (M_RCV_BUFSIZ << 10))
		csk->rcv_wup -= csk->rcv_win - (M_RCV_BUFSIZ << 10);

	cxgbi_sock_established(csk, ntohl(req->snd_isn), ntohs(req->tcp_opt));

	if (unlikely(cxgbi_sock_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED)))
		/* upper layer has requested closing */
		send_abort_req(csk);
	else {
		if (skb_queue_len(&csk->write_queue))
			push_tx_frames(csk, 1);
		cxgbi_conn_tx_open(csk);
	}

	spin_unlock_bh(&csk->lock);
	__kfree_skb(skb);
	return 0;
}

/*
 * Process a CPL_ACT_OPEN_RPL message: -> host
 * Handle active open failures.
 */
static int act_open_rpl_status_to_errno(int status)
{
	switch (status) {
	case CPL_ERR_CONN_RESET:
		return -ECONNREFUSED;
	case CPL_ERR_ARP_MISS:
		return -EHOSTUNREACH;
	case CPL_ERR_CONN_TIMEDOUT:
		return -ETIMEDOUT;
	case CPL_ERR_TCAM_FULL:
		return -ENOMEM;
	case CPL_ERR_CONN_EXIST:
		return -EADDRINUSE;
	default:
		return -EIO;
	}
}

static void act_open_retry_timer(struct timer_list *t)
{
	struct cxgbi_sock *csk = from_timer(csk, t, retry_timer);
	struct sk_buff *skb;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx,%u.\n",
		csk, csk->state, csk->flags, csk->tid);

	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);
	skb = alloc_wr(sizeof(struct cpl_act_open_req), 0, GFP_ATOMIC);
	if (!skb)
		cxgbi_sock_fail_act_open(csk, -ENOMEM);
	else {
		skb->sk = (struct sock *)csk;
		set_arp_failure_handler(skb, act_open_arp_failure);
		send_act_open_req(csk, skb, csk->l2t);
	}
	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
}

static int do_act_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct cxgbi_sock *csk = ctx;
	struct cpl_act_open_rpl *rpl = cplhdr(skb);

	pr_info("csk 0x%p,%u,0x%lx,%u, status %u, %pI4:%u-%pI4:%u.\n",
		csk, csk->state, csk->flags, csk->atid, rpl->status,
		&csk->saddr.sin_addr.s_addr, ntohs(csk->saddr.sin_port),
		&csk->daddr.sin_addr.s_addr, ntohs(csk->daddr.sin_port));

	if (rpl->status != CPL_ERR_TCAM_FULL &&
	    rpl->status != CPL_ERR_CONN_EXIST &&
	    rpl->status != CPL_ERR_ARP_MISS)
		cxgb3_queue_tid_release(tdev, GET_TID(rpl));

	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);
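	/*
	 * CPL_ERR_CONN_EXIST means the 4-tuple is still in use, most
	 * likely by a prior connection that has not been released yet,
	 * so retry the active open after half a second rather than
	 * failing outright.
	 */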
	if (rpl->status == CPL_ERR_CONN_EXIST &&
	    csk->retry_timer.function != act_open_retry_timer) {
		csk->retry_timer.function = act_open_retry_timer;
		mod_timer(&csk->retry_timer, jiffies + HZ / 2);
	} else
		cxgbi_sock_fail_act_open(csk,
				act_open_rpl_status_to_errno(rpl->status));

	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
	__kfree_skb(skb);
	return 0;
}

/*
 * Process PEER_CLOSE CPL messages: -> host
 * Handle peer FIN.
 */
static int do_peer_close(struct t3cdev *cdev, struct sk_buff *skb, void *ctx)
{
	struct cxgbi_sock *csk = ctx;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx,%u.\n",
		csk, csk->state, csk->flags, csk->tid);

	cxgbi_sock_rcv_peer_close(csk);
	__kfree_skb(skb);
	return 0;
}

/*
 * Process CLOSE_CON_RPL CPL message: -> host
 * Process a peer ACK to our FIN.
 */
static int do_close_con_rpl(struct t3cdev *cdev, struct sk_buff *skb,
			    void *ctx)
{
	struct cxgbi_sock *csk = ctx;
	struct cpl_close_con_rpl *rpl = cplhdr(skb);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx,%u, snxt %u.\n",
		csk, csk->state, csk->flags, csk->tid, ntohl(rpl->snd_nxt));

	cxgbi_sock_rcv_close_conn_rpl(csk, ntohl(rpl->snd_nxt));
	__kfree_skb(skb);
	return 0;
}

/*
 * Process ABORT_REQ_RSS CPL message: -> host
 * Process abort requests. If we are waiting for an ABORT_RPL we ignore this
 * request except that we need to reply to it.
 */

static int abort_status_to_errno(struct cxgbi_sock *csk, int abort_reason,
				 int *need_rst)
{
	switch (abort_reason) {
	case CPL_ERR_BAD_SYN:
	case CPL_ERR_CONN_RESET:
		return csk->state > CTP_ESTABLISHED ? -EPIPE : -ECONNRESET;
	case CPL_ERR_XMIT_TIMEDOUT:
	case CPL_ERR_PERSIST_TIMEDOUT:
	case CPL_ERR_FINWAIT2_TIMEDOUT:
	case CPL_ERR_KEEPALIVE_TIMEDOUT:
		return -ETIMEDOUT;
	default:
		return -EIO;
	}
}

static int do_abort_req(struct t3cdev *cdev, struct sk_buff *skb, void *ctx)
{
	const struct cpl_abort_req_rss *req = cplhdr(skb);
	struct cxgbi_sock *csk = ctx;
	int rst_status = CPL_ABORT_NO_RST;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx,%u.\n",
		csk, csk->state, csk->flags, csk->tid);

	if (req->status == CPL_ERR_RTX_NEG_ADVICE ||
	    req->status == CPL_ERR_PERSIST_NEG_ADVICE) {
		goto done;
	}

	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);

	if (!cxgbi_sock_flag(csk, CTPF_ABORT_REQ_RCVD)) {
		cxgbi_sock_set_flag(csk, CTPF_ABORT_REQ_RCVD);
		cxgbi_sock_set_state(csk, CTP_ABORTING);
		goto out;
	}

	cxgbi_sock_clear_flag(csk, CTPF_ABORT_REQ_RCVD);
	send_abort_rpl(csk, rst_status);

	if (!cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) {
		csk->err = abort_status_to_errno(csk, req->status, &rst_status);
		cxgbi_sock_closed(csk);
	}

out:
	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
done:
	__kfree_skb(skb);
	return 0;
}

/*
 * Process ABORT_RPL_RSS CPL message: -> host
 * Process abort replies. We only process these messages if we anticipate
 * them as the coordination between SW and HW in this area is somewhat lacking
 * and sometimes we get ABORT_RPLs after we are done with the connection that
 * originated the ABORT_REQ.
 */
static int do_abort_rpl(struct t3cdev *cdev, struct sk_buff *skb, void *ctx)
{
	struct cpl_abort_rpl_rss *rpl = cplhdr(skb);
	struct cxgbi_sock *csk = ctx;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"status 0x%x, csk 0x%p, s %u, 0x%lx.\n",
		rpl->status, csk, csk ? csk->state : 0,
		csk ? csk->flags : 0UL);
	/*
	 * Ignore replies to post-close aborts indicating that the abort was
	 * requested too late. These connections are terminated when we get
	 * PEER_CLOSE or CLOSE_CON_RPL and by the time the abort_rpl_rss
	 * arrives the TID is either no longer used or it has been recycled.
	 */
	if (rpl->status == CPL_ERR_ABORT_FAILED)
		goto rel_skb;
	/*
	 * Sometimes we've already closed the connection, e.g., a post-close
	 * abort races with ABORT_REQ_RSS, the latter frees the connection
	 * expecting the ABORT_REQ will fail with CPL_ERR_ABORT_FAILED,
	 * but FW turns the ABORT_REQ into a regular one and so we get
	 * ABORT_RPL_RSS with status 0 and no connection.
	 */
	if (csk)
		cxgbi_sock_rcv_abort_rpl(csk);
rel_skb:
	__kfree_skb(skb);
	return 0;
}

/*
 * Process RX_ISCSI_HDR CPL message: -> host
 * Handle received PDUs; the payload may have been DDP'ed. If not, the
 * payload follows the BHS.
 */
static int do_iscsi_hdr(struct t3cdev *t3dev, struct sk_buff *skb, void *ctx)
{
	struct cxgbi_sock *csk = ctx;
	struct cpl_iscsi_hdr *hdr_cpl = cplhdr(skb);
	struct cpl_iscsi_hdr_norss data_cpl;
	struct cpl_rx_data_ddp_norss ddp_cpl;
	unsigned int hdr_len, data_len, status;
	unsigned int len;
	int err;
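
	/*
	 * Expected skb layout (derived from the parsing below): the
	 * CPL_ISCSI_HDR is followed by the iSCSI BHS; if the payload was
	 * not DDP'ed, a cpl_iscsi_hdr_norss header plus the payload come
	 * next; a cpl_rx_data_ddp_norss carrying the DDP status always
	 * sits at the tail of the skb.
	 */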

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
		"csk 0x%p,%u,0x%lx,%u, skb 0x%p,%u.\n",
		csk, csk->state, csk->flags, csk->tid, skb, skb->len);

	spin_lock_bh(&csk->lock);

	if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
		log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
			"csk 0x%p,%u,0x%lx,%u, bad state.\n",
			csk, csk->state, csk->flags, csk->tid);
		if (csk->state != CTP_ABORTING)
			goto abort_conn;
		else
			goto discard;
	}

	cxgbi_skcb_tcp_seq(skb) = ntohl(hdr_cpl->seq);
	cxgbi_skcb_flags(skb) = 0;

	skb_reset_transport_header(skb);
	__skb_pull(skb, sizeof(struct cpl_iscsi_hdr));

	len = hdr_len = ntohs(hdr_cpl->len);
	/* msg coalesce is off or not enough data received */
	if (skb->len <= hdr_len) {
		pr_err("%s: tid %u, CPL_ISCSI_HDR, skb len %u < %u.\n",
			csk->cdev->ports[csk->port_id]->name, csk->tid,
			skb->len, hdr_len);
		goto abort_conn;
	}
	cxgbi_skcb_set_flag(skb, SKCBF_RX_COALESCED);

	err = skb_copy_bits(skb, skb->len - sizeof(ddp_cpl), &ddp_cpl,
			    sizeof(ddp_cpl));
	if (err < 0) {
		pr_err("%s: tid %u, copy cpl_ddp %u-%zu failed %d.\n",
			csk->cdev->ports[csk->port_id]->name, csk->tid,
			skb->len, sizeof(ddp_cpl), err);
		goto abort_conn;
	}

	cxgbi_skcb_set_flag(skb, SKCBF_RX_STATUS);
	cxgbi_skcb_rx_pdulen(skb) = ntohs(ddp_cpl.len);
	cxgbi_skcb_rx_ddigest(skb) = ntohl(ddp_cpl.ulp_crc);
	status = ntohl(ddp_cpl.ddp_status);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
		"csk 0x%p, skb 0x%p,%u, pdulen %u, status 0x%x.\n",
		csk, skb, skb->len, cxgbi_skcb_rx_pdulen(skb), status);

	if (status & (1 << CPL_RX_DDP_STATUS_HCRC_SHIFT))
		cxgbi_skcb_set_flag(skb, SKCBF_RX_HCRC_ERR);
	if (status & (1 << CPL_RX_DDP_STATUS_DCRC_SHIFT))
		cxgbi_skcb_set_flag(skb, SKCBF_RX_DCRC_ERR);
	if (status & (1 << CPL_RX_DDP_STATUS_PAD_SHIFT))
		cxgbi_skcb_set_flag(skb, SKCBF_RX_PAD_ERR);

	if (skb->len > (hdr_len + sizeof(ddp_cpl))) {
		err = skb_copy_bits(skb, hdr_len, &data_cpl, sizeof(data_cpl));
		if (err < 0) {
			pr_err("%s: tid %u, cp %zu/%u failed %d.\n",
				csk->cdev->ports[csk->port_id]->name,
				csk->tid, sizeof(data_cpl), skb->len, err);
			goto abort_conn;
		}
		data_len = ntohs(data_cpl.len);
		log_debug(1 << CXGBI_DBG_DDP | 1 << CXGBI_DBG_PDU_RX,
			"skb 0x%p, pdu not ddp'ed %u/%u, status 0x%x.\n",
			skb, data_len, cxgbi_skcb_rx_pdulen(skb), status);
		len += sizeof(data_cpl) + data_len;
	} else if (status & (1 << CPL_RX_DDP_STATUS_DDP_SHIFT))
		cxgbi_skcb_set_flag(skb, SKCBF_RX_DATA_DDPD);

	csk->rcv_nxt = ntohl(ddp_cpl.seq) + cxgbi_skcb_rx_pdulen(skb);
	__pskb_trim(skb, len);
	__skb_queue_tail(&csk->receive_queue, skb);
	cxgbi_conn_pdu_ready(csk);

	spin_unlock_bh(&csk->lock);
	return 0;

abort_conn:
	send_abort_req(csk);
discard:
	spin_unlock_bh(&csk->lock);
	__kfree_skb(skb);
	return 0;
}

/*
 * Process TX_DATA_ACK CPL messages: -> host
 * Process an acknowledgment of WR completion. Advance snd_una and send the
 * next batch of work requests from the write queue.
 */
static int do_wr_ack(struct t3cdev *cdev, struct sk_buff *skb, void *ctx)
{
	struct cxgbi_sock *csk = ctx;
	struct cpl_wr_ack *hdr = cplhdr(skb);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
		"csk 0x%p,%u,0x%lx,%u, cr %u.\n",
		csk, csk->state, csk->flags, csk->tid, ntohs(hdr->credits));

	cxgbi_sock_rcv_wr_ack(csk, ntohs(hdr->credits), ntohl(hdr->snd_una), 1);
	__kfree_skb(skb);
	return 0;
}

/*
 * For each connection, pre-allocate the skbs needed for close/abort requests,
 * so that we can service such requests right away.
 */
static int alloc_cpls(struct cxgbi_sock *csk)
{
	csk->cpl_close = alloc_wr(sizeof(struct cpl_close_con_req), 0,
				  GFP_KERNEL);
	if (!csk->cpl_close)
		return -ENOMEM;
	csk->cpl_abort_req = alloc_wr(sizeof(struct cpl_abort_req), 0,
				      GFP_KERNEL);
	if (!csk->cpl_abort_req)
		goto free_cpl_skbs;

	csk->cpl_abort_rpl = alloc_wr(sizeof(struct cpl_abort_rpl), 0,
				      GFP_KERNEL);
	if (!csk->cpl_abort_rpl)
		goto free_cpl_skbs;

	return 0;

free_cpl_skbs:
	cxgbi_sock_free_cpl_skbs(csk);
	return -ENOMEM;
}

static void l2t_put(struct cxgbi_sock *csk)
{
	struct t3cdev *t3dev = (struct t3cdev *)csk->cdev->lldev;

	if (csk->l2t) {
		l2t_release(t3dev, csk->l2t);
		csk->l2t = NULL;
		cxgbi_sock_put(csk);
	}
}

/*
 * release_offload_resources - release offload resources
 * Release the resources held by an offload connection (TID, L2T entry, etc.).
 */
static void release_offload_resources(struct cxgbi_sock *csk)
{
	struct t3cdev *t3dev = (struct t3cdev *)csk->cdev->lldev;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx,%u.\n",
		csk, csk->state, csk->flags, csk->tid);

	csk->rss_qid = 0;
	cxgbi_sock_free_cpl_skbs(csk);

	if (csk->wr_cred != csk->wr_max_cred) {
		cxgbi_sock_purge_wr_queue(csk);
		cxgbi_sock_reset_wr_list(csk);
	}
	l2t_put(csk);
	if (cxgbi_sock_flag(csk, CTPF_HAS_ATID))
		free_atid(csk);
	else if (cxgbi_sock_flag(csk, CTPF_HAS_TID)) {
		cxgb3_remove_tid(t3dev, (void *)csk, csk->tid);
		cxgbi_sock_clear_flag(csk, CTPF_HAS_TID);
		cxgbi_sock_put(csk);
	}
	csk->dst = NULL;
	csk->cdev = NULL;
}

static void update_address(struct cxgbi_hba *chba)
{
	if (chba->ipv4addr) {
		if (chba->vdev &&
		    chba->ipv4addr != cxgb3i_get_private_ipv4addr(chba->vdev)) {
			cxgb3i_set_private_ipv4addr(chba->vdev, chba->ipv4addr);
			cxgb3i_set_private_ipv4addr(chba->ndev, 0);
			pr_info("%s set %pI4.\n",
				chba->vdev->name, &chba->ipv4addr);
		} else if (chba->ipv4addr !=
			   cxgb3i_get_private_ipv4addr(chba->ndev)) {
			cxgb3i_set_private_ipv4addr(chba->ndev, chba->ipv4addr);
			pr_info("%s set %pI4.\n",
				chba->ndev->name, &chba->ipv4addr);
		}
	} else if (cxgb3i_get_private_ipv4addr(chba->ndev)) {
		if (chba->vdev)
			cxgb3i_set_private_ipv4addr(chba->vdev, 0);
		cxgb3i_set_private_ipv4addr(chba->ndev, 0);
	}
}

static int init_act_open(struct cxgbi_sock *csk)
{
	struct dst_entry *dst = csk->dst;
	struct cxgbi_device *cdev = csk->cdev;
	struct t3cdev *t3dev = (struct t3cdev *)cdev->lldev;
	struct net_device *ndev = cdev->ports[csk->port_id];
	struct cxgbi_hba *chba = cdev->hbas[csk->port_id];
	struct sk_buff *skb = NULL;
	int ret;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx.\n", csk, csk->state, csk->flags);

	update_address(chba);
	if (chba->ipv4addr)
		csk->saddr.sin_addr.s_addr = chba->ipv4addr;

	csk->rss_qid = 0;
	csk->l2t = t3_l2t_get(t3dev, dst, ndev,
			      &csk->daddr.sin_addr.s_addr);
	if (!csk->l2t) {
		pr_err("NO l2t available.\n");
		return -EINVAL;
	}
	cxgbi_sock_get(csk);

	csk->atid = cxgb3_alloc_atid(t3dev, &t3_client, csk);
	if (csk->atid < 0) {
		pr_err("NO atid available.\n");
		ret = -EINVAL;
		goto put_sock;
	}
	cxgbi_sock_set_flag(csk, CTPF_HAS_ATID);
	cxgbi_sock_get(csk);

	skb = alloc_wr(sizeof(struct cpl_act_open_req), 0, GFP_KERNEL);
	if (!skb) {
		ret = -ENOMEM;
		goto free_atid;
	}
	skb->sk = (struct sock *)csk;
	set_arp_failure_handler(skb, act_open_arp_failure);
	csk->snd_win = cxgb3i_snd_win;
	csk->rcv_win = cxgb3i_rcv_win;

	csk->wr_max_cred = csk->wr_cred = T3C_DATA(t3dev)->max_wrs - 1;
	csk->wr_una_cred = 0;
	csk->mss_idx = cxgbi_sock_select_mss(csk, dst_mtu(dst));
	cxgbi_sock_reset_wr_list(csk);
	csk->err = 0;

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx, %pI4:%u-%pI4:%u.\n",
		csk, csk->state, csk->flags,
		&csk->saddr.sin_addr.s_addr, ntohs(csk->saddr.sin_port),
		&csk->daddr.sin_addr.s_addr, ntohs(csk->daddr.sin_port));

	cxgbi_sock_set_state(csk, CTP_ACTIVE_OPEN);
	send_act_open_req(csk, skb, csk->l2t);
	return 0;

free_atid:
	cxgb3_free_atid(t3dev, csk->atid);
put_sock:
	cxgbi_sock_put(csk);
	l2t_release(t3dev, csk->l2t);
	csk->l2t = NULL;

	return ret;
}

cxgb3_cpl_handler_func cxgb3i_cpl_handlers[NUM_CPL_CMDS] = {
	[CPL_ACT_ESTABLISH] = do_act_establish,
	[CPL_ACT_OPEN_RPL] = do_act_open_rpl,
	[CPL_PEER_CLOSE] = do_peer_close,
	[CPL_ABORT_REQ_RSS] = do_abort_req,
	[CPL_ABORT_RPL_RSS] = do_abort_rpl,
	[CPL_CLOSE_CON_RPL] = do_close_con_rpl,
	[CPL_TX_DMA_ACK] = do_wr_ack,
	[CPL_ISCSI_HDR] = do_iscsi_hdr,
};

/**
 * cxgb3i_ofld_init - allocate and initialize resources for each adapter found
 * @cdev: cxgbi adapter
 */
static int cxgb3i_ofld_init(struct cxgbi_device *cdev)
{
	struct t3cdev *t3dev = (struct t3cdev *)cdev->lldev;
	struct adap_ports port;
	struct ofld_page_info rx_page_info;
	unsigned int wr_len;
	int rc;

	if (t3dev->ctl(t3dev, GET_WR_LEN, &wr_len) < 0 ||
	    t3dev->ctl(t3dev, GET_PORTS, &port) < 0 ||
	    t3dev->ctl(t3dev, GET_RX_PAGE_INFO, &rx_page_info) < 0) {
		pr_warn("t3 0x%p, offload up, ioctl failed.\n", t3dev);
		return -EINVAL;
	}

	if (cxgb3i_max_connect > CXGBI_MAX_CONN)
		cxgb3i_max_connect = CXGBI_MAX_CONN;

	rc = cxgbi_device_portmap_create(cdev, cxgb3i_sport_base,
					 cxgb3i_max_connect);
	if (rc < 0)
		return rc;

	init_wr_tab(wr_len);
	cdev->csk_release_offload_resources = release_offload_resources;
	cdev->csk_push_tx_frames = push_tx_frames;
	cdev->csk_send_abort_req = send_abort_req;
	cdev->csk_send_close_req = send_close_req;
	cdev->csk_send_rx_credits = send_rx_credits;
	cdev->csk_alloc_cpls = alloc_cpls;
	cdev->csk_init_act_open = init_act_open;

	pr_info("cdev 0x%p, offload up, added.\n", cdev);
	return 0;
}

/*
 * functions to program the pagepod in h/w
 */
static inline void ulp_mem_io_set_hdr(struct sk_buff *skb, unsigned int addr)
{
	struct ulp_mem_io *req = (struct ulp_mem_io *)skb->head;

	memset(req, 0, sizeof(*req));

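	/*
	 * ULP_MEM_WRITE addresses and data lengths are expressed in
	 * 32-byte units (hence the >> 5 shifts below), while NFLITS
	 * counts 8-byte flits; the +1 appears to account for the
	 * command header flit itself.
	 */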
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_BYPASS));
	req->cmd_lock_addr = htonl(V_ULP_MEMIO_ADDR(addr >> 5) |
				   V_ULPTX_CMD(ULP_MEM_WRITE));
	req->len = htonl(V_ULP_MEMIO_DATA_LEN(IPPOD_SIZE >> 5) |
			 V_ULPTX_NFLITS((IPPOD_SIZE >> 3) + 1));
}

static struct cxgbi_ppm *cdev2ppm(struct cxgbi_device *cdev)
{
	return ((struct t3cdev *)cdev->lldev)->ulp_iscsi;
}

static int ddp_set_map(struct cxgbi_ppm *ppm, struct cxgbi_sock *csk,
		       struct cxgbi_task_tag_info *ttinfo)
{
	unsigned int idx = ttinfo->idx;
	unsigned int npods = ttinfo->npods;
	struct scatterlist *sg = ttinfo->sgl;
	struct cxgbi_pagepod *ppod;
	struct ulp_mem_io *req;
	unsigned int sg_off;
	unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ppm->llimit;
	int i;

	for (i = 0; i < npods; i++, idx++, pm_addr += IPPOD_SIZE) {
		struct sk_buff *skb = alloc_wr(sizeof(struct ulp_mem_io) +
					       IPPOD_SIZE, 0, GFP_ATOMIC);

		if (!skb)
			return -ENOMEM;
		ulp_mem_io_set_hdr(skb, pm_addr);
		req = (struct ulp_mem_io *)skb->head;
		ppod = (struct cxgbi_pagepod *)(req + 1);
		sg_off = i * PPOD_PAGES_MAX;
		cxgbi_ddp_set_one_ppod(ppod, ttinfo, &sg,
				       &sg_off);
		skb->priority = CPL_PRIORITY_CONTROL;
		cxgb3_ofld_send(ppm->lldev, skb);
	}
	return 0;
}

static void ddp_clear_map(struct cxgbi_device *cdev, struct cxgbi_ppm *ppm,
			  struct cxgbi_task_tag_info *ttinfo)
{
	unsigned int idx = ttinfo->idx;
	unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ppm->llimit;
	unsigned int npods = ttinfo->npods;
	int i;

	log_debug(1 << CXGBI_DBG_DDP,
		"cdev 0x%p, clear idx %u, npods %u.\n",
		cdev, idx, npods);

	for (i = 0; i < npods; i++, idx++, pm_addr += IPPOD_SIZE) {
		struct sk_buff *skb = alloc_wr(sizeof(struct ulp_mem_io) +
					       IPPOD_SIZE, 0, GFP_ATOMIC);

		if (!skb) {
			pr_err("cdev 0x%p, clear ddp, %u,%d/%u, skb OOM.\n",
				cdev, idx, i, npods);
			continue;
		}
		ulp_mem_io_set_hdr(skb, pm_addr);
		skb->priority = CPL_PRIORITY_CONTROL;
		cxgb3_ofld_send(ppm->lldev, skb);
	}
}

static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk,
				unsigned int tid, int pg_idx)
{
	struct sk_buff *skb = alloc_wr(sizeof(struct cpl_set_tcb_field), 0,
				       GFP_KERNEL);
	struct cpl_set_tcb_field *req;
	u64 val = pg_idx < DDP_PGIDX_MAX ? pg_idx : 0;

	log_debug(1 << CXGBI_DBG_DDP,
		"csk 0x%p, tid %u, pg_idx %d.\n", csk, tid, pg_idx);
	if (!skb)
		return -ENOMEM;

	/* set up ulp submode and page size */
	req = (struct cpl_set_tcb_field *)skb->head;
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
	req->reply = V_NO_REPLY(1);
	req->cpu_idx = 0;
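	/*
	 * The page-size index is kept in the top nibble (bits 31:28) of
	 * TCB word 31, hence the 0xF0000000 mask and the shift by 28.
	 */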
	req->word = htons(31);
	req->mask = cpu_to_be64(0xF0000000);
	req->val = cpu_to_be64(val << 28);
	skb->priority = CPL_PRIORITY_CONTROL;

	cxgb3_ofld_send(csk->cdev->lldev, skb);
	return 0;
}

/**
 * ddp_setup_conn_digest - set up connection digest settings
 * @csk: cxgb tcp socket
 * @tid: connection id
 * @hcrc: header digest enabled
 * @dcrc: data digest enabled
 * Set up the iSCSI digest settings for the connection identified by @tid.
 */
static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
				 int hcrc, int dcrc)
{
	struct sk_buff *skb = alloc_wr(sizeof(struct cpl_set_tcb_field), 0,
				       GFP_KERNEL);
	struct cpl_set_tcb_field *req;
	u64 val = (hcrc ? 1 : 0) | (dcrc ? 2 : 0);

	log_debug(1 << CXGBI_DBG_DDP,
		"csk 0x%p, tid %u, crc %d,%d.\n", csk, tid, hcrc, dcrc);
	if (!skb)
		return -ENOMEM;

	/* set up ulp submode and page size */
	req = (struct cpl_set_tcb_field *)skb->head;
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
	req->reply = V_NO_REPLY(1);
	req->cpu_idx = 0;
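	/*
	 * The digest-enable bits (bit 0 = HCRC, bit 1 = DCRC) live in
	 * bits 27:24 of TCB word 31, hence the 0x0F000000 mask and the
	 * shift by 24.
	 */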
	req->word = htons(31);
	req->mask = cpu_to_be64(0x0F000000);
	req->val = cpu_to_be64(val << 24);
	skb->priority = CPL_PRIORITY_CONTROL;

	cxgb3_ofld_send(csk->cdev->lldev, skb);
	return 0;
}

/**
 * cxgb3i_ddp_init - initialize the cxgb3 adapter's DDP resource
 * @cdev: cxgb3i adapter
 * Initialize the DDP pagepod manager for the given adapter.
 */
static int cxgb3i_ddp_init(struct cxgbi_device *cdev)
{
	struct t3cdev *tdev = (struct t3cdev *)cdev->lldev;
	struct net_device *ndev = cdev->ports[0];
	struct cxgbi_tag_format tformat;
	unsigned int ppmax, tagmask = 0;
	struct ulp_iscsi_info uinfo;
	int i, err;

	err = tdev->ctl(tdev, ULP_ISCSI_GET_PARAMS, &uinfo);
	if (err < 0) {
		pr_err("%s, failed to get iscsi param %d.\n",
			ndev->name, err);
		return err;
	}
	if (uinfo.llimit >= uinfo.ulimit) {
		pr_warn("T3 %s, iscsi NOT enabled %u ~ %u!\n",
			ndev->name, uinfo.llimit, uinfo.ulimit);
		return -EACCES;
	}

	ppmax = (uinfo.ulimit - uinfo.llimit + 1) >> PPOD_SIZE_SHIFT;
	tagmask = cxgbi_tagmask_set(ppmax);

	pr_info("T3 %s: 0x%x~0x%x, 0x%x, tagmask 0x%x -> 0x%x.\n",
		ndev->name, uinfo.llimit, uinfo.ulimit, ppmax, uinfo.tagmask,
		tagmask);

	memset(&tformat, 0, sizeof(struct cxgbi_tag_format));
	for (i = 0; i < 4; i++)
		tformat.pgsz_order[i] = uinfo.pgsz_factor[i];
	cxgbi_tagmask_check(tagmask, &tformat);

	err = cxgbi_ddp_ppm_setup(&tdev->ulp_iscsi, cdev, &tformat,
				  (uinfo.ulimit - uinfo.llimit + 1),
				  uinfo.llimit, uinfo.llimit, 0, 0, 0);
	if (err)
		return err;

	if (!(cdev->flags & CXGBI_FLAG_DDP_OFF)) {
		uinfo.tagmask = tagmask;
		uinfo.ulimit = uinfo.llimit + (ppmax << PPOD_SIZE_SHIFT);

		err = tdev->ctl(tdev, ULP_ISCSI_SET_PARAMS, &uinfo);
		if (err < 0) {
			pr_err("T3 %s fail to set iscsi param %d.\n",
				ndev->name, err);
			cdev->flags |= CXGBI_FLAG_DDP_OFF;
		}
		err = 0;
	}

	cdev->csk_ddp_setup_digest = ddp_setup_conn_digest;
	cdev->csk_ddp_setup_pgidx = ddp_setup_conn_pgidx;
	cdev->csk_ddp_set_map = ddp_set_map;
	cdev->csk_ddp_clear_map = ddp_clear_map;
	cdev->cdev2ppm = cdev2ppm;
	cdev->tx_max_size = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
				  uinfo.max_txsz - ISCSI_PDU_NONPAYLOAD_LEN);
	cdev->rx_max_size = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
				  uinfo.max_rxsz - ISCSI_PDU_NONPAYLOAD_LEN);

	return 0;
}

static void cxgb3i_dev_close(struct t3cdev *t3dev)
{
	struct cxgbi_device *cdev = cxgbi_device_find_by_lldev(t3dev);

	if (!cdev || cdev->flags & CXGBI_FLAG_ADAPTER_RESET) {
		pr_info("0x%p close, f 0x%x.\n", cdev, cdev ? cdev->flags : 0);
		return;
	}

	cxgbi_device_unregister(cdev);
}

/**
 * cxgb3i_dev_open - init a t3 adapter structure and any h/w settings
 * @t3dev: t3cdev adapter
 */
static void cxgb3i_dev_open(struct t3cdev *t3dev)
{
	struct cxgbi_device *cdev = cxgbi_device_find_by_lldev(t3dev);
	struct adapter *adapter = tdev2adap(t3dev);
	int i, err;

	if (cdev) {
		pr_info("0x%p, updating.\n", cdev);
		return;
	}

	cdev = cxgbi_device_register(0, adapter->params.nports);
	if (!cdev) {
		pr_warn("device 0x%p register failed.\n", t3dev);
		return;
	}

	cdev->flags = CXGBI_FLAG_DEV_T3 | CXGBI_FLAG_IPV4_SET;
	cdev->lldev = t3dev;
	cdev->pdev = adapter->pdev;
	cdev->ports = adapter->port;
	cdev->nports = adapter->params.nports;
	cdev->mtus = adapter->params.mtus;
	cdev->nmtus = NMTUS;
	cdev->rx_credit_thres = cxgb3i_rx_credit_thres;
	cdev->skb_tx_rsvd = CXGB3I_TX_HEADER_LEN;
	cdev->skb_rx_extra = sizeof(struct cpl_iscsi_hdr_norss);
	cdev->itp = &cxgb3i_iscsi_transport;

	err = cxgb3i_ddp_init(cdev);
	if (err) {
		pr_info("0x%p ddp init failed %d\n", cdev, err);
		goto err_out;
	}

	err = cxgb3i_ofld_init(cdev);
	if (err) {
		pr_info("0x%p offload init failed\n", cdev);
		goto err_out;
	}

	err = cxgbi_hbas_add(cdev, CXGB3I_MAX_LUN, CXGBI_MAX_CONN,
			     &cxgb3i_host_template, cxgb3i_stt);
	if (err)
		goto err_out;

	for (i = 0; i < cdev->nports; i++)
		cdev->hbas[i]->ipv4addr =
			cxgb3i_get_private_ipv4addr(cdev->ports[i]);

	pr_info("cdev 0x%p, f 0x%x, t3dev 0x%p open, err %d.\n",
		cdev, cdev->flags, t3dev, err);
	return;

err_out:
	cxgbi_device_unregister(cdev);
}

static void cxgb3i_dev_event_handler(struct t3cdev *t3dev, u32 event, u32 port)
{
	struct cxgbi_device *cdev = cxgbi_device_find_by_lldev(t3dev);

	log_debug(1 << CXGBI_DBG_TOE,
		"0x%p, cdev 0x%p, event 0x%x, port 0x%x.\n",
		t3dev, cdev, event, port);
	if (!cdev)
		return;

	switch (event) {
	case OFFLOAD_STATUS_DOWN:
		cdev->flags |= CXGBI_FLAG_ADAPTER_RESET;
		break;
	case OFFLOAD_STATUS_UP:
		cdev->flags &= ~CXGBI_FLAG_ADAPTER_RESET;
		break;
	}
}

/**
 * cxgb3i_init_module - module init entry point
 *
 * Initialize any driver-wide global data structures and register the driver
 * with the cxgb3 module.
 */
static int __init cxgb3i_init_module(void)
{
	int rc;

	printk(KERN_INFO "%s", version);

	rc = cxgbi_iscsi_init(&cxgb3i_iscsi_transport, &cxgb3i_stt);
	if (rc < 0)
		return rc;

	cxgb3_register_client(&t3_client);
	return 0;
}

/**
 * cxgb3i_exit_module - module cleanup/exit entry point
 *
 * Go through the driver's HBA list and, for each HBA, release any resources
 * held; then unregister the iSCSI transport and the cxgb3 client.
 */
static void __exit cxgb3i_exit_module(void)
{
	cxgb3_unregister_client(&t3_client);
	cxgbi_device_unregister_all(CXGBI_FLAG_DEV_T3);
	cxgbi_iscsi_cleanup(&cxgb3i_iscsi_transport, &cxgb3i_stt);
}

module_init(cxgb3i_init_module);
module_exit(cxgb3i_exit_module);