/*
 * Copyright (c) 2006 Mellanox Technologies. All rights reserved
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_cm.h>
#include <net/dst.h>
#include <net/icmp.h>
#include <linux/icmpv6.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/moduleparam.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>

#include "ipoib.h"

int ipoib_max_conn_qp = 128;

module_param_named(max_nonsrq_conn_qp, ipoib_max_conn_qp, int, 0444);
MODULE_PARM_DESC(max_nonsrq_conn_qp,
		 "Max number of connected-mode QPs per interface "
		 "(applied only if shared receive queue is not available)");

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG_DATA
static int data_debug_level;

module_param_named(cm_data_debug_level, data_debug_level, int, 0644);
MODULE_PARM_DESC(cm_data_debug_level,
		 "Enable data path debug tracing for connected mode if > 0");
#endif

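/*
 * CM service IDs are formed as IPOIB_CM_IETF_ID | QPN: the IETF IPoIB
 * connected-mode prefix (cf. RFC 4755) in the high bits, with the UD
 * QPN of the target interface in the low bits.  See ipoib_cm_dev_open()
 * and ipoib_cm_send_req() below.
 */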
#define IPOIB_CM_IETF_ID 0x1000000000000000ULL

#define IPOIB_CM_RX_UPDATE_TIME (256 * HZ)
#define IPOIB_CM_RX_TIMEOUT     (2 * 256 * HZ)
#define IPOIB_CM_RX_DELAY       (3 * 256 * HZ)
#define IPOIB_CM_RX_UPDATE_MASK (0x3)

#define IPOIB_CM_RX_RESERVE     (ALIGN(IPOIB_HARD_LEN, 16) - IPOIB_ENCAP_LEN)

static struct ib_qp_attr ipoib_cm_err_attr = {
	.qp_state = IB_QPS_ERR
};

#define IPOIB_CM_RX_DRAIN_WRID 0xffffffff

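/*
 * To detect when every receive completion of a dying RX QP has been
 * reaped, a send WR with this sentinel wr_id is posted on a QP that is
 * already in the error state.  Its "flush error" completion arrives on
 * priv->recv_cq (the RX QPs use it as their send CQ for exactly this
 * purpose) behind all outstanding receive completions, so seeing
 * IPOIB_CM_RX_DRAIN_WRID in ipoib_cm_handle_rx_wc() means the drain
 * list can safely move on to reaping.
 */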
static struct ib_send_wr ipoib_cm_rx_drain_wr = {
	.opcode = IB_WR_SEND,
};

static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
			       const struct ib_cm_event *event);

static void ipoib_cm_dma_unmap_rx(struct ipoib_dev_priv *priv, int frags,
				  u64 mapping[IPOIB_CM_RX_SG])
{
	int i;

	ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_CM_HEAD_SIZE, DMA_FROM_DEVICE);

	for (i = 0; i < frags; ++i)
		ib_dma_unmap_page(priv->ca, mapping[i + 1], PAGE_SIZE, DMA_FROM_DEVICE);
}

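/*
 * Receive wr_ids carry the ring index in their low bits, tagged with
 * IPOIB_OP_CM | IPOIB_OP_RECV so the completion handler can tell CM
 * receives apart from other work, e.g.:
 *
 *	wr_id = wc->wr_id & ~(IPOIB_OP_CM | IPOIB_OP_RECV);
 *
 * recovers the ring slot in ipoib_cm_handle_rx_wc().
 */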
static int ipoib_cm_post_receive_srq(struct net_device *dev, int id)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	int i, ret;

	priv->cm.rx_wr.wr_id = id | IPOIB_OP_CM | IPOIB_OP_RECV;

	for (i = 0; i < priv->cm.num_frags; ++i)
		priv->cm.rx_sge[i].addr = priv->cm.srq_ring[id].mapping[i];

	ret = ib_post_srq_recv(priv->cm.srq, &priv->cm.rx_wr, NULL);
	if (unlikely(ret)) {
		ipoib_warn(priv, "post srq failed for buf %d (%d)\n", id, ret);
		ipoib_cm_dma_unmap_rx(priv, priv->cm.num_frags - 1,
				      priv->cm.srq_ring[id].mapping);
		dev_kfree_skb_any(priv->cm.srq_ring[id].skb);
		priv->cm.srq_ring[id].skb = NULL;
	}

	return ret;
}

static int ipoib_cm_post_receive_nonsrq(struct net_device *dev,
					struct ipoib_cm_rx *rx,
					struct ib_recv_wr *wr,
					struct ib_sge *sge, int id)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	int i, ret;

	wr->wr_id = id | IPOIB_OP_CM | IPOIB_OP_RECV;

	for (i = 0; i < IPOIB_CM_RX_SG; ++i)
		sge[i].addr = rx->rx_ring[id].mapping[i];

	ret = ib_post_recv(rx->qp, wr, NULL);
	if (unlikely(ret)) {
		ipoib_warn(priv, "post recv failed for buf %d (%d)\n", id, ret);
		ipoib_cm_dma_unmap_rx(priv, IPOIB_CM_RX_SG - 1,
				      rx->rx_ring[id].mapping);
		dev_kfree_skb_any(rx->rx_ring[id].skb);
		rx->rx_ring[id].skb = NULL;
	}

	return ret;
}

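/*
 * A CM receive buffer is an skb with an IPOIB_CM_HEAD_SIZE linear head
 * plus up to IPOIB_CM_RX_SG - 1 whole pages attached as fragments;
 * mapping[0] holds the DMA address of the head and mapping[1..frags]
 * those of the pages.
 */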
static struct sk_buff *ipoib_cm_alloc_rx_skb(struct net_device *dev,
					     struct ipoib_cm_rx_buf *rx_ring,
					     int id, int frags,
					     u64 mapping[IPOIB_CM_RX_SG],
					     gfp_t gfp)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct sk_buff *skb;
	int i;

	skb = dev_alloc_skb(ALIGN(IPOIB_CM_HEAD_SIZE + IPOIB_PSEUDO_LEN, 16));
	if (unlikely(!skb))
		return NULL;

	/*
	 * IPoIB adds an IPOIB_ENCAP_LEN byte header; this reserve aligns
	 * the IP header to a multiple of 16.
	 */
	skb_reserve(skb, IPOIB_CM_RX_RESERVE);

	mapping[0] = ib_dma_map_single(priv->ca, skb->data, IPOIB_CM_HEAD_SIZE,
				       DMA_FROM_DEVICE);
	if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0]))) {
		dev_kfree_skb_any(skb);
		return NULL;
	}

	for (i = 0; i < frags; i++) {
		struct page *page = alloc_page(gfp);

		if (!page)
			goto partial_error;
		skb_fill_page_desc(skb, i, page, 0, PAGE_SIZE);

		mapping[i + 1] = ib_dma_map_page(priv->ca, page,
						 0, PAGE_SIZE, DMA_FROM_DEVICE);
		if (unlikely(ib_dma_mapping_error(priv->ca, mapping[i + 1])))
			goto partial_error;
	}

	rx_ring[id].skb = skb;
	return skb;

partial_error:

	ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_CM_HEAD_SIZE, DMA_FROM_DEVICE);

	for (; i > 0; --i)
		ib_dma_unmap_page(priv->ca, mapping[i], PAGE_SIZE, DMA_FROM_DEVICE);

	dev_kfree_skb_any(skb);
	return NULL;
}

static void ipoib_cm_free_rx_ring(struct net_device *dev,
				  struct ipoib_cm_rx_buf *rx_ring)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	int i;

	for (i = 0; i < ipoib_recvq_size; ++i)
		if (rx_ring[i].skb) {
			ipoib_cm_dma_unmap_rx(priv, IPOIB_CM_RX_SG - 1,
					      rx_ring[i].mapping);
			dev_kfree_skb_any(rx_ring[i].skb);
		}

	vfree(rx_ring);
}

static void ipoib_cm_start_rx_drain(struct ipoib_dev_priv *priv)
{
	struct ipoib_cm_rx *p;

	/* We only reserved 1 extra slot in CQ for drain WRs, so
	 * make sure we have at most 1 outstanding WR. */
	if (list_empty(&priv->cm.rx_flush_list) ||
	    !list_empty(&priv->cm.rx_drain_list))
		return;

	/*
	 * QPs on the flush list are in the error state, so a "flush
	 * error" WC will be generated immediately for each WR we post.
	 */
	p = list_entry(priv->cm.rx_flush_list.next, typeof(*p), list);
	ipoib_cm_rx_drain_wr.wr_id = IPOIB_CM_RX_DRAIN_WRID;
	if (ib_post_send(p->qp, &ipoib_cm_rx_drain_wr, NULL))
		ipoib_warn(priv, "failed to post drain wr\n");

	list_splice_init(&priv->cm.rx_flush_list, &priv->cm.rx_drain_list);
}

static void ipoib_cm_rx_event_handler(struct ib_event *event, void *ctx)
{
	struct ipoib_cm_rx *p = ctx;
	struct ipoib_dev_priv *priv = ipoib_priv(p->dev);
	unsigned long flags;

	if (event->event != IB_EVENT_QP_LAST_WQE_REACHED)
		return;

	spin_lock_irqsave(&priv->lock, flags);
	list_move(&p->list, &priv->cm.rx_flush_list);
	p->state = IPOIB_CM_RX_FLUSH;
	ipoib_cm_start_rx_drain(priv);
	spin_unlock_irqrestore(&priv->lock, flags);
}

static struct ib_qp *ipoib_cm_create_rx_qp(struct net_device *dev,
					   struct ipoib_cm_rx *p)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct ib_qp_init_attr attr = {
		.event_handler = ipoib_cm_rx_event_handler,
		.send_cq = priv->recv_cq, /* For drain WR */
		.recv_cq = priv->recv_cq,
		.srq = priv->cm.srq,
		.cap.max_send_wr = 1, /* For drain WR */
		.cap.max_send_sge = 1, /* FIXME: 0 Seems not to work */
		.sq_sig_type = IB_SIGNAL_ALL_WR,
		.qp_type = IB_QPT_RC,
		.qp_context = p,
	};

	if (!ipoib_cm_has_srq(dev)) {
		attr.cap.max_recv_wr  = ipoib_recvq_size;
		attr.cap.max_recv_sge = IPOIB_CM_RX_SG;
	}

	return ib_create_qp(priv->pd, &attr);
}

static int ipoib_cm_modify_rx_qp(struct net_device *dev,
				 struct ib_cm_id *cm_id, struct ib_qp *qp,
				 unsigned int psn)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	qp_attr.qp_state = IB_QPS_INIT;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to init QP attr for INIT: %d\n", ret);
		return ret;
	}
	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify QP to INIT: %d\n", ret);
		return ret;
	}
	qp_attr.qp_state = IB_QPS_RTR;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to init QP attr for RTR: %d\n", ret);
		return ret;
	}
	qp_attr.rq_psn = psn;
	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify QP to RTR: %d\n", ret);
		return ret;
	}

	/*
	 * Current Mellanox HCA firmware won't generate completions
	 * with error for drain WRs unless the QP has been moved to
	 * RTS first. This work-around leaves a window where a QP has
	 * moved to error asynchronously, but this will eventually get
	 * fixed in firmware, so let's not error out if modify QP
	 * fails.
	 */
	qp_attr.qp_state = IB_QPS_RTS;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to init QP attr for RTS: %d\n", ret);
		return 0;
	}
	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify QP to RTS: %d\n", ret);
		return 0;
	}

	return 0;
}

static void ipoib_cm_init_rx_wr(struct net_device *dev,
				struct ib_recv_wr *wr,
				struct ib_sge *sge)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	int i;

	for (i = 0; i < priv->cm.num_frags; ++i)
		sge[i].lkey = priv->pd->local_dma_lkey;

	sge[0].length = IPOIB_CM_HEAD_SIZE;
	for (i = 1; i < priv->cm.num_frags; ++i)
		sge[i].length = PAGE_SIZE;

	wr->next    = NULL;
	wr->sg_list = sge;
	wr->num_sge = priv->cm.num_frags;
}

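/*
 * Note: the scratch recv WR + IPOIB_CM_RX_SG scatter list used below is
 * kmalloc'ed instead of living on the stack, presumably because at the
 * maximum SG depth the pair is too bulky for a kernel stack frame.
 */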
static int ipoib_cm_nonsrq_init_rx(struct net_device *dev, struct ib_cm_id *cm_id,
				   struct ipoib_cm_rx *rx)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct {
		struct ib_recv_wr wr;
		struct ib_sge sge[IPOIB_CM_RX_SG];
	} *t;
	int ret;
	int i;

	rx->rx_ring = vzalloc(array_size(ipoib_recvq_size,
					 sizeof(*rx->rx_ring)));
	if (!rx->rx_ring)
		return -ENOMEM;

	t = kmalloc(sizeof(*t), GFP_KERNEL);
	if (!t) {
		ret = -ENOMEM;
		goto err_free_1;
	}

	ipoib_cm_init_rx_wr(dev, &t->wr, t->sge);

	spin_lock_irq(&priv->lock);

	if (priv->cm.nonsrq_conn_qp >= ipoib_max_conn_qp) {
		spin_unlock_irq(&priv->lock);
		ib_send_cm_rej(cm_id, IB_CM_REJ_NO_QP, NULL, 0, NULL, 0);
		ret = -EINVAL;
		goto err_free;
	} else
		++priv->cm.nonsrq_conn_qp;

	spin_unlock_irq(&priv->lock);

	for (i = 0; i < ipoib_recvq_size; ++i) {
		if (!ipoib_cm_alloc_rx_skb(dev, rx->rx_ring, i, IPOIB_CM_RX_SG - 1,
					   rx->rx_ring[i].mapping,
					   GFP_KERNEL)) {
			ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
			ret = -ENOMEM;
			goto err_count;
		}
		ret = ipoib_cm_post_receive_nonsrq(dev, rx, &t->wr, t->sge, i);
		if (ret) {
			ipoib_warn(priv, "ipoib_cm_post_receive_nonsrq "
				   "failed for buf %d\n", i);
			ret = -EIO;
			goto err_count;
		}
	}

	rx->recv_count = ipoib_recvq_size;

	kfree(t);

	return 0;

err_count:
	spin_lock_irq(&priv->lock);
	--priv->cm.nonsrq_conn_qp;
	spin_unlock_irq(&priv->lock);

err_free:
	kfree(t);

err_free_1:
	ipoib_cm_free_rx_ring(dev, rx->rx_ring);

	return ret;
}

static int ipoib_cm_send_rep(struct net_device *dev, struct ib_cm_id *cm_id,
			     struct ib_qp *qp,
			     const struct ib_cm_req_event_param *req,
			     unsigned int psn)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct ipoib_cm_data data = {};
	struct ib_cm_rep_param rep = {};

	data.qpn = cpu_to_be32(priv->qp->qp_num);
	data.mtu = cpu_to_be32(IPOIB_CM_BUF_SIZE);

	rep.private_data = &data;
	rep.private_data_len = sizeof(data);
	rep.flow_control = 0;
	rep.rnr_retry_count = req->rnr_retry_count;
	rep.srq = ipoib_cm_has_srq(dev);
	rep.qp_num = qp->qp_num;
	rep.starting_psn = psn;
	return ib_send_cm_rep(cm_id, &rep);
}

static int ipoib_cm_req_handler(struct ib_cm_id *cm_id,
				const struct ib_cm_event *event)
{
	struct net_device *dev = cm_id->context;
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct ipoib_cm_rx *p;
	unsigned int psn;
	int ret;

	ipoib_dbg(priv, "REQ arrived\n");
	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		return -ENOMEM;
	p->dev = dev;
	p->id = cm_id;
	cm_id->context = p;
	p->state = IPOIB_CM_RX_LIVE;
	p->jiffies = jiffies;
	INIT_LIST_HEAD(&p->list);

	p->qp = ipoib_cm_create_rx_qp(dev, p);
	if (IS_ERR(p->qp)) {
		ret = PTR_ERR(p->qp);
		goto err_qp;
	}

	psn = prandom_u32() & 0xffffff;
	ret = ipoib_cm_modify_rx_qp(dev, cm_id, p->qp, psn);
	if (ret)
		goto err_modify;

	if (!ipoib_cm_has_srq(dev)) {
		ret = ipoib_cm_nonsrq_init_rx(dev, cm_id, p);
		if (ret)
			goto err_modify;
	}

	spin_lock_irq(&priv->lock);
	queue_delayed_work(priv->wq,
			   &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
	/* Add this entry to passive ids list head, but do not re-add it
	 * if IB_EVENT_QP_LAST_WQE_REACHED has moved it to flush list. */
	p->jiffies = jiffies;
	if (p->state == IPOIB_CM_RX_LIVE)
		list_move(&p->list, &priv->cm.passive_ids);
	spin_unlock_irq(&priv->lock);

	ret = ipoib_cm_send_rep(dev, cm_id, p->qp, &event->param.req_rcvd, psn);
	if (ret) {
		ipoib_warn(priv, "failed to send REP: %d\n", ret);
		if (ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE))
			ipoib_warn(priv, "unable to move qp to error state\n");
	}
	return 0;

err_modify:
	ib_destroy_qp(p->qp);
err_qp:
	kfree(p);
	return ret;
}

static int ipoib_cm_rx_handler(struct ib_cm_id *cm_id,
			       const struct ib_cm_event *event)
{
	struct ipoib_cm_rx *p;
	struct ipoib_dev_priv *priv;

	switch (event->event) {
	case IB_CM_REQ_RECEIVED:
		return ipoib_cm_req_handler(cm_id, event);
	case IB_CM_DREQ_RECEIVED:
		ib_send_cm_drep(cm_id, NULL, 0);
		/* Fall through */
	case IB_CM_REJ_RECEIVED:
		p = cm_id->context;
		priv = ipoib_priv(p->dev);
		if (ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE))
			ipoib_warn(priv, "unable to move qp to error state\n");
		/* Fall through */
	default:
		return 0;
	}
}
/* Adjust length of skb with fragments to match received data */
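/*
 * Trailing pages that the payload did not reach are donated to @toskb
 * (the freshly allocated replacement ring buffer) so they are reused
 * rather than freed and reallocated.
 */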
static void skb_put_frags(struct sk_buff *skb, unsigned int hdr_space,
			  unsigned int length, struct sk_buff *toskb)
{
	int i, num_frags;
	unsigned int size;

	/* put header into skb */
	size = min(length, hdr_space);
	skb->tail += size;
	skb->len += size;
	length -= size;

	num_frags = skb_shinfo(skb)->nr_frags;
	for (i = 0; i < num_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		if (length == 0) {
			/* don't need this page */
			skb_fill_page_desc(toskb, i, skb_frag_page(frag),
					   0, PAGE_SIZE);
			--skb_shinfo(skb)->nr_frags;
		} else {
			size = min_t(unsigned int, length, PAGE_SIZE);

			skb_frag_size_set(frag, size);
			skb->data_len += size;
			skb->truesize += size;
			skb->len += size;
			length -= size;
		}
	}
}

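/*
 * CM receive completion path.  The ring slot comes from the low bits of
 * wr_id (the out-of-range sentinel IPOIB_CM_RX_DRAIN_WRID instead marks
 * a finished drain).  Packets shorter than IPOIB_CM_COPYBREAK are copied
 * into a small fresh skb so the large ring buffer can be reposted as is;
 * larger packets get handed up the stack while a newly allocated buffer
 * takes their ring slot.  The connection's LRU timestamp is refreshed on
 * only one slot in four (IPOIB_CM_RX_UPDATE_MASK) to keep the fast path
 * cheap.
 */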
void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct ipoib_cm_rx_buf *rx_ring;
	unsigned int wr_id = wc->wr_id & ~(IPOIB_OP_CM | IPOIB_OP_RECV);
	struct sk_buff *skb, *newskb;
	struct ipoib_cm_rx *p;
	unsigned long flags;
	u64 mapping[IPOIB_CM_RX_SG];
	int frags;
	int has_srq;
	struct sk_buff *small_skb;

	ipoib_dbg_data(priv, "cm recv completion: id %d, status: %d\n",
		       wr_id, wc->status);

	if (unlikely(wr_id >= ipoib_recvq_size)) {
		if (wr_id == (IPOIB_CM_RX_DRAIN_WRID & ~(IPOIB_OP_CM | IPOIB_OP_RECV))) {
			spin_lock_irqsave(&priv->lock, flags);
			list_splice_init(&priv->cm.rx_drain_list, &priv->cm.rx_reap_list);
			ipoib_cm_start_rx_drain(priv);
			queue_work(priv->wq, &priv->cm.rx_reap_task);
			spin_unlock_irqrestore(&priv->lock, flags);
		} else
			ipoib_warn(priv, "cm recv completion event with wrid %d (> %d)\n",
				   wr_id, ipoib_recvq_size);
		return;
	}

	p = wc->qp->qp_context;

	has_srq = ipoib_cm_has_srq(dev);
	rx_ring = has_srq ? priv->cm.srq_ring : p->rx_ring;

	skb = rx_ring[wr_id].skb;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		ipoib_dbg(priv,
			  "cm recv error (status=%d, wrid=%d vend_err %#x)\n",
			  wc->status, wr_id, wc->vendor_err);
		++dev->stats.rx_dropped;
		if (has_srq)
			goto repost;
		else {
			if (!--p->recv_count) {
				spin_lock_irqsave(&priv->lock, flags);
				list_move(&p->list, &priv->cm.rx_reap_list);
				spin_unlock_irqrestore(&priv->lock, flags);
				queue_work(priv->wq, &priv->cm.rx_reap_task);
			}
			return;
		}
	}

	if (unlikely(!(wr_id & IPOIB_CM_RX_UPDATE_MASK))) {
		if (p && time_after_eq(jiffies, p->jiffies + IPOIB_CM_RX_UPDATE_TIME)) {
			spin_lock_irqsave(&priv->lock, flags);
			p->jiffies = jiffies;
			/* Move this entry to list head, but do not re-add it
			 * if it has been moved out of list. */
			if (p->state == IPOIB_CM_RX_LIVE)
				list_move(&p->list, &priv->cm.passive_ids);
			spin_unlock_irqrestore(&priv->lock, flags);
		}
	}

	if (wc->byte_len < IPOIB_CM_COPYBREAK) {
		int dlen = wc->byte_len;

		small_skb = dev_alloc_skb(dlen + IPOIB_CM_RX_RESERVE);
		if (small_skb) {
			skb_reserve(small_skb, IPOIB_CM_RX_RESERVE);
			ib_dma_sync_single_for_cpu(priv->ca, rx_ring[wr_id].mapping[0],
						   dlen, DMA_FROM_DEVICE);
			skb_copy_from_linear_data(skb, small_skb->data, dlen);
			ib_dma_sync_single_for_device(priv->ca, rx_ring[wr_id].mapping[0],
						      dlen, DMA_FROM_DEVICE);
			skb_put(small_skb, dlen);
			skb = small_skb;
			goto copied;
		}
	}

	frags = PAGE_ALIGN(wc->byte_len -
			   min_t(u32, wc->byte_len, IPOIB_CM_HEAD_SIZE)) /
		PAGE_SIZE;

	newskb = ipoib_cm_alloc_rx_skb(dev, rx_ring, wr_id, frags,
				       mapping, GFP_ATOMIC);
	if (unlikely(!newskb)) {
		/*
		 * If we can't allocate a new RX buffer, dump
		 * this packet and reuse the old buffer.
		 */
		ipoib_dbg(priv, "failed to allocate receive buffer %d\n", wr_id);
		++dev->stats.rx_dropped;
		goto repost;
	}

	ipoib_cm_dma_unmap_rx(priv, frags, rx_ring[wr_id].mapping);
	memcpy(rx_ring[wr_id].mapping, mapping, (frags + 1) * sizeof(*mapping));

	ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
		       wc->byte_len, wc->slid);

	skb_put_frags(skb, IPOIB_CM_HEAD_SIZE, wc->byte_len, newskb);

copied:
	skb->protocol = ((struct ipoib_header *) skb->data)->proto;
	skb_add_pseudo_hdr(skb);

	++dev->stats.rx_packets;
	dev->stats.rx_bytes += skb->len;

	skb->dev = dev;
	/* XXX get correct PACKET_ type here */
	skb->pkt_type = PACKET_HOST;
	netif_receive_skb(skb);

repost:
	if (has_srq) {
		if (unlikely(ipoib_cm_post_receive_srq(dev, wr_id)))
			ipoib_warn(priv, "ipoib_cm_post_receive_srq failed "
				   "for buf %d\n", wr_id);
	} else {
		if (unlikely(ipoib_cm_post_receive_nonsrq(dev, p,
							  &priv->cm.rx_wr,
							  priv->cm.rx_sge,
							  wr_id))) {
			--p->recv_count;
			ipoib_warn(priv, "ipoib_cm_post_receive_nonsrq failed "
				   "for buf %d\n", wr_id);
		}
	}
}

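/*
 * TX wr_ids are the ring slot ORed with IPOIB_OP_CM so the send
 * completion handler can tell CM traffic from UD traffic.  The slot is
 * tx_head masked with (ipoib_sendq_size - 1); ipoib_sendq_size is
 * rounded up to a power of two at module load, so the mask implements
 * ring wrap-around.
 */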
static inline int post_send(struct ipoib_dev_priv *priv,
			    struct ipoib_cm_tx *tx,
			    unsigned int wr_id,
			    struct ipoib_tx_buf *tx_req)
{
	ipoib_build_sge(priv, tx_req);

	priv->tx_wr.wr.wr_id	= wr_id | IPOIB_OP_CM;

	return ib_post_send(tx->qp, &priv->tx_wr.wr, NULL);
}

void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_tx *tx)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct ipoib_tx_buf *tx_req;
	int rc;
	unsigned int usable_sge = tx->max_send_sge - !!skb_headlen(skb);

	if (unlikely(skb->len > tx->mtu)) {
		ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
			   skb->len, tx->mtu);
		++dev->stats.tx_dropped;
		++dev->stats.tx_errors;
		ipoib_cm_skb_too_long(dev, skb, tx->mtu - IPOIB_ENCAP_LEN);
		return;
	}
	if (skb_shinfo(skb)->nr_frags > usable_sge) {
		if (skb_linearize(skb) < 0) {
			ipoib_warn(priv, "skb could not be linearized\n");
			++dev->stats.tx_dropped;
			++dev->stats.tx_errors;
			dev_kfree_skb_any(skb);
			return;
		}
		/* Does skb_linearize return ok without reducing nr_frags? */
		if (skb_shinfo(skb)->nr_frags > usable_sge) {
			ipoib_warn(priv, "too many frags after skb linearize\n");
			++dev->stats.tx_dropped;
			++dev->stats.tx_errors;
			dev_kfree_skb_any(skb);
			return;
		}
	}
	ipoib_dbg_data(priv, "sending packet: head 0x%x length %d connection 0x%x\n",
		       tx->tx_head, skb->len, tx->qp->qp_num);

	/*
	 * We put the skb into the tx_ring _before_ we call post_send()
	 * because it's entirely possible that the completion handler will
	 * run before we execute anything after the post_send().  That
	 * means we have to make sure everything is properly recorded and
	 * our state is consistent before we call post_send().
	 */
	tx_req = &tx->tx_ring[tx->tx_head & (ipoib_sendq_size - 1)];
	tx_req->skb = skb;

	if (unlikely(ipoib_dma_map_tx(priv->ca, tx_req))) {
		++dev->stats.tx_errors;
		dev_kfree_skb_any(skb);
		return;
	}

	if ((priv->tx_head - priv->tx_tail) == ipoib_sendq_size - 1) {
		ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n",
			  tx->qp->qp_num);
		netif_stop_queue(dev);
	}

	skb_orphan(skb);
	skb_dst_drop(skb);

	if (netif_queue_stopped(dev)) {
		rc = ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP |
				      IB_CQ_REPORT_MISSED_EVENTS);
		if (unlikely(rc < 0))
			ipoib_warn(priv, "IPoIB/CM:request notify on send CQ failed\n");
		else if (rc)
			napi_schedule(&priv->send_napi);
	}

	rc = post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1), tx_req);
	if (unlikely(rc)) {
		ipoib_warn(priv, "IPoIB/CM:post_send failed, error %d\n", rc);
		++dev->stats.tx_errors;
		ipoib_dma_unmap_tx(priv, tx_req);
		dev_kfree_skb_any(skb);

		if (netif_queue_stopped(dev))
			netif_wake_queue(dev);
	} else {
		netif_trans_update(dev);
		++tx->tx_head;
		++priv->tx_head;
	}
}

void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct ipoib_cm_tx *tx = wc->qp->qp_context;
	unsigned int wr_id = wc->wr_id & ~IPOIB_OP_CM;
	struct ipoib_tx_buf *tx_req;
	unsigned long flags;

	ipoib_dbg_data(priv, "cm send completion: id %d, status: %d\n",
		       wr_id, wc->status);

	if (unlikely(wr_id >= ipoib_sendq_size)) {
		ipoib_warn(priv, "cm send completion event with wrid %d (> %d)\n",
			   wr_id, ipoib_sendq_size);
		return;
	}

	tx_req = &tx->tx_ring[wr_id];

	ipoib_dma_unmap_tx(priv, tx_req);

	/* FIXME: is this right? Shouldn't we only increment on success? */
	++dev->stats.tx_packets;
	dev->stats.tx_bytes += tx_req->skb->len;

	dev_kfree_skb_any(tx_req->skb);

	netif_tx_lock(dev);

	++tx->tx_tail;
	++priv->tx_tail;

	if (unlikely(netif_queue_stopped(dev) &&
		     (priv->tx_head - priv->tx_tail) <= ipoib_sendq_size >> 1 &&
		     test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)))
		netif_wake_queue(dev);

	if (wc->status != IB_WC_SUCCESS &&
	    wc->status != IB_WC_WR_FLUSH_ERR) {
		struct ipoib_neigh *neigh;

		/* IB_WC[_RNR]_RETRY_EXC_ERR error is part of the life cycle,
		 * so don't make waves.
		 */
		if (wc->status == IB_WC_RNR_RETRY_EXC_ERR ||
		    wc->status == IB_WC_RETRY_EXC_ERR)
			ipoib_dbg(priv,
				  "%s: failed cm send event (status=%d, wrid=%d vend_err %#x)\n",
				  __func__, wc->status, wr_id, wc->vendor_err);
		else
			ipoib_warn(priv,
				   "%s: failed cm send event (status=%d, wrid=%d vend_err %#x)\n",
				   __func__, wc->status, wr_id, wc->vendor_err);

		spin_lock_irqsave(&priv->lock, flags);
		neigh = tx->neigh;

		if (neigh) {
			neigh->cm = NULL;
			ipoib_neigh_free(neigh);

			tx->neigh = NULL;
		}

		if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
			list_move(&tx->list, &priv->cm.reap_list);
			queue_work(priv->wq, &priv->cm.reap_task);
		}

		clear_bit(IPOIB_FLAG_OPER_UP, &tx->flags);

		spin_unlock_irqrestore(&priv->lock, flags);
	}

	netif_tx_unlock(dev);
}

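/*
 * Passive-side setup: listen for CM REQs on the service ID built from
 * IPOIB_CM_IETF_ID and our own UD QPN, which peers recover from this
 * interface's IPoIB hardware address.
 */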
int ipoib_cm_dev_open(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	int ret;

	if (!IPOIB_CM_SUPPORTED(dev->dev_addr))
		return 0;

	priv->cm.id = ib_create_cm_id(priv->ca, ipoib_cm_rx_handler, dev);
	if (IS_ERR(priv->cm.id)) {
		pr_warn("%s: failed to create CM ID\n", priv->ca->name);
		ret = PTR_ERR(priv->cm.id);
		goto err_cm;
	}

	ret = ib_cm_listen(priv->cm.id, cpu_to_be64(IPOIB_CM_IETF_ID | priv->qp->qp_num),
			   0);
	if (ret) {
		pr_warn("%s: failed to listen on ID 0x%llx\n", priv->ca->name,
			IPOIB_CM_IETF_ID | priv->qp->qp_num);
		goto err_listen;
	}

	return 0;

err_listen:
	ib_destroy_cm_id(priv->cm.id);
err_cm:
	priv->cm.id = NULL;
	return ret;
}

static void ipoib_cm_free_rx_reap_list(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct ipoib_cm_rx *rx, *n;
	LIST_HEAD(list);

	spin_lock_irq(&priv->lock);
	list_splice_init(&priv->cm.rx_reap_list, &list);
	spin_unlock_irq(&priv->lock);

	list_for_each_entry_safe(rx, n, &list, list) {
		ib_destroy_cm_id(rx->id);
		ib_destroy_qp(rx->qp);
		if (!ipoib_cm_has_srq(dev)) {
			ipoib_cm_free_rx_ring(priv->dev, rx->rx_ring);
			spin_lock_irq(&priv->lock);
			--priv->cm.nonsrq_conn_qp;
			spin_unlock_irq(&priv->lock);
		}
		kfree(rx);
	}
}

void ipoib_cm_dev_stop(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct ipoib_cm_rx *p;
	unsigned long begin;
	int ret;

	if (!IPOIB_CM_SUPPORTED(dev->dev_addr) || !priv->cm.id)
		return;

	ib_destroy_cm_id(priv->cm.id);
	priv->cm.id = NULL;

	spin_lock_irq(&priv->lock);
	while (!list_empty(&priv->cm.passive_ids)) {
		p = list_entry(priv->cm.passive_ids.next, typeof(*p), list);
		list_move(&p->list, &priv->cm.rx_error_list);
		p->state = IPOIB_CM_RX_ERROR;
		spin_unlock_irq(&priv->lock);
		ret = ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE);
		if (ret)
			ipoib_warn(priv, "unable to move qp to error state: %d\n", ret);
		spin_lock_irq(&priv->lock);
	}

	/* Wait for all RX to be drained */
	begin = jiffies;

	while (!list_empty(&priv->cm.rx_error_list) ||
	       !list_empty(&priv->cm.rx_flush_list) ||
	       !list_empty(&priv->cm.rx_drain_list)) {
		if (time_after(jiffies, begin + 5 * HZ)) {
			ipoib_warn(priv, "RX drain timing out\n");

			/*
			 * assume the HW is wedged and just free up everything.
			 */
			list_splice_init(&priv->cm.rx_flush_list,
					 &priv->cm.rx_reap_list);
			list_splice_init(&priv->cm.rx_error_list,
					 &priv->cm.rx_reap_list);
			list_splice_init(&priv->cm.rx_drain_list,
					 &priv->cm.rx_reap_list);
			break;
		}
		spin_unlock_irq(&priv->lock);
		usleep_range(1000, 2000);
		ipoib_drain_cq(dev);
		spin_lock_irq(&priv->lock);
	}

	spin_unlock_irq(&priv->lock);

	ipoib_cm_free_rx_reap_list(dev);

	cancel_delayed_work(&priv->cm.stale_task);
}

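/*
 * Active-side REP handling: validate the advertised MTU, walk our RC
 * send QP through RTR and RTS, re-queue any skbs that piled up on the
 * neighbour while the connection was forming, then confirm with an RTU.
 */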
static int ipoib_cm_rep_handler(struct ib_cm_id *cm_id,
				const struct ib_cm_event *event)
{
	struct ipoib_cm_tx *p = cm_id->context;
	struct ipoib_dev_priv *priv = ipoib_priv(p->dev);
	struct ipoib_cm_data *data = event->private_data;
	struct sk_buff_head skqueue;
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;
	struct sk_buff *skb;

	p->mtu = be32_to_cpu(data->mtu);

	if (p->mtu <= IPOIB_ENCAP_LEN) {
		ipoib_warn(priv, "Rejecting connection: mtu %d <= %d\n",
			   p->mtu, IPOIB_ENCAP_LEN);
		return -EINVAL;
	}

	qp_attr.qp_state = IB_QPS_RTR;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to init QP attr for RTR: %d\n", ret);
		return ret;
	}

	qp_attr.rq_psn = 0 /* FIXME */;
	ret = ib_modify_qp(p->qp, &qp_attr, qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify QP to RTR: %d\n", ret);
		return ret;
	}

	qp_attr.qp_state = IB_QPS_RTS;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to init QP attr for RTS: %d\n", ret);
		return ret;
	}
	ret = ib_modify_qp(p->qp, &qp_attr, qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify QP to RTS: %d\n", ret);
		return ret;
	}

	skb_queue_head_init(&skqueue);

	netif_tx_lock_bh(p->dev);
	spin_lock_irq(&priv->lock);
	set_bit(IPOIB_FLAG_OPER_UP, &p->flags);
	if (p->neigh)
		while ((skb = __skb_dequeue(&p->neigh->queue)))
			__skb_queue_tail(&skqueue, skb);
	spin_unlock_irq(&priv->lock);
	netif_tx_unlock_bh(p->dev);

	while ((skb = __skb_dequeue(&skqueue))) {
		skb->dev = p->dev;
		ret = dev_queue_xmit(skb);
		if (ret)
			ipoib_warn(priv, "%s:dev_queue_xmit failed to re-queue packet, ret:%d\n",
				   __func__, ret);
	}

	ret = ib_send_cm_rtu(cm_id, NULL, 0);
	if (ret) {
		ipoib_warn(priv, "failed to send RTU: %d\n", ret);
		return ret;
	}
	return 0;
}

static struct ib_qp *ipoib_cm_create_tx_qp(struct net_device *dev, struct ipoib_cm_tx *tx)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct ib_qp_init_attr attr = {
		.send_cq		= priv->send_cq,
		.recv_cq		= priv->recv_cq,
		.srq			= priv->cm.srq,
		.cap.max_send_wr	= ipoib_sendq_size,
		.cap.max_send_sge	= 1,
		.sq_sig_type		= IB_SIGNAL_ALL_WR,
		.qp_type		= IB_QPT_RC,
		.qp_context		= tx,
		.create_flags		= 0
	};
	struct ib_qp *tx_qp;

	if (dev->features & NETIF_F_SG)
		attr.cap.max_send_sge = min_t(u32, priv->ca->attrs.max_send_sge,
					      MAX_SKB_FRAGS + 1);

	tx_qp = ib_create_qp(priv->pd, &attr);
	tx->max_send_sge = attr.cap.max_send_sge;
	return tx_qp;
}

static int ipoib_cm_send_req(struct net_device *dev,
			     struct ib_cm_id *id, struct ib_qp *qp,
			     u32 qpn,
			     struct sa_path_rec *pathrec)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct ipoib_cm_data data = {};
	struct ib_cm_req_param req = {};

	data.qpn = cpu_to_be32(priv->qp->qp_num);
	data.mtu = cpu_to_be32(IPOIB_CM_BUF_SIZE);

	req.primary_path		= pathrec;
	req.alternate_path		= NULL;
	req.service_id			= cpu_to_be64(IPOIB_CM_IETF_ID | qpn);
	req.qp_num			= qp->qp_num;
	req.qp_type			= qp->qp_type;
	req.private_data		= &data;
	req.private_data_len		= sizeof(data);
	req.flow_control		= 0;

	req.starting_psn		= 0; /* FIXME */

	/*
	 * Pick some arbitrary defaults here; we could make these
	 * module parameters if anyone cared about setting them.
	 */
	req.responder_resources		= 4;
	req.remote_cm_response_timeout	= 20;
	req.local_cm_response_timeout	= 20;
	req.retry_count			= 0; /* RFC draft warns against retries */
	req.rnr_retry_count		= 0; /* RFC draft warns against retries */
	req.max_cm_retries		= 15;
	req.srq				= ipoib_cm_has_srq(dev);
	return ib_send_cm_req(id, &req);
}

static int ipoib_cm_modify_tx_init(struct net_device *dev,
				   struct ib_cm_id *cm_id, struct ib_qp *qp)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	ret = ib_find_pkey(priv->ca, priv->port, priv->pkey, &qp_attr.pkey_index);
	if (ret) {
		ipoib_warn(priv, "pkey 0x%x not found: %d\n", priv->pkey, ret);
		return ret;
	}

	qp_attr.qp_state = IB_QPS_INIT;
	qp_attr.qp_access_flags = IB_ACCESS_LOCAL_WRITE;
	qp_attr.port_num = priv->port;
	qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS | IB_QP_PKEY_INDEX | IB_QP_PORT;

	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify tx QP to INIT: %d\n", ret);
		return ret;
	}
	return 0;
}

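/*
 * The ring allocation and QP creation below run under
 * memalloc_noio_save(), presumably so that memory reclaim triggered
 * from these allocations cannot recurse into I/O (e.g. swap over this
 * very interface) while the connection is half-constructed.
 */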
static int ipoib_cm_tx_init(struct ipoib_cm_tx *p, u32 qpn,
			    struct sa_path_rec *pathrec)
{
	struct ipoib_dev_priv *priv = ipoib_priv(p->dev);
	unsigned int noio_flag;
	int ret;

	noio_flag = memalloc_noio_save();
	p->tx_ring = vzalloc(array_size(ipoib_sendq_size, sizeof(*p->tx_ring)));
	if (!p->tx_ring) {
		memalloc_noio_restore(noio_flag);
		ret = -ENOMEM;
		goto err_tx;
	}
	memset(p->tx_ring, 0, ipoib_sendq_size * sizeof(*p->tx_ring));

	p->qp = ipoib_cm_create_tx_qp(p->dev, p);
	memalloc_noio_restore(noio_flag);
	if (IS_ERR(p->qp)) {
		ret = PTR_ERR(p->qp);
		ipoib_warn(priv, "failed to create tx qp: %d\n", ret);
		goto err_qp;
	}

	p->id = ib_create_cm_id(priv->ca, ipoib_cm_tx_handler, p);
	if (IS_ERR(p->id)) {
		ret = PTR_ERR(p->id);
		ipoib_warn(priv, "failed to create tx cm id: %d\n", ret);
		goto err_id;
	}

	ret = ipoib_cm_modify_tx_init(p->dev, p->id, p->qp);
	if (ret) {
		ipoib_warn(priv, "failed to modify tx qp to rtr: %d\n", ret);
		goto err_modify_send;
	}

	ret = ipoib_cm_send_req(p->dev, p->id, p->qp, qpn, pathrec);
	if (ret) {
		ipoib_warn(priv, "failed to send cm req: %d\n", ret);
		goto err_modify_send;
	}

	ipoib_dbg(priv, "Request connection 0x%x for gid %pI6 qpn 0x%x\n",
		  p->qp->qp_num, pathrec->dgid.raw, qpn);

	return 0;

err_modify_send:
	ib_destroy_cm_id(p->id);
err_id:
	p->id = NULL;
	ib_destroy_qp(p->qp);
err_qp:
	p->qp = NULL;
	vfree(p->tx_ring);
err_tx:
	return ret;
}

static void ipoib_cm_tx_destroy(struct ipoib_cm_tx *p)
{
	struct ipoib_dev_priv *priv = ipoib_priv(p->dev);
	struct ipoib_tx_buf *tx_req;
	unsigned long begin;

	ipoib_dbg(priv, "Destroy active connection 0x%x head 0x%x tail 0x%x\n",
		  p->qp ? p->qp->qp_num : 0, p->tx_head, p->tx_tail);

	if (p->id)
		ib_destroy_cm_id(p->id);

	if (p->tx_ring) {
		/* Wait for all sends to complete */
		begin = jiffies;
		while ((int) p->tx_tail - (int) p->tx_head < 0) {
			if (time_after(jiffies, begin + 5 * HZ)) {
				ipoib_warn(priv, "timing out; %d sends not completed\n",
					   p->tx_head - p->tx_tail);
				goto timeout;
			}

			usleep_range(1000, 2000);
		}
	}

timeout:

	while ((int) p->tx_tail - (int) p->tx_head < 0) {
		tx_req = &p->tx_ring[p->tx_tail & (ipoib_sendq_size - 1)];
		ipoib_dma_unmap_tx(priv, tx_req);
		dev_kfree_skb_any(tx_req->skb);
		netif_tx_lock_bh(p->dev);
		++p->tx_tail;
		++priv->tx_tail;
		if (unlikely(priv->tx_head - priv->tx_tail == ipoib_sendq_size >> 1) &&
		    netif_queue_stopped(p->dev) &&
		    test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
			netif_wake_queue(p->dev);
		netif_tx_unlock_bh(p->dev);
	}

	if (p->qp)
		ib_destroy_qp(p->qp);

	vfree(p->tx_ring);
	kfree(p);
}

static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
			       const struct ib_cm_event *event)
{
	struct ipoib_cm_tx *tx = cm_id->context;
	struct ipoib_dev_priv *priv = ipoib_priv(tx->dev);
	struct net_device *dev = priv->dev;
	struct ipoib_neigh *neigh;
	unsigned long flags;
	int ret;

	switch (event->event) {
	case IB_CM_DREQ_RECEIVED:
		ipoib_dbg(priv, "DREQ received.\n");
		ib_send_cm_drep(cm_id, NULL, 0);
		break;
	case IB_CM_REP_RECEIVED:
		ipoib_dbg(priv, "REP received.\n");
		ret = ipoib_cm_rep_handler(cm_id, event);
		if (ret)
			ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
				       NULL, 0, NULL, 0);
		break;
	case IB_CM_REQ_ERROR:
	case IB_CM_REJ_RECEIVED:
	case IB_CM_TIMEWAIT_EXIT:
		ipoib_dbg(priv, "CM error %d.\n", event->event);
		netif_tx_lock_bh(dev);
		spin_lock_irqsave(&priv->lock, flags);
		neigh = tx->neigh;

		if (neigh) {
			neigh->cm = NULL;
			ipoib_neigh_free(neigh);

			tx->neigh = NULL;
		}

		if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
			list_move(&tx->list, &priv->cm.reap_list);
			queue_work(priv->wq, &priv->cm.reap_task);
		}

		spin_unlock_irqrestore(&priv->lock, flags);
		netif_tx_unlock_bh(dev);
		break;
	default:
		break;
	}

	return 0;
}

struct ipoib_cm_tx *ipoib_cm_create_tx(struct net_device *dev, struct ipoib_path *path,
				       struct ipoib_neigh *neigh)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct ipoib_cm_tx *tx;

	tx = kzalloc(sizeof(*tx), GFP_ATOMIC);
	if (!tx)
		return NULL;

	neigh->cm = tx;
	tx->neigh = neigh;
	tx->path = path;
	tx->dev = dev;
	list_add(&tx->list, &priv->cm.start_list);
	set_bit(IPOIB_FLAG_INITIALIZED, &tx->flags);
	queue_work(priv->wq, &priv->cm.start_task);
	return tx;
}

void ipoib_cm_destroy_tx(struct ipoib_cm_tx *tx)
{
	struct ipoib_dev_priv *priv = ipoib_priv(tx->dev);
	unsigned long flags;

	if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
		spin_lock_irqsave(&priv->lock, flags);
		list_move(&tx->list, &priv->cm.reap_list);
		queue_work(priv->wq, &priv->cm.reap_task);
		ipoib_dbg(priv, "Reap connection for gid %pI6\n",
			  tx->neigh->daddr + 4);
		tx->neigh = NULL;
		spin_unlock_irqrestore(&priv->lock, flags);
	}
}

#define QPN_AND_OPTIONS_OFFSET	4

static void ipoib_cm_tx_start(struct work_struct *work)
{
	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
						   cm.start_task);
	struct net_device *dev = priv->dev;
	struct ipoib_neigh *neigh;
	struct ipoib_cm_tx *p;
	unsigned long flags;
	struct ipoib_path *path;
	int ret;

	struct sa_path_rec pathrec;
	u32 qpn;

	netif_tx_lock_bh(dev);
	spin_lock_irqsave(&priv->lock, flags);

	while (!list_empty(&priv->cm.start_list)) {
		p = list_entry(priv->cm.start_list.next, typeof(*p), list);
		list_del_init(&p->list);
		neigh = p->neigh;

		qpn = IPOIB_QPN(neigh->daddr);
		/*
		 * As long as the search runs under these two locks,
		 * the path's existence also implies that it is valid.
		 */
		path = __path_find(dev, neigh->daddr + QPN_AND_OPTIONS_OFFSET);
		if (!path) {
			pr_info("%s ignore not valid path %pI6\n",
				__func__,
				neigh->daddr + QPN_AND_OPTIONS_OFFSET);
			goto free_neigh;
		}
		memcpy(&pathrec, &p->path->pathrec, sizeof(pathrec));

		spin_unlock_irqrestore(&priv->lock, flags);
		netif_tx_unlock_bh(dev);

		ret = ipoib_cm_tx_init(p, qpn, &pathrec);

		netif_tx_lock_bh(dev);
		spin_lock_irqsave(&priv->lock, flags);

		if (ret) {
free_neigh:
			neigh = p->neigh;
			if (neigh) {
				neigh->cm = NULL;
				ipoib_neigh_free(neigh);
			}
			list_del(&p->list);
			kfree(p);
		}
	}

	spin_unlock_irqrestore(&priv->lock, flags);
	netif_tx_unlock_bh(dev);
}

static void ipoib_cm_tx_reap(struct work_struct *work)
{
	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
						   cm.reap_task);
	struct net_device *dev = priv->dev;
	struct ipoib_cm_tx *p;
	unsigned long flags;

	netif_tx_lock_bh(dev);
	spin_lock_irqsave(&priv->lock, flags);

	while (!list_empty(&priv->cm.reap_list)) {
		p = list_entry(priv->cm.reap_list.next, typeof(*p), list);
		list_del_init(&p->list);
		spin_unlock_irqrestore(&priv->lock, flags);
		netif_tx_unlock_bh(dev);
		ipoib_cm_tx_destroy(p);
		netif_tx_lock_bh(dev);
		spin_lock_irqsave(&priv->lock, flags);
	}

	spin_unlock_irqrestore(&priv->lock, flags);
	netif_tx_unlock_bh(dev);
}

static void ipoib_cm_skb_reap(struct work_struct *work)
{
	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
						   cm.skb_task);
	struct net_device *dev = priv->dev;
	struct sk_buff *skb;
	unsigned long flags;
	unsigned int mtu = priv->mcast_mtu;

	netif_tx_lock_bh(dev);
	spin_lock_irqsave(&priv->lock, flags);

	while ((skb = skb_dequeue(&priv->cm.skb_queue))) {
		spin_unlock_irqrestore(&priv->lock, flags);
		netif_tx_unlock_bh(dev);

		if (skb->protocol == htons(ETH_P_IP))
			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
#if IS_ENABLED(CONFIG_IPV6)
		else if (skb->protocol == htons(ETH_P_IPV6))
			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
#endif
		dev_kfree_skb_any(skb);

		netif_tx_lock_bh(dev);
		spin_lock_irqsave(&priv->lock, flags);
	}

	spin_unlock_irqrestore(&priv->lock, flags);
	netif_tx_unlock_bh(dev);
}

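/*
 * Called from the TX path for skbs that exceed the connection MTU: the
 * skb is parked on cm.skb_queue, and cm.skb_task (ipoib_cm_skb_reap()
 * above) later emits the ICMP FRAG_NEEDED / PKT_TOOBIG reply from
 * process context.  The work item is queued only when the list was
 * empty, since a non-empty list means the task is already pending.
 */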
void ipoib_cm_skb_too_long(struct net_device *dev, struct sk_buff *skb,
			   unsigned int mtu)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	int e = skb_queue_empty(&priv->cm.skb_queue);

	skb_dst_update_pmtu(skb, mtu);

	skb_queue_tail(&priv->cm.skb_queue, skb);
	if (e)
		queue_work(priv->wq, &priv->cm.skb_task);
}

static void ipoib_cm_rx_reap(struct work_struct *work)
{
	ipoib_cm_free_rx_reap_list(container_of(work, struct ipoib_dev_priv,
						cm.rx_reap_task)->dev);
}

static void ipoib_cm_stale_task(struct work_struct *work)
{
	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
						   cm.stale_task.work);
	struct ipoib_cm_rx *p;
	int ret;

	spin_lock_irq(&priv->lock);
	while (!list_empty(&priv->cm.passive_ids)) {
		/* List is sorted by LRU, start from tail,
		 * stop when we see a recently used entry */
		p = list_entry(priv->cm.passive_ids.prev, typeof(*p), list);
		if (time_before_eq(jiffies, p->jiffies + IPOIB_CM_RX_TIMEOUT))
			break;
		list_move(&p->list, &priv->cm.rx_error_list);
		p->state = IPOIB_CM_RX_ERROR;
		spin_unlock_irq(&priv->lock);
		ret = ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE);
		if (ret)
			ipoib_warn(priv, "unable to move qp to error state: %d\n", ret);
		spin_lock_irq(&priv->lock);
	}

	if (!list_empty(&priv->cm.passive_ids))
		queue_delayed_work(priv->wq,
				   &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
	spin_unlock_irq(&priv->lock);
}

static ssize_t show_mode(struct device *d, struct device_attribute *attr,
			 char *buf)
{
	struct net_device *dev = to_net_dev(d);
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	if (test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags))
		return sprintf(buf, "connected\n");
	else
		return sprintf(buf, "datagram\n");
}

static ssize_t set_mode(struct device *d, struct device_attribute *attr,
			const char *buf, size_t count)
{
	struct net_device *dev = to_net_dev(d);
	int ret;

	if (!rtnl_trylock())
		return restart_syscall();

	if (dev->reg_state != NETREG_REGISTERED) {
		rtnl_unlock();
		return -EPERM;
	}

	ret = ipoib_set_mode(dev, buf);

	/* ipoib_set_mode() normally returns with the rtnl lock still
	 * held; only when it returns -EBUSY has it already dropped the
	 * lock itself, in which case there is no need to rtnl_unlock().
	 */
	if (ret != -EBUSY)
		rtnl_unlock();

	return (!ret || ret == -EBUSY) ? count : ret;
}

static DEVICE_ATTR(mode, S_IWUSR | S_IRUGO, show_mode, set_mode);

int ipoib_cm_add_mode_attr(struct net_device *dev)
{
	return device_create_file(&dev->dev, &dev_attr_mode);
}

static void ipoib_cm_create_srq(struct net_device *dev, int max_sge)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct ib_srq_init_attr srq_init_attr = {
		.srq_type = IB_SRQT_BASIC,
		.attr = {
			.max_wr  = ipoib_recvq_size,
			.max_sge = max_sge
		}
	};

	priv->cm.srq = ib_create_srq(priv->pd, &srq_init_attr);
	if (IS_ERR(priv->cm.srq)) {
		if (PTR_ERR(priv->cm.srq) != -EOPNOTSUPP)
			pr_warn("%s: failed to allocate SRQ, error %ld\n",
				priv->ca->name, PTR_ERR(priv->cm.srq));
		priv->cm.srq = NULL;
		return;
	}

	priv->cm.srq_ring = vzalloc(array_size(ipoib_recvq_size,
					       sizeof(*priv->cm.srq_ring)));
	if (!priv->cm.srq_ring) {
		ib_destroy_srq(priv->cm.srq);
		priv->cm.srq = NULL;
		return;
	}
}

int ipoib_cm_dev_init(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	int max_srq_sge, i;

	INIT_LIST_HEAD(&priv->cm.passive_ids);
	INIT_LIST_HEAD(&priv->cm.reap_list);
	INIT_LIST_HEAD(&priv->cm.start_list);
	INIT_LIST_HEAD(&priv->cm.rx_error_list);
	INIT_LIST_HEAD(&priv->cm.rx_flush_list);
	INIT_LIST_HEAD(&priv->cm.rx_drain_list);
	INIT_LIST_HEAD(&priv->cm.rx_reap_list);
	INIT_WORK(&priv->cm.start_task, ipoib_cm_tx_start);
	INIT_WORK(&priv->cm.reap_task, ipoib_cm_tx_reap);
	INIT_WORK(&priv->cm.skb_task, ipoib_cm_skb_reap);
	INIT_WORK(&priv->cm.rx_reap_task, ipoib_cm_rx_reap);
	INIT_DELAYED_WORK(&priv->cm.stale_task, ipoib_cm_stale_task);

	skb_queue_head_init(&priv->cm.skb_queue);

	ipoib_dbg(priv, "max_srq_sge=%d\n", priv->ca->attrs.max_srq_sge);

	max_srq_sge = min_t(int, IPOIB_CM_RX_SG, priv->ca->attrs.max_srq_sge);
	ipoib_cm_create_srq(dev, max_srq_sge);
	if (ipoib_cm_has_srq(dev)) {
		priv->cm.max_cm_mtu = max_srq_sge * PAGE_SIZE - 0x10;
		priv->cm.num_frags  = max_srq_sge;
		ipoib_dbg(priv, "max_cm_mtu = 0x%x, num_frags=%d\n",
			  priv->cm.max_cm_mtu, priv->cm.num_frags);
	} else {
		priv->cm.max_cm_mtu = IPOIB_CM_MTU;
		priv->cm.num_frags  = IPOIB_CM_RX_SG;
	}

	ipoib_cm_init_rx_wr(dev, &priv->cm.rx_wr, priv->cm.rx_sge);

	if (ipoib_cm_has_srq(dev)) {
		for (i = 0; i < ipoib_recvq_size; ++i) {
			if (!ipoib_cm_alloc_rx_skb(dev, priv->cm.srq_ring, i,
						   priv->cm.num_frags - 1,
						   priv->cm.srq_ring[i].mapping,
						   GFP_KERNEL)) {
				ipoib_warn(priv, "failed to allocate "
					   "receive buffer %d\n", i);
				ipoib_cm_dev_cleanup(dev);
				return -ENOMEM;
			}

			if (ipoib_cm_post_receive_srq(dev, i)) {
				ipoib_warn(priv, "ipoib_cm_post_receive_srq "
					   "failed for buf %d\n", i);
				ipoib_cm_dev_cleanup(dev);
				return -EIO;
			}
		}
	}

	priv->dev->dev_addr[0] = IPOIB_FLAGS_RC;
	return 0;
}

void ipoib_cm_dev_cleanup(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	int ret;

	if (!priv->cm.srq)
		return;

	ipoib_dbg(priv, "Cleanup ipoib connected mode.\n");

	ret = ib_destroy_srq(priv->cm.srq);
	if (ret)
		ipoib_warn(priv, "ib_destroy_srq failed: %d\n", ret);

	priv->cm.srq = NULL;
	if (!priv->cm.srq_ring)
		return;

	ipoib_cm_free_rx_ring(dev, priv->cm.srq_ring);
	priv->cm.srq_ring = NULL;
}