1 /*
2 * Copyright (c) 2004-2007 Voltaire, Inc. All rights reserved.
3 * Copyright (c) 2005 Intel Corporation. All rights reserved.
4 * Copyright (c) 2005 Mellanox Technologies Ltd. All rights reserved.
5 * Copyright (c) 2009 HNR Consulting. All rights reserved.
6 * Copyright (c) 2014 Intel Corporation. All rights reserved.
7 *
8 * This software is available to you under a choice of one of two
9 * licenses. You may choose to be licensed under the terms of the GNU
10 * General Public License (GPL) Version 2, available from the file
11 * COPYING in the main directory of this source tree, or the
12 * OpenIB.org BSD license below:
13 *
14 * Redistribution and use in source and binary forms, with or
15 * without modification, are permitted provided that the following
16 * conditions are met:
17 *
18 * - Redistributions of source code must retain the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer.
21 *
22 * - Redistributions in binary form must reproduce the above
23 * copyright notice, this list of conditions and the following
24 * disclaimer in the documentation and/or other materials
25 * provided with the distribution.
26 *
27 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
28 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
29 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
30 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
31 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
32 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
33 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
34 * SOFTWARE.
35 *
36 */
37
38 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
39
40 #include <linux/dma-mapping.h>
41 #include <linux/idr.h>
42 #include <linux/slab.h>
43 #include <linux/module.h>
44 #include <linux/security.h>
45 #include <rdma/ib_cache.h>
46
47 #include "mad_priv.h"
48 #include "core_priv.h"
49 #include "mad_rmpp.h"
50 #include "smi.h"
51 #include "opa_smi.h"
52 #include "agent.h"
53
54 static int mad_sendq_size = IB_MAD_QP_SEND_SIZE;
55 static int mad_recvq_size = IB_MAD_QP_RECV_SIZE;
56
57 module_param_named(send_queue_size, mad_sendq_size, int, 0444);
58 MODULE_PARM_DESC(send_queue_size, "Size of send queue in number of work requests");
59 module_param_named(recv_queue_size, mad_recvq_size, int, 0444);
60 MODULE_PARM_DESC(recv_queue_size, "Size of receive queue in number of work requests");
61
62 /*
63 * The mlx4 driver uses the top byte to distinguish which virtual function
64 * generated the MAD, so we must avoid using it.
65 */
66 #define AGENT_ID_LIMIT (1 << 24)
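/*
 * Agent IDs handed out by idr_alloc_cyclic() in ib_register_mad_agent()
 * stay below this limit and become agent.hi_tid, so the top byte of the
 * high 32 bits of each TID generated here stays clear for the mlx4 use
 * described above.
 */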
67 static DEFINE_IDR(ib_mad_clients);
68 static struct list_head ib_mad_port_list;
69
70 /* Port list lock */
71 static DEFINE_SPINLOCK(ib_mad_port_list_lock);
72
73 /* Forward declarations */
74 static int method_in_use(struct ib_mad_mgmt_method_table **method,
75 struct ib_mad_reg_req *mad_reg_req);
76 static void remove_mad_reg_req(struct ib_mad_agent_private *priv);
77 static struct ib_mad_agent_private *find_mad_agent(
78 struct ib_mad_port_private *port_priv,
79 const struct ib_mad_hdr *mad);
80 static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
81 struct ib_mad_private *mad);
82 static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
83 static void timeout_sends(struct work_struct *work);
84 static void local_completions(struct work_struct *work);
85 static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
86 struct ib_mad_agent_private *agent_priv,
87 u8 mgmt_class);
88 static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
89 struct ib_mad_agent_private *agent_priv);
90 static bool ib_mad_send_error(struct ib_mad_port_private *port_priv,
91 struct ib_wc *wc);
92 static void ib_mad_send_done(struct ib_cq *cq, struct ib_wc *wc);
93
94 /*
95  * Returns an ib_mad_port_private structure or NULL for a device/port.
96  * Assumes ib_mad_port_list_lock is held.
97 */
98 static inline struct ib_mad_port_private *
99 __ib_get_mad_port(struct ib_device *device, int port_num)
100 {
101 struct ib_mad_port_private *entry;
102
103 list_for_each_entry(entry, &ib_mad_port_list, port_list) {
104 if (entry->device == device && entry->port_num == port_num)
105 return entry;
106 }
107 return NULL;
108 }
109
110 /*
111  * Wrapper function to return an ib_mad_port_private structure or NULL
112 * for a device/port
113 */
114 static inline struct ib_mad_port_private *
115 ib_get_mad_port(struct ib_device *device, int port_num)
116 {
117 struct ib_mad_port_private *entry;
118 unsigned long flags;
119
120 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
121 entry = __ib_get_mad_port(device, port_num);
122 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
123
124 return entry;
125 }
126
127 static inline u8 convert_mgmt_class(u8 mgmt_class)
128 {
129 /* Alias IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE to 0 */
130 return mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE ?
131 0 : mgmt_class;
132 }
133
134 static int get_spl_qp_index(enum ib_qp_type qp_type)
135 {
136 switch (qp_type)
137 {
138 case IB_QPT_SMI:
139 return 0;
140 case IB_QPT_GSI:
141 return 1;
142 default:
143 return -1;
144 }
145 }
146
147 static int vendor_class_index(u8 mgmt_class)
148 {
149 return mgmt_class - IB_MGMT_CLASS_VENDOR_RANGE2_START;
150 }
151
152 static int is_vendor_class(u8 mgmt_class)
153 {
154 if ((mgmt_class < IB_MGMT_CLASS_VENDOR_RANGE2_START) ||
155 (mgmt_class > IB_MGMT_CLASS_VENDOR_RANGE2_END))
156 return 0;
157 return 1;
158 }
159
160 static int is_vendor_oui(char *oui)
161 {
162 if (oui[0] || oui[1] || oui[2])
163 return 1;
164 return 0;
165 }
166
167 static int is_vendor_method_in_use(
168 struct ib_mad_mgmt_vendor_class *vendor_class,
169 struct ib_mad_reg_req *mad_reg_req)
170 {
171 struct ib_mad_mgmt_method_table *method;
172 int i;
173
174 for (i = 0; i < MAX_MGMT_OUI; i++) {
175 if (!memcmp(vendor_class->oui[i], mad_reg_req->oui, 3)) {
176 method = vendor_class->method_table[i];
177 if (method) {
178 if (method_in_use(&method, mad_reg_req))
179 return 1;
180 else
181 break;
182 }
183 }
184 }
185 return 0;
186 }
187
188 int ib_response_mad(const struct ib_mad_hdr *hdr)
189 {
190 return ((hdr->method & IB_MGMT_METHOD_RESP) ||
191 (hdr->method == IB_MGMT_METHOD_TRAP_REPRESS) ||
192 ((hdr->mgmt_class == IB_MGMT_CLASS_BM) &&
193 (hdr->attr_mod & IB_BM_ATTR_MOD_RESP)));
194 }
195 EXPORT_SYMBOL(ib_response_mad);
196
197 /*
198 * ib_register_mad_agent - Register to send/receive MADs
199 *
200 * Context: Process context.
201 */
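/*
 * Illustrative caller-side sketch (handler and context names such as
 * my_send_handler are hypothetical, not defined in this file):
 *
 *	struct ib_mad_reg_req req = {
 *		.mgmt_class		= IB_MGMT_CLASS_SUBN_ADM,
 *		.mgmt_class_version	= 2,
 *	};
 *	struct ib_mad_agent *agent;
 *
 *	set_bit(IB_MGMT_METHOD_GET, req.method_mask);
 *	agent = ib_register_mad_agent(device, port_num, IB_QPT_GSI, &req,
 *				      IB_MGMT_RMPP_VERSION, my_send_handler,
 *				      my_recv_handler, my_context, 0);
 *	if (IS_ERR(agent))
 *		return PTR_ERR(agent);
 */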
202 struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
203 u8 port_num,
204 enum ib_qp_type qp_type,
205 struct ib_mad_reg_req *mad_reg_req,
206 u8 rmpp_version,
207 ib_mad_send_handler send_handler,
208 ib_mad_recv_handler recv_handler,
209 void *context,
210 u32 registration_flags)
211 {
212 struct ib_mad_port_private *port_priv;
213 struct ib_mad_agent *ret = ERR_PTR(-EINVAL);
214 struct ib_mad_agent_private *mad_agent_priv;
215 struct ib_mad_reg_req *reg_req = NULL;
216 struct ib_mad_mgmt_class_table *class;
217 struct ib_mad_mgmt_vendor_class_table *vendor;
218 struct ib_mad_mgmt_vendor_class *vendor_class;
219 struct ib_mad_mgmt_method_table *method;
220 int ret2, qpn;
221 u8 mgmt_class, vclass;
222
223 /* Validate parameters */
224 qpn = get_spl_qp_index(qp_type);
225 if (qpn == -1) {
226 dev_notice(&device->dev,
227 "ib_register_mad_agent: invalid QP Type %d\n",
228 qp_type);
229 goto error1;
230 }
231
232 if (rmpp_version && rmpp_version != IB_MGMT_RMPP_VERSION) {
233 dev_notice(&device->dev,
234 "ib_register_mad_agent: invalid RMPP Version %u\n",
235 rmpp_version);
236 goto error1;
237 }
238
239 /* Validate MAD registration request if supplied */
240 if (mad_reg_req) {
241 if (mad_reg_req->mgmt_class_version >= MAX_MGMT_VERSION) {
242 dev_notice(&device->dev,
243 "ib_register_mad_agent: invalid Class Version %u\n",
244 mad_reg_req->mgmt_class_version);
245 goto error1;
246 }
247 if (!recv_handler) {
248 dev_notice(&device->dev,
249 "ib_register_mad_agent: no recv_handler\n");
250 goto error1;
251 }
252 if (mad_reg_req->mgmt_class >= MAX_MGMT_CLASS) {
253 /*
254 * IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE is the only
255 * one in this range currently allowed
256 */
257 if (mad_reg_req->mgmt_class !=
258 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
259 dev_notice(&device->dev,
260 "ib_register_mad_agent: Invalid Mgmt Class 0x%x\n",
261 mad_reg_req->mgmt_class);
262 goto error1;
263 }
264 } else if (mad_reg_req->mgmt_class == 0) {
265 /*
266 * Class 0 is reserved in IBA and is used for
267 * aliasing of IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
268 */
269 dev_notice(&device->dev,
270 "ib_register_mad_agent: Invalid Mgmt Class 0\n");
271 goto error1;
272 } else if (is_vendor_class(mad_reg_req->mgmt_class)) {
273 /*
274 * If class is in "new" vendor range,
275 * ensure supplied OUI is not zero
276 */
277 if (!is_vendor_oui(mad_reg_req->oui)) {
278 dev_notice(&device->dev,
279 "ib_register_mad_agent: No OUI specified for class 0x%x\n",
280 mad_reg_req->mgmt_class);
281 goto error1;
282 }
283 }
284 /* Make sure class supplied is consistent with RMPP */
285 if (!ib_is_mad_class_rmpp(mad_reg_req->mgmt_class)) {
286 if (rmpp_version) {
287 dev_notice(&device->dev,
288 "ib_register_mad_agent: RMPP version for non-RMPP class 0x%x\n",
289 mad_reg_req->mgmt_class);
290 goto error1;
291 }
292 }
293
294 /* Make sure class supplied is consistent with QP type */
295 if (qp_type == IB_QPT_SMI) {
296 if ((mad_reg_req->mgmt_class !=
297 IB_MGMT_CLASS_SUBN_LID_ROUTED) &&
298 (mad_reg_req->mgmt_class !=
299 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
300 dev_notice(&device->dev,
301 "ib_register_mad_agent: Invalid SM QP type: class 0x%x\n",
302 mad_reg_req->mgmt_class);
303 goto error1;
304 }
305 } else {
306 if ((mad_reg_req->mgmt_class ==
307 IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
308 (mad_reg_req->mgmt_class ==
309 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
310 dev_notice(&device->dev,
311 "ib_register_mad_agent: Invalid GS QP type: class 0x%x\n",
312 mad_reg_req->mgmt_class);
313 goto error1;
314 }
315 }
316 } else {
317 /* No registration request supplied */
318 if (!send_handler)
319 goto error1;
320 if (registration_flags & IB_MAD_USER_RMPP)
321 goto error1;
322 }
323
324 /* Validate device and port */
325 port_priv = ib_get_mad_port(device, port_num);
326 if (!port_priv) {
327 dev_notice(&device->dev,
328 "ib_register_mad_agent: Invalid port %d\n",
329 port_num);
330 ret = ERR_PTR(-ENODEV);
331 goto error1;
332 }
333
334 /* Verify the QP requested is supported. For example, Ethernet devices
335 * will not have QP0 */
336 if (!port_priv->qp_info[qpn].qp) {
337 dev_notice(&device->dev,
338 "ib_register_mad_agent: QP %d not supported\n", qpn);
339 ret = ERR_PTR(-EPROTONOSUPPORT);
340 goto error1;
341 }
342
343 /* Allocate structures */
344 mad_agent_priv = kzalloc(sizeof *mad_agent_priv, GFP_KERNEL);
345 if (!mad_agent_priv) {
346 ret = ERR_PTR(-ENOMEM);
347 goto error1;
348 }
349
350 if (mad_reg_req) {
351 reg_req = kmemdup(mad_reg_req, sizeof *reg_req, GFP_KERNEL);
352 if (!reg_req) {
353 ret = ERR_PTR(-ENOMEM);
354 goto error3;
355 }
356 }
357
358 /* Now, fill in the various structures */
359 mad_agent_priv->qp_info = &port_priv->qp_info[qpn];
360 mad_agent_priv->reg_req = reg_req;
361 mad_agent_priv->agent.rmpp_version = rmpp_version;
362 mad_agent_priv->agent.device = device;
363 mad_agent_priv->agent.recv_handler = recv_handler;
364 mad_agent_priv->agent.send_handler = send_handler;
365 mad_agent_priv->agent.context = context;
366 mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp;
367 mad_agent_priv->agent.port_num = port_num;
368 mad_agent_priv->agent.flags = registration_flags;
369 spin_lock_init(&mad_agent_priv->lock);
370 INIT_LIST_HEAD(&mad_agent_priv->send_list);
371 INIT_LIST_HEAD(&mad_agent_priv->wait_list);
372 INIT_LIST_HEAD(&mad_agent_priv->done_list);
373 INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
374 INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends);
375 INIT_LIST_HEAD(&mad_agent_priv->local_list);
376 INIT_WORK(&mad_agent_priv->local_work, local_completions);
377 atomic_set(&mad_agent_priv->refcount, 1);
378 init_completion(&mad_agent_priv->comp);
379
380 ret2 = ib_mad_agent_security_setup(&mad_agent_priv->agent, qp_type);
381 if (ret2) {
382 ret = ERR_PTR(ret2);
383 goto error4;
384 }
385
386 idr_preload(GFP_KERNEL);
387 idr_lock(&ib_mad_clients);
388 ret2 = idr_alloc_cyclic(&ib_mad_clients, mad_agent_priv, 0,
389 AGENT_ID_LIMIT, GFP_ATOMIC);
390 idr_unlock(&ib_mad_clients);
391 idr_preload_end();
392
393 if (ret2 < 0) {
394 ret = ERR_PTR(ret2);
395 goto error5;
396 }
397 mad_agent_priv->agent.hi_tid = ret2;
398
399 /*
400 * Make sure MAD registration (if supplied)
401	 * is non-overlapping with any existing ones
402 */
403 spin_lock_irq(&port_priv->reg_lock);
404 if (mad_reg_req) {
405 mgmt_class = convert_mgmt_class(mad_reg_req->mgmt_class);
406 if (!is_vendor_class(mgmt_class)) {
407 class = port_priv->version[mad_reg_req->
408 mgmt_class_version].class;
409 if (class) {
410 method = class->method_table[mgmt_class];
411 if (method) {
412 if (method_in_use(&method,
413 mad_reg_req))
414 goto error6;
415 }
416 }
417 ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv,
418 mgmt_class);
419 } else {
420 /* "New" vendor class range */
421 vendor = port_priv->version[mad_reg_req->
422 mgmt_class_version].vendor;
423 if (vendor) {
424 vclass = vendor_class_index(mgmt_class);
425 vendor_class = vendor->vendor_class[vclass];
426 if (vendor_class) {
427 if (is_vendor_method_in_use(
428 vendor_class,
429 mad_reg_req))
430 goto error6;
431 }
432 }
433 ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv);
434 }
435 if (ret2) {
436 ret = ERR_PTR(ret2);
437 goto error6;
438 }
439 }
440 spin_unlock_irq(&port_priv->reg_lock);
441
442 return &mad_agent_priv->agent;
443 error6:
444 spin_unlock_irq(&port_priv->reg_lock);
445 idr_lock(&ib_mad_clients);
446 idr_remove(&ib_mad_clients, mad_agent_priv->agent.hi_tid);
447 idr_unlock(&ib_mad_clients);
448 error5:
449 ib_mad_agent_security_cleanup(&mad_agent_priv->agent);
450 error4:
451 kfree(reg_req);
452 error3:
453 kfree(mad_agent_priv);
454 error1:
455 return ret;
456 }
457 EXPORT_SYMBOL(ib_register_mad_agent);
458
459 static inline int is_snooping_sends(int mad_snoop_flags)
460 {
461 return (mad_snoop_flags &
462 (/*IB_MAD_SNOOP_POSTED_SENDS |
463 IB_MAD_SNOOP_RMPP_SENDS |*/
464 IB_MAD_SNOOP_SEND_COMPLETIONS /*|
465 IB_MAD_SNOOP_RMPP_SEND_COMPLETIONS*/));
466 }
467
468 static inline int is_snooping_recvs(int mad_snoop_flags)
469 {
470 return (mad_snoop_flags &
471 (IB_MAD_SNOOP_RECVS /*|
472 IB_MAD_SNOOP_RMPP_RECVS*/));
473 }
474
475 static int register_snoop_agent(struct ib_mad_qp_info *qp_info,
476 struct ib_mad_snoop_private *mad_snoop_priv)
477 {
478 struct ib_mad_snoop_private **new_snoop_table;
479 unsigned long flags;
480 int i;
481
482 spin_lock_irqsave(&qp_info->snoop_lock, flags);
483 /* Check for empty slot in array. */
484 for (i = 0; i < qp_info->snoop_table_size; i++)
485 if (!qp_info->snoop_table[i])
486 break;
487
488 if (i == qp_info->snoop_table_size) {
489 /* Grow table. */
490 new_snoop_table = krealloc(qp_info->snoop_table,
491 sizeof mad_snoop_priv *
492 (qp_info->snoop_table_size + 1),
493 GFP_ATOMIC);
494 if (!new_snoop_table) {
495 i = -ENOMEM;
496 goto out;
497 }
498
499 qp_info->snoop_table = new_snoop_table;
500 qp_info->snoop_table_size++;
501 }
502 qp_info->snoop_table[i] = mad_snoop_priv;
503 atomic_inc(&qp_info->snoop_count);
504 out:
505 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
506 return i;
507 }
508
509 struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
510 u8 port_num,
511 enum ib_qp_type qp_type,
512 int mad_snoop_flags,
513 ib_mad_snoop_handler snoop_handler,
514 ib_mad_recv_handler recv_handler,
515 void *context)
516 {
517 struct ib_mad_port_private *port_priv;
518 struct ib_mad_agent *ret;
519 struct ib_mad_snoop_private *mad_snoop_priv;
520 int qpn;
521 int err;
522
523 /* Validate parameters */
524 if ((is_snooping_sends(mad_snoop_flags) && !snoop_handler) ||
525 (is_snooping_recvs(mad_snoop_flags) && !recv_handler)) {
526 ret = ERR_PTR(-EINVAL);
527 goto error1;
528 }
529 qpn = get_spl_qp_index(qp_type);
530 if (qpn == -1) {
531 ret = ERR_PTR(-EINVAL);
532 goto error1;
533 }
534 port_priv = ib_get_mad_port(device, port_num);
535 if (!port_priv) {
536 ret = ERR_PTR(-ENODEV);
537 goto error1;
538 }
539 /* Allocate structures */
540 mad_snoop_priv = kzalloc(sizeof *mad_snoop_priv, GFP_KERNEL);
541 if (!mad_snoop_priv) {
542 ret = ERR_PTR(-ENOMEM);
543 goto error1;
544 }
545
546 /* Now, fill in the various structures */
547 mad_snoop_priv->qp_info = &port_priv->qp_info[qpn];
548 mad_snoop_priv->agent.device = device;
549 mad_snoop_priv->agent.recv_handler = recv_handler;
550 mad_snoop_priv->agent.snoop_handler = snoop_handler;
551 mad_snoop_priv->agent.context = context;
552 mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp;
553 mad_snoop_priv->agent.port_num = port_num;
554 mad_snoop_priv->mad_snoop_flags = mad_snoop_flags;
555 init_completion(&mad_snoop_priv->comp);
556
557 err = ib_mad_agent_security_setup(&mad_snoop_priv->agent, qp_type);
558 if (err) {
559 ret = ERR_PTR(err);
560 goto error2;
561 }
562
563 mad_snoop_priv->snoop_index = register_snoop_agent(
564 &port_priv->qp_info[qpn],
565 mad_snoop_priv);
566 if (mad_snoop_priv->snoop_index < 0) {
567 ret = ERR_PTR(mad_snoop_priv->snoop_index);
568 goto error3;
569 }
570
571 atomic_set(&mad_snoop_priv->refcount, 1);
572 return &mad_snoop_priv->agent;
573 error3:
574 ib_mad_agent_security_cleanup(&mad_snoop_priv->agent);
575 error2:
576 kfree(mad_snoop_priv);
577 error1:
578 return ret;
579 }
580 EXPORT_SYMBOL(ib_register_mad_snoop);
581
582 static inline void deref_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
583 {
584 if (atomic_dec_and_test(&mad_agent_priv->refcount))
585 complete(&mad_agent_priv->comp);
586 }
587
588 static inline void deref_snoop_agent(struct ib_mad_snoop_private *mad_snoop_priv)
589 {
590 if (atomic_dec_and_test(&mad_snoop_priv->refcount))
591 complete(&mad_snoop_priv->comp);
592 }
593
594 static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
595 {
596 struct ib_mad_port_private *port_priv;
597
598 /* Note that we could still be handling received MADs */
599
600 /*
601 * Canceling all sends results in dropping received response
602 * MADs, preventing us from queuing additional work
603 */
604 cancel_mads(mad_agent_priv);
605 port_priv = mad_agent_priv->qp_info->port_priv;
606 cancel_delayed_work(&mad_agent_priv->timed_work);
607
608 spin_lock_irq(&port_priv->reg_lock);
609 remove_mad_reg_req(mad_agent_priv);
610 spin_unlock_irq(&port_priv->reg_lock);
611 idr_lock(&ib_mad_clients);
612 idr_remove(&ib_mad_clients, mad_agent_priv->agent.hi_tid);
613 idr_unlock(&ib_mad_clients);
614
615 flush_workqueue(port_priv->wq);
616 ib_cancel_rmpp_recvs(mad_agent_priv);
617
618 deref_mad_agent(mad_agent_priv);
619 wait_for_completion(&mad_agent_priv->comp);
620
621 ib_mad_agent_security_cleanup(&mad_agent_priv->agent);
622
623 kfree(mad_agent_priv->reg_req);
624 kfree_rcu(mad_agent_priv, rcu);
625 }
626
627 static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv)
628 {
629 struct ib_mad_qp_info *qp_info;
630 unsigned long flags;
631
632 qp_info = mad_snoop_priv->qp_info;
633 spin_lock_irqsave(&qp_info->snoop_lock, flags);
634 qp_info->snoop_table[mad_snoop_priv->snoop_index] = NULL;
635 atomic_dec(&qp_info->snoop_count);
636 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
637
638 deref_snoop_agent(mad_snoop_priv);
639 wait_for_completion(&mad_snoop_priv->comp);
640
641 ib_mad_agent_security_cleanup(&mad_snoop_priv->agent);
642
643 kfree(mad_snoop_priv);
644 }
645
646 /*
647 * ib_unregister_mad_agent - Unregisters a client from using MAD services
648 *
649 * Context: Process context.
650 */
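/*
 * Blocks until all outstanding sends for the agent have been cancelled or
 * completed and the final reference is dropped (see the
 * wait_for_completion() in unregister_mad_agent()), so no callbacks for
 * this agent run after it returns.
 */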
651 void ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
652 {
653 struct ib_mad_agent_private *mad_agent_priv;
654 struct ib_mad_snoop_private *mad_snoop_priv;
655
656 /* If the TID is zero, the agent can only snoop. */
657 if (mad_agent->hi_tid) {
658 mad_agent_priv = container_of(mad_agent,
659 struct ib_mad_agent_private,
660 agent);
661 unregister_mad_agent(mad_agent_priv);
662 } else {
663 mad_snoop_priv = container_of(mad_agent,
664 struct ib_mad_snoop_private,
665 agent);
666 unregister_mad_snoop(mad_snoop_priv);
667 }
668 }
669 EXPORT_SYMBOL(ib_unregister_mad_agent);
670
671 static void dequeue_mad(struct ib_mad_list_head *mad_list)
672 {
673 struct ib_mad_queue *mad_queue;
674 unsigned long flags;
675
676 mad_queue = mad_list->mad_queue;
677 spin_lock_irqsave(&mad_queue->lock, flags);
678 list_del(&mad_list->list);
679 mad_queue->count--;
680 spin_unlock_irqrestore(&mad_queue->lock, flags);
681 }
682
683 static void snoop_send(struct ib_mad_qp_info *qp_info,
684 struct ib_mad_send_buf *send_buf,
685 struct ib_mad_send_wc *mad_send_wc,
686 int mad_snoop_flags)
687 {
688 struct ib_mad_snoop_private *mad_snoop_priv;
689 unsigned long flags;
690 int i;
691
692 spin_lock_irqsave(&qp_info->snoop_lock, flags);
693 for (i = 0; i < qp_info->snoop_table_size; i++) {
694 mad_snoop_priv = qp_info->snoop_table[i];
695 if (!mad_snoop_priv ||
696 !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
697 continue;
698
699 atomic_inc(&mad_snoop_priv->refcount);
700 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
701 mad_snoop_priv->agent.snoop_handler(&mad_snoop_priv->agent,
702 send_buf, mad_send_wc);
703 deref_snoop_agent(mad_snoop_priv);
704 spin_lock_irqsave(&qp_info->snoop_lock, flags);
705 }
706 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
707 }
708
709 static void snoop_recv(struct ib_mad_qp_info *qp_info,
710 struct ib_mad_recv_wc *mad_recv_wc,
711 int mad_snoop_flags)
712 {
713 struct ib_mad_snoop_private *mad_snoop_priv;
714 unsigned long flags;
715 int i;
716
717 spin_lock_irqsave(&qp_info->snoop_lock, flags);
718 for (i = 0; i < qp_info->snoop_table_size; i++) {
719 mad_snoop_priv = qp_info->snoop_table[i];
720 if (!mad_snoop_priv ||
721 !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
722 continue;
723
724 atomic_inc(&mad_snoop_priv->refcount);
725 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
726 mad_snoop_priv->agent.recv_handler(&mad_snoop_priv->agent, NULL,
727 mad_recv_wc);
728 deref_snoop_agent(mad_snoop_priv);
729 spin_lock_irqsave(&qp_info->snoop_lock, flags);
730 }
731 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
732 }
733
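/*
 * Fabricate a receive work completion for an SMP that is processed
 * locally (see handle_outgoing_dr_smp()) instead of being posted to the
 * QP: the packet "arrives" from QP0 with a successful status and a byte
 * length covering the MAD plus the GRH a real receive would carry.
 */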
734 static void build_smp_wc(struct ib_qp *qp, struct ib_cqe *cqe, u16 slid,
735 u16 pkey_index, u8 port_num, struct ib_wc *wc)
736 {
737 memset(wc, 0, sizeof *wc);
738 wc->wr_cqe = cqe;
739 wc->status = IB_WC_SUCCESS;
740 wc->opcode = IB_WC_RECV;
741 wc->pkey_index = pkey_index;
742 wc->byte_len = sizeof(struct ib_mad) + sizeof(struct ib_grh);
743 wc->src_qp = IB_QP0;
744 wc->qp = qp;
745 wc->slid = slid;
746 wc->sl = 0;
747 wc->dlid_path_bits = 0;
748 wc->port_num = port_num;
749 }
750
751 static size_t mad_priv_size(const struct ib_mad_private *mp)
752 {
753 return sizeof(struct ib_mad_private) + mp->mad_size;
754 }
755
756 static struct ib_mad_private *alloc_mad_private(size_t mad_size, gfp_t flags)
757 {
758 size_t size = sizeof(struct ib_mad_private) + mad_size;
759 struct ib_mad_private *ret = kzalloc(size, flags);
760
761 if (ret)
762 ret->mad_size = mad_size;
763
764 return ret;
765 }
766
767 static size_t port_mad_size(const struct ib_mad_port_private *port_priv)
768 {
769 return rdma_max_mad_size(port_priv->device, port_priv->port_num);
770 }
771
772 static size_t mad_priv_dma_size(const struct ib_mad_private *mp)
773 {
774 return sizeof(struct ib_grh) + mp->mad_size;
775 }
776
777 /*
778 * Return 0 if SMP is to be sent
779 * Return 1 if SMP was consumed locally (whether or not solicited)
780 * Return < 0 if error
781 */
782 static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
783 struct ib_mad_send_wr_private *mad_send_wr)
784 {
785 int ret = 0;
786 struct ib_smp *smp = mad_send_wr->send_buf.mad;
787 struct opa_smp *opa_smp = (struct opa_smp *)smp;
788 unsigned long flags;
789 struct ib_mad_local_private *local;
790 struct ib_mad_private *mad_priv;
791 struct ib_mad_port_private *port_priv;
792 struct ib_mad_agent_private *recv_mad_agent = NULL;
793 struct ib_device *device = mad_agent_priv->agent.device;
794 u8 port_num;
795 struct ib_wc mad_wc;
796 struct ib_ud_wr *send_wr = &mad_send_wr->send_wr;
797 size_t mad_size = port_mad_size(mad_agent_priv->qp_info->port_priv);
798 u16 out_mad_pkey_index = 0;
799 u16 drslid;
800 bool opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device,
801 mad_agent_priv->qp_info->port_priv->port_num);
802
803 if (rdma_cap_ib_switch(device) &&
804 smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
805 port_num = send_wr->port_num;
806 else
807 port_num = mad_agent_priv->agent.port_num;
808
809 /*
810 * Directed route handling starts if the initial LID routed part of
811 * a request or the ending LID routed part of a response is empty.
812 * If we are at the start of the LID routed part, don't update the
813 * hop_ptr or hop_cnt. See section 14.2.2, Vol 1 IB spec.
814 */
815 if (opa && smp->class_version == OPA_SM_CLASS_VERSION) {
816 u32 opa_drslid;
817
818 if ((opa_get_smp_direction(opa_smp)
819 ? opa_smp->route.dr.dr_dlid : opa_smp->route.dr.dr_slid) ==
820 OPA_LID_PERMISSIVE &&
821 opa_smi_handle_dr_smp_send(opa_smp,
822 rdma_cap_ib_switch(device),
823 port_num) == IB_SMI_DISCARD) {
824 ret = -EINVAL;
825 dev_err(&device->dev, "OPA Invalid directed route\n");
826 goto out;
827 }
828 opa_drslid = be32_to_cpu(opa_smp->route.dr.dr_slid);
829 if (opa_drslid != be32_to_cpu(OPA_LID_PERMISSIVE) &&
830 opa_drslid & 0xffff0000) {
831 ret = -EINVAL;
832 dev_err(&device->dev, "OPA Invalid dr_slid 0x%x\n",
833 opa_drslid);
834 goto out;
835 }
836 drslid = (u16)(opa_drslid & 0x0000ffff);
837
838 /* Check to post send on QP or process locally */
839 if (opa_smi_check_local_smp(opa_smp, device) == IB_SMI_DISCARD &&
840 opa_smi_check_local_returning_smp(opa_smp, device) == IB_SMI_DISCARD)
841 goto out;
842 } else {
843 if ((ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) ==
844 IB_LID_PERMISSIVE &&
845 smi_handle_dr_smp_send(smp, rdma_cap_ib_switch(device), port_num) ==
846 IB_SMI_DISCARD) {
847 ret = -EINVAL;
848 dev_err(&device->dev, "Invalid directed route\n");
849 goto out;
850 }
851 drslid = be16_to_cpu(smp->dr_slid);
852
853 /* Check to post send on QP or process locally */
854 if (smi_check_local_smp(smp, device) == IB_SMI_DISCARD &&
855 smi_check_local_returning_smp(smp, device) == IB_SMI_DISCARD)
856 goto out;
857 }
858
859 local = kmalloc(sizeof *local, GFP_ATOMIC);
860 if (!local) {
861 ret = -ENOMEM;
862 goto out;
863 }
864 local->mad_priv = NULL;
865 local->recv_mad_agent = NULL;
866 mad_priv = alloc_mad_private(mad_size, GFP_ATOMIC);
867 if (!mad_priv) {
868 ret = -ENOMEM;
869 kfree(local);
870 goto out;
871 }
872
873 build_smp_wc(mad_agent_priv->agent.qp,
874 send_wr->wr.wr_cqe, drslid,
875 send_wr->pkey_index,
876 send_wr->port_num, &mad_wc);
877
878 if (opa && smp->base_version == OPA_MGMT_BASE_VERSION) {
879 mad_wc.byte_len = mad_send_wr->send_buf.hdr_len
880 + mad_send_wr->send_buf.data_len
881 + sizeof(struct ib_grh);
882 }
883
884 /* No GRH for DR SMP */
885 ret = device->process_mad(device, 0, port_num, &mad_wc, NULL,
886 (const struct ib_mad_hdr *)smp, mad_size,
887 (struct ib_mad_hdr *)mad_priv->mad,
888 &mad_size, &out_mad_pkey_index);
889 switch (ret)
890 {
891 case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
892 if (ib_response_mad((const struct ib_mad_hdr *)mad_priv->mad) &&
893 mad_agent_priv->agent.recv_handler) {
894 local->mad_priv = mad_priv;
895 local->recv_mad_agent = mad_agent_priv;
896 /*
897 * Reference MAD agent until receive
898 * side of local completion handled
899 */
900 atomic_inc(&mad_agent_priv->refcount);
901 } else
902 kfree(mad_priv);
903 break;
904 case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED:
905 kfree(mad_priv);
906 break;
907 case IB_MAD_RESULT_SUCCESS:
908 /* Treat like an incoming receive MAD */
909 port_priv = ib_get_mad_port(mad_agent_priv->agent.device,
910 mad_agent_priv->agent.port_num);
911 if (port_priv) {
912 memcpy(mad_priv->mad, smp, mad_priv->mad_size);
913 recv_mad_agent = find_mad_agent(port_priv,
914 (const struct ib_mad_hdr *)mad_priv->mad);
915 }
916 if (!port_priv || !recv_mad_agent) {
917 /*
918 * No receiving agent so drop packet and
919 * generate send completion.
920 */
921 kfree(mad_priv);
922 break;
923 }
924 local->mad_priv = mad_priv;
925 local->recv_mad_agent = recv_mad_agent;
926 break;
927 default:
928 kfree(mad_priv);
929 kfree(local);
930 ret = -EINVAL;
931 goto out;
932 }
933
934 local->mad_send_wr = mad_send_wr;
935 if (opa) {
936 local->mad_send_wr->send_wr.pkey_index = out_mad_pkey_index;
937 local->return_wc_byte_len = mad_size;
938 }
939 /* Reference MAD agent until send side of local completion handled */
940 atomic_inc(&mad_agent_priv->refcount);
941 /* Queue local completion to local list */
942 spin_lock_irqsave(&mad_agent_priv->lock, flags);
943 list_add_tail(&local->completion_list, &mad_agent_priv->local_list);
944 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
945 queue_work(mad_agent_priv->qp_info->port_priv->wq,
946 &mad_agent_priv->local_work);
947 ret = 1;
948 out:
949 return ret;
950 }
951
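/*
 * Pad the RMPP payload so that the total data length becomes a multiple
 * of the per-segment payload size (mad_size - hdr_len).  For example,
 * with a 256-byte MAD and a 56-byte class header each segment carries
 * 200 payload bytes, so 300 bytes of data get 100 bytes of zero padding.
 */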
952 static int get_pad_size(int hdr_len, int data_len, size_t mad_size)
953 {
954 int seg_size, pad;
955
956 seg_size = mad_size - hdr_len;
957 if (data_len && seg_size) {
958 pad = seg_size - data_len % seg_size;
959 return pad == seg_size ? 0 : pad;
960 } else
961 return seg_size;
962 }
963
964 static void free_send_rmpp_list(struct ib_mad_send_wr_private *mad_send_wr)
965 {
966 struct ib_rmpp_segment *s, *t;
967
968 list_for_each_entry_safe(s, t, &mad_send_wr->rmpp_list, list) {
969 list_del(&s->list);
970 kfree(s);
971 }
972 }
973
974 static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr,
975 size_t mad_size, gfp_t gfp_mask)
976 {
977 struct ib_mad_send_buf *send_buf = &send_wr->send_buf;
978 struct ib_rmpp_mad *rmpp_mad = send_buf->mad;
979 struct ib_rmpp_segment *seg = NULL;
980 int left, seg_size, pad;
981
982 send_buf->seg_size = mad_size - send_buf->hdr_len;
983 send_buf->seg_rmpp_size = mad_size - IB_MGMT_RMPP_HDR;
984 seg_size = send_buf->seg_size;
985 pad = send_wr->pad;
986
987 /* Allocate data segments. */
988 for (left = send_buf->data_len + pad; left > 0; left -= seg_size) {
989 seg = kmalloc(sizeof (*seg) + seg_size, gfp_mask);
990 if (!seg) {
991 free_send_rmpp_list(send_wr);
992 return -ENOMEM;
993 }
994 seg->num = ++send_buf->seg_count;
995 list_add_tail(&seg->list, &send_wr->rmpp_list);
996 }
997
998 /* Zero any padding */
999 if (pad)
1000 memset(seg->data + seg_size - pad, 0, pad);
1001
1002 rmpp_mad->rmpp_hdr.rmpp_version = send_wr->mad_agent_priv->
1003 agent.rmpp_version;
1004 rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_DATA;
1005 ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
1006
1007 send_wr->cur_seg = container_of(send_wr->rmpp_list.next,
1008 struct ib_rmpp_segment, list);
1009 send_wr->last_ack_seg = send_wr->cur_seg;
1010 return 0;
1011 }
1012
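/*
 * True when RMPP for this agent is handled in the kernel: the agent
 * registered a non-zero rmpp_version and did not set IB_MAD_USER_RMPP,
 * in which case RMPP handling is left to the registering client.
 */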
1013 int ib_mad_kernel_rmpp_agent(const struct ib_mad_agent *agent)
1014 {
1015 return agent->rmpp_version && !(agent->flags & IB_MAD_USER_RMPP);
1016 }
1017 EXPORT_SYMBOL(ib_mad_kernel_rmpp_agent);
1018
1019 struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
1020 u32 remote_qpn, u16 pkey_index,
1021 int rmpp_active,
1022 int hdr_len, int data_len,
1023 gfp_t gfp_mask,
1024 u8 base_version)
1025 {
1026 struct ib_mad_agent_private *mad_agent_priv;
1027 struct ib_mad_send_wr_private *mad_send_wr;
1028 int pad, message_size, ret, size;
1029 void *buf;
1030 size_t mad_size;
1031 bool opa;
1032
1033 mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
1034 agent);
1035
1036 opa = rdma_cap_opa_mad(mad_agent->device, mad_agent->port_num);
1037
1038 if (opa && base_version == OPA_MGMT_BASE_VERSION)
1039 mad_size = sizeof(struct opa_mad);
1040 else
1041 mad_size = sizeof(struct ib_mad);
1042
1043 pad = get_pad_size(hdr_len, data_len, mad_size);
1044 message_size = hdr_len + data_len + pad;
1045
1046 if (ib_mad_kernel_rmpp_agent(mad_agent)) {
1047 if (!rmpp_active && message_size > mad_size)
1048 return ERR_PTR(-EINVAL);
1049 } else
1050 if (rmpp_active || message_size > mad_size)
1051 return ERR_PTR(-EINVAL);
1052
1053 size = rmpp_active ? hdr_len : mad_size;
1054 buf = kzalloc(sizeof *mad_send_wr + size, gfp_mask);
1055 if (!buf)
1056 return ERR_PTR(-ENOMEM);
1057
1058 mad_send_wr = buf + size;
1059 INIT_LIST_HEAD(&mad_send_wr->rmpp_list);
1060 mad_send_wr->send_buf.mad = buf;
1061 mad_send_wr->send_buf.hdr_len = hdr_len;
1062 mad_send_wr->send_buf.data_len = data_len;
1063 mad_send_wr->pad = pad;
1064
1065 mad_send_wr->mad_agent_priv = mad_agent_priv;
1066 mad_send_wr->sg_list[0].length = hdr_len;
1067 mad_send_wr->sg_list[0].lkey = mad_agent->qp->pd->local_dma_lkey;
1068
1069 /* OPA MADs don't have to be the full 2048 bytes */
1070 if (opa && base_version == OPA_MGMT_BASE_VERSION &&
1071 data_len < mad_size - hdr_len)
1072 mad_send_wr->sg_list[1].length = data_len;
1073 else
1074 mad_send_wr->sg_list[1].length = mad_size - hdr_len;
1075
1076 mad_send_wr->sg_list[1].lkey = mad_agent->qp->pd->local_dma_lkey;
1077
1078 mad_send_wr->mad_list.cqe.done = ib_mad_send_done;
1079
1080 mad_send_wr->send_wr.wr.wr_cqe = &mad_send_wr->mad_list.cqe;
1081 mad_send_wr->send_wr.wr.sg_list = mad_send_wr->sg_list;
1082 mad_send_wr->send_wr.wr.num_sge = 2;
1083 mad_send_wr->send_wr.wr.opcode = IB_WR_SEND;
1084 mad_send_wr->send_wr.wr.send_flags = IB_SEND_SIGNALED;
1085 mad_send_wr->send_wr.remote_qpn = remote_qpn;
1086 mad_send_wr->send_wr.remote_qkey = IB_QP_SET_QKEY;
1087 mad_send_wr->send_wr.pkey_index = pkey_index;
1088
1089 if (rmpp_active) {
1090 ret = alloc_send_rmpp_list(mad_send_wr, mad_size, gfp_mask);
1091 if (ret) {
1092 kfree(buf);
1093 return ERR_PTR(ret);
1094 }
1095 }
1096
1097 mad_send_wr->send_buf.mad_agent = mad_agent;
1098 atomic_inc(&mad_agent_priv->refcount);
1099 return &mad_send_wr->send_buf;
1100 }
1101 EXPORT_SYMBOL(ib_create_send_mad);
1102
1103 int ib_get_mad_data_offset(u8 mgmt_class)
1104 {
1105 if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
1106 return IB_MGMT_SA_HDR;
1107 else if ((mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
1108 (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
1109 (mgmt_class == IB_MGMT_CLASS_BIS))
1110 return IB_MGMT_DEVICE_HDR;
1111 else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
1112 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
1113 return IB_MGMT_VENDOR_HDR;
1114 else
1115 return IB_MGMT_MAD_HDR;
1116 }
1117 EXPORT_SYMBOL(ib_get_mad_data_offset);
1118
1119 int ib_is_mad_class_rmpp(u8 mgmt_class)
1120 {
1121 if ((mgmt_class == IB_MGMT_CLASS_SUBN_ADM) ||
1122 (mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
1123 (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
1124 (mgmt_class == IB_MGMT_CLASS_BIS) ||
1125 ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
1126 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)))
1127 return 1;
1128 return 0;
1129 }
1130 EXPORT_SYMBOL(ib_is_mad_class_rmpp);
1131
1132 void *ib_get_rmpp_segment(struct ib_mad_send_buf *send_buf, int seg_num)
1133 {
1134 struct ib_mad_send_wr_private *mad_send_wr;
1135 struct list_head *list;
1136
1137 mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
1138 send_buf);
1139 list = &mad_send_wr->cur_seg->list;
1140
1141 if (mad_send_wr->cur_seg->num < seg_num) {
1142 list_for_each_entry(mad_send_wr->cur_seg, list, list)
1143 if (mad_send_wr->cur_seg->num == seg_num)
1144 break;
1145 } else if (mad_send_wr->cur_seg->num > seg_num) {
1146 list_for_each_entry_reverse(mad_send_wr->cur_seg, list, list)
1147 if (mad_send_wr->cur_seg->num == seg_num)
1148 break;
1149 }
1150 return mad_send_wr->cur_seg->data;
1151 }
1152 EXPORT_SYMBOL(ib_get_rmpp_segment);
1153
1154 static inline void *ib_get_payload(struct ib_mad_send_wr_private *mad_send_wr)
1155 {
1156 if (mad_send_wr->send_buf.seg_count)
1157 return ib_get_rmpp_segment(&mad_send_wr->send_buf,
1158 mad_send_wr->seg_num);
1159 else
1160 return mad_send_wr->send_buf.mad +
1161 mad_send_wr->send_buf.hdr_len;
1162 }
1163
1164 void ib_free_send_mad(struct ib_mad_send_buf *send_buf)
1165 {
1166 struct ib_mad_agent_private *mad_agent_priv;
1167 struct ib_mad_send_wr_private *mad_send_wr;
1168
1169 mad_agent_priv = container_of(send_buf->mad_agent,
1170 struct ib_mad_agent_private, agent);
1171 mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
1172 send_buf);
1173
1174 free_send_rmpp_list(mad_send_wr);
1175 kfree(send_buf->mad);
1176 deref_mad_agent(mad_agent_priv);
1177 }
1178 EXPORT_SYMBOL(ib_free_send_mad);
1179
1180 int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
1181 {
1182 struct ib_mad_qp_info *qp_info;
1183 struct list_head *list;
1184 struct ib_mad_agent *mad_agent;
1185 struct ib_sge *sge;
1186 unsigned long flags;
1187 int ret;
1188
1189 /* Set WR ID to find mad_send_wr upon completion */
1190 qp_info = mad_send_wr->mad_agent_priv->qp_info;
1191 mad_send_wr->mad_list.mad_queue = &qp_info->send_queue;
1192 mad_send_wr->mad_list.cqe.done = ib_mad_send_done;
1193 mad_send_wr->send_wr.wr.wr_cqe = &mad_send_wr->mad_list.cqe;
1194
1195 mad_agent = mad_send_wr->send_buf.mad_agent;
1196 sge = mad_send_wr->sg_list;
1197 sge[0].addr = ib_dma_map_single(mad_agent->device,
1198 mad_send_wr->send_buf.mad,
1199 sge[0].length,
1200 DMA_TO_DEVICE);
1201 if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[0].addr)))
1202 return -ENOMEM;
1203
1204 mad_send_wr->header_mapping = sge[0].addr;
1205
1206 sge[1].addr = ib_dma_map_single(mad_agent->device,
1207 ib_get_payload(mad_send_wr),
1208 sge[1].length,
1209 DMA_TO_DEVICE);
1210 if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[1].addr))) {
1211 ib_dma_unmap_single(mad_agent->device,
1212 mad_send_wr->header_mapping,
1213 sge[0].length, DMA_TO_DEVICE);
1214 return -ENOMEM;
1215 }
1216 mad_send_wr->payload_mapping = sge[1].addr;
1217
1218 spin_lock_irqsave(&qp_info->send_queue.lock, flags);
1219 if (qp_info->send_queue.count < qp_info->send_queue.max_active) {
1220 ret = ib_post_send(mad_agent->qp, &mad_send_wr->send_wr.wr,
1221 NULL);
1222 list = &qp_info->send_queue.list;
1223 } else {
1224 ret = 0;
1225 list = &qp_info->overflow_list;
1226 }
1227
1228 if (!ret) {
1229 qp_info->send_queue.count++;
1230 list_add_tail(&mad_send_wr->mad_list.list, list);
1231 }
1232 spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
1233 if (ret) {
1234 ib_dma_unmap_single(mad_agent->device,
1235 mad_send_wr->header_mapping,
1236 sge[0].length, DMA_TO_DEVICE);
1237 ib_dma_unmap_single(mad_agent->device,
1238 mad_send_wr->payload_mapping,
1239 sge[1].length, DMA_TO_DEVICE);
1240 }
1241 return ret;
1242 }
1243
1244 /*
1245 * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated
1246 * with the registered client
1247 */
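/*
 * For each buffer in the chain: directed-route SMPs may be consumed
 * locally via handle_outgoing_dr_smp(), kernel-RMPP agents go through
 * ib_send_rmpp_mad(), and everything else is posted with ib_send_mad().
 * On failure, *bad_send_buf (if provided) points at the buffer that
 * could not be posted.
 */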
1248 int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
1249 struct ib_mad_send_buf **bad_send_buf)
1250 {
1251 struct ib_mad_agent_private *mad_agent_priv;
1252 struct ib_mad_send_buf *next_send_buf;
1253 struct ib_mad_send_wr_private *mad_send_wr;
1254 unsigned long flags;
1255 int ret = -EINVAL;
1256
1257 /* Walk list of send WRs and post each on send list */
1258 for (; send_buf; send_buf = next_send_buf) {
1259 mad_send_wr = container_of(send_buf,
1260 struct ib_mad_send_wr_private,
1261 send_buf);
1262 mad_agent_priv = mad_send_wr->mad_agent_priv;
1263
1264 ret = ib_mad_enforce_security(mad_agent_priv,
1265 mad_send_wr->send_wr.pkey_index);
1266 if (ret)
1267 goto error;
1268
1269 if (!send_buf->mad_agent->send_handler ||
1270 (send_buf->timeout_ms &&
1271 !send_buf->mad_agent->recv_handler)) {
1272 ret = -EINVAL;
1273 goto error;
1274 }
1275
1276 if (!ib_is_mad_class_rmpp(((struct ib_mad_hdr *) send_buf->mad)->mgmt_class)) {
1277 if (mad_agent_priv->agent.rmpp_version) {
1278 ret = -EINVAL;
1279 goto error;
1280 }
1281 }
1282
1283 /*
1284 * Save pointer to next work request to post in case the
1285 * current one completes, and the user modifies the work
1286 * request associated with the completion
1287 */
1288 next_send_buf = send_buf->next;
1289 mad_send_wr->send_wr.ah = send_buf->ah;
1290
1291 if (((struct ib_mad_hdr *) send_buf->mad)->mgmt_class ==
1292 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
1293 ret = handle_outgoing_dr_smp(mad_agent_priv,
1294 mad_send_wr);
1295 if (ret < 0) /* error */
1296 goto error;
1297 else if (ret == 1) /* locally consumed */
1298 continue;
1299 }
1300
1301 mad_send_wr->tid = ((struct ib_mad_hdr *) send_buf->mad)->tid;
1302 /* Timeout will be updated after send completes */
1303 mad_send_wr->timeout = msecs_to_jiffies(send_buf->timeout_ms);
1304 mad_send_wr->max_retries = send_buf->retries;
1305 mad_send_wr->retries_left = send_buf->retries;
1306 send_buf->retries = 0;
1307 /* Reference for work request to QP + response */
1308 mad_send_wr->refcount = 1 + (mad_send_wr->timeout > 0);
1309 mad_send_wr->status = IB_WC_SUCCESS;
1310
1311 /* Reference MAD agent until send completes */
1312 atomic_inc(&mad_agent_priv->refcount);
1313 spin_lock_irqsave(&mad_agent_priv->lock, flags);
1314 list_add_tail(&mad_send_wr->agent_list,
1315 &mad_agent_priv->send_list);
1316 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1317
1318 if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
1319 ret = ib_send_rmpp_mad(mad_send_wr);
1320 if (ret >= 0 && ret != IB_RMPP_RESULT_CONSUMED)
1321 ret = ib_send_mad(mad_send_wr);
1322 } else
1323 ret = ib_send_mad(mad_send_wr);
1324 if (ret < 0) {
1325 /* Fail send request */
1326 spin_lock_irqsave(&mad_agent_priv->lock, flags);
1327 list_del(&mad_send_wr->agent_list);
1328 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1329 atomic_dec(&mad_agent_priv->refcount);
1330 goto error;
1331 }
1332 }
1333 return 0;
1334 error:
1335 if (bad_send_buf)
1336 *bad_send_buf = send_buf;
1337 return ret;
1338 }
1339 EXPORT_SYMBOL(ib_post_send_mad);
1340
1341 /*
1342 * ib_free_recv_mad - Returns data buffers used to receive
1343 * a MAD to the access layer
1344 */
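/*
 * Every receive buffer on the rmpp_list is embedded in an ib_mad_private;
 * the list is spliced onto a local list and each containing structure is
 * freed in turn.
 */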
1345 void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc)
1346 {
1347 struct ib_mad_recv_buf *mad_recv_buf, *temp_recv_buf;
1348 struct ib_mad_private_header *mad_priv_hdr;
1349 struct ib_mad_private *priv;
1350 struct list_head free_list;
1351
1352 INIT_LIST_HEAD(&free_list);
1353 list_splice_init(&mad_recv_wc->rmpp_list, &free_list);
1354
1355 list_for_each_entry_safe(mad_recv_buf, temp_recv_buf,
1356 &free_list, list) {
1357 mad_recv_wc = container_of(mad_recv_buf, struct ib_mad_recv_wc,
1358 recv_buf);
1359 mad_priv_hdr = container_of(mad_recv_wc,
1360 struct ib_mad_private_header,
1361 recv_wc);
1362 priv = container_of(mad_priv_hdr, struct ib_mad_private,
1363 header);
1364 kfree(priv);
1365 }
1366 }
1367 EXPORT_SYMBOL(ib_free_recv_mad);
1368
1369 struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp,
1370 u8 rmpp_version,
1371 ib_mad_send_handler send_handler,
1372 ib_mad_recv_handler recv_handler,
1373 void *context)
1374 {
1375 return ERR_PTR(-EINVAL); /* XXX: for now */
1376 }
1377 EXPORT_SYMBOL(ib_redirect_mad_qp);
1378
1379 int ib_process_mad_wc(struct ib_mad_agent *mad_agent,
1380 struct ib_wc *wc)
1381 {
1382 dev_err(&mad_agent->device->dev,
1383 "ib_process_mad_wc() not implemented yet\n");
1384 return 0;
1385 }
1386 EXPORT_SYMBOL(ib_process_mad_wc);
1387
1388 static int method_in_use(struct ib_mad_mgmt_method_table **method,
1389 struct ib_mad_reg_req *mad_reg_req)
1390 {
1391 int i;
1392
1393 for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS) {
1394 if ((*method)->agent[i]) {
1395 pr_err("Method %d already in use\n", i);
1396 return -EINVAL;
1397 }
1398 }
1399 return 0;
1400 }
1401
1402 static int allocate_method_table(struct ib_mad_mgmt_method_table **method)
1403 {
1404 /* Allocate management method table */
1405 *method = kzalloc(sizeof **method, GFP_ATOMIC);
1406 return (*method) ? 0 : (-ENOMEM);
1407 }
1408
1409 /*
1410 * Check to see if there are any methods still in use
1411 */
1412 static int check_method_table(struct ib_mad_mgmt_method_table *method)
1413 {
1414 int i;
1415
1416 for (i = 0; i < IB_MGMT_MAX_METHODS; i++)
1417 if (method->agent[i])
1418 return 1;
1419 return 0;
1420 }
1421
1422 /*
1423 * Check to see if there are any method tables for this class still in use
1424 */
1425 static int check_class_table(struct ib_mad_mgmt_class_table *class)
1426 {
1427 int i;
1428
1429 for (i = 0; i < MAX_MGMT_CLASS; i++)
1430 if (class->method_table[i])
1431 return 1;
1432 return 0;
1433 }
1434
1435 static int check_vendor_class(struct ib_mad_mgmt_vendor_class *vendor_class)
1436 {
1437 int i;
1438
1439 for (i = 0; i < MAX_MGMT_OUI; i++)
1440 if (vendor_class->method_table[i])
1441 return 1;
1442 return 0;
1443 }
1444
1445 static int find_vendor_oui(struct ib_mad_mgmt_vendor_class *vendor_class,
1446 const char *oui)
1447 {
1448 int i;
1449
1450 for (i = 0; i < MAX_MGMT_OUI; i++)
1451		/* Is there a matching OUI for this vendor class? */
1452 if (!memcmp(vendor_class->oui[i], oui, 3))
1453 return i;
1454
1455 return -1;
1456 }
1457
1458 static int check_vendor_table(struct ib_mad_mgmt_vendor_class_table *vendor)
1459 {
1460 int i;
1461
1462 for (i = 0; i < MAX_MGMT_VENDOR_RANGE2; i++)
1463 if (vendor->vendor_class[i])
1464 return 1;
1465
1466 return 0;
1467 }
1468
1469 static void remove_methods_mad_agent(struct ib_mad_mgmt_method_table *method,
1470 struct ib_mad_agent_private *agent)
1471 {
1472 int i;
1473
1474 /* Remove any methods for this mad agent */
1475 for (i = 0; i < IB_MGMT_MAX_METHODS; i++) {
1476 if (method->agent[i] == agent) {
1477 method->agent[i] = NULL;
1478 }
1479 }
1480 }
1481
1482 static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
1483 struct ib_mad_agent_private *agent_priv,
1484 u8 mgmt_class)
1485 {
1486 struct ib_mad_port_private *port_priv;
1487 struct ib_mad_mgmt_class_table **class;
1488 struct ib_mad_mgmt_method_table **method;
1489 int i, ret;
1490
1491 port_priv = agent_priv->qp_info->port_priv;
1492 class = &port_priv->version[mad_reg_req->mgmt_class_version].class;
1493 if (!*class) {
1494 /* Allocate management class table for "new" class version */
1495 *class = kzalloc(sizeof **class, GFP_ATOMIC);
1496 if (!*class) {
1497 ret = -ENOMEM;
1498 goto error1;
1499 }
1500
1501 /* Allocate method table for this management class */
1502 method = &(*class)->method_table[mgmt_class];
1503 if ((ret = allocate_method_table(method)))
1504 goto error2;
1505 } else {
1506 method = &(*class)->method_table[mgmt_class];
1507 if (!*method) {
1508 /* Allocate method table for this management class */
1509 if ((ret = allocate_method_table(method)))
1510 goto error1;
1511 }
1512 }
1513
1514 /* Now, make sure methods are not already in use */
1515 if (method_in_use(method, mad_reg_req))
1516 goto error3;
1517
1518 /* Finally, add in methods being registered */
1519 for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS)
1520 (*method)->agent[i] = agent_priv;
1521
1522 return 0;
1523
1524 error3:
1525 /* Remove any methods for this mad agent */
1526 remove_methods_mad_agent(*method, agent_priv);
1527 /* Now, check to see if there are any methods in use */
1528 if (!check_method_table(*method)) {
1529 /* If not, release management method table */
1530 kfree(*method);
1531 *method = NULL;
1532 }
1533 ret = -EINVAL;
1534 goto error1;
1535 error2:
1536 kfree(*class);
1537 *class = NULL;
1538 error1:
1539 return ret;
1540 }
1541
1542 static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
1543 struct ib_mad_agent_private *agent_priv)
1544 {
1545 struct ib_mad_port_private *port_priv;
1546 struct ib_mad_mgmt_vendor_class_table **vendor_table;
1547 struct ib_mad_mgmt_vendor_class_table *vendor = NULL;
1548 struct ib_mad_mgmt_vendor_class *vendor_class = NULL;
1549 struct ib_mad_mgmt_method_table **method;
1550 int i, ret = -ENOMEM;
1551 u8 vclass;
1552
1553 /* "New" vendor (with OUI) class */
1554 vclass = vendor_class_index(mad_reg_req->mgmt_class);
1555 port_priv = agent_priv->qp_info->port_priv;
1556 vendor_table = &port_priv->version[
1557 mad_reg_req->mgmt_class_version].vendor;
1558 if (!*vendor_table) {
1559 /* Allocate mgmt vendor class table for "new" class version */
1560 vendor = kzalloc(sizeof *vendor, GFP_ATOMIC);
1561 if (!vendor)
1562 goto error1;
1563
1564 *vendor_table = vendor;
1565 }
1566 if (!(*vendor_table)->vendor_class[vclass]) {
1567 /* Allocate table for this management vendor class */
1568 vendor_class = kzalloc(sizeof *vendor_class, GFP_ATOMIC);
1569 if (!vendor_class)
1570 goto error2;
1571
1572 (*vendor_table)->vendor_class[vclass] = vendor_class;
1573 }
1574 for (i = 0; i < MAX_MGMT_OUI; i++) {
1575		/* Is there a matching OUI for this vendor class? */
1576 if (!memcmp((*vendor_table)->vendor_class[vclass]->oui[i],
1577 mad_reg_req->oui, 3)) {
1578 method = &(*vendor_table)->vendor_class[
1579 vclass]->method_table[i];
1580 if (!*method)
1581 goto error3;
1582 goto check_in_use;
1583 }
1584 }
1585 for (i = 0; i < MAX_MGMT_OUI; i++) {
1586		/* OUI slot available? */
1587 if (!is_vendor_oui((*vendor_table)->vendor_class[
1588 vclass]->oui[i])) {
1589 method = &(*vendor_table)->vendor_class[
1590 vclass]->method_table[i];
1591 /* Allocate method table for this OUI */
1592 if (!*method) {
1593 ret = allocate_method_table(method);
1594 if (ret)
1595 goto error3;
1596 }
1597 memcpy((*vendor_table)->vendor_class[vclass]->oui[i],
1598 mad_reg_req->oui, 3);
1599 goto check_in_use;
1600 }
1601 }
1602 dev_err(&agent_priv->agent.device->dev, "All OUI slots in use\n");
1603 goto error3;
1604
1605 check_in_use:
1606 /* Now, make sure methods are not already in use */
1607 if (method_in_use(method, mad_reg_req))
1608 goto error4;
1609
1610 /* Finally, add in methods being registered */
1611 for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS)
1612 (*method)->agent[i] = agent_priv;
1613
1614 return 0;
1615
1616 error4:
1617 /* Remove any methods for this mad agent */
1618 remove_methods_mad_agent(*method, agent_priv);
1619 /* Now, check to see if there are any methods in use */
1620 if (!check_method_table(*method)) {
1621 /* If not, release management method table */
1622 kfree(*method);
1623 *method = NULL;
1624 }
1625 ret = -EINVAL;
1626 error3:
1627 if (vendor_class) {
1628 (*vendor_table)->vendor_class[vclass] = NULL;
1629 kfree(vendor_class);
1630 }
1631 error2:
1632 if (vendor) {
1633 *vendor_table = NULL;
1634 kfree(vendor);
1635 }
1636 error1:
1637 return ret;
1638 }
1639
1640 static void remove_mad_reg_req(struct ib_mad_agent_private *agent_priv)
1641 {
1642 struct ib_mad_port_private *port_priv;
1643 struct ib_mad_mgmt_class_table *class;
1644 struct ib_mad_mgmt_method_table *method;
1645 struct ib_mad_mgmt_vendor_class_table *vendor;
1646 struct ib_mad_mgmt_vendor_class *vendor_class;
1647 int index;
1648 u8 mgmt_class;
1649
1650 /*
1651	 * Was a MAD registration request supplied
1652	 * with the original registration?
1653 */
1654 if (!agent_priv->reg_req) {
1655 goto out;
1656 }
1657
1658 port_priv = agent_priv->qp_info->port_priv;
1659 mgmt_class = convert_mgmt_class(agent_priv->reg_req->mgmt_class);
1660 class = port_priv->version[
1661 agent_priv->reg_req->mgmt_class_version].class;
1662 if (!class)
1663 goto vendor_check;
1664
1665 method = class->method_table[mgmt_class];
1666 if (method) {
1667 /* Remove any methods for this mad agent */
1668 remove_methods_mad_agent(method, agent_priv);
1669 /* Now, check to see if there are any methods still in use */
1670 if (!check_method_table(method)) {
1671 /* If not, release management method table */
1672 kfree(method);
1673 class->method_table[mgmt_class] = NULL;
1674			/* Any management classes left? */
1675 if (!check_class_table(class)) {
1676 /* If not, release management class table */
1677 kfree(class);
1678 port_priv->version[
1679 agent_priv->reg_req->
1680 mgmt_class_version].class = NULL;
1681 }
1682 }
1683 }
1684
1685 vendor_check:
1686 if (!is_vendor_class(mgmt_class))
1687 goto out;
1688
1689 /* normalize mgmt_class to vendor range 2 */
1690 mgmt_class = vendor_class_index(agent_priv->reg_req->mgmt_class);
1691 vendor = port_priv->version[
1692 agent_priv->reg_req->mgmt_class_version].vendor;
1693
1694 if (!vendor)
1695 goto out;
1696
1697 vendor_class = vendor->vendor_class[mgmt_class];
1698 if (vendor_class) {
1699 index = find_vendor_oui(vendor_class, agent_priv->reg_req->oui);
1700 if (index < 0)
1701 goto out;
1702 method = vendor_class->method_table[index];
1703 if (method) {
1704 /* Remove any methods for this mad agent */
1705 remove_methods_mad_agent(method, agent_priv);
1706 /*
1707 * Now, check to see if there are
1708 * any methods still in use
1709 */
1710 if (!check_method_table(method)) {
1711 /* If not, release management method table */
1712 kfree(method);
1713 vendor_class->method_table[index] = NULL;
1714 memset(vendor_class->oui[index], 0, 3);
1715 /* Any OUIs left? */
1716 if (!check_vendor_class(vendor_class)) {
1717 /* If not, release vendor class table */
1718 kfree(vendor_class);
1719 vendor->vendor_class[mgmt_class] = NULL;
1720 /* Any other vendor classes left? */
1721 if (!check_vendor_table(vendor)) {
1722 kfree(vendor);
1723 port_priv->version[
1724 agent_priv->reg_req->
1725 mgmt_class_version].
1726 vendor = NULL;
1727 }
1728 }
1729 }
1730 }
1731 }
1732
1733 out:
1734 return;
1735 }
1736
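/*
 * Find the agent that should receive an incoming MAD.  Responses are
 * routed by the upper 32 bits of the transaction ID (the agent ID);
 * requests are routed by class version, management class and method,
 * plus OUI for vendor classes.  A reference is taken on the agent
 * that is returned.
 */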
1737 static struct ib_mad_agent_private *
1738 find_mad_agent(struct ib_mad_port_private *port_priv,
1739 const struct ib_mad_hdr *mad_hdr)
1740 {
1741 struct ib_mad_agent_private *mad_agent = NULL;
1742 unsigned long flags;
1743
1744 if (ib_response_mad(mad_hdr)) {
1745 u32 hi_tid;
1746
1747 /*
1748 * Routing is based on high 32 bits of transaction ID
1749 * of MAD.
1750 */
1751 hi_tid = be64_to_cpu(mad_hdr->tid) >> 32;
1752 rcu_read_lock();
1753 mad_agent = idr_find(&ib_mad_clients, hi_tid);
1754 if (mad_agent && !atomic_inc_not_zero(&mad_agent->refcount))
1755 mad_agent = NULL;
1756 rcu_read_unlock();
1757 } else {
1758 struct ib_mad_mgmt_class_table *class;
1759 struct ib_mad_mgmt_method_table *method;
1760 struct ib_mad_mgmt_vendor_class_table *vendor;
1761 struct ib_mad_mgmt_vendor_class *vendor_class;
1762 const struct ib_vendor_mad *vendor_mad;
1763 int index;
1764
1765 spin_lock_irqsave(&port_priv->reg_lock, flags);
1766 /*
1767 * Routing is based on version, class, and method
1768 * For "newer" vendor MADs, also based on OUI
1769 */
1770 if (mad_hdr->class_version >= MAX_MGMT_VERSION)
1771 goto out;
1772 if (!is_vendor_class(mad_hdr->mgmt_class)) {
1773 class = port_priv->version[
1774 mad_hdr->class_version].class;
1775 if (!class)
1776 goto out;
1777 if (convert_mgmt_class(mad_hdr->mgmt_class) >=
1778 ARRAY_SIZE(class->method_table))
1779 goto out;
1780 method = class->method_table[convert_mgmt_class(
1781 mad_hdr->mgmt_class)];
1782 if (method)
1783 mad_agent = method->agent[mad_hdr->method &
1784 ~IB_MGMT_METHOD_RESP];
1785 } else {
1786 vendor = port_priv->version[
1787 mad_hdr->class_version].vendor;
1788 if (!vendor)
1789 goto out;
1790 vendor_class = vendor->vendor_class[vendor_class_index(
1791 mad_hdr->mgmt_class)];
1792 if (!vendor_class)
1793 goto out;
1794 /* Find matching OUI */
1795 vendor_mad = (const struct ib_vendor_mad *)mad_hdr;
1796 index = find_vendor_oui(vendor_class, vendor_mad->oui);
1797 if (index == -1)
1798 goto out;
1799 method = vendor_class->method_table[index];
1800 if (method) {
1801 mad_agent = method->agent[mad_hdr->method &
1802 ~IB_MGMT_METHOD_RESP];
1803 }
1804 }
1805 if (mad_agent)
1806 atomic_inc(&mad_agent->refcount);
1807 out:
1808 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
1809 }
1810
1811 if (mad_agent && !mad_agent->agent.recv_handler) {
1812 dev_notice(&port_priv->device->dev,
1813 "No receive handler for client %p on port %d\n",
1814 &mad_agent->agent, port_priv->port_num);
1815 deref_mad_agent(mad_agent);
1816 mad_agent = NULL;
1817 }
1818
1819 return mad_agent;
1820 }
1821
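/*
 * Sanity check a received MAD: the base version must be supported,
 * SMP classes are accepted only on QP0, CM attributes other than
 * ClassPortInfo must use the Send method, and all other classes must
 * not arrive on QP0.
 */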
1822 static int validate_mad(const struct ib_mad_hdr *mad_hdr,
1823 const struct ib_mad_qp_info *qp_info,
1824 bool opa)
1825 {
1826 int valid = 0;
1827 u32 qp_num = qp_info->qp->qp_num;
1828
1829 /* Make sure MAD base version is understood */
1830 if (mad_hdr->base_version != IB_MGMT_BASE_VERSION &&
1831 (!opa || mad_hdr->base_version != OPA_MGMT_BASE_VERSION)) {
1832 pr_err("MAD received with unsupported base version %d %s\n",
1833 mad_hdr->base_version, opa ? "(opa)" : "");
1834 goto out;
1835 }
1836
1837 /* Filter SMI packets sent to other than QP0 */
1838 if ((mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
1839 (mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
1840 if (qp_num == 0)
1841 valid = 1;
1842 } else {
1843 /* CM attributes other than ClassPortInfo only use Send method */
1844 if ((mad_hdr->mgmt_class == IB_MGMT_CLASS_CM) &&
1845 (mad_hdr->attr_id != IB_MGMT_CLASSPORTINFO_ATTR_ID) &&
1846 (mad_hdr->method != IB_MGMT_METHOD_SEND))
1847 goto out;
1848 /* Filter GSI packets sent to QP0 */
1849 if (qp_num != 0)
1850 valid = 1;
1851 }
1852
1853 out:
1854 return valid;
1855 }
1856
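/*
 * True unless the MAD is a non-DATA segment of an active RMPP transfer
 * handled by the kernel RMPP code; only MADs for which this returns
 * true are matched against the agent's send list.
 */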
1857 static int is_rmpp_data_mad(const struct ib_mad_agent_private *mad_agent_priv,
1858 const struct ib_mad_hdr *mad_hdr)
1859 {
1860 struct ib_rmpp_mad *rmpp_mad;
1861
1862 rmpp_mad = (struct ib_rmpp_mad *)mad_hdr;
1863 return !mad_agent_priv->agent.rmpp_version ||
1864 !ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent) ||
1865 !(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
1866 IB_MGMT_RMPP_FLAG_ACTIVE) ||
1867 (rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_DATA);
1868 }
1869
1870 static inline int rcv_has_same_class(const struct ib_mad_send_wr_private *wr,
1871 const struct ib_mad_recv_wc *rwc)
1872 {
1873 return ((struct ib_mad_hdr *)(wr->send_buf.mad))->mgmt_class ==
1874 rwc->recv_buf.mad->mad_hdr.mgmt_class;
1875 }
1876
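/*
 * Compare the address of an outstanding send against the source of a
 * received MAD, using the GID when a GRH is present and the LID (and
 * path bits, subject to the LMC) otherwise.
 */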
1877 static inline int rcv_has_same_gid(const struct ib_mad_agent_private *mad_agent_priv,
1878 const struct ib_mad_send_wr_private *wr,
1879 const struct ib_mad_recv_wc *rwc)
1880 {
1881 struct rdma_ah_attr attr;
1882 u8 send_resp, rcv_resp;
1883 union ib_gid sgid;
1884 struct ib_device *device = mad_agent_priv->agent.device;
1885 u8 port_num = mad_agent_priv->agent.port_num;
1886 u8 lmc;
1887 bool has_grh;
1888
1889 send_resp = ib_response_mad((struct ib_mad_hdr *)wr->send_buf.mad);
1890 rcv_resp = ib_response_mad(&rwc->recv_buf.mad->mad_hdr);
1891
1892 if (send_resp == rcv_resp)
1893 /* both requests, or both responses. GIDs different */
1894 return 0;
1895
1896 if (rdma_query_ah(wr->send_buf.ah, &attr))
1897 /* Assume not equal, to avoid false positives. */
1898 return 0;
1899
1900 has_grh = !!(rdma_ah_get_ah_flags(&attr) & IB_AH_GRH);
1901 if (has_grh != !!(rwc->wc->wc_flags & IB_WC_GRH))
1902 /* one has GID, other does not. Assume different */
1903 return 0;
1904
1905 if (!send_resp && rcv_resp) {
1906 /* is request/response. */
1907 if (!has_grh) {
1908 if (ib_get_cached_lmc(device, port_num, &lmc))
1909 return 0;
1910 return (!lmc || !((rdma_ah_get_path_bits(&attr) ^
1911 rwc->wc->dlid_path_bits) &
1912 ((1 << lmc) - 1)));
1913 } else {
1914 const struct ib_global_route *grh =
1915 rdma_ah_read_grh(&attr);
1916
1917 if (rdma_query_gid(device, port_num,
1918 grh->sgid_index, &sgid))
1919 return 0;
1920 return !memcmp(sgid.raw, rwc->recv_buf.grh->dgid.raw,
1921 16);
1922 }
1923 }
1924
1925 if (!has_grh)
1926 return rdma_ah_get_dlid(&attr) == rwc->wc->slid;
1927 else
1928 return !memcmp(rdma_ah_read_grh(&attr)->dgid.raw,
1929 rwc->recv_buf.grh->sgid.raw,
1930 16);
1931 }
1932
1933 static inline int is_direct(u8 class)
1934 {
1935 return (class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE);
1936 }
1937
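/*
 * Find the outstanding send that a received response completes: match
 * on TID and management class, and on GID/LID unless the MAD is
 * directed route.  The wait list is searched first, then the send list
 * in case the response arrived before the send completion.
 */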
1938 struct ib_mad_send_wr_private*
1939 ib_find_send_mad(const struct ib_mad_agent_private *mad_agent_priv,
1940 const struct ib_mad_recv_wc *wc)
1941 {
1942 struct ib_mad_send_wr_private *wr;
1943 const struct ib_mad_hdr *mad_hdr;
1944
1945 mad_hdr = &wc->recv_buf.mad->mad_hdr;
1946
1947 list_for_each_entry(wr, &mad_agent_priv->wait_list, agent_list) {
1948 if ((wr->tid == mad_hdr->tid) &&
1949 rcv_has_same_class(wr, wc) &&
1950 /*
1951 * Don't check GID for direct routed MADs.
1952 * These might have permissive LIDs.
1953 */
1954 (is_direct(mad_hdr->mgmt_class) ||
1955 rcv_has_same_gid(mad_agent_priv, wr, wc)))
1956 return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
1957 }
1958
1959 /*
1960 * It's possible to receive the response before we've
1961 * been notified that the send has completed
1962 */
1963 list_for_each_entry(wr, &mad_agent_priv->send_list, agent_list) {
1964 if (is_rmpp_data_mad(mad_agent_priv, wr->send_buf.mad) &&
1965 wr->tid == mad_hdr->tid &&
1966 wr->timeout &&
1967 rcv_has_same_class(wr, wc) &&
1968 /*
1969 * Don't check GID for direct routed MADs.
1970 * These might have permissive LIDs.
1971 */
1972 (is_direct(mad_hdr->mgmt_class) ||
1973 rcv_has_same_gid(mad_agent_priv, wr, wc)))
1974 /* Verify request has not been canceled */
1975 return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
1976 }
1977 return NULL;
1978 }
1979
1980 void ib_mark_mad_done(struct ib_mad_send_wr_private *mad_send_wr)
1981 {
1982 mad_send_wr->timeout = 0;
1983 if (mad_send_wr->refcount == 1)
1984 list_move_tail(&mad_send_wr->agent_list,
1985 &mad_send_wr->mad_agent_priv->done_list);
1986 }
1987
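/*
 * Deliver a received MAD to its agent.  A response first completes the
 * matching outstanding send, if one is found; unsolicited MADs go
 * straight to the agent's recv_handler.  The caller's reference on the
 * agent is consumed, and the receive buffer is freed on error paths.
 */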
1988 static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
1989 struct ib_mad_recv_wc *mad_recv_wc)
1990 {
1991 struct ib_mad_send_wr_private *mad_send_wr;
1992 struct ib_mad_send_wc mad_send_wc;
1993 unsigned long flags;
1994 int ret;
1995
1996 INIT_LIST_HEAD(&mad_recv_wc->rmpp_list);
1997 ret = ib_mad_enforce_security(mad_agent_priv,
1998 mad_recv_wc->wc->pkey_index);
1999 if (ret) {
2000 ib_free_recv_mad(mad_recv_wc);
2001 deref_mad_agent(mad_agent_priv);
2002 return;
2003 }
2004
2005 list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list);
2006 if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
2007 mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv,
2008 mad_recv_wc);
2009 if (!mad_recv_wc) {
2010 deref_mad_agent(mad_agent_priv);
2011 return;
2012 }
2013 }
2014
2015 /* Complete corresponding request */
2016 if (ib_response_mad(&mad_recv_wc->recv_buf.mad->mad_hdr)) {
2017 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2018 mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc);
2019 if (!mad_send_wr) {
2020 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2021 if (!ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)
2022 && ib_is_mad_class_rmpp(mad_recv_wc->recv_buf.mad->mad_hdr.mgmt_class)
2023 && (ib_get_rmpp_flags(&((struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad)->rmpp_hdr)
2024 & IB_MGMT_RMPP_FLAG_ACTIVE)) {
2025 /* user rmpp is in effect
2026 * and this is an active RMPP MAD
2027 */
2028 mad_agent_priv->agent.recv_handler(
2029 &mad_agent_priv->agent, NULL,
2030 mad_recv_wc);
2031 atomic_dec(&mad_agent_priv->refcount);
2032 } else {
2033 /* not user rmpp, revert to normal behavior and
2034 * drop the mad */
2035 ib_free_recv_mad(mad_recv_wc);
2036 deref_mad_agent(mad_agent_priv);
2037 return;
2038 }
2039 } else {
2040 ib_mark_mad_done(mad_send_wr);
2041 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2042
2043 /* Defined behavior is to complete response before request */
2044 mad_agent_priv->agent.recv_handler(
2045 &mad_agent_priv->agent,
2046 &mad_send_wr->send_buf,
2047 mad_recv_wc);
2048 atomic_dec(&mad_agent_priv->refcount);
2049
2050 mad_send_wc.status = IB_WC_SUCCESS;
2051 mad_send_wc.vendor_err = 0;
2052 mad_send_wc.send_buf = &mad_send_wr->send_buf;
2053 ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
2054 }
2055 } else {
2056 mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent, NULL,
2057 mad_recv_wc);
2058 deref_mad_agent(mad_agent_priv);
2059 }
2060
2061 return;
2062 }
2063
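/*
 * Process a received directed route SMP: validate and update the DR
 * fields, decide whether it is destined for the local SMA, or (on a
 * switch) forward it out the egress port.  Returns IB_SMI_HANDLE if
 * the caller should continue processing the MAD, IB_SMI_DISCARD
 * otherwise.
 */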
2064 static enum smi_action handle_ib_smi(const struct ib_mad_port_private *port_priv,
2065 const struct ib_mad_qp_info *qp_info,
2066 const struct ib_wc *wc,
2067 int port_num,
2068 struct ib_mad_private *recv,
2069 struct ib_mad_private *response)
2070 {
2071 enum smi_forward_action retsmi;
2072 struct ib_smp *smp = (struct ib_smp *)recv->mad;
2073
2074 if (smi_handle_dr_smp_recv(smp,
2075 rdma_cap_ib_switch(port_priv->device),
2076 port_num,
2077 port_priv->device->phys_port_cnt) ==
2078 IB_SMI_DISCARD)
2079 return IB_SMI_DISCARD;
2080
2081 retsmi = smi_check_forward_dr_smp(smp);
2082 if (retsmi == IB_SMI_LOCAL)
2083 return IB_SMI_HANDLE;
2084
2085 if (retsmi == IB_SMI_SEND) { /* don't forward */
2086 if (smi_handle_dr_smp_send(smp,
2087 rdma_cap_ib_switch(port_priv->device),
2088 port_num) == IB_SMI_DISCARD)
2089 return IB_SMI_DISCARD;
2090
2091 if (smi_check_local_smp(smp, port_priv->device) == IB_SMI_DISCARD)
2092 return IB_SMI_DISCARD;
2093 } else if (rdma_cap_ib_switch(port_priv->device)) {
2094 /* forward case for switches */
2095 memcpy(response, recv, mad_priv_size(response));
2096 response->header.recv_wc.wc = &response->header.wc;
2097 response->header.recv_wc.recv_buf.mad = (struct ib_mad *)response->mad;
2098 response->header.recv_wc.recv_buf.grh = &response->grh;
2099
2100 agent_send_response((const struct ib_mad_hdr *)response->mad,
2101 &response->grh, wc,
2102 port_priv->device,
2103 smi_get_fwd_port(smp),
2104 qp_info->qp->qp_num,
2105 response->mad_size,
2106 false);
2107
2108 return IB_SMI_DISCARD;
2109 }
2110 return IB_SMI_HANDLE;
2111 }
2112
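/*
 * Build a "method/attribute not supported" reply for a Get or Set
 * request that no agent or driver claimed, reusing the contents of the
 * receive buffer.  Returns false for any other method, in which case
 * no reply is sent.
 */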
2113 static bool generate_unmatched_resp(const struct ib_mad_private *recv,
2114 struct ib_mad_private *response,
2115 size_t *resp_len, bool opa)
2116 {
2117 const struct ib_mad_hdr *recv_hdr = (const struct ib_mad_hdr *)recv->mad;
2118 struct ib_mad_hdr *resp_hdr = (struct ib_mad_hdr *)response->mad;
2119
2120 if (recv_hdr->method == IB_MGMT_METHOD_GET ||
2121 recv_hdr->method == IB_MGMT_METHOD_SET) {
2122 memcpy(response, recv, mad_priv_size(response));
2123 response->header.recv_wc.wc = &response->header.wc;
2124 response->header.recv_wc.recv_buf.mad = (struct ib_mad *)response->mad;
2125 response->header.recv_wc.recv_buf.grh = &response->grh;
2126 resp_hdr->method = IB_MGMT_METHOD_GET_RESP;
2127 resp_hdr->status = cpu_to_be16(IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD_ATTRIB);
2128 if (recv_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
2129 resp_hdr->status |= IB_SMP_DIRECTION;
2130
2131 if (opa && recv_hdr->base_version == OPA_MGMT_BASE_VERSION) {
2132 if (recv_hdr->mgmt_class ==
2133 IB_MGMT_CLASS_SUBN_LID_ROUTED ||
2134 recv_hdr->mgmt_class ==
2135 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
2136 *resp_len = opa_get_smp_header_size(
2137 (struct opa_smp *)recv->mad);
2138 else
2139 *resp_len = sizeof(struct ib_mad_hdr);
2140 }
2141
2142 return true;
2143 } else {
2144 return false;
2145 }
2146 }
2147
2148 static enum smi_action
2149 handle_opa_smi(struct ib_mad_port_private *port_priv,
2150 struct ib_mad_qp_info *qp_info,
2151 struct ib_wc *wc,
2152 int port_num,
2153 struct ib_mad_private *recv,
2154 struct ib_mad_private *response)
2155 {
2156 enum smi_forward_action retsmi;
2157 struct opa_smp *smp = (struct opa_smp *)recv->mad;
2158
2159 if (opa_smi_handle_dr_smp_recv(smp,
2160 rdma_cap_ib_switch(port_priv->device),
2161 port_num,
2162 port_priv->device->phys_port_cnt) ==
2163 IB_SMI_DISCARD)
2164 return IB_SMI_DISCARD;
2165
2166 retsmi = opa_smi_check_forward_dr_smp(smp);
2167 if (retsmi == IB_SMI_LOCAL)
2168 return IB_SMI_HANDLE;
2169
2170 if (retsmi == IB_SMI_SEND) { /* don't forward */
2171 if (opa_smi_handle_dr_smp_send(smp,
2172 rdma_cap_ib_switch(port_priv->device),
2173 port_num) == IB_SMI_DISCARD)
2174 return IB_SMI_DISCARD;
2175
2176 if (opa_smi_check_local_smp(smp, port_priv->device) ==
2177 IB_SMI_DISCARD)
2178 return IB_SMI_DISCARD;
2179
2180 } else if (rdma_cap_ib_switch(port_priv->device)) {
2181 /* forward case for switches */
2182 memcpy(response, recv, mad_priv_size(response));
2183 response->header.recv_wc.wc = &response->header.wc;
2184 response->header.recv_wc.recv_buf.opa_mad =
2185 (struct opa_mad *)response->mad;
2186 response->header.recv_wc.recv_buf.grh = &response->grh;
2187
2188 agent_send_response((const struct ib_mad_hdr *)response->mad,
2189 &response->grh, wc,
2190 port_priv->device,
2191 opa_smi_get_fwd_port(smp),
2192 qp_info->qp->qp_num,
2193 recv->header.wc.byte_len,
2194 true);
2195
2196 return IB_SMI_DISCARD;
2197 }
2198
2199 return IB_SMI_HANDLE;
2200 }
2201
2202 static enum smi_action
2203 handle_smi(struct ib_mad_port_private *port_priv,
2204 struct ib_mad_qp_info *qp_info,
2205 struct ib_wc *wc,
2206 int port_num,
2207 struct ib_mad_private *recv,
2208 struct ib_mad_private *response,
2209 bool opa)
2210 {
2211 struct ib_mad_hdr *mad_hdr = (struct ib_mad_hdr *)recv->mad;
2212
2213 if (opa && mad_hdr->base_version == OPA_MGMT_BASE_VERSION &&
2214 mad_hdr->class_version == OPA_SM_CLASS_VERSION)
2215 return handle_opa_smi(port_priv, qp_info, wc, port_num, recv,
2216 response);
2217
2218 return handle_ib_smi(port_priv, qp_info, wc, port_num, recv, response);
2219 }
2220
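/*
 * Completion handler for receive work requests: unmap the buffer,
 * validate the MAD, give SMI handling and the driver's process_mad a
 * chance to consume or answer it, otherwise dispatch it to the
 * matching agent, and finally repost a receive buffer on the QP.
 */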
2221 static void ib_mad_recv_done(struct ib_cq *cq, struct ib_wc *wc)
2222 {
2223 struct ib_mad_port_private *port_priv = cq->cq_context;
2224 struct ib_mad_list_head *mad_list =
2225 container_of(wc->wr_cqe, struct ib_mad_list_head, cqe);
2226 struct ib_mad_qp_info *qp_info;
2227 struct ib_mad_private_header *mad_priv_hdr;
2228 struct ib_mad_private *recv, *response = NULL;
2229 struct ib_mad_agent_private *mad_agent;
2230 int port_num;
2231 int ret = IB_MAD_RESULT_SUCCESS;
2232 size_t mad_size;
2233 u16 resp_mad_pkey_index = 0;
2234 bool opa;
2235
2236 if (list_empty_careful(&port_priv->port_list))
2237 return;
2238
2239 if (wc->status != IB_WC_SUCCESS) {
2240 /*
2241 * Receive errors indicate that the QP has entered the error
2242 * state - error handling/shutdown code will cleanup
2243 */
2244 return;
2245 }
2246
2247 qp_info = mad_list->mad_queue->qp_info;
2248 dequeue_mad(mad_list);
2249
2250 opa = rdma_cap_opa_mad(qp_info->port_priv->device,
2251 qp_info->port_priv->port_num);
2252
2253 mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header,
2254 mad_list);
2255 recv = container_of(mad_priv_hdr, struct ib_mad_private, header);
2256 ib_dma_unmap_single(port_priv->device,
2257 recv->header.mapping,
2258 mad_priv_dma_size(recv),
2259 DMA_FROM_DEVICE);
2260
2261 /* Setup MAD receive work completion from "normal" work completion */
2262 recv->header.wc = *wc;
2263 recv->header.recv_wc.wc = &recv->header.wc;
2264
2265 if (opa && ((struct ib_mad_hdr *)(recv->mad))->base_version == OPA_MGMT_BASE_VERSION) {
2266 recv->header.recv_wc.mad_len = wc->byte_len - sizeof(struct ib_grh);
2267 recv->header.recv_wc.mad_seg_size = sizeof(struct opa_mad);
2268 } else {
2269 recv->header.recv_wc.mad_len = sizeof(struct ib_mad);
2270 recv->header.recv_wc.mad_seg_size = sizeof(struct ib_mad);
2271 }
2272
2273 recv->header.recv_wc.recv_buf.mad = (struct ib_mad *)recv->mad;
2274 recv->header.recv_wc.recv_buf.grh = &recv->grh;
2275
2276 if (atomic_read(&qp_info->snoop_count))
2277 snoop_recv(qp_info, &recv->header.recv_wc, IB_MAD_SNOOP_RECVS);
2278
2279 /* Validate MAD */
2280 if (!validate_mad((const struct ib_mad_hdr *)recv->mad, qp_info, opa))
2281 goto out;
2282
2283 mad_size = recv->mad_size;
2284 response = alloc_mad_private(mad_size, GFP_KERNEL);
2285 if (!response)
2286 goto out;
2287
2288 if (rdma_cap_ib_switch(port_priv->device))
2289 port_num = wc->port_num;
2290 else
2291 port_num = port_priv->port_num;
2292
2293 if (((struct ib_mad_hdr *)recv->mad)->mgmt_class ==
2294 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
2295 if (handle_smi(port_priv, qp_info, wc, port_num, recv,
2296 response, opa)
2297 == IB_SMI_DISCARD)
2298 goto out;
2299 }
2300
2301 /* Give driver "right of first refusal" on incoming MAD */
2302 if (port_priv->device->process_mad) {
2303 ret = port_priv->device->process_mad(port_priv->device, 0,
2304 port_priv->port_num,
2305 wc, &recv->grh,
2306 (const struct ib_mad_hdr *)recv->mad,
2307 recv->mad_size,
2308 (struct ib_mad_hdr *)response->mad,
2309 &mad_size, &resp_mad_pkey_index);
2310
2311 if (opa)
2312 wc->pkey_index = resp_mad_pkey_index;
2313
2314 if (ret & IB_MAD_RESULT_SUCCESS) {
2315 if (ret & IB_MAD_RESULT_CONSUMED)
2316 goto out;
2317 if (ret & IB_MAD_RESULT_REPLY) {
2318 agent_send_response((const struct ib_mad_hdr *)response->mad,
2319 &recv->grh, wc,
2320 port_priv->device,
2321 port_num,
2322 qp_info->qp->qp_num,
2323 mad_size, opa);
2324 goto out;
2325 }
2326 }
2327 }
2328
2329 mad_agent = find_mad_agent(port_priv, (const struct ib_mad_hdr *)recv->mad);
2330 if (mad_agent) {
2331 ib_mad_complete_recv(mad_agent, &recv->header.recv_wc);
2332 /*
2333 * recv is freed up in error cases in ib_mad_complete_recv
2334 * or via recv_handler in ib_mad_complete_recv()
2335 */
2336 recv = NULL;
2337 } else if ((ret & IB_MAD_RESULT_SUCCESS) &&
2338 generate_unmatched_resp(recv, response, &mad_size, opa)) {
2339 agent_send_response((const struct ib_mad_hdr *)response->mad, &recv->grh, wc,
2340 port_priv->device, port_num,
2341 qp_info->qp->qp_num, mad_size, opa);
2342 }
2343
2344 out:
2345 /* Post another receive request for this QP */
2346 if (response) {
2347 ib_mad_post_receive_mads(qp_info, response);
2348 kfree(recv);
2349 } else
2350 ib_mad_post_receive_mads(qp_info, recv);
2351 }
2352
2353 static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
2354 {
2355 struct ib_mad_send_wr_private *mad_send_wr;
2356 unsigned long delay;
2357
2358 if (list_empty(&mad_agent_priv->wait_list)) {
2359 cancel_delayed_work(&mad_agent_priv->timed_work);
2360 } else {
2361 mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
2362 struct ib_mad_send_wr_private,
2363 agent_list);
2364
2365 if (time_after(mad_agent_priv->timeout,
2366 mad_send_wr->timeout)) {
2367 mad_agent_priv->timeout = mad_send_wr->timeout;
2368 delay = mad_send_wr->timeout - jiffies;
2369 if ((long)delay <= 0)
2370 delay = 1;
2371 mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
2372 &mad_agent_priv->timed_work, delay);
2373 }
2374 }
2375 }
2376
2377 static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr)
2378 {
2379 struct ib_mad_agent_private *mad_agent_priv;
2380 struct ib_mad_send_wr_private *temp_mad_send_wr;
2381 struct list_head *list_item;
2382 unsigned long delay;
2383
2384 mad_agent_priv = mad_send_wr->mad_agent_priv;
2385 list_del(&mad_send_wr->agent_list);
2386
2387 delay = mad_send_wr->timeout;
2388 mad_send_wr->timeout += jiffies;
2389
2390 if (delay) {
2391 list_for_each_prev(list_item, &mad_agent_priv->wait_list) {
2392 temp_mad_send_wr = list_entry(list_item,
2393 struct ib_mad_send_wr_private,
2394 agent_list);
2395 if (time_after(mad_send_wr->timeout,
2396 temp_mad_send_wr->timeout))
2397 break;
2398 }
2399 } else
2401 list_item = &mad_agent_priv->wait_list;
2402 list_add(&mad_send_wr->agent_list, list_item);
2403
2404 /* Reschedule a work item if we have a shorter timeout */
2405 if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list)
2406 mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
2407 &mad_agent_priv->timed_work, delay);
2408 }
2409
2410 void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr,
2411 int timeout_ms)
2412 {
2413 mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
2414 wait_for_response(mad_send_wr);
2415 }
2416
2417 /*
2418 * Process a send work completion
2419 */
2420 void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
2421 struct ib_mad_send_wc *mad_send_wc)
2422 {
2423 struct ib_mad_agent_private *mad_agent_priv;
2424 unsigned long flags;
2425 int ret;
2426
2427 mad_agent_priv = mad_send_wr->mad_agent_priv;
2428 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2429 if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
2430 ret = ib_process_rmpp_send_wc(mad_send_wr, mad_send_wc);
2431 if (ret == IB_RMPP_RESULT_CONSUMED)
2432 goto done;
2433 } else
2434 ret = IB_RMPP_RESULT_UNHANDLED;
2435
2436 if (mad_send_wc->status != IB_WC_SUCCESS &&
2437 mad_send_wr->status == IB_WC_SUCCESS) {
2438 mad_send_wr->status = mad_send_wc->status;
2439 mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2440 }
2441
2442 if (--mad_send_wr->refcount > 0) {
2443 if (mad_send_wr->refcount == 1 && mad_send_wr->timeout &&
2444 mad_send_wr->status == IB_WC_SUCCESS) {
2445 wait_for_response(mad_send_wr);
2446 }
2447 goto done;
2448 }
2449
2450 /* Remove send from MAD agent and notify client of completion */
2451 list_del(&mad_send_wr->agent_list);
2452 adjust_timeout(mad_agent_priv);
2453 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2454
2455 if (mad_send_wr->status != IB_WC_SUCCESS)
2456 mad_send_wc->status = mad_send_wr->status;
2457 if (ret == IB_RMPP_RESULT_INTERNAL)
2458 ib_rmpp_send_handler(mad_send_wc);
2459 else
2460 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2461 mad_send_wc);
2462
2463 /* Release reference on agent taken when sending */
2464 deref_mad_agent(mad_agent_priv);
2465 return;
2466 done:
2467 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2468 }
2469
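/*
 * Completion handler for send work requests: unmap the send buffers,
 * complete the send upwards and, if work requests were held on the
 * overflow list because the send queue was full, post the next one.
 */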
2470 static void ib_mad_send_done(struct ib_cq *cq, struct ib_wc *wc)
2471 {
2472 struct ib_mad_port_private *port_priv = cq->cq_context;
2473 struct ib_mad_list_head *mad_list =
2474 container_of(wc->wr_cqe, struct ib_mad_list_head, cqe);
2475 struct ib_mad_send_wr_private *mad_send_wr, *queued_send_wr;
2476 struct ib_mad_qp_info *qp_info;
2477 struct ib_mad_queue *send_queue;
2478 struct ib_mad_send_wc mad_send_wc;
2479 unsigned long flags;
2480 int ret;
2481
2482 if (list_empty_careful(&port_priv->port_list))
2483 return;
2484
2485 if (wc->status != IB_WC_SUCCESS) {
2486 if (!ib_mad_send_error(port_priv, wc))
2487 return;
2488 }
2489
2490 mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
2491 mad_list);
2492 send_queue = mad_list->mad_queue;
2493 qp_info = send_queue->qp_info;
2494
2495 retry:
2496 ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
2497 mad_send_wr->header_mapping,
2498 mad_send_wr->sg_list[0].length, DMA_TO_DEVICE);
2499 ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
2500 mad_send_wr->payload_mapping,
2501 mad_send_wr->sg_list[1].length, DMA_TO_DEVICE);
2502 queued_send_wr = NULL;
2503 spin_lock_irqsave(&send_queue->lock, flags);
2504 list_del(&mad_list->list);
2505
2506 /* Move queued send to the send queue */
2507 if (send_queue->count-- > send_queue->max_active) {
2508 mad_list = container_of(qp_info->overflow_list.next,
2509 struct ib_mad_list_head, list);
2510 queued_send_wr = container_of(mad_list,
2511 struct ib_mad_send_wr_private,
2512 mad_list);
2513 list_move_tail(&mad_list->list, &send_queue->list);
2514 }
2515 spin_unlock_irqrestore(&send_queue->lock, flags);
2516
2517 mad_send_wc.send_buf = &mad_send_wr->send_buf;
2518 mad_send_wc.status = wc->status;
2519 mad_send_wc.vendor_err = wc->vendor_err;
2520 if (atomic_read(&qp_info->snoop_count))
2521 snoop_send(qp_info, &mad_send_wr->send_buf, &mad_send_wc,
2522 IB_MAD_SNOOP_SEND_COMPLETIONS);
2523 ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
2524
2525 if (queued_send_wr) {
2526 ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr.wr,
2527 NULL);
2528 if (ret) {
2529 dev_err(&port_priv->device->dev,
2530 "ib_post_send failed: %d\n", ret);
2531 mad_send_wr = queued_send_wr;
2532 wc->status = IB_WC_LOC_QP_OP_ERR;
2533 goto retry;
2534 }
2535 }
2536 }
2537
2538 static void mark_sends_for_retry(struct ib_mad_qp_info *qp_info)
2539 {
2540 struct ib_mad_send_wr_private *mad_send_wr;
2541 struct ib_mad_list_head *mad_list;
2542 unsigned long flags;
2543
2544 spin_lock_irqsave(&qp_info->send_queue.lock, flags);
2545 list_for_each_entry(mad_list, &qp_info->send_queue.list, list) {
2546 mad_send_wr = container_of(mad_list,
2547 struct ib_mad_send_wr_private,
2548 mad_list);
2549 mad_send_wr->retry = 1;
2550 }
2551 spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
2552 }
2553
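/*
 * Handle a failed send completion.  A flushed send that was marked for
 * retry is simply reposted; any other error moves the QP from SQE back
 * to RTS and marks the remaining sends for retry.  Returns true if the
 * error should still be reported for this send.
 */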
2554 static bool ib_mad_send_error(struct ib_mad_port_private *port_priv,
2555 struct ib_wc *wc)
2556 {
2557 struct ib_mad_list_head *mad_list =
2558 container_of(wc->wr_cqe, struct ib_mad_list_head, cqe);
2559 struct ib_mad_qp_info *qp_info = mad_list->mad_queue->qp_info;
2560 struct ib_mad_send_wr_private *mad_send_wr;
2561 int ret;
2562
2563 /*
2564 * Send errors will transition the QP to SQE - move
2565 * QP to RTS and repost flushed work requests
2566 */
2567 mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
2568 mad_list);
2569 if (wc->status == IB_WC_WR_FLUSH_ERR) {
2570 if (mad_send_wr->retry) {
2571 /* Repost send */
2572 mad_send_wr->retry = 0;
2573 ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr.wr,
2574 NULL);
2575 if (!ret)
2576 return false;
2577 }
2578 } else {
2579 struct ib_qp_attr *attr;
2580
2581 /* Transition QP to RTS and fail offending send */
2582 attr = kmalloc(sizeof *attr, GFP_KERNEL);
2583 if (attr) {
2584 attr->qp_state = IB_QPS_RTS;
2585 attr->cur_qp_state = IB_QPS_SQE;
2586 ret = ib_modify_qp(qp_info->qp, attr,
2587 IB_QP_STATE | IB_QP_CUR_STATE);
2588 kfree(attr);
2589 if (ret)
2590 dev_err(&port_priv->device->dev,
2591 "%s - ib_modify_qp to RTS: %d\n",
2592 __func__, ret);
2593 else
2594 mark_sends_for_retry(qp_info);
2595 }
2596 }
2597
2598 return true;
2599 }
2600
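/*
 * Cancel everything outstanding on an agent (used at unregistration):
 * mark active sends as flushed, then pull all waiting requests off the
 * wait list and report them to the client with IB_WC_WR_FLUSH_ERR.
 */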
2601 static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv)
2602 {
2603 unsigned long flags;
2604 struct ib_mad_send_wr_private *mad_send_wr, *temp_mad_send_wr;
2605 struct ib_mad_send_wc mad_send_wc;
2606 struct list_head cancel_list;
2607
2608 INIT_LIST_HEAD(&cancel_list);
2609
2610 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2611 list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
2612 &mad_agent_priv->send_list, agent_list) {
2613 if (mad_send_wr->status == IB_WC_SUCCESS) {
2614 mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
2615 mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2616 }
2617 }
2618
2619 /* Empty wait list to prevent receives from finding a request */
2620 list_splice_init(&mad_agent_priv->wait_list, &cancel_list);
2621 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2622
2623 /* Report all cancelled requests */
2624 mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
2625 mad_send_wc.vendor_err = 0;
2626
2627 list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
2628 &cancel_list, agent_list) {
2629 mad_send_wc.send_buf = &mad_send_wr->send_buf;
2630 list_del(&mad_send_wr->agent_list);
2631 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2632 &mad_send_wc);
2633 atomic_dec(&mad_agent_priv->refcount);
2634 }
2635 }
2636
2637 static struct ib_mad_send_wr_private*
2638 find_send_wr(struct ib_mad_agent_private *mad_agent_priv,
2639 struct ib_mad_send_buf *send_buf)
2640 {
2641 struct ib_mad_send_wr_private *mad_send_wr;
2642
2643 list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list,
2644 agent_list) {
2645 if (&mad_send_wr->send_buf == send_buf)
2646 return mad_send_wr;
2647 }
2648
2649 list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,
2650 agent_list) {
2651 if (is_rmpp_data_mad(mad_agent_priv,
2652 mad_send_wr->send_buf.mad) &&
2653 &mad_send_wr->send_buf == send_buf)
2654 return mad_send_wr;
2655 }
2656 return NULL;
2657 }
2658
2659 int ib_modify_mad(struct ib_mad_agent *mad_agent,
2660 struct ib_mad_send_buf *send_buf, u32 timeout_ms)
2661 {
2662 struct ib_mad_agent_private *mad_agent_priv;
2663 struct ib_mad_send_wr_private *mad_send_wr;
2664 unsigned long flags;
2665 int active;
2666
2667 mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
2668 agent);
2669 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2670 mad_send_wr = find_send_wr(mad_agent_priv, send_buf);
2671 if (!mad_send_wr || mad_send_wr->status != IB_WC_SUCCESS) {
2672 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2673 return -EINVAL;
2674 }
2675
2676 active = (!mad_send_wr->timeout || mad_send_wr->refcount > 1);
2677 if (!timeout_ms) {
2678 mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
2679 mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2680 }
2681
2682 mad_send_wr->send_buf.timeout_ms = timeout_ms;
2683 if (active)
2684 mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
2685 else
2686 ib_reset_mad_timeout(mad_send_wr, timeout_ms);
2687
2688 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2689 return 0;
2690 }
2691 EXPORT_SYMBOL(ib_modify_mad);
2692
2693 void ib_cancel_mad(struct ib_mad_agent *mad_agent,
2694 struct ib_mad_send_buf *send_buf)
2695 {
2696 ib_modify_mad(mad_agent, send_buf, 0);
2697 }
2698 EXPORT_SYMBOL(ib_cancel_mad);
2699
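/*
 * Work handler for MADs that were handled locally (loopback on the
 * same port): build a synthetic receive completion for the receiving
 * agent, then report the send completion to the sender.
 */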
2700 static void local_completions(struct work_struct *work)
2701 {
2702 struct ib_mad_agent_private *mad_agent_priv;
2703 struct ib_mad_local_private *local;
2704 struct ib_mad_agent_private *recv_mad_agent;
2705 unsigned long flags;
2706 int free_mad;
2707 struct ib_wc wc;
2708 struct ib_mad_send_wc mad_send_wc;
2709 bool opa;
2710
2711 mad_agent_priv =
2712 container_of(work, struct ib_mad_agent_private, local_work);
2713
2714 opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device,
2715 mad_agent_priv->qp_info->port_priv->port_num);
2716
2717 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2718 while (!list_empty(&mad_agent_priv->local_list)) {
2719 local = list_entry(mad_agent_priv->local_list.next,
2720 struct ib_mad_local_private,
2721 completion_list);
2722 list_del(&local->completion_list);
2723 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2724 free_mad = 0;
2725 if (local->mad_priv) {
2726 u8 base_version;
2727 recv_mad_agent = local->recv_mad_agent;
2728 if (!recv_mad_agent) {
2729 dev_err(&mad_agent_priv->agent.device->dev,
2730 "No receive MAD agent for local completion\n");
2731 free_mad = 1;
2732 goto local_send_completion;
2733 }
2734
2735 /*
2736 * Defined behavior is to complete response
2737 * before request
2738 */
2739 build_smp_wc(recv_mad_agent->agent.qp,
2740 local->mad_send_wr->send_wr.wr.wr_cqe,
2741 be16_to_cpu(IB_LID_PERMISSIVE),
2742 local->mad_send_wr->send_wr.pkey_index,
2743 recv_mad_agent->agent.port_num, &wc);
2744
2745 local->mad_priv->header.recv_wc.wc = &wc;
2746
2747 base_version = ((struct ib_mad_hdr *)(local->mad_priv->mad))->base_version;
2748 if (opa && base_version == OPA_MGMT_BASE_VERSION) {
2749 local->mad_priv->header.recv_wc.mad_len = local->return_wc_byte_len;
2750 local->mad_priv->header.recv_wc.mad_seg_size = sizeof(struct opa_mad);
2751 } else {
2752 local->mad_priv->header.recv_wc.mad_len = sizeof(struct ib_mad);
2753 local->mad_priv->header.recv_wc.mad_seg_size = sizeof(struct ib_mad);
2754 }
2755
2756 INIT_LIST_HEAD(&local->mad_priv->header.recv_wc.rmpp_list);
2757 list_add(&local->mad_priv->header.recv_wc.recv_buf.list,
2758 &local->mad_priv->header.recv_wc.rmpp_list);
2759 local->mad_priv->header.recv_wc.recv_buf.grh = NULL;
2760 local->mad_priv->header.recv_wc.recv_buf.mad =
2761 (struct ib_mad *)local->mad_priv->mad;
2762 if (atomic_read(&recv_mad_agent->qp_info->snoop_count))
2763 snoop_recv(recv_mad_agent->qp_info,
2764 &local->mad_priv->header.recv_wc,
2765 IB_MAD_SNOOP_RECVS);
2766 recv_mad_agent->agent.recv_handler(
2767 &recv_mad_agent->agent,
2768 &local->mad_send_wr->send_buf,
2769 &local->mad_priv->header.recv_wc);
2770 spin_lock_irqsave(&recv_mad_agent->lock, flags);
2771 atomic_dec(&recv_mad_agent->refcount);
2772 spin_unlock_irqrestore(&recv_mad_agent->lock, flags);
2773 }
2774
2775 local_send_completion:
2776 /* Complete send */
2777 mad_send_wc.status = IB_WC_SUCCESS;
2778 mad_send_wc.vendor_err = 0;
2779 mad_send_wc.send_buf = &local->mad_send_wr->send_buf;
2780 if (atomic_read(&mad_agent_priv->qp_info->snoop_count))
2781 snoop_send(mad_agent_priv->qp_info,
2782 &local->mad_send_wr->send_buf,
2783 &mad_send_wc, IB_MAD_SNOOP_SEND_COMPLETIONS);
2784 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2785 &mad_send_wc);
2786
2787 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2788 atomic_dec(&mad_agent_priv->refcount);
2789 if (free_mad)
2790 kfree(local->mad_priv);
2791 kfree(local);
2792 }
2793 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2794 }
2795
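/*
 * Retransmit a timed-out send if it still has retries left.  On
 * success the request is placed back on the send list and 0 is
 * returned; -ETIMEDOUT is returned once the retry budget is exhausted.
 */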
2796 static int retry_send(struct ib_mad_send_wr_private *mad_send_wr)
2797 {
2798 int ret;
2799
2800 if (!mad_send_wr->retries_left)
2801 return -ETIMEDOUT;
2802
2803 mad_send_wr->retries_left--;
2804 mad_send_wr->send_buf.retries++;
2805
2806 mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms);
2807
2808 if (ib_mad_kernel_rmpp_agent(&mad_send_wr->mad_agent_priv->agent)) {
2809 ret = ib_retry_rmpp(mad_send_wr);
2810 switch (ret) {
2811 case IB_RMPP_RESULT_UNHANDLED:
2812 ret = ib_send_mad(mad_send_wr);
2813 break;
2814 case IB_RMPP_RESULT_CONSUMED:
2815 ret = 0;
2816 break;
2817 default:
2818 ret = -ECOMM;
2819 break;
2820 }
2821 } else
2822 ret = ib_send_mad(mad_send_wr);
2823
2824 if (!ret) {
2825 mad_send_wr->refcount++;
2826 list_add_tail(&mad_send_wr->agent_list,
2827 &mad_send_wr->mad_agent_priv->send_list);
2828 }
2829 return ret;
2830 }
2831
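/*
 * Delayed work handler that expires entries on the wait list: requests
 * whose timeout has passed are retried, and those that cannot be
 * retried are completed with IB_WC_RESP_TIMEOUT_ERR (or their recorded
 * error status).
 */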
2832 static void timeout_sends(struct work_struct *work)
2833 {
2834 struct ib_mad_agent_private *mad_agent_priv;
2835 struct ib_mad_send_wr_private *mad_send_wr;
2836 struct ib_mad_send_wc mad_send_wc;
2837 unsigned long flags, delay;
2838
2839 mad_agent_priv = container_of(work, struct ib_mad_agent_private,
2840 timed_work.work);
2841 mad_send_wc.vendor_err = 0;
2842
2843 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2844 while (!list_empty(&mad_agent_priv->wait_list)) {
2845 mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
2846 struct ib_mad_send_wr_private,
2847 agent_list);
2848
2849 if (time_after(mad_send_wr->timeout, jiffies)) {
2850 delay = mad_send_wr->timeout - jiffies;
2851 if ((long)delay <= 0)
2852 delay = 1;
2853 queue_delayed_work(mad_agent_priv->qp_info->
2854 port_priv->wq,
2855 &mad_agent_priv->timed_work, delay);
2856 break;
2857 }
2858
2859 list_del(&mad_send_wr->agent_list);
2860 if (mad_send_wr->status == IB_WC_SUCCESS &&
2861 !retry_send(mad_send_wr))
2862 continue;
2863
2864 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2865
2866 if (mad_send_wr->status == IB_WC_SUCCESS)
2867 mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR;
2868 else
2869 mad_send_wc.status = mad_send_wr->status;
2870 mad_send_wc.send_buf = &mad_send_wr->send_buf;
2871 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2872 &mad_send_wc);
2873
2874 atomic_dec(&mad_agent_priv->refcount);
2875 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2876 }
2877 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2878 }
2879
2880 /*
2881 * Allocate receive MADs and post receive WRs for them
2882 */
2883 static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
2884 struct ib_mad_private *mad)
2885 {
2886 unsigned long flags;
2887 int post, ret;
2888 struct ib_mad_private *mad_priv;
2889 struct ib_sge sg_list;
2890 struct ib_recv_wr recv_wr;
2891 struct ib_mad_queue *recv_queue = &qp_info->recv_queue;
2892
2893 /* Initialize common scatter list fields */
2894 sg_list.lkey = qp_info->port_priv->pd->local_dma_lkey;
2895
2896 /* Initialize common receive WR fields */
2897 recv_wr.next = NULL;
2898 recv_wr.sg_list = &sg_list;
2899 recv_wr.num_sge = 1;
2900
2901 do {
2902 /* Allocate and map receive buffer */
2903 if (mad) {
2904 mad_priv = mad;
2905 mad = NULL;
2906 } else {
2907 mad_priv = alloc_mad_private(port_mad_size(qp_info->port_priv),
2908 GFP_ATOMIC);
2909 if (!mad_priv) {
2910 ret = -ENOMEM;
2911 break;
2912 }
2913 }
2914 sg_list.length = mad_priv_dma_size(mad_priv);
2915 sg_list.addr = ib_dma_map_single(qp_info->port_priv->device,
2916 &mad_priv->grh,
2917 mad_priv_dma_size(mad_priv),
2918 DMA_FROM_DEVICE);
2919 if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device,
2920 sg_list.addr))) {
2921 ret = -ENOMEM;
2922 break;
2923 }
2924 mad_priv->header.mapping = sg_list.addr;
2925 mad_priv->header.mad_list.mad_queue = recv_queue;
2926 mad_priv->header.mad_list.cqe.done = ib_mad_recv_done;
2927 recv_wr.wr_cqe = &mad_priv->header.mad_list.cqe;
2928
2929 /* Post receive WR */
2930 spin_lock_irqsave(&recv_queue->lock, flags);
2931 post = (++recv_queue->count < recv_queue->max_active);
2932 list_add_tail(&mad_priv->header.mad_list.list, &recv_queue->list);
2933 spin_unlock_irqrestore(&recv_queue->lock, flags);
2934 ret = ib_post_recv(qp_info->qp, &recv_wr, NULL);
2935 if (ret) {
2936 spin_lock_irqsave(&recv_queue->lock, flags);
2937 list_del(&mad_priv->header.mad_list.list);
2938 recv_queue->count--;
2939 spin_unlock_irqrestore(&recv_queue->lock, flags);
2940 ib_dma_unmap_single(qp_info->port_priv->device,
2941 mad_priv->header.mapping,
2942 mad_priv_dma_size(mad_priv),
2943 DMA_FROM_DEVICE);
2944 kfree(mad_priv);
2945 dev_err(&qp_info->port_priv->device->dev,
2946 "ib_post_recv failed: %d\n", ret);
2947 break;
2948 }
2949 } while (post);
2950
2951 return ret;
2952 }
2953
2954 /*
2955 * Return all the posted receive MADs
2956 */
2957 static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info)
2958 {
2959 struct ib_mad_private_header *mad_priv_hdr;
2960 struct ib_mad_private *recv;
2961 struct ib_mad_list_head *mad_list;
2962
2963 if (!qp_info->qp)
2964 return;
2965
2966 while (!list_empty(&qp_info->recv_queue.list)) {
2967
2968 mad_list = list_entry(qp_info->recv_queue.list.next,
2969 struct ib_mad_list_head, list);
2970 mad_priv_hdr = container_of(mad_list,
2971 struct ib_mad_private_header,
2972 mad_list);
2973 recv = container_of(mad_priv_hdr, struct ib_mad_private,
2974 header);
2975
2976 /* Remove from posted receive MAD list */
2977 list_del(&mad_list->list);
2978
2979 ib_dma_unmap_single(qp_info->port_priv->device,
2980 recv->header.mapping,
2981 mad_priv_dma_size(recv),
2982 DMA_FROM_DEVICE);
2983 kfree(recv);
2984 }
2985
2986 qp_info->recv_queue.count = 0;
2987 }
2988
2989 /*
2990 * Start the port
2991 */
2992 static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
2993 {
2994 int ret, i;
2995 struct ib_qp_attr *attr;
2996 struct ib_qp *qp;
2997 u16 pkey_index;
2998
2999 attr = kmalloc(sizeof *attr, GFP_KERNEL);
3000 if (!attr)
3001 return -ENOMEM;
3002
3003 ret = ib_find_pkey(port_priv->device, port_priv->port_num,
3004 IB_DEFAULT_PKEY_FULL, &pkey_index);
3005 if (ret)
3006 pkey_index = 0;
3007
3008 for (i = 0; i < IB_MAD_QPS_CORE; i++) {
3009 qp = port_priv->qp_info[i].qp;
3010 if (!qp)
3011 continue;
3012
3013 /*
3014 * PKey index for QP1 is irrelevant but
3015 * one is needed for the Reset to Init transition
3016 */
3017 attr->qp_state = IB_QPS_INIT;
3018 attr->pkey_index = pkey_index;
3019 attr->qkey = (qp->qp_num == 0) ? 0 : IB_QP1_QKEY;
3020 ret = ib_modify_qp(qp, attr, IB_QP_STATE |
3021 IB_QP_PKEY_INDEX | IB_QP_QKEY);
3022 if (ret) {
3023 dev_err(&port_priv->device->dev,
3024 "Couldn't change QP%d state to INIT: %d\n",
3025 i, ret);
3026 goto out;
3027 }
3028
3029 attr->qp_state = IB_QPS_RTR;
3030 ret = ib_modify_qp(qp, attr, IB_QP_STATE);
3031 if (ret) {
3032 dev_err(&port_priv->device->dev,
3033 "Couldn't change QP%d state to RTR: %d\n",
3034 i, ret);
3035 goto out;
3036 }
3037
3038 attr->qp_state = IB_QPS_RTS;
3039 attr->sq_psn = IB_MAD_SEND_Q_PSN;
3040 ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_SQ_PSN);
3041 if (ret) {
3042 dev_err(&port_priv->device->dev,
3043 "Couldn't change QP%d state to RTS: %d\n",
3044 i, ret);
3045 goto out;
3046 }
3047 }
3048
3049 ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
3050 if (ret) {
3051 dev_err(&port_priv->device->dev,
3052 "Failed to request completion notification: %d\n",
3053 ret);
3054 goto out;
3055 }
3056
3057 for (i = 0; i < IB_MAD_QPS_CORE; i++) {
3058 if (!port_priv->qp_info[i].qp)
3059 continue;
3060
3061 ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL);
3062 if (ret) {
3063 dev_err(&port_priv->device->dev,
3064 "Couldn't post receive WRs\n");
3065 goto out;
3066 }
3067 }
3068 out:
3069 kfree(attr);
3070 return ret;
3071 }
3072
3073 static void qp_event_handler(struct ib_event *event, void *qp_context)
3074 {
3075 struct ib_mad_qp_info *qp_info = qp_context;
3076
3077 /* It's worse than that! He's dead, Jim! */
3078 dev_err(&qp_info->port_priv->device->dev,
3079 "Fatal error (%d) on MAD QP (%d)\n",
3080 event->event, qp_info->qp->qp_num);
3081 }
3082
3083 static void init_mad_queue(struct ib_mad_qp_info *qp_info,
3084 struct ib_mad_queue *mad_queue)
3085 {
3086 mad_queue->qp_info = qp_info;
3087 mad_queue->count = 0;
3088 spin_lock_init(&mad_queue->lock);
3089 INIT_LIST_HEAD(&mad_queue->list);
3090 }
3091
3092 static void init_mad_qp(struct ib_mad_port_private *port_priv,
3093 struct ib_mad_qp_info *qp_info)
3094 {
3095 qp_info->port_priv = port_priv;
3096 init_mad_queue(qp_info, &qp_info->send_queue);
3097 init_mad_queue(qp_info, &qp_info->recv_queue);
3098 INIT_LIST_HEAD(&qp_info->overflow_list);
3099 spin_lock_init(&qp_info->snoop_lock);
3100 qp_info->snoop_table = NULL;
3101 qp_info->snoop_table_size = 0;
3102 atomic_set(&qp_info->snoop_count, 0);
3103 }
3104
3105 static int create_mad_qp(struct ib_mad_qp_info *qp_info,
3106 enum ib_qp_type qp_type)
3107 {
3108 struct ib_qp_init_attr qp_init_attr;
3109 int ret;
3110
3111 memset(&qp_init_attr, 0, sizeof qp_init_attr);
3112 qp_init_attr.send_cq = qp_info->port_priv->cq;
3113 qp_init_attr.recv_cq = qp_info->port_priv->cq;
3114 qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
3115 qp_init_attr.cap.max_send_wr = mad_sendq_size;
3116 qp_init_attr.cap.max_recv_wr = mad_recvq_size;
3117 qp_init_attr.cap.max_send_sge = IB_MAD_SEND_REQ_MAX_SG;
3118 qp_init_attr.cap.max_recv_sge = IB_MAD_RECV_REQ_MAX_SG;
3119 qp_init_attr.qp_type = qp_type;
3120 qp_init_attr.port_num = qp_info->port_priv->port_num;
3121 qp_init_attr.qp_context = qp_info;
3122 qp_init_attr.event_handler = qp_event_handler;
3123 qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr);
3124 if (IS_ERR(qp_info->qp)) {
3125 dev_err(&qp_info->port_priv->device->dev,
3126 "Couldn't create ib_mad QP%d\n",
3127 get_spl_qp_index(qp_type));
3128 ret = PTR_ERR(qp_info->qp);
3129 goto error;
3130 }
3131 /* Use minimum queue sizes unless the CQ is resized */
3132 qp_info->send_queue.max_active = mad_sendq_size;
3133 qp_info->recv_queue.max_active = mad_recvq_size;
3134 return 0;
3135
3136 error:
3137 return ret;
3138 }
3139
3140 static void destroy_mad_qp(struct ib_mad_qp_info *qp_info)
3141 {
3142 if (!qp_info->qp)
3143 return;
3144
3145 ib_destroy_qp(qp_info->qp);
3146 kfree(qp_info->snoop_table);
3147 }
3148
3149 /*
3150 * Open the port
3151 * Create the QP, PD, MR, and CQ if needed
3152 */
3153 static int ib_mad_port_open(struct ib_device *device,
3154 int port_num)
3155 {
3156 int ret, cq_size;
3157 struct ib_mad_port_private *port_priv;
3158 unsigned long flags;
3159 char name[sizeof "ib_mad123"];
3160 int has_smi;
3161
3162 if (WARN_ON(rdma_max_mad_size(device, port_num) < IB_MGMT_MAD_SIZE))
3163 return -EFAULT;
3164
3165 if (WARN_ON(rdma_cap_opa_mad(device, port_num) &&
3166 rdma_max_mad_size(device, port_num) < OPA_MGMT_MAD_SIZE))
3167 return -EFAULT;
3168
3169 /* Create new device info */
3170 port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
3171 if (!port_priv)
3172 return -ENOMEM;
3173
3174 port_priv->device = device;
3175 port_priv->port_num = port_num;
3176 spin_lock_init(&port_priv->reg_lock);
3177 init_mad_qp(port_priv, &port_priv->qp_info[0]);
3178 init_mad_qp(port_priv, &port_priv->qp_info[1]);
3179
3180 cq_size = mad_sendq_size + mad_recvq_size;
3181 has_smi = rdma_cap_ib_smi(device, port_num);
3182 if (has_smi)
3183 cq_size *= 2;
3184
3185 port_priv->cq = ib_alloc_cq(port_priv->device, port_priv, cq_size, 0,
3186 IB_POLL_WORKQUEUE);
3187 if (IS_ERR(port_priv->cq)) {
3188 dev_err(&device->dev, "Couldn't create ib_mad CQ\n");
3189 ret = PTR_ERR(port_priv->cq);
3190 goto error3;
3191 }
3192
3193 port_priv->pd = ib_alloc_pd(device, 0);
3194 if (IS_ERR(port_priv->pd)) {
3195 dev_err(&device->dev, "Couldn't create ib_mad PD\n");
3196 ret = PTR_ERR(port_priv->pd);
3197 goto error4;
3198 }
3199
3200 if (has_smi) {
3201 ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI);
3202 if (ret)
3203 goto error6;
3204 }
3205 ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI);
3206 if (ret)
3207 goto error7;
3208
3209 snprintf(name, sizeof name, "ib_mad%d", port_num);
3210 port_priv->wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
3211 if (!port_priv->wq) {
3212 ret = -ENOMEM;
3213 goto error8;
3214 }
3215
3216 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
3217 list_add_tail(&port_priv->port_list, &ib_mad_port_list);
3218 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
3219
3220 ret = ib_mad_port_start(port_priv);
3221 if (ret) {
3222 dev_err(&device->dev, "Couldn't start port\n");
3223 goto error9;
3224 }
3225
3226 return 0;
3227
3228 error9:
3229 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
3230 list_del_init(&port_priv->port_list);
3231 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
3232
3233 destroy_workqueue(port_priv->wq);
3234 error8:
3235 destroy_mad_qp(&port_priv->qp_info[1]);
3236 error7:
3237 destroy_mad_qp(&port_priv->qp_info[0]);
3238 error6:
3239 ib_dealloc_pd(port_priv->pd);
3240 error4:
3241 ib_free_cq(port_priv->cq);
3242 cleanup_recv_queue(&port_priv->qp_info[1]);
3243 cleanup_recv_queue(&port_priv->qp_info[0]);
3244 error3:
3245 kfree(port_priv);
3246
3247 return ret;
3248 }
3249
3250 /*
3251 * Close the port
3252 * If there are no classes using the port, free the port
3253 * resources (CQ, MR, PD, QP) and remove the port's info structure
3254 */
3255 static int ib_mad_port_close(struct ib_device *device, int port_num)
3256 {
3257 struct ib_mad_port_private *port_priv;
3258 unsigned long flags;
3259
3260 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
3261 port_priv = __ib_get_mad_port(device, port_num);
3262 if (port_priv == NULL) {
3263 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
3264 dev_err(&device->dev, "Port %d not found\n", port_num);
3265 return -ENODEV;
3266 }
3267 list_del_init(&port_priv->port_list);
3268 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
3269
3270 destroy_workqueue(port_priv->wq);
3271 destroy_mad_qp(&port_priv->qp_info[1]);
3272 destroy_mad_qp(&port_priv->qp_info[0]);
3273 ib_dealloc_pd(port_priv->pd);
3274 ib_free_cq(port_priv->cq);
3275 cleanup_recv_queue(&port_priv->qp_info[1]);
3276 cleanup_recv_queue(&port_priv->qp_info[0]);
3277 /* XXX: Handle deallocation of MAD registration tables */
3278
3279 kfree(port_priv);
3280
3281 return 0;
3282 }
3283
3284 static void ib_mad_init_device(struct ib_device *device)
3285 {
3286 int start, i;
3287
3288 start = rdma_start_port(device);
3289
3290 for (i = start; i <= rdma_end_port(device); i++) {
3291 if (!rdma_cap_ib_mad(device, i))
3292 continue;
3293
3294 if (ib_mad_port_open(device, i)) {
3295 dev_err(&device->dev, "Couldn't open port %d\n", i);
3296 goto error;
3297 }
3298 if (ib_agent_port_open(device, i)) {
3299 dev_err(&device->dev,
3300 "Couldn't open port %d for agents\n", i);
3301 goto error_agent;
3302 }
3303 }
3304 return;
3305
3306 error_agent:
3307 if (ib_mad_port_close(device, i))
3308 dev_err(&device->dev, "Couldn't close port %d\n", i);
3309
3310 error:
3311 while (--i >= start) {
3312 if (!rdma_cap_ib_mad(device, i))
3313 continue;
3314
3315 if (ib_agent_port_close(device, i))
3316 dev_err(&device->dev,
3317 "Couldn't close port %d for agents\n", i);
3318 if (ib_mad_port_close(device, i))
3319 dev_err(&device->dev, "Couldn't close port %d\n", i);
3320 }
3321 }
3322
3323 static void ib_mad_remove_device(struct ib_device *device, void *client_data)
3324 {
3325 int i;
3326
3327 for (i = rdma_start_port(device); i <= rdma_end_port(device); i++) {
3328 if (!rdma_cap_ib_mad(device, i))
3329 continue;
3330
3331 if (ib_agent_port_close(device, i))
3332 dev_err(&device->dev,
3333 "Couldn't close port %d for agents\n", i);
3334 if (ib_mad_port_close(device, i))
3335 dev_err(&device->dev, "Couldn't close port %d\n", i);
3336 }
3337 }
3338
3339 static struct ib_client mad_client = {
3340 .name = "mad",
3341 .add = ib_mad_init_device,
3342 .remove = ib_mad_remove_device
3343 };
3344
3345 int ib_mad_init(void)
3346 {
3347 mad_recvq_size = min(mad_recvq_size, IB_MAD_QP_MAX_SIZE);
3348 mad_recvq_size = max(mad_recvq_size, IB_MAD_QP_MIN_SIZE);
3349
3350 mad_sendq_size = min(mad_sendq_size, IB_MAD_QP_MAX_SIZE);
3351 mad_sendq_size = max(mad_sendq_size, IB_MAD_QP_MIN_SIZE);
3352
3353 INIT_LIST_HEAD(&ib_mad_port_list);
3354
3355 /* Client ID 0 is used for snoop-only clients */
3356 idr_alloc(&ib_mad_clients, NULL, 0, 0, GFP_KERNEL);
3357
3358 if (ib_register_client(&mad_client)) {
3359 pr_err("Couldn't register ib_mad client\n");
3360 return -EINVAL;
3361 }
3362
3363 return 0;
3364 }
3365
3366 void ib_mad_cleanup(void)
3367 {
3368 ib_unregister_client(&mad_client);
3369 }
3370