Lines Matching +full:entry +full:- +full:method
(search hits below are source lines from drivers/infiniband/core/mad.c, each shown with its original file line number and enclosing function)
2 * Copyright (c) 2004-2007 Voltaire, Inc. All rights reserved.
18 * - Redistributions of source code must retain the above
22 * - Redistributions in binary form must reproduce the above
40 #include <linux/dma-mapping.h>
60 struct trace_event_raw_ib_mad_send_template *entry) in create_mad_addr_info() argument
63 struct ib_device *dev = qp_info->port_priv->device; in create_mad_addr_info()
64 u8 pnum = qp_info->port_priv->port_num; in create_mad_addr_info()
65 struct ib_ud_wr *wr = &mad_send_wr->send_wr; in create_mad_addr_info()
68 rdma_query_ah(wr->ah, &attr); in create_mad_addr_info()
71 entry->sl = attr.sl; in create_mad_addr_info()
72 ib_query_pkey(dev, pnum, wr->pkey_index, &pkey); in create_mad_addr_info()
73 entry->pkey = pkey; in create_mad_addr_info()
74 entry->rqpn = wr->remote_qpn; in create_mad_addr_info()
75 entry->rqkey = wr->remote_qkey; in create_mad_addr_info()
76 entry->dlid = rdma_ah_get_dlid(&attr); in create_mad_addr_info()
88 /* Client ID 0 is used for snoop-only clients */
97 static int method_in_use(struct ib_mad_mgmt_method_table **method,
124 struct ib_mad_port_private *entry; in __ib_get_mad_port() local
126 list_for_each_entry(entry, &ib_mad_port_list, port_list) { in __ib_get_mad_port()
127 if (entry->device == device && entry->port_num == port_num) in __ib_get_mad_port()
128 return entry; in __ib_get_mad_port()
140 struct ib_mad_port_private *entry; in ib_get_mad_port() local
144 entry = __ib_get_mad_port(device, port_num); in ib_get_mad_port()
147 return entry; in ib_get_mad_port()
166 return -1; in get_spl_qp_index()
172 return mgmt_class - IB_MGMT_CLASS_VENDOR_RANGE2_START; in vendor_class_index()
194 struct ib_mad_mgmt_method_table *method; in is_vendor_method_in_use() local
198 if (!memcmp(vendor_class->oui[i], mad_reg_req->oui, 3)) { in is_vendor_method_in_use()
199 method = vendor_class->method_table[i]; in is_vendor_method_in_use()
200 if (method) { in is_vendor_method_in_use()
201 if (method_in_use(&method, mad_reg_req)) in is_vendor_method_in_use()
213 return ((hdr->method & IB_MGMT_METHOD_RESP) || in ib_response_mad()
214 (hdr->method == IB_MGMT_METHOD_TRAP_REPRESS) || in ib_response_mad()
215 ((hdr->mgmt_class == IB_MGMT_CLASS_BM) && in ib_response_mad()
216 (hdr->attr_mod & IB_BM_ATTR_MOD_RESP))); in ib_response_mad()
221 * ib_register_mad_agent - Register to send/receive MADs
236 struct ib_mad_agent *ret = ERR_PTR(-EINVAL); in ib_register_mad_agent()
242 struct ib_mad_mgmt_method_table *method; in ib_register_mad_agent() local
248 return ERR_PTR(-EPROTONOSUPPORT); in ib_register_mad_agent()
252 if (qpn == -1) { in ib_register_mad_agent()
253 dev_dbg_ratelimited(&device->dev, "%s: invalid QP Type %d\n", in ib_register_mad_agent()
259 dev_dbg_ratelimited(&device->dev, in ib_register_mad_agent()
267 if (mad_reg_req->mgmt_class_version >= MAX_MGMT_VERSION) { in ib_register_mad_agent()
268 dev_dbg_ratelimited(&device->dev, in ib_register_mad_agent()
271 mad_reg_req->mgmt_class_version); in ib_register_mad_agent()
275 dev_dbg_ratelimited(&device->dev, in ib_register_mad_agent()
279 if (mad_reg_req->mgmt_class >= MAX_MGMT_CLASS) { in ib_register_mad_agent()
284 if (mad_reg_req->mgmt_class != in ib_register_mad_agent()
286 dev_dbg_ratelimited(&device->dev, in ib_register_mad_agent()
288 __func__, mad_reg_req->mgmt_class); in ib_register_mad_agent()
291 } else if (mad_reg_req->mgmt_class == 0) { in ib_register_mad_agent()
296 dev_dbg_ratelimited(&device->dev, in ib_register_mad_agent()
300 } else if (is_vendor_class(mad_reg_req->mgmt_class)) { in ib_register_mad_agent()
305 if (!is_vendor_oui(mad_reg_req->oui)) { in ib_register_mad_agent()
306 dev_dbg_ratelimited(&device->dev, in ib_register_mad_agent()
309 mad_reg_req->mgmt_class); in ib_register_mad_agent()
314 if (!ib_is_mad_class_rmpp(mad_reg_req->mgmt_class)) { in ib_register_mad_agent()
316 dev_dbg_ratelimited(&device->dev, in ib_register_mad_agent()
317 "%s: RMPP version for non-RMPP class 0x%x\n", in ib_register_mad_agent()
318 __func__, mad_reg_req->mgmt_class); in ib_register_mad_agent()
325 if ((mad_reg_req->mgmt_class != in ib_register_mad_agent()
327 (mad_reg_req->mgmt_class != in ib_register_mad_agent()
329 dev_dbg_ratelimited(&device->dev, in ib_register_mad_agent()
331 __func__, mad_reg_req->mgmt_class); in ib_register_mad_agent()
335 if ((mad_reg_req->mgmt_class == in ib_register_mad_agent()
337 (mad_reg_req->mgmt_class == in ib_register_mad_agent()
339 dev_dbg_ratelimited(&device->dev, in ib_register_mad_agent()
341 __func__, mad_reg_req->mgmt_class); in ib_register_mad_agent()
356 dev_dbg_ratelimited(&device->dev, "%s: Invalid port %d\n", in ib_register_mad_agent()
358 ret = ERR_PTR(-ENODEV); in ib_register_mad_agent()
365 if (!port_priv->qp_info[qpn].qp) { in ib_register_mad_agent()
366 dev_dbg_ratelimited(&device->dev, "%s: QP %d not supported\n", in ib_register_mad_agent()
368 ret = ERR_PTR(-EPROTONOSUPPORT); in ib_register_mad_agent()
375 ret = ERR_PTR(-ENOMEM); in ib_register_mad_agent()
382 ret = ERR_PTR(-ENOMEM); in ib_register_mad_agent()
388 mad_agent_priv->qp_info = &port_priv->qp_info[qpn]; in ib_register_mad_agent()
389 mad_agent_priv->reg_req = reg_req; in ib_register_mad_agent()
390 mad_agent_priv->agent.rmpp_version = rmpp_version; in ib_register_mad_agent()
391 mad_agent_priv->agent.device = device; in ib_register_mad_agent()
392 mad_agent_priv->agent.recv_handler = recv_handler; in ib_register_mad_agent()
393 mad_agent_priv->agent.send_handler = send_handler; in ib_register_mad_agent()
394 mad_agent_priv->agent.context = context; in ib_register_mad_agent()
395 mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp; in ib_register_mad_agent()
396 mad_agent_priv->agent.port_num = port_num; in ib_register_mad_agent()
397 mad_agent_priv->agent.flags = registration_flags; in ib_register_mad_agent()
398 spin_lock_init(&mad_agent_priv->lock); in ib_register_mad_agent()
399 INIT_LIST_HEAD(&mad_agent_priv->send_list); in ib_register_mad_agent()
400 INIT_LIST_HEAD(&mad_agent_priv->wait_list); in ib_register_mad_agent()
401 INIT_LIST_HEAD(&mad_agent_priv->done_list); in ib_register_mad_agent()
402 INIT_LIST_HEAD(&mad_agent_priv->rmpp_list); in ib_register_mad_agent()
403 INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends); in ib_register_mad_agent()
404 INIT_LIST_HEAD(&mad_agent_priv->local_list); in ib_register_mad_agent()
405 INIT_WORK(&mad_agent_priv->local_work, local_completions); in ib_register_mad_agent()
406 atomic_set(&mad_agent_priv->refcount, 1); in ib_register_mad_agent()
407 init_completion(&mad_agent_priv->comp); in ib_register_mad_agent()
409 ret2 = ib_mad_agent_security_setup(&mad_agent_priv->agent, qp_type); in ib_register_mad_agent()
419 ret2 = xa_alloc_cyclic(&ib_mad_clients, &mad_agent_priv->agent.hi_tid, in ib_register_mad_agent()
420 mad_agent_priv, XA_LIMIT(0, (1 << 24) - 1), in ib_register_mad_agent()
431 spin_lock_irq(&port_priv->reg_lock); in ib_register_mad_agent()
433 mgmt_class = convert_mgmt_class(mad_reg_req->mgmt_class); in ib_register_mad_agent()
435 class = port_priv->version[mad_reg_req-> in ib_register_mad_agent()
438 method = class->method_table[mgmt_class]; in ib_register_mad_agent()
439 if (method) { in ib_register_mad_agent()
440 if (method_in_use(&method, in ib_register_mad_agent()
449 vendor = port_priv->version[mad_reg_req-> in ib_register_mad_agent()
453 vendor_class = vendor->vendor_class[vclass]; in ib_register_mad_agent()
468 spin_unlock_irq(&port_priv->reg_lock); in ib_register_mad_agent()
471 return &mad_agent_priv->agent; in ib_register_mad_agent()
473 spin_unlock_irq(&port_priv->reg_lock); in ib_register_mad_agent()
474 xa_erase(&ib_mad_clients, mad_agent_priv->agent.hi_tid); in ib_register_mad_agent()
476 ib_mad_agent_security_cleanup(&mad_agent_priv->agent); in ib_register_mad_agent()
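For orientation, here is a minimal, hypothetical sketch (not part of mad.c) of how a kernel client drives the registration path listed above: a send-only agent on the GSI QP, registered with a NULL ib_mad_reg_req so it only receives responses matched to its own TIDs, then torn down again. The handler names and bodies are illustrative; the prototypes come from <rdma/ib_mad.h>.

#include <linux/err.h>
#include <rdma/ib_mad.h>

/* Hypothetical completion handlers; a real client would do more here. */
static void demo_send_handler(struct ib_mad_agent *agent,
			      struct ib_mad_send_wc *send_wc)
{
	/* The send buffer is ours again once the send completes. */
	ib_free_send_mad(send_wc->send_buf);
}

static void demo_recv_handler(struct ib_mad_agent *agent,
			      struct ib_mad_send_buf *send_buf,
			      struct ib_mad_recv_wc *recv_wc)
{
	/* Responses to our own sends land here; hand the buffers back. */
	ib_free_recv_mad(recv_wc);
}

static struct ib_mad_agent *demo_register(struct ib_device *device, u8 port)
{
	/*
	 * NULL reg_req => no method table entry is claimed, so only
	 * responses matched by TID (see find_mad_agent()) are delivered.
	 */
	return ib_register_mad_agent(device, port, IB_QPT_GSI,
				     NULL, 0,
				     demo_send_handler, demo_recv_handler,
				     NULL, 0);
}

static void demo_unregister(struct ib_mad_agent *agent)
{
	if (!IS_ERR_OR_NULL(agent))
		ib_unregister_mad_agent(agent);
}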
509 spin_lock_irqsave(&qp_info->snoop_lock, flags); in register_snoop_agent()
511 for (i = 0; i < qp_info->snoop_table_size; i++) in register_snoop_agent()
512 if (!qp_info->snoop_table[i]) in register_snoop_agent()
515 if (i == qp_info->snoop_table_size) { in register_snoop_agent()
517 new_snoop_table = krealloc(qp_info->snoop_table, in register_snoop_agent()
519 (qp_info->snoop_table_size + 1), in register_snoop_agent()
522 i = -ENOMEM; in register_snoop_agent()
526 qp_info->snoop_table = new_snoop_table; in register_snoop_agent()
527 qp_info->snoop_table_size++; in register_snoop_agent()
529 qp_info->snoop_table[i] = mad_snoop_priv; in register_snoop_agent()
530 atomic_inc(&qp_info->snoop_count); in register_snoop_agent()
532 spin_unlock_irqrestore(&qp_info->snoop_lock, flags); in register_snoop_agent()
553 ret = ERR_PTR(-EINVAL); in ib_register_mad_snoop()
557 if (qpn == -1) { in ib_register_mad_snoop()
558 ret = ERR_PTR(-EINVAL); in ib_register_mad_snoop()
563 ret = ERR_PTR(-ENODEV); in ib_register_mad_snoop()
569 ret = ERR_PTR(-ENOMEM); in ib_register_mad_snoop()
574 mad_snoop_priv->qp_info = &port_priv->qp_info[qpn]; in ib_register_mad_snoop()
575 mad_snoop_priv->agent.device = device; in ib_register_mad_snoop()
576 mad_snoop_priv->agent.recv_handler = recv_handler; in ib_register_mad_snoop()
577 mad_snoop_priv->agent.snoop_handler = snoop_handler; in ib_register_mad_snoop()
578 mad_snoop_priv->agent.context = context; in ib_register_mad_snoop()
579 mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp; in ib_register_mad_snoop()
580 mad_snoop_priv->agent.port_num = port_num; in ib_register_mad_snoop()
581 mad_snoop_priv->mad_snoop_flags = mad_snoop_flags; in ib_register_mad_snoop()
582 init_completion(&mad_snoop_priv->comp); in ib_register_mad_snoop()
584 err = ib_mad_agent_security_setup(&mad_snoop_priv->agent, qp_type); in ib_register_mad_snoop()
590 mad_snoop_priv->snoop_index = register_snoop_agent( in ib_register_mad_snoop()
591 &port_priv->qp_info[qpn], in ib_register_mad_snoop()
593 if (mad_snoop_priv->snoop_index < 0) { in ib_register_mad_snoop()
594 ret = ERR_PTR(mad_snoop_priv->snoop_index); in ib_register_mad_snoop()
598 atomic_set(&mad_snoop_priv->refcount, 1); in ib_register_mad_snoop()
599 return &mad_snoop_priv->agent; in ib_register_mad_snoop()
601 ib_mad_agent_security_cleanup(&mad_snoop_priv->agent); in ib_register_mad_snoop()
611 if (atomic_dec_and_test(&mad_agent_priv->refcount)) in deref_mad_agent()
612 complete(&mad_agent_priv->comp); in deref_mad_agent()
617 if (atomic_dec_and_test(&mad_snoop_priv->refcount)) in deref_snoop_agent()
618 complete(&mad_snoop_priv->comp); in deref_snoop_agent()
633 port_priv = mad_agent_priv->qp_info->port_priv; in unregister_mad_agent()
634 cancel_delayed_work(&mad_agent_priv->timed_work); in unregister_mad_agent()
636 spin_lock_irq(&port_priv->reg_lock); in unregister_mad_agent()
638 spin_unlock_irq(&port_priv->reg_lock); in unregister_mad_agent()
639 xa_erase(&ib_mad_clients, mad_agent_priv->agent.hi_tid); in unregister_mad_agent()
641 flush_workqueue(port_priv->wq); in unregister_mad_agent()
645 wait_for_completion(&mad_agent_priv->comp); in unregister_mad_agent()
647 ib_mad_agent_security_cleanup(&mad_agent_priv->agent); in unregister_mad_agent()
649 kfree(mad_agent_priv->reg_req); in unregister_mad_agent()
658 qp_info = mad_snoop_priv->qp_info; in unregister_mad_snoop()
659 spin_lock_irqsave(&qp_info->snoop_lock, flags); in unregister_mad_snoop()
660 qp_info->snoop_table[mad_snoop_priv->snoop_index] = NULL; in unregister_mad_snoop()
661 atomic_dec(&qp_info->snoop_count); in unregister_mad_snoop()
662 spin_unlock_irqrestore(&qp_info->snoop_lock, flags); in unregister_mad_snoop()
665 wait_for_completion(&mad_snoop_priv->comp); in unregister_mad_snoop()
667 ib_mad_agent_security_cleanup(&mad_snoop_priv->agent); in unregister_mad_snoop()
673 * ib_unregister_mad_agent - Unregisters a client from using MAD services
683 if (mad_agent->hi_tid) { in ib_unregister_mad_agent()
702 mad_queue = mad_list->mad_queue; in dequeue_mad()
703 spin_lock_irqsave(&mad_queue->lock, flags); in dequeue_mad()
704 list_del(&mad_list->list); in dequeue_mad()
705 mad_queue->count--; in dequeue_mad()
706 spin_unlock_irqrestore(&mad_queue->lock, flags); in dequeue_mad()
718 spin_lock_irqsave(&qp_info->snoop_lock, flags); in snoop_send()
719 for (i = 0; i < qp_info->snoop_table_size; i++) { in snoop_send()
720 mad_snoop_priv = qp_info->snoop_table[i]; in snoop_send()
722 !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags)) in snoop_send()
725 atomic_inc(&mad_snoop_priv->refcount); in snoop_send()
726 spin_unlock_irqrestore(&qp_info->snoop_lock, flags); in snoop_send()
727 mad_snoop_priv->agent.snoop_handler(&mad_snoop_priv->agent, in snoop_send()
730 spin_lock_irqsave(&qp_info->snoop_lock, flags); in snoop_send()
732 spin_unlock_irqrestore(&qp_info->snoop_lock, flags); in snoop_send()
743 spin_lock_irqsave(&qp_info->snoop_lock, flags); in snoop_recv()
744 for (i = 0; i < qp_info->snoop_table_size; i++) { in snoop_recv()
745 mad_snoop_priv = qp_info->snoop_table[i]; in snoop_recv()
747 !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags)) in snoop_recv()
750 atomic_inc(&mad_snoop_priv->refcount); in snoop_recv()
751 spin_unlock_irqrestore(&qp_info->snoop_lock, flags); in snoop_recv()
752 mad_snoop_priv->agent.recv_handler(&mad_snoop_priv->agent, NULL, in snoop_recv()
755 spin_lock_irqsave(&qp_info->snoop_lock, flags); in snoop_recv()
757 spin_unlock_irqrestore(&qp_info->snoop_lock, flags); in snoop_recv()
764 wc->wr_cqe = cqe; in build_smp_wc()
765 wc->status = IB_WC_SUCCESS; in build_smp_wc()
766 wc->opcode = IB_WC_RECV; in build_smp_wc()
767 wc->pkey_index = pkey_index; in build_smp_wc()
768 wc->byte_len = sizeof(struct ib_mad) + sizeof(struct ib_grh); in build_smp_wc()
769 wc->src_qp = IB_QP0; in build_smp_wc()
770 wc->qp = qp; in build_smp_wc()
771 wc->slid = slid; in build_smp_wc()
772 wc->sl = 0; in build_smp_wc()
773 wc->dlid_path_bits = 0; in build_smp_wc()
774 wc->port_num = port_num; in build_smp_wc()
779 return sizeof(struct ib_mad_private) + mp->mad_size; in mad_priv_size()
788 ret->mad_size = mad_size; in alloc_mad_private()
795 return rdma_max_mad_size(port_priv->device, port_priv->port_num); in port_mad_size()
800 return sizeof(struct ib_grh) + mp->mad_size; in mad_priv_dma_size()
812 struct ib_smp *smp = mad_send_wr->send_buf.mad; in handle_outgoing_dr_smp()
819 struct ib_device *device = mad_agent_priv->agent.device; in handle_outgoing_dr_smp()
822 struct ib_ud_wr *send_wr = &mad_send_wr->send_wr; in handle_outgoing_dr_smp()
823 size_t mad_size = port_mad_size(mad_agent_priv->qp_info->port_priv); in handle_outgoing_dr_smp()
826 bool opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device, in handle_outgoing_dr_smp()
827 mad_agent_priv->qp_info->port_priv->port_num); in handle_outgoing_dr_smp()
830 smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) in handle_outgoing_dr_smp()
831 port_num = send_wr->port_num; in handle_outgoing_dr_smp()
833 port_num = mad_agent_priv->agent.port_num; in handle_outgoing_dr_smp()
841 if (opa && smp->class_version == OPA_SM_CLASS_VERSION) { in handle_outgoing_dr_smp()
847 ? opa_smp->route.dr.dr_dlid : opa_smp->route.dr.dr_slid) == in handle_outgoing_dr_smp()
852 ret = -EINVAL; in handle_outgoing_dr_smp()
853 dev_err(&device->dev, "OPA Invalid directed route\n"); in handle_outgoing_dr_smp()
856 opa_drslid = be32_to_cpu(opa_smp->route.dr.dr_slid); in handle_outgoing_dr_smp()
859 ret = -EINVAL; in handle_outgoing_dr_smp()
860 dev_err(&device->dev, "OPA Invalid dr_slid 0x%x\n", in handle_outgoing_dr_smp()
873 if ((ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) == in handle_outgoing_dr_smp()
877 ret = -EINVAL; in handle_outgoing_dr_smp()
878 dev_err(&device->dev, "Invalid directed route\n"); in handle_outgoing_dr_smp()
881 drslid = be16_to_cpu(smp->dr_slid); in handle_outgoing_dr_smp()
891 ret = -ENOMEM; in handle_outgoing_dr_smp()
894 local->mad_priv = NULL; in handle_outgoing_dr_smp()
895 local->recv_mad_agent = NULL; in handle_outgoing_dr_smp()
898 ret = -ENOMEM; in handle_outgoing_dr_smp()
903 build_smp_wc(mad_agent_priv->agent.qp, in handle_outgoing_dr_smp()
904 send_wr->wr.wr_cqe, drslid, in handle_outgoing_dr_smp()
905 send_wr->pkey_index, in handle_outgoing_dr_smp()
906 send_wr->port_num, &mad_wc); in handle_outgoing_dr_smp()
908 if (opa && smp->base_version == OPA_MGMT_BASE_VERSION) { in handle_outgoing_dr_smp()
909 mad_wc.byte_len = mad_send_wr->send_buf.hdr_len in handle_outgoing_dr_smp()
910 + mad_send_wr->send_buf.data_len in handle_outgoing_dr_smp()
915 ret = device->ops.process_mad(device, 0, port_num, &mad_wc, NULL, in handle_outgoing_dr_smp()
917 (struct ib_mad_hdr *)mad_priv->mad, in handle_outgoing_dr_smp()
922 if (ib_response_mad((const struct ib_mad_hdr *)mad_priv->mad) && in handle_outgoing_dr_smp()
923 mad_agent_priv->agent.recv_handler) { in handle_outgoing_dr_smp()
924 local->mad_priv = mad_priv; in handle_outgoing_dr_smp()
925 local->recv_mad_agent = mad_agent_priv; in handle_outgoing_dr_smp()
930 atomic_inc(&mad_agent_priv->refcount); in handle_outgoing_dr_smp()
939 port_priv = ib_get_mad_port(mad_agent_priv->agent.device, in handle_outgoing_dr_smp()
940 mad_agent_priv->agent.port_num); in handle_outgoing_dr_smp()
942 memcpy(mad_priv->mad, smp, mad_priv->mad_size); in handle_outgoing_dr_smp()
944 (const struct ib_mad_hdr *)mad_priv->mad); in handle_outgoing_dr_smp()
954 local->mad_priv = mad_priv; in handle_outgoing_dr_smp()
955 local->recv_mad_agent = recv_mad_agent; in handle_outgoing_dr_smp()
960 ret = -EINVAL; in handle_outgoing_dr_smp()
964 local->mad_send_wr = mad_send_wr; in handle_outgoing_dr_smp()
966 local->mad_send_wr->send_wr.pkey_index = out_mad_pkey_index; in handle_outgoing_dr_smp()
967 local->return_wc_byte_len = mad_size; in handle_outgoing_dr_smp()
970 atomic_inc(&mad_agent_priv->refcount); in handle_outgoing_dr_smp()
972 spin_lock_irqsave(&mad_agent_priv->lock, flags); in handle_outgoing_dr_smp()
973 list_add_tail(&local->completion_list, &mad_agent_priv->local_list); in handle_outgoing_dr_smp()
974 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); in handle_outgoing_dr_smp()
975 queue_work(mad_agent_priv->qp_info->port_priv->wq, in handle_outgoing_dr_smp()
976 &mad_agent_priv->local_work); in handle_outgoing_dr_smp()
986 seg_size = mad_size - hdr_len; in get_pad_size()
988 pad = seg_size - data_len % seg_size; in get_pad_size()
998 list_for_each_entry_safe(s, t, &mad_send_wr->rmpp_list, list) { in free_send_rmpp_list()
999 list_del(&s->list); in free_send_rmpp_list()
1007 struct ib_mad_send_buf *send_buf = &send_wr->send_buf; in alloc_send_rmpp_list()
1008 struct ib_rmpp_mad *rmpp_mad = send_buf->mad; in alloc_send_rmpp_list()
1012 send_buf->seg_size = mad_size - send_buf->hdr_len; in alloc_send_rmpp_list()
1013 send_buf->seg_rmpp_size = mad_size - IB_MGMT_RMPP_HDR; in alloc_send_rmpp_list()
1014 seg_size = send_buf->seg_size; in alloc_send_rmpp_list()
1015 pad = send_wr->pad; in alloc_send_rmpp_list()
1018 for (left = send_buf->data_len + pad; left > 0; left -= seg_size) { in alloc_send_rmpp_list()
1022 return -ENOMEM; in alloc_send_rmpp_list()
1024 seg->num = ++send_buf->seg_count; in alloc_send_rmpp_list()
1025 list_add_tail(&seg->list, &send_wr->rmpp_list); in alloc_send_rmpp_list()
1030 memset(seg->data + seg_size - pad, 0, pad); in alloc_send_rmpp_list()
1032 rmpp_mad->rmpp_hdr.rmpp_version = send_wr->mad_agent_priv-> in alloc_send_rmpp_list()
1034 rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_DATA; in alloc_send_rmpp_list()
1035 ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE); in alloc_send_rmpp_list()
1037 send_wr->cur_seg = container_of(send_wr->rmpp_list.next, in alloc_send_rmpp_list()
1039 send_wr->last_ack_seg = send_wr->cur_seg; in alloc_send_rmpp_list()
1045 return agent->rmpp_version && !(agent->flags & IB_MAD_USER_RMPP); in ib_mad_kernel_rmpp_agent()
1066 opa = rdma_cap_opa_mad(mad_agent->device, mad_agent->port_num); in ib_create_send_mad()
1078 return ERR_PTR(-EINVAL); in ib_create_send_mad()
1081 return ERR_PTR(-EINVAL); in ib_create_send_mad()
1086 return ERR_PTR(-ENOMEM); in ib_create_send_mad()
1089 INIT_LIST_HEAD(&mad_send_wr->rmpp_list); in ib_create_send_mad()
1090 mad_send_wr->send_buf.mad = buf; in ib_create_send_mad()
1091 mad_send_wr->send_buf.hdr_len = hdr_len; in ib_create_send_mad()
1092 mad_send_wr->send_buf.data_len = data_len; in ib_create_send_mad()
1093 mad_send_wr->pad = pad; in ib_create_send_mad()
1095 mad_send_wr->mad_agent_priv = mad_agent_priv; in ib_create_send_mad()
1096 mad_send_wr->sg_list[0].length = hdr_len; in ib_create_send_mad()
1097 mad_send_wr->sg_list[0].lkey = mad_agent->qp->pd->local_dma_lkey; in ib_create_send_mad()
1101 data_len < mad_size - hdr_len) in ib_create_send_mad()
1102 mad_send_wr->sg_list[1].length = data_len; in ib_create_send_mad()
1104 mad_send_wr->sg_list[1].length = mad_size - hdr_len; in ib_create_send_mad()
1106 mad_send_wr->sg_list[1].lkey = mad_agent->qp->pd->local_dma_lkey; in ib_create_send_mad()
1108 mad_send_wr->mad_list.cqe.done = ib_mad_send_done; in ib_create_send_mad()
1110 mad_send_wr->send_wr.wr.wr_cqe = &mad_send_wr->mad_list.cqe; in ib_create_send_mad()
1111 mad_send_wr->send_wr.wr.sg_list = mad_send_wr->sg_list; in ib_create_send_mad()
1112 mad_send_wr->send_wr.wr.num_sge = 2; in ib_create_send_mad()
1113 mad_send_wr->send_wr.wr.opcode = IB_WR_SEND; in ib_create_send_mad()
1114 mad_send_wr->send_wr.wr.send_flags = IB_SEND_SIGNALED; in ib_create_send_mad()
1115 mad_send_wr->send_wr.remote_qpn = remote_qpn; in ib_create_send_mad()
1116 mad_send_wr->send_wr.remote_qkey = IB_QP_SET_QKEY; in ib_create_send_mad()
1117 mad_send_wr->send_wr.pkey_index = pkey_index; in ib_create_send_mad()
1127 mad_send_wr->send_buf.mad_agent = mad_agent; in ib_create_send_mad()
1128 atomic_inc(&mad_agent_priv->refcount); in ib_create_send_mad()
1129 return &mad_send_wr->send_buf; in ib_create_send_mad()
1169 list = &mad_send_wr->cur_seg->list; in ib_get_rmpp_segment()
1171 if (mad_send_wr->cur_seg->num < seg_num) { in ib_get_rmpp_segment()
1172 list_for_each_entry(mad_send_wr->cur_seg, list, list) in ib_get_rmpp_segment()
1173 if (mad_send_wr->cur_seg->num == seg_num) in ib_get_rmpp_segment()
1175 } else if (mad_send_wr->cur_seg->num > seg_num) { in ib_get_rmpp_segment()
1176 list_for_each_entry_reverse(mad_send_wr->cur_seg, list, list) in ib_get_rmpp_segment()
1177 if (mad_send_wr->cur_seg->num == seg_num) in ib_get_rmpp_segment()
1180 return mad_send_wr->cur_seg->data; in ib_get_rmpp_segment()
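The segment list built above is normally filled through ib_get_rmpp_segment(). A hypothetical helper (not from mad.c), assuming the send buffer was created with rmpp_active = 1 so seg_count and seg_size are populated:

#include <linux/kernel.h>
#include <linux/string.h>
#include <rdma/ib_mad.h>

static void demo_fill_rmpp_payload(struct ib_mad_send_buf *send_buf,
				   const u8 *data, size_t len)
{
	size_t copied = 0;
	int seg;

	/* Segments are numbered from 1 and each holds seg_size bytes. */
	for (seg = 1; seg <= send_buf->seg_count && copied < len; seg++) {
		size_t chunk = min_t(size_t, len - copied,
				     send_buf->seg_size);

		memcpy(ib_get_rmpp_segment(send_buf, seg),
		       data + copied, chunk);
		copied += chunk;
	}
}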
1186 if (mad_send_wr->send_buf.seg_count) in ib_get_payload()
1187 return ib_get_rmpp_segment(&mad_send_wr->send_buf, in ib_get_payload()
1188 mad_send_wr->seg_num); in ib_get_payload()
1190 return mad_send_wr->send_buf.mad + in ib_get_payload()
1191 mad_send_wr->send_buf.hdr_len; in ib_get_payload()
1199 mad_agent_priv = container_of(send_buf->mad_agent, in ib_free_send_mad()
1205 kfree(send_buf->mad); in ib_free_send_mad()
1220 qp_info = mad_send_wr->mad_agent_priv->qp_info; in ib_send_mad()
1221 mad_send_wr->mad_list.mad_queue = &qp_info->send_queue; in ib_send_mad()
1222 mad_send_wr->mad_list.cqe.done = ib_mad_send_done; in ib_send_mad()
1223 mad_send_wr->send_wr.wr.wr_cqe = &mad_send_wr->mad_list.cqe; in ib_send_mad()
1225 mad_agent = mad_send_wr->send_buf.mad_agent; in ib_send_mad()
1226 sge = mad_send_wr->sg_list; in ib_send_mad()
1227 sge[0].addr = ib_dma_map_single(mad_agent->device, in ib_send_mad()
1228 mad_send_wr->send_buf.mad, in ib_send_mad()
1231 if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[0].addr))) in ib_send_mad()
1232 return -ENOMEM; in ib_send_mad()
1234 mad_send_wr->header_mapping = sge[0].addr; in ib_send_mad()
1236 sge[1].addr = ib_dma_map_single(mad_agent->device, in ib_send_mad()
1240 if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[1].addr))) { in ib_send_mad()
1241 ib_dma_unmap_single(mad_agent->device, in ib_send_mad()
1242 mad_send_wr->header_mapping, in ib_send_mad()
1244 return -ENOMEM; in ib_send_mad()
1246 mad_send_wr->payload_mapping = sge[1].addr; in ib_send_mad()
1248 spin_lock_irqsave(&qp_info->send_queue.lock, flags); in ib_send_mad()
1249 if (qp_info->send_queue.count < qp_info->send_queue.max_active) { in ib_send_mad()
1251 ret = ib_post_send(mad_agent->qp, &mad_send_wr->send_wr.wr, in ib_send_mad()
1253 list = &qp_info->send_queue.list; in ib_send_mad()
1256 list = &qp_info->overflow_list; in ib_send_mad()
1260 qp_info->send_queue.count++; in ib_send_mad()
1261 list_add_tail(&mad_send_wr->mad_list.list, list); in ib_send_mad()
1263 spin_unlock_irqrestore(&qp_info->send_queue.lock, flags); in ib_send_mad()
1265 ib_dma_unmap_single(mad_agent->device, in ib_send_mad()
1266 mad_send_wr->header_mapping, in ib_send_mad()
1268 ib_dma_unmap_single(mad_agent->device, in ib_send_mad()
1269 mad_send_wr->payload_mapping, in ib_send_mad()
1276 * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated
1286 int ret = -EINVAL; in ib_post_send_mad()
1293 mad_agent_priv = mad_send_wr->mad_agent_priv; in ib_post_send_mad()
1296 mad_send_wr->send_wr.pkey_index); in ib_post_send_mad()
1300 if (!send_buf->mad_agent->send_handler || in ib_post_send_mad()
1301 (send_buf->timeout_ms && in ib_post_send_mad()
1302 !send_buf->mad_agent->recv_handler)) { in ib_post_send_mad()
1303 ret = -EINVAL; in ib_post_send_mad()
1307 if (!ib_is_mad_class_rmpp(((struct ib_mad_hdr *) send_buf->mad)->mgmt_class)) { in ib_post_send_mad()
1308 if (mad_agent_priv->agent.rmpp_version) { in ib_post_send_mad()
1309 ret = -EINVAL; in ib_post_send_mad()
1319 next_send_buf = send_buf->next; in ib_post_send_mad()
1320 mad_send_wr->send_wr.ah = send_buf->ah; in ib_post_send_mad()
1322 if (((struct ib_mad_hdr *) send_buf->mad)->mgmt_class == in ib_post_send_mad()
1332 mad_send_wr->tid = ((struct ib_mad_hdr *) send_buf->mad)->tid; in ib_post_send_mad()
1334 mad_send_wr->timeout = msecs_to_jiffies(send_buf->timeout_ms); in ib_post_send_mad()
1335 mad_send_wr->max_retries = send_buf->retries; in ib_post_send_mad()
1336 mad_send_wr->retries_left = send_buf->retries; in ib_post_send_mad()
1337 send_buf->retries = 0; in ib_post_send_mad()
1339 mad_send_wr->refcount = 1 + (mad_send_wr->timeout > 0); in ib_post_send_mad()
1340 mad_send_wr->status = IB_WC_SUCCESS; in ib_post_send_mad()
1343 atomic_inc(&mad_agent_priv->refcount); in ib_post_send_mad()
1344 spin_lock_irqsave(&mad_agent_priv->lock, flags); in ib_post_send_mad()
1345 list_add_tail(&mad_send_wr->agent_list, in ib_post_send_mad()
1346 &mad_agent_priv->send_list); in ib_post_send_mad()
1347 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); in ib_post_send_mad()
1349 if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) { in ib_post_send_mad()
1357 spin_lock_irqsave(&mad_agent_priv->lock, flags); in ib_post_send_mad()
1358 list_del(&mad_send_wr->agent_list); in ib_post_send_mad()
1359 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); in ib_post_send_mad()
1360 atomic_dec(&mad_agent_priv->refcount); in ib_post_send_mad()
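A hypothetical follow-on sketch for the send path above: allocate a single (non-RMPP) MAD with ib_create_send_mad(), fill the header, and hand it to ib_post_send_mad(). The management class, TID and timeout values are made up for illustration, and the address handle "ah" is assumed to have been created elsewhere.

#include <linux/err.h>
#include <rdma/ib_mad.h>

static int demo_post_get(struct ib_mad_agent *agent, struct ib_ah *ah,
			 u32 remote_qpn, u16 pkey_index)
{
	struct ib_mad_send_buf *msg;
	struct ib_mad_hdr *hdr;
	int ret;

	msg = ib_create_send_mad(agent, remote_qpn, pkey_index,
				 0,			/* rmpp_active = 0 */
				 IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
				 GFP_KERNEL, IB_MGMT_BASE_VERSION);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	hdr = msg->mad;
	hdr->base_version  = IB_MGMT_BASE_VERSION;
	hdr->mgmt_class    = IB_MGMT_CLASS_PERF_MGMT;	/* example class */
	hdr->class_version = 1;
	hdr->method        = IB_MGMT_METHOD_GET;
	hdr->tid           = cpu_to_be64(0x1234);	/* hypothetical TID */

	msg->ah         = ah;	/* address handle for the target port */
	msg->timeout_ms = 100;	/* expect a response within 100 ms */
	msg->retries    = 3;

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		ib_free_send_mad(msg);	/* never queued; free it ourselves */
	return ret;
}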
1373 * ib_free_recv_mad - Returns data buffers used to receive
1384 list_splice_init(&mad_recv_wc->rmpp_list, &free_list); in ib_free_recv_mad()
1406 return ERR_PTR(-EINVAL); /* XXX: for now */ in ib_redirect_mad_qp()
1413 dev_err(&mad_agent->device->dev, in ib_process_mad_wc()
1419 static int method_in_use(struct ib_mad_mgmt_method_table **method, in method_in_use() argument
1424 for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS) { in method_in_use()
1425 if ((*method)->agent[i]) { in method_in_use()
1426 pr_err("Method %d already in use\n", i); in method_in_use()
1427 return -EINVAL; in method_in_use()
1433 static int allocate_method_table(struct ib_mad_mgmt_method_table **method) in allocate_method_table() argument
1435 /* Allocate management method table */ in allocate_method_table()
1436 *method = kzalloc(sizeof **method, GFP_ATOMIC); in allocate_method_table()
1437 return (*method) ? 0 : (-ENOMEM); in allocate_method_table()
1443 static int check_method_table(struct ib_mad_mgmt_method_table *method) in check_method_table() argument
1448 if (method->agent[i]) in check_method_table()
1454 * Check to see if there are any method tables for this class still in use
1461 if (class->method_table[i]) in check_class_table()
1471 if (vendor_class->method_table[i]) in check_vendor_class()
1483 if (!memcmp(vendor_class->oui[i], oui, 3)) in find_vendor_oui()
1486 return -1; in find_vendor_oui()
1494 if (vendor->vendor_class[i]) in check_vendor_table()
1500 static void remove_methods_mad_agent(struct ib_mad_mgmt_method_table *method, in remove_methods_mad_agent() argument
1507 if (method->agent[i] == agent) { in remove_methods_mad_agent()
1508 method->agent[i] = NULL; in remove_methods_mad_agent()
1519 struct ib_mad_mgmt_method_table **method; in add_nonoui_reg_req() local
1522 port_priv = agent_priv->qp_info->port_priv; in add_nonoui_reg_req()
1523 class = &port_priv->version[mad_reg_req->mgmt_class_version].class; in add_nonoui_reg_req()
1528 ret = -ENOMEM; in add_nonoui_reg_req()
1532 /* Allocate method table for this management class */ in add_nonoui_reg_req()
1533 method = &(*class)->method_table[mgmt_class]; in add_nonoui_reg_req()
1534 if ((ret = allocate_method_table(method))) in add_nonoui_reg_req()
1537 method = &(*class)->method_table[mgmt_class]; in add_nonoui_reg_req()
1538 if (!*method) { in add_nonoui_reg_req()
1539 /* Allocate method table for this management class */ in add_nonoui_reg_req()
1540 if ((ret = allocate_method_table(method))) in add_nonoui_reg_req()
1546 if (method_in_use(method, mad_reg_req)) in add_nonoui_reg_req()
1550 for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS) in add_nonoui_reg_req()
1551 (*method)->agent[i] = agent_priv; in add_nonoui_reg_req()
1557 remove_methods_mad_agent(*method, agent_priv); in add_nonoui_reg_req()
1559 if (!check_method_table(*method)) { in add_nonoui_reg_req()
1560 /* If not, release management method table */ in add_nonoui_reg_req()
1561 kfree(*method); in add_nonoui_reg_req()
1562 *method = NULL; in add_nonoui_reg_req()
1564 ret = -EINVAL; in add_nonoui_reg_req()
1580 struct ib_mad_mgmt_method_table **method; in add_oui_reg_req() local
1581 int i, ret = -ENOMEM; in add_oui_reg_req()
1585 vclass = vendor_class_index(mad_reg_req->mgmt_class); in add_oui_reg_req()
1586 port_priv = agent_priv->qp_info->port_priv; in add_oui_reg_req()
1587 vendor_table = &port_priv->version[ in add_oui_reg_req()
1588 mad_reg_req->mgmt_class_version].vendor; in add_oui_reg_req()
1597 if (!(*vendor_table)->vendor_class[vclass]) { in add_oui_reg_req()
1603 (*vendor_table)->vendor_class[vclass] = vendor_class; in add_oui_reg_req()
1607 if (!memcmp((*vendor_table)->vendor_class[vclass]->oui[i], in add_oui_reg_req()
1608 mad_reg_req->oui, 3)) { in add_oui_reg_req()
1609 method = &(*vendor_table)->vendor_class[ in add_oui_reg_req()
1610 vclass]->method_table[i]; in add_oui_reg_req()
1611 if (!*method) in add_oui_reg_req()
1618 if (!is_vendor_oui((*vendor_table)->vendor_class[ in add_oui_reg_req()
1619 vclass]->oui[i])) { in add_oui_reg_req()
1620 method = &(*vendor_table)->vendor_class[ in add_oui_reg_req()
1621 vclass]->method_table[i]; in add_oui_reg_req()
1622 /* Allocate method table for this OUI */ in add_oui_reg_req()
1623 if (!*method) { in add_oui_reg_req()
1624 ret = allocate_method_table(method); in add_oui_reg_req()
1628 memcpy((*vendor_table)->vendor_class[vclass]->oui[i], in add_oui_reg_req()
1629 mad_reg_req->oui, 3); in add_oui_reg_req()
1633 dev_err(&agent_priv->agent.device->dev, "All OUI slots in use\n"); in add_oui_reg_req()
1638 if (method_in_use(method, mad_reg_req)) in add_oui_reg_req()
1642 for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS) in add_oui_reg_req()
1643 (*method)->agent[i] = agent_priv; in add_oui_reg_req()
1649 remove_methods_mad_agent(*method, agent_priv); in add_oui_reg_req()
1651 if (!check_method_table(*method)) { in add_oui_reg_req()
1652 /* If not, release management method table */ in add_oui_reg_req()
1653 kfree(*method); in add_oui_reg_req()
1654 *method = NULL; in add_oui_reg_req()
1656 ret = -EINVAL; in add_oui_reg_req()
1659 (*vendor_table)->vendor_class[vclass] = NULL; in add_oui_reg_req()
1675 struct ib_mad_mgmt_method_table *method; in remove_mad_reg_req() local
1685 if (!agent_priv->reg_req) { in remove_mad_reg_req()
1689 port_priv = agent_priv->qp_info->port_priv; in remove_mad_reg_req()
1690 mgmt_class = convert_mgmt_class(agent_priv->reg_req->mgmt_class); in remove_mad_reg_req()
1691 class = port_priv->version[ in remove_mad_reg_req()
1692 agent_priv->reg_req->mgmt_class_version].class; in remove_mad_reg_req()
1696 method = class->method_table[mgmt_class]; in remove_mad_reg_req()
1697 if (method) { in remove_mad_reg_req()
1699 remove_methods_mad_agent(method, agent_priv); in remove_mad_reg_req()
1701 if (!check_method_table(method)) { in remove_mad_reg_req()
1702 /* If not, release management method table */ in remove_mad_reg_req()
1703 kfree(method); in remove_mad_reg_req()
1704 class->method_table[mgmt_class] = NULL; in remove_mad_reg_req()
1709 port_priv->version[ in remove_mad_reg_req()
1710 agent_priv->reg_req-> in remove_mad_reg_req()
1721 mgmt_class = vendor_class_index(agent_priv->reg_req->mgmt_class); in remove_mad_reg_req()
1722 vendor = port_priv->version[ in remove_mad_reg_req()
1723 agent_priv->reg_req->mgmt_class_version].vendor; in remove_mad_reg_req()
1728 vendor_class = vendor->vendor_class[mgmt_class]; in remove_mad_reg_req()
1730 index = find_vendor_oui(vendor_class, agent_priv->reg_req->oui); in remove_mad_reg_req()
1733 method = vendor_class->method_table[index]; in remove_mad_reg_req()
1734 if (method) { in remove_mad_reg_req()
1736 remove_methods_mad_agent(method, agent_priv); in remove_mad_reg_req()
1741 if (!check_method_table(method)) { in remove_mad_reg_req()
1742 /* If not, release management method table */ in remove_mad_reg_req()
1743 kfree(method); in remove_mad_reg_req()
1744 vendor_class->method_table[index] = NULL; in remove_mad_reg_req()
1745 memset(vendor_class->oui[index], 0, 3); in remove_mad_reg_req()
1750 vendor->vendor_class[mgmt_class] = NULL; in remove_mad_reg_req()
1754 port_priv->version[ in remove_mad_reg_req()
1755 agent_priv->reg_req-> in remove_mad_reg_req()
1782 hi_tid = be64_to_cpu(mad_hdr->tid) >> 32; in find_mad_agent()
1785 if (mad_agent && !atomic_inc_not_zero(&mad_agent->refcount)) in find_mad_agent()
1790 struct ib_mad_mgmt_method_table *method; in find_mad_agent() local
1796 spin_lock_irqsave(&port_priv->reg_lock, flags); in find_mad_agent()
1798 * Routing is based on version, class, and method in find_mad_agent()
1801 if (mad_hdr->class_version >= MAX_MGMT_VERSION) in find_mad_agent()
1803 if (!is_vendor_class(mad_hdr->mgmt_class)) { in find_mad_agent()
1804 class = port_priv->version[ in find_mad_agent()
1805 mad_hdr->class_version].class; in find_mad_agent()
1808 if (convert_mgmt_class(mad_hdr->mgmt_class) >= in find_mad_agent()
1809 ARRAY_SIZE(class->method_table)) in find_mad_agent()
1811 method = class->method_table[convert_mgmt_class( in find_mad_agent()
1812 mad_hdr->mgmt_class)]; in find_mad_agent()
1813 if (method) in find_mad_agent()
1814 mad_agent = method->agent[mad_hdr->method & in find_mad_agent()
1817 vendor = port_priv->version[ in find_mad_agent()
1818 mad_hdr->class_version].vendor; in find_mad_agent()
1821 vendor_class = vendor->vendor_class[vendor_class_index( in find_mad_agent()
1822 mad_hdr->mgmt_class)]; in find_mad_agent()
1827 index = find_vendor_oui(vendor_class, vendor_mad->oui); in find_mad_agent()
1828 if (index == -1) in find_mad_agent()
1830 method = vendor_class->method_table[index]; in find_mad_agent()
1831 if (method) { in find_mad_agent()
1832 mad_agent = method->agent[mad_hdr->method & in find_mad_agent()
1837 atomic_inc(&mad_agent->refcount); in find_mad_agent()
1839 spin_unlock_irqrestore(&port_priv->reg_lock, flags); in find_mad_agent()
1842 if (mad_agent && !mad_agent->agent.recv_handler) { in find_mad_agent()
1843 dev_notice(&port_priv->device->dev, in find_mad_agent()
1845 &mad_agent->agent, port_priv->port_num); in find_mad_agent()
1858 u32 qp_num = qp_info->qp->qp_num; in validate_mad()
1861 if (mad_hdr->base_version != IB_MGMT_BASE_VERSION && in validate_mad()
1862 (!opa || mad_hdr->base_version != OPA_MGMT_BASE_VERSION)) { in validate_mad()
1864 mad_hdr->base_version, opa ? "(opa)" : ""); in validate_mad()
1869 if ((mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) || in validate_mad()
1870 (mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) { in validate_mad()
1874 /* CM attributes other than ClassPortInfo only use Send method */ in validate_mad()
1875 if ((mad_hdr->mgmt_class == IB_MGMT_CLASS_CM) && in validate_mad()
1876 (mad_hdr->attr_id != IB_MGMT_CLASSPORTINFO_ATTR_ID) && in validate_mad()
1877 (mad_hdr->method != IB_MGMT_METHOD_SEND)) in validate_mad()
1894 return !mad_agent_priv->agent.rmpp_version || in is_rmpp_data_mad()
1895 !ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent) || in is_rmpp_data_mad()
1896 !(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & in is_rmpp_data_mad()
1898 (rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_DATA); in is_rmpp_data_mad()
1904 return ((struct ib_mad_hdr *)(wr->send_buf.mad))->mgmt_class == in rcv_has_same_class()
1905 rwc->recv_buf.mad->mad_hdr.mgmt_class; in rcv_has_same_class()
1915 struct ib_device *device = mad_agent_priv->agent.device; in rcv_has_same_gid()
1916 u8 port_num = mad_agent_priv->agent.port_num; in rcv_has_same_gid()
1920 send_resp = ib_response_mad((struct ib_mad_hdr *)wr->send_buf.mad); in rcv_has_same_gid()
1921 rcv_resp = ib_response_mad(&rwc->recv_buf.mad->mad_hdr); in rcv_has_same_gid()
1927 if (rdma_query_ah(wr->send_buf.ah, &attr)) in rcv_has_same_gid()
1932 if (has_grh != !!(rwc->wc->wc_flags & IB_WC_GRH)) in rcv_has_same_gid()
1942 rwc->wc->dlid_path_bits) & in rcv_has_same_gid()
1943 ((1 << lmc) - 1))); in rcv_has_same_gid()
1949 grh->sgid_index, &sgid)) in rcv_has_same_gid()
1951 return !memcmp(sgid.raw, rwc->recv_buf.grh->dgid.raw, in rcv_has_same_gid()
1957 return rdma_ah_get_dlid(&attr) == rwc->wc->slid; in rcv_has_same_gid()
1959 return !memcmp(rdma_ah_read_grh(&attr)->dgid.raw, in rcv_has_same_gid()
1960 rwc->recv_buf.grh->sgid.raw, in rcv_has_same_gid()
1976 mad_hdr = &wc->recv_buf.mad->mad_hdr; in ib_find_send_mad()
1978 list_for_each_entry(wr, &mad_agent_priv->wait_list, agent_list) { in ib_find_send_mad()
1979 if ((wr->tid == mad_hdr->tid) && in ib_find_send_mad()
1985 (is_direct(mad_hdr->mgmt_class) || in ib_find_send_mad()
1987 return (wr->status == IB_WC_SUCCESS) ? wr : NULL; in ib_find_send_mad()
1994 list_for_each_entry(wr, &mad_agent_priv->send_list, agent_list) { in ib_find_send_mad()
1995 if (is_rmpp_data_mad(mad_agent_priv, wr->send_buf.mad) && in ib_find_send_mad()
1996 wr->tid == mad_hdr->tid && in ib_find_send_mad()
1997 wr->timeout && in ib_find_send_mad()
2003 (is_direct(mad_hdr->mgmt_class) || in ib_find_send_mad()
2006 return (wr->status == IB_WC_SUCCESS) ? wr : NULL; in ib_find_send_mad()
2013 mad_send_wr->timeout = 0; in ib_mark_mad_done()
2014 if (mad_send_wr->refcount == 1) in ib_mark_mad_done()
2015 list_move_tail(&mad_send_wr->agent_list, in ib_mark_mad_done()
2016 &mad_send_wr->mad_agent_priv->done_list); in ib_mark_mad_done()
2027 INIT_LIST_HEAD(&mad_recv_wc->rmpp_list); in ib_mad_complete_recv()
2029 mad_recv_wc->wc->pkey_index); in ib_mad_complete_recv()
2036 list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list); in ib_mad_complete_recv()
2037 if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) { in ib_mad_complete_recv()
2047 if (ib_response_mad(&mad_recv_wc->recv_buf.mad->mad_hdr)) { in ib_mad_complete_recv()
2048 spin_lock_irqsave(&mad_agent_priv->lock, flags); in ib_mad_complete_recv()
2051 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); in ib_mad_complete_recv()
2052 if (!ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent) in ib_mad_complete_recv()
2053 && ib_is_mad_class_rmpp(mad_recv_wc->recv_buf.mad->mad_hdr.mgmt_class) in ib_mad_complete_recv()
2054 && (ib_get_rmpp_flags(&((struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad)->rmpp_hdr) in ib_mad_complete_recv()
2059 mad_agent_priv->agent.recv_handler( in ib_mad_complete_recv()
2060 &mad_agent_priv->agent, NULL, in ib_mad_complete_recv()
2062 atomic_dec(&mad_agent_priv->refcount); in ib_mad_complete_recv()
2072 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); in ib_mad_complete_recv()
2075 mad_agent_priv->agent.recv_handler( in ib_mad_complete_recv()
2076 &mad_agent_priv->agent, in ib_mad_complete_recv()
2077 &mad_send_wr->send_buf, in ib_mad_complete_recv()
2079 atomic_dec(&mad_agent_priv->refcount); in ib_mad_complete_recv()
2083 mad_send_wc.send_buf = &mad_send_wr->send_buf; in ib_mad_complete_recv()
2087 mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent, NULL, in ib_mad_complete_recv()
2103 struct ib_smp *smp = (struct ib_smp *)recv->mad; in handle_ib_smi()
2108 rdma_cap_ib_switch(port_priv->device), in handle_ib_smi()
2110 port_priv->device->phys_port_cnt) == in handle_ib_smi()
2120 rdma_cap_ib_switch(port_priv->device), in handle_ib_smi()
2124 if (smi_check_local_smp(smp, port_priv->device) == IB_SMI_DISCARD) in handle_ib_smi()
2126 } else if (rdma_cap_ib_switch(port_priv->device)) { in handle_ib_smi()
2129 response->header.recv_wc.wc = &response->header.wc; in handle_ib_smi()
2130 response->header.recv_wc.recv_buf.mad = (struct ib_mad *)response->mad; in handle_ib_smi()
2131 response->header.recv_wc.recv_buf.grh = &response->grh; in handle_ib_smi()
2133 agent_send_response((const struct ib_mad_hdr *)response->mad, in handle_ib_smi()
2134 &response->grh, wc, in handle_ib_smi()
2135 port_priv->device, in handle_ib_smi()
2137 qp_info->qp->qp_num, in handle_ib_smi()
2138 response->mad_size, in handle_ib_smi()
2150 const struct ib_mad_hdr *recv_hdr = (const struct ib_mad_hdr *)recv->mad; in generate_unmatched_resp()
2151 struct ib_mad_hdr *resp_hdr = (struct ib_mad_hdr *)response->mad; in generate_unmatched_resp()
2153 if (recv_hdr->method == IB_MGMT_METHOD_GET || in generate_unmatched_resp()
2154 recv_hdr->method == IB_MGMT_METHOD_SET) { in generate_unmatched_resp()
2156 response->header.recv_wc.wc = &response->header.wc; in generate_unmatched_resp()
2157 response->header.recv_wc.recv_buf.mad = (struct ib_mad *)response->mad; in generate_unmatched_resp()
2158 response->header.recv_wc.recv_buf.grh = &response->grh; in generate_unmatched_resp()
2159 resp_hdr->method = IB_MGMT_METHOD_GET_RESP; in generate_unmatched_resp()
2160 resp_hdr->status = cpu_to_be16(IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD_ATTRIB); in generate_unmatched_resp()
2161 if (recv_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) in generate_unmatched_resp()
2162 resp_hdr->status |= IB_SMP_DIRECTION; in generate_unmatched_resp()
2164 if (opa && recv_hdr->base_version == OPA_MGMT_BASE_VERSION) { in generate_unmatched_resp()
2165 if (recv_hdr->mgmt_class == in generate_unmatched_resp()
2167 recv_hdr->mgmt_class == in generate_unmatched_resp()
2170 (struct opa_smp *)recv->mad); in generate_unmatched_resp()
2190 struct opa_smp *smp = (struct opa_smp *)recv->mad; in handle_opa_smi()
2195 rdma_cap_ib_switch(port_priv->device), in handle_opa_smi()
2197 port_priv->device->phys_port_cnt) == in handle_opa_smi()
2207 rdma_cap_ib_switch(port_priv->device), in handle_opa_smi()
2211 if (opa_smi_check_local_smp(smp, port_priv->device) == in handle_opa_smi()
2215 } else if (rdma_cap_ib_switch(port_priv->device)) { in handle_opa_smi()
2218 response->header.recv_wc.wc = &response->header.wc; in handle_opa_smi()
2219 response->header.recv_wc.recv_buf.opa_mad = in handle_opa_smi()
2220 (struct opa_mad *)response->mad; in handle_opa_smi()
2221 response->header.recv_wc.recv_buf.grh = &response->grh; in handle_opa_smi()
2223 agent_send_response((const struct ib_mad_hdr *)response->mad, in handle_opa_smi()
2224 &response->grh, wc, in handle_opa_smi()
2225 port_priv->device, in handle_opa_smi()
2227 qp_info->qp->qp_num, in handle_opa_smi()
2228 recv->header.wc.byte_len, in handle_opa_smi()
2246 struct ib_mad_hdr *mad_hdr = (struct ib_mad_hdr *)recv->mad; in handle_smi()
2248 if (opa && mad_hdr->base_version == OPA_MGMT_BASE_VERSION && in handle_smi()
2249 mad_hdr->class_version == OPA_SM_CLASS_VERSION) in handle_smi()
2258 struct ib_mad_port_private *port_priv = cq->cq_context; in ib_mad_recv_done()
2260 container_of(wc->wr_cqe, struct ib_mad_list_head, cqe); in ib_mad_recv_done()
2271 if (list_empty_careful(&port_priv->port_list)) in ib_mad_recv_done()
2274 if (wc->status != IB_WC_SUCCESS) { in ib_mad_recv_done()
2277 * state - error handling/shutdown code will cleanup in ib_mad_recv_done()
2282 qp_info = mad_list->mad_queue->qp_info; in ib_mad_recv_done()
2285 opa = rdma_cap_opa_mad(qp_info->port_priv->device, in ib_mad_recv_done()
2286 qp_info->port_priv->port_num); in ib_mad_recv_done()
2291 ib_dma_unmap_single(port_priv->device, in ib_mad_recv_done()
2292 recv->header.mapping, in ib_mad_recv_done()
2297 recv->header.wc = *wc; in ib_mad_recv_done()
2298 recv->header.recv_wc.wc = &recv->header.wc; in ib_mad_recv_done()
2300 if (opa && ((struct ib_mad_hdr *)(recv->mad))->base_version == OPA_MGMT_BASE_VERSION) { in ib_mad_recv_done()
2301 recv->header.recv_wc.mad_len = wc->byte_len - sizeof(struct ib_grh); in ib_mad_recv_done()
2302 recv->header.recv_wc.mad_seg_size = sizeof(struct opa_mad); in ib_mad_recv_done()
2304 recv->header.recv_wc.mad_len = sizeof(struct ib_mad); in ib_mad_recv_done()
2305 recv->header.recv_wc.mad_seg_size = sizeof(struct ib_mad); in ib_mad_recv_done()
2308 recv->header.recv_wc.recv_buf.mad = (struct ib_mad *)recv->mad; in ib_mad_recv_done()
2309 recv->header.recv_wc.recv_buf.grh = &recv->grh; in ib_mad_recv_done()
2311 if (atomic_read(&qp_info->snoop_count)) in ib_mad_recv_done()
2312 snoop_recv(qp_info, &recv->header.recv_wc, IB_MAD_SNOOP_RECVS); in ib_mad_recv_done()
2315 if (!validate_mad((const struct ib_mad_hdr *)recv->mad, qp_info, opa)) in ib_mad_recv_done()
2319 (struct ib_mad_hdr *)recv->mad); in ib_mad_recv_done()
2321 mad_size = recv->mad_size; in ib_mad_recv_done()
2326 if (rdma_cap_ib_switch(port_priv->device)) in ib_mad_recv_done()
2327 port_num = wc->port_num; in ib_mad_recv_done()
2329 port_num = port_priv->port_num; in ib_mad_recv_done()
2331 if (((struct ib_mad_hdr *)recv->mad)->mgmt_class == in ib_mad_recv_done()
2340 if (port_priv->device->ops.process_mad) { in ib_mad_recv_done()
2341 ret = port_priv->device->ops.process_mad( in ib_mad_recv_done()
2342 port_priv->device, 0, port_priv->port_num, wc, in ib_mad_recv_done()
2343 &recv->grh, (const struct ib_mad_hdr *)recv->mad, in ib_mad_recv_done()
2344 recv->mad_size, (struct ib_mad_hdr *)response->mad, in ib_mad_recv_done()
2348 wc->pkey_index = resp_mad_pkey_index; in ib_mad_recv_done()
2354 agent_send_response((const struct ib_mad_hdr *)response->mad, in ib_mad_recv_done()
2355 &recv->grh, wc, in ib_mad_recv_done()
2356 port_priv->device, in ib_mad_recv_done()
2358 qp_info->qp->qp_num, in ib_mad_recv_done()
2365 mad_agent = find_mad_agent(port_priv, (const struct ib_mad_hdr *)recv->mad); in ib_mad_recv_done()
2368 ib_mad_complete_recv(mad_agent, &recv->header.recv_wc); in ib_mad_recv_done()
2376 agent_send_response((const struct ib_mad_hdr *)response->mad, &recv->grh, wc, in ib_mad_recv_done()
2377 port_priv->device, port_num, in ib_mad_recv_done()
2378 qp_info->qp->qp_num, mad_size, opa); in ib_mad_recv_done()
2395 if (list_empty(&mad_agent_priv->wait_list)) { in adjust_timeout()
2396 cancel_delayed_work(&mad_agent_priv->timed_work); in adjust_timeout()
2398 mad_send_wr = list_entry(mad_agent_priv->wait_list.next, in adjust_timeout()
2402 if (time_after(mad_agent_priv->timeout, in adjust_timeout()
2403 mad_send_wr->timeout)) { in adjust_timeout()
2404 mad_agent_priv->timeout = mad_send_wr->timeout; in adjust_timeout()
2405 delay = mad_send_wr->timeout - jiffies; in adjust_timeout()
2408 mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq, in adjust_timeout()
2409 &mad_agent_priv->timed_work, delay); in adjust_timeout()
2421 mad_agent_priv = mad_send_wr->mad_agent_priv; in wait_for_response()
2422 list_del(&mad_send_wr->agent_list); in wait_for_response()
2424 delay = mad_send_wr->timeout; in wait_for_response()
2425 mad_send_wr->timeout += jiffies; in wait_for_response()
2428 list_for_each_prev(list_item, &mad_agent_priv->wait_list) { in wait_for_response()
2432 if (time_after(mad_send_wr->timeout, in wait_for_response()
2433 temp_mad_send_wr->timeout)) in wait_for_response()
2438 list_item = &mad_agent_priv->wait_list; in wait_for_response()
2439 list_add(&mad_send_wr->agent_list, list_item); in wait_for_response()
2442 if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list) in wait_for_response()
2443 mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq, in wait_for_response()
2444 &mad_agent_priv->timed_work, delay); in wait_for_response()
2450 mad_send_wr->timeout = msecs_to_jiffies(timeout_ms); in ib_reset_mad_timeout()
2464 mad_agent_priv = mad_send_wr->mad_agent_priv; in ib_mad_complete_send_wr()
2465 spin_lock_irqsave(&mad_agent_priv->lock, flags); in ib_mad_complete_send_wr()
2466 if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) { in ib_mad_complete_send_wr()
2473 if (mad_send_wc->status != IB_WC_SUCCESS && in ib_mad_complete_send_wr()
2474 mad_send_wr->status == IB_WC_SUCCESS) { in ib_mad_complete_send_wr()
2475 mad_send_wr->status = mad_send_wc->status; in ib_mad_complete_send_wr()
2476 mad_send_wr->refcount -= (mad_send_wr->timeout > 0); in ib_mad_complete_send_wr()
2479 if (--mad_send_wr->refcount > 0) { in ib_mad_complete_send_wr()
2480 if (mad_send_wr->refcount == 1 && mad_send_wr->timeout && in ib_mad_complete_send_wr()
2481 mad_send_wr->status == IB_WC_SUCCESS) { in ib_mad_complete_send_wr()
2488 list_del(&mad_send_wr->agent_list); in ib_mad_complete_send_wr()
2490 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); in ib_mad_complete_send_wr()
2492 if (mad_send_wr->status != IB_WC_SUCCESS ) in ib_mad_complete_send_wr()
2493 mad_send_wc->status = mad_send_wr->status; in ib_mad_complete_send_wr()
2497 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent, in ib_mad_complete_send_wr()
2504 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); in ib_mad_complete_send_wr()
2509 struct ib_mad_port_private *port_priv = cq->cq_context; in ib_mad_send_done()
2511 container_of(wc->wr_cqe, struct ib_mad_list_head, cqe); in ib_mad_send_done()
2519 if (list_empty_careful(&port_priv->port_list)) in ib_mad_send_done()
2522 if (wc->status != IB_WC_SUCCESS) { in ib_mad_send_done()
2529 send_queue = mad_list->mad_queue; in ib_mad_send_done()
2530 qp_info = send_queue->qp_info; in ib_mad_send_done()
2532 trace_ib_mad_send_done_agent(mad_send_wr->mad_agent_priv); in ib_mad_send_done()
2536 ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device, in ib_mad_send_done()
2537 mad_send_wr->header_mapping, in ib_mad_send_done()
2538 mad_send_wr->sg_list[0].length, DMA_TO_DEVICE); in ib_mad_send_done()
2539 ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device, in ib_mad_send_done()
2540 mad_send_wr->payload_mapping, in ib_mad_send_done()
2541 mad_send_wr->sg_list[1].length, DMA_TO_DEVICE); in ib_mad_send_done()
2543 spin_lock_irqsave(&send_queue->lock, flags); in ib_mad_send_done()
2544 list_del(&mad_list->list); in ib_mad_send_done()
2547 if (send_queue->count-- > send_queue->max_active) { in ib_mad_send_done()
2548 mad_list = container_of(qp_info->overflow_list.next, in ib_mad_send_done()
2553 list_move_tail(&mad_list->list, &send_queue->list); in ib_mad_send_done()
2555 spin_unlock_irqrestore(&send_queue->lock, flags); in ib_mad_send_done()
2557 mad_send_wc.send_buf = &mad_send_wr->send_buf; in ib_mad_send_done()
2558 mad_send_wc.status = wc->status; in ib_mad_send_done()
2559 mad_send_wc.vendor_err = wc->vendor_err; in ib_mad_send_done()
2560 if (atomic_read(&qp_info->snoop_count)) in ib_mad_send_done()
2561 snoop_send(qp_info, &mad_send_wr->send_buf, &mad_send_wc, in ib_mad_send_done()
2567 ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr.wr, in ib_mad_send_done()
2570 dev_err(&port_priv->device->dev, in ib_mad_send_done()
2573 wc->status = IB_WC_LOC_QP_OP_ERR; in ib_mad_send_done()
2585 spin_lock_irqsave(&qp_info->send_queue.lock, flags); in mark_sends_for_retry()
2586 list_for_each_entry(mad_list, &qp_info->send_queue.list, list) { in mark_sends_for_retry()
2590 mad_send_wr->retry = 1; in mark_sends_for_retry()
2592 spin_unlock_irqrestore(&qp_info->send_queue.lock, flags); in mark_sends_for_retry()
2599 container_of(wc->wr_cqe, struct ib_mad_list_head, cqe); in ib_mad_send_error()
2600 struct ib_mad_qp_info *qp_info = mad_list->mad_queue->qp_info; in ib_mad_send_error()
2605 * Send errors will transition the QP to SQE - move in ib_mad_send_error()
2610 if (wc->status == IB_WC_WR_FLUSH_ERR) { in ib_mad_send_error()
2611 if (mad_send_wr->retry) { in ib_mad_send_error()
2613 mad_send_wr->retry = 0; in ib_mad_send_error()
2615 ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr.wr, in ib_mad_send_error()
2626 attr->qp_state = IB_QPS_RTS; in ib_mad_send_error()
2627 attr->cur_qp_state = IB_QPS_SQE; in ib_mad_send_error()
2628 ret = ib_modify_qp(qp_info->qp, attr, in ib_mad_send_error()
2632 dev_err(&port_priv->device->dev, in ib_mad_send_error()
2633 "%s - ib_modify_qp to RTS: %d\n", in ib_mad_send_error()
2652 spin_lock_irqsave(&mad_agent_priv->lock, flags); in cancel_mads()
2654 &mad_agent_priv->send_list, agent_list) { in cancel_mads()
2655 if (mad_send_wr->status == IB_WC_SUCCESS) { in cancel_mads()
2656 mad_send_wr->status = IB_WC_WR_FLUSH_ERR; in cancel_mads()
2657 mad_send_wr->refcount -= (mad_send_wr->timeout > 0); in cancel_mads()
2662 list_splice_init(&mad_agent_priv->wait_list, &cancel_list); in cancel_mads()
2663 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); in cancel_mads()
2671 mad_send_wc.send_buf = &mad_send_wr->send_buf; in cancel_mads()
2672 list_del(&mad_send_wr->agent_list); in cancel_mads()
2673 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent, in cancel_mads()
2675 atomic_dec(&mad_agent_priv->refcount); in cancel_mads()
2685 list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list, in find_send_wr()
2687 if (&mad_send_wr->send_buf == send_buf) in find_send_wr()
2691 list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list, in find_send_wr()
2694 mad_send_wr->send_buf.mad) && in find_send_wr()
2695 &mad_send_wr->send_buf == send_buf) in find_send_wr()
2711 spin_lock_irqsave(&mad_agent_priv->lock, flags); in ib_modify_mad()
2713 if (!mad_send_wr || mad_send_wr->status != IB_WC_SUCCESS) { in ib_modify_mad()
2714 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); in ib_modify_mad()
2715 return -EINVAL; in ib_modify_mad()
2718 active = (!mad_send_wr->timeout || mad_send_wr->refcount > 1); in ib_modify_mad()
2720 mad_send_wr->status = IB_WC_WR_FLUSH_ERR; in ib_modify_mad()
2721 mad_send_wr->refcount -= (mad_send_wr->timeout > 0); in ib_modify_mad()
2724 mad_send_wr->send_buf.timeout_ms = timeout_ms; in ib_modify_mad()
2726 mad_send_wr->timeout = msecs_to_jiffies(timeout_ms); in ib_modify_mad()
2730 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); in ib_modify_mad()
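A hypothetical caller of the routine above. In this vintage of the MAD layer both ib_modify_mad() and ib_cancel_mad() still take the agent as their first argument (later kernels dropped it); a canceled send is completed through the agent's send handler with status IB_WC_WR_FLUSH_ERR, as set just above.

#include <rdma/ib_mad.h>

static void demo_shorten_or_cancel(struct ib_mad_agent *agent,
				   struct ib_mad_send_buf *msg,
				   bool abort)
{
	if (abort)
		ib_cancel_mad(agent, msg);	/* wrapper for ib_modify_mad(..., 0) */
	else
		ib_modify_mad(agent, msg, 50);	/* rearm with a 50 ms timeout */
}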
2756 opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device, in local_completions()
2757 mad_agent_priv->qp_info->port_priv->port_num); in local_completions()
2759 spin_lock_irqsave(&mad_agent_priv->lock, flags); in local_completions()
2760 while (!list_empty(&mad_agent_priv->local_list)) { in local_completions()
2761 local = list_entry(mad_agent_priv->local_list.next, in local_completions()
2764 list_del(&local->completion_list); in local_completions()
2765 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); in local_completions()
2767 if (local->mad_priv) { in local_completions()
2769 recv_mad_agent = local->recv_mad_agent; in local_completions()
2771 dev_err(&mad_agent_priv->agent.device->dev, in local_completions()
2781 build_smp_wc(recv_mad_agent->agent.qp, in local_completions()
2782 local->mad_send_wr->send_wr.wr.wr_cqe, in local_completions()
2784 local->mad_send_wr->send_wr.pkey_index, in local_completions()
2785 recv_mad_agent->agent.port_num, &wc); in local_completions()
2787 local->mad_priv->header.recv_wc.wc = &wc; in local_completions()
2789 base_version = ((struct ib_mad_hdr *)(local->mad_priv->mad))->base_version; in local_completions()
2791 local->mad_priv->header.recv_wc.mad_len = local->return_wc_byte_len; in local_completions()
2792 local->mad_priv->header.recv_wc.mad_seg_size = sizeof(struct opa_mad); in local_completions()
2794 local->mad_priv->header.recv_wc.mad_len = sizeof(struct ib_mad); in local_completions()
2795 local->mad_priv->header.recv_wc.mad_seg_size = sizeof(struct ib_mad); in local_completions()
2798 INIT_LIST_HEAD(&local->mad_priv->header.recv_wc.rmpp_list); in local_completions()
2799 list_add(&local->mad_priv->header.recv_wc.recv_buf.list, in local_completions()
2800 &local->mad_priv->header.recv_wc.rmpp_list); in local_completions()
2801 local->mad_priv->header.recv_wc.recv_buf.grh = NULL; in local_completions()
2802 local->mad_priv->header.recv_wc.recv_buf.mad = in local_completions()
2803 (struct ib_mad *)local->mad_priv->mad; in local_completions()
2804 if (atomic_read(&recv_mad_agent->qp_info->snoop_count)) in local_completions()
2805 snoop_recv(recv_mad_agent->qp_info, in local_completions()
2806 &local->mad_priv->header.recv_wc, in local_completions()
2808 recv_mad_agent->agent.recv_handler( in local_completions()
2809 &recv_mad_agent->agent, in local_completions()
2810 &local->mad_send_wr->send_buf, in local_completions()
2811 &local->mad_priv->header.recv_wc); in local_completions()
2812 spin_lock_irqsave(&recv_mad_agent->lock, flags); in local_completions()
2813 atomic_dec(&recv_mad_agent->refcount); in local_completions()
2814 spin_unlock_irqrestore(&recv_mad_agent->lock, flags); in local_completions()
2821 mad_send_wc.send_buf = &local->mad_send_wr->send_buf; in local_completions()
2822 if (atomic_read(&mad_agent_priv->qp_info->snoop_count)) in local_completions()
2823 snoop_send(mad_agent_priv->qp_info, in local_completions()
2824 &local->mad_send_wr->send_buf, in local_completions()
2826 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent, in local_completions()
2829 spin_lock_irqsave(&mad_agent_priv->lock, flags); in local_completions()
2830 atomic_dec(&mad_agent_priv->refcount); in local_completions()
2832 kfree(local->mad_priv); in local_completions()
2835 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); in local_completions()
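/*
 * Sketch of the synthetic work completion used for locally routed MADs.
 * local_completions() above hands build_smp_wc() the QP, the send's
 * ib_cqe, the pkey index and the port; the remaining field values below
 * are illustrative assumptions about what a loop-back "receive" has to
 * look like before it is passed to the receiving agent's recv_handler.
 */
static void synth_loopback_wc(struct ib_qp *qp, struct ib_cqe *cqe,
			      u16 pkey_index, u8 port_num, struct ib_wc *wc)
{
	memset(wc, 0, sizeof(*wc));
	wc->wr_cqe     = cqe;                 /* ties the wc back to the send */
	wc->status     = IB_WC_SUCCESS;
	wc->opcode     = IB_WC_RECV;          /* presented as a receive */
	wc->pkey_index = pkey_index;
	wc->byte_len   = sizeof(struct ib_mad) + sizeof(struct ib_grh);
	wc->qp         = qp;
	wc->port_num   = port_num;
}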
2842 if (!mad_send_wr->retries_left) in retry_send()
2843 return -ETIMEDOUT; in retry_send()
2845 mad_send_wr->retries_left--; in retry_send()
2846 mad_send_wr->send_buf.retries++; in retry_send()
2848 mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms); in retry_send()
2850 if (ib_mad_kernel_rmpp_agent(&mad_send_wr->mad_agent_priv->agent)) { in retry_send()
2860 ret = -ECOMM; in retry_send()
2867 mad_send_wr->refcount++; in retry_send()
2868 list_add_tail(&mad_send_wr->agent_list, in retry_send()
2869 &mad_send_wr->mad_agent_priv->send_list); in retry_send()
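/*
 * Rough sketch of the retry bookkeeping in retry_send(): the attempt
 * budget and the client-visible retry counter are updated, the jiffies
 * deadline is recomputed, and the request rejoins the send list with an
 * extra reference.  The actual re-post (ib_send_mad(), or the RMPP resend
 * path for RMPP agents) is elided here.
 */
static int example_retry_bookkeeping(struct ib_mad_send_wr_private *wr)
{
	if (!wr->retries_left)
		return -ETIMEDOUT;                     /* budget exhausted */

	wr->retries_left--;
	wr->send_buf.retries++;                        /* visible to the client */
	wr->timeout = msecs_to_jiffies(wr->send_buf.timeout_ms);

	/* ... re-post the work request here; on failure return -ECOMM ... */

	wr->refcount++;
	list_add_tail(&wr->agent_list, &wr->mad_agent_priv->send_list);
	return 0;
}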
2885 spin_lock_irqsave(&mad_agent_priv->lock, flags); in timeout_sends()
2886 while (!list_empty(&mad_agent_priv->wait_list)) { in timeout_sends()
2887 mad_send_wr = list_entry(mad_agent_priv->wait_list.next, in timeout_sends()
2891 if (time_after(mad_send_wr->timeout, jiffies)) { in timeout_sends()
2892 delay = mad_send_wr->timeout - jiffies; in timeout_sends()
2895 queue_delayed_work(mad_agent_priv->qp_info-> in timeout_sends()
2896 port_priv->wq, in timeout_sends()
2897 &mad_agent_priv->timed_work, delay); in timeout_sends()
2901 list_del(&mad_send_wr->agent_list); in timeout_sends()
2902 if (mad_send_wr->status == IB_WC_SUCCESS && in timeout_sends()
2906 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); in timeout_sends()
2908 if (mad_send_wr->status == IB_WC_SUCCESS) in timeout_sends()
2911 mad_send_wc.status = mad_send_wr->status; in timeout_sends()
2912 mad_send_wc.send_buf = &mad_send_wr->send_buf; in timeout_sends()
2913 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent, in timeout_sends()
2916 atomic_dec(&mad_agent_priv->refcount); in timeout_sends()
2917 spin_lock_irqsave(&mad_agent_priv->lock, flags); in timeout_sends()
2919 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); in timeout_sends()
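/*
 * The wait list is kept in deadline order, so timeout_sends() only walks
 * until the first entry whose deadline lies in the future and then re-arms
 * itself for exactly the residual time.  A generic sketch of that pattern
 * (the list, work and lock arguments are stand-ins for the agent's own
 * fields):
 */
static void expire_waiters(struct workqueue_struct *wq,
			   struct delayed_work *timed_work,
			   struct list_head *wait_list, spinlock_t *lock)
{
	struct ib_mad_send_wr_private *wr;
	unsigned long flags, delay;

	spin_lock_irqsave(lock, flags);
	while (!list_empty(wait_list)) {
		wr = list_entry(wait_list->next,
				struct ib_mad_send_wr_private, agent_list);

		if (time_after(wr->timeout, jiffies)) {
			/* earliest deadline not reached yet: sleep until it is */
			delay = wr->timeout - jiffies;
			queue_delayed_work(wq, timed_work, delay);
			break;
		}

		list_del(&wr->agent_list);
		spin_unlock_irqrestore(lock, flags);
		/* ... report IB_WC_RESP_TIMEOUT_ERR to the agent here ... */
		spin_lock_irqsave(lock, flags);
	}
	spin_unlock_irqrestore(lock, flags);
}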
2933 struct ib_mad_queue *recv_queue = &qp_info->recv_queue; in ib_mad_post_receive_mads()
2936 sg_list.lkey = qp_info->port_priv->pd->local_dma_lkey; in ib_mad_post_receive_mads()
2949 mad_priv = alloc_mad_private(port_mad_size(qp_info->port_priv), in ib_mad_post_receive_mads()
2952 ret = -ENOMEM; in ib_mad_post_receive_mads()
2957 sg_list.addr = ib_dma_map_single(qp_info->port_priv->device, in ib_mad_post_receive_mads()
2958 &mad_priv->grh, in ib_mad_post_receive_mads()
2961 if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device, in ib_mad_post_receive_mads()
2963 ret = -ENOMEM; in ib_mad_post_receive_mads()
2966 mad_priv->header.mapping = sg_list.addr; in ib_mad_post_receive_mads()
2967 mad_priv->header.mad_list.mad_queue = recv_queue; in ib_mad_post_receive_mads()
2968 mad_priv->header.mad_list.cqe.done = ib_mad_recv_done; in ib_mad_post_receive_mads()
2969 recv_wr.wr_cqe = &mad_priv->header.mad_list.cqe; in ib_mad_post_receive_mads()
2972 spin_lock_irqsave(&recv_queue->lock, flags); in ib_mad_post_receive_mads()
2973 post = (++recv_queue->count < recv_queue->max_active); in ib_mad_post_receive_mads()
2974 list_add_tail(&mad_priv->header.mad_list.list, &recv_queue->list); in ib_mad_post_receive_mads()
2975 spin_unlock_irqrestore(&recv_queue->lock, flags); in ib_mad_post_receive_mads()
2976 ret = ib_post_recv(qp_info->qp, &recv_wr, NULL); in ib_mad_post_receive_mads()
2978 spin_lock_irqsave(&recv_queue->lock, flags); in ib_mad_post_receive_mads()
2979 list_del(&mad_priv->header.mad_list.list); in ib_mad_post_receive_mads()
2980 recv_queue->count--; in ib_mad_post_receive_mads()
2981 spin_unlock_irqrestore(&recv_queue->lock, flags); in ib_mad_post_receive_mads()
2982 ib_dma_unmap_single(qp_info->port_priv->device, in ib_mad_post_receive_mads()
2983 mad_priv->header.mapping, in ib_mad_post_receive_mads()
2987 dev_err(&qp_info->port_priv->device->dev, in ib_mad_post_receive_mads()
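/*
 * Condensed sketch of posting a single receive buffer, following the
 * unwind order visible above: DMA-map the GRH+MAD area, account the
 * buffer on the receive queue under its lock, post it, and undo both on
 * failure.  The helper name and parameters are illustrative; the
 * ib_mad_private layout comes from the driver's private header.
 */
static int post_one_recv_buffer(struct ib_qp *qp, struct ib_device *dev,
				struct ib_mad_private *mad, u32 lkey,
				struct ib_mad_queue *queue)
{
	struct ib_recv_wr recv_wr = {};
	struct ib_sge sg;
	unsigned long flags;
	int ret;

	sg.lkey   = lkey;
	sg.length = sizeof(struct ib_grh) + mad->mad_size;
	sg.addr   = ib_dma_map_single(dev, &mad->grh, sg.length,
				      DMA_FROM_DEVICE);
	if (ib_dma_mapping_error(dev, sg.addr))
		return -ENOMEM;

	mad->header.mapping = sg.addr;
	mad->header.mad_list.mad_queue = queue;
	mad->header.mad_list.cqe.done = ib_mad_recv_done;
	recv_wr.wr_cqe  = &mad->header.mad_list.cqe;
	recv_wr.sg_list = &sg;
	recv_wr.num_sge = 1;

	spin_lock_irqsave(&queue->lock, flags);
	queue->count++;
	list_add_tail(&mad->header.mad_list.list, &queue->list);
	spin_unlock_irqrestore(&queue->lock, flags);

	ret = ib_post_recv(qp, &recv_wr, NULL);
	if (ret) {
		/* unwind the accounting and the mapping */
		spin_lock_irqsave(&queue->lock, flags);
		list_del(&mad->header.mad_list.list);
		queue->count--;
		spin_unlock_irqrestore(&queue->lock, flags);
		ib_dma_unmap_single(dev, sg.addr, sg.length, DMA_FROM_DEVICE);
	}
	return ret;
}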
3005 if (!qp_info->qp) in cleanup_recv_queue()
3008 while (!list_empty(&qp_info->recv_queue.list)) { in cleanup_recv_queue()
3010 mad_list = list_entry(qp_info->recv_queue.list.next, in cleanup_recv_queue()
3019 list_del(&mad_list->list); in cleanup_recv_queue()
3021 ib_dma_unmap_single(qp_info->port_priv->device, in cleanup_recv_queue()
3022 recv->header.mapping, in cleanup_recv_queue()
3028 qp_info->recv_queue.count = 0; in cleanup_recv_queue()
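/*
 * Sketch of draining receive buffers that were never consumed: each
 * queued list element is mapped back to its containing private buffer
 * via container_of(), unmapped and freed, mirroring cleanup_recv_queue()
 * above (the helper name and parameters are illustrative).
 */
static void drain_recv_list(struct ib_device *dev, struct list_head *head)
{
	while (!list_empty(head)) {
		struct ib_mad_list_head *mad_list =
			list_entry(head->next, struct ib_mad_list_head, list);
		struct ib_mad_private_header *hdr =
			container_of(mad_list, struct ib_mad_private_header,
				     mad_list);
		struct ib_mad_private *recv =
			container_of(hdr, struct ib_mad_private, header);

		list_del(&mad_list->list);
		ib_dma_unmap_single(dev, recv->header.mapping,
				    sizeof(struct ib_grh) + recv->mad_size,
				    DMA_FROM_DEVICE);
		kfree(recv);
	}
}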
3043 return -ENOMEM; in ib_mad_port_start()
3045 ret = ib_find_pkey(port_priv->device, port_priv->port_num, in ib_mad_port_start()
3051 qp = port_priv->qp_info[i].qp; in ib_mad_port_start()
3059 attr->qp_state = IB_QPS_INIT; in ib_mad_port_start()
3060 attr->pkey_index = pkey_index; in ib_mad_port_start()
3061 attr->qkey = (qp->qp_num == 0) ? 0 : IB_QP1_QKEY; in ib_mad_port_start()
3065 dev_err(&port_priv->device->dev, in ib_mad_port_start()
3071 attr->qp_state = IB_QPS_RTR; in ib_mad_port_start()
3074 dev_err(&port_priv->device->dev, in ib_mad_port_start()
3080 attr->qp_state = IB_QPS_RTS; in ib_mad_port_start()
3081 attr->sq_psn = IB_MAD_SEND_Q_PSN; in ib_mad_port_start()
3084 dev_err(&port_priv->device->dev, in ib_mad_port_start()
3091 ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP); in ib_mad_port_start()
3093 dev_err(&port_priv->device->dev, in ib_mad_port_start()
3100 if (!port_priv->qp_info[i].qp) in ib_mad_port_start()
3103 ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL); in ib_mad_port_start()
3105 dev_err(&port_priv->device->dev, in ib_mad_port_start()
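/*
 * The SMI/GSI QPs are driven to the send-ready state with the standard
 * INIT -> RTR -> RTS sequence shown above; special QPs need no address or
 * path information, only the pkey index and qkey at INIT and a send PSN
 * at RTS.  A sketch of that sequence for one QP:
 */
static int start_special_qp(struct ib_qp *qp, u16 pkey_index)
{
	struct ib_qp_attr attr = {};
	int ret;

	attr.qp_state   = IB_QPS_INIT;
	attr.pkey_index = pkey_index;
	attr.qkey       = (qp->qp_num == 0) ? 0 : IB_QP1_QKEY;
	ret = ib_modify_qp(qp, &attr,
			   IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_QKEY);
	if (ret)
		return ret;

	attr.qp_state = IB_QPS_RTR;
	ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
	if (ret)
		return ret;

	attr.qp_state = IB_QPS_RTS;
	attr.sq_psn   = IB_MAD_SEND_Q_PSN;	/* fixed PSN used by the MAD layer */
	return ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_SQ_PSN);
}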
3120 dev_err(&qp_info->port_priv->device->dev, in qp_event_handler()
3122 event->event, qp_info->qp->qp_num); in qp_event_handler()
3128 mad_queue->qp_info = qp_info; in init_mad_queue()
3129 mad_queue->count = 0; in init_mad_queue()
3130 spin_lock_init(&mad_queue->lock); in init_mad_queue()
3131 INIT_LIST_HEAD(&mad_queue->list); in init_mad_queue()
3137 qp_info->port_priv = port_priv; in init_mad_qp()
3138 init_mad_queue(qp_info, &qp_info->send_queue); in init_mad_qp()
3139 init_mad_queue(qp_info, &qp_info->recv_queue); in init_mad_qp()
3140 INIT_LIST_HEAD(&qp_info->overflow_list); in init_mad_qp()
3141 spin_lock_init(&qp_info->snoop_lock); in init_mad_qp()
3142 qp_info->snoop_table = NULL; in init_mad_qp()
3143 qp_info->snoop_table_size = 0; in init_mad_qp()
3144 atomic_set(&qp_info->snoop_count, 0); in init_mad_qp()
3154 qp_init_attr.send_cq = qp_info->port_priv->cq; in create_mad_qp()
3155 qp_init_attr.recv_cq = qp_info->port_priv->cq; in create_mad_qp()
3162 qp_init_attr.port_num = qp_info->port_priv->port_num; in create_mad_qp()
3165 qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr); in create_mad_qp()
3166 if (IS_ERR(qp_info->qp)) { in create_mad_qp()
3167 dev_err(&qp_info->port_priv->device->dev, in create_mad_qp()
3170 ret = PTR_ERR(qp_info->qp); in create_mad_qp()
3174 qp_info->send_queue.max_active = mad_sendq_size; in create_mad_qp()
3175 qp_info->recv_queue.max_active = mad_recvq_size; in create_mad_qp()
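/*
 * Sketch of the QP creation step: both work queues complete on the port's
 * shared CQ, the depths come from the mad_sendq_size and mad_recvq_size
 * module parameters referenced above, and qp_type selects IB_QPT_SMI or
 * IB_QPT_GSI.  The SGE limits below are illustrative literals standing in
 * for the constants in the private header.
 */
static struct ib_qp *make_mad_qp(struct ib_pd *pd, struct ib_cq *cq,
				 enum ib_qp_type qp_type, u8 port_num,
				 void (*event_handler)(struct ib_event *, void *),
				 void *qp_context)
{
	struct ib_qp_init_attr init = {};

	init.send_cq          = cq;
	init.recv_cq          = cq;
	init.sq_sig_type      = IB_SIGNAL_ALL_WR;
	init.cap.max_send_wr  = mad_sendq_size;
	init.cap.max_recv_wr  = mad_recvq_size;
	init.cap.max_send_sge = 2;	/* illustrative */
	init.cap.max_recv_sge = 1;	/* illustrative */
	init.qp_type          = qp_type;
	init.port_num         = port_num;
	init.event_handler    = event_handler;
	init.qp_context       = qp_context;

	return ib_create_qp(pd, &init);
}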
3184 if (!qp_info->qp) in destroy_mad_qp()
3187 ib_destroy_qp(qp_info->qp); in destroy_mad_qp()
3188 kfree(qp_info->snoop_table); in destroy_mad_qp()
3205 return -EFAULT; in ib_mad_port_open()
3209 return -EFAULT; in ib_mad_port_open()
3214 return -ENOMEM; in ib_mad_port_open()
3216 port_priv->device = device; in ib_mad_port_open()
3217 port_priv->port_num = port_num; in ib_mad_port_open()
3218 spin_lock_init(&port_priv->reg_lock); in ib_mad_port_open()
3219 init_mad_qp(port_priv, &port_priv->qp_info[0]); in ib_mad_port_open()
3220 init_mad_qp(port_priv, &port_priv->qp_info[1]); in ib_mad_port_open()
3227 port_priv->pd = ib_alloc_pd(device, 0); in ib_mad_port_open()
3228 if (IS_ERR(port_priv->pd)) { in ib_mad_port_open()
3229 dev_err(&device->dev, "Couldn't create ib_mad PD\n"); in ib_mad_port_open()
3230 ret = PTR_ERR(port_priv->pd); in ib_mad_port_open()
3234 port_priv->cq = ib_alloc_cq(port_priv->device, port_priv, cq_size, 0, in ib_mad_port_open()
3236 if (IS_ERR(port_priv->cq)) { in ib_mad_port_open()
3237 dev_err(&device->dev, "Couldn't create ib_mad CQ\n"); in ib_mad_port_open()
3238 ret = PTR_ERR(port_priv->cq); in ib_mad_port_open()
3243 ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI); in ib_mad_port_open()
3247 ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI); in ib_mad_port_open()
3252 port_priv->wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM); in ib_mad_port_open()
3253 if (!port_priv->wq) { in ib_mad_port_open()
3254 ret = -ENOMEM; in ib_mad_port_open()
3259 list_add_tail(&port_priv->port_list, &ib_mad_port_list); in ib_mad_port_open()
3264 dev_err(&device->dev, "Couldn't start port\n"); in ib_mad_port_open()
3272 list_del_init(&port_priv->port_list); in ib_mad_port_open()
3275 destroy_workqueue(port_priv->wq); in ib_mad_port_open()
3277 destroy_mad_qp(&port_priv->qp_info[1]); in ib_mad_port_open()
3279 destroy_mad_qp(&port_priv->qp_info[0]); in ib_mad_port_open()
3281 ib_free_cq(port_priv->cq); in ib_mad_port_open()
3282 cleanup_recv_queue(&port_priv->qp_info[1]); in ib_mad_port_open()
3283 cleanup_recv_queue(&port_priv->qp_info[0]); in ib_mad_port_open()
3285 ib_dealloc_pd(port_priv->pd); in ib_mad_port_open()
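/*
 * ib_mad_port_open() above follows the usual goto-ladder unwind idiom:
 * resources are released in reverse allocation order when a later step
 * fails.  A trimmed illustration with only PD, CQ and workqueue (the
 * labels, workqueue name and poll context are illustrative choices):
 */
static int open_port_resources(struct ib_device *device, int cq_size,
			       struct ib_pd **pd, struct ib_cq **cq,
			       struct workqueue_struct **wq)
{
	int ret;

	*pd = ib_alloc_pd(device, 0);
	if (IS_ERR(*pd))
		return PTR_ERR(*pd);

	*cq = ib_alloc_cq(device, NULL, cq_size, 0, IB_POLL_UNBOUND);
	if (IS_ERR(*cq)) {
		ret = PTR_ERR(*cq);
		goto error_cq;
	}

	*wq = alloc_ordered_workqueue("ib_mad_example", WQ_MEM_RECLAIM);
	if (!*wq) {
		ret = -ENOMEM;
		goto error_wq;
	}
	return 0;

error_wq:
	ib_free_cq(*cq);
error_cq:
	ib_dealloc_pd(*pd);
	return ret;
}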
3306 dev_err(&device->dev, "Port %d not found\n", port_num); in ib_mad_port_close()
3307 return -ENODEV; in ib_mad_port_close()
3309 list_del_init(&port_priv->port_list); in ib_mad_port_close()
3312 destroy_workqueue(port_priv->wq); in ib_mad_port_close()
3313 destroy_mad_qp(&port_priv->qp_info[1]); in ib_mad_port_close()
3314 destroy_mad_qp(&port_priv->qp_info[0]); in ib_mad_port_close()
3315 ib_free_cq(port_priv->cq); in ib_mad_port_close()
3316 ib_dealloc_pd(port_priv->pd); in ib_mad_port_close()
3317 cleanup_recv_queue(&port_priv->qp_info[1]); in ib_mad_port_close()
3318 cleanup_recv_queue(&port_priv->qp_info[0]); in ib_mad_port_close()
3337 dev_err(&device->dev, "Couldn't open port %d\n", i); in ib_mad_init_device()
3341 dev_err(&device->dev, in ib_mad_init_device()
3350 dev_err(&device->dev, "Couldn't close port %d\n", i); in ib_mad_init_device()
3353 while (--i >= start) { in ib_mad_init_device()
3358 dev_err(&device->dev, in ib_mad_init_device()
3361 dev_err(&device->dev, "Couldn't close port %d\n", i); in ib_mad_init_device()
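/*
 * Device registration walks every port, skips ports without MAD support
 * (rdma_cap_ib_mad()), and rolls already-opened ports back if a later one
 * fails, as the error path above shows.  A simplified sketch of that walk
 * using only the port open/close helpers from this file:
 */
static int open_all_mad_ports(struct ib_device *device)
{
	int start = rdma_start_port(device);
	int end = rdma_end_port(device);
	int i, ret;

	for (i = start; i <= end; i++) {
		if (!rdma_cap_ib_mad(device, i))
			continue;		/* e.g. ports with no QP0/QP1 */

		ret = ib_mad_port_open(device, i);
		if (ret)
			goto error;
	}
	return 0;

error:
	while (--i >= start) {			/* undo in reverse order */
		if (rdma_cap_ib_mad(device, i))
			ib_mad_port_close(device, i);
	}
	return ret;
}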
3374 dev_err(&device->dev, in ib_mad_remove_device()
3377 dev_err(&device->dev, "Couldn't close port %d\n", i); in ib_mad_remove_device()
3399 return -EINVAL; in ib_mad_init()