Lines Matching refs:qp_info

59 struct ib_mad_qp_info *qp_info, in create_mad_addr_info() argument
63 struct ib_device *dev = qp_info->port_priv->device; in create_mad_addr_info()
64 u8 pnum = qp_info->port_priv->port_num; in create_mad_addr_info()
103 static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
365 if (!port_priv->qp_info[qpn].qp) { in ib_register_mad_agent()
388 mad_agent_priv->qp_info = &port_priv->qp_info[qpn]; in ib_register_mad_agent()
395 mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp; in ib_register_mad_agent()
502 static int register_snoop_agent(struct ib_mad_qp_info *qp_info, in register_snoop_agent() argument
509 spin_lock_irqsave(&qp_info->snoop_lock, flags); in register_snoop_agent()
511 for (i = 0; i < qp_info->snoop_table_size; i++) in register_snoop_agent()
512 if (!qp_info->snoop_table[i]) in register_snoop_agent()
515 if (i == qp_info->snoop_table_size) { in register_snoop_agent()
517 new_snoop_table = krealloc(qp_info->snoop_table, in register_snoop_agent()
519 (qp_info->snoop_table_size + 1), in register_snoop_agent()
526 qp_info->snoop_table = new_snoop_table; in register_snoop_agent()
527 qp_info->snoop_table_size++; in register_snoop_agent()
529 qp_info->snoop_table[i] = mad_snoop_priv; in register_snoop_agent()
530 atomic_inc(&qp_info->snoop_count); in register_snoop_agent()
532 spin_unlock_irqrestore(&qp_info->snoop_lock, flags); in register_snoop_agent()
574 mad_snoop_priv->qp_info = &port_priv->qp_info[qpn]; in ib_register_mad_snoop()
579 mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp; in ib_register_mad_snoop()
591 &port_priv->qp_info[qpn], in ib_register_mad_snoop()
633 port_priv = mad_agent_priv->qp_info->port_priv; in unregister_mad_agent()
655 struct ib_mad_qp_info *qp_info; in unregister_mad_snoop() local
658 qp_info = mad_snoop_priv->qp_info; in unregister_mad_snoop()
659 spin_lock_irqsave(&qp_info->snoop_lock, flags); in unregister_mad_snoop()
660 qp_info->snoop_table[mad_snoop_priv->snoop_index] = NULL; in unregister_mad_snoop()
661 atomic_dec(&qp_info->snoop_count); in unregister_mad_snoop()
662 spin_unlock_irqrestore(&qp_info->snoop_lock, flags); in unregister_mad_snoop()
709 static void snoop_send(struct ib_mad_qp_info *qp_info, in snoop_send() argument
718 spin_lock_irqsave(&qp_info->snoop_lock, flags); in snoop_send()
719 for (i = 0; i < qp_info->snoop_table_size; i++) { in snoop_send()
720 mad_snoop_priv = qp_info->snoop_table[i]; in snoop_send()
726 spin_unlock_irqrestore(&qp_info->snoop_lock, flags); in snoop_send()
730 spin_lock_irqsave(&qp_info->snoop_lock, flags); in snoop_send()
732 spin_unlock_irqrestore(&qp_info->snoop_lock, flags); in snoop_send()
735 static void snoop_recv(struct ib_mad_qp_info *qp_info, in snoop_recv() argument
743 spin_lock_irqsave(&qp_info->snoop_lock, flags); in snoop_recv()
744 for (i = 0; i < qp_info->snoop_table_size; i++) { in snoop_recv()
745 mad_snoop_priv = qp_info->snoop_table[i]; in snoop_recv()
751 spin_unlock_irqrestore(&qp_info->snoop_lock, flags); in snoop_recv()
755 spin_lock_irqsave(&qp_info->snoop_lock, flags); in snoop_recv()
757 spin_unlock_irqrestore(&qp_info->snoop_lock, flags); in snoop_recv()
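
Note: the snoop-related references above (register_snoop_agent(), unregister_mad_snoop(), snoop_send(), snoop_recv()) all revolve around the same per-QP table guarded by snoop_lock, grown one slot at a time with krealloc() at line 517. The lock/unlock pairs at 718/726/730 and 743/751/755 imply that the lock is dropped around each snooper callback. A minimal sketch of that dispatch pattern, reconstructed from these references; the mad_snoop_flags test, the refcount, and the deref_snoop_agent() helper are assumptions drawn from the surrounding mad.c code, not from lines shown here:

    /* Sketch only: relies on the ib_mad_snoop_private definition in mad_priv.h. */
    static void snoop_dispatch(struct ib_mad_qp_info *qp_info,
                               struct ib_mad_send_buf *send_buf,
                               struct ib_mad_send_wc *mad_send_wc,
                               int mad_snoop_flags)
    {
            struct ib_mad_snoop_private *mad_snoop_priv;
            unsigned long flags;
            int i;

            spin_lock_irqsave(&qp_info->snoop_lock, flags);
            for (i = 0; i < qp_info->snoop_table_size; i++) {
                    mad_snoop_priv = qp_info->snoop_table[i];
                    if (!mad_snoop_priv ||
                        !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
                            continue;

                    /* Hold a reference so the entry survives while the
                     * lock is released around the callback. */
                    atomic_inc(&mad_snoop_priv->refcount);
                    spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
                    mad_snoop_priv->agent.snoop_handler(&mad_snoop_priv->agent,
                                                        send_buf, mad_send_wc);
                    deref_snoop_agent(mad_snoop_priv);
                    spin_lock_irqsave(&qp_info->snoop_lock, flags);
            }
            spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
    }
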
823 size_t mad_size = port_mad_size(mad_agent_priv->qp_info->port_priv); in handle_outgoing_dr_smp()
826 bool opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device, in handle_outgoing_dr_smp()
827 mad_agent_priv->qp_info->port_priv->port_num); in handle_outgoing_dr_smp()
975 queue_work(mad_agent_priv->qp_info->port_priv->wq, in handle_outgoing_dr_smp()
1212 struct ib_mad_qp_info *qp_info; in ib_send_mad() local
1220 qp_info = mad_send_wr->mad_agent_priv->qp_info; in ib_send_mad()
1221 mad_send_wr->mad_list.mad_queue = &qp_info->send_queue; in ib_send_mad()
1248 spin_lock_irqsave(&qp_info->send_queue.lock, flags); in ib_send_mad()
1249 if (qp_info->send_queue.count < qp_info->send_queue.max_active) { in ib_send_mad()
1250 trace_ib_mad_ib_send_mad(mad_send_wr, qp_info); in ib_send_mad()
1253 list = &qp_info->send_queue.list; in ib_send_mad()
1256 list = &qp_info->overflow_list; in ib_send_mad()
1260 qp_info->send_queue.count++; in ib_send_mad()
1263 spin_unlock_irqrestore(&qp_info->send_queue.lock, flags); in ib_send_mad()
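
Note: lines 1248-1263 show ib_send_mad() throttling posts against send_queue.max_active and diverting excess work requests to overflow_list. A sketch of the control flow those lines imply; the ib_post_send() and list_add_tail() calls are filled in from context (the agent's QP is the same object as qp_info->qp, per line 395), so treat this as an approximation rather than the exact source:

    spin_lock_irqsave(&qp_info->send_queue.lock, flags);
    if (qp_info->send_queue.count < qp_info->send_queue.max_active) {
            /* Under the limit: hand the work request to the QP now. */
            trace_ib_mad_ib_send_mad(mad_send_wr, qp_info);
            ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr.wr, NULL);
            list = &qp_info->send_queue.list;
    } else {
            /* At the limit: park it until a send completion frees a slot
             * (see the overflow_list handling at line 2548 in
             * ib_mad_send_done()). */
            ret = 0;
            list = &qp_info->overflow_list;
    }

    if (!ret) {
            qp_info->send_queue.count++;
            list_add_tail(&mad_send_wr->mad_list.list, list);
    }
    spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
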
1522 port_priv = agent_priv->qp_info->port_priv; in add_nonoui_reg_req()
1586 port_priv = agent_priv->qp_info->port_priv; in add_oui_reg_req()
1689 port_priv = agent_priv->qp_info->port_priv; in remove_mad_reg_req()
1854 const struct ib_mad_qp_info *qp_info, in validate_mad() argument
1858 u32 qp_num = qp_info->qp->qp_num; in validate_mad()
2096 const struct ib_mad_qp_info *qp_info, in handle_ib_smi() argument
2137 qp_info->qp->qp_num, in handle_ib_smi()
2183 struct ib_mad_qp_info *qp_info, in handle_opa_smi() argument
2227 qp_info->qp->qp_num, in handle_opa_smi()
2239 struct ib_mad_qp_info *qp_info, in handle_smi() argument
2250 return handle_opa_smi(port_priv, qp_info, wc, port_num, recv, in handle_smi()
2253 return handle_ib_smi(port_priv, qp_info, wc, port_num, recv, response); in handle_smi()
2261 struct ib_mad_qp_info *qp_info; in ib_mad_recv_done() local
2282 qp_info = mad_list->mad_queue->qp_info; in ib_mad_recv_done()
2285 opa = rdma_cap_opa_mad(qp_info->port_priv->device, in ib_mad_recv_done()
2286 qp_info->port_priv->port_num); in ib_mad_recv_done()
2311 if (atomic_read(&qp_info->snoop_count)) in ib_mad_recv_done()
2312 snoop_recv(qp_info, &recv->header.recv_wc, IB_MAD_SNOOP_RECVS); in ib_mad_recv_done()
2315 if (!validate_mad((const struct ib_mad_hdr *)recv->mad, qp_info, opa)) in ib_mad_recv_done()
2318 trace_ib_mad_recv_done_handler(qp_info, wc, in ib_mad_recv_done()
2333 if (handle_smi(port_priv, qp_info, wc, port_num, recv, in ib_mad_recv_done()
2358 qp_info->qp->qp_num, in ib_mad_recv_done()
2378 qp_info->qp->qp_num, mad_size, opa); in ib_mad_recv_done()
2384 ib_mad_post_receive_mads(qp_info, response); in ib_mad_recv_done()
2387 ib_mad_post_receive_mads(qp_info, recv); in ib_mad_recv_done()
2408 mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq, in adjust_timeout()
2443 mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq, in wait_for_response()
2513 struct ib_mad_qp_info *qp_info; in ib_mad_send_done() local
2530 qp_info = send_queue->qp_info; in ib_mad_send_done()
2548 mad_list = container_of(qp_info->overflow_list.next, in ib_mad_send_done()
2560 if (atomic_read(&qp_info->snoop_count)) in ib_mad_send_done()
2561 snoop_send(qp_info, &mad_send_wr->send_buf, &mad_send_wc, in ib_mad_send_done()
2566 trace_ib_mad_send_done_resend(queued_send_wr, qp_info); in ib_mad_send_done()
2567 ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr.wr, in ib_mad_send_done()
2579 static void mark_sends_for_retry(struct ib_mad_qp_info *qp_info) in mark_sends_for_retry() argument
2585 spin_lock_irqsave(&qp_info->send_queue.lock, flags); in mark_sends_for_retry()
2586 list_for_each_entry(mad_list, &qp_info->send_queue.list, list) { in mark_sends_for_retry()
2592 spin_unlock_irqrestore(&qp_info->send_queue.lock, flags); in mark_sends_for_retry()
2600 struct ib_mad_qp_info *qp_info = mad_list->mad_queue->qp_info; in ib_mad_send_error() local
2614 trace_ib_mad_error_handler(mad_send_wr, qp_info); in ib_mad_send_error()
2615 ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr.wr, in ib_mad_send_error()
2628 ret = ib_modify_qp(qp_info->qp, attr, in ib_mad_send_error()
2636 mark_sends_for_retry(qp_info); in ib_mad_send_error()
2756 opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device, in local_completions()
2757 mad_agent_priv->qp_info->port_priv->port_num); in local_completions()
2804 if (atomic_read(&recv_mad_agent->qp_info->snoop_count)) in local_completions()
2805 snoop_recv(recv_mad_agent->qp_info, in local_completions()
2822 if (atomic_read(&mad_agent_priv->qp_info->snoop_count)) in local_completions()
2823 snoop_send(mad_agent_priv->qp_info, in local_completions()
2895 queue_delayed_work(mad_agent_priv->qp_info-> in timeout_sends()
2925 static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info, in ib_mad_post_receive_mads() argument
2933 struct ib_mad_queue *recv_queue = &qp_info->recv_queue; in ib_mad_post_receive_mads()
2936 sg_list.lkey = qp_info->port_priv->pd->local_dma_lkey; in ib_mad_post_receive_mads()
2949 mad_priv = alloc_mad_private(port_mad_size(qp_info->port_priv), in ib_mad_post_receive_mads()
2957 sg_list.addr = ib_dma_map_single(qp_info->port_priv->device, in ib_mad_post_receive_mads()
2961 if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device, in ib_mad_post_receive_mads()
2976 ret = ib_post_recv(qp_info->qp, &recv_wr, NULL); in ib_mad_post_receive_mads()
2982 ib_dma_unmap_single(qp_info->port_priv->device, in ib_mad_post_receive_mads()
2987 dev_err(&qp_info->port_priv->device->dev, in ib_mad_post_receive_mads()
2999 static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info) in cleanup_recv_queue() argument
3005 if (!qp_info->qp) in cleanup_recv_queue()
3008 while (!list_empty(&qp_info->recv_queue.list)) { in cleanup_recv_queue()
3010 mad_list = list_entry(qp_info->recv_queue.list.next, in cleanup_recv_queue()
3021 ib_dma_unmap_single(qp_info->port_priv->device, in cleanup_recv_queue()
3028 qp_info->recv_queue.count = 0; in cleanup_recv_queue()
3051 qp = port_priv->qp_info[i].qp; in ib_mad_port_start()
3100 if (!port_priv->qp_info[i].qp) in ib_mad_port_start()
3103 ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL); in ib_mad_port_start()
3117 struct ib_mad_qp_info *qp_info = qp_context; in qp_event_handler() local
3120 dev_err(&qp_info->port_priv->device->dev, in qp_event_handler()
3122 event->event, qp_info->qp->qp_num); in qp_event_handler()
3125 static void init_mad_queue(struct ib_mad_qp_info *qp_info, in init_mad_queue() argument
3128 mad_queue->qp_info = qp_info; in init_mad_queue()
3135 struct ib_mad_qp_info *qp_info) in init_mad_qp() argument
3137 qp_info->port_priv = port_priv; in init_mad_qp()
3138 init_mad_queue(qp_info, &qp_info->send_queue); in init_mad_qp()
3139 init_mad_queue(qp_info, &qp_info->recv_queue); in init_mad_qp()
3140 INIT_LIST_HEAD(&qp_info->overflow_list); in init_mad_qp()
3141 spin_lock_init(&qp_info->snoop_lock); in init_mad_qp()
3142 qp_info->snoop_table = NULL; in init_mad_qp()
3143 qp_info->snoop_table_size = 0; in init_mad_qp()
3144 atomic_set(&qp_info->snoop_count, 0); in init_mad_qp()
3147 static int create_mad_qp(struct ib_mad_qp_info *qp_info, in create_mad_qp() argument
3154 qp_init_attr.send_cq = qp_info->port_priv->cq; in create_mad_qp()
3155 qp_init_attr.recv_cq = qp_info->port_priv->cq; in create_mad_qp()
3162 qp_init_attr.port_num = qp_info->port_priv->port_num; in create_mad_qp()
3163 qp_init_attr.qp_context = qp_info; in create_mad_qp()
3165 qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr); in create_mad_qp()
3166 if (IS_ERR(qp_info->qp)) { in create_mad_qp()
3167 dev_err(&qp_info->port_priv->device->dev, in create_mad_qp()
3170 ret = PTR_ERR(qp_info->qp); in create_mad_qp()
3174 qp_info->send_queue.max_active = mad_sendq_size; in create_mad_qp()
3175 qp_info->recv_queue.max_active = mad_recvq_size; in create_mad_qp()
3182 static void destroy_mad_qp(struct ib_mad_qp_info *qp_info) in destroy_mad_qp() argument
3184 if (!qp_info->qp) in destroy_mad_qp()
3187 ib_destroy_qp(qp_info->qp); in destroy_mad_qp()
3188 kfree(qp_info->snoop_table); in destroy_mad_qp()
3219 init_mad_qp(port_priv, &port_priv->qp_info[0]); in ib_mad_port_open()
3220 init_mad_qp(port_priv, &port_priv->qp_info[1]); in ib_mad_port_open()
3243 ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI); in ib_mad_port_open()
3247 ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI); in ib_mad_port_open()
3277 destroy_mad_qp(&port_priv->qp_info[1]); in ib_mad_port_open()
3279 destroy_mad_qp(&port_priv->qp_info[0]); in ib_mad_port_open()
3282 cleanup_recv_queue(&port_priv->qp_info[1]); in ib_mad_port_open()
3283 cleanup_recv_queue(&port_priv->qp_info[0]); in ib_mad_port_open()
3313 destroy_mad_qp(&port_priv->qp_info[1]); in ib_mad_port_close()
3314 destroy_mad_qp(&port_priv->qp_info[0]); in ib_mad_port_close()
3317 cleanup_recv_queue(&port_priv->qp_info[1]); in ib_mad_port_close()
3318 cleanup_recv_queue(&port_priv->qp_info[0]); in ib_mad_port_close()
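
Taken together, the references above sketch the shape of the two structures involved: each port keeps one ib_mad_qp_info for QP0 (SMI, qp_info[0]) and one for QP1 (GSI, qp_info[1]), per lines 3219-3220 and 3243/3247. A best-effort reconstruction of the field layout these uses imply, assuming the definitions in drivers/infiniband/core/mad_priv.h on a kernel that still carries the MAD snoop interface; exact types and field order are not verified against a specific tree:

    struct ib_mad_queue {
            spinlock_t              lock;           /* send_queue.lock, line 1248 */
            struct list_head        list;           /* lines 1253, 2586, 3010 */
            int                     count;          /* lines 1249, 1260, 3028 */
            int                     max_active;     /* lines 1249, 3174-3175 */
            struct ib_mad_qp_info   *qp_info;       /* lines 2282, 2530, 3128 */
    };

    struct ib_mad_qp_info {
            struct ib_mad_port_private      *port_priv;     /* device, port_num, pd, cq, wq */
            struct ib_qp                    *qp;
            struct ib_mad_queue             send_queue;
            struct ib_mad_queue             recv_queue;
            struct list_head                overflow_list;  /* sends beyond max_active */
            spinlock_t                      snoop_lock;
            struct ib_mad_snoop_private     **snoop_table;  /* grown with krealloc(), line 517 */
            int                             snoop_table_size;
            atomic_t                        snoop_count;
    };
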