Lines matching refs: qp_info (drivers/infiniband/core/mad.c)
80 static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
336 if (!port_priv->qp_info[qpn].qp) { in ib_register_mad_agent()
359 mad_agent_priv->qp_info = &port_priv->qp_info[qpn]; in ib_register_mad_agent()
366 mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp; in ib_register_mad_agent()
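ib_register_mad_agent() selects between the two per-port MAD QPs by QP type before wiring the agent to its qp_info slot. A minimal sketch of that mapping, mirroring the kernel's get_spl_qp_index() and assuming the two-element qp_info[] layout visible in ib_mad_port_open() further down (index 0 = QP0/SMI, index 1 = QP1/GSI):

	#include <rdma/ib_verbs.h>

	/* Sketch: map a MAD QP type to its qp_info[] slot. */
	static int spl_qp_index(enum ib_qp_type qp_type)
	{
		switch (qp_type) {
		case IB_QPT_SMI:
			return 0;	/* qp_info[0]: QP0 */
		case IB_QPT_GSI:
			return 1;	/* qp_info[1]: QP1 */
		default:
			return -1;	/* not a MAD special QP */
		}
	}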
475 static int register_snoop_agent(struct ib_mad_qp_info *qp_info, in register_snoop_agent() argument
482 spin_lock_irqsave(&qp_info->snoop_lock, flags); in register_snoop_agent()
484 for (i = 0; i < qp_info->snoop_table_size; i++) in register_snoop_agent()
485 if (!qp_info->snoop_table[i]) in register_snoop_agent()
488 if (i == qp_info->snoop_table_size) { in register_snoop_agent()
490 new_snoop_table = krealloc(qp_info->snoop_table, in register_snoop_agent()
492 (qp_info->snoop_table_size + 1), in register_snoop_agent()
499 qp_info->snoop_table = new_snoop_table; in register_snoop_agent()
500 qp_info->snoop_table_size++; in register_snoop_agent()
502 qp_info->snoop_table[i] = mad_snoop_priv; in register_snoop_agent()
503 atomic_inc(&qp_info->snoop_count); in register_snoop_agent()
505 spin_unlock_irqrestore(&qp_info->snoop_lock, flags); in register_snoop_agent()
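register_snoop_agent() scans the table for a free slot under snoop_lock and, when none exists, grows the table by exactly one entry with krealloc() under GFP_ATOMIC (the lock is held with interrupts off). A simplified sketch of that grow-by-one pattern, with the entry type reduced to void * (the real table holds struct ib_mad_snoop_private pointers):

	#include <linux/slab.h>
	#include <linux/spinlock.h>

	/* Sketch: claim a free slot in a lock-protected pointer table,
	 * growing it by one entry on demand. Returns the slot index or
	 * -ENOMEM.
	 */
	static int table_claim_slot(void ***table, int *size,
				    spinlock_t *lock, void *entry)
	{
		void **new_table;
		unsigned long flags;
		int i;

		spin_lock_irqsave(lock, flags);
		for (i = 0; i < *size; i++)
			if (!(*table)[i])
				break;

		if (i == *size) {
			/* krealloc() may move the table; the old pointer
			 * stays valid on failure, so only commit on
			 * success. */
			new_table = krealloc(*table,
					     sizeof(void *) * (*size + 1),
					     GFP_ATOMIC);
			if (!new_table) {
				spin_unlock_irqrestore(lock, flags);
				return -ENOMEM;
			}
			*table = new_table;
			(*size)++;
		}
		(*table)[i] = entry;
		spin_unlock_irqrestore(lock, flags);
		return i;
	}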
547 mad_snoop_priv->qp_info = &port_priv->qp_info[qpn]; in ib_register_mad_snoop()
552 mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp; in ib_register_mad_snoop()
564 &port_priv->qp_info[qpn], in ib_register_mad_snoop()
605 port_priv = mad_agent_priv->qp_info->port_priv; in unregister_mad_agent()
629 struct ib_mad_qp_info *qp_info; in unregister_mad_snoop() local
632 qp_info = mad_snoop_priv->qp_info; in unregister_mad_snoop()
633 spin_lock_irqsave(&qp_info->snoop_lock, flags); in unregister_mad_snoop()
634 qp_info->snoop_table[mad_snoop_priv->snoop_index] = NULL; in unregister_mad_snoop()
635 atomic_dec(&qp_info->snoop_count); in unregister_mad_snoop()
636 spin_unlock_irqrestore(&qp_info->snoop_lock, flags); in unregister_mad_snoop()
683 static void snoop_send(struct ib_mad_qp_info *qp_info, in snoop_send() argument
692 spin_lock_irqsave(&qp_info->snoop_lock, flags); in snoop_send()
693 for (i = 0; i < qp_info->snoop_table_size; i++) { in snoop_send()
694 mad_snoop_priv = qp_info->snoop_table[i]; in snoop_send()
700 spin_unlock_irqrestore(&qp_info->snoop_lock, flags); in snoop_send()
704 spin_lock_irqsave(&qp_info->snoop_lock, flags); in snoop_send()
706 spin_unlock_irqrestore(&qp_info->snoop_lock, flags); in snoop_send()
709 static void snoop_recv(struct ib_mad_qp_info *qp_info, in snoop_recv() argument
717 spin_lock_irqsave(&qp_info->snoop_lock, flags); in snoop_recv()
718 for (i = 0; i < qp_info->snoop_table_size; i++) { in snoop_recv()
719 mad_snoop_priv = qp_info->snoop_table[i]; in snoop_recv()
725 spin_unlock_irqrestore(&qp_info->snoop_lock, flags); in snoop_recv()
729 spin_lock_irqsave(&qp_info->snoop_lock, flags); in snoop_recv()
731 spin_unlock_irqrestore(&qp_info->snoop_lock, flags); in snoop_recv()
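snoop_send() and snoop_recv() share one dispatch shape: walk the table under snoop_lock, skip empty or mask-mismatched slots, pin a matching entry with a reference, drop the lock for the client callback, then retake it before advancing. A condensed sketch of that lock-drop pattern, with struct ib_mad_snoop_private reduced to a minimal stand-in:

	#include <linux/atomic.h>
	#include <linux/spinlock.h>

	/* Simplified snoop entry; the real type is
	 * struct ib_mad_snoop_private. */
	struct snoop_entry {
		atomic_t refcount;
		int flags;
		void (*handler)(struct snoop_entry *e, void *payload);
	};

	/* Sketch of the snoop dispatch: hold the lock only while
	 * selecting an entry; call out with the lock dropped so the
	 * handler may sleep or re-enter the MAD layer.
	 */
	static void snoop_dispatch(struct snoop_entry **table, int size,
				   spinlock_t *lock, int want_flags,
				   void *payload)
	{
		struct snoop_entry *e;
		unsigned long flags;
		int i;

		spin_lock_irqsave(lock, flags);
		for (i = 0; i < size; i++) {
			e = table[i];
			if (!e || !(e->flags & want_flags))
				continue;

			atomic_inc(&e->refcount);	/* pin across the unlock */
			spin_unlock_irqrestore(lock, flags);
			e->handler(e, payload);
			atomic_dec(&e->refcount);	/* real code wakes a waiter at zero */
			spin_lock_irqsave(lock, flags);
		}
		spin_unlock_irqrestore(lock, flags);
	}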
797 size_t mad_size = port_mad_size(mad_agent_priv->qp_info->port_priv); in handle_outgoing_dr_smp()
800 bool opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device, in handle_outgoing_dr_smp()
801 mad_agent_priv->qp_info->port_priv->port_num); in handle_outgoing_dr_smp()
945 queue_work(mad_agent_priv->qp_info->port_priv->wq, in handle_outgoing_dr_smp()
1182 struct ib_mad_qp_info *qp_info; in ib_send_mad() local
1190 qp_info = mad_send_wr->mad_agent_priv->qp_info; in ib_send_mad()
1191 mad_send_wr->mad_list.mad_queue = &qp_info->send_queue; in ib_send_mad()
1218 spin_lock_irqsave(&qp_info->send_queue.lock, flags); in ib_send_mad()
1219 if (qp_info->send_queue.count < qp_info->send_queue.max_active) { in ib_send_mad()
1222 list = &qp_info->send_queue.list; in ib_send_mad()
1225 list = &qp_info->overflow_list; in ib_send_mad()
1229 qp_info->send_queue.count++; in ib_send_mad()
1232 spin_unlock_irqrestore(&qp_info->send_queue.lock, flags); in ib_send_mad()
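ib_send_mad() applies simple flow control: while send_queue.count is below max_active the work request goes to the hardware and onto send_queue.list; otherwise it parks on overflow_list, to be posted later from ib_mad_send_done() below. A sketch of that admission decision with simplified queue types; note the real function issues ib_post_send() while still holding the lock:

	#include <linux/list.h>
	#include <linux/spinlock.h>

	/* Simplified stand-in for struct ib_mad_queue plus the
	 * qp_info overflow list. */
	struct mad_queue {
		spinlock_t lock;
		struct list_head list;		/* WRs posted to the QP */
		struct list_head overflow;	/* WRs awaiting a free slot */
		int count;
		int max_active;
	};

	/* Sketch of ib_send_mad()'s flow control: returns true if the
	 * caller should post the WR now, false if it was parked.
	 */
	static bool mad_queue_admit(struct mad_queue *q,
				    struct list_head *wr)
	{
		unsigned long flags;
		bool post;

		spin_lock_irqsave(&q->lock, flags);
		post = q->count < q->max_active;
		list_add_tail(wr, post ? &q->list : &q->overflow);
		q->count++;
		spin_unlock_irqrestore(&q->lock, flags);
		return post;
	}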
1491 port_priv = agent_priv->qp_info->port_priv; in add_nonoui_reg_req()
1555 port_priv = agent_priv->qp_info->port_priv; in add_oui_reg_req()
1658 port_priv = agent_priv->qp_info->port_priv; in remove_mad_reg_req()
1823 const struct ib_mad_qp_info *qp_info, in validate_mad() argument
1827 u32 qp_num = qp_info->qp->qp_num; in validate_mad()
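validate_mad() keys its management-class filter off the QP number it reads here: the two subnet-management classes are only legal on QP0, and everything else only on QP1. A hedged sketch of just that filter (the real function also checks the IB, and optionally OPA, base version first):

	#include <rdma/ib_mad.h>

	/* Sketch of validate_mad()'s QP filter: SMP classes belong on
	 * QP0, every other management class on QP1.
	 */
	static bool mad_class_fits_qp(u8 mgmt_class, u32 qp_num)
	{
		bool is_smp =
			mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
			mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE;

		return is_smp ? qp_num == 0 : qp_num != 0;
	}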
2065 const struct ib_mad_qp_info *qp_info, in handle_ib_smi() argument
2104 qp_info->qp->qp_num, in handle_ib_smi()
2150 struct ib_mad_qp_info *qp_info, in handle_opa_smi() argument
2192 qp_info->qp->qp_num, in handle_opa_smi()
2204 struct ib_mad_qp_info *qp_info, in handle_smi() argument
2215 return handle_opa_smi(port_priv, qp_info, wc, port_num, recv, in handle_smi()
2218 return handle_ib_smi(port_priv, qp_info, wc, port_num, recv, response); in handle_smi()
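handle_smi() is a thin dispatcher: an incoming SMP takes the OPA path only when the port is OPA-capable and the packet itself carries the OPA base and SM class versions; all other SMPs go to handle_ib_smi(). A sketch of that test; opa_sm_class_version stands in for the kernel's OPA SM class-version constant, whose exact macro name is not shown in this listing:

	#include <rdma/ib_mad.h>

	/* Sketch of handle_smi()'s dispatch test.
	 * OPA_MGMT_BASE_VERSION (0x80) is from <rdma/ib_mad.h>.
	 */
	static bool smp_takes_opa_path(const struct ib_mad_hdr *hdr,
				       bool port_is_opa,
				       u8 opa_sm_class_version)
	{
		return port_is_opa &&
		       hdr->base_version == OPA_MGMT_BASE_VERSION &&
		       hdr->class_version == opa_sm_class_version;
	}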
2226 struct ib_mad_qp_info *qp_info; in ib_mad_recv_done() local
2247 qp_info = mad_list->mad_queue->qp_info; in ib_mad_recv_done()
2250 opa = rdma_cap_opa_mad(qp_info->port_priv->device, in ib_mad_recv_done()
2251 qp_info->port_priv->port_num); in ib_mad_recv_done()
2276 if (atomic_read(&qp_info->snoop_count)) in ib_mad_recv_done()
2277 snoop_recv(qp_info, &recv->header.recv_wc, IB_MAD_SNOOP_RECVS); in ib_mad_recv_done()
2280 if (!validate_mad((const struct ib_mad_hdr *)recv->mad, qp_info, opa)) in ib_mad_recv_done()
2295 if (handle_smi(port_priv, qp_info, wc, port_num, recv, in ib_mad_recv_done()
2322 qp_info->qp->qp_num, in ib_mad_recv_done()
2341 qp_info->qp->qp_num, mad_size, opa); in ib_mad_recv_done()
2347 ib_mad_post_receive_mads(qp_info, response); in ib_mad_recv_done()
2350 ib_mad_post_receive_mads(qp_info, recv); in ib_mad_recv_done()
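ib_mad_recv_done() never learns its qp_info from the work completion directly: the completion's wr_cqe points into an ib_mad_list_head embedded in the receive buffer, and container_of() walks from there back to the owning queue (line 2247 above). It also ends by reposting whichever buffer it did not consume, so the receive ring never shrinks. A sketch of the context recovery, with the list head simplified:

	#include <linux/list.h>
	#include <rdma/ib_verbs.h>

	/* Simplified stand-in: the real ib_mad_list_head embeds a
	 * struct ib_cqe and links each posted buffer into its owning
	 * ib_mad_queue. */
	struct mad_list_head {
		struct list_head list;
		struct ib_cqe cqe;
		void *mad_queue;	/* real type: struct ib_mad_queue * */
	};

	/* Sketch of how the completion handler recovers per-buffer
	 * context: wc->wr_cqe points at the embedded cqe, so
	 * container_of() walks back to the list head, and from there
	 * to the queue and qp_info.
	 */
	static struct mad_list_head *mad_list_from_wc(struct ib_wc *wc)
	{
		return container_of(wc->wr_cqe, struct mad_list_head, cqe);
	}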
2371 mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq, in adjust_timeout()
2406 mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq, in wait_for_response()
2476 struct ib_mad_qp_info *qp_info; in ib_mad_send_done() local
2493 qp_info = send_queue->qp_info; in ib_mad_send_done()
2508 mad_list = container_of(qp_info->overflow_list.next, in ib_mad_send_done()
2520 if (atomic_read(&qp_info->snoop_count)) in ib_mad_send_done()
2521 snoop_send(qp_info, &mad_send_wr->send_buf, &mad_send_wc, in ib_mad_send_done()
2526 ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr.wr, in ib_mad_send_done()
2538 static void mark_sends_for_retry(struct ib_mad_qp_info *qp_info) in mark_sends_for_retry() argument
2544 spin_lock_irqsave(&qp_info->send_queue.lock, flags); in mark_sends_for_retry()
2545 list_for_each_entry(mad_list, &qp_info->send_queue.list, list) { in mark_sends_for_retry()
2551 spin_unlock_irqrestore(&qp_info->send_queue.lock, flags); in mark_sends_for_retry()
2559 struct ib_mad_qp_info *qp_info = mad_list->mad_queue->qp_info; in ib_mad_send_error() local
2573 ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr.wr, in ib_mad_send_error()
2586 ret = ib_modify_qp(qp_info->qp, attr, in ib_mad_send_error()
2594 mark_sends_for_retry(qp_info); in ib_mad_send_error()
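ib_mad_send_error() first retries the post, which covers a WQE flushed during a QP state transition; if the QP has instead dropped into the send-queue-error state, it is moved back to RTS with ib_modify_qp() and mark_sends_for_retry() flags every queued send for another attempt. A sketch of that state recovery, assuming the SQE-to-RTS transition the verbs layer permits:

	#include <linux/slab.h>
	#include <rdma/ib_verbs.h>

	/* Sketch of the recovery step: move a QP that fell into SQE
	 * (send queue error) back to RTS so queued sends can be
	 * retried.
	 */
	static int mad_qp_recover_sqe(struct ib_qp *qp)
	{
		struct ib_qp_attr *attr;
		int ret;

		attr = kmalloc(sizeof(*attr), GFP_KERNEL);
		if (!attr)
			return -ENOMEM;

		attr->qp_state = IB_QPS_RTS;		/* target state */
		attr->cur_qp_state = IB_QPS_SQE;	/* assert current state */
		ret = ib_modify_qp(qp, attr,
				   IB_QP_STATE | IB_QP_CUR_STATE);
		kfree(attr);
		return ret;
	}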
2714 opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device, in local_completions()
2715 mad_agent_priv->qp_info->port_priv->port_num); in local_completions()
2762 if (atomic_read(&recv_mad_agent->qp_info->snoop_count)) in local_completions()
2763 snoop_recv(recv_mad_agent->qp_info, in local_completions()
2780 if (atomic_read(&mad_agent_priv->qp_info->snoop_count)) in local_completions()
2781 snoop_send(mad_agent_priv->qp_info, in local_completions()
2853 queue_delayed_work(mad_agent_priv->qp_info-> in timeout_sends()
2883 static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info, in ib_mad_post_receive_mads() argument
2891 struct ib_mad_queue *recv_queue = &qp_info->recv_queue; in ib_mad_post_receive_mads()
2894 sg_list.lkey = qp_info->port_priv->pd->local_dma_lkey; in ib_mad_post_receive_mads()
2907 mad_priv = alloc_mad_private(port_mad_size(qp_info->port_priv), in ib_mad_post_receive_mads()
2915 sg_list.addr = ib_dma_map_single(qp_info->port_priv->device, in ib_mad_post_receive_mads()
2919 if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device, in ib_mad_post_receive_mads()
2934 ret = ib_post_recv(qp_info->qp, &recv_wr, NULL); in ib_mad_post_receive_mads()
2940 ib_dma_unmap_single(qp_info->port_priv->device, in ib_mad_post_receive_mads()
2945 dev_err(&qp_info->port_priv->device->dev, in ib_mad_post_receive_mads()
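The refill loop in ib_mad_post_receive_mads() allocates (or reuses) a buffer per iteration, DMA-maps it, verifies the mapping with ib_dma_mapping_error(), and unwinds the mapping if ib_post_recv() fails, so a partial refill never leaks DMA mappings. A condensed sketch of a single iteration, with the MAD buffer reduced to a plain allocation and the wr_cqe/list bookkeeping left as a comment:

	#include <linux/dma-mapping.h>
	#include <rdma/ib_verbs.h>

	/* Sketch of one refill iteration: map, post, unwind on failure.
	 * buf_len stands in for the port's MAD size; the real code also
	 * sets recv_wr.wr_cqe and threads the buffer onto the receive
	 * queue list before posting.
	 */
	static int post_one_recv(struct ib_qp *qp, struct ib_pd *pd,
				 void *buf, size_t buf_len)
	{
		struct ib_device *dev = qp->device;
		struct ib_sge sg_list;
		struct ib_recv_wr recv_wr = {};
		int ret;

		sg_list.addr = ib_dma_map_single(dev, buf, buf_len,
						 DMA_FROM_DEVICE);
		if (unlikely(ib_dma_mapping_error(dev, sg_list.addr)))
			return -ENOMEM;
		sg_list.length = buf_len;
		sg_list.lkey = pd->local_dma_lkey;

		recv_wr.sg_list = &sg_list;
		recv_wr.num_sge = 1;

		ret = ib_post_recv(qp, &recv_wr, NULL);
		if (ret)	/* undo the mapping; caller frees buf */
			ib_dma_unmap_single(dev, sg_list.addr, buf_len,
					    DMA_FROM_DEVICE);
		return ret;
	}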
2957 static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info) in cleanup_recv_queue() argument
2963 if (!qp_info->qp) in cleanup_recv_queue()
2966 while (!list_empty(&qp_info->recv_queue.list)) { in cleanup_recv_queue()
2968 mad_list = list_entry(qp_info->recv_queue.list.next, in cleanup_recv_queue()
2979 ib_dma_unmap_single(qp_info->port_priv->device, in cleanup_recv_queue()
2986 qp_info->recv_queue.count = 0; in cleanup_recv_queue()
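cleanup_recv_queue() is the teardown mirror: once the QP can no longer complete receives (it was destroyed, or was never created, hence the early return on a NULL qp above), every still-posted buffer is popped off recv_queue.list, unmapped, and freed, and the count is reset. A sketch of that drain with a simplified element type:

	#include <linux/list.h>
	#include <linux/slab.h>

	/* Simplified receive-queue element; the real code recovers a
	 * struct ib_mad_private from its embedded mad_list via
	 * container_of. */
	struct recv_elem {
		struct list_head list;
		u64 mapping;	/* DMA address saved at post time */
	};

	/* Sketch of the drain: safe only once nothing can complete
	 * into these buffers any more.
	 */
	static void drain_recv_list(struct list_head *head)
	{
		while (!list_empty(head)) {
			struct recv_elem *e =
				list_entry(head->next, struct recv_elem,
					   list);

			list_del(&e->list);
			/* real code: ib_dma_unmap_single(dev, e->mapping, ...) */
			kfree(e);
		}
	}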
3009 qp = port_priv->qp_info[i].qp; in ib_mad_port_start()
3058 if (!port_priv->qp_info[i].qp) in ib_mad_port_start()
3061 ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL); in ib_mad_port_start()
3075 struct ib_mad_qp_info *qp_info = qp_context; in qp_event_handler() local
3078 dev_err(&qp_info->port_priv->device->dev, in qp_event_handler()
3080 event->event, qp_info->qp->qp_num); in qp_event_handler()
3083 static void init_mad_queue(struct ib_mad_qp_info *qp_info, in init_mad_queue() argument
3086 mad_queue->qp_info = qp_info; in init_mad_queue()
3093 struct ib_mad_qp_info *qp_info) in init_mad_qp() argument
3095 qp_info->port_priv = port_priv; in init_mad_qp()
3096 init_mad_queue(qp_info, &qp_info->send_queue); in init_mad_qp()
3097 init_mad_queue(qp_info, &qp_info->recv_queue); in init_mad_qp()
3098 INIT_LIST_HEAD(&qp_info->overflow_list); in init_mad_qp()
3099 spin_lock_init(&qp_info->snoop_lock); in init_mad_qp()
3100 qp_info->snoop_table = NULL; in init_mad_qp()
3101 qp_info->snoop_table_size = 0; in init_mad_qp()
3102 atomic_set(&qp_info->snoop_count, 0); in init_mad_qp()
3105 static int create_mad_qp(struct ib_mad_qp_info *qp_info, in create_mad_qp() argument
3112 qp_init_attr.send_cq = qp_info->port_priv->cq; in create_mad_qp()
3113 qp_init_attr.recv_cq = qp_info->port_priv->cq; in create_mad_qp()
3120 qp_init_attr.port_num = qp_info->port_priv->port_num; in create_mad_qp()
3121 qp_init_attr.qp_context = qp_info; in create_mad_qp()
3123 qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr); in create_mad_qp()
3124 if (IS_ERR(qp_info->qp)) { in create_mad_qp()
3125 dev_err(&qp_info->port_priv->device->dev, in create_mad_qp()
3128 ret = PTR_ERR(qp_info->qp); in create_mad_qp()
3132 qp_info->send_queue.max_active = mad_sendq_size; in create_mad_qp()
3133 qp_info->recv_queue.max_active = mad_recvq_size; in create_mad_qp()
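create_mad_qp() builds each special QP over the port's shared CQ and PD, sizes it from the mad_sendq_size and mad_recvq_size module parameters, and stores qp_info as qp_context so qp_event_handler() above can report against the right QP. A hedged sketch of the attribute setup; the SGE counts are illustrative placeholders, not the kernel's actual values, and the real code also wires an event_handler:

	#include <rdma/ib_verbs.h>

	/* Sketch of create_mad_qp()'s attribute block. */
	static struct ib_qp *make_mad_qp(struct ib_pd *pd, struct ib_cq *cq,
					 enum ib_qp_type qp_type,
					 u8 port_num, void *qp_context,
					 u32 sendq, u32 recvq)
	{
		struct ib_qp_init_attr attr = {};

		attr.send_cq = cq;		/* send and recv share one CQ */
		attr.recv_cq = cq;
		attr.cap.max_send_wr = sendq;	/* mad_sendq_size */
		attr.cap.max_recv_wr = recvq;	/* mad_recvq_size */
		attr.cap.max_send_sge = 2;	/* placeholder */
		attr.cap.max_recv_sge = 1;	/* placeholder */
		attr.sq_sig_type = IB_SIGNAL_ALL_WR;	/* every send completes */
		attr.qp_type = qp_type;		/* IB_QPT_SMI or IB_QPT_GSI */
		attr.port_num = port_num;
		attr.qp_context = qp_context;	/* handed to the event handler */

		return ib_create_qp(pd, &attr);	/* check with IS_ERR() */
	}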
3140 static void destroy_mad_qp(struct ib_mad_qp_info *qp_info) in destroy_mad_qp() argument
3142 if (!qp_info->qp) in destroy_mad_qp()
3145 ib_destroy_qp(qp_info->qp); in destroy_mad_qp()
3146 kfree(qp_info->snoop_table); in destroy_mad_qp()
3177 init_mad_qp(port_priv, &port_priv->qp_info[0]); in ib_mad_port_open()
3178 init_mad_qp(port_priv, &port_priv->qp_info[1]); in ib_mad_port_open()
3201 ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI); in ib_mad_port_open()
3205 ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI); in ib_mad_port_open()
3235 destroy_mad_qp(&port_priv->qp_info[1]); in ib_mad_port_open()
3237 destroy_mad_qp(&port_priv->qp_info[0]); in ib_mad_port_open()
3242 cleanup_recv_queue(&port_priv->qp_info[1]); in ib_mad_port_open()
3243 cleanup_recv_queue(&port_priv->qp_info[0]); in ib_mad_port_open()
3271 destroy_mad_qp(&port_priv->qp_info[1]); in ib_mad_port_close()
3272 destroy_mad_qp(&port_priv->qp_info[0]); in ib_mad_port_close()
3275 cleanup_recv_queue(&port_priv->qp_info[1]); in ib_mad_port_close()
3276 cleanup_recv_queue(&port_priv->qp_info[0]); in ib_mad_port_close()