Lines matching refs: port_priv

63 struct ib_device *dev = qp_info->port_priv->device; in create_mad_addr_info()
64 u8 pnum = qp_info->port_priv->port_num; in create_mad_addr_info()
100 struct ib_mad_port_private *port_priv,
112 static bool ib_mad_send_error(struct ib_mad_port_private *port_priv,
234 struct ib_mad_port_private *port_priv; in ib_register_mad_agent() local
353 port_priv = ib_get_mad_port(device, port_num); in ib_register_mad_agent()
354 if (!port_priv) { in ib_register_mad_agent()
364 if (!port_priv->qp_info[qpn].qp) { in ib_register_mad_agent()
387 mad_agent_priv->qp_info = &port_priv->qp_info[qpn]; in ib_register_mad_agent()
394 mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp; in ib_register_mad_agent()
430 spin_lock_irq(&port_priv->reg_lock); in ib_register_mad_agent()
434 class = port_priv->version[mad_reg_req-> in ib_register_mad_agent()
448 vendor = port_priv->version[mad_reg_req-> in ib_register_mad_agent()
467 spin_unlock_irq(&port_priv->reg_lock); in ib_register_mad_agent()
472 spin_unlock_irq(&port_priv->reg_lock); in ib_register_mad_agent()
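
Taken together, the ib_register_mad_agent() lines above trace the registration path: resolve (device, port_num) to its port_priv via ib_get_mad_port(), verify that the requested special QP exists (qpn 0 is SMI/QP0, qpn 1 is GSI/QP1), wire the agent to that QP, then file the registration into the per-version tables under reg_lock. A condensed sketch of that flow, paraphrased from the listing (validation and error unwinding elided; the private types live in drivers/infiniband/core/mad_priv.h):

    /* Sketch only: mirrors the ib_register_mad_agent() lines above. */
    port_priv = ib_get_mad_port(device, port_num);
    if (!port_priv)
            return ERR_PTR(-ENODEV);          /* MAD port not opened yet */

    if (!port_priv->qp_info[qpn].qp)          /* qpn: 0 = SMI, 1 = GSI */
            return ERR_PTR(-EPROTONOSUPPORT);

    mad_agent_priv->qp_info = &port_priv->qp_info[qpn];
    mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp;

    spin_lock_irq(&port_priv->reg_lock);
    /* check port_priv->version[].class / .vendor for conflicts, then
     * insert via add_nonoui_reg_req() or add_oui_reg_req() */
    spin_unlock_irq(&port_priv->reg_lock);
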
493 struct ib_mad_port_private *port_priv; in unregister_mad_agent() local
503 port_priv = mad_agent_priv->qp_info->port_priv; in unregister_mad_agent()
506 spin_lock_irq(&port_priv->reg_lock); in unregister_mad_agent()
508 spin_unlock_irq(&port_priv->reg_lock); in unregister_mad_agent()
511 flush_workqueue(port_priv->wq); in unregister_mad_agent()
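
Unregistration is the mirror image: the agent's entries are removed from the same tables under reg_lock, then port_priv->wq is flushed so any queued work still referencing the agent drains before it is freed. The ordering, restated from the lines above:

    /* Sketch: teardown ordering in unregister_mad_agent(). */
    port_priv = mad_agent_priv->qp_info->port_priv;

    spin_lock_irq(&port_priv->reg_lock);
    remove_mad_reg_req(mad_agent_priv);   /* drop class/vendor entries */
    spin_unlock_irq(&port_priv->reg_lock);

    flush_workqueue(port_priv->wq);       /* let in-flight work drain */
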
584 static size_t port_mad_size(const struct ib_mad_port_private *port_priv) in port_mad_size() argument
586 return rdma_max_mad_size(port_priv->device, port_priv->port_num); in port_mad_size()
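
port_mad_size() appears in full above: it forwards to rdma_max_mad_size(), which reports the largest MAD the port carries (256 bytes on conventional IB ports, 2048 on OPA ports). An illustrative pairing with the mad.c-internal allocator (the GFP flag depends on the calling context):

    /* Size a receive buffer to whatever this port's MADs need. */
    size_t mad_size = port_mad_size(qp_info->port_priv);
    struct ib_mad_private *mad_priv = alloc_mad_private(mad_size, GFP_ATOMIC);
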
608 struct ib_mad_port_private *port_priv; in handle_outgoing_dr_smp() local
614 size_t mad_size = port_mad_size(mad_agent_priv->qp_info->port_priv); in handle_outgoing_dr_smp()
617 bool opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device, in handle_outgoing_dr_smp()
618 mad_agent_priv->qp_info->port_priv->port_num); in handle_outgoing_dr_smp()
730 port_priv = ib_get_mad_port(mad_agent_priv->agent.device, in handle_outgoing_dr_smp()
732 if (port_priv) { in handle_outgoing_dr_smp()
734 recv_mad_agent = find_mad_agent(port_priv, in handle_outgoing_dr_smp()
737 if (!port_priv || !recv_mad_agent) { in handle_outgoing_dr_smp()
766 queue_work(mad_agent_priv->qp_info->port_priv->wq, in handle_outgoing_dr_smp()
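
For a directed-route SMP that targets the local port, handle_outgoing_dr_smp() short-circuits the wire: it resolves the destination port's port_priv, looks up the receiving agent with find_mad_agent(), and defers delivery onto the port's ordered workqueue. A sketch of that branch (buffer copying and the no-receiver fallback elided; local_work is the agent's work item from mad_priv.h):

    /* Sketch: local delivery branch of handle_outgoing_dr_smp(). */
    port_priv = ib_get_mad_port(mad_agent_priv->agent.device,
                                mad_agent_priv->agent.port_num);
    if (port_priv)
            recv_mad_agent = find_mad_agent(port_priv,
                            (const struct ib_mad_hdr *)mad_priv->mad);
    if (!port_priv || !recv_mad_agent) {
            /* nobody to deliver to: drop and complete the send locally */
    }

    /* hand the "received" MAD to the owning port's workqueue */
    queue_work(mad_agent_priv->qp_info->port_priv->wq,
               &mad_agent_priv->local_work);
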
1289 struct ib_mad_port_private *port_priv; in add_nonoui_reg_req() local
1294 port_priv = agent_priv->qp_info->port_priv; in add_nonoui_reg_req()
1295 class = &port_priv->version[mad_reg_req->mgmt_class_version].class; in add_nonoui_reg_req()
1348 struct ib_mad_port_private *port_priv; in add_oui_reg_req() local
1358 port_priv = agent_priv->qp_info->port_priv; in add_oui_reg_req()
1359 vendor_table = &port_priv->version[ in add_oui_reg_req()
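
Both add_*_reg_req() variants key off port_priv->version[], an array indexed by the MAD management class version. Each slot roots two tables: a class table for IANA-assigned management classes (add_nonoui_reg_req) and a vendor table for OUI-qualified vendor classes (add_oui_reg_req). The lookups above reduce to:

    /* The two table roots hanging off a version slot
     * (struct layout per drivers/infiniband/core/mad_priv.h). */
    class = &port_priv->version[mad_reg_req->mgmt_class_version].class;
    vendor_table = &port_priv->version[mad_reg_req->mgmt_class_version].vendor;
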
1445 struct ib_mad_port_private *port_priv; in remove_mad_reg_req() local
1461 port_priv = agent_priv->qp_info->port_priv; in remove_mad_reg_req()
1463 class = port_priv->version[ in remove_mad_reg_req()
1481 port_priv->version[ in remove_mad_reg_req()
1494 vendor = port_priv->version[ in remove_mad_reg_req()
1526 port_priv->version[ in remove_mad_reg_req()
1541 find_mad_agent(struct ib_mad_port_private *port_priv, in find_mad_agent() argument
1568 spin_lock_irqsave(&port_priv->reg_lock, flags); in find_mad_agent()
1576 class = port_priv->version[ in find_mad_agent()
1589 vendor = port_priv->version[ in find_mad_agent()
1611 spin_unlock_irqrestore(&port_priv->reg_lock, flags); in find_mad_agent()
1615 dev_notice(&port_priv->device->dev, in find_mad_agent()
1617 &mad_agent->agent, port_priv->port_num); in find_mad_agent()
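
find_mad_agent() is the receive-side inverse of registration: under reg_lock, taken with irqsave because it runs in completion context, it indexes the same version[] array by the incoming header's class version and walks the class or vendor table to a registered agent; a miss is logged against port_priv->device. A sketch (table walk elided, log message paraphrased):

    /* Sketch of find_mad_agent(). */
    struct ib_mad_agent_private *mad_agent = NULL;
    unsigned long flags;

    spin_lock_irqsave(&port_priv->reg_lock, flags);
    /* walk port_priv->version[hdr->class_version].class or .vendor
     * to the entry registered for hdr->mgmt_class */
    spin_unlock_irqrestore(&port_priv->reg_lock, flags);

    if (!mad_agent)
            dev_notice(&port_priv->device->dev,
                       "no agent for this MAD on port %d\n",
                       port_priv->port_num);
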
1867 static enum smi_action handle_ib_smi(const struct ib_mad_port_private *port_priv, in handle_ib_smi() argument
1880 rdma_cap_ib_switch(port_priv->device), in handle_ib_smi()
1882 port_priv->device->phys_port_cnt) == in handle_ib_smi()
1892 rdma_cap_ib_switch(port_priv->device), in handle_ib_smi()
1896 if (smi_check_local_smp(smp, port_priv->device) == IB_SMI_DISCARD) in handle_ib_smi()
1898 } else if (rdma_cap_ib_switch(port_priv->device)) { in handle_ib_smi()
1907 port_priv->device, in handle_ib_smi()
1954 handle_opa_smi(struct ib_mad_port_private *port_priv, in handle_opa_smi() argument
1967 rdma_cap_ib_switch(port_priv->device), in handle_opa_smi()
1969 port_priv->device->phys_port_cnt) == in handle_opa_smi()
1979 rdma_cap_ib_switch(port_priv->device), in handle_opa_smi()
1983 if (opa_smi_check_local_smp(smp, port_priv->device) == in handle_opa_smi()
1987 } else if (rdma_cap_ib_switch(port_priv->device)) { in handle_opa_smi()
1997 port_priv->device, in handle_opa_smi()
2010 handle_smi(struct ib_mad_port_private *port_priv, in handle_smi() argument
2022 return handle_opa_smi(port_priv, qp_info, wc, port_num, recv, in handle_smi()
2025 return handle_ib_smi(port_priv, qp_info, wc, port_num, recv, response); in handle_smi()
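
handle_smi() is a thin dispatcher over the two variants above: OPA-formatted SMPs go to handle_opa_smi(), everything else to handle_ib_smi(). Both run the same shape of checks against port_priv->device: directed-route validation using rdma_cap_ib_switch() and phys_port_cnt, then a local-SMP check before the MAD is accepted. A sketch of the dispatch (the real test also checks the SMP's class version; port_num is u8 here to match this kernel, u32 on newer ones):

    /* Sketch of handle_smi() dispatch. */
    static enum smi_action
    handle_smi(struct ib_mad_port_private *port_priv,
               struct ib_mad_qp_info *qp_info,
               struct ib_wc *wc, u8 port_num,
               struct ib_mad_private *recv,
               struct ib_mad_private *response,
               bool opa)
    {
            struct ib_mad_hdr *mad_hdr = (struct ib_mad_hdr *)recv->mad;

            /* OPA SMPs carry the OPA base version (0x80) */
            if (opa && mad_hdr->base_version == OPA_MGMT_BASE_VERSION)
                    return handle_opa_smi(port_priv, qp_info, wc, port_num,
                                          recv, response);

            return handle_ib_smi(port_priv, qp_info, wc, port_num, recv,
                                 response);
    }
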
2030 struct ib_mad_port_private *port_priv = cq->cq_context; in ib_mad_recv_done() local
2043 if (list_empty_careful(&port_priv->port_list)) in ib_mad_recv_done()
2057 opa = rdma_cap_opa_mad(qp_info->port_priv->device, in ib_mad_recv_done()
2058 qp_info->port_priv->port_num); in ib_mad_recv_done()
2063 ib_dma_unmap_single(port_priv->device, in ib_mad_recv_done()
2095 if (rdma_cap_ib_switch(port_priv->device)) in ib_mad_recv_done()
2098 port_num = port_priv->port_num; in ib_mad_recv_done()
2102 if (handle_smi(port_priv, qp_info, wc, port_num, recv, in ib_mad_recv_done()
2109 if (port_priv->device->ops.process_mad) { in ib_mad_recv_done()
2110 ret = port_priv->device->ops.process_mad( in ib_mad_recv_done()
2111 port_priv->device, 0, port_priv->port_num, wc, in ib_mad_recv_done()
2125 port_priv->device, in ib_mad_recv_done()
2134 mad_agent = find_mad_agent(port_priv, (const struct ib_mad_hdr *)recv->mad); in ib_mad_recv_done()
2146 port_priv->device, port_num, in ib_mad_recv_done()
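
The receive completion handler ties most of the above together. port_priv arrives as the CQ context (set when the CQ is allocated, line 2988 below), and the handler proceeds in a fixed order: bail out if the port is being torn down (empty port_list), DMA-unmap the buffer, pick the ingress port (switches take it from the work completion), run SMI processing, offer the MAD to the driver's process_mad() hook, and finally dispatch to the matching agent. Condensed:

    /* Sketch: ordering inside ib_mad_recv_done(); arguments trimmed. */
    struct ib_mad_port_private *port_priv = cq->cq_context;

    if (list_empty_careful(&port_priv->port_list))
            return;                         /* port is being closed */

    ib_dma_unmap_single(port_priv->device, recv->header.mapping,
                        mad_size, DMA_FROM_DEVICE);

    /* on a switch device the ingress port comes from the WC */
    port_num = rdma_cap_ib_switch(port_priv->device) ? wc->port_num
                                                     : port_priv->port_num;

    /* 1. SMI:      handle_smi(port_priv, qp_info, wc, port_num, ...)  */
    /* 2. driver:   port_priv->device->ops.process_mad(...)            */
    /* 3. dispatch: find_mad_agent(port_priv, mad_hdr)                 */
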
2177 mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq, in adjust_timeout()
2212 mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq, in wait_for_response()
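
Both timeout helpers reschedule the agent's delayed work on the owning port's workqueue rather than a system one, so all deferred processing for a port stays serialized on port_priv->wq (allocated as an ordered workqueue at line 3006 below):

    /* Re-arm the agent's timeout on the port's ordered workqueue. */
    mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
                     &mad_agent_priv->timed_work, delay);
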
2278 struct ib_mad_port_private *port_priv = cq->cq_context; in ib_mad_send_done() local
2288 if (list_empty_careful(&port_priv->port_list)) in ib_mad_send_done()
2292 if (!ib_mad_send_error(port_priv, wc)) in ib_mad_send_done()
2336 dev_err(&port_priv->device->dev, in ib_mad_send_done()
2361 static bool ib_mad_send_error(struct ib_mad_port_private *port_priv, in ib_mad_send_error() argument
2398 dev_err(&port_priv->device->dev, in ib_mad_send_error()
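
The send-side completion handler opens exactly like the receive side: port_priv comes from cq->cq_context and an empty port_list means the port is closing. Error completions detour through ib_mad_send_error(), which can consume the completion (for instance by requeuing the send) or let normal processing continue:

    /* Sketch: entry of ib_mad_send_done(). */
    struct ib_mad_port_private *port_priv = cq->cq_context;

    if (list_empty_careful(&port_priv->port_list))
            return;                         /* port is being closed */

    if (wc->status != IB_WC_SUCCESS) {
            if (!ib_mad_send_error(port_priv, wc))
                    return;                 /* consumed, e.g. requeued */
    }
    /* ... normal send-completion processing ... */
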
2522 opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device, in local_completions()
2523 mad_agent_priv->qp_info->port_priv->port_num); in local_completions()
2654 port_priv->wq, in timeout_sends()
2694 sg_list.lkey = qp_info->port_priv->pd->local_dma_lkey; in ib_mad_post_receive_mads()
2707 mad_priv = alloc_mad_private(port_mad_size(qp_info->port_priv), in ib_mad_post_receive_mads()
2715 sg_list.addr = ib_dma_map_single(qp_info->port_priv->device, in ib_mad_post_receive_mads()
2719 if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device, in ib_mad_post_receive_mads()
2741 ib_dma_unmap_single(qp_info->port_priv->device, in ib_mad_post_receive_mads()
2746 dev_err(&qp_info->port_priv->device->dev, in ib_mad_post_receive_mads()
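
Posting receive buffers follows the standard verbs pattern: allocate a MAD-sized buffer, DMA-map it against port_priv->device, reference the PD's local_dma_lkey in the SGE, and post; on failure, unmap and free. One loop iteration, sketched (queue bookkeeping and locking elided; alloc_mad_private() and mad_priv_dma_size() are mad.c-internal helpers):

    /* Sketch of one iteration in ib_mad_post_receive_mads(). */
    struct ib_mad_private *mad_priv;
    struct ib_sge sg_list;
    struct ib_recv_wr recv_wr = { .sg_list = &sg_list, .num_sge = 1 };
    const struct ib_recv_wr *bad_recv_wr;
    int ret;

    mad_priv = alloc_mad_private(port_mad_size(qp_info->port_priv),
                                 GFP_ATOMIC);
    if (!mad_priv)
            return -ENOMEM;

    sg_list.lkey = qp_info->port_priv->pd->local_dma_lkey;
    sg_list.length = mad_priv_dma_size(mad_priv);
    sg_list.addr = ib_dma_map_single(qp_info->port_priv->device,
                                     &mad_priv->grh,
                                     mad_priv_dma_size(mad_priv),
                                     DMA_FROM_DEVICE);
    if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device,
                                      sg_list.addr))) {
            kfree(mad_priv);
            return -ENOMEM;
    }

    ret = ib_post_recv(qp_info->qp, &recv_wr, &bad_recv_wr);
    if (ret) {
            ib_dma_unmap_single(qp_info->port_priv->device, sg_list.addr,
                                mad_priv_dma_size(mad_priv),
                                DMA_FROM_DEVICE);
            kfree(mad_priv);
            dev_err(&qp_info->port_priv->device->dev,
                    "ib_post_recv failed: %d\n", ret);
    }
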
2780 ib_dma_unmap_single(qp_info->port_priv->device, in cleanup_recv_queue()
2793 static int ib_mad_port_start(struct ib_mad_port_private *port_priv) in ib_mad_port_start() argument
2804 ret = ib_find_pkey(port_priv->device, port_priv->port_num, in ib_mad_port_start()
2810 qp = port_priv->qp_info[i].qp; in ib_mad_port_start()
2824 dev_err(&port_priv->device->dev, in ib_mad_port_start()
2833 dev_err(&port_priv->device->dev, in ib_mad_port_start()
2843 dev_err(&port_priv->device->dev, in ib_mad_port_start()
2850 ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP); in ib_mad_port_start()
2852 dev_err(&port_priv->device->dev, in ib_mad_port_start()
2859 if (!port_priv->qp_info[i].qp) in ib_mad_port_start()
2862 ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL); in ib_mad_port_start()
2864 dev_err(&port_priv->device->dev, in ib_mad_port_start()
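
Starting the port is a fixed sequence: find the default full-membership P_Key index, walk each special QP through the INIT/RTR/RTS transitions with ib_modify_qp() (QP0/QP1 need no address-vector or path attributes), arm the shared CQ, and prime the receive queues. Sketched, with the per-transition attributes mostly elided:

    /* Sketch: QP bring-up in ib_mad_port_start(). */
    struct ib_qp_attr attr = {};    /* kmalloc'd in the real code */
    u16 pkey_index;
    int i, ret;

    ret = ib_find_pkey(port_priv->device, port_priv->port_num,
                       IB_DEFAULT_PKEY_FULL, &pkey_index);
    if (ret)
            pkey_index = 0;

    for (i = 0; i < IB_MAD_QPS_CORE; i++) {
            struct ib_qp *qp = port_priv->qp_info[i].qp;

            if (!qp)
                    continue;

            attr.qp_state = IB_QPS_INIT;
            attr.pkey_index = pkey_index;
            attr.qkey = (i == 0) ? 0 : IB_QP1_QKEY;    /* QP0 has qkey 0 */
            ret = ib_modify_qp(qp, &attr,
                               IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_QKEY);
            /* then the IB_QPS_RTR and IB_QPS_RTS transitions */
    }

    ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);   /* arm CQ */

    for (i = 0; i < IB_MAD_QPS_CORE; i++)
            if (port_priv->qp_info[i].qp)
                    ret = ib_mad_post_receive_mads(&port_priv->qp_info[i],
                                                   NULL);
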
2879 dev_err(&qp_info->port_priv->device->dev, in qp_event_handler()
2893 static void init_mad_qp(struct ib_mad_port_private *port_priv, in init_mad_qp() argument
2896 qp_info->port_priv = port_priv; in init_mad_qp()
2909 qp_init_attr.send_cq = qp_info->port_priv->cq; in create_mad_qp()
2910 qp_init_attr.recv_cq = qp_info->port_priv->cq; in create_mad_qp()
2917 qp_init_attr.port_num = qp_info->port_priv->port_num; in create_mad_qp()
2920 qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr); in create_mad_qp()
2922 dev_err(&qp_info->port_priv->device->dev, in create_mad_qp()
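
Each MAD QP is created on the port's PD against the single shared CQ for both send and receive completions, which is why port_priv is the cq_context seen by ib_mad_recv_done() and ib_mad_send_done(). The init attributes, sketched (queue depths come from module parameters in the real code):

    /* Sketch: QP creation in create_mad_qp(). */
    struct ib_qp_init_attr qp_init_attr = {};

    qp_init_attr.send_cq = qp_info->port_priv->cq;   /* shared CQ */
    qp_init_attr.recv_cq = qp_info->port_priv->cq;
    qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
    qp_init_attr.qp_type = qp_type;          /* IB_QPT_SMI or IB_QPT_GSI */
    qp_init_attr.port_num = qp_info->port_priv->port_num;

    qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr);
    if (IS_ERR(qp_info->qp))
            dev_err(&qp_info->port_priv->device->dev,
                    "couldn't create MAD QP\n");   /* message paraphrased */
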
2953 struct ib_mad_port_private *port_priv; in ib_mad_port_open() local
2966 port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL); in ib_mad_port_open()
2967 if (!port_priv) in ib_mad_port_open()
2970 port_priv->device = device; in ib_mad_port_open()
2971 port_priv->port_num = port_num; in ib_mad_port_open()
2972 spin_lock_init(&port_priv->reg_lock); in ib_mad_port_open()
2973 init_mad_qp(port_priv, &port_priv->qp_info[0]); in ib_mad_port_open()
2974 init_mad_qp(port_priv, &port_priv->qp_info[1]); in ib_mad_port_open()
2981 port_priv->pd = ib_alloc_pd(device, 0); in ib_mad_port_open()
2982 if (IS_ERR(port_priv->pd)) { in ib_mad_port_open()
2984 ret = PTR_ERR(port_priv->pd); in ib_mad_port_open()
2988 port_priv->cq = ib_alloc_cq(port_priv->device, port_priv, cq_size, 0, in ib_mad_port_open()
2990 if (IS_ERR(port_priv->cq)) { in ib_mad_port_open()
2992 ret = PTR_ERR(port_priv->cq); in ib_mad_port_open()
2997 ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI); in ib_mad_port_open()
3001 ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI); in ib_mad_port_open()
3006 port_priv->wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM); in ib_mad_port_open()
3007 if (!port_priv->wq) { in ib_mad_port_open()
3013 list_add_tail(&port_priv->port_list, &ib_mad_port_list); in ib_mad_port_open()
3016 ret = ib_mad_port_start(port_priv); in ib_mad_port_open()
3026 list_del_init(&port_priv->port_list); in ib_mad_port_open()
3029 destroy_workqueue(port_priv->wq); in ib_mad_port_open()
3031 destroy_mad_qp(&port_priv->qp_info[1]); in ib_mad_port_open()
3033 destroy_mad_qp(&port_priv->qp_info[0]); in ib_mad_port_open()
3035 ib_free_cq(port_priv->cq); in ib_mad_port_open()
3036 cleanup_recv_queue(&port_priv->qp_info[1]); in ib_mad_port_open()
3037 cleanup_recv_queue(&port_priv->qp_info[0]); in ib_mad_port_open()
3039 ib_dealloc_pd(port_priv->pd); in ib_mad_port_open()
3041 kfree(port_priv); in ib_mad_port_open()
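
ib_mad_port_open() builds everything port_priv owns in dependency order, and the error labels visible above (lines 3026-3041) unwind in exact reverse. The happy path, condensed (IS_ERR checks and the workqueue name, "ib_mad<port>", elided):

    /* Sketch: construction order in ib_mad_port_open(). */
    port_priv = kzalloc(sizeof(*port_priv), GFP_KERNEL);
    if (!port_priv)
            return -ENOMEM;

    port_priv->device = device;
    port_priv->port_num = port_num;
    spin_lock_init(&port_priv->reg_lock);
    init_mad_qp(port_priv, &port_priv->qp_info[0]);   /* QP0: SMI */
    init_mad_qp(port_priv, &port_priv->qp_info[1]);   /* QP1: GSI */

    cq_size = mad_sendq_size + mad_recvq_size;
    if (rdma_cap_ib_smi(device, port_num))
            cq_size *= 2;           /* both QP0 and QP1 feed one CQ */

    port_priv->pd = ib_alloc_pd(device, 0);
    /* one CQ for both QPs; port_priv becomes cq->cq_context */
    port_priv->cq = ib_alloc_cq(port_priv->device, port_priv, cq_size, 0,
                                IB_POLL_UNBOUND);   /* poll context varies
                                                       by kernel version */
    create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI);
    create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI);

    port_priv->wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);

    /* under ib_mad_port_list_lock in the real code */
    list_add_tail(&port_priv->port_list, &ib_mad_port_list);
    ib_mad_port_start(port_priv);
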
3053 struct ib_mad_port_private *port_priv; in ib_mad_port_close() local
3057 port_priv = __ib_get_mad_port(device, port_num); in ib_mad_port_close()
3058 if (port_priv == NULL) { in ib_mad_port_close()
3063 list_del_init(&port_priv->port_list); in ib_mad_port_close()
3066 destroy_workqueue(port_priv->wq); in ib_mad_port_close()
3067 destroy_mad_qp(&port_priv->qp_info[1]); in ib_mad_port_close()
3068 destroy_mad_qp(&port_priv->qp_info[0]); in ib_mad_port_close()
3069 ib_free_cq(port_priv->cq); in ib_mad_port_close()
3070 ib_dealloc_pd(port_priv->pd); in ib_mad_port_close()
3071 cleanup_recv_queue(&port_priv->qp_info[1]); in ib_mad_port_close()
3072 cleanup_recv_queue(&port_priv->qp_info[0]); in ib_mad_port_close()
3075 kfree(port_priv); in ib_mad_port_close()
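
ib_mad_port_close() mirrors the open path. The port is unlinked from the global list first, which is what makes the list_empty_careful() checks in both completion handlers a cheap shutdown test, and only then are the resources released:

    /* Teardown order, restated with annotations. */
    list_del_init(&port_priv->port_list);    /* completions now bail out;
                                                under ib_mad_port_list_lock */
    destroy_workqueue(port_priv->wq);
    destroy_mad_qp(&port_priv->qp_info[1]);  /* GSI */
    destroy_mad_qp(&port_priv->qp_info[0]);  /* SMI */
    ib_free_cq(port_priv->cq);
    ib_dealloc_pd(port_priv->pd);
    cleanup_recv_queue(&port_priv->qp_info[1]);
    cleanup_recv_queue(&port_priv->qp_info[0]);
    kfree(port_priv);
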