Lines matching references to ibdev (mlx4 InfiniBand driver, drivers/infiniband/hw/mlx4/main.c)
82 static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init);
134 struct mlx4_ib_dev *ibdev = to_mdev(device); in mlx4_ib_get_netdev() local
138 dev = mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port_num); in mlx4_ib_get_netdev()
141 if (mlx4_is_bonded(ibdev->dev)) { in mlx4_ib_get_netdev()
162 struct mlx4_ib_dev *ibdev, in mlx4_ib_update_gids_v1() argument
167 struct mlx4_dev *dev = ibdev->dev; in mlx4_ib_update_gids_v1()
195 struct mlx4_ib_dev *ibdev, in mlx4_ib_update_gids_v1_v2() argument
200 struct mlx4_dev *dev = ibdev->dev; in mlx4_ib_update_gids_v1_v2()
240 struct mlx4_ib_dev *ibdev, in mlx4_ib_update_gids() argument
243 if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2) in mlx4_ib_update_gids()
244 return mlx4_ib_update_gids_v1_v2(gids, ibdev, port_num); in mlx4_ib_update_gids()
246 return mlx4_ib_update_gids_v1(gids, ibdev, port_num); in mlx4_ib_update_gids()
251 struct mlx4_ib_dev *ibdev = to_mdev(attr->device); in mlx4_ib_add_gid() local
252 struct mlx4_ib_iboe *iboe = &ibdev->iboe; in mlx4_ib_add_gid()
319 ret = mlx4_ib_update_gids(gids, ibdev, attr->port_num); in mlx4_ib_add_gid()
329 struct mlx4_ib_dev *ibdev = to_mdev(attr->device); in mlx4_ib_del_gid() local
330 struct mlx4_ib_iboe *iboe = &ibdev->iboe; in mlx4_ib_del_gid()
376 ret = mlx4_ib_update_gids(gids, ibdev, attr->port_num); in mlx4_ib_del_gid()
382 int mlx4_ib_gid_index_to_real_index(struct mlx4_ib_dev *ibdev, in mlx4_ib_gid_index_to_real_index() argument
385 struct mlx4_ib_iboe *iboe = &ibdev->iboe; in mlx4_ib_gid_index_to_real_index()
396 if (mlx4_is_bonded(ibdev->dev)) in mlx4_ib_gid_index_to_real_index()
399 if (!rdma_cap_roce_gid_table(&ibdev->ib_dev, port_num)) in mlx4_ib_gid_index_to_real_index()
421 static int mlx4_ib_query_device(struct ib_device *ibdev, in mlx4_ib_query_device() argument
425 struct mlx4_ib_dev *dev = to_mdev(ibdev); in mlx4_ib_query_device()
460 err = mlx4_MAD_IFC(to_mdev(ibdev), MLX4_MAD_IFC_IGNORE_KEYS, in mlx4_ib_query_device()
550 if (mlx4_ib_port_link_layer(ibdev, 1) == IB_LINK_LAYER_ETHERNET || in mlx4_ib_query_device()
551 mlx4_ib_port_link_layer(ibdev, 2) == IB_LINK_LAYER_ETHERNET) { in mlx4_ib_query_device()
612 ((mlx4_ib_port_link_layer(ibdev, 1) == in mlx4_ib_query_device()
614 (mlx4_ib_port_link_layer(ibdev, 2) == in mlx4_ib_query_device()
645 static int ib_link_query_port(struct ib_device *ibdev, u8 port, in ib_link_query_port() argument
663 if (mlx4_is_mfunc(to_mdev(ibdev)->dev) && netw_view) in ib_link_query_port()
666 err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL, in ib_link_query_port()
682 props->gid_tbl_len = to_mdev(ibdev)->dev->caps.gid_table_len[port]; in ib_link_query_port()
683 props->max_msg_sz = to_mdev(ibdev)->dev->caps.max_msg_sz; in ib_link_query_port()
684 props->pkey_tbl_len = to_mdev(ibdev)->dev->caps.pkey_table_len[port]; in ib_link_query_port()
715 err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, in ib_link_query_port()
740 static int eth_link_query_port(struct ib_device *ibdev, u8 port, in eth_link_query_port() argument
744 struct mlx4_ib_dev *mdev = to_mdev(ibdev); in eth_link_query_port()
800 int __mlx4_ib_query_port(struct ib_device *ibdev, u8 port, in __mlx4_ib_query_port() argument
807 err = mlx4_ib_port_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND ? in __mlx4_ib_query_port()
808 ib_link_query_port(ibdev, port, props, netw_view) : in __mlx4_ib_query_port()
809 eth_link_query_port(ibdev, port, props); in __mlx4_ib_query_port()
814 static int mlx4_ib_query_port(struct ib_device *ibdev, u8 port, in mlx4_ib_query_port() argument
818 return __mlx4_ib_query_port(ibdev, port, props, 0); in mlx4_ib_query_port()
821 int __mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index, in __mlx4_ib_query_gid() argument
827 struct mlx4_ib_dev *dev = to_mdev(ibdev); in __mlx4_ib_query_gid()
877 static int mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index, in mlx4_ib_query_gid() argument
880 if (rdma_protocol_ib(ibdev, port)) in mlx4_ib_query_gid()
881 return __mlx4_ib_query_gid(ibdev, port, index, gid, 0); in mlx4_ib_query_gid()
885 static int mlx4_ib_query_sl2vl(struct ib_device *ibdev, u8 port, u64 *sl2vl_tbl) in mlx4_ib_query_sl2vl() argument
894 if (mlx4_is_slave(to_mdev(ibdev)->dev)) { in mlx4_ib_query_sl2vl()
908 if (mlx4_is_mfunc(to_mdev(ibdev)->dev)) in mlx4_ib_query_sl2vl()
911 err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL, in mlx4_ib_query_sl2vl()
945 int __mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index, in __mlx4_ib_query_pkey() argument
962 if (mlx4_is_mfunc(to_mdev(ibdev)->dev) && netw_view) in __mlx4_ib_query_pkey()
965 err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL, in __mlx4_ib_query_pkey()
978 static int mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey) in mlx4_ib_query_pkey() argument
980 return __mlx4_ib_query_pkey(ibdev, port, index, pkey, 0); in mlx4_ib_query_pkey()
983 static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask, in mlx4_ib_modify_device() argument
995 if (mlx4_is_slave(to_mdev(ibdev)->dev)) in mlx4_ib_modify_device()
998 spin_lock_irqsave(&to_mdev(ibdev)->sm_lock, flags); in mlx4_ib_modify_device()
999 memcpy(ibdev->node_desc, props->node_desc, IB_DEVICE_NODE_DESC_MAX); in mlx4_ib_modify_device()
1000 spin_unlock_irqrestore(&to_mdev(ibdev)->sm_lock, flags); in mlx4_ib_modify_device()
1006 mailbox = mlx4_alloc_cmd_mailbox(to_mdev(ibdev)->dev); in mlx4_ib_modify_device()
1011 mlx4_cmd(to_mdev(ibdev)->dev, mailbox->dma, 1, 0, in mlx4_ib_modify_device()
1014 mlx4_free_cmd_mailbox(to_mdev(ibdev)->dev, mailbox); in mlx4_ib_modify_device()
1045 static int mlx4_ib_modify_port(struct ib_device *ibdev, u8 port, int mask, in mlx4_ib_modify_port() argument
1048 struct mlx4_ib_dev *mdev = to_mdev(ibdev); in mlx4_ib_modify_port()
1063 err = ib_query_port(ibdev, port, &attr); in mlx4_ib_modify_port()
1075 mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex); in mlx4_ib_modify_port()
1079 static struct ib_ucontext *mlx4_ib_alloc_ucontext(struct ib_device *ibdev, in mlx4_ib_alloc_ucontext() argument
1082 struct mlx4_ib_dev *dev = to_mdev(ibdev); in mlx4_ib_alloc_ucontext()
1091 if (ibdev->uverbs_abi_ver == MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION) { in mlx4_ib_alloc_ucontext()
1107 err = mlx4_uar_alloc(to_mdev(ibdev)->dev, &context->uar); in mlx4_ib_alloc_ucontext()
1119 if (ibdev->uverbs_abi_ver == MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION) in mlx4_ib_alloc_ucontext()
1125 mlx4_uar_free(to_mdev(ibdev)->dev, &context->uar); in mlx4_ib_alloc_ucontext()
1283 static struct ib_pd *mlx4_ib_alloc_pd(struct ib_device *ibdev, in mlx4_ib_alloc_pd() argument
1294 err = mlx4_pd_alloc(to_mdev(ibdev)->dev, &pd->pdn); in mlx4_ib_alloc_pd()
1302 mlx4_pd_free(to_mdev(ibdev)->dev, pd->pdn); in mlx4_ib_alloc_pd()
1317 static struct ib_xrcd *mlx4_ib_alloc_xrcd(struct ib_device *ibdev, in mlx4_ib_alloc_xrcd() argument
1325 if (!(to_mdev(ibdev)->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC)) in mlx4_ib_alloc_xrcd()
1332 err = mlx4_xrcd_alloc(to_mdev(ibdev)->dev, &xrcd->xrcdn); in mlx4_ib_alloc_xrcd()
1336 xrcd->pd = ib_alloc_pd(ibdev, 0); in mlx4_ib_alloc_xrcd()
1343 xrcd->cq = ib_create_cq(ibdev, NULL, NULL, xrcd, &cq_attr); in mlx4_ib_alloc_xrcd()
1354 mlx4_xrcd_free(to_mdev(ibdev)->dev, xrcd->xrcdn); in mlx4_ib_alloc_xrcd()
1393 static void mlx4_ib_delete_counters_table(struct mlx4_ib_dev *ibdev, in mlx4_ib_delete_counters_table() argument
1402 mlx4_counter_free(ibdev->dev, counter->index); in mlx4_ib_delete_counters_table()
2213 static struct rdma_hw_stats *mlx4_ib_alloc_hw_stats(struct ib_device *ibdev, in mlx4_ib_alloc_hw_stats() argument
2216 struct mlx4_ib_dev *dev = to_mdev(ibdev); in mlx4_ib_alloc_hw_stats()
2227 static int mlx4_ib_get_hw_stats(struct ib_device *ibdev, in mlx4_ib_get_hw_stats() argument
2231 struct mlx4_ib_dev *dev = to_mdev(ibdev); in mlx4_ib_get_hw_stats()
2252 static int __mlx4_ib_alloc_diag_counters(struct mlx4_ib_dev *ibdev, in __mlx4_ib_alloc_diag_counters() argument
2262 if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT) in __mlx4_ib_alloc_diag_counters()
2285 static void mlx4_ib_fill_diag_counters(struct mlx4_ib_dev *ibdev, in mlx4_ib_fill_diag_counters() argument
2298 if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT) { in mlx4_ib_fill_diag_counters()
2313 static int mlx4_ib_alloc_diag_counters(struct mlx4_ib_dev *ibdev) in mlx4_ib_alloc_diag_counters() argument
2315 struct mlx4_ib_diag_counters *diag = ibdev->diag_counters; in mlx4_ib_alloc_diag_counters()
2318 bool per_port = !!(ibdev->dev->caps.flags2 & in mlx4_ib_alloc_diag_counters()
2321 if (mlx4_is_slave(ibdev->dev)) in mlx4_ib_alloc_diag_counters()
2329 ret = __mlx4_ib_alloc_diag_counters(ibdev, &diag[i].name, in mlx4_ib_alloc_diag_counters()
2335 mlx4_ib_fill_diag_counters(ibdev, diag[i].name, in mlx4_ib_alloc_diag_counters()
2339 ibdev->ib_dev.get_hw_stats = mlx4_ib_get_hw_stats; in mlx4_ib_alloc_diag_counters()
2340 ibdev->ib_dev.alloc_hw_stats = mlx4_ib_alloc_hw_stats; in mlx4_ib_alloc_diag_counters()
2353 static void mlx4_ib_diag_cleanup(struct mlx4_ib_dev *ibdev) in mlx4_ib_diag_cleanup() argument
2358 kfree(ibdev->diag_counters[i].offset); in mlx4_ib_diag_cleanup()
2359 kfree(ibdev->diag_counters[i].name); in mlx4_ib_diag_cleanup()
2364 static void mlx4_ib_update_qps(struct mlx4_ib_dev *ibdev, in mlx4_ib_update_qps() argument
2376 atomic64_set(&ibdev->iboe.mac[port - 1], new_smac); in mlx4_ib_update_qps()
2379 if (!mlx4_is_mfunc(ibdev->dev)) in mlx4_ib_update_qps()
2382 mutex_lock(&ibdev->qp1_proxy_lock[port - 1]); in mlx4_ib_update_qps()
2383 qp = ibdev->qp1_proxy[port - 1]; in mlx4_ib_update_qps()
2394 new_smac_index = mlx4_register_mac(ibdev->dev, port, new_smac); in mlx4_ib_update_qps()
2400 if (mlx4_update_qp(ibdev->dev, qp->mqp.qpn, MLX4_UPDATE_QP_SMAC, in mlx4_ib_update_qps()
2415 mlx4_unregister_mac(ibdev->dev, port, release_mac); in mlx4_ib_update_qps()
2418 mutex_unlock(&ibdev->qp1_proxy_lock[port - 1]); in mlx4_ib_update_qps()
2421 static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev, in mlx4_ib_scan_netdevs() argument
2432 iboe = &ibdev->iboe; in mlx4_ib_scan_netdevs()
2435 mlx4_foreach_ib_transport_port(port, ibdev->dev) { in mlx4_ib_scan_netdevs()
2438 mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port); in mlx4_ib_scan_netdevs()
2449 mlx4_ib_update_qps(ibdev, dev, update_qps_port); in mlx4_ib_scan_netdevs()
2456 struct mlx4_ib_dev *ibdev; in mlx4_ib_netdev_event() local
2461 ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb); in mlx4_ib_netdev_event()
2462 mlx4_ib_scan_netdevs(ibdev, dev, event); in mlx4_ib_netdev_event()
2467 static void init_pkeys(struct mlx4_ib_dev *ibdev) in init_pkeys() argument
2473 if (mlx4_is_master(ibdev->dev)) { in init_pkeys()
2474 for (slave = 0; slave <= ibdev->dev->persist->num_vfs; in init_pkeys()
2476 for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) { in init_pkeys()
2478 i < ibdev->dev->phys_caps.pkey_phys_table_len[port]; in init_pkeys()
2480 ibdev->pkeys.virt2phys_pkey[slave][port - 1][i] = in init_pkeys()
2482 (slave == mlx4_master_func_num(ibdev->dev) || !i) ? i : in init_pkeys()
2483 ibdev->dev->phys_caps.pkey_phys_table_len[port] - 1; in init_pkeys()
2484 mlx4_sync_pkey_table(ibdev->dev, slave, port, i, in init_pkeys()
2485 ibdev->pkeys.virt2phys_pkey[slave][port - 1][i]); in init_pkeys()
2490 for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) { in init_pkeys()
2492 i < ibdev->dev->phys_caps.pkey_phys_table_len[port]; in init_pkeys()
2494 ibdev->pkeys.phys_pkey_cache[port-1][i] = in init_pkeys()
2500 static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev) in mlx4_ib_alloc_eqs() argument
2504 ibdev->eq_table = kcalloc(dev->caps.num_comp_vectors, in mlx4_ib_alloc_eqs()
2505 sizeof(ibdev->eq_table[0]), GFP_KERNEL); in mlx4_ib_alloc_eqs()
2506 if (!ibdev->eq_table) in mlx4_ib_alloc_eqs()
2514 ibdev->eq_table[eq] = total_eqs; in mlx4_ib_alloc_eqs()
2516 &ibdev->eq_table[eq])) in mlx4_ib_alloc_eqs()
2519 ibdev->eq_table[eq] = -1; in mlx4_ib_alloc_eqs()
2524 ibdev->eq_table[i++] = -1) in mlx4_ib_alloc_eqs()
2528 ibdev->ib_dev.num_comp_vectors = eq; in mlx4_ib_alloc_eqs()
2531 static void mlx4_ib_free_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev) in mlx4_ib_free_eqs() argument
2534 int total_eqs = ibdev->ib_dev.num_comp_vectors; in mlx4_ib_free_eqs()
2537 if (!ibdev->eq_table) in mlx4_ib_free_eqs()
2541 ibdev->ib_dev.num_comp_vectors = 0; in mlx4_ib_free_eqs()
2544 mlx4_release_eq(dev, ibdev->eq_table[i]); in mlx4_ib_free_eqs()
2546 kfree(ibdev->eq_table); in mlx4_ib_free_eqs()
2547 ibdev->eq_table = NULL; in mlx4_ib_free_eqs()
2550 static int mlx4_port_immutable(struct ib_device *ibdev, u8 port_num, in mlx4_port_immutable() argument
2554 struct mlx4_ib_dev *mdev = to_mdev(ibdev); in mlx4_port_immutable()
2557 if (mlx4_ib_port_link_layer(ibdev, port_num) == IB_LINK_LAYER_INFINIBAND) { in mlx4_port_immutable()
2572 err = ib_query_port(ibdev, port_num, &attr); in mlx4_port_immutable()
2594 struct mlx4_ib_dev *ibdev; in mlx4_ib_add() local
2615 ibdev = (struct mlx4_ib_dev *) ib_alloc_device(sizeof *ibdev); in mlx4_ib_add()
2616 if (!ibdev) { in mlx4_ib_add()
2622 iboe = &ibdev->iboe; in mlx4_ib_add()
2624 if (mlx4_pd_alloc(dev, &ibdev->priv_pdn)) in mlx4_ib_add()
2627 if (mlx4_uar_alloc(dev, &ibdev->priv_uar)) in mlx4_ib_add()
2630 ibdev->uar_map = ioremap((phys_addr_t) ibdev->priv_uar.pfn << PAGE_SHIFT, in mlx4_ib_add()
2632 if (!ibdev->uar_map) in mlx4_ib_add()
2634 MLX4_INIT_DOORBELL_LOCK(&ibdev->uar_lock); in mlx4_ib_add()
2636 ibdev->dev = dev; in mlx4_ib_add()
2637 ibdev->bond_next_port = 0; in mlx4_ib_add()
2639 strlcpy(ibdev->ib_dev.name, "mlx4_%d", IB_DEVICE_NAME_MAX); in mlx4_ib_add()
2640 ibdev->ib_dev.owner = THIS_MODULE; in mlx4_ib_add()
2641 ibdev->ib_dev.node_type = RDMA_NODE_IB_CA; in mlx4_ib_add()
2642 ibdev->ib_dev.local_dma_lkey = dev->caps.reserved_lkey; in mlx4_ib_add()
2643 ibdev->num_ports = num_ports; in mlx4_ib_add()
2644 ibdev->ib_dev.phys_port_cnt = mlx4_is_bonded(dev) ? in mlx4_ib_add()
2645 1 : ibdev->num_ports; in mlx4_ib_add()
2646 ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors; in mlx4_ib_add()
2647 ibdev->ib_dev.dev.parent = &dev->persist->pdev->dev; in mlx4_ib_add()
2648 ibdev->ib_dev.get_netdev = mlx4_ib_get_netdev; in mlx4_ib_add()
2649 ibdev->ib_dev.add_gid = mlx4_ib_add_gid; in mlx4_ib_add()
2650 ibdev->ib_dev.del_gid = mlx4_ib_del_gid; in mlx4_ib_add()
2653 ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_ABI_VERSION; in mlx4_ib_add()
2655 ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION; in mlx4_ib_add()
2657 ibdev->ib_dev.uverbs_cmd_mask = in mlx4_ib_add()
2683 ibdev->ib_dev.query_device = mlx4_ib_query_device; in mlx4_ib_add()
2684 ibdev->ib_dev.query_port = mlx4_ib_query_port; in mlx4_ib_add()
2685 ibdev->ib_dev.get_link_layer = mlx4_ib_port_link_layer; in mlx4_ib_add()
2686 ibdev->ib_dev.query_gid = mlx4_ib_query_gid; in mlx4_ib_add()
2687 ibdev->ib_dev.query_pkey = mlx4_ib_query_pkey; in mlx4_ib_add()
2688 ibdev->ib_dev.modify_device = mlx4_ib_modify_device; in mlx4_ib_add()
2689 ibdev->ib_dev.modify_port = mlx4_ib_modify_port; in mlx4_ib_add()
2690 ibdev->ib_dev.alloc_ucontext = mlx4_ib_alloc_ucontext; in mlx4_ib_add()
2691 ibdev->ib_dev.dealloc_ucontext = mlx4_ib_dealloc_ucontext; in mlx4_ib_add()
2692 ibdev->ib_dev.mmap = mlx4_ib_mmap; in mlx4_ib_add()
2693 ibdev->ib_dev.alloc_pd = mlx4_ib_alloc_pd; in mlx4_ib_add()
2694 ibdev->ib_dev.dealloc_pd = mlx4_ib_dealloc_pd; in mlx4_ib_add()
2695 ibdev->ib_dev.create_ah = mlx4_ib_create_ah; in mlx4_ib_add()
2696 ibdev->ib_dev.query_ah = mlx4_ib_query_ah; in mlx4_ib_add()
2697 ibdev->ib_dev.destroy_ah = mlx4_ib_destroy_ah; in mlx4_ib_add()
2698 ibdev->ib_dev.create_srq = mlx4_ib_create_srq; in mlx4_ib_add()
2699 ibdev->ib_dev.modify_srq = mlx4_ib_modify_srq; in mlx4_ib_add()
2700 ibdev->ib_dev.query_srq = mlx4_ib_query_srq; in mlx4_ib_add()
2701 ibdev->ib_dev.destroy_srq = mlx4_ib_destroy_srq; in mlx4_ib_add()
2702 ibdev->ib_dev.post_srq_recv = mlx4_ib_post_srq_recv; in mlx4_ib_add()
2703 ibdev->ib_dev.create_qp = mlx4_ib_create_qp; in mlx4_ib_add()
2704 ibdev->ib_dev.modify_qp = mlx4_ib_modify_qp; in mlx4_ib_add()
2705 ibdev->ib_dev.query_qp = mlx4_ib_query_qp; in mlx4_ib_add()
2706 ibdev->ib_dev.destroy_qp = mlx4_ib_destroy_qp; in mlx4_ib_add()
2707 ibdev->ib_dev.drain_sq = mlx4_ib_drain_sq; in mlx4_ib_add()
2708 ibdev->ib_dev.drain_rq = mlx4_ib_drain_rq; in mlx4_ib_add()
2709 ibdev->ib_dev.post_send = mlx4_ib_post_send; in mlx4_ib_add()
2710 ibdev->ib_dev.post_recv = mlx4_ib_post_recv; in mlx4_ib_add()
2711 ibdev->ib_dev.create_cq = mlx4_ib_create_cq; in mlx4_ib_add()
2712 ibdev->ib_dev.modify_cq = mlx4_ib_modify_cq; in mlx4_ib_add()
2713 ibdev->ib_dev.resize_cq = mlx4_ib_resize_cq; in mlx4_ib_add()
2714 ibdev->ib_dev.destroy_cq = mlx4_ib_destroy_cq; in mlx4_ib_add()
2715 ibdev->ib_dev.poll_cq = mlx4_ib_poll_cq; in mlx4_ib_add()
2716 ibdev->ib_dev.req_notify_cq = mlx4_ib_arm_cq; in mlx4_ib_add()
2717 ibdev->ib_dev.get_dma_mr = mlx4_ib_get_dma_mr; in mlx4_ib_add()
2718 ibdev->ib_dev.reg_user_mr = mlx4_ib_reg_user_mr; in mlx4_ib_add()
2719 ibdev->ib_dev.rereg_user_mr = mlx4_ib_rereg_user_mr; in mlx4_ib_add()
2720 ibdev->ib_dev.dereg_mr = mlx4_ib_dereg_mr; in mlx4_ib_add()
2721 ibdev->ib_dev.alloc_mr = mlx4_ib_alloc_mr; in mlx4_ib_add()
2722 ibdev->ib_dev.map_mr_sg = mlx4_ib_map_mr_sg; in mlx4_ib_add()
2723 ibdev->ib_dev.attach_mcast = mlx4_ib_mcg_attach; in mlx4_ib_add()
2724 ibdev->ib_dev.detach_mcast = mlx4_ib_mcg_detach; in mlx4_ib_add()
2725 ibdev->ib_dev.process_mad = mlx4_ib_process_mad; in mlx4_ib_add()
2726 ibdev->ib_dev.get_port_immutable = mlx4_port_immutable; in mlx4_ib_add()
2727 ibdev->ib_dev.get_dev_fw_str = get_fw_ver_str; in mlx4_ib_add()
2728 ibdev->ib_dev.disassociate_ucontext = mlx4_ib_disassociate_ucontext; in mlx4_ib_add()
2730 ibdev->ib_dev.uverbs_ex_cmd_mask |= in mlx4_ib_add()
2734 ((mlx4_ib_port_link_layer(&ibdev->ib_dev, 1) == in mlx4_ib_add()
2736 (mlx4_ib_port_link_layer(&ibdev->ib_dev, 2) == in mlx4_ib_add()
2738 ibdev->ib_dev.create_wq = mlx4_ib_create_wq; in mlx4_ib_add()
2739 ibdev->ib_dev.modify_wq = mlx4_ib_modify_wq; in mlx4_ib_add()
2740 ibdev->ib_dev.destroy_wq = mlx4_ib_destroy_wq; in mlx4_ib_add()
2741 ibdev->ib_dev.create_rwq_ind_table = in mlx4_ib_add()
2743 ibdev->ib_dev.destroy_rwq_ind_table = in mlx4_ib_add()
2745 ibdev->ib_dev.uverbs_ex_cmd_mask |= in mlx4_ib_add()
2753 if (!mlx4_is_slave(ibdev->dev)) { in mlx4_ib_add()
2754 ibdev->ib_dev.alloc_fmr = mlx4_ib_fmr_alloc; in mlx4_ib_add()
2755 ibdev->ib_dev.map_phys_fmr = mlx4_ib_map_phys_fmr; in mlx4_ib_add()
2756 ibdev->ib_dev.unmap_fmr = mlx4_ib_unmap_fmr; in mlx4_ib_add()
2757 ibdev->ib_dev.dealloc_fmr = mlx4_ib_fmr_dealloc; in mlx4_ib_add()
2762 ibdev->ib_dev.alloc_mw = mlx4_ib_alloc_mw; in mlx4_ib_add()
2763 ibdev->ib_dev.dealloc_mw = mlx4_ib_dealloc_mw; in mlx4_ib_add()
2765 ibdev->ib_dev.uverbs_cmd_mask |= in mlx4_ib_add()
2771 ibdev->ib_dev.alloc_xrcd = mlx4_ib_alloc_xrcd; in mlx4_ib_add()
2772 ibdev->ib_dev.dealloc_xrcd = mlx4_ib_dealloc_xrcd; in mlx4_ib_add()
2773 ibdev->ib_dev.uverbs_cmd_mask |= in mlx4_ib_add()
2779 ibdev->steering_support = MLX4_STEERING_MODE_DEVICE_MANAGED; in mlx4_ib_add()
2780 ibdev->ib_dev.create_flow = mlx4_ib_create_flow; in mlx4_ib_add()
2781 ibdev->ib_dev.destroy_flow = mlx4_ib_destroy_flow; in mlx4_ib_add()
2783 ibdev->ib_dev.uverbs_ex_cmd_mask |= in mlx4_ib_add()
2788 ibdev->ib_dev.uverbs_ex_cmd_mask |= in mlx4_ib_add()
2793 mlx4_ib_alloc_eqs(dev, ibdev); in mlx4_ib_add()
2797 if (init_node_data(ibdev)) in mlx4_ib_add()
2799 mlx4_init_sl2vl_tbl(ibdev); in mlx4_ib_add()
2801 for (i = 0; i < ibdev->num_ports; ++i) { in mlx4_ib_add()
2802 mutex_init(&ibdev->counters_table[i].mutex); in mlx4_ib_add()
2803 INIT_LIST_HEAD(&ibdev->counters_table[i].counters_list); in mlx4_ib_add()
2806 num_req_counters = mlx4_is_bonded(dev) ? 1 : ibdev->num_ports; in mlx4_ib_add()
2808 mutex_init(&ibdev->qp1_proxy_lock[i]); in mlx4_ib_add()
2810 if (mlx4_ib_port_link_layer(&ibdev->ib_dev, i + 1) == in mlx4_ib_add()
2812 err = mlx4_counter_alloc(ibdev->dev, &counter_index, in mlx4_ib_add()
2829 mlx4_counter_free(ibdev->dev, counter_index); in mlx4_ib_add()
2835 &ibdev->counters_table[i].counters_list); in mlx4_ib_add()
2836 ibdev->counters_table[i].default_counter = counter_index; in mlx4_ib_add()
2841 for (i = 1; i < ibdev->num_ports ; ++i) { in mlx4_ib_add()
2850 &ibdev->counters_table[i].counters_list); in mlx4_ib_add()
2851 ibdev->counters_table[i].default_counter = in mlx4_ib_add()
2858 spin_lock_init(&ibdev->sm_lock); in mlx4_ib_add()
2859 mutex_init(&ibdev->cap_mask_mutex); in mlx4_ib_add()
2860 INIT_LIST_HEAD(&ibdev->qp_list); in mlx4_ib_add()
2861 spin_lock_init(&ibdev->reset_flow_resource_lock); in mlx4_ib_add()
2863 if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED && in mlx4_ib_add()
2865 ibdev->steer_qpn_count = MLX4_IB_UC_MAX_NUM_QPS; in mlx4_ib_add()
2866 err = mlx4_qp_reserve_range(dev, ibdev->steer_qpn_count, in mlx4_ib_add()
2868 &ibdev->steer_qpn_base, 0, in mlx4_ib_add()
2873 ibdev->ib_uc_qpns_bitmap = in mlx4_ib_add()
2874 kmalloc_array(BITS_TO_LONGS(ibdev->steer_qpn_count), in mlx4_ib_add()
2877 if (!ibdev->ib_uc_qpns_bitmap) in mlx4_ib_add()
2881 bitmap_zero(ibdev->ib_uc_qpns_bitmap, in mlx4_ib_add()
2882 ibdev->steer_qpn_count); in mlx4_ib_add()
2884 dev, ibdev->steer_qpn_base, in mlx4_ib_add()
2885 ibdev->steer_qpn_base + in mlx4_ib_add()
2886 ibdev->steer_qpn_count - 1); in mlx4_ib_add()
2890 bitmap_fill(ibdev->ib_uc_qpns_bitmap, in mlx4_ib_add()
2891 ibdev->steer_qpn_count); in mlx4_ib_add()
2895 for (j = 1; j <= ibdev->dev->caps.num_ports; j++) in mlx4_ib_add()
2896 atomic64_set(&iboe->mac[j - 1], ibdev->dev->caps.def_mac[j]); in mlx4_ib_add()
2898 if (mlx4_ib_alloc_diag_counters(ibdev)) in mlx4_ib_add()
2901 ibdev->ib_dev.driver_id = RDMA_DRIVER_MLX4; in mlx4_ib_add()
2902 if (ib_register_device(&ibdev->ib_dev, NULL)) in mlx4_ib_add()
2905 if (mlx4_ib_mad_init(ibdev)) in mlx4_ib_add()
2908 if (mlx4_ib_init_sriov(ibdev)) in mlx4_ib_add()
2926 if (device_create_file(&ibdev->ib_dev.dev, in mlx4_ib_add()
2931 ibdev->ib_active = true; in mlx4_ib_add()
2934 &ibdev->ib_dev); in mlx4_ib_add()
2936 if (mlx4_is_mfunc(ibdev->dev)) in mlx4_ib_add()
2937 init_pkeys(ibdev); in mlx4_ib_add()
2940 if (mlx4_is_master(ibdev->dev)) { in mlx4_ib_add()
2942 if (j == mlx4_master_func_num(ibdev->dev)) in mlx4_ib_add()
2944 if (mlx4_is_slave_active(ibdev->dev, j)) in mlx4_ib_add()
2945 do_slave_init(ibdev, j, 1); in mlx4_ib_add()
2948 return ibdev; in mlx4_ib_add()
2951 if (ibdev->iboe.nb.notifier_call) { in mlx4_ib_add()
2952 if (unregister_netdevice_notifier(&ibdev->iboe.nb)) in mlx4_ib_add()
2954 ibdev->iboe.nb.notifier_call = NULL; in mlx4_ib_add()
2958 mlx4_ib_close_sriov(ibdev); in mlx4_ib_add()
2961 mlx4_ib_mad_cleanup(ibdev); in mlx4_ib_add()
2964 ib_unregister_device(&ibdev->ib_dev); in mlx4_ib_add()
2967 mlx4_ib_diag_cleanup(ibdev); in mlx4_ib_add()
2970 kfree(ibdev->ib_uc_qpns_bitmap); in mlx4_ib_add()
2973 mlx4_qp_release_range(dev, ibdev->steer_qpn_base, in mlx4_ib_add()
2974 ibdev->steer_qpn_count); in mlx4_ib_add()
2976 for (i = 0; i < ibdev->num_ports; ++i) in mlx4_ib_add()
2977 mlx4_ib_delete_counters_table(ibdev, &ibdev->counters_table[i]); in mlx4_ib_add()
2980 mlx4_ib_free_eqs(dev, ibdev); in mlx4_ib_add()
2981 iounmap(ibdev->uar_map); in mlx4_ib_add()
2984 mlx4_uar_free(dev, &ibdev->priv_uar); in mlx4_ib_add()
2987 mlx4_pd_free(dev, ibdev->priv_pdn); in mlx4_ib_add()
2990 ib_dealloc_device(&ibdev->ib_dev); in mlx4_ib_add()
3063 struct mlx4_ib_dev *ibdev = ibdev_ptr; in mlx4_ib_remove() local
3069 ibdev->ib_active = false; in mlx4_ib_remove()
3072 mlx4_ib_close_sriov(ibdev); in mlx4_ib_remove()
3073 mlx4_ib_mad_cleanup(ibdev); in mlx4_ib_remove()
3074 ib_unregister_device(&ibdev->ib_dev); in mlx4_ib_remove()
3075 mlx4_ib_diag_cleanup(ibdev); in mlx4_ib_remove()
3076 if (ibdev->iboe.nb.notifier_call) { in mlx4_ib_remove()
3077 if (unregister_netdevice_notifier(&ibdev->iboe.nb)) in mlx4_ib_remove()
3079 ibdev->iboe.nb.notifier_call = NULL; in mlx4_ib_remove()
3082 mlx4_qp_release_range(dev, ibdev->steer_qpn_base, in mlx4_ib_remove()
3083 ibdev->steer_qpn_count); in mlx4_ib_remove()
3084 kfree(ibdev->ib_uc_qpns_bitmap); in mlx4_ib_remove()
3086 iounmap(ibdev->uar_map); in mlx4_ib_remove()
3087 for (p = 0; p < ibdev->num_ports; ++p) in mlx4_ib_remove()
3088 mlx4_ib_delete_counters_table(ibdev, &ibdev->counters_table[p]); in mlx4_ib_remove()
3093 mlx4_ib_free_eqs(dev, ibdev); in mlx4_ib_remove()
3095 mlx4_uar_free(dev, &ibdev->priv_uar); in mlx4_ib_remove()
3096 mlx4_pd_free(dev, ibdev->priv_pdn); in mlx4_ib_remove()
3097 ib_dealloc_device(&ibdev->ib_dev); in mlx4_ib_remove()
3100 static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init) in do_slave_init() argument
3103 struct mlx4_dev *dev = ibdev->dev; in do_slave_init()
3132 dm[i]->dev = ibdev; in do_slave_init()
3135 spin_lock_irqsave(&ibdev->sriov.going_down_lock, flags); in do_slave_init()
3136 if (!ibdev->sriov.is_going_down) { in do_slave_init()
3138 queue_work(ibdev->sriov.demux[i].ud_wq, &dm[i]->work); in do_slave_init()
3139 spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags); in do_slave_init()
3141 spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags); in do_slave_init()
3150 static void mlx4_ib_handle_catas_error(struct mlx4_ib_dev *ibdev) in mlx4_ib_handle_catas_error() argument
3164 spin_lock_irqsave(&ibdev->reset_flow_resource_lock, flags); in mlx4_ib_handle_catas_error()
3166 list_for_each_entry(mqp, &ibdev->qp_list, qps_list) { in mlx4_ib_handle_catas_error()
3207 spin_unlock_irqrestore(&ibdev->reset_flow_resource_lock, flags); in mlx4_ib_handle_catas_error()
3215 struct mlx4_ib_dev *ibdev = ew->ib_dev; in handle_bonded_port_state_event() local
3221 spin_lock_bh(&ibdev->iboe.lock); in handle_bonded_port_state_event()
3223 struct net_device *curr_netdev = ibdev->iboe.netdevs[i]; in handle_bonded_port_state_event()
3237 spin_unlock_bh(&ibdev->iboe.lock); in handle_bonded_port_state_event()
3239 ibev.device = &ibdev->ib_dev; in handle_bonded_port_state_event()
3272 void mlx4_sched_ib_sl2vl_update_work(struct mlx4_ib_dev *ibdev, in mlx4_sched_ib_sl2vl_update_work() argument
3281 ew->ib_dev = ibdev; in mlx4_sched_ib_sl2vl_update_work()
3290 struct mlx4_ib_dev *ibdev = to_mdev((struct ib_device *) ibdev_ptr); in mlx4_ib_event() local
3302 ew->ib_dev = ibdev; in mlx4_ib_event()
3314 if (p > ibdev->num_ports) in mlx4_ib_event()
3317 rdma_port_get_link_layer(&ibdev->ib_dev, p) == in mlx4_ib_event()
3320 mlx4_ib_invalidate_all_guid_record(ibdev, p); in mlx4_ib_event()
3321 if (ibdev->dev->flags & MLX4_FLAG_SECURE_HOST && in mlx4_ib_event()
3322 !(ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SL_TO_VL_CHANGE_EVENT)) in mlx4_ib_event()
3323 mlx4_sched_ib_sl2vl_update_work(ibdev, p); in mlx4_ib_event()
3329 if (p > ibdev->num_ports) in mlx4_ib_event()
3335 ibdev->ib_active = false; in mlx4_ib_event()
3337 mlx4_ib_handle_catas_error(ibdev); in mlx4_ib_event()
3347 ew->ib_dev = ibdev; in mlx4_ib_event()
3357 do_slave_init(ibdev, p, 1); in mlx4_ib_event()
3361 for (i = 1; i <= ibdev->num_ports; i++) { in mlx4_ib_event()
3362 if (rdma_port_get_link_layer(&ibdev->ib_dev, i) in mlx4_ib_event()
3364 mlx4_ib_slave_alias_guid_event(ibdev, in mlx4_ib_event()
3375 for (i = 1; i <= ibdev->num_ports; i++) { in mlx4_ib_event()
3376 if (rdma_port_get_link_layer(&ibdev->ib_dev, i) in mlx4_ib_event()
3378 mlx4_ib_slave_alias_guid_event(ibdev, in mlx4_ib_event()
3384 do_slave_init(ibdev, p, 0); in mlx4_ib_event()
3392 ibev.element.port_num = mlx4_is_bonded(ibdev->dev) ? 1 : (u8)p; in mlx4_ib_event()
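Most of the hits above either take a driver-private `struct mlx4_ib_dev *ibdev` directly or recover it from the generic `struct ib_device *` handed in by the verbs core via `to_mdev()`. The sketch below is a minimal, self-contained illustration of that embedding-plus-`container_of` pattern, buildable as a userspace program; the struct layouts and field set are simplified stand-ins (the real definitions live in include/rdma/ib_verbs.h and the mlx4 driver headers, and the kernel provides `container_of()` itself), so treat this as an idiom demo rather than the driver's actual code.

```c
#include <stddef.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel structures (assumed shapes,
 * not the real definitions). */
struct ib_device {
	char name[64];
};

struct mlx4_ib_dev {
	struct ib_device ib_dev;   /* embedded generic device, first-class member */
	int num_ports;             /* driver-private state */
};

/* container_of(): recover the enclosing structure from a pointer to one
 * of its members. In the kernel this comes from <linux/container_of.h>. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Mirrors the driver's to_mdev() helper: ib_device -> mlx4_ib_dev. */
static struct mlx4_ib_dev *to_mdev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct mlx4_ib_dev, ib_dev);
}

int main(void)
{
	struct mlx4_ib_dev dev = {
		.ib_dev   = { .name = "mlx4_0" },
		.num_ports = 2,
	};
	/* The verbs core only ever sees the embedded ib_device... */
	struct ib_device *ibdev = &dev.ib_dev;

	/* ...but any callback that receives ibdev can still reach the
	 * driver-private state, exactly as the listed functions do. */
	printf("%s has %d port(s)\n", ibdev->name, to_mdev(ibdev)->num_ports);
	return 0;
}
```

This is why nearly every verbs callback in the listing (query_device, query_port, alloc_pd, and so on) starts by converting its `struct ib_device *` argument with `to_mdev()` before touching mlx4-specific fields such as `ibdev->dev` or `ibdev->iboe`.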