
Searched for refs:ib_dev (results 1 – 25 of 68), sorted by relevance.


/Linux-v4.19/drivers/infiniband/hw/usnic/
usnic_ib_main.c 79 return scnprintf(buf, buf_sz, "PF: %s ", vf->pf->ib_dev.name); in usnic_ib_dump_vf_hdr()
141 usnic_info("PF Reset on %s\n", us_ibdev->ib_dev.name); in usnic_ib_handle_usdev_event()
144 ib_event.device = &us_ibdev->ib_dev; in usnic_ib_handle_usdev_event()
154 usnic_info("Link UP on %s\n", us_ibdev->ib_dev.name); in usnic_ib_handle_usdev_event()
156 ib_event.device = &us_ibdev->ib_dev; in usnic_ib_handle_usdev_event()
162 usnic_info("Link DOWN on %s\n", us_ibdev->ib_dev.name); in usnic_ib_handle_usdev_event()
165 ib_event.device = &us_ibdev->ib_dev; in usnic_ib_handle_usdev_event()
171 us_ibdev->ib_dev.name); in usnic_ib_handle_usdev_event()
178 us_ibdev->ib_dev.name); in usnic_ib_handle_usdev_event()
181 us_ibdev->ib_dev.name, in usnic_ib_handle_usdev_event()
[all …]
usnic_ib_sysfs.c 54 container_of(device, struct usnic_ib_dev, ib_dev.dev); in usnic_ib_show_board()
77 us_ibdev = container_of(device, struct usnic_ib_dev, ib_dev.dev); in usnic_ib_show_config()
97 us_ibdev->ib_dev.name, in usnic_ib_show_config()
122 us_ibdev->ib_dev.name); in usnic_ib_show_config()
136 us_ibdev = container_of(device, struct usnic_ib_dev, ib_dev.dev); in usnic_ib_show_iface()
148 us_ibdev = container_of(device, struct usnic_ib_dev, ib_dev.dev); in usnic_ib_show_max_vf()
161 us_ibdev = container_of(device, struct usnic_ib_dev, ib_dev.dev); in usnic_ib_show_qp_per_vf()
175 us_ibdev = container_of(device, struct usnic_ib_dev, ib_dev.dev); in usnic_ib_show_cq_per_vf()
284 err = device_create_file(&us_ibdev->ib_dev.dev, in usnic_ib_sysfs_register_usdev()
288 i, us_ibdev->ib_dev.name, err); in usnic_ib_sysfs_register_usdev()
[all …]
usnic_ib.h 70 struct ib_device ib_dev; member
99 return container_of(ibdev, struct usnic_ib_dev, ib_dev); in to_usdev()
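
Note: the usnic hits above illustrate the standard provider pattern of embedding a struct ib_device (the ib_dev member) inside a driver-private structure and recovering the wrapper with container_of(), as to_usdev() does. A minimal userspace sketch of that pattern, with hypothetical names (generic_dev/my_dev stand in for ib_device/usnic_ib_dev):

    #include <stdio.h>
    #include <stddef.h>

    /* stand-in for struct ib_device */
    struct generic_dev {
            char name[16];
    };

    /* stand-in for struct usnic_ib_dev: core object embedded in a
     * driver-private wrapper */
    struct my_dev {
            int private_state;
            struct generic_dev gdev;        /* plays the role of ib_dev */
    };

    /* same trick as to_usdev(): recover the wrapper from a pointer to
     * the embedded member */
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    static struct my_dev *to_my_dev(struct generic_dev *gdev)
    {
            return container_of(gdev, struct my_dev, gdev);
    }

    int main(void)
    {
            struct my_dev dev = { .private_state = 42 };
            struct generic_dev *g = &dev.gdev;      /* what the core layer sees */

            printf("private_state = %d\n", to_my_dev(g)->private_state);
            return 0;
    }
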
/Linux-v4.19/drivers/target/
target_core_iblock.c 69 struct iblock_dev *ib_dev = NULL; in iblock_alloc_device() local
71 ib_dev = kzalloc(sizeof(struct iblock_dev), GFP_KERNEL); in iblock_alloc_device()
72 if (!ib_dev) { in iblock_alloc_device()
79 return &ib_dev->dev; in iblock_alloc_device()
84 struct iblock_dev *ib_dev = IBLOCK_DEV(dev); in iblock_configure_device() local
92 if (!(ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)) { in iblock_configure_device()
97 ret = bioset_init(&ib_dev->ibd_bio_set, IBLOCK_BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS); in iblock_configure_device()
104 ib_dev->ibd_udev_path); in iblock_configure_device()
107 if (!ib_dev->ibd_readonly) in iblock_configure_device()
112 bd = blkdev_get_by_path(ib_dev->ibd_udev_path, mode, ib_dev); in iblock_configure_device()
[all …]
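
Note: this ib_dev is unrelated to InfiniBand; in target_core_iblock.c it abbreviates struct iblock_dev, the block-device backstore of the SCSI target. Its allocation path is the mirror image of the container_of() pattern above: allocate the private wrapper and hand the core a pointer to the embedded member. A hedged kernel-style sketch (hypothetical my_backend_dev/my_alloc_device names; the embedded core object is assumed to be struct se_device):

    #include <linux/slab.h>
    #include <target/target_core_base.h>

    /* hypothetical backstore wrapper mirroring iblock_alloc_device() */
    struct my_backend_dev {
            unsigned int flags;
            struct se_device dev;           /* embedded core object */
    };

    static struct se_device *my_alloc_device(void)
    {
            struct my_backend_dev *ib_dev;

            ib_dev = kzalloc(sizeof(*ib_dev), GFP_KERNEL);
            if (!ib_dev)
                    return NULL;

            return &ib_dev->dev;            /* core only sees &ib_dev->dev */
    }
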
/Linux-v4.19/drivers/infiniband/hw/hns/
hns_roce_main.c 191 static int hns_roce_query_device(struct ib_device *ib_dev, in hns_roce_query_device() argument
195 struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev); in hns_roce_query_device()
225 static struct net_device *hns_roce_get_netdev(struct ib_device *ib_dev, in hns_roce_get_netdev() argument
228 struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev); in hns_roce_get_netdev()
244 static int hns_roce_query_port(struct ib_device *ib_dev, u8 port_num, in hns_roce_query_port() argument
247 struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev); in hns_roce_query_port()
295 static int hns_roce_query_pkey(struct ib_device *ib_dev, u8 port, u16 index, in hns_roce_query_pkey() argument
303 static int hns_roce_modify_device(struct ib_device *ib_dev, int mask, in hns_roce_modify_device() argument
312 spin_lock_irqsave(&to_hr_dev(ib_dev)->sm_lock, flags); in hns_roce_modify_device()
313 memcpy(ib_dev->node_desc, props->node_desc, NODE_DESC_SIZE); in hns_roce_modify_device()
[all …]
hns_roce_pd.c 60 struct ib_pd *hns_roce_alloc_pd(struct ib_device *ib_dev, in hns_roce_alloc_pd() argument
64 struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev); in hns_roce_alloc_pd()
73 ret = hns_roce_pd_alloc(to_hr_dev(ib_dev), &pd->pdn); in hns_roce_alloc_pd()
84 hns_roce_pd_free(to_hr_dev(ib_dev), pd->pdn); in hns_roce_alloc_pd()
/Linux-v4.19/drivers/infiniband/core/
roce_gid_mgmt.c 82 unsigned long roce_gid_type_mask_support(struct ib_device *ib_dev, u8 port) in roce_gid_type_mask_support() argument
87 if (!rdma_protocol_roce(ib_dev, port)) in roce_gid_type_mask_support()
91 if (PORT_CAP_TO_GID_TYPE[i].is_supported(ib_dev, port)) in roce_gid_type_mask_support()
98 static void update_gid(enum gid_op_type gid_op, struct ib_device *ib_dev, in update_gid() argument
103 unsigned long gid_type_mask = roce_gid_type_mask_support(ib_dev, port); in update_gid()
110 ib_cache_gid_add(ib_dev, port, in update_gid()
114 ib_cache_gid_del(ib_dev, port, in update_gid()
147 is_eth_port_of_netdev_filter(struct ib_device *ib_dev, u8 port, in is_eth_port_of_netdev_filter() argument
171 is_eth_port_inactive_slave_filter(struct ib_device *ib_dev, u8 port, in is_eth_port_inactive_slave_filter() argument
200 is_ndev_for_default_gid_filter(struct ib_device *ib_dev, u8 port, in is_ndev_for_default_gid_filter() argument
[all …]
cache.c 114 static void dispatch_gid_change_event(struct ib_device *ib_dev, u8 port) in dispatch_gid_change_event() argument
118 event.device = ib_dev; in dispatch_gid_change_event()
349 static void del_gid(struct ib_device *ib_dev, u8 port, in del_gid() argument
357 ib_dev->name, port, ix, in del_gid()
366 if (!rdma_protocol_roce(ib_dev, port)) in del_gid()
503 static int __ib_cache_gid_add(struct ib_device *ib_dev, u8 port, in __ib_cache_gid_add() argument
519 table = rdma_gid_table(ib_dev, port); in __ib_cache_gid_add()
531 attr->device = ib_dev; in __ib_cache_gid_add()
537 dispatch_gid_change_event(ib_dev, port); in __ib_cache_gid_add()
547 int ib_cache_gid_add(struct ib_device *ib_dev, u8 port, in ib_cache_gid_add() argument
[all …]
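
Note: dispatch_gid_change_event() above, like the usnic link-state handlers earlier in these results, follows the usual notification pattern: fill a struct ib_event with the ib_dev, the port and an event type, then pass it to ib_dispatch_event(). A minimal hedged sketch of that pattern (my_notify_gid_change is a hypothetical name):

    #include <rdma/ib_verbs.h>

    /* Raise a GID-change event for one port of ib_dev, along the lines
     * of dispatch_gid_change_event() in cache.c. */
    static void my_notify_gid_change(struct ib_device *ib_dev, u8 port)
    {
            struct ib_event event;

            event.device           = ib_dev;
            event.element.port_num = port;
            event.event            = IB_EVENT_GID_CHANGE;

            ib_dispatch_event(&event);
    }
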
uverbs_std_types_dm.c 57 struct ib_device *ib_dev = uobj->context->device; in UVERBS_HANDLER() local
61 if (!ib_dev->alloc_dm) in UVERBS_HANDLER()
74 dm = ib_dev->alloc_dm(ib_dev, uobj->context, &attr, attrs); in UVERBS_HANDLER()
78 dm->device = ib_dev; in UVERBS_HANDLER()
uverbs_main.c 150 if (!srcu_dereference(ufile->device->ib_dev, in ib_uverbs_get_ucontext()
253 struct ib_device *ib_dev; in ib_uverbs_release_file() local
259 ib_dev = srcu_dereference(file->device->ib_dev, in ib_uverbs_release_file()
261 if (ib_dev && !ib_dev->disassociate_ucontext) in ib_uverbs_release_file()
262 module_put(ib_dev->owner); in ib_uverbs_release_file()
295 !uverbs_file->device->ib_dev))) in ib_uverbs_event_read()
300 !uverbs_file->device->ib_dev) in ib_uverbs_event_read()
612 struct ib_device *ib_dev) in ib_uverbs_alloc_async_event_file() argument
639 ib_dev, in ib_uverbs_alloc_async_event_file()
833 struct ib_device *ib_dev; in ib_uverbs_open() local
[all …]
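
Note: uverbs keeps its ib_dev pointer behind SRCU so the underlying device can be disassociated (hot-unplugged) while user file handles are still open; readers go through srcu_dereference() inside an SRCU read-side section and must tolerate a NULL result. A generic hedged sketch of that read pattern (my_srcu and my_ib_dev are hypothetical, not the actual uverbs field names; my_srcu is assumed to have been set up with init_srcu_struct()):

    #include <linux/srcu.h>
    #include <linux/printk.h>
    #include <rdma/ib_verbs.h>

    /* hypothetical SRCU-protected device pointer, mirroring how uverbs
     * guards its ib_dev against disassociation */
    static struct srcu_struct my_srcu;
    static struct ib_device __rcu *my_ib_dev;

    static void my_use_device(void)
    {
            struct ib_device *ib_dev;
            int idx;

            idx = srcu_read_lock(&my_srcu);
            ib_dev = srcu_dereference(my_ib_dev, &my_srcu);
            if (ib_dev)
                    pr_info("device %s still attached\n", ib_dev->name);
            /* NULL here means the device has been disassociated */
            srcu_read_unlock(&my_srcu, idx);
    }
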
uverbs_std_types_counters.c 55 struct ib_device *ib_dev = uobj->context->device; in UVERBS_HANDLER() local
64 if (!ib_dev->create_counters) in UVERBS_HANDLER()
67 counters = ib_dev->create_counters(ib_dev, attrs); in UVERBS_HANDLER()
73 counters->device = ib_dev; in UVERBS_HANDLER()
uverbs_std_types_cq.c 66 struct ib_device *ib_dev = obj->uobject.context->device; in UVERBS_HANDLER() local
75 if (!ib_dev->create_cq || !ib_dev->destroy_cq) in UVERBS_HANDLER()
117 cq = ib_dev->create_cq(ib_dev, &attr, obj->uobject.context, &uhw); in UVERBS_HANDLER()
123 cq->device = ib_dev; in UVERBS_HANDLER()
uverbs_std_types_flow_action.c 225 static int parse_flow_action_esp(struct ib_device *ib_dev, in parse_flow_action_esp() argument
312 struct ib_device *ib_dev = uobj->context->device; in UVERBS_HANDLER() local
317 if (!ib_dev->create_flow_action_esp) in UVERBS_HANDLER()
320 ret = parse_flow_action_esp(ib_dev, file, attrs, &esp_attr, false); in UVERBS_HANDLER()
325 action = ib_dev->create_flow_action_esp(ib_dev, &esp_attr.hdr, attrs); in UVERBS_HANDLER()
330 action->device = ib_dev; in UVERBS_HANDLER()
core_priv.h 97 void ib_enum_roce_netdev(struct ib_device *ib_dev,
124 void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
129 int ib_cache_gid_add(struct ib_device *ib_dev, u8 port,
132 int ib_cache_gid_del(struct ib_device *ib_dev, u8 port,
135 int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
141 unsigned long roce_gid_type_mask_support(struct ib_device *ib_dev, u8 port);
uverbs_cmd.c 78 struct ib_device *ib_dev; in ib_uverbs_get_context() local
88 ib_dev = srcu_dereference(file->device->ib_dev, in ib_uverbs_get_context()
90 if (!ib_dev) { in ib_uverbs_get_context()
105 ret = ib_rdmacg_try_charge(&cg_obj, ib_dev, RDMACG_RESOURCE_HCA_HANDLE); in ib_uverbs_get_context()
109 ucontext = ib_dev->alloc_ucontext(ib_dev, &udata); in ib_uverbs_get_context()
115 ucontext->device = ib_dev; in ib_uverbs_get_context()
132 if (!(ib_dev->attrs.device_cap_flags & IB_DEVICE_ON_DEMAND_PAGING)) in ib_uverbs_get_context()
144 filp = ib_uverbs_alloc_async_event_file(file, ib_dev); in ib_uverbs_get_context()
176 ib_dev->dealloc_ucontext(ucontext); in ib_uverbs_get_context()
179 ib_rdmacg_uncharge(&cg_obj, ib_dev, RDMACG_RESOURCE_HCA_HANDLE); in ib_uverbs_get_context()
[all …]
device.c 874 void ib_enum_roce_netdev(struct ib_device *ib_dev, in ib_enum_roce_netdev() argument
882 for (port = rdma_start_port(ib_dev); port <= rdma_end_port(ib_dev); in ib_enum_roce_netdev()
884 if (rdma_protocol_roce(ib_dev, port)) { in ib_enum_roce_netdev()
887 if (ib_dev->get_netdev) in ib_enum_roce_netdev()
888 idev = ib_dev->get_netdev(ib_dev, port); in ib_enum_roce_netdev()
896 if (filter(ib_dev, port, idev, filter_cookie)) in ib_enum_roce_netdev()
897 cb(ib_dev, port, idev, cookie); in ib_enum_roce_netdev()
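
Note: ib_enum_roce_netdev() above shows the canonical per-port walk: iterate from rdma_start_port() to rdma_end_port(), skip ports for which rdma_protocol_roce() is false, fetch the port's netdev through the driver's get_netdev() hook, and drop the reference afterwards. A hedged sketch of that loop with the filter/callback pair replaced by a simple print (my_walk_roce_ports is hypothetical):

    #include <linux/netdevice.h>
    #include <rdma/ib_verbs.h>

    /* visit every RoCE port of ib_dev and peek at its netdev, along the
     * lines of ib_enum_roce_netdev() */
    static void my_walk_roce_ports(struct ib_device *ib_dev)
    {
            u8 port;

            for (port = rdma_start_port(ib_dev);
                 port <= rdma_end_port(ib_dev); port++) {
                    struct net_device *ndev;

                    if (!rdma_protocol_roce(ib_dev, port))
                            continue;
                    if (!ib_dev->get_netdev)
                            continue;

                    ndev = ib_dev->get_netdev(ib_dev, port);
                    if (!ndev)
                            continue;

                    pr_info("%s port %u -> %s\n",
                            ib_dev->name, port, ndev->name);
                    dev_put(ndev);  /* get_netdev() returns a held reference */
            }
    }
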
/Linux-v4.19/drivers/infiniband/hw/vmw_pvrdma/
pvrdma_main.c 99 container_of(device, struct pvrdma_dev, ib_dev); in pvrdma_get_fw_ver_str()
165 strlcpy(dev->ib_dev.name, "vmw_pvrdma%d", IB_DEVICE_NAME_MAX); in pvrdma_register_device()
166 dev->ib_dev.node_guid = dev->dsr->caps.node_guid; in pvrdma_register_device()
169 dev->ib_dev.owner = THIS_MODULE; in pvrdma_register_device()
170 dev->ib_dev.num_comp_vectors = 1; in pvrdma_register_device()
171 dev->ib_dev.dev.parent = &dev->pdev->dev; in pvrdma_register_device()
172 dev->ib_dev.uverbs_abi_ver = PVRDMA_UVERBS_ABI_VERSION; in pvrdma_register_device()
173 dev->ib_dev.uverbs_cmd_mask = in pvrdma_register_device()
195 dev->ib_dev.node_type = RDMA_NODE_IB_CA; in pvrdma_register_device()
196 dev->ib_dev.phys_port_cnt = dev->dsr->caps.phys_port_cnt; in pvrdma_register_device()
[all …]
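
Note: pvrdma_register_device() above, like the mthca, mlx4 and mlx5 hits below, follows the common registration sequence: fill in the embedded ib_dev (name template, owner, node type, port count, verb callbacks) and then hand it to ib_register_device(). A condensed hedged sketch for a hypothetical provider (only a handful of the fields a real driver must set; in v4.19 the second ib_register_device() argument is an optional per-port sysfs callback):

    #include <linux/module.h>
    #include <linux/string.h>
    #include <rdma/ib_verbs.h>

    /* hypothetical provider wrapper with an embedded ib_device */
    struct my_hw_dev {
            struct ib_device ib_dev;
            /* ... hardware state ... */
    };

    static int my_register_device(struct my_hw_dev *dev, struct device *parent)
    {
            strlcpy(dev->ib_dev.name, "my_hw%d", IB_DEVICE_NAME_MAX);
            dev->ib_dev.owner            = THIS_MODULE;
            dev->ib_dev.node_type        = RDMA_NODE_IB_CA;
            dev->ib_dev.phys_port_cnt    = 1;
            dev->ib_dev.num_comp_vectors = 1;
            dev->ib_dev.dev.parent       = parent;
            /* ... query_device, query_port, alloc_pd, create_cq, ... */

            return ib_register_device(&dev->ib_dev, NULL);
    }
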
/Linux-v4.19/drivers/infiniband/hw/mthca/
mthca_provider.c 1083 container_of(device, struct mthca_dev, ib_dev.dev); in show_rev()
1091 container_of(device, struct mthca_dev, ib_dev.dev); in show_hca()
1111 container_of(device, struct mthca_dev, ib_dev.dev); in show_board()
1144 memcpy(dev->ib_dev.node_desc, out_mad->data, IB_DEVICE_NODE_DESC_MAX); in mthca_init_node_data()
1155 memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8); in mthca_init_node_data()
1185 container_of(device, struct mthca_dev, ib_dev); in get_dev_fw_str()
1201 strlcpy(dev->ib_dev.name, "mthca%d", IB_DEVICE_NAME_MAX); in mthca_register_device()
1202 dev->ib_dev.owner = THIS_MODULE; in mthca_register_device()
1204 dev->ib_dev.uverbs_abi_ver = MTHCA_UVERBS_ABI_VERSION; in mthca_register_device()
1205 dev->ib_dev.uverbs_cmd_mask = in mthca_register_device()
[all …]
mthca_mad.c 59 ret = ib_query_port(&dev->ib_dev, port_num, tprops); in mthca_update_rate()
62 ret, dev->ib_dev.name, port_num); in mthca_update_rate()
85 ah_attr.type = rdma_ah_find_type(&dev->ib_dev, port_num); in update_sm_ah()
304 agent = ib_register_mad_agent(&dev->ib_dev, p + 1, in mthca_create_agents()
/Linux-v4.19/drivers/infiniband/hw/mlx4/
main.c 399 if (!rdma_cap_roce_gid_table(&ibdev->ib_dev, port_num)) in mlx4_ib_gid_index_to_real_index()
935 err = mlx4_ib_query_sl2vl(&mdev->ib_dev, i, &sl2vl); in mlx4_init_sl2vl_tbl()
2119 memcpy(dev->ib_dev.node_desc, out_mad->data, IB_DEVICE_NODE_DESC_MAX); in init_node_data()
2128 memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8); in init_node_data()
2140 container_of(device, struct mlx4_ib_dev, ib_dev.dev); in show_hca()
2148 container_of(device, struct mlx4_ib_dev, ib_dev.dev); in show_rev()
2156 container_of(device, struct mlx4_ib_dev, ib_dev.dev); in show_board()
2339 ibdev->ib_dev.get_hw_stats = mlx4_ib_get_hw_stats; in mlx4_ib_alloc_diag_counters()
2340 ibdev->ib_dev.alloc_hw_stats = mlx4_ib_alloc_hw_stats; in mlx4_ib_alloc_diag_counters()
2528 ibdev->ib_dev.num_comp_vectors = eq; in mlx4_ib_alloc_eqs()
[all …]
mad.c 199 ah_attr.type = rdma_ah_find_type(&dev->ib_dev, port_num); in update_sm_ah()
453 return ib_find_cached_pkey(&dev->ib_dev, port, pkey, ix); in find_slave_port_pkey_ix()
463 ret = ib_get_cached_pkey(&dev->ib_dev, port, pkey_ix, &slot_pkey); in find_slave_port_pkey_ix()
540 ret = ib_get_cached_pkey(&dev->ib_dev, port, wc->pkey_index, &cached_pkey); in mlx4_ib_send_to_slave()
559 attr.type = rdma_ah_find_type(&dev->ib_dev, port); in mlx4_ib_send_to_slave()
589 ib_dma_sync_single_for_cpu(&dev->ib_dev, in mlx4_ib_send_to_slave()
631 ib_dma_sync_single_for_device(&dev->ib_dev, in mlx4_ib_send_to_slave()
1037 ll = rdma_port_get_link_layer(&dev->ib_dev, p + 1); in mlx4_ib_mad_init()
1040 agent = ib_register_mad_agent(&dev->ib_dev, p + 1, in mlx4_ib_mad_init()
1165 mlx4_ib_warn(&dev->ib_dev, "Failed in get GUID INFO MAD_IFC\n"); in handle_slaves_guid_change()
[all …]
sysfs.c 122 ret = __mlx4_ib_query_gid(&mdev->ib_dev, port->num, in show_port_gid()
149 ret = __mlx4_ib_query_pkey(&mdev->ib_dev, port->num, in show_phys_port_pkey()
231 ret = __mlx4_ib_query_port(&device->ib_dev, port_num, &attr, 1); in add_port_entries()
594 int is_eth = rdma_port_get_link_layer(&p->dev->ib_dev, p->port_num) == in add_vf_smi_entries()
629 int is_eth = rdma_port_get_link_layer(&p->dev->ib_dev, p->port_num) == in remove_vf_smi_entries()
644 int is_eth = rdma_port_get_link_layer(&dev->ib_dev, port_num) == in add_port()
823 kobject_get(dev->ib_dev.ports_parent->parent)); in mlx4_ib_device_register_sysfs()
836 for (i = 1; i <= dev->ib_dev.phys_port_cnt; ++i) { in mlx4_ib_device_register_sysfs()
853 kobject_put(dev->ib_dev.ports_parent->parent); in mlx4_ib_device_register_sysfs()
889 kobject_put(device->ib_dev.ports_parent->parent); in mlx4_ib_device_unregister_sysfs()
/Linux-v4.19/drivers/infiniband/ulp/isert/
ib_isert.c 178 struct ib_device *ib_dev = device->ib_device; in isert_alloc_rx_descriptors() local
193 dma_addr = ib_dma_map_single(ib_dev, (void *)rx_desc, in isert_alloc_rx_descriptors()
195 if (ib_dma_mapping_error(ib_dev, dma_addr)) in isert_alloc_rx_descriptors()
212 ib_dma_unmap_single(ib_dev, rx_desc->dma_addr, in isert_alloc_rx_descriptors()
224 struct ib_device *ib_dev = isert_conn->device->ib_device; in isert_free_rx_descriptors() local
233 ib_dma_unmap_single(ib_dev, rx_desc->dma_addr, in isert_free_rx_descriptors()
299 struct ib_device *ib_dev = device->ib_device; in isert_create_device_ib_res() local
303 ib_dev->attrs.max_send_sge, ib_dev->attrs.max_recv_sge); in isert_create_device_ib_res()
304 isert_dbg("devattr->max_sge_rd: %d\n", ib_dev->attrs.max_sge_rd); in isert_create_device_ib_res()
310 device->pd = ib_alloc_pd(ib_dev, 0); in isert_create_device_ib_res()
[all …]
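
Note: isert_alloc_rx_descriptors()/isert_free_rx_descriptors() above are a textbook use of the ib_dma_* helpers: map a buffer for the device with ib_dma_map_single(), validate the handle with ib_dma_mapping_error(), and undo it with ib_dma_unmap_single() on teardown. A hedged sketch of that round trip (function names and the DMA direction are illustrative):

    #include <linux/errno.h>
    #include <rdma/ib_verbs.h>

    /* map/check/unmap cycle along the lines of isert's rx descriptors */
    static int my_map_rx_buf(struct ib_device *ib_dev, void *buf, size_t len,
                             u64 *dma_addr)
    {
            *dma_addr = ib_dma_map_single(ib_dev, buf, len, DMA_FROM_DEVICE);
            if (ib_dma_mapping_error(ib_dev, *dma_addr))
                    return -ENOMEM;
            return 0;
    }

    static void my_unmap_rx_buf(struct ib_device *ib_dev, u64 dma_addr,
                                size_t len)
    {
            ib_dma_unmap_single(ib_dev, dma_addr, len, DMA_FROM_DEVICE);
    }
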
/Linux-v4.19/drivers/infiniband/hw/mlx5/
main.c 205 if (get_port_state(&ibdev->ib_dev, port_num, in mlx5_netdev_event()
213 ibev.device = &ibdev->ib_dev; in mlx5_netdev_event()
267 enum rdma_link_layer ll = mlx5_ib_port_link_layer(&ibdev->ib_dev, in mlx5_ib_get_native_port_mdev()
304 enum rdma_link_layer ll = mlx5_ib_port_link_layer(&ibdev->ib_dev, in mlx5_ib_put_native_port_mdev()
683 switch (mlx5_get_vport_access_method(&dev->ib_dev)) { in mlx5_query_node_guid()
4043 err = mlx5_query_node_desc(dev, dev->ib_dev.node_desc); in init_node_data()
4049 return mlx5_query_node_guid(dev, &dev->ib_dev.node_guid); in init_node_data()
4056 container_of(device, struct mlx5_ib_dev, ib_dev.dev); in show_fw_pages()
4065 container_of(device, struct mlx5_ib_dev, ib_dev.dev); in show_reg_pages()
4074 container_of(device, struct mlx5_ib_dev, ib_dev.dev); in show_hca()
[all …]
/Linux-v4.19/drivers/infiniband/ulp/iser/
iser_verbs.c 70 struct ib_device *ib_dev = device->ib_device; in iser_create_device_ib_res() local
78 ib_dev->num_comp_vectors); in iser_create_device_ib_res()
85 max_cqe = min(ISER_MAX_CQ_LEN, ib_dev->attrs.max_cqe); in iser_create_device_ib_res()
88 device->comps_used, ib_dev->name, in iser_create_device_ib_res()
89 ib_dev->num_comp_vectors, max_cqe); in iser_create_device_ib_res()
91 device->pd = ib_alloc_pd(ib_dev, in iser_create_device_ib_res()
99 comp->cq = ib_alloc_cq(ib_dev, comp, max_cqe, i, in iser_create_device_ib_res()
107 INIT_IB_EVENT_HANDLER(&device->event_handler, ib_dev, in iser_create_device_ib_res()
242 struct ib_device *ib_dev = device->ib_device; in iser_alloc_reg_res() local
246 if (ib_dev->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG) in iser_alloc_reg_res()
[all …]
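
Note: iser_create_device_ib_res() above shows the ULP-side resource setup: allocate a PD against the ib_dev, cap the CQ depth at ib_dev->attrs.max_cqe, and allocate completion queues with ib_alloc_cq(). A hedged sketch of the allocate/teardown pair, reduced to a single CQ with softirq polling (function names and the 4096 cap are illustrative):

    #include <linux/err.h>
    #include <rdma/ib_verbs.h>

    /* single-CQ variant of the PD + CQ setup done by iser */
    static int my_setup_verbs_res(struct ib_device *ib_dev,
                                  struct ib_pd **pd, struct ib_cq **cq)
    {
            int max_cqe = min(4096, ib_dev->attrs.max_cqe);

            *pd = ib_alloc_pd(ib_dev, 0);
            if (IS_ERR(*pd))
                    return PTR_ERR(*pd);

            *cq = ib_alloc_cq(ib_dev, NULL, max_cqe, 0, IB_POLL_SOFTIRQ);
            if (IS_ERR(*cq)) {
                    ib_dealloc_pd(*pd);
                    return PTR_ERR(*cq);
            }
            return 0;
    }

    static void my_teardown_verbs_res(struct ib_pd *pd, struct ib_cq *cq)
    {
            ib_free_cq(cq);
            ib_dealloc_pd(pd);
    }
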
