
Searched refs:to_mdev (Results 1 – 25 of 34) sorted by relevance

/Linux-v5.4/drivers/infiniband/hw/mthca/
mthca_provider.c
67 struct mthca_dev *mdev = to_mdev(ibdev); in mthca_query_device()
157 err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1, in mthca_query_port()
169 props->gid_tbl_len = to_mdev(ibdev)->limits.gid_table_len; in mthca_query_port()
171 props->pkey_tbl_len = to_mdev(ibdev)->limits.pkey_table_len; in mthca_query_port()
196 if (mutex_lock_interruptible(&to_mdev(ibdev)->cap_mask_mutex)) in mthca_modify_device()
200 mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex); in mthca_modify_device()
214 if (mutex_lock_interruptible(&to_mdev(ibdev)->cap_mask_mutex)) in mthca_modify_port()
227 err = mthca_SET_IB(to_mdev(ibdev), &set_ib, port); in mthca_modify_port()
231 mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex); in mthca_modify_port()
251 err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1, in mthca_query_pkey()
[all …]
mthca_mad.c
122 mthca_update_rate(to_mdev(ibdev), port_num); in smp_snoop()
123 update_sm_ah(to_mdev(ibdev), port_num, in smp_snoop()
157 mutex_lock(&to_mdev(dev)->cap_mask_mutex); in node_desc_override()
160 mutex_unlock(&to_mdev(dev)->cap_mask_mutex); in node_desc_override()
222 forward_trap(to_mdev(ibdev), port_num, in_mad); in mthca_process_mad()
262 err = mthca_MAD_IFC(to_mdev(ibdev), in mthca_process_mad()
269 mthca_err(to_mdev(ibdev), "MAD_IFC returned %d\n", err); in mthca_process_mad()
mthca_cq.c
335 if (!mthca_is_memfree(to_mdev(cq->ibcq.device)) && in mthca_cq_resize_copy_cqes()
657 struct mthca_dev *dev = to_mdev(ibcq->device); in mthca_poll_cq()
730 mthca_write64(dbhi, 0xffffffff, to_mdev(cq->device)->kar + MTHCA_CQ_DOORBELL, in mthca_tavor_arm_cq()
731 MTHCA_GET_DOORBELL_LOCK(&to_mdev(cq->device)->doorbell_lock)); in mthca_tavor_arm_cq()
762 to_mdev(ibcq->device)->kar + MTHCA_CQ_DOORBELL, in mthca_arbel_arm_cq()
763 MTHCA_GET_DOORBELL_LOCK(&to_mdev(ibcq->device)->doorbell_lock)); in mthca_arbel_arm_cq()
mthca_srq.c
375 struct mthca_dev *dev = to_mdev(ibsrq->device); in mthca_modify_srq()
398 struct mthca_dev *dev = to_mdev(ibsrq->device); in mthca_query_srq()
486 struct mthca_dev *dev = to_mdev(ibsrq->device); in mthca_tavor_post_srq_recv()
580 struct mthca_dev *dev = to_mdev(ibsrq->device); in mthca_arbel_post_srq_recv()
mthca_mcg.c
122 struct mthca_dev *dev = to_mdev(ibqp->device); in mthca_multicast_attach()
216 struct mthca_dev *dev = to_mdev(ibqp->device); in mthca_multicast_detach()
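Every mthca hit above applies the same idiom: to_mdev() turns the generic struct ib_device * that the IB core hands in (directly, or reached via ibcq->device, ibsrq->device, ibqp->device) back into the driver-private struct mthca_dev. Helpers of this shape are conventionally container_of() wrappers over the embedded ib_dev member; the following is a minimal sketch under that assumption, not the verbatim mthca definition:

/* Sketch only; real types come from <rdma/ib_verbs.h> and the
 * driver's private header, container_of() from <linux/kernel.h>.
 * The private device embeds the generic one, so the cast is
 * compile-time pointer arithmetic, not a lookup. */
struct mthca_dev {
        struct ib_device ib_dev;        /* embedded IB core device */
        /* ... limits, cap_mask_mutex, kar, doorbell_lock ... */
};

static inline struct mthca_dev *to_mdev(struct ib_device *ibdev)
{
        return container_of(ibdev, struct mthca_dev, ib_dev);
}

Hits that read to_mdev(ibdev)->limits or &to_mdev(ibdev)->cap_mask_mutex are then plain member accesses on the recovered private struct.
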
/Linux-v5.4/drivers/infiniband/hw/mlx4/
mr.c
67 err = mlx4_mr_alloc(to_mdev(pd->device)->dev, to_mpd(pd)->pdn, 0, in mlx4_ib_get_dma_mr()
72 err = mlx4_mr_enable(to_mdev(pd->device)->dev, &mr->mmr); in mlx4_ib_get_dma_mr()
82 (void) mlx4_mr_free(to_mdev(pd->device)->dev, &mr->mmr); in mlx4_ib_get_dma_mr()
408 struct mlx4_ib_dev *dev = to_mdev(pd->device); in mlx4_ib_reg_user_mr()
448 (void) mlx4_mr_free(to_mdev(pd->device)->dev, &mr->mmr); in mlx4_ib_reg_user_mr()
464 struct mlx4_ib_dev *dev = to_mdev(mr->device); in mlx4_ib_rereg_user_mr()
604 ret = mlx4_mr_free(to_mdev(ibmr->device)->dev, &mr->mmr); in mlx4_ib_dereg_mr()
617 struct mlx4_ib_dev *dev = to_mdev(pd->device); in mlx4_ib_alloc_mw()
651 mlx4_mw_free(to_mdev(ibmw->device)->dev, &mw->mmw); in mlx4_ib_dealloc_mw()
660 struct mlx4_ib_dev *dev = to_mdev(pd->device); in mlx4_ib_alloc_mr()
[all …]
main.c
134 struct mlx4_ib_dev *ibdev = to_mdev(device); in mlx4_ib_get_netdev()
251 struct mlx4_ib_dev *ibdev = to_mdev(attr->device); in mlx4_ib_add_gid()
329 struct mlx4_ib_dev *ibdev = to_mdev(attr->device); in mlx4_ib_del_gid()
425 struct mlx4_ib_dev *dev = to_mdev(ibdev); in mlx4_ib_query_device()
460 err = mlx4_MAD_IFC(to_mdev(ibdev), MLX4_MAD_IFC_IGNORE_KEYS, in mlx4_ib_query_device()
639 struct mlx4_dev *dev = to_mdev(device)->dev; in mlx4_ib_port_link_layer()
663 if (mlx4_is_mfunc(to_mdev(ibdev)->dev) && netw_view) in ib_link_query_port()
666 err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL, in ib_link_query_port()
682 props->gid_tbl_len = to_mdev(ibdev)->dev->caps.gid_table_len[port]; in ib_link_query_port()
683 props->max_msg_sz = to_mdev(ibdev)->dev->caps.max_msg_sz; in ib_link_query_port()
[all …]
cm.c
146 struct rb_root *sl_id_map = &to_mdev(ibdev)->sriov.sl_id_map; in id_map_find_by_sl_id()
191 struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov; in id_map_find_del()
208 struct rb_root *sl_id_map = &to_mdev(ibdev)->sriov.sl_id_map; in sl_id_map_add()
243 struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov; in id_map_alloc()
252 ent->dev = to_mdev(ibdev); in id_map_alloc()
275 struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov; in id_map_get()
291 struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov; in schedule_delayed()
srq.c
76 struct mlx4_ib_dev *dev = to_mdev(ib_srq->device); in mlx4_ib_create_srq()
221 struct mlx4_ib_dev *dev = to_mdev(ibsrq->device); in mlx4_ib_modify_srq()
246 struct mlx4_ib_dev *dev = to_mdev(ibsrq->device); in mlx4_ib_query_srq()
264 struct mlx4_ib_dev *dev = to_mdev(srq->device); in mlx4_ib_destroy_srq()
310 struct mlx4_ib_dev *mdev = to_mdev(ibsrq->device); in mlx4_ib_post_srq_recv()
cq.c
96 struct mlx4_ib_dev *dev = to_mdev(cq->device); in mlx4_ib_modify_cq()
181 struct mlx4_ib_dev *dev = to_mdev(ibdev); in mlx4_ib_create_cq()
382 struct mlx4_ib_dev *dev = to_mdev(ibcq->device); in mlx4_ib_resize_cq()
480 struct mlx4_ib_dev *dev = to_mdev(cq->device); in mlx4_ib_destroy_cq()
700 struct mlx4_ib_dev *dev = to_mdev(cq->ibcq.device); in mlx4_ib_poll_one()
720 mqp = __mlx4_qp_lookup(to_mdev(cq->ibcq.device)->dev, in mlx4_ib_poll_one()
732 msrq = mlx4_srq_lookup(to_mdev(cq->ibcq.device)->dev, in mlx4_ib_poll_one()
842 if (mlx4_is_mfunc(to_mdev(cq->ibcq.device)->dev)) { in mlx4_ib_poll_one()
887 struct mlx4_ib_dev *mdev = to_mdev(cq->ibcq.device); in mlx4_ib_poll_cq()
913 to_mdev(ibcq->device)->uar_map, in mlx4_ib_arm_cq()
[all …]
qp.c
764 err = create_qp_rss(to_mdev(pd->device), init_attr, &ucmd, qp); in _mlx4_ib_create_qp_rss()
783 struct mlx4_ib_dev *dev = to_mdev(context->ibucontext.device); in mlx4_ib_alloc_wqn()
833 struct mlx4_ib_dev *dev = to_mdev(context->ibucontext.device); in mlx4_ib_release_wqn()
861 struct mlx4_ib_dev *dev = to_mdev(pd->device); in create_rq()
994 struct mlx4_ib_dev *dev = to_mdev(pd->device); in create_qp_common()
1583 if (!(to_mdev(pd->device)->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC)) in _mlx4_ib_create_qp()
1618 int res = mlx4_qp_reserve_range(to_mdev(pd->device)->dev, in _mlx4_ib_create_qp()
1625 sqpn = get_sqp_num(to_mdev(pd->device), init_attr); in _mlx4_ib_create_qp()
1650 struct mlx4_ib_dev *dev = to_mdev(device); in mlx4_ib_create_qp()
1681 struct mlx4_ib_dev *dev = to_mdev(qp->device); in _mlx4_ib_destroy_qp()
[all …]
mad.c
230 struct mlx4_ib_dev *dev = to_mdev(ibdev); in smp_snoop()
376 spin_lock_irqsave(&to_mdev(dev)->sm_lock, flags); in node_desc_override()
379 spin_unlock_irqrestore(&to_mdev(dev)->sm_lock, flags); in node_desc_override()
434 struct mlx4_ib_dev *dev = to_mdev(ibdev); in mlx4_ib_find_real_gid()
668 struct mlx4_ib_dev *dev = to_mdev(ibdev); in mlx4_ib_demux_mad()
834 forward_trap(to_mdev(ibdev), port_num, in_mad); in ib_process_mad()
867 err = mlx4_MAD_IFC(to_mdev(ibdev), in ib_process_mad()
878 if (!mlx4_is_slave(to_mdev(ibdev)->dev)) in ib_process_mad()
942 struct mlx4_ib_dev *dev = to_mdev(ibdev); in iboe_process_mad()
991 struct mlx4_ib_dev *dev = to_mdev(ibdev); in mlx4_ib_process_mad()
[all …]
ah.c
46 struct mlx4_dev *dev = to_mdev(ib_ah->device)->dev; in create_ib_ah()
79 struct mlx4_ib_dev *ibdev = to_mdev(ib_ah->device); in create_iboe_ah()
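The mlx4 hits repeat the idiom with one extra level: expressions like to_mdev(pd->device)->dev (mr.c) or to_mdev(ib_ah->device)->dev (ah.c) first recover the IB-layer struct mlx4_ib_dev, then follow its pointer to the shared mlx4_core handle. A sketch of that layering, with unrelated members elided:

struct mlx4_ib_dev {
        struct ib_device ib_dev;        /* embedded, as in the mthca sketch above */
        struct mlx4_dev *dev;           /* shared mlx4_core device */
        /* ... sriov, sm_lock, uar_map ... */
};

/* so a hit such as mlx4_ib_get_dma_mr() allocates through the core layer:
 *      err = mlx4_mr_alloc(to_mdev(pd->device)->dev, to_mpd(pd)->pdn, 0, ...);
 */
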
/Linux-v5.4/drivers/infiniband/hw/mlx5/
ib_virt.c
54 struct mlx5_ib_dev *dev = to_mdev(device); in mlx5_ib_get_vf_config()
96 struct mlx5_ib_dev *dev = to_mdev(device); in mlx5_ib_set_vf_link_state()
130 dev = to_mdev(device); in mlx5_ib_get_vf_stats()
154 struct mlx5_ib_dev *dev = to_mdev(device); in set_vf_node_guid()
175 struct mlx5_ib_dev *dev = to_mdev(device); in set_vf_port_guid()
gsi.c
118 struct mlx5_ib_dev *dev = to_mdev(pd->device); in mlx5_ib_gsi_create_qp()
215 struct mlx5_ib_dev *dev = to_mdev(qp->device); in mlx5_ib_gsi_destroy_qp()
275 struct mlx5_ib_dev *dev = to_mdev(qp->device); in modify_to_rts()
315 struct mlx5_ib_dev *dev = to_mdev(device); in setup_qp()
376 struct mlx5_ib_dev *dev = to_mdev(qp->device); in mlx5_ib_gsi_modify_qp()
417 struct mlx5_ib_dev *dev = to_mdev(gsi->rx_qp->device); in mlx5_ib_add_outstanding_wr()
468 struct mlx5_ib_dev *dev = to_mdev(gsi->rx_qp->device); in get_tx_qp()
mad.c
111 err = mlx5_MAD_IFC(to_mdev(ibdev), in process_mad()
278 struct mlx5_ib_dev *dev = to_mdev(ibdev); in mlx5_ib_process_mad()
342 err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, 1, NULL, NULL, in_mad, in mlx5_query_mad_ifc_smp_attr_node_info()
481 err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, in mlx5_query_mad_ifc_pkey()
510 err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, in mlx5_query_mad_ifc_gids()
521 err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, in mlx5_query_mad_ifc_gids()
537 struct mlx5_ib_dev *dev = to_mdev(ibdev); in mlx5_query_mad_ifc_port()
srq.c
48 struct mlx5_ib_dev *dev = to_mdev(pd->device); in create_srq_user()
221 struct mlx5_ib_dev *dev = to_mdev(ib_srq->device); in mlx5_ib_create_srq()
339 struct mlx5_ib_dev *dev = to_mdev(ibsrq->device); in mlx5_ib_modify_srq()
364 struct mlx5_ib_dev *dev = to_mdev(ibsrq->device); in mlx5_ib_query_srq()
388 struct mlx5_ib_dev *dev = to_mdev(srq->device); in mlx5_ib_destroy_srq()
426 struct mlx5_ib_dev *dev = to_mdev(ibsrq->device); in mlx5_ib_post_srq_recv()
cq.c
50 struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device); in mlx5_ib_cq_event()
169 struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.device); in handle_responder()
421 struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device); in mlx5_poll_one()
550 struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device); in poll_soft_wc()
577 struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device); in mlx5_ib_poll_cq()
612 struct mlx5_core_dev *mdev = to_mdev(ibcq->device)->mdev; in mlx5_ib_arm_cq()
893 struct mlx5_ib_dev *dev = to_mdev(ibdev); in mlx5_ib_create_cq()
996 struct mlx5_ib_dev *dev = to_mdev(cq->device); in mlx5_ib_destroy_cq()
1074 struct mlx5_ib_dev *dev = to_mdev(cq->device); in mlx5_ib_modify_cq()
1153 struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device); in copy_resize_cqes()
[all …]
main.c
140 struct mlx5_ib_dev *dev = to_mdev(device); in mlx5_ib_port_link_layer()
274 struct mlx5_ib_dev *ibdev = to_mdev(device); in mlx5_ib_get_netdev()
485 struct mlx5_ib_dev *dev = to_mdev(device); in mlx5_query_port_roce()
621 return set_roce_addr(to_mdev(attr->device), attr->port_num, in mlx5_ib_add_gid()
628 return set_roce_addr(to_mdev(attr->device), attr->port_num, in mlx5_ib_del_gid()
656 if (mlx5_use_mad_ifc(to_mdev(ibdev))) in mlx5_get_vport_access_method()
714 struct mlx5_ib_dev *dev = to_mdev(ibdev); in mlx5_query_system_image_guid()
746 struct mlx5_ib_dev *dev = to_mdev(ibdev); in mlx5_query_max_pkeys()
767 struct mlx5_ib_dev *dev = to_mdev(ibdev); in mlx5_query_vendor_id()
832 struct mlx5_ib_dev *dev = to_mdev(ibdev); in mlx5_ib_query_device()
[all …]
mr.c
684 struct mlx5_ib_dev *dev = to_mdev(pd->device); in mlx5_ib_get_dma_mr()
854 struct mlx5_ib_dev *dev = to_mdev(pd->device); in alloc_mr_from_cache()
1056 struct mlx5_ib_dev *dev = to_mdev(pd->device); in reg_create()
1150 struct mlx5_ib_dev *dev = to_mdev(pd->device); in mlx5_ib_get_dm_mr()
1223 struct mlx5_core_dev *dev = to_mdev(dm->device)->mdev; in mlx5_ib_reg_dm_mr()
1254 struct mlx5_ib_dev *dev = to_mdev(pd->device); in mlx5_ib_reg_user_mr()
1372 struct mlx5_ib_dev *dev = to_mdev(pd->device); in rereg_umr()
1396 struct mlx5_ib_dev *dev = to_mdev(ib_mr->device); in mlx5_ib_rereg_user_mr()
1633 dereg_mr(to_mdev(mmr->mtt_mr->ibmr.device), mmr->mtt_mr); in mlx5_ib_dereg_mr()
1634 dereg_mr(to_mdev(mmr->klm_mr->ibmr.device), mmr->klm_mr); in mlx5_ib_dereg_mr()
[all …]
devx.c
901 dev = to_mdev(c->ibucontext.device); in devx_get_uid()
965 dev = to_mdev(c->ibucontext.device); in UVERBS_HANDLER()
1009 dev = to_mdev(c->ibucontext.device); in UVERBS_HANDLER()
1042 dev = to_mdev(c->ibucontext.device); in UVERBS_HANDLER()
1410 struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device); in UVERBS_HANDLER()
1511 struct mlx5_ib_dev *mdev = to_mdev(c->ibucontext.device); in UVERBS_HANDLER()
1559 struct mlx5_ib_dev *mdev = to_mdev(c->ibucontext.device); in UVERBS_HANDLER()
1636 struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device); in UVERBS_HANDLER()
1697 struct mlx5_ib_dev *mdev = to_mdev(c->ibucontext.device); in UVERBS_HANDLER()
1923 struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device); in UVERBS_HANDLER()
[all …]
ah.c
76 struct mlx5_ib_dev *dev = to_mdev(ibah->device); in mlx5_ib_create_ah()
odp.c
167 struct mlx5_ib_dev *dev = to_mdev(pd->device); in mlx5_odp_populate_klm()
423 struct mlx5_ib_dev *dev = to_mdev(pd->device); in implicit_mr_alloc()
478 struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.pd->device); in implicit_mr_get_data()
1658 struct mlx5_ib_dev *dev = to_mdev(pd->device); in num_pending_prefetch_inc()
1704 struct mlx5_ib_dev *dev = to_mdev(pd->device); in mlx5_ib_prefetch_sg_list()
1732 num_pending_prefetch_dec(to_mdev(w->pd->device), w->sg_list, in mlx5_ib_prefetch_mr_work()
1741 struct mlx5_ib_dev *dev = to_mdev(pd->device); in mlx5_ib_advise_mr_prefetch()
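mlx5 mirrors the mlx4 layering, with the core handle named mdev (see the cq.c hit at line 612: to_mdev(ibcq->device)->mdev). The hits also show how many different starting points funnel into the same cast: whatever object a function receives, it first walks to the common struct ib_device. Collected from the hits above as a sketch:

struct mlx5_ib_dev {
        struct ib_device ib_dev;        /* embedded IB core device */
        struct mlx5_core_dev *mdev;     /* mlx5_core handle */
};

/* All of these resolve to the same mlx5_ib_dev:
 *      to_mdev(ibcq->device)              from an ib_cq (cq.c)
 *      to_mdev(cq->ibcq.device)           from the driver's own CQ wrapper
 *      to_mdev(c->ibucontext.device)      from a user context (devx.c)
 *      to_mdev(mr->ibmr.pd->device)       through an MR's PD (odp.c)
 */
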
/Linux-v5.4/drivers/dma/
altera-msgdma.c
199 #define to_mdev(chan) container_of(chan, struct msgdma_device, dmachan) (macro definition)
303 struct msgdma_device *mdev = to_mdev(tx->chan); in msgdma_tx_submit()
332 struct msgdma_device *mdev = to_mdev(dchan); in msgdma_prep_memcpy()
390 struct msgdma_device *mdev = to_mdev(dchan); in msgdma_prep_slave_sg()
461 struct msgdma_device *mdev = to_mdev(dchan); in msgdma_dma_config()
568 struct msgdma_device *mdev = to_mdev(chan); in msgdma_issue_pending()
637 struct msgdma_device *mdev = to_mdev(dchan); in msgdma_free_chan_resources()
654 struct msgdma_device *mdev = to_mdev(dchan); in msgdma_alloc_chan_resources()
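altera-msgdma spells the same idiom as a macro rather than an inline helper; the definition is the hit at line 199 above. The dmaengine core only ever hands the driver a struct dma_chan *, and container_of() maps it back to the wrapping msgdma_device. A sketch of that mechanic; the struct contents beyond the embedded channel are invented placeholders:

struct msgdma_device {
        /* ... lock, descriptor lists, register mappings ... */
        struct dma_chan dmachan;        /* embedded channel, as registered */
};

#define to_mdev(chan) container_of(chan, struct msgdma_device, dmachan)

/* Callbacks such as msgdma_issue_pending() receive only the channel: */
static void sketch_issue_pending(struct dma_chan *dchan)
{
        struct msgdma_device *mdev = to_mdev(dchan);
        /* mdev now addresses the enclosing device state */
        (void)mdev;
}
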
/Linux-v5.4/drivers/staging/most/usb/
usb.c
124 #define to_mdev(d) container_of(d, struct most_dev, iface) (macro definition)
230 struct most_dev *mdev = to_mdev(iface); in hdm_poison_channel()
339 struct most_dev *mdev = to_mdev(mbo->ifp); in hdm_write_completion()
488 struct most_dev *mdev = to_mdev(mbo->ifp); in hdm_read_completion()
565 mdev = to_mdev(iface); in hdm_enqueue()
633 struct most_dev *mdev = to_mdev(mbo->ifp); in hdm_dma_alloc()
641 struct most_dev *mdev = to_mdev(mbo->ifp); in hdm_dma_free()
667 struct most_dev *mdev = to_mdev(iface); in hdm_configure_channel()
749 mdev = to_mdev(iface); in hdm_request_netinfo()
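The MOST USB driver defines yet another, unrelated to_mdev (the macro hit at line 124 above): here container_of() recovers struct most_dev from the most_interface registered with the MOST core, whether passed in directly (iface) or reached through a buffer object (mbo->ifp). Three unrelated subsystems reusing the identical name is exactly why this identifier search spans infiniband, dma, and staging. A sketch under the embedding the macro implies; members other than iface are invented placeholders:

struct most_dev {
        struct most_interface iface;    /* handed to the MOST core */
        /* ... USB device state, URB bookkeeping ... */
};

#define to_mdev(d) container_of(d, struct most_dev, iface)

/* hdm_poison_channel() and hdm_enqueue() start from the interface:
 *      struct most_dev *mdev = to_mdev(iface);
 * completion handlers start from a buffer object's back-pointer:
 *      struct most_dev *mdev = to_mdev(mbo->ifp);
 */
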
