Lines Matching +full:sub +full:- +full:mailboxes
1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
43 struct list_head list; /* headed in ev_file->event_list */
60 struct list_head file_list; /* headed in ev_file->subscribed_events_list */
63 struct list_head xa_list; /* headed in devx_event->unaffiliated_list or
64 * devx_obj_event->obj_sub_list
67 struct list_head event_list; /* headed in ev_file->event_list or in temp list via subscription */
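
A subscription is threaded onto up to four lists at once, per the comments above: file_list on the event FD's subscribed_events_list, xa_list on either the event's unaffiliated list or a per-object list, obj_list on the owning devx object's event_sub (see line 1408), and event_list on the FD while a notification is pending. A standalone userspace sketch of those linkages (hand-rolled list_head; the struct shape follows the comments, everything else is illustrative):

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev; n->next = h;
	h->prev->next = n; h->prev = n;
}

struct sub_model {
	struct list_head file_list;  /* ev_file->subscribed_events_list */
	struct list_head xa_list;    /* devx_event->unaffiliated_list or
	                              * devx_obj_event->obj_sub_list */
	struct list_head obj_list;   /* the devx object's event_sub list */
	struct list_head event_list; /* ev_file->event_list when pending */
};

int main(void)
{
	struct list_head subscribed_events_list, obj_sub_list;
	struct sub_model sub;

	INIT_LIST_HEAD(&subscribed_events_list);
	INIT_LIST_HEAD(&obj_sub_list);
	INIT_LIST_HEAD(&sub.obj_list);
	INIT_LIST_HEAD(&sub.event_list);	/* empty => no pending event */

	list_add_tail(&sub.file_list, &subscribed_events_list);
	list_add_tail(&sub.xa_list, &obj_sub_list);
	printf("pending: %d\n", sub.event_list.next != &sub.event_list);
	return 0;
}
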
122 if (!MLX5_CAP_GEN(dev->mdev, log_max_uctx)) in mlx5_ib_devx_create()
123 return -EINVAL; in mlx5_ib_devx_create()
127 (MLX5_CAP_GEN(dev->mdev, uctx_cap) & MLX5_UCTX_CAP_RAW_TX)) in mlx5_ib_devx_create()
130 (MLX5_CAP_GEN(dev->mdev, uctx_cap) & in mlx5_ib_devx_create()
137 err = mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out)); in mlx5_ib_devx_create()
153 mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out)); in mlx5_ib_devx_destroy()
210 opcode = (obj->obj_id >> 32) & 0xffff; in get_dec_obj_type()
217 return (obj->obj_id >> 48); in get_dec_obj_type()
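​
get_dec_obj_type() above undoes the packing applied at creation time (line 1537): the low 32 bits of obj_id carry the object number, bits 32..47 the creation opcode, and for general objects the type lands in bits 48..63 via opcode | obj_type << 16. A standalone userspace sketch (the opcode and type values are illustrative):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t get_enc_obj_id(uint32_t opcode, uint32_t obj_id)
{
	/* high 32 bits: opcode (with the general-object type pre-shifted
	 * into its top half); low 32 bits: object number */
	return ((uint64_t)opcode << 32) | obj_id;
}

int main(void)
{
	uint32_t opcode = 0xa00;   /* illustrative CREATE_GENERAL_OBJECT */
	uint32_t obj_type = 0x13;  /* illustrative general object type   */
	uint32_t obj_num = 0x1234;
	uint64_t id = get_enc_obj_id(opcode | obj_type << 16, obj_num);

	assert(((id >> 32) & 0xffff) == opcode); /* line 210's extraction */
	assert((id >> 48) == obj_type);          /* line 217's extraction */
	assert((uint32_t)id == obj_num);
	printf("obj_id=0x%016llx\n", (unsigned long long)id);
	return 0;
}
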
264 return eqe->data.qp_srq.type; in get_event_obj_type()
272 return MLX5_GET(affiliated_event_header, &eqe->data, obj_type); in get_event_obj_type()
594 struct mlx5_ib_dev *dev = mlx5_udata_to_mdev(&attrs->driver_udata); in devx_is_valid_obj_id()
603 to_mcq(uobj->object)->mcq.cqn) == in devx_is_valid_obj_id()
608 struct mlx5_core_srq *srq = &(to_msrq(uobj->object)->msrq); in devx_is_valid_obj_id()
611 switch (srq->common.res) { in devx_is_valid_obj_id()
619 if (!dev->mdev->issi) in devx_is_valid_obj_id()
626 to_msrq(uobj->object)->msrq.srqn) == in devx_is_valid_obj_id()
632 struct mlx5_ib_qp *qp = to_mqp(uobj->object); in devx_is_valid_obj_id()
634 if (qp->type == IB_QPT_RAW_PACKET || in devx_is_valid_obj_id()
635 (qp->flags & IB_QP_CREATE_SOURCE_QPN)) { in devx_is_valid_obj_id()
637 &qp->raw_packet_qp; in devx_is_valid_obj_id()
638 struct mlx5_ib_rq *rq = &raw_packet_qp->rq; in devx_is_valid_obj_id()
639 struct mlx5_ib_sq *sq = &raw_packet_qp->sq; in devx_is_valid_obj_id()
642 rq->base.mqp.qpn) == obj_id || in devx_is_valid_obj_id()
644 sq->base.mqp.qpn) == obj_id || in devx_is_valid_obj_id()
646 rq->tirn) == obj_id || in devx_is_valid_obj_id()
648 sq->tisn) == obj_id); in devx_is_valid_obj_id()
651 if (qp->type == MLX5_IB_QPT_DCT) in devx_is_valid_obj_id()
653 qp->dct.mdct.mqp.qpn) == obj_id; in devx_is_valid_obj_id()
655 qp->ibqp.qp_num) == obj_id; in devx_is_valid_obj_id()
660 to_mrwq(uobj->object)->core_qp.qpn) == in devx_is_valid_obj_id()
665 to_mrwq_ind_table(uobj->object)->rqtn) == in devx_is_valid_obj_id()
669 return ((struct devx_obj *)uobj->object)->obj_id == obj_id; in devx_is_valid_obj_id()
921 if (c->devx_uid) in devx_get_uid()
922 return c->devx_uid; in devx_get_uid()
924 dev = to_mdev(c->ibucontext.device); in devx_get_uid()
925 if (dev->devx_whitelist_uid) in devx_get_uid()
926 return dev->devx_whitelist_uid; in devx_get_uid()
928 return -EOPNOTSUPP; in devx_get_uid()
931 if (!c->devx_uid) in devx_get_uid()
932 return -EINVAL; in devx_get_uid()
934 return c->devx_uid; in devx_get_uid()
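
The two return paths above encode the privilege model: a context that owns a devx_uid always uses it; otherwise whitelisted commands may fall back to the device-wide devx_whitelist_uid allocated in mlx5_ib_devx_init() (line 2515), while commands that require a real uid fail. A minimal userspace sketch of that priority (struct shapes are stand-ins):

#include <errno.h>
#include <stdio.h>

struct ctx { int devx_uid; };
struct dev { int devx_whitelist_uid; };

/* Fallback order shown in the lines above: context uid first,
 * then the device whitelist uid, else not supported. */
static int devx_get_uid_model(const struct ctx *c, const struct dev *d)
{
	if (c->devx_uid)
		return c->devx_uid;
	if (d->devx_whitelist_uid)
		return d->devx_whitelist_uid;
	return -EOPNOTSUPP;
}

int main(void)
{
	struct dev d = { .devx_whitelist_uid = 7 };
	struct ctx anon = { 0 }, owner = { .devx_uid = 42 };

	printf("%d %d\n", devx_get_uid_model(&owner, &d),	/* 42 */
			  devx_get_uid_model(&anon, &d));	/* 7  */
	return 0;
}
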
942 if ((MLX5_CAP_GEN_64(dev->mdev, vhca_tunnel_commands) && in devx_is_general_cmd()
982 return -EFAULT; in UVERBS_HANDLER()
987 dev = to_mdev(c->ibucontext.device); in UVERBS_HANDLER()
989 err = mlx5_vector2eqn(dev->mdev, user_vector, &dev_eqn); in UVERBS_HANDLER()
995 return -EFAULT; in UVERBS_HANDLER()
1011 * mailboxes (except tagging them with UID), we expose to the user its UAR
1031 dev = to_mdev(c->ibucontext.device); in UVERBS_HANDLER()
1035 return -EFAULT; in UVERBS_HANDLER()
1037 dev_idx = bfregn_to_uar_index(dev, &c->bfregi, user_idx, true); in UVERBS_HANDLER()
1043 return -EFAULT; in UVERBS_HANDLER()
1064 dev = to_mdev(c->ibucontext.device); in UVERBS_HANDLER()
1072 return -EINVAL; in UVERBS_HANDLER()
1079 err = mlx5_cmd_exec(dev->mdev, cmd_in, in UVERBS_HANDLER()
1295 struct mlx5_ib_devx_mr *devx_mr = &obj->devx_mr; in devx_handle_mkey_indirect()
1300 mkey = &devx_mr->mmkey; in devx_handle_mkey_indirect()
1303 mkey->key = mlx5_idx_to_mkey( in devx_handle_mkey_indirect()
1305 mkey->type = MLX5_MKEY_INDIRECT_DEVX; in devx_handle_mkey_indirect()
1306 mkey->iova = MLX5_GET64(mkc, mkc, start_addr); in devx_handle_mkey_indirect()
1307 mkey->size = MLX5_GET64(mkc, mkc, len); in devx_handle_mkey_indirect()
1308 mkey->pd = MLX5_GET(mkc, mkc, pd); in devx_handle_mkey_indirect()
1309 devx_mr->ndescs = MLX5_GET(mkc, mkc, translations_octword_size); in devx_handle_mkey_indirect()
1310 init_waitqueue_head(&mkey->wait); in devx_handle_mkey_indirect()
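
The mkey key at line 1303 is composed from the firmware-assigned mkey_index shifted into the upper bits; in the mlx5 driver mlx5_idx_to_mkey() is idx << 8, and the low byte is OR-ed in from the mkc's variant field (that OR is an assumption here, based on the usual mlx5 mkey layout rather than the truncated line above). A standalone sketch of the packing:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed mkey layout: bits 8..31 index, bits 0..7 variant byte. */
static uint32_t mlx5_idx_to_mkey(uint32_t mkey_idx) { return mkey_idx << 8; }
static uint32_t mlx5_mkey_to_idx(uint32_t mkey)     { return mkey >> 8; }

int main(void)
{
	uint32_t mkey_index = 0x00abcd;	/* from create_mkey_out */
	uint8_t  mkey_7_0 = 0x5a;	/* variant byte from the mkc */
	uint32_t key = mlx5_idx_to_mkey(mkey_index) | mkey_7_0;

	assert(mlx5_mkey_to_idx(key) == mkey_index);
	assert((key & 0xff) == mkey_7_0);
	printf("mkey=0x%08x\n", key);
	return 0;
}
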
1326 return -EINVAL; in devx_handle_mkey_create()
1336 obj->flags |= DEVX_OBJ_FLAGS_INDIRECT_MKEY; in devx_handle_mkey_create()
1345 struct devx_event_subscription *sub) in devx_cleanup_subscription() argument
1350 if (sub->is_cleaned) in devx_cleanup_subscription()
1353 sub->is_cleaned = 1; in devx_cleanup_subscription()
1354 list_del_rcu(&sub->xa_list); in devx_cleanup_subscription()
1356 if (list_empty(&sub->obj_list)) in devx_cleanup_subscription()
1359 list_del_rcu(&sub->obj_list); in devx_cleanup_subscription()
1361 event = xa_load(&dev->devx_event_table.event_xa, in devx_cleanup_subscription()
1362 sub->xa_key_level1); in devx_cleanup_subscription()
1365 xa_val_level2 = xa_load(&event->object_ids, sub->xa_key_level2); in devx_cleanup_subscription()
1366 if (list_empty(&xa_val_level2->obj_sub_list)) { in devx_cleanup_subscription()
1367 xa_erase(&event->object_ids, in devx_cleanup_subscription()
1368 sub->xa_key_level2); in devx_cleanup_subscription()
1379 struct devx_obj *obj = uobject->object; in devx_obj_cleanup()
1384 dev = mlx5_udata_to_mdev(&attrs->driver_udata); in devx_obj_cleanup()
1385 if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY && in devx_obj_cleanup()
1386 xa_erase(&obj->ib_dev->odp_mkeys, in devx_obj_cleanup()
1387 mlx5_base_mkey(obj->devx_mr.mmkey.key))) in devx_obj_cleanup()
1393 mlx5r_deref_wait_odp_mkey(&obj->devx_mr.mmkey); in devx_obj_cleanup()
1395 if (obj->flags & DEVX_OBJ_FLAGS_DCT) in devx_obj_cleanup()
1396 ret = mlx5_core_destroy_dct(obj->ib_dev, &obj->core_dct); in devx_obj_cleanup()
1397 else if (obj->flags & DEVX_OBJ_FLAGS_CQ) in devx_obj_cleanup()
1398 ret = mlx5_core_destroy_cq(obj->ib_dev->mdev, &obj->core_cq); in devx_obj_cleanup()
1400 ret = mlx5_cmd_exec(obj->ib_dev->mdev, obj->dinbox, in devx_obj_cleanup()
1401 obj->dinlen, out, sizeof(out)); in devx_obj_cleanup()
1405 devx_event_table = &dev->devx_event_table; in devx_obj_cleanup()
1407 mutex_lock(&devx_event_table->event_xa_lock); in devx_obj_cleanup()
1408 list_for_each_entry_safe(sub_entry, tmp, &obj->event_sub, obj_list) in devx_obj_cleanup()
1410 mutex_unlock(&devx_event_table->event_xa_lock); in devx_obj_cleanup()
1422 u32 obj_id = mcq->cqn; in devx_cq_comp()
1424 table = &obj->ib_dev->devx_event_table; in devx_cq_comp()
1426 event = xa_load(&table->event_xa, MLX5_EVENT_TYPE_COMP); in devx_cq_comp()
1430 obj_event = xa_load(&event->object_ids, obj_id); in devx_cq_comp()
1434 dispatch_event_fd(&obj_event->obj_sub_list, eqe); in devx_cq_comp()
1441 if (!MLX5_CAP_GEN(dev->mdev, apu) || in is_apu_cq()
1460 &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext); in UVERBS_HANDLER()
1461 struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device); in UVERBS_HANDLER()
1471 return -EINVAL; in UVERBS_HANDLER()
1478 return -EINVAL; in UVERBS_HANDLER()
1486 return -ENOMEM; in UVERBS_HANDLER()
1498 obj->flags |= DEVX_OBJ_FLAGS_DCT; in UVERBS_HANDLER()
1499 err = mlx5_core_create_dct(dev, &obj->core_dct, cmd_in, in UVERBS_HANDLER()
1503 obj->flags |= DEVX_OBJ_FLAGS_CQ; in UVERBS_HANDLER()
1504 obj->core_cq.comp = devx_cq_comp; in UVERBS_HANDLER()
1505 err = mlx5_core_create_cq(dev->mdev, &obj->core_cq, in UVERBS_HANDLER()
1509 err = mlx5_cmd_exec(dev->mdev, cmd_in, in UVERBS_HANDLER()
1521 obj->flow_counter_bulk_size = 128UL * bulk; in UVERBS_HANDLER()
1524 uobj->object = obj; in UVERBS_HANDLER()
1525 INIT_LIST_HEAD(&obj->event_sub); in UVERBS_HANDLER()
1526 obj->ib_dev = dev; in UVERBS_HANDLER()
1527 devx_obj_build_destroy_cmd(cmd_in, cmd_out, obj->dinbox, &obj->dinlen, in UVERBS_HANDLER()
1529 WARN_ON(obj->dinlen > MLX5_MAX_DESTROY_INBOX_SIZE_DW * sizeof(u32)); in UVERBS_HANDLER()
1537 obj->obj_id = get_enc_obj_id(opcode | obj_type << 16, obj_id); in UVERBS_HANDLER()
1539 if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY) { in UVERBS_HANDLER()
1547 if (obj->flags & DEVX_OBJ_FLAGS_DCT) in UVERBS_HANDLER()
1548 mlx5_core_destroy_dct(obj->ib_dev, &obj->core_dct); in UVERBS_HANDLER()
1549 else if (obj->flags & DEVX_OBJ_FLAGS_CQ) in UVERBS_HANDLER()
1550 mlx5_core_destroy_cq(obj->ib_dev->mdev, &obj->core_cq); in UVERBS_HANDLER()
1552 mlx5_cmd_exec(obj->ib_dev->mdev, obj->dinbox, obj->dinlen, out, in UVERBS_HANDLER()
1568 &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext); in UVERBS_HANDLER()
1569 struct mlx5_ib_dev *mdev = to_mdev(c->ibucontext.device); in UVERBS_HANDLER()
1575 return -EINVAL; in UVERBS_HANDLER()
1582 return -EINVAL; in UVERBS_HANDLER()
1585 return -EINVAL; in UVERBS_HANDLER()
1594 err = mlx5_cmd_exec(mdev->mdev, cmd_in, in UVERBS_HANDLER()
1613 &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext); in UVERBS_HANDLER()
1617 struct mlx5_ib_dev *mdev = to_mdev(c->ibucontext.device); in UVERBS_HANDLER()
1620 return -EINVAL; in UVERBS_HANDLER()
1627 return -EINVAL; in UVERBS_HANDLER()
1630 return -EINVAL; in UVERBS_HANDLER()
1637 err = mlx5_cmd_exec(mdev->mdev, cmd_in, in UVERBS_HANDLER()
1663 spin_lock_init(&ev_queue->lock); in devx_init_event_queue()
1664 INIT_LIST_HEAD(&ev_queue->event_list); in devx_init_event_queue()
1665 init_waitqueue_head(&ev_queue->poll_wait); in devx_init_event_queue()
1666 atomic_set(&ev_queue->bytes_in_use, 0); in devx_init_event_queue()
1667 ev_queue->is_destroyed = 0; in devx_init_event_queue()
1677 struct mlx5_ib_dev *mdev = mlx5_udata_to_mdev(&attrs->driver_udata); in UVERBS_HANDLER()
1681 devx_init_event_queue(&ev_file->ev_queue); in UVERBS_HANDLER()
1682 mlx5_cmd_init_async_ctx(mdev->mdev, &ev_file->async_ctx); in UVERBS_HANDLER()
1693 &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext); in UVERBS_HANDLER()
1694 struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device); in UVERBS_HANDLER()
1707 spin_lock_init(&ev_file->lock); in UVERBS_HANDLER()
1708 INIT_LIST_HEAD(&ev_file->event_list); in UVERBS_HANDLER()
1709 init_waitqueue_head(&ev_file->poll_wait); in UVERBS_HANDLER()
1711 ev_file->omit_data = 1; in UVERBS_HANDLER()
1712 INIT_LIST_HEAD(&ev_file->subscribed_events_list); in UVERBS_HANDLER()
1713 ev_file->dev = dev; in UVERBS_HANDLER()
1714 get_device(&dev->ib_dev.dev); in UVERBS_HANDLER()
1722 struct devx_async_cmd_event_file *ev_file = async_data->ev_file; in devx_query_callback()
1723 struct devx_async_event_queue *ev_queue = &ev_file->ev_queue; in devx_query_callback()
1731 spin_lock_irqsave(&ev_queue->lock, flags); in devx_query_callback()
1732 list_add_tail(&async_data->list, &ev_queue->event_list); in devx_query_callback()
1733 spin_unlock_irqrestore(&ev_queue->lock, flags); in devx_query_callback()
1735 wake_up_interruptible(&ev_queue->poll_wait); in devx_query_callback()
1750 &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext); in UVERBS_HANDLER()
1754 struct mlx5_ib_dev *mdev = to_mdev(c->ibucontext.device); in UVERBS_HANDLER()
1759 return -EINVAL; in UVERBS_HANDLER()
1766 return -EINVAL; in UVERBS_HANDLER()
1774 return -EINVAL; in UVERBS_HANDLER()
1784 if (atomic_add_return(cmd_out_len, &ev_file->ev_queue.bytes_in_use) > in UVERBS_HANDLER()
1786 atomic_sub(cmd_out_len, &ev_file->ev_queue.bytes_in_use); in UVERBS_HANDLER()
1787 return -EAGAIN; in UVERBS_HANDLER()
1793 err = -ENOMEM; in UVERBS_HANDLER()
1797 err = uverbs_copy_from(&async_data->hdr.wr_id, attrs, in UVERBS_HANDLER()
1802 async_data->cmd_out_len = cmd_out_len; in UVERBS_HANDLER()
1803 async_data->mdev = mdev; in UVERBS_HANDLER()
1804 async_data->ev_file = ev_file; in UVERBS_HANDLER()
1807 err = mlx5_cmd_exec_cb(&ev_file->async_ctx, cmd_in, in UVERBS_HANDLER()
1810 async_data->hdr.out_data, in UVERBS_HANDLER()
1811 async_data->cmd_out_len, in UVERBS_HANDLER()
1812 devx_query_callback, &async_data->cb_work); in UVERBS_HANDLER()
1822 atomic_sub(cmd_out_len, &ev_file->ev_queue.bytes_in_use); in UVERBS_HANDLER()
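
Lines 1784-1786, together with the atomic_sub calls at 1822 and 2599, form per-FD backpressure: each in-flight async query reserves its cmd_out_len up front and releases it when the completion is consumed or submission fails. A minimal userspace sketch of that reserve/release pair (the 1 MiB cap is an assumed stand-in for the driver's limit):

#include <errno.h>
#include <stdatomic.h>
#include <stdio.h>

#define MAX_ASYNC_BYTES_IN_USE (1024 * 1024)	/* assumed cap */

static atomic_long bytes_in_use;

/* Reserve room for one pending completion; mirrors the
 * atomic_add_return()/atomic_sub() pattern in the listing. */
static int reserve(long cmd_out_len)
{
	if (atomic_fetch_add(&bytes_in_use, cmd_out_len) + cmd_out_len >
	    MAX_ASYNC_BYTES_IN_USE) {
		atomic_fetch_sub(&bytes_in_use, cmd_out_len);
		return -EAGAIN;
	}
	return 0;
}

static void release(long cmd_out_len)
{
	atomic_fetch_sub(&bytes_in_use, cmd_out_len);
}

int main(void)
{
	printf("%d\n", reserve(4096));			 /* 0       */
	printf("%d\n", reserve(MAX_ASYNC_BYTES_IN_USE)); /* -EAGAIN */
	release(4096);
	return 0;
}
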
1839 event = xa_load(&devx_event_table->event_xa, key_level1); in subscribe_event_xa_dealloc()
1842 xa_val_level2 = xa_load(&event->object_ids, in subscribe_event_xa_dealloc()
1844 if (list_empty(&xa_val_level2->obj_sub_list)) { in subscribe_event_xa_dealloc()
1845 xa_erase(&event->object_ids, in subscribe_event_xa_dealloc()
1861 event = xa_load(&devx_event_table->event_xa, key_level1); in subscribe_event_xa_alloc()
1865 return -ENOMEM; in subscribe_event_xa_alloc()
1867 INIT_LIST_HEAD(&event->unaffiliated_list); in subscribe_event_xa_alloc()
1868 xa_init(&event->object_ids); in subscribe_event_xa_alloc()
1870 err = xa_insert(&devx_event_table->event_xa, in subscribe_event_xa_alloc()
1883 obj_event = xa_load(&event->object_ids, key_level2); in subscribe_event_xa_alloc()
1888 return -ENOMEM; in subscribe_event_xa_alloc()
1890 err = xa_insert(&event->object_ids, in subscribe_event_xa_alloc()
1896 INIT_LIST_HEAD(&obj_event->obj_sub_list); in subscribe_event_xa_alloc()
1976 &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext); in UVERBS_HANDLER()
1977 struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device); in UVERBS_HANDLER()
1981 struct mlx5_devx_event_table *devx_event_table = &dev->devx_event_table; in UVERBS_HANDLER()
1995 if (!c->devx_uid) in UVERBS_HANDLER()
1996 return -EINVAL; in UVERBS_HANDLER()
1999 obj = (struct devx_obj *)devx_uobj->object; in UVERBS_HANDLER()
2001 obj_id = get_dec_obj_id(obj->obj_id); in UVERBS_HANDLER()
2025 return -EINVAL; in UVERBS_HANDLER()
2041 return -EINVAL; in UVERBS_HANDLER()
2046 if (!is_valid_events(dev->mdev, num_events, event_type_num_list, obj)) in UVERBS_HANDLER()
2047 return -EINVAL; in UVERBS_HANDLER()
2054 mutex_lock(&devx_event_table->event_xa_lock); in UVERBS_HANDLER()
2073 err = -ENOMEM; in UVERBS_HANDLER()
2077 list_add_tail(&event_sub->event_list, &sub_list); in UVERBS_HANDLER()
2078 uverbs_uobject_get(&ev_file->uobj); in UVERBS_HANDLER()
2080 event_sub->eventfd = in UVERBS_HANDLER()
2083 if (IS_ERR(event_sub->eventfd)) { in UVERBS_HANDLER()
2084 err = PTR_ERR(event_sub->eventfd); in UVERBS_HANDLER()
2085 event_sub->eventfd = NULL; in UVERBS_HANDLER()
2090 event_sub->cookie = cookie; in UVERBS_HANDLER()
2091 event_sub->ev_file = ev_file; in UVERBS_HANDLER()
2093 event_sub->xa_key_level1 = key_level1; in UVERBS_HANDLER()
2094 event_sub->xa_key_level2 = obj_id; in UVERBS_HANDLER()
2095 INIT_LIST_HEAD(&event_sub->obj_list); in UVERBS_HANDLER()
2106 list_del_init(&event_sub->event_list); in UVERBS_HANDLER()
2108 spin_lock_irq(&ev_file->lock); in UVERBS_HANDLER()
2109 list_add_tail_rcu(&event_sub->file_list, in UVERBS_HANDLER()
2110 &ev_file->subscribed_events_list); in UVERBS_HANDLER()
2111 spin_unlock_irq(&ev_file->lock); in UVERBS_HANDLER()
2113 event = xa_load(&devx_event_table->event_xa, in UVERBS_HANDLER()
2114 event_sub->xa_key_level1); in UVERBS_HANDLER()
2118 list_add_tail_rcu(&event_sub->xa_list, in UVERBS_HANDLER()
2119 &event->unaffiliated_list); in UVERBS_HANDLER()
2123 obj_event = xa_load(&event->object_ids, obj_id); in UVERBS_HANDLER()
2125 list_add_tail_rcu(&event_sub->xa_list, in UVERBS_HANDLER()
2126 &obj_event->obj_sub_list); in UVERBS_HANDLER()
2127 list_add_tail_rcu(&event_sub->obj_list, in UVERBS_HANDLER()
2128 &obj->event_sub); in UVERBS_HANDLER()
2131 mutex_unlock(&devx_event_table->event_xa_lock); in UVERBS_HANDLER()
2136 list_del(&event_sub->event_list); in UVERBS_HANDLER()
2139 event_sub->xa_key_level1, in UVERBS_HANDLER()
2143 if (event_sub->eventfd) in UVERBS_HANDLER()
2144 eventfd_ctx_put(event_sub->eventfd); in UVERBS_HANDLER()
2145 uverbs_uobject_put(&event_sub->ev_file->uobj); in UVERBS_HANDLER()
2149 mutex_unlock(&devx_event_table->event_xa_lock); in UVERBS_HANDLER()
2164 return -EFAULT; in devx_umem_get()
2174 err = ib_check_mr_access(&dev->ib_dev, access); in devx_umem_get()
2178 obj->umem = ib_umem_get(&dev->ib_dev, addr, size, access); in devx_umem_get()
2179 if (IS_ERR(obj->umem)) in devx_umem_get()
2180 return PTR_ERR(obj->umem); in devx_umem_get()
2192 pgsz_bitmap &= GENMASK_ULL(max_t(u64, order_base_2(umem->length), in devx_umem_find_best_pgsize()
2209 (umem->length % page_size) != 0) && in devx_umem_find_best_pgsize()
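
devx_umem_find_best_pgsize() first masks off candidate page sizes larger than the umem itself (line 2192) and then rejects any choice whose start or length is not block-aligned (line 2209), since a DEVX umem must begin and end exactly on page-size boundaries. A simplified userspace stand-in for that selection:

#include <stdint.h>
#include <stdio.h>

/* Pick the largest page size from pgsz_bitmap that fits the region and
 * keeps both start and length block-aligned. Simplified model of the
 * checks in the listing. */
static uint64_t find_best_pgsize(uint64_t pgsz_bitmap,
				 uint64_t addr, uint64_t length)
{
	while (pgsz_bitmap) {
		/* highest set bit = largest remaining candidate */
		uint64_t page_size =
			1ULL << (63 - __builtin_clzll(pgsz_bitmap));

		if (page_size <= length &&
		    (addr % page_size) == 0 && (length % page_size) == 0)
			return page_size;
		pgsz_bitmap &= ~page_size;
	}
	return 0;	/* caller fails with -EINVAL, as at line 2248 */
}

int main(void)
{
	/* 4K/2M/1G capability bitmap, 6 MiB region on a 2 MiB boundary */
	uint64_t bitmap = (1ULL << 12) | (1ULL << 21) | (1ULL << 30);

	printf("0x%llx\n", (unsigned long long)
	       find_best_pgsize(bitmap, 0x40200000, 6 << 20)); /* 0x200000 */
	return 0;
}
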
2246 page_size = devx_umem_find_best_pgsize(obj->umem, pgsz_bitmap); in devx_umem_reg_cmd_alloc()
2248 return -EINVAL; in devx_umem_reg_cmd_alloc()
2250 cmd->inlen = MLX5_ST_SZ_BYTES(create_umem_in) + in devx_umem_reg_cmd_alloc()
2252 ib_umem_num_dma_blocks(obj->umem, page_size)); in devx_umem_reg_cmd_alloc()
2253 cmd->in = uverbs_zalloc(attrs, cmd->inlen); in devx_umem_reg_cmd_alloc()
2254 if (IS_ERR(cmd->in)) in devx_umem_reg_cmd_alloc()
2255 return PTR_ERR(cmd->in); in devx_umem_reg_cmd_alloc()
2257 umem = MLX5_ADDR_OF(create_umem_in, cmd->in, umem); in devx_umem_reg_cmd_alloc()
2260 MLX5_SET(create_umem_in, cmd->in, opcode, MLX5_CMD_OP_CREATE_UMEM); in devx_umem_reg_cmd_alloc()
2262 ib_umem_num_dma_blocks(obj->umem, page_size)); in devx_umem_reg_cmd_alloc()
2264 order_base_2(page_size) - MLX5_ADAPTER_PAGE_SHIFT); in devx_umem_reg_cmd_alloc()
2266 ib_umem_dma_offset(obj->umem, page_size)); in devx_umem_reg_cmd_alloc()
2268 mlx5_ib_populate_pas(obj->umem, page_size, mtt, in devx_umem_reg_cmd_alloc()
2269 (obj->umem->writable ? MLX5_IB_MTT_WRITE : 0) | in devx_umem_reg_cmd_alloc()
2283 &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext); in UVERBS_HANDLER()
2284 struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device); in UVERBS_HANDLER()
2287 if (!c->devx_uid) in UVERBS_HANDLER()
2288 return -EINVAL; in UVERBS_HANDLER()
2292 return -ENOMEM; in UVERBS_HANDLER()
2294 err = devx_umem_get(dev, &c->ibucontext, attrs, obj); in UVERBS_HANDLER()
2302 MLX5_SET(create_umem_in, cmd.in, uid, c->devx_uid); in UVERBS_HANDLER()
2303 err = mlx5_cmd_exec(dev->mdev, cmd.in, cmd.inlen, cmd.out, in UVERBS_HANDLER()
2308 obj->mdev = dev->mdev; in UVERBS_HANDLER()
2309 uobj->object = obj; in UVERBS_HANDLER()
2310 devx_obj_build_destroy_cmd(cmd.in, cmd.out, obj->dinbox, &obj->dinlen, &obj_id); in UVERBS_HANDLER()
2318 ib_umem_release(obj->umem); in UVERBS_HANDLER()
2328 struct devx_umem *obj = uobject->object; in devx_umem_cleanup()
2332 err = mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out, sizeof(out)); in devx_umem_cleanup()
2336 ib_umem_release(obj->umem); in devx_umem_cleanup()
2380 obj_id = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff; in devx_get_obj_id_from_event()
2383 obj_id = be32_to_cpu(eqe->data.xrq_err.type_xrqn) & 0xffffff; in devx_get_obj_id_from_event()
2387 obj_id = be32_to_cpu(eqe->data.dct.dctn) & 0xffffff; in devx_get_obj_id_from_event()
2390 obj_id = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff; in devx_get_obj_id_from_event()
2393 obj_id = MLX5_GET(affiliated_event_header, &eqe->data, obj_id); in devx_get_obj_id_from_event()
2407 ev_file = event_sub->ev_file; in deliver_event()
2409 if (ev_file->omit_data) { in deliver_event()
2410 spin_lock_irqsave(&ev_file->lock, flags); in deliver_event()
2411 if (!list_empty(&event_sub->event_list) || in deliver_event()
2412 ev_file->is_destroyed) { in deliver_event()
2413 spin_unlock_irqrestore(&ev_file->lock, flags); in deliver_event()
2417 list_add_tail(&event_sub->event_list, &ev_file->event_list); in deliver_event()
2418 spin_unlock_irqrestore(&ev_file->lock, flags); in deliver_event()
2419 wake_up_interruptible(&ev_file->poll_wait); in deliver_event()
2426 spin_lock_irqsave(&ev_file->lock, flags); in deliver_event()
2427 ev_file->is_overflow_err = 1; in deliver_event()
2428 spin_unlock_irqrestore(&ev_file->lock, flags); in deliver_event()
2429 return -ENOMEM; in deliver_event()
2432 event_data->hdr.cookie = event_sub->cookie; in deliver_event()
2433 memcpy(event_data->hdr.out_data, data, sizeof(struct mlx5_eqe)); in deliver_event()
2435 spin_lock_irqsave(&ev_file->lock, flags); in deliver_event()
2436 if (!ev_file->is_destroyed) in deliver_event()
2437 list_add_tail(&event_data->list, &ev_file->event_list); in deliver_event()
2440 spin_unlock_irqrestore(&ev_file->lock, flags); in deliver_event()
2441 wake_up_interruptible(&ev_file->poll_wait); in deliver_event()
2452 if (item->eventfd) in dispatch_event_fd()
2453 eventfd_signal(item->eventfd, 1); in dispatch_event_fd()
2477 is_unaffiliated = is_unaffiliated_event(dev->mdev, event_type); in devx_event_notifier()
2483 event = xa_load(&table->event_xa, event_type | (obj_type << 16)); in devx_event_notifier()
2490 dispatch_event_fd(&event->unaffiliated_list, data); in devx_event_notifier()
2496 obj_event = xa_load(&event->object_ids, obj_id); in devx_event_notifier()
2502 dispatch_event_fd(&obj_event->obj_sub_list, data); in devx_event_notifier()
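
devx_event_notifier() resolves subscribers in two hops: the level-1 key into event_xa is event_type | (obj_type << 16) (line 2483, with obj_type 0 for unaffiliated events), and for affiliated events the level-2 key into object_ids is the object number pulled from the EQE (line 2496). A compact userspace model, with plain arrays standing in for the xarrays:

#include <stdint.h>
#include <stdio.h>

struct obj_event { uint32_t obj_id; const char *subscriber; };

struct devx_event {
	uint32_t key;			/* event_type | (obj_type << 16) */
	const char *unaffiliated;	/* unaffiliated_list stand-in */
	struct obj_event objs[2];	/* object_ids xarray stand-in */
};

static const struct devx_event table[] = {
	{ .key = 0x13,			/* unaffiliated: obj_type stays 0 */
	  .unaffiliated = "unaffiliated-sub" },
	{ .key = 0x13 | (0x0c << 16),	/* affiliated, illustrative types */
	  .objs = { { 0x1234, "obj-0x1234-sub" } } },
};

static const char *dispatch(uint32_t event_type, uint32_t obj_type,
			    uint32_t obj_id, int is_unaffiliated)
{
	/* level-1 key, as at line 2483 */
	uint32_t key = event_type | (is_unaffiliated ? 0 : obj_type << 16);

	for (unsigned i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
		if (table[i].key != key)
			continue;
		if (is_unaffiliated)
			return table[i].unaffiliated;
		for (unsigned j = 0; j < 2; j++)	/* level-2 lookup */
			if (table[i].objs[j].obj_id == obj_id)
				return table[i].objs[j].subscriber;
	}
	return NULL;	/* no subscriber registered */
}

int main(void)
{
	printf("%s\n", dispatch(0x13, 0x0c, 0x1234, 0));
	printf("%s\n", dispatch(0x13, 0, 0, 1));
	return 0;
}
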
2510 struct mlx5_devx_event_table *table = &dev->devx_event_table; in mlx5_ib_devx_init()
2515 dev->devx_whitelist_uid = uid; in mlx5_ib_devx_init()
2516 xa_init(&table->event_xa); in mlx5_ib_devx_init()
2517 mutex_init(&table->event_xa_lock); in mlx5_ib_devx_init()
2518 MLX5_NB_INIT(&table->devx_nb, devx_event_notifier, NOTIFY_ANY); in mlx5_ib_devx_init()
2519 mlx5_eq_notifier_register(dev->mdev, &table->devx_nb); in mlx5_ib_devx_init()
2527 struct mlx5_devx_event_table *table = &dev->devx_event_table; in mlx5_ib_devx_cleanup()
2528 struct devx_event_subscription *sub, *tmp; in mlx5_ib_devx_cleanup() local
2533 if (dev->devx_whitelist_uid) { in mlx5_ib_devx_cleanup()
2534 mlx5_eq_notifier_unregister(dev->mdev, &table->devx_nb); in mlx5_ib_devx_cleanup()
2535 mutex_lock(&dev->devx_event_table.event_xa_lock); in mlx5_ib_devx_cleanup()
2536 xa_for_each(&table->event_xa, id, entry) { in mlx5_ib_devx_cleanup()
2539 sub, tmp, &event->unaffiliated_list, xa_list) in mlx5_ib_devx_cleanup()
2540 devx_cleanup_subscription(dev, sub); in mlx5_ib_devx_cleanup()
2543 mutex_unlock(&dev->devx_event_table.event_xa_lock); in mlx5_ib_devx_cleanup()
2544 xa_destroy(&table->event_xa); in mlx5_ib_devx_cleanup()
2546 mlx5_ib_devx_destroy(dev, dev->devx_whitelist_uid); in mlx5_ib_devx_cleanup()
2553 struct devx_async_cmd_event_file *comp_ev_file = filp->private_data; in devx_async_cmd_event_read()
2554 struct devx_async_event_queue *ev_queue = &comp_ev_file->ev_queue; in devx_async_cmd_event_read()
2559 spin_lock_irq(&ev_queue->lock); in devx_async_cmd_event_read()
2561 while (list_empty(&ev_queue->event_list)) { in devx_async_cmd_event_read()
2562 spin_unlock_irq(&ev_queue->lock); in devx_async_cmd_event_read()
2564 if (filp->f_flags & O_NONBLOCK) in devx_async_cmd_event_read()
2565 return -EAGAIN; in devx_async_cmd_event_read()
2568 ev_queue->poll_wait, in devx_async_cmd_event_read()
2569 (!list_empty(&ev_queue->event_list) || in devx_async_cmd_event_read()
2570 ev_queue->is_destroyed))) { in devx_async_cmd_event_read()
2571 return -ERESTARTSYS; in devx_async_cmd_event_read()
2574 spin_lock_irq(&ev_queue->lock); in devx_async_cmd_event_read()
2575 if (ev_queue->is_destroyed) { in devx_async_cmd_event_read()
2576 spin_unlock_irq(&ev_queue->lock); in devx_async_cmd_event_read()
2577 return -EIO; in devx_async_cmd_event_read()
2581 event = list_entry(ev_queue->event_list.next, in devx_async_cmd_event_read()
2583 eventsz = event->cmd_out_len + in devx_async_cmd_event_read()
2587 spin_unlock_irq(&ev_queue->lock); in devx_async_cmd_event_read()
2588 return -ENOSPC; in devx_async_cmd_event_read()
2591 list_del(ev_queue->event_list.next); in devx_async_cmd_event_read()
2592 spin_unlock_irq(&ev_queue->lock); in devx_async_cmd_event_read()
2594 if (copy_to_user(buf, &event->hdr, eventsz)) in devx_async_cmd_event_read()
2595 ret = -EFAULT; in devx_async_cmd_event_read()
2599 atomic_sub(event->cmd_out_len, &ev_queue->bytes_in_use); in devx_async_cmd_event_read()
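
Userspace consumes this read() through the async command FD created by the DEVX async-cmd-FD-alloc method: each read returns one completion, an 8-byte wr_id header followed by the raw command output, and a short buffer gets ENOSPC (line 2588). A hedged consumer sketch (the fd is assumed to come from the DEVX async-cmd interface, e.g. rdma-core's mlx5dv_devx_create_cmd_comp(); the header layout mirrors the kernel uAPI's mlx5_ib_uapi_devx_async_cmd_hdr):

#include <poll.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

/* One completion record: wr_id, then the raw command output. */
struct async_cmd_hdr {
	unsigned long long wr_id;
	unsigned char out_data[];
};

/* Block for one async command completion on 'fd'. cmd_out_len must cover
 * the out length given at submit time or the kernel returns ENOSPC. */
static int consume_one(int fd, size_t cmd_out_len)
{
	size_t bufsz = sizeof(struct async_cmd_hdr) + cmd_out_len;
	struct async_cmd_hdr *ev = malloc(bufsz);
	struct pollfd pfd = { .fd = fd, .events = POLLIN };
	ssize_t n;

	if (!ev)
		return -1;
	if (poll(&pfd, 1, -1) <= 0) {
		free(ev);
		return -1;
	}
	n = read(fd, ev, bufsz);	/* one whole record per read() */
	if (n >= (ssize_t)sizeof(*ev))
		printf("wr_id=%llu, %zd output bytes\n",
		       ev->wr_id, n - (ssize_t)sizeof(*ev));
	free(ev);
	return n < 0 ? -1 : 0;
}

int main(int argc, char **argv)
{
	/* for the sketch, take the already-created FD as argv[1] */
	return argc > 1 ? consume_one(atoi(argv[1]), 2048) : 0;
}
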
2607 struct devx_async_cmd_event_file *comp_ev_file = filp->private_data; in devx_async_cmd_event_poll()
2608 struct devx_async_event_queue *ev_queue = &comp_ev_file->ev_queue; in devx_async_cmd_event_poll()
2611 poll_wait(filp, &ev_queue->poll_wait, wait); in devx_async_cmd_event_poll()
2613 spin_lock_irq(&ev_queue->lock); in devx_async_cmd_event_poll()
2614 if (ev_queue->is_destroyed) in devx_async_cmd_event_poll()
2616 else if (!list_empty(&ev_queue->event_list)) in devx_async_cmd_event_poll()
2618 spin_unlock_irq(&ev_queue->lock); in devx_async_cmd_event_poll()
2634 struct devx_async_event_file *ev_file = filp->private_data; in devx_async_event_read()
2642 omit_data = ev_file->omit_data; in devx_async_event_read()
2644 spin_lock_irq(&ev_file->lock); in devx_async_event_read()
2646 if (ev_file->is_overflow_err) { in devx_async_event_read()
2647 ev_file->is_overflow_err = 0; in devx_async_event_read()
2648 spin_unlock_irq(&ev_file->lock); in devx_async_event_read()
2649 return -EOVERFLOW; in devx_async_event_read()
2653 while (list_empty(&ev_file->event_list)) { in devx_async_event_read()
2654 spin_unlock_irq(&ev_file->lock); in devx_async_event_read()
2656 if (filp->f_flags & O_NONBLOCK) in devx_async_event_read()
2657 return -EAGAIN; in devx_async_event_read()
2659 if (wait_event_interruptible(ev_file->poll_wait, in devx_async_event_read()
2660 (!list_empty(&ev_file->event_list) || in devx_async_event_read()
2661 ev_file->is_destroyed))) { in devx_async_event_read()
2662 return -ERESTARTSYS; in devx_async_event_read()
2665 spin_lock_irq(&ev_file->lock); in devx_async_event_read()
2666 if (ev_file->is_destroyed) { in devx_async_event_read()
2667 spin_unlock_irq(&ev_file->lock); in devx_async_event_read()
2668 return -EIO; in devx_async_event_read()
2673 event_sub = list_first_entry(&ev_file->event_list, in devx_async_event_read()
2676 eventsz = sizeof(event_sub->cookie); in devx_async_event_read()
2677 event_data = &event_sub->cookie; in devx_async_event_read()
2679 event = list_first_entry(&ev_file->event_list, in devx_async_event_read()
2683 event_data = &event->hdr; in devx_async_event_read()
2687 spin_unlock_irq(&ev_file->lock); in devx_async_event_read()
2688 return -EINVAL; in devx_async_event_read()
2692 list_del_init(&event_sub->event_list); in devx_async_event_read()
2694 list_del(&event->list); in devx_async_event_read()
2696 spin_unlock_irq(&ev_file->lock); in devx_async_event_read()
2700 ret = -EFAULT; in devx_async_event_read()
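
For the event FD the record size depends on the omit_data mode chosen at allocation (line 1711): reads return either just the 8-byte subscription cookie, or a cookie header followed by the raw 64-byte EQE, and a buffer smaller than one record fails with EINVAL (line 2688). A sketch of that sizing rule:

#include <stdio.h>

#define MLX5_EQE_SIZE 64	/* standard mlx5 EQE */

/* Minimum read() size for one event record, mirroring the eventsz
 * computation in devx_async_event_read(). */
static size_t devx_event_record_size(int omit_data)
{
	size_t cookie = sizeof(unsigned long long);	/* __aligned_u64 */

	return omit_data ? cookie : cookie + MLX5_EQE_SIZE;
}

int main(void)
{
	printf("omit_data: %zu bytes, full: %zu bytes\n",
	       devx_event_record_size(1),	/* 8  */
	       devx_event_record_size(0));	/* 72 */
	return 0;
}
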
2712 struct devx_async_event_file *ev_file = filp->private_data; in devx_async_event_poll()
2715 poll_wait(filp, &ev_file->poll_wait, wait); in devx_async_event_poll()
2717 spin_lock_irq(&ev_file->lock); in devx_async_event_poll()
2718 if (ev_file->is_destroyed) in devx_async_event_poll()
2720 else if (!list_empty(&ev_file->event_list)) in devx_async_event_poll()
2722 spin_unlock_irq(&ev_file->lock); in devx_async_event_poll()
2732 if (event_sub->eventfd) in devx_free_subscription()
2733 eventfd_ctx_put(event_sub->eventfd); in devx_free_subscription()
2734 uverbs_uobject_put(&event_sub->ev_file->uobj); in devx_free_subscription()
2752 struct devx_async_event_queue *ev_queue = &comp_ev_file->ev_queue; in devx_async_cmd_event_destroy_uobj()
2755 spin_lock_irq(&ev_queue->lock); in devx_async_cmd_event_destroy_uobj()
2756 ev_queue->is_destroyed = 1; in devx_async_cmd_event_destroy_uobj()
2757 spin_unlock_irq(&ev_queue->lock); in devx_async_cmd_event_destroy_uobj()
2758 wake_up_interruptible(&ev_queue->poll_wait); in devx_async_cmd_event_destroy_uobj()
2760 mlx5_cmd_cleanup_async_ctx(&comp_ev_file->async_ctx); in devx_async_cmd_event_destroy_uobj()
2762 spin_lock_irq(&comp_ev_file->ev_queue.lock); in devx_async_cmd_event_destroy_uobj()
2764 &comp_ev_file->ev_queue.event_list, list) { in devx_async_cmd_event_destroy_uobj()
2765 list_del(&entry->list); in devx_async_cmd_event_destroy_uobj()
2768 spin_unlock_irq(&comp_ev_file->ev_queue.lock); in devx_async_cmd_event_destroy_uobj()
2778 struct mlx5_ib_dev *dev = ev_file->dev; in devx_async_event_destroy_uobj()
2780 spin_lock_irq(&ev_file->lock); in devx_async_event_destroy_uobj()
2781 ev_file->is_destroyed = 1; in devx_async_event_destroy_uobj()
2784 if (ev_file->omit_data) { in devx_async_event_destroy_uobj()
2787 list_for_each_entry_safe(event_sub, tmp, &ev_file->event_list, in devx_async_event_destroy_uobj()
2789 list_del_init(&event_sub->event_list); in devx_async_event_destroy_uobj()
2794 list_for_each_entry_safe(entry, tmp, &ev_file->event_list, in devx_async_event_destroy_uobj()
2796 list_del(&entry->list); in devx_async_event_destroy_uobj()
2801 spin_unlock_irq(&ev_file->lock); in devx_async_event_destroy_uobj()
2802 wake_up_interruptible(&ev_file->poll_wait); in devx_async_event_destroy_uobj()
2804 mutex_lock(&dev->devx_event_table.event_xa_lock); in devx_async_event_destroy_uobj()
2807 &ev_file->subscribed_events_list, file_list) { in devx_async_event_destroy_uobj()
2809 list_del_rcu(&event_sub->file_list); in devx_async_event_destroy_uobj()
2811 call_rcu(&event_sub->rcu, devx_free_subscription); in devx_async_event_destroy_uobj()
2813 mutex_unlock(&dev->devx_event_table.event_xa_lock); in devx_async_event_destroy_uobj()
2815 put_device(&dev->ib_dev.dev); in devx_async_event_destroy_uobj()
3029 return MLX5_CAP_GEN(dev->mdev, log_max_uctx); in devx_is_supported()