Lines Matching +full:sig +full:- +full:dir
2 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
15 * - Redistributions of source code must retain the above
19 * - Redistributions in binary form must reproduce the above
40 #include <linux/dma-buf.h>
41 #include <linux/dma-resv.h>
70 struct mlx5_ib_dev *dev = to_mdev(pd->device); in set_mkc_access_pd_addr_fields()
71 bool ro_pci_enabled = pcie_relaxed_ordering_enabled(dev->mdev->pdev); in set_mkc_access_pd_addr_fields()
79 if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write)) in set_mkc_access_pd_addr_fields()
82 if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read)) in set_mkc_access_pd_addr_fields()
86 MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn); in set_mkc_access_pd_addr_fields()
95 u8 key = atomic_inc_return(&dev->mkey_var); in assign_mkey_variant()
100 mkey->key = key; in assign_mkey_variant()
108 return mlx5_core_create_mkey(dev->mdev, mkey, in, inlen); in mlx5_ib_create_mkey()
129 return !MLX5_CAP_GEN(dev->mdev, umr_indirect_mkey_disabled); in umr_can_use_indirect_mkey()
134 WARN_ON(xa_load(&dev->odp_mkeys, mlx5_base_mkey(mr->mmkey.key))); in destroy_mkey()
136 return mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey); in destroy_mkey()
143 struct mlx5_cache_ent *ent = mr->cache_ent; in create_mkey_callback()
144 struct mlx5_ib_dev *dev = ent->dev; in create_mkey_callback()
150 spin_lock_irqsave(&ent->lock, flags); in create_mkey_callback()
151 ent->pending--; in create_mkey_callback()
152 WRITE_ONCE(dev->fill_delay, 1); in create_mkey_callback()
153 spin_unlock_irqrestore(&ent->lock, flags); in create_mkey_callback()
154 mod_timer(&dev->delay_timer, jiffies + HZ); in create_mkey_callback()
158 mr->mmkey.type = MLX5_MKEY_MR; in create_mkey_callback()
159 mr->mmkey.key |= mlx5_idx_to_mkey( in create_mkey_callback()
160 MLX5_GET(create_mkey_out, mr->out, mkey_index)); in create_mkey_callback()
161 init_waitqueue_head(&mr->mmkey.wait); in create_mkey_callback()
163 WRITE_ONCE(dev->cache.last_add, jiffies); in create_mkey_callback()
165 spin_lock_irqsave(&ent->lock, flags); in create_mkey_callback()
166 list_add_tail(&mr->list, &ent->head); in create_mkey_callback()
167 ent->available_mrs++; in create_mkey_callback()
168 ent->total_mrs++; in create_mkey_callback()
171 ent->pending--; in create_mkey_callback()
172 spin_unlock_irqrestore(&ent->lock, flags); in create_mkey_callback()
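The two fragments above (the 8-bit variant written in assign_mkey_variant() and the `|= mlx5_idx_to_mkey(...)` in create_mkey_callback()) suggest the final mkey is the firmware-assigned index shifted into the upper bits, OR'd with a per-device 8-bit variant counter. A minimal userspace sketch of that composition, assuming the conventional `index << 8` layout of mlx5_idx_to_mkey(); the helper names here are illustrative only:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for mlx5_idx_to_mkey(): the (assumed 24-bit) mkey index
 * occupies the upper bits, leaving the low 8 bits for the variant. */
static uint32_t idx_to_mkey(uint32_t mkey_idx)
{
	return mkey_idx << 8;
}

/* Compose a key the way the fragments suggest: take the variant chosen
 * before the CREATE_MKEY command and OR in the index reported by the
 * async completion (create_mkey_out.mkey_index). */
static uint32_t compose_mkey(uint32_t fw_index, uint8_t variant)
{
	return idx_to_mkey(fw_index) | variant;
}

int main(void)
{
	uint32_t key = compose_mkey(0x1234, 0xab);

	printf("mkey = 0x%08x\n", key);	/* prints 0x001234ab */
	return 0;
}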
182 mr->cache_ent = ent; in alloc_cache_mr()
184 set_mkc_access_pd_addr_fields(mkc, 0, 0, ent->dev->umrc.pd); in alloc_cache_mr()
187 MLX5_SET(mkc, mkc, access_mode_1_0, ent->access_mode & 0x3); in alloc_cache_mr()
188 MLX5_SET(mkc, mkc, access_mode_4_2, (ent->access_mode >> 2) & 0x7); in alloc_cache_mr()
190 MLX5_SET(mkc, mkc, translations_octword_size, ent->xlt); in alloc_cache_mr()
191 MLX5_SET(mkc, mkc, log_page_size, ent->page); in alloc_cache_mr()
207 return -ENOMEM; in add_keys()
213 err = -ENOMEM; in add_keys()
216 spin_lock_irq(&ent->lock); in add_keys()
217 if (ent->pending >= MAX_PENDING_REG_MR) { in add_keys()
218 err = -EAGAIN; in add_keys()
219 spin_unlock_irq(&ent->lock); in add_keys()
223 ent->pending++; in add_keys()
224 spin_unlock_irq(&ent->lock); in add_keys()
225 err = mlx5_ib_create_mkey_cb(ent->dev, &mr->mmkey, in add_keys()
226 &ent->dev->async_ctx, in, inlen, in add_keys()
227 mr->out, sizeof(mr->out), in add_keys()
228 &mr->cb_work); in add_keys()
230 spin_lock_irq(&ent->lock); in add_keys()
231 ent->pending--; in add_keys()
232 spin_unlock_irq(&ent->lock); in add_keys()
233 mlx5_ib_warn(ent->dev, "create mkey failed %d\n", err); in add_keys()
254 return ERR_PTR(-ENOMEM); in create_cache_mr()
259 err = -ENOMEM; in create_cache_mr()
263 err = mlx5_core_create_mkey(ent->dev->mdev, &mr->mmkey, in, inlen); in create_cache_mr()
267 mr->mmkey.type = MLX5_MKEY_MR; in create_cache_mr()
268 WRITE_ONCE(ent->dev->cache.last_add, jiffies); in create_cache_mr()
269 spin_lock_irq(&ent->lock); in create_cache_mr()
270 ent->total_mrs++; in create_cache_mr()
271 spin_unlock_irq(&ent->lock); in create_cache_mr()
285 lockdep_assert_held(&ent->lock); in remove_cache_mr_locked()
286 if (list_empty(&ent->head)) in remove_cache_mr_locked()
288 mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list); in remove_cache_mr_locked()
289 list_del(&mr->list); in remove_cache_mr_locked()
290 ent->available_mrs--; in remove_cache_mr_locked()
291 ent->total_mrs--; in remove_cache_mr_locked()
292 spin_unlock_irq(&ent->lock); in remove_cache_mr_locked()
293 mlx5_core_destroy_mkey(ent->dev->mdev, &mr->mmkey); in remove_cache_mr_locked()
295 spin_lock_irq(&ent->lock); in remove_cache_mr_locked()
303 lockdep_assert_held(&ent->lock); in resize_available_mrs()
307 target = ent->limit * 2; in resize_available_mrs()
308 if (target == ent->available_mrs + ent->pending) in resize_available_mrs()
310 if (target > ent->available_mrs + ent->pending) { in resize_available_mrs()
311 u32 todo = target - (ent->available_mrs + ent->pending); in resize_available_mrs()
313 spin_unlock_irq(&ent->lock); in resize_available_mrs()
315 if (err == -EAGAIN) in resize_available_mrs()
317 spin_lock_irq(&ent->lock); in resize_available_mrs()
319 if (err != -EAGAIN) in resize_available_mrs()
332 struct mlx5_cache_ent *ent = filp->private_data; in size_write()
345 spin_lock_irq(&ent->lock); in size_write()
346 if (target < ent->total_mrs - ent->available_mrs) { in size_write()
347 err = -EINVAL; in size_write()
350 target = target - (ent->total_mrs - ent->available_mrs); in size_write()
351 if (target < ent->limit || target > ent->limit*2) { in size_write()
352 err = -EINVAL; in size_write()
358 spin_unlock_irq(&ent->lock); in size_write()
363 spin_unlock_irq(&ent->lock); in size_write()
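The size_write() fragments encode two constraints on a requested pool size: it may not drop below the number of MRs currently handed out (total_mrs - available_mrs), and the portion that would remain cached must land between limit and 2*limit. A small sketch of that validation; the function and parameter names are invented for illustration:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Toy model of the checks visible in size_write(): 'target' is the total
 * pool size the user asked for; 'total' / 'available' / 'limit' mirror the
 * cache-entry counters. Returns 0 if the request would be accepted. */
static int validate_cache_size(uint32_t target, uint32_t total,
			       uint32_t available, uint32_t limit)
{
	uint32_t in_use = total - available;

	if (target < in_use)
		return -EINVAL;		/* cannot shrink below MRs in use */

	target -= in_use;		/* portion that would stay cached */
	if (target < limit || target > 2 * limit)
		return -EINVAL;		/* must sit in [limit, 2*limit] */

	return 0;
}

int main(void)
{
	/* 100 total, 40 available (60 in use), limit 32 */
	printf("%d\n", validate_cache_size(100, 100, 40, 32)); /* 0: 40 is in [32,64] */
	printf("%d\n", validate_cache_size(50, 100, 40, 32));  /* -EINVAL: below in-use */
	return 0;
}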
370 struct mlx5_cache_ent *ent = filp->private_data; in size_read()
374 err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->total_mrs); in size_read()
391 struct mlx5_cache_ent *ent = filp->private_data; in limit_write()
403 spin_lock_irq(&ent->lock); in limit_write()
404 ent->limit = var; in limit_write()
406 spin_unlock_irq(&ent->lock); in limit_write()
415 struct mlx5_cache_ent *ent = filp->private_data; in limit_read()
419 err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->limit); in limit_read()
438 struct mlx5_cache_ent *ent = &cache->ent[i]; in someone_adding()
441 spin_lock_irq(&ent->lock); in someone_adding()
442 ret = ent->available_mrs < ent->limit; in someone_adding()
443 spin_unlock_irq(&ent->lock); in someone_adding()
457 lockdep_assert_held(&ent->lock); in queue_adjust_cache_locked()
459 if (ent->disabled || READ_ONCE(ent->dev->fill_delay)) in queue_adjust_cache_locked()
461 if (ent->available_mrs < ent->limit) { in queue_adjust_cache_locked()
462 ent->fill_to_high_water = true; in queue_adjust_cache_locked()
463 queue_work(ent->dev->cache.wq, &ent->work); in queue_adjust_cache_locked()
464 } else if (ent->fill_to_high_water && in queue_adjust_cache_locked()
465 ent->available_mrs + ent->pending < 2 * ent->limit) { in queue_adjust_cache_locked()
470 queue_work(ent->dev->cache.wq, &ent->work); in queue_adjust_cache_locked()
471 } else if (ent->available_mrs == 2 * ent->limit) { in queue_adjust_cache_locked()
472 ent->fill_to_high_water = false; in queue_adjust_cache_locked()
473 } else if (ent->available_mrs > 2 * ent->limit) { in queue_adjust_cache_locked()
475 ent->fill_to_high_water = false; in queue_adjust_cache_locked()
476 if (ent->pending) in queue_adjust_cache_locked()
477 queue_delayed_work(ent->dev->cache.wq, &ent->dwork, in queue_adjust_cache_locked()
480 queue_work(ent->dev->cache.wq, &ent->work); in queue_adjust_cache_locked()
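queue_adjust_cache_locked() implements a simple waterline policy: start filling when the entry drops below limit, keep filling until available + pending reaches 2*limit, and start shrinking once available exceeds 2*limit (deferred while creations are still pending). A userspace sketch of that decision, ignoring the fill_delay throttle and returning an action instead of queueing work:

#include <stdbool.h>
#include <stdio.h>

enum cache_action { DO_NOTHING, QUEUE_FILL, QUEUE_SHRINK, QUEUE_SHRINK_DELAYED };

struct toy_ent {
	unsigned int available, pending, limit;
	bool fill_to_high_water;
	bool disabled;
};

/* Mirrors the branches visible in queue_adjust_cache_locked(): the real code
 * queues ent->work / ent->dwork; here we just report which one it would be. */
static enum cache_action adjust(struct toy_ent *ent)
{
	if (ent->disabled)
		return DO_NOTHING;
	if (ent->available < ent->limit) {
		ent->fill_to_high_water = true;
		return QUEUE_FILL;
	}
	if (ent->fill_to_high_water &&
	    ent->available + ent->pending < 2 * ent->limit)
		return QUEUE_FILL;
	if (ent->available == 2 * ent->limit) {
		ent->fill_to_high_water = false;
		return DO_NOTHING;
	}
	if (ent->available > 2 * ent->limit) {
		ent->fill_to_high_water = false;
		/* the kernel defers the shrink while creations are in flight */
		return ent->pending ? QUEUE_SHRINK_DELAYED : QUEUE_SHRINK;
	}
	return DO_NOTHING;
}

int main(void)
{
	struct toy_ent ent = { .available = 3, .pending = 0, .limit = 8 };

	printf("%d\n", adjust(&ent));	/* QUEUE_FILL: below the low waterline */
	ent.available = 20;
	printf("%d\n", adjust(&ent));	/* QUEUE_SHRINK: above 2 * limit */
	return 0;
}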
486 struct mlx5_ib_dev *dev = ent->dev; in __cache_work_func()
487 struct mlx5_mr_cache *cache = &dev->cache; in __cache_work_func()
490 spin_lock_irq(&ent->lock); in __cache_work_func()
491 if (ent->disabled) in __cache_work_func()
494 if (ent->fill_to_high_water && in __cache_work_func()
495 ent->available_mrs + ent->pending < 2 * ent->limit && in __cache_work_func()
496 !READ_ONCE(dev->fill_delay)) { in __cache_work_func()
497 spin_unlock_irq(&ent->lock); in __cache_work_func()
499 spin_lock_irq(&ent->lock); in __cache_work_func()
500 if (ent->disabled) in __cache_work_func()
508 if (err != -EAGAIN) { in __cache_work_func()
512 ent->order, err); in __cache_work_func()
513 queue_delayed_work(cache->wq, &ent->dwork, in __cache_work_func()
517 } else if (ent->available_mrs > 2 * ent->limit) { in __cache_work_func()
532 spin_unlock_irq(&ent->lock); in __cache_work_func()
535 READ_ONCE(cache->last_add) + 300 * HZ); in __cache_work_func()
536 spin_lock_irq(&ent->lock); in __cache_work_func()
537 if (ent->disabled) in __cache_work_func()
540 queue_delayed_work(cache->wq, &ent->dwork, 300 * HZ); in __cache_work_func()
545 spin_unlock_irq(&ent->lock); in __cache_work_func()
568 struct mlx5_mr_cache *cache = &dev->cache; in mlx5_mr_cache_alloc()
573 entry >= ARRAY_SIZE(cache->ent))) in mlx5_mr_cache_alloc()
574 return ERR_PTR(-EINVAL); in mlx5_mr_cache_alloc()
578 return ERR_PTR(-EOPNOTSUPP); in mlx5_mr_cache_alloc()
580 ent = &cache->ent[entry]; in mlx5_mr_cache_alloc()
581 spin_lock_irq(&ent->lock); in mlx5_mr_cache_alloc()
582 if (list_empty(&ent->head)) { in mlx5_mr_cache_alloc()
583 spin_unlock_irq(&ent->lock); in mlx5_mr_cache_alloc()
588 mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list); in mlx5_mr_cache_alloc()
589 list_del(&mr->list); in mlx5_mr_cache_alloc()
590 ent->available_mrs--; in mlx5_mr_cache_alloc()
592 spin_unlock_irq(&ent->lock); in mlx5_mr_cache_alloc()
596 mr->access_flags = access_flags; in mlx5_mr_cache_alloc()
603 struct mlx5_ib_dev *dev = req_ent->dev; in get_cache_mr()
608 for (; ent != &dev->cache.ent[MR_CACHE_LAST_STD_ENTRY + 1]; ent++) { in get_cache_mr()
609 mlx5_ib_dbg(dev, "order %u, cache index %zu\n", ent->order, in get_cache_mr()
610 ent - dev->cache.ent); in get_cache_mr()
612 spin_lock_irq(&ent->lock); in get_cache_mr()
613 if (!list_empty(&ent->head)) { in get_cache_mr()
614 mr = list_first_entry(&ent->head, struct mlx5_ib_mr, in get_cache_mr()
616 list_del(&mr->list); in get_cache_mr()
617 ent->available_mrs--; in get_cache_mr()
619 spin_unlock_irq(&ent->lock); in get_cache_mr()
624 spin_unlock_irq(&ent->lock); in get_cache_mr()
626 req_ent->miss++; in get_cache_mr()
632 struct mlx5_cache_ent *ent = mr->cache_ent; in mlx5_mr_cache_free()
634 spin_lock_irq(&ent->lock); in mlx5_mr_cache_free()
635 list_add_tail(&mr->list, &ent->head); in mlx5_mr_cache_free()
636 ent->available_mrs++; in mlx5_mr_cache_free()
638 spin_unlock_irq(&ent->lock); in mlx5_mr_cache_free()
643 struct mlx5_mr_cache *cache = &dev->cache; in clean_keys()
644 struct mlx5_cache_ent *ent = &cache->ent[c]; in clean_keys()
649 cancel_delayed_work(&ent->dwork); in clean_keys()
651 spin_lock_irq(&ent->lock); in clean_keys()
652 if (list_empty(&ent->head)) { in clean_keys()
653 spin_unlock_irq(&ent->lock); in clean_keys()
656 mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list); in clean_keys()
657 list_move(&mr->list, &del_list); in clean_keys()
658 ent->available_mrs--; in clean_keys()
659 ent->total_mrs--; in clean_keys()
660 spin_unlock_irq(&ent->lock); in clean_keys()
661 mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey); in clean_keys()
665 list_del(&mr->list); in clean_keys()
672 if (!mlx5_debugfs_root || dev->is_rep) in mlx5_mr_cache_debugfs_cleanup()
675 debugfs_remove_recursive(dev->cache.root); in mlx5_mr_cache_debugfs_cleanup()
676 dev->cache.root = NULL; in mlx5_mr_cache_debugfs_cleanup()
681 struct mlx5_mr_cache *cache = &dev->cache; in mlx5_mr_cache_debugfs_init()
683 struct dentry *dir; in mlx5_mr_cache_debugfs_init() local
686 if (!mlx5_debugfs_root || dev->is_rep) in mlx5_mr_cache_debugfs_init()
689 cache->root = debugfs_create_dir("mr_cache", dev->mdev->priv.dbg_root); in mlx5_mr_cache_debugfs_init()
692 ent = &cache->ent[i]; in mlx5_mr_cache_debugfs_init()
693 sprintf(ent->name, "%d", ent->order); in mlx5_mr_cache_debugfs_init()
694 dir = debugfs_create_dir(ent->name, cache->root); in mlx5_mr_cache_debugfs_init()
695 debugfs_create_file("size", 0600, dir, ent, &size_fops); in mlx5_mr_cache_debugfs_init()
696 debugfs_create_file("limit", 0600, dir, ent, &limit_fops); in mlx5_mr_cache_debugfs_init()
697 debugfs_create_u32("cur", 0400, dir, &ent->available_mrs); in mlx5_mr_cache_debugfs_init()
698 debugfs_create_u32("miss", 0600, dir, &ent->miss); in mlx5_mr_cache_debugfs_init()
706 WRITE_ONCE(dev->fill_delay, 0); in delay_time_func()
711 struct mlx5_mr_cache *cache = &dev->cache; in mlx5_mr_cache_init()
715 mutex_init(&dev->slow_path_mutex); in mlx5_mr_cache_init()
716 cache->wq = alloc_ordered_workqueue("mkey_cache", WQ_MEM_RECLAIM); in mlx5_mr_cache_init()
717 if (!cache->wq) { in mlx5_mr_cache_init()
719 return -ENOMEM; in mlx5_mr_cache_init()
722 mlx5_cmd_init_async_ctx(dev->mdev, &dev->async_ctx); in mlx5_mr_cache_init()
723 timer_setup(&dev->delay_timer, delay_time_func, 0); in mlx5_mr_cache_init()
725 ent = &cache->ent[i]; in mlx5_mr_cache_init()
726 INIT_LIST_HEAD(&ent->head); in mlx5_mr_cache_init()
727 spin_lock_init(&ent->lock); in mlx5_mr_cache_init()
728 ent->order = i + 2; in mlx5_mr_cache_init()
729 ent->dev = dev; in mlx5_mr_cache_init()
730 ent->limit = 0; in mlx5_mr_cache_init()
732 INIT_WORK(&ent->work, cache_work_func); in mlx5_mr_cache_init()
733 INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func); in mlx5_mr_cache_init()
740 if (ent->order > mr_cache_max_order(dev)) in mlx5_mr_cache_init()
743 ent->page = PAGE_SHIFT; in mlx5_mr_cache_init()
744 ent->xlt = (1 << ent->order) * sizeof(struct mlx5_mtt) / in mlx5_mr_cache_init()
746 ent->access_mode = MLX5_MKC_ACCESS_MODE_MTT; in mlx5_mr_cache_init()
747 if ((dev->mdev->profile.mask & MLX5_PROF_MASK_MR_CACHE) && in mlx5_mr_cache_init()
748 !dev->is_rep && mlx5_core_is_pf(dev->mdev) && in mlx5_mr_cache_init()
750 ent->limit = dev->mdev->profile.mr_cache[i].limit; in mlx5_mr_cache_init()
752 ent->limit = 0; in mlx5_mr_cache_init()
753 spin_lock_irq(&ent->lock); in mlx5_mr_cache_init()
755 spin_unlock_irq(&ent->lock); in mlx5_mr_cache_init()
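The per-entry loop sizes each cache bucket by order: entry i caches MRs covering 2^(i+2) pages, and translations_octword_size expresses the MTT table in 16-byte octowords. A sketch of that sizing arithmetic, assuming the divisor on the truncated continuation of line 744 is the 16-byte UMR octoword and that an MTT entry is 8 bytes:

#include <stdio.h>

#define TOY_MTT_SIZE	8	/* bytes per MTT translation entry (assumed) */
#define TOY_OCTOWORD	16	/* bytes per octoword (assumed)              */

/* Entry i caches MRs covering 2^(i+2) pages, per 'ent->order = i + 2'. */
static unsigned int entry_order(unsigned int i)
{
	return i + 2;
}

/* translations_octword_size for that entry: one MTT per page, expressed
 * in octowords, so two MTT entries fit in each 16-byte octoword. */
static unsigned int entry_xlt_octowords(unsigned int i)
{
	return (1u << entry_order(i)) * TOY_MTT_SIZE / TOY_OCTOWORD;
}

int main(void)
{
	for (unsigned int i = 0; i < 4; i++)
		printf("entry %u: order %u, %u octowords\n",
		       i, entry_order(i), entry_xlt_octowords(i));
	/* entry 0: order 2, 2 octowords ... entry 3: order 5, 16 octowords */
	return 0;
}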
767 if (!dev->cache.wq) in mlx5_mr_cache_cleanup()
771 struct mlx5_cache_ent *ent = &dev->cache.ent[i]; in mlx5_mr_cache_cleanup()
773 spin_lock_irq(&ent->lock); in mlx5_mr_cache_cleanup()
774 ent->disabled = true; in mlx5_mr_cache_cleanup()
775 spin_unlock_irq(&ent->lock); in mlx5_mr_cache_cleanup()
776 cancel_work_sync(&ent->work); in mlx5_mr_cache_cleanup()
777 cancel_delayed_work_sync(&ent->dwork); in mlx5_mr_cache_cleanup()
781 mlx5_cmd_cleanup_async_ctx(&dev->async_ctx); in mlx5_mr_cache_cleanup()
786 destroy_workqueue(dev->cache.wq); in mlx5_mr_cache_cleanup()
787 del_timer_sync(&dev->delay_timer); in mlx5_mr_cache_cleanup()
794 struct mlx5_ib_dev *dev = to_mdev(pd->device); in mlx5_ib_get_dma_mr()
803 return ERR_PTR(-ENOMEM); in mlx5_ib_get_dma_mr()
807 err = -ENOMEM; in mlx5_ib_get_dma_mr()
818 err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen); in mlx5_ib_get_dma_mr()
823 mr->mmkey.type = MLX5_MKEY_MR; in mlx5_ib_get_dma_mr()
824 mr->ibmr.lkey = mr->mmkey.key; in mlx5_ib_get_dma_mr()
825 mr->ibmr.rkey = mr->mmkey.key; in mlx5_ib_get_dma_mr()
826 mr->umem = NULL; in mlx5_ib_get_dma_mr()
828 return &mr->ibmr; in mlx5_ib_get_dma_mr()
845 offset = addr & (page_size - 1); in get_octo_len()
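The lone get_octo_len() fragment masks the start address to get the offset into the first page; from there the function presumably rounds (offset + length) up to whole pages and converts the page count into 16-byte octowords, two 8-byte MTT entries per octoword. A hedged userspace reconstruction of that computation:

#include <stdint.h>
#include <stdio.h>

/* Sketch of get_octo_len(): how many translation octowords are needed to
 * cover [addr, addr + len) at the given page size. Assumes one 8-byte MTT
 * per page and 16-byte octowords, i.e. two pages per octoword. */
static uint64_t octo_len(uint64_t addr, uint64_t len, unsigned int page_shift)
{
	uint64_t page_size = 1ULL << page_shift;
	uint64_t offset = addr & (page_size - 1);	/* matches line 845 */
	uint64_t npages = (len + offset + page_size - 1) >> page_shift;

	return (npages + 1) / 2;
}

int main(void)
{
	/* 1 MiB starting 512 bytes into a 4 KiB page: 257 pages, 129 octowords */
	printf("%llu\n", (unsigned long long)octo_len(0x200, 1 << 20, 12));
	return 0;
}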
852 if (MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset)) in mr_cache_max_order()
860 container_of(wc->wr_cqe, struct mlx5_ib_umr_context, cqe); in mlx5_ib_umr_done()
862 context->status = wc->status; in mlx5_ib_umr_done()
863 complete(&context->done); in mlx5_ib_umr_done()
868 context->cqe.done = mlx5_ib_umr_done; in mlx5_ib_init_umr_context()
869 context->status = -1; in mlx5_ib_init_umr_context()
870 init_completion(&context->done); in mlx5_ib_init_umr_context()
876 struct umr_common *umrc = &dev->umrc; in mlx5_ib_post_send_wait()
882 umrwr->wr.wr_cqe = &umr_context.cqe; in mlx5_ib_post_send_wait()
884 down(&umrc->sem); in mlx5_ib_post_send_wait()
885 err = ib_post_send(umrc->qp, &umrwr->wr, &bad); in mlx5_ib_post_send_wait()
893 err = -EFAULT; in mlx5_ib_post_send_wait()
896 up(&umrc->sem); in mlx5_ib_post_send_wait()
903 struct mlx5_mr_cache *cache = &dev->cache; in mr_cache_ent_from_order()
905 if (order < cache->ent[0].order) in mr_cache_ent_from_order()
906 return &cache->ent[0]; in mr_cache_ent_from_order()
907 order = order - cache->ent[0].order; in mr_cache_ent_from_order()
910 return &cache->ent[order]; in mr_cache_ent_from_order()
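mr_cache_ent_from_order() maps a requested order onto a cache bucket: orders smaller than the smallest cached order are rounded up to bucket 0, and everything else indexes relative to that base. A sketch of the mapping; the upper-bound check is not visible in the listing and is included here only as an assumption:

#include <stdio.h>

#define TOY_NUM_ENTRIES		16	/* assumed number of standard buckets  */
#define TOY_FIRST_ORDER		2	/* matches 'ent->order = i + 2' above  */

/* Returns the bucket index for a requested order, or -1 when the order is
 * larger than anything the cache holds (assumed to fall back to a direct
 * mkey creation in that case). */
static int cache_index_for_order(unsigned int order)
{
	if (order < TOY_FIRST_ORDER)
		return 0;			/* round small requests up */
	order -= TOY_FIRST_ORDER;
	if (order >= TOY_NUM_ENTRIES)
		return -1;			/* too big to be cached */
	return (int)order;
}

int main(void)
{
	printf("%d %d %d\n",
	       cache_index_for_order(1),	/* 0  */
	       cache_index_for_order(7),	/* 5  */
	       cache_index_for_order(40));	/* -1 */
	return 0;
}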
916 mr->ibmr.lkey = mr->mmkey.key; in set_mr_fields()
917 mr->ibmr.rkey = mr->mmkey.key; in set_mr_fields()
918 mr->ibmr.length = length; in set_mr_fields()
919 mr->ibmr.device = &dev->ib_dev; in set_mr_fields()
920 mr->access_flags = access_flags; in set_mr_fields()
930 umem->iova = iova; in mlx5_umem_dmabuf_default_pgsz()
938 struct mlx5_ib_dev *dev = to_mdev(pd->device); in alloc_cacheable_mr()
943 if (umem->is_dmabuf) in alloc_cacheable_mr()
949 return ERR_PTR(-EINVAL); in alloc_cacheable_mr()
956 if (!ent || ent->limit == 0 || in alloc_cacheable_mr()
958 mutex_lock(&dev->slow_path_mutex); in alloc_cacheable_mr()
960 mutex_unlock(&dev->slow_path_mutex); in alloc_cacheable_mr()
975 mr->ibmr.pd = pd; in alloc_cacheable_mr()
976 mr->umem = umem; in alloc_cacheable_mr()
977 mr->mmkey.iova = iova; in alloc_cacheable_mr()
978 mr->mmkey.size = umem->length; in alloc_cacheable_mr()
979 mr->mmkey.pd = to_mpd(pd)->pdn; in alloc_cacheable_mr()
980 mr->page_shift = order_base_2(page_size); in alloc_cacheable_mr()
981 set_mr_fields(dev, mr, umem->length, access_flags); in alloc_cacheable_mr()
986 #define MLX5_MAX_UMR_CHUNK ((1 << (MLX5_MAX_UMR_SHIFT + 4)) - \
991 * Allocate a temporary buffer to hold the per-page information to transfer to
1064 struct device *ddev = &dev->mdev->pdev->dev; in mlx5_ib_create_xlt_wr()
1071 sg->length = nents * ent_size; in mlx5_ib_create_xlt_wr()
1072 dma = dma_map_single(ddev, xlt, sg->length, DMA_TO_DEVICE); in mlx5_ib_create_xlt_wr()
1075 mlx5_ib_free_xlt(xlt, sg->length); in mlx5_ib_create_xlt_wr()
1078 sg->addr = dma; in mlx5_ib_create_xlt_wr()
1079 sg->lkey = dev->umrc.pd->local_dma_lkey; in mlx5_ib_create_xlt_wr()
1082 wr->wr.send_flags = MLX5_IB_SEND_UMR_UPDATE_XLT; in mlx5_ib_create_xlt_wr()
1084 wr->wr.send_flags |= MLX5_IB_SEND_UMR_FAIL_IF_FREE; in mlx5_ib_create_xlt_wr()
1085 wr->wr.sg_list = sg; in mlx5_ib_create_xlt_wr()
1086 wr->wr.num_sge = 1; in mlx5_ib_create_xlt_wr()
1087 wr->wr.opcode = MLX5_IB_WR_UMR; in mlx5_ib_create_xlt_wr()
1088 wr->pd = mr->ibmr.pd; in mlx5_ib_create_xlt_wr()
1089 wr->mkey = mr->mmkey.key; in mlx5_ib_create_xlt_wr()
1090 wr->length = mr->mmkey.size; in mlx5_ib_create_xlt_wr()
1091 wr->virt_addr = mr->mmkey.iova; in mlx5_ib_create_xlt_wr()
1092 wr->access_flags = mr->access_flags; in mlx5_ib_create_xlt_wr()
1093 wr->page_shift = mr->page_shift; in mlx5_ib_create_xlt_wr()
1094 wr->xlt_size = sg->length; in mlx5_ib_create_xlt_wr()
1101 struct device *ddev = &dev->mdev->pdev->dev; in mlx5_ib_unmap_free_xlt()
1103 dma_unmap_single(ddev, sg->addr, sg->length, DMA_TO_DEVICE); in mlx5_ib_unmap_free_xlt()
1104 mlx5_ib_free_xlt(xlt, sg->length); in mlx5_ib_unmap_free_xlt()
1126 struct device *ddev = &dev->mdev->pdev->dev; in mlx5_ib_update_xlt()
1135 const int page_mask = page_align - 1; in mlx5_ib_update_xlt()
1144 return -EPERM; in mlx5_ib_update_xlt()
1146 if (WARN_ON(!mr->umem->is_odp)) in mlx5_ib_update_xlt()
1147 return -EINVAL; in mlx5_ib_update_xlt()
1160 return -ENOMEM; in mlx5_ib_update_xlt()
1165 struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem); in mlx5_ib_update_xlt()
1166 size_t max_pages = ib_umem_odp_num_pages(odp) - idx; in mlx5_ib_update_xlt()
1176 npages = min_t(int, pages_iter, pages_to_map - pages_mapped); in mlx5_ib_update_xlt()
1207 struct device *ddev = &dev->mdev->pdev->dev; in mlx5_ib_update_mr_pas()
1217 if (WARN_ON(mr->umem->is_odp)) in mlx5_ib_update_mr_pas()
1218 return -EINVAL; in mlx5_ib_update_mr_pas()
1221 ib_umem_num_dma_blocks(mr->umem, in mlx5_ib_update_mr_pas()
1222 1 << mr->page_shift), in mlx5_ib_update_mr_pas()
1225 return -ENOMEM; in mlx5_ib_update_mr_pas()
1229 rdma_for_each_block (mr->umem->sgt_append.sgt.sgl, &biter, in mlx5_ib_update_mr_pas()
1230 mr->umem->sgt_append.sgt.nents, in mlx5_ib_update_mr_pas()
1231 BIT(mr->page_shift)) { in mlx5_ib_update_mr_pas()
1244 cur_mtt->ptag = in mlx5_ib_update_mr_pas()
1248 if (mr->umem->is_dmabuf && (flags & MLX5_IB_UPD_XLT_ZAP)) in mlx5_ib_update_mr_pas()
1249 cur_mtt->ptag = 0; in mlx5_ib_update_mr_pas()
1254 final_size = (void *)cur_mtt - (void *)mtt; in mlx5_ib_update_mr_pas()
1256 memset(cur_mtt, 0, sg.length - final_size); in mlx5_ib_update_mr_pas()
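mlx5_ib_update_mr_pas() walks the umem in page-size DMA blocks, writes one tagged big-endian address per block, and then zeroes the unused tail of the DMA-mapped buffer so the device never sees stale translations. A sketch of that fill-then-pad pattern over a plain array; the PRESENT bit value and endian handling are simplified assumptions:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define TOY_MTT_PRESENT	1ULL	/* stand-in for the MTT 'present' tag bit */

/* Fill 'mtt' (capacity 'cap' entries) from 'nblocks' block addresses, then
 * zero the remainder, mirroring memset(cur_mtt, 0, sg.length - final_size).
 * Returns the number of bytes actually populated. */
static size_t fill_mtt(uint64_t *mtt, size_t cap,
		       const uint64_t *blocks, size_t nblocks)
{
	size_t i;

	for (i = 0; i < nblocks && i < cap; i++)
		mtt[i] = blocks[i] | TOY_MTT_PRESENT;	/* would be cpu_to_be64() */

	memset(&mtt[i], 0, (cap - i) * sizeof(*mtt));	/* pad the tail */
	return i * sizeof(*mtt);
}

int main(void)
{
	uint64_t blocks[] = { 0x10000, 0x11000, 0x12000 };
	uint64_t mtt[8];
	size_t used = fill_mtt(mtt, 8, blocks, 3);

	printf("used %zu bytes, tail entry = 0x%llx\n",
	       used, (unsigned long long)mtt[3]);	/* 24 bytes, 0x0 */
	return 0;
}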
1277 struct mlx5_ib_dev *dev = to_mdev(pd->device); in reg_create()
1284 bool pg_cap = !!(MLX5_CAP_GEN(dev->mdev, pg)); in reg_create()
1287 return ERR_PTR(-EINVAL); in reg_create()
1290 return ERR_PTR(-ENOMEM); in reg_create()
1292 mr->ibmr.pd = pd; in reg_create()
1293 mr->access_flags = access_flags; in reg_create()
1294 mr->page_shift = order_base_2(page_size); in reg_create()
1302 err = -ENOMEM; in reg_create()
1308 err = -EINVAL; in reg_create()
1311 mlx5_ib_populate_pas(umem, 1UL << mr->page_shift, pas, in reg_create()
1321 populate ? pd : dev->umrc.pd); in reg_create()
1326 MLX5_SET64(mkc, mkc, len, umem->length); in reg_create()
1329 get_octo_len(iova, umem->length, mr->page_shift)); in reg_create()
1330 MLX5_SET(mkc, mkc, log_page_size, mr->page_shift); in reg_create()
1333 get_octo_len(iova, umem->length, mr->page_shift)); in reg_create()
1336 err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen); in reg_create()
1341 mr->mmkey.type = MLX5_MKEY_MR; in reg_create()
1342 mr->umem = umem; in reg_create()
1343 set_mr_fields(dev, mr, umem->length, access_flags); in reg_create()
1346 mlx5_ib_dbg(dev, "mkey = 0x%x\n", mr->mmkey.key); in reg_create()
1360 struct mlx5_ib_dev *dev = to_mdev(pd->device); in mlx5_ib_get_dm_mr()
1369 return ERR_PTR(-ENOMEM); in mlx5_ib_get_dm_mr()
1373 err = -ENOMEM; in mlx5_ib_get_dm_mr()
1384 err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen); in mlx5_ib_get_dm_mr()
1392 return &mr->ibmr; in mlx5_ib_get_dm_mr()
1413 return -EOPNOTSUPP; in mlx5_ib_advise_mr()
1424 struct mlx5_core_dev *dev = to_mdev(dm->device)->mdev; in mlx5_ib_reg_dm_mr()
1425 u64 start_addr = mdm->dev_addr + attr->offset; in mlx5_ib_reg_dm_mr()
1428 switch (mdm->type) { in mlx5_ib_reg_dm_mr()
1430 if (attr->access_flags & ~MLX5_IB_DM_MEMIC_ALLOWED_ACCESS) in mlx5_ib_reg_dm_mr()
1431 return ERR_PTR(-EINVAL); in mlx5_ib_reg_dm_mr()
1434 start_addr -= pci_resource_start(dev->pdev, 0); in mlx5_ib_reg_dm_mr()
1438 if (attr->access_flags & ~MLX5_IB_DM_SW_ICM_ALLOWED_ACCESS) in mlx5_ib_reg_dm_mr()
1439 return ERR_PTR(-EINVAL); in mlx5_ib_reg_dm_mr()
1444 return ERR_PTR(-EINVAL); in mlx5_ib_reg_dm_mr()
1447 return mlx5_ib_get_dm_mr(pd, start_addr, attr->length, in mlx5_ib_reg_dm_mr()
1448 attr->access_flags, mode); in mlx5_ib_reg_dm_mr()
1454 struct mlx5_ib_dev *dev = to_mdev(pd->device); in create_real_mr()
1459 xlt_with_umr = mlx5_ib_can_load_pas_with_umr(dev, umem->length); in create_real_mr()
1466 mutex_lock(&dev->slow_path_mutex); in create_real_mr()
1468 mutex_unlock(&dev->slow_path_mutex); in create_real_mr()
1475 mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmkey.key); in create_real_mr()
1477 atomic_add(ib_umem_num_pages(umem), &dev->mdev->priv.reg_pages); in create_real_mr()
1487 mlx5_ib_dereg_mr(&mr->ibmr, NULL); in create_real_mr()
1491 return &mr->ibmr; in create_real_mr()
1498 struct mlx5_ib_dev *dev = to_mdev(pd->device); in create_user_odp_mr()
1504 return ERR_PTR(-EOPNOTSUPP); in create_user_odp_mr()
1506 err = mlx5r_odp_create_eq(dev, &dev->odp_pf_eq); in create_user_odp_mr()
1511 return ERR_PTR(-EINVAL); in create_user_odp_mr()
1512 if (!(dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT)) in create_user_odp_mr()
1513 return ERR_PTR(-EINVAL); in create_user_odp_mr()
1518 return &mr->ibmr; in create_user_odp_mr()
1523 return ERR_PTR(-EINVAL); in create_user_odp_mr()
1525 odp = ib_umem_odp_get(&dev->ib_dev, start, length, access_flags, in create_user_odp_mr()
1530 mr = alloc_cacheable_mr(pd, &odp->umem, iova, access_flags); in create_user_odp_mr()
1532 ib_umem_release(&odp->umem); in create_user_odp_mr()
1535 xa_init(&mr->implicit_children); in create_user_odp_mr()
1537 odp->private = mr; in create_user_odp_mr()
1538 err = mlx5r_store_odp_mkey(dev, &mr->mmkey); in create_user_odp_mr()
1545 return &mr->ibmr; in create_user_odp_mr()
1548 mlx5_ib_dereg_mr(&mr->ibmr, NULL); in create_user_odp_mr()
1556 struct mlx5_ib_dev *dev = to_mdev(pd->device); in mlx5_ib_reg_user_mr()
1560 return ERR_PTR(-EOPNOTSUPP); in mlx5_ib_reg_user_mr()
1568 umem = ib_umem_get(&dev->ib_dev, start, length, access_flags); in mlx5_ib_reg_user_mr()
1576 struct ib_umem_dmabuf *umem_dmabuf = attach->importer_priv; in mlx5_ib_dmabuf_invalidate_cb()
1577 struct mlx5_ib_mr *mr = umem_dmabuf->private; in mlx5_ib_dmabuf_invalidate_cb()
1579 dma_resv_assert_held(umem_dmabuf->attach->dmabuf->resv); in mlx5_ib_dmabuf_invalidate_cb()
1581 if (!umem_dmabuf->sgt) in mlx5_ib_dmabuf_invalidate_cb()
1598 struct mlx5_ib_dev *dev = to_mdev(pd->device); in mlx5_ib_reg_user_mr_dmabuf()
1605 return ERR_PTR(-EOPNOTSUPP); in mlx5_ib_reg_user_mr_dmabuf()
1613 return ERR_PTR(-EINVAL); in mlx5_ib_reg_user_mr_dmabuf()
1615 umem_dmabuf = ib_umem_dmabuf_get(&dev->ib_dev, offset, length, fd, in mlx5_ib_reg_user_mr_dmabuf()
1624 mr = alloc_cacheable_mr(pd, &umem_dmabuf->umem, virt_addr, in mlx5_ib_reg_user_mr_dmabuf()
1627 ib_umem_release(&umem_dmabuf->umem); in mlx5_ib_reg_user_mr_dmabuf()
1631 mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmkey.key); in mlx5_ib_reg_user_mr_dmabuf()
1633 atomic_add(ib_umem_num_pages(mr->umem), &dev->mdev->priv.reg_pages); in mlx5_ib_reg_user_mr_dmabuf()
1634 umem_dmabuf->private = mr; in mlx5_ib_reg_user_mr_dmabuf()
1635 err = mlx5r_store_odp_mkey(dev, &mr->mmkey); in mlx5_ib_reg_user_mr_dmabuf()
1642 return &mr->ibmr; in mlx5_ib_reg_user_mr_dmabuf()
1645 mlx5_ib_dereg_mr(&mr->ibmr, NULL); in mlx5_ib_reg_user_mr_dmabuf()
1650 * revoke_mr - Fence all DMA on the MR
1661 if (mr_to_mdev(mr)->mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) in revoke_mr()
1667 umrwr.pd = mr_to_mdev(mr)->umrc.pd; in revoke_mr()
1668 umrwr.mkey = mr->mmkey.key; in revoke_mr()
1694 struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device); in umr_rereg_pd_access()
1701 .mkey = mr->mmkey.key, in umr_rereg_pd_access()
1711 mr->access_flags = access_flags; in umr_rereg_pd_access()
1712 mr->mmkey.pd = to_mpd(pd)->pdn; in umr_rereg_pd_access()
1721 struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device); in can_use_umr_rereg_pas()
1724 if (!mr->cache_ent) in can_use_umr_rereg_pas()
1726 if (!mlx5_ib_can_load_pas_with_umr(dev, new_umem->length)) in can_use_umr_rereg_pas()
1733 return (1ULL << mr->cache_ent->order) >= in can_use_umr_rereg_pas()
1741 struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device); in umr_rereg_pas()
1743 struct ib_umem *old_umem = mr->umem; in umr_rereg_pas()
1756 mr->ibmr.pd = pd; in umr_rereg_pas()
1757 mr->mmkey.pd = to_mpd(pd)->pdn; in umr_rereg_pas()
1761 mr->access_flags = access_flags; in umr_rereg_pas()
1765 mr->ibmr.length = new_umem->length; in umr_rereg_pas()
1766 mr->mmkey.iova = iova; in umr_rereg_pas()
1767 mr->mmkey.size = new_umem->length; in umr_rereg_pas()
1768 mr->page_shift = order_base_2(page_size); in umr_rereg_pas()
1769 mr->umem = new_umem; in umr_rereg_pas()
1776 mr->umem = old_umem; in umr_rereg_pas()
1780 atomic_sub(ib_umem_num_pages(old_umem), &dev->mdev->priv.reg_pages); in umr_rereg_pas()
1782 atomic_add(ib_umem_num_pages(new_umem), &dev->mdev->priv.reg_pages); in umr_rereg_pas()
1791 struct mlx5_ib_dev *dev = to_mdev(ib_mr->device); in mlx5_ib_rereg_user_mr()
1796 return ERR_PTR(-EOPNOTSUPP); in mlx5_ib_rereg_user_mr()
1804 return ERR_PTR(-EOPNOTSUPP); in mlx5_ib_rereg_user_mr()
1807 new_access_flags = mr->access_flags; in mlx5_ib_rereg_user_mr()
1809 new_pd = ib_mr->pd; in mlx5_ib_rereg_user_mr()
1815 if (can_use_umr_rereg_access(dev, mr->access_flags, in mlx5_ib_rereg_user_mr()
1822 /* DM or ODP MR's don't have a normal umem so we can't re-use it */ in mlx5_ib_rereg_user_mr()
1823 if (!mr->umem || is_odp_mr(mr) || is_dmabuf_mr(mr)) in mlx5_ib_rereg_user_mr()
1833 umem = mr->umem; in mlx5_ib_rereg_user_mr()
1834 mr->umem = NULL; in mlx5_ib_rereg_user_mr()
1835 atomic_sub(ib_umem_num_pages(umem), &dev->mdev->priv.reg_pages); in mlx5_ib_rereg_user_mr()
1837 return create_real_mr(new_pd, umem, mr->mmkey.iova, in mlx5_ib_rereg_user_mr()
1842 * DM doesn't have a PAS list so we can't re-use it, odp/dmabuf does in mlx5_ib_rereg_user_mr()
1845 if (!mr->umem || is_odp_mr(mr) || is_dmabuf_mr(mr)) in mlx5_ib_rereg_user_mr()
1849 can_use_umr_rereg_access(dev, mr->access_flags, new_access_flags)) { in mlx5_ib_rereg_user_mr()
1853 new_umem = ib_umem_get(&dev->ib_dev, start, length, in mlx5_ib_rereg_user_mr()
1888 struct device *ddev = &dev->mdev->pdev->dev; in mlx5_alloc_priv_descs()
1893 add_size = max_t(int, MLX5_UMR_ALIGN - ARCH_KMALLOC_MINALIGN, 0); in mlx5_alloc_priv_descs()
1895 mr->descs_alloc = kzalloc(size + add_size, GFP_KERNEL); in mlx5_alloc_priv_descs()
1896 if (!mr->descs_alloc) in mlx5_alloc_priv_descs()
1897 return -ENOMEM; in mlx5_alloc_priv_descs()
1899 mr->descs = PTR_ALIGN(mr->descs_alloc, MLX5_UMR_ALIGN); in mlx5_alloc_priv_descs()
1901 mr->desc_map = dma_map_single(ddev, mr->descs, size, DMA_TO_DEVICE); in mlx5_alloc_priv_descs()
1902 if (dma_mapping_error(ddev, mr->desc_map)) { in mlx5_alloc_priv_descs()
1903 ret = -ENOMEM; in mlx5_alloc_priv_descs()
1909 kfree(mr->descs_alloc); in mlx5_alloc_priv_descs()
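mlx5_alloc_priv_descs() over-allocates by the worst-case misalignment the allocator can produce and then rounds the pointer up to MLX5_UMR_ALIGN so the descriptor list meets the hardware's alignment requirement. A userspace sketch of the same over-allocate-and-align trick; the 2048-byte alignment and 16-byte allocator guarantee are assumptions here:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define TOY_UMR_ALIGN		2048	/* assumed hardware alignment          */
#define TOY_MALLOC_MINALIGN	16	/* assumed allocator alignment promise */

/* Round 'p' up to the next 'align' boundary (align must be a power of two);
 * a userspace equivalent of PTR_ALIGN(). */
static void *ptr_align(void *p, uintptr_t align)
{
	return (void *)(((uintptr_t)p + align - 1) & ~(align - 1));
}

int main(void)
{
	size_t size = 4096;
	/* over-allocate by the largest padding that alignment might need */
	size_t add_size = TOY_UMR_ALIGN - TOY_MALLOC_MINALIGN;
	void *raw = calloc(1, size + add_size);
	void *descs = ptr_align(raw, TOY_UMR_ALIGN);

	/* 'descs' plus 'size' bytes is guaranteed to fit inside 'raw' */
	printf("raw=%p aligned=%p\n", raw, descs);
	free(raw);
	return 0;
}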
1917 if (!mr->umem && mr->descs) { in mlx5_free_priv_descs()
1918 struct ib_device *device = mr->ibmr.device; in mlx5_free_priv_descs()
1919 int size = mr->max_descs * mr->desc_size; in mlx5_free_priv_descs()
1922 dma_unmap_single(&dev->mdev->pdev->dev, mr->desc_map, size, in mlx5_free_priv_descs()
1924 kfree(mr->descs_alloc); in mlx5_free_priv_descs()
1925 mr->descs = NULL; in mlx5_free_priv_descs()
1932 struct mlx5_ib_dev *dev = to_mdev(ibmr->device); in mlx5_ib_dereg_mr()
1941 refcount_read(&mr->mmkey.usecount) != 0 && in mlx5_ib_dereg_mr()
1942 xa_erase(&mr_to_mdev(mr)->odp_mkeys, mlx5_base_mkey(mr->mmkey.key))) in mlx5_ib_dereg_mr()
1943 mlx5r_deref_wait_odp_mkey(&mr->mmkey); in mlx5_ib_dereg_mr()
1945 if (ibmr->type == IB_MR_TYPE_INTEGRITY) { in mlx5_ib_dereg_mr()
1946 xa_cmpxchg(&dev->sig_mrs, mlx5_base_mkey(mr->mmkey.key), in mlx5_ib_dereg_mr()
1947 mr->sig, NULL, GFP_KERNEL); in mlx5_ib_dereg_mr()
1949 if (mr->mtt_mr) { in mlx5_ib_dereg_mr()
1950 rc = mlx5_ib_dereg_mr(&mr->mtt_mr->ibmr, NULL); in mlx5_ib_dereg_mr()
1953 mr->mtt_mr = NULL; in mlx5_ib_dereg_mr()
1955 if (mr->klm_mr) { in mlx5_ib_dereg_mr()
1956 rc = mlx5_ib_dereg_mr(&mr->klm_mr->ibmr, NULL); in mlx5_ib_dereg_mr()
1959 mr->klm_mr = NULL; in mlx5_ib_dereg_mr()
1962 if (mlx5_core_destroy_psv(dev->mdev, in mlx5_ib_dereg_mr()
1963 mr->sig->psv_memory.psv_idx)) in mlx5_ib_dereg_mr()
1965 mr->sig->psv_memory.psv_idx); in mlx5_ib_dereg_mr()
1966 if (mlx5_core_destroy_psv(dev->mdev, mr->sig->psv_wire.psv_idx)) in mlx5_ib_dereg_mr()
1968 mr->sig->psv_wire.psv_idx); in mlx5_ib_dereg_mr()
1969 kfree(mr->sig); in mlx5_ib_dereg_mr()
1970 mr->sig = NULL; in mlx5_ib_dereg_mr()
1974 if (mr->cache_ent) { in mlx5_ib_dereg_mr()
1976 spin_lock_irq(&mr->cache_ent->lock); in mlx5_ib_dereg_mr()
1977 mr->cache_ent->total_mrs--; in mlx5_ib_dereg_mr()
1978 spin_unlock_irq(&mr->cache_ent->lock); in mlx5_ib_dereg_mr()
1979 mr->cache_ent = NULL; in mlx5_ib_dereg_mr()
1982 if (!mr->cache_ent) { in mlx5_ib_dereg_mr()
1983 rc = destroy_mkey(to_mdev(mr->ibmr.device), mr); in mlx5_ib_dereg_mr()
1988 if (mr->umem) { in mlx5_ib_dereg_mr()
1992 atomic_sub(ib_umem_num_pages(mr->umem), in mlx5_ib_dereg_mr()
1993 &dev->mdev->priv.reg_pages); in mlx5_ib_dereg_mr()
1994 ib_umem_release(mr->umem); in mlx5_ib_dereg_mr()
1999 if (mr->cache_ent) { in mlx5_ib_dereg_mr()
2029 struct mlx5_ib_dev *dev = to_mdev(pd->device); in _mlx5_alloc_mkey_descs()
2032 mr->access_mode = access_mode; in _mlx5_alloc_mkey_descs()
2033 mr->desc_size = desc_size; in _mlx5_alloc_mkey_descs()
2034 mr->max_descs = ndescs; in _mlx5_alloc_mkey_descs()
2036 err = mlx5_alloc_priv_descs(pd->device, mr, ndescs, desc_size); in _mlx5_alloc_mkey_descs()
2042 err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen); in _mlx5_alloc_mkey_descs()
2046 mr->mmkey.type = MLX5_MKEY_MR; in _mlx5_alloc_mkey_descs()
2047 mr->ibmr.lkey = mr->mmkey.key; in _mlx5_alloc_mkey_descs()
2048 mr->ibmr.rkey = mr->mmkey.key; in _mlx5_alloc_mkey_descs()
2070 return ERR_PTR(-ENOMEM); in mlx5_ib_alloc_pi_mr()
2072 mr->ibmr.pd = pd; in mlx5_ib_alloc_pi_mr()
2073 mr->ibmr.device = pd->device; in mlx5_ib_alloc_pi_mr()
2077 err = -ENOMEM; in mlx5_ib_alloc_pi_mr()
2089 mr->umem = NULL; in mlx5_ib_alloc_pi_mr()
2120 struct mlx5_ib_dev *dev = to_mdev(pd->device); in mlx5_alloc_integrity_descs()
2125 mr->sig = kzalloc(sizeof(*mr->sig), GFP_KERNEL); in mlx5_alloc_integrity_descs()
2126 if (!mr->sig) in mlx5_alloc_integrity_descs()
2127 return -ENOMEM; in mlx5_alloc_integrity_descs()
2130 err = mlx5_core_create_psv(dev->mdev, to_mpd(pd)->pdn, 2, psv_index); in mlx5_alloc_integrity_descs()
2134 mr->sig->psv_memory.psv_idx = psv_index[0]; in mlx5_alloc_integrity_descs()
2135 mr->sig->psv_wire.psv_idx = psv_index[1]; in mlx5_alloc_integrity_descs()
2137 mr->sig->sig_status_checked = true; in mlx5_alloc_integrity_descs()
2138 mr->sig->sig_err_exists = false; in mlx5_alloc_integrity_descs()
2140 ++mr->sig->sigerr_count; in mlx5_alloc_integrity_descs()
2141 mr->klm_mr = mlx5_ib_alloc_pi_mr(pd, max_num_sg, max_num_meta_sg, in mlx5_alloc_integrity_descs()
2144 if (IS_ERR(mr->klm_mr)) { in mlx5_alloc_integrity_descs()
2145 err = PTR_ERR(mr->klm_mr); in mlx5_alloc_integrity_descs()
2148 mr->mtt_mr = mlx5_ib_alloc_pi_mr(pd, max_num_sg, max_num_meta_sg, in mlx5_alloc_integrity_descs()
2151 if (IS_ERR(mr->mtt_mr)) { in mlx5_alloc_integrity_descs()
2152 err = PTR_ERR(mr->mtt_mr); in mlx5_alloc_integrity_descs()
2166 err = xa_err(xa_store(&dev->sig_mrs, mlx5_base_mkey(mr->mmkey.key), in mlx5_alloc_integrity_descs()
2167 mr->sig, GFP_KERNEL)); in mlx5_alloc_integrity_descs()
2176 mlx5_ib_dereg_mr(&mr->mtt_mr->ibmr, NULL); in mlx5_alloc_integrity_descs()
2177 mr->mtt_mr = NULL; in mlx5_alloc_integrity_descs()
2179 mlx5_ib_dereg_mr(&mr->klm_mr->ibmr, NULL); in mlx5_alloc_integrity_descs()
2180 mr->klm_mr = NULL; in mlx5_alloc_integrity_descs()
2182 if (mlx5_core_destroy_psv(dev->mdev, mr->sig->psv_memory.psv_idx)) in mlx5_alloc_integrity_descs()
2184 mr->sig->psv_memory.psv_idx); in mlx5_alloc_integrity_descs()
2185 if (mlx5_core_destroy_psv(dev->mdev, mr->sig->psv_wire.psv_idx)) in mlx5_alloc_integrity_descs()
2187 mr->sig->psv_wire.psv_idx); in mlx5_alloc_integrity_descs()
2189 kfree(mr->sig); in mlx5_alloc_integrity_descs()
2198 struct mlx5_ib_dev *dev = to_mdev(pd->device); in __mlx5_ib_alloc_mr()
2207 return ERR_PTR(-ENOMEM); in __mlx5_ib_alloc_mr()
2211 err = -ENOMEM; in __mlx5_ib_alloc_mr()
2215 mr->ibmr.device = pd->device; in __mlx5_ib_alloc_mr()
2216 mr->umem = NULL; in __mlx5_ib_alloc_mr()
2231 err = -EINVAL; in __mlx5_ib_alloc_mr()
2239 return &mr->ibmr; in __mlx5_ib_alloc_mr()
2263 struct mlx5_ib_dev *dev = to_mdev(ibmw->device); in mlx5_ib_alloc_mw()
2276 err = ib_copy_from_udata(&req, udata, min(udata->inlen, sizeof(req))); in mlx5_ib_alloc_mw()
2281 return -EOPNOTSUPP; in mlx5_ib_alloc_mw()
2283 if (udata->inlen > sizeof(req) && in mlx5_ib_alloc_mw()
2285 udata->inlen - sizeof(req))) in mlx5_ib_alloc_mw()
2286 return -EOPNOTSUPP; in mlx5_ib_alloc_mw()
2292 err = -ENOMEM; in mlx5_ib_alloc_mw()
2300 MLX5_SET(mkc, mkc, pd, to_mpd(ibmw->pd)->pdn); in mlx5_ib_alloc_mw()
2304 MLX5_SET(mkc, mkc, en_rinval, !!((ibmw->type == IB_MW_TYPE_2))); in mlx5_ib_alloc_mw()
2307 err = mlx5_ib_create_mkey(dev, &mw->mmkey, in, inlen); in mlx5_ib_alloc_mw()
2311 mw->mmkey.type = MLX5_MKEY_MW; in mlx5_ib_alloc_mw()
2312 ibmw->rkey = mw->mmkey.key; in mlx5_ib_alloc_mw()
2313 mw->ndescs = ndescs; in mlx5_ib_alloc_mw()
2316 min(offsetofend(typeof(resp), response_length), udata->outlen); in mlx5_ib_alloc_mw()
2324 err = mlx5r_store_odp_mkey(dev, &mw->mmkey); in mlx5_ib_alloc_mw()
2333 mlx5_core_destroy_mkey(dev->mdev, &mw->mmkey); in mlx5_ib_alloc_mw()
2341 struct mlx5_ib_dev *dev = to_mdev(mw->device); in mlx5_ib_dealloc_mw()
2345 xa_erase(&dev->odp_mkeys, mlx5_base_mkey(mmw->mmkey.key))) in mlx5_ib_dealloc_mw()
2350 mlx5r_deref_wait_odp_mkey(&mmw->mmkey); in mlx5_ib_dealloc_mw()
2352 return mlx5_core_destroy_mkey(dev->mdev, &mmw->mmkey); in mlx5_ib_dealloc_mw()
2363 ret = -EINVAL; in mlx5_ib_check_mr_status()
2367 mr_status->fail_status = 0; in mlx5_ib_check_mr_status()
2369 if (!mmr->sig) { in mlx5_ib_check_mr_status()
2370 ret = -EINVAL; in mlx5_ib_check_mr_status()
2371 pr_err("signature status check requested on a non-signature enabled MR\n"); in mlx5_ib_check_mr_status()
2375 mmr->sig->sig_status_checked = true; in mlx5_ib_check_mr_status()
2376 if (!mmr->sig->sig_err_exists) in mlx5_ib_check_mr_status()
2379 if (ibmr->lkey == mmr->sig->err_item.key) in mlx5_ib_check_mr_status()
2380 memcpy(&mr_status->sig_err, &mmr->sig->err_item, in mlx5_ib_check_mr_status()
2381 sizeof(mr_status->sig_err)); in mlx5_ib_check_mr_status()
2383 mr_status->sig_err.err_type = IB_SIG_BAD_GUARD; in mlx5_ib_check_mr_status()
2384 mr_status->sig_err.sig_err_offset = 0; in mlx5_ib_check_mr_status()
2385 mr_status->sig_err.key = mmr->sig->err_item.key; in mlx5_ib_check_mr_status()
2388 mmr->sig->sig_err_exists = false; in mlx5_ib_check_mr_status()
2389 mr_status->fail_status |= IB_MR_CHECK_SIG_STATUS; in mlx5_ib_check_mr_status()
2406 mr->meta_length = 0; in mlx5_ib_map_pa_mr_sg_pi()
2409 mr->ndescs = 1; in mlx5_ib_map_pa_mr_sg_pi()
2412 mr->data_length = sg_dma_len(data_sg) - sg_offset; in mlx5_ib_map_pa_mr_sg_pi()
2413 mr->data_iova = sg_dma_address(data_sg) + sg_offset; in mlx5_ib_map_pa_mr_sg_pi()
2416 mr->meta_ndescs = 1; in mlx5_ib_map_pa_mr_sg_pi()
2421 mr->meta_length = sg_dma_len(meta_sg) - sg_offset; in mlx5_ib_map_pa_mr_sg_pi()
2422 mr->pi_iova = sg_dma_address(meta_sg) + sg_offset; in mlx5_ib_map_pa_mr_sg_pi()
2424 ibmr->length = mr->data_length + mr->meta_length; in mlx5_ib_map_pa_mr_sg_pi()
2440 struct mlx5_klm *klms = mr->descs; in mlx5_ib_sg_to_klms()
2442 u32 lkey = mr->ibmr.pd->local_dma_lkey; in mlx5_ib_sg_to_klms()
2445 mr->ibmr.iova = sg_dma_address(sg) + sg_offset; in mlx5_ib_sg_to_klms()
2446 mr->ibmr.length = 0; in mlx5_ib_sg_to_klms()
2449 if (unlikely(i >= mr->max_descs)) in mlx5_ib_sg_to_klms()
2452 klms[i].bcount = cpu_to_be32(sg_dma_len(sg) - sg_offset); in mlx5_ib_sg_to_klms()
2454 mr->ibmr.length += sg_dma_len(sg) - sg_offset; in mlx5_ib_sg_to_klms()
2462 mr->ndescs = i; in mlx5_ib_sg_to_klms()
2463 mr->data_length = mr->ibmr.length; in mlx5_ib_sg_to_klms()
2469 if (unlikely(i + j >= mr->max_descs)) in mlx5_ib_sg_to_klms()
2473 klms[i + j].bcount = cpu_to_be32(sg_dma_len(sg) - in mlx5_ib_sg_to_klms()
2476 mr->ibmr.length += sg_dma_len(sg) - sg_offset; in mlx5_ib_sg_to_klms()
2483 mr->meta_ndescs = j; in mlx5_ib_sg_to_klms()
2484 mr->meta_length = mr->ibmr.length - mr->data_length; in mlx5_ib_sg_to_klms()
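mlx5_ib_sg_to_klms() turns a DMA-mapped scatterlist into KLM entries, each carrying a byte count, the PD's local DMA lkey, and an address, with the caller's offset applied only to the first segment. A sketch over a plain segment array; the struct field names mirror the bcount/key/va pattern in the listing but are otherwise assumptions, and endian conversion is omitted:

#include <stdint.h>
#include <stdio.h>

struct toy_klm {
	uint32_t bcount;	/* byte count of this segment        */
	uint32_t key;		/* lkey the device uses to access it */
	uint64_t va;		/* DMA address of the segment        */
};

struct toy_seg { uint64_t addr; uint32_t len; };

/* Convert 'nsegs' DMA segments into KLMs, honouring an offset into the
 * first segment, and return the total mapped length (mr->ibmr.length in
 * the original). */
static uint64_t segs_to_klms(const struct toy_seg *segs, unsigned int nsegs,
			     unsigned int first_offset, uint32_t lkey,
			     struct toy_klm *klms, unsigned int max_klms)
{
	uint64_t total = 0;
	unsigned int i;

	for (i = 0; i < nsegs && i < max_klms; i++) {
		unsigned int off = i ? 0 : first_offset;

		klms[i].bcount = segs[i].len - off;
		klms[i].key = lkey;
		klms[i].va = segs[i].addr + off;
		total += klms[i].bcount;
	}
	return total;
}

int main(void)
{
	struct toy_seg segs[] = { { 0x1000, 4096 }, { 0x3000, 8192 } };
	struct toy_klm klms[4];
	uint64_t len = segs_to_klms(segs, 2, 256, 0x42, klms, 4);

	printf("mapped %llu bytes, first va 0x%llx\n",
	       (unsigned long long)len, (unsigned long long)klms[0].va);
	return 0;
}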
2495 if (unlikely(mr->ndescs == mr->max_descs)) in mlx5_set_page()
2496 return -ENOMEM; in mlx5_set_page()
2498 descs = mr->descs; in mlx5_set_page()
2499 descs[mr->ndescs++] = cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR); in mlx5_set_page()
2509 if (unlikely(mr->ndescs + mr->meta_ndescs == mr->max_descs)) in mlx5_set_page_pi()
2510 return -ENOMEM; in mlx5_set_page_pi()
2512 descs = mr->descs; in mlx5_set_page_pi()
2513 descs[mr->ndescs + mr->meta_ndescs++] = in mlx5_set_page_pi()
2526 struct mlx5_ib_mr *pi_mr = mr->mtt_mr; in mlx5_ib_map_mtt_mr_sg_pi()
2529 pi_mr->ndescs = 0; in mlx5_ib_map_mtt_mr_sg_pi()
2530 pi_mr->meta_ndescs = 0; in mlx5_ib_map_mtt_mr_sg_pi()
2531 pi_mr->meta_length = 0; in mlx5_ib_map_mtt_mr_sg_pi()
2533 ib_dma_sync_single_for_cpu(ibmr->device, pi_mr->desc_map, in mlx5_ib_map_mtt_mr_sg_pi()
2534 pi_mr->desc_size * pi_mr->max_descs, in mlx5_ib_map_mtt_mr_sg_pi()
2537 pi_mr->ibmr.page_size = ibmr->page_size; in mlx5_ib_map_mtt_mr_sg_pi()
2538 n = ib_sg_to_pages(&pi_mr->ibmr, data_sg, data_sg_nents, data_sg_offset, in mlx5_ib_map_mtt_mr_sg_pi()
2543 pi_mr->data_iova = pi_mr->ibmr.iova; in mlx5_ib_map_mtt_mr_sg_pi()
2544 pi_mr->data_length = pi_mr->ibmr.length; in mlx5_ib_map_mtt_mr_sg_pi()
2545 pi_mr->ibmr.length = pi_mr->data_length; in mlx5_ib_map_mtt_mr_sg_pi()
2546 ibmr->length = pi_mr->data_length; in mlx5_ib_map_mtt_mr_sg_pi()
2549 u64 page_mask = ~((u64)ibmr->page_size - 1); in mlx5_ib_map_mtt_mr_sg_pi()
2550 u64 iova = pi_mr->data_iova; in mlx5_ib_map_mtt_mr_sg_pi()
2552 n += ib_sg_to_pages(&pi_mr->ibmr, meta_sg, meta_sg_nents, in mlx5_ib_map_mtt_mr_sg_pi()
2555 pi_mr->meta_length = pi_mr->ibmr.length; in mlx5_ib_map_mtt_mr_sg_pi()
2562 pi_mr->pi_iova = (iova & page_mask) + in mlx5_ib_map_mtt_mr_sg_pi()
2563 pi_mr->ndescs * ibmr->page_size + in mlx5_ib_map_mtt_mr_sg_pi()
2564 (pi_mr->ibmr.iova & ~page_mask); in mlx5_ib_map_mtt_mr_sg_pi()
2568 * the metadata (the sig MR will verify that the HW will access in mlx5_ib_map_mtt_mr_sg_pi()
2572 pi_mr->ibmr.length = pi_mr->pi_iova + pi_mr->meta_length - iova; in mlx5_ib_map_mtt_mr_sg_pi()
2573 pi_mr->ibmr.iova = iova; in mlx5_ib_map_mtt_mr_sg_pi()
2574 ibmr->length += pi_mr->meta_length; in mlx5_ib_map_mtt_mr_sg_pi()
2577 ib_dma_sync_single_for_device(ibmr->device, pi_mr->desc_map, in mlx5_ib_map_mtt_mr_sg_pi()
2578 pi_mr->desc_size * pi_mr->max_descs, in mlx5_ib_map_mtt_mr_sg_pi()
2591 struct mlx5_ib_mr *pi_mr = mr->klm_mr; in mlx5_ib_map_klm_mr_sg_pi()
2594 pi_mr->ndescs = 0; in mlx5_ib_map_klm_mr_sg_pi()
2595 pi_mr->meta_ndescs = 0; in mlx5_ib_map_klm_mr_sg_pi()
2596 pi_mr->meta_length = 0; in mlx5_ib_map_klm_mr_sg_pi()
2598 ib_dma_sync_single_for_cpu(ibmr->device, pi_mr->desc_map, in mlx5_ib_map_klm_mr_sg_pi()
2599 pi_mr->desc_size * pi_mr->max_descs, in mlx5_ib_map_klm_mr_sg_pi()
2605 ib_dma_sync_single_for_device(ibmr->device, pi_mr->desc_map, in mlx5_ib_map_klm_mr_sg_pi()
2606 pi_mr->desc_size * pi_mr->max_descs, in mlx5_ib_map_klm_mr_sg_pi()
2609 /* This is zero-based memory region */ in mlx5_ib_map_klm_mr_sg_pi()
2610 pi_mr->data_iova = 0; in mlx5_ib_map_klm_mr_sg_pi()
2611 pi_mr->ibmr.iova = 0; in mlx5_ib_map_klm_mr_sg_pi()
2612 pi_mr->pi_iova = pi_mr->data_length; in mlx5_ib_map_klm_mr_sg_pi()
2613 ibmr->length = pi_mr->ibmr.length; in mlx5_ib_map_klm_mr_sg_pi()
2627 WARN_ON(ibmr->type != IB_MR_TYPE_INTEGRITY); in mlx5_ib_map_mr_sg_pi()
2629 mr->ndescs = 0; in mlx5_ib_map_mr_sg_pi()
2630 mr->data_length = 0; in mlx5_ib_map_mr_sg_pi()
2631 mr->data_iova = 0; in mlx5_ib_map_mr_sg_pi()
2632 mr->meta_ndescs = 0; in mlx5_ib_map_mr_sg_pi()
2633 mr->pi_iova = 0; in mlx5_ib_map_mr_sg_pi()
2653 pi_mr = mr->mtt_mr; in mlx5_ib_map_mr_sg_pi()
2660 pi_mr = mr->klm_mr; in mlx5_ib_map_mr_sg_pi()
2665 return -ENOMEM; in mlx5_ib_map_mr_sg_pi()
2668 /* This is zero-based memory region */ in mlx5_ib_map_mr_sg_pi()
2669 ibmr->iova = 0; in mlx5_ib_map_mr_sg_pi()
2670 mr->pi_mr = pi_mr; in mlx5_ib_map_mr_sg_pi()
2672 ibmr->sig_attrs->meta_length = pi_mr->meta_length; in mlx5_ib_map_mr_sg_pi()
2674 ibmr->sig_attrs->meta_length = mr->meta_length; in mlx5_ib_map_mr_sg_pi()
2685 mr->ndescs = 0; in mlx5_ib_map_mr_sg()
2687 ib_dma_sync_single_for_cpu(ibmr->device, mr->desc_map, in mlx5_ib_map_mr_sg()
2688 mr->desc_size * mr->max_descs, in mlx5_ib_map_mr_sg()
2691 if (mr->access_mode == MLX5_MKC_ACCESS_MODE_KLMS) in mlx5_ib_map_mr_sg()
2698 ib_dma_sync_single_for_device(ibmr->device, mr->desc_map, in mlx5_ib_map_mr_sg()
2699 mr->desc_size * mr->max_descs, in mlx5_ib_map_mr_sg()