Lines Matching full:ent (occurrences of the mlx5 MR cache entry pointer ent)

112 static void queue_adjust_cache_locked(struct mlx5_cache_ent *ent);
138 struct mlx5_cache_ent *ent = mr->cache_ent; in create_mkey_callback() local
144 spin_lock_irqsave(&ent->lock, flags); in create_mkey_callback()
145 ent->pending--; in create_mkey_callback()
147 spin_unlock_irqrestore(&ent->lock, flags); in create_mkey_callback()
158 spin_lock_irqsave(&ent->lock, flags); in create_mkey_callback()
159 list_add_tail(&mr->list, &ent->head); in create_mkey_callback()
160 ent->available_mrs++; in create_mkey_callback()
161 ent->total_mrs++; in create_mkey_callback()
163 queue_adjust_cache_locked(ent); in create_mkey_callback()
164 ent->pending--; in create_mkey_callback()
165 spin_unlock_irqrestore(&ent->lock, flags); in create_mkey_callback()
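The create_mkey_callback() hits above show the bookkeeping done when an asynchronous mkey creation completes. A rough, self-contained C model of that accounting follows; it is not the driver code itself, the locking, MR list and failure cleanup are elided, and the field names simply mirror the hits:

    /* Simplified model of the completion accounting in
     * create_mkey_callback(); a sketch, not the driver code. */
    struct ent_model {
        unsigned int pending;        /* async creations still in flight */
        unsigned int available_mrs;  /* MRs ready on the entry's list */
        unsigned int total_mrs;      /* everything the entry owns */
    };

    void creation_completed(struct ent_model *e, int status)
    {
        e->pending--;                /* this request is no longer in flight */
        if (status)
            return;                  /* failed create: nothing becomes available */
        e->available_mrs++;          /* the new MR is ready for callers */
        e->total_mrs++;              /* and is now accounted to this entry */
        /* the real callback then calls queue_adjust_cache_locked() */
    }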
168 static struct mlx5_ib_mr *alloc_cache_mr(struct mlx5_cache_ent *ent, void *mkc) in alloc_cache_mr() argument
175 mr->order = ent->order; in alloc_cache_mr()
176 mr->cache_ent = ent; in alloc_cache_mr()
177 mr->dev = ent->dev; in alloc_cache_mr()
179 set_mkc_access_pd_addr_fields(mkc, 0, 0, ent->dev->umrc.pd); in alloc_cache_mr()
182 MLX5_SET(mkc, mkc, access_mode_1_0, ent->access_mode & 0x3); in alloc_cache_mr()
183 MLX5_SET(mkc, mkc, access_mode_4_2, (ent->access_mode >> 2) & 0x7); in alloc_cache_mr()
185 MLX5_SET(mkc, mkc, translations_octword_size, ent->xlt); in alloc_cache_mr()
186 MLX5_SET(mkc, mkc, log_page_size, ent->page); in alloc_cache_mr()
191 static int add_keys(struct mlx5_cache_ent *ent, unsigned int num) in add_keys() argument
206 mr = alloc_cache_mr(ent, mkc); in add_keys()
211 spin_lock_irq(&ent->lock); in add_keys()
212 if (ent->pending >= MAX_PENDING_REG_MR) { in add_keys()
214 spin_unlock_irq(&ent->lock); in add_keys()
218 ent->pending++; in add_keys()
219 spin_unlock_irq(&ent->lock); in add_keys()
220 err = mlx5_ib_create_mkey_cb(ent->dev, &mr->mmkey, in add_keys()
221 &ent->dev->async_ctx, in, inlen, in add_keys()
225 spin_lock_irq(&ent->lock); in add_keys()
226 ent->pending--; in add_keys()
227 spin_unlock_irq(&ent->lock); in add_keys()
228 mlx5_ib_warn(ent->dev, "create mkey failed %d\n", err); in add_keys()
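add_keys() throttles how many asynchronous creations a single entry may have outstanding. A minimal sketch of that throttle, assuming the same MAX_PENDING_REG_MR cap seen in the hits (the value 8 below is illustrative) and ignoring the ent->lock held around the real check:

    #include <stdbool.h>

    #define MAX_PENDING_REG_MR 8     /* illustrative cap on in-flight creates */

    struct throttle_model {
        unsigned int pending;        /* async creations in flight */
    };

    /* Reserve a submission slot; in the driver this check happens under
     * ent->lock and the caller backs off when it fails. */
    bool may_submit(struct throttle_model *e)
    {
        if (e->pending >= MAX_PENDING_REG_MR)
            return false;            /* too many in flight: try again later */
        e->pending++;                /* slot taken before the async call */
        return true;
    }

    /* Mirrors the ent->pending-- in add_keys()'s error path: a failed
     * submission releases the slot again. */
    void submit_failed(struct throttle_model *e)
    {
        e->pending--;
    }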
239 static struct mlx5_ib_mr *create_cache_mr(struct mlx5_cache_ent *ent) in create_cache_mr() argument
252 mr = alloc_cache_mr(ent, mkc); in create_cache_mr()
258 err = mlx5_core_create_mkey(ent->dev->mdev, &mr->mmkey, in, inlen); in create_cache_mr()
263 WRITE_ONCE(ent->dev->cache.last_add, jiffies); in create_cache_mr()
264 spin_lock_irq(&ent->lock); in create_cache_mr()
265 ent->total_mrs++; in create_cache_mr()
266 spin_unlock_irq(&ent->lock); in create_cache_mr()
276 static void remove_cache_mr_locked(struct mlx5_cache_ent *ent) in remove_cache_mr_locked() argument
280 lockdep_assert_held(&ent->lock); in remove_cache_mr_locked()
281 if (list_empty(&ent->head)) in remove_cache_mr_locked()
283 mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list); in remove_cache_mr_locked()
285 ent->available_mrs--; in remove_cache_mr_locked()
286 ent->total_mrs--; in remove_cache_mr_locked()
287 spin_unlock_irq(&ent->lock); in remove_cache_mr_locked()
288 mlx5_core_destroy_mkey(ent->dev->mdev, &mr->mmkey); in remove_cache_mr_locked()
290 spin_lock_irq(&ent->lock); in remove_cache_mr_locked()
293 static int resize_available_mrs(struct mlx5_cache_ent *ent, unsigned int target, in resize_available_mrs() argument
298 lockdep_assert_held(&ent->lock); in resize_available_mrs()
302 target = ent->limit * 2; in resize_available_mrs()
303 if (target == ent->available_mrs + ent->pending) in resize_available_mrs()
305 if (target > ent->available_mrs + ent->pending) { in resize_available_mrs()
306 u32 todo = target - (ent->available_mrs + ent->pending); in resize_available_mrs()
308 spin_unlock_irq(&ent->lock); in resize_available_mrs()
309 err = add_keys(ent, todo); in resize_available_mrs()
312 spin_lock_irq(&ent->lock); in resize_available_mrs()
319 remove_cache_mr_locked(ent); in resize_available_mrs()
327 struct mlx5_cache_ent *ent = filp->private_data; in size_write() local
340 spin_lock_irq(&ent->lock); in size_write()
341 if (target < ent->total_mrs - ent->available_mrs) { in size_write()
345 target = target - (ent->total_mrs - ent->available_mrs); in size_write()
346 if (target < ent->limit || target > ent->limit*2) { in size_write()
350 err = resize_available_mrs(ent, target, false); in size_write()
353 spin_unlock_irq(&ent->lock); in size_write()
358 spin_unlock_irq(&ent->lock); in size_write()
365 struct mlx5_cache_ent *ent = filp->private_data; in size_read() local
369 err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->total_mrs); in size_read()
386 struct mlx5_cache_ent *ent = filp->private_data; in limit_write() local
398 spin_lock_irq(&ent->lock); in limit_write()
399 ent->limit = var; in limit_write()
400 err = resize_available_mrs(ent, 0, true); in limit_write()
401 spin_unlock_irq(&ent->lock); in limit_write()
410 struct mlx5_cache_ent *ent = filp->private_data; in limit_read() local
414 err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->limit); in limit_read()
433 struct mlx5_cache_ent *ent = &cache->ent[i]; in someone_adding() local
436 spin_lock_irq(&ent->lock); in someone_adding()
437 ret = ent->available_mrs < ent->limit; in someone_adding()
438 spin_unlock_irq(&ent->lock); in someone_adding()
450 static void queue_adjust_cache_locked(struct mlx5_cache_ent *ent) in queue_adjust_cache_locked() argument
452 lockdep_assert_held(&ent->lock); in queue_adjust_cache_locked()
454 if (ent->disabled || READ_ONCE(ent->dev->fill_delay)) in queue_adjust_cache_locked()
456 if (ent->available_mrs < ent->limit) { in queue_adjust_cache_locked()
457 ent->fill_to_high_water = true; in queue_adjust_cache_locked()
458 queue_work(ent->dev->cache.wq, &ent->work); in queue_adjust_cache_locked()
459 } else if (ent->fill_to_high_water && in queue_adjust_cache_locked()
460 ent->available_mrs + ent->pending < 2 * ent->limit) { in queue_adjust_cache_locked()
465 queue_work(ent->dev->cache.wq, &ent->work); in queue_adjust_cache_locked()
466 } else if (ent->available_mrs == 2 * ent->limit) { in queue_adjust_cache_locked()
467 ent->fill_to_high_water = false; in queue_adjust_cache_locked()
468 } else if (ent->available_mrs > 2 * ent->limit) { in queue_adjust_cache_locked()
470 ent->fill_to_high_water = false; in queue_adjust_cache_locked()
471 if (ent->pending) in queue_adjust_cache_locked()
472 queue_delayed_work(ent->dev->cache.wq, &ent->dwork, in queue_adjust_cache_locked()
475 queue_work(ent->dev->cache.wq, &ent->work); in queue_adjust_cache_locked()
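queue_adjust_cache_locked() is the low/high water-mark decision the other hits keep invoking: start filling when the entry drops below its limit, keep filling until it holds twice the limit, and shrink when it overshoots. A small, self-contained model of that decision (work queueing is reduced to messages, and the dev->fill_delay back-off is left out):

    #include <stdbool.h>
    #include <stdio.h>

    struct ent_model {
        unsigned int available_mrs;   /* MRs sitting on ent->head */
        unsigned int pending;         /* async creations in flight */
        unsigned int limit;           /* low water mark */
        bool fill_to_high_water;
        bool disabled;
    };

    void adjust(struct ent_model *e)
    {
        if (e->disabled)              /* the driver also bails on fill_delay */
            return;
        if (e->available_mrs < e->limit) {
            /* below the low water mark: start filling toward 2*limit */
            e->fill_to_high_water = true;
            puts("queue work (refill)");
        } else if (e->fill_to_high_water &&
                   e->available_mrs + e->pending < 2 * e->limit) {
            puts("queue work (keep filling)");   /* high water not reached yet */
        } else if (e->available_mrs == 2 * e->limit) {
            e->fill_to_high_water = false;       /* high water mark reached */
        } else if (e->available_mrs > 2 * e->limit) {
            /* overshoot: shrink, delayed while creations are still pending */
            e->fill_to_high_water = false;
            puts(e->pending ? "queue delayed work (shrink)"
                            : "queue work (shrink)");
        }
    }

    int main(void)
    {
        struct ent_model e = { .available_mrs = 3, .limit = 8 };

        adjust(&e);                   /* below the limit: refill is queued */
        e.available_mrs = 20;
        adjust(&e);                   /* above 2*limit: shrink is queued */
        return 0;
    }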
479 static void __cache_work_func(struct mlx5_cache_ent *ent) in __cache_work_func() argument
481 struct mlx5_ib_dev *dev = ent->dev; in __cache_work_func()
485 spin_lock_irq(&ent->lock); in __cache_work_func()
486 if (ent->disabled) in __cache_work_func()
489 if (ent->fill_to_high_water && in __cache_work_func()
490 ent->available_mrs + ent->pending < 2 * ent->limit && in __cache_work_func()
492 spin_unlock_irq(&ent->lock); in __cache_work_func()
493 err = add_keys(ent, 1); in __cache_work_func()
494 spin_lock_irq(&ent->lock); in __cache_work_func()
495 if (ent->disabled) in __cache_work_func()
507 ent->order, err); in __cache_work_func()
508 queue_delayed_work(cache->wq, &ent->dwork, in __cache_work_func()
512 } else if (ent->available_mrs > 2 * ent->limit) { in __cache_work_func()
527 spin_unlock_irq(&ent->lock); in __cache_work_func()
531 spin_lock_irq(&ent->lock); in __cache_work_func()
532 if (ent->disabled) in __cache_work_func()
535 queue_delayed_work(cache->wq, &ent->dwork, 300 * HZ); in __cache_work_func()
536 remove_cache_mr_locked(ent); in __cache_work_func()
537 queue_adjust_cache_locked(ent); in __cache_work_func()
540 spin_unlock_irq(&ent->lock); in __cache_work_func()
545 struct mlx5_cache_ent *ent; in delayed_cache_work_func() local
547 ent = container_of(work, struct mlx5_cache_ent, dwork.work); in delayed_cache_work_func()
548 __cache_work_func(ent); in delayed_cache_work_func()
553 struct mlx5_cache_ent *ent; in cache_work_func() local
555 ent = container_of(work, struct mlx5_cache_ent, work); in cache_work_func()
556 __cache_work_func(ent); in cache_work_func()
564 struct mlx5_cache_ent *ent; in mlx5_mr_cache_alloc() local
568 entry >= ARRAY_SIZE(cache->ent))) in mlx5_mr_cache_alloc()
575 ent = &cache->ent[entry]; in mlx5_mr_cache_alloc()
576 spin_lock_irq(&ent->lock); in mlx5_mr_cache_alloc()
577 if (list_empty(&ent->head)) { in mlx5_mr_cache_alloc()
578 spin_unlock_irq(&ent->lock); in mlx5_mr_cache_alloc()
579 mr = create_cache_mr(ent); in mlx5_mr_cache_alloc()
583 mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list); in mlx5_mr_cache_alloc()
585 ent->available_mrs--; in mlx5_mr_cache_alloc()
586 queue_adjust_cache_locked(ent); in mlx5_mr_cache_alloc()
587 spin_unlock_irq(&ent->lock); in mlx5_mr_cache_alloc()
598 struct mlx5_cache_ent *ent = req_ent; in get_cache_mr() local
601 for (; ent != &dev->cache.ent[MR_CACHE_LAST_STD_ENTRY + 1]; ent++) { in get_cache_mr()
602 mlx5_ib_dbg(dev, "order %u, cache index %zu\n", ent->order, in get_cache_mr()
603 ent - dev->cache.ent); in get_cache_mr()
605 spin_lock_irq(&ent->lock); in get_cache_mr()
606 if (!list_empty(&ent->head)) { in get_cache_mr()
607 mr = list_first_entry(&ent->head, struct mlx5_ib_mr, in get_cache_mr()
610 ent->available_mrs--; in get_cache_mr()
611 queue_adjust_cache_locked(ent); in get_cache_mr()
612 spin_unlock_irq(&ent->lock); in get_cache_mr()
615 queue_adjust_cache_locked(ent); in get_cache_mr()
616 spin_unlock_irq(&ent->lock); in get_cache_mr()
627 struct mlx5_cache_ent *ent = mr->cache_ent; in detach_mr_from_cache() local
630 spin_lock_irq(&ent->lock); in detach_mr_from_cache()
631 ent->total_mrs--; in detach_mr_from_cache()
632 spin_unlock_irq(&ent->lock); in detach_mr_from_cache()
637 struct mlx5_cache_ent *ent = mr->cache_ent; in mlx5_mr_cache_free() local
639 if (!ent) in mlx5_mr_cache_free()
648 spin_lock_irq(&ent->lock); in mlx5_mr_cache_free()
649 list_add_tail(&mr->list, &ent->head); in mlx5_mr_cache_free()
650 ent->available_mrs++; in mlx5_mr_cache_free()
651 queue_adjust_cache_locked(ent); in mlx5_mr_cache_free()
652 spin_unlock_irq(&ent->lock); in mlx5_mr_cache_free()
658 struct mlx5_cache_ent *ent = &cache->ent[c]; in clean_keys() local
663 cancel_delayed_work(&ent->dwork); in clean_keys()
665 spin_lock_irq(&ent->lock); in clean_keys()
666 if (list_empty(&ent->head)) { in clean_keys()
667 spin_unlock_irq(&ent->lock); in clean_keys()
670 mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list); in clean_keys()
672 ent->available_mrs--; in clean_keys()
673 ent->total_mrs--; in clean_keys()
674 spin_unlock_irq(&ent->lock); in clean_keys()
696 struct mlx5_cache_ent *ent; in mlx5_mr_cache_debugfs_init() local
706 ent = &cache->ent[i]; in mlx5_mr_cache_debugfs_init()
707 sprintf(ent->name, "%d", ent->order); in mlx5_mr_cache_debugfs_init()
708 dir = debugfs_create_dir(ent->name, cache->root); in mlx5_mr_cache_debugfs_init()
709 debugfs_create_file("size", 0600, dir, ent, &size_fops); in mlx5_mr_cache_debugfs_init()
710 debugfs_create_file("limit", 0600, dir, ent, &limit_fops); in mlx5_mr_cache_debugfs_init()
711 debugfs_create_u32("cur", 0400, dir, &ent->available_mrs); in mlx5_mr_cache_debugfs_init()
712 debugfs_create_u32("miss", 0600, dir, &ent->miss); in mlx5_mr_cache_debugfs_init()
726 struct mlx5_cache_ent *ent; in mlx5_mr_cache_init() local
739 ent = &cache->ent[i]; in mlx5_mr_cache_init()
740 INIT_LIST_HEAD(&ent->head); in mlx5_mr_cache_init()
741 spin_lock_init(&ent->lock); in mlx5_mr_cache_init()
742 ent->order = i + 2; in mlx5_mr_cache_init()
743 ent->dev = dev; in mlx5_mr_cache_init()
744 ent->limit = 0; in mlx5_mr_cache_init()
746 INIT_WORK(&ent->work, cache_work_func); in mlx5_mr_cache_init()
747 INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func); in mlx5_mr_cache_init()
750 mlx5_odp_init_mr_cache_entry(ent); in mlx5_mr_cache_init()
754 if (ent->order > mr_cache_max_order(dev)) in mlx5_mr_cache_init()
757 ent->page = PAGE_SHIFT; in mlx5_mr_cache_init()
758 ent->xlt = (1 << ent->order) * sizeof(struct mlx5_mtt) / in mlx5_mr_cache_init()
760 ent->access_mode = MLX5_MKC_ACCESS_MODE_MTT; in mlx5_mr_cache_init()
764 ent->limit = dev->mdev->profile->mr_cache[i].limit; in mlx5_mr_cache_init()
766 ent->limit = 0; in mlx5_mr_cache_init()
767 spin_lock_irq(&ent->lock); in mlx5_mr_cache_init()
768 queue_adjust_cache_locked(ent); in mlx5_mr_cache_init()
769 spin_unlock_irq(&ent->lock); in mlx5_mr_cache_init()
785 struct mlx5_cache_ent *ent = &dev->cache.ent[i]; in mlx5_mr_cache_cleanup() local
787 spin_lock_irq(&ent->lock); in mlx5_mr_cache_cleanup()
788 ent->disabled = true; in mlx5_mr_cache_cleanup()
789 spin_unlock_irq(&ent->lock); in mlx5_mr_cache_cleanup()
790 cancel_work_sync(&ent->work); in mlx5_mr_cache_cleanup()
791 cancel_delayed_work_sync(&ent->dwork); in mlx5_mr_cache_cleanup()
969 if (order < cache->ent[0].order) in mr_cache_ent_from_order()
970 return &cache->ent[0]; in mr_cache_ent_from_order()
971 order = order - cache->ent[0].order; in mr_cache_ent_from_order()
974 return &cache->ent[order]; in mr_cache_ent_from_order()
983 struct mlx5_cache_ent *ent = mr_cache_ent_from_order(dev, order); in alloc_mr_from_cache() local
986 if (!ent) in alloc_mr_from_cache()
993 mr = get_cache_mr(ent); in alloc_mr_from_cache()
995 mr = create_cache_mr(ent); in alloc_mr_from_cache()
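Taken together, the get_cache_mr() and alloc_mr_from_cache() hits describe a cache-first allocation: scan from the entry matching the requested order toward larger orders, and only create an mkey synchronously when every candidate entry is empty. A rough stand-alone model, with entries reduced to counters and create_synchronously() standing in for create_cache_mr():

    #define NUM_ENTRIES 16                /* illustrative number of cache entries */

    struct ent_model {
        unsigned int available_mrs;       /* MRs ready on this entry */
    };

    int create_synchronously(void)        /* stand-in for create_cache_mr() */
    {
        return 1000;                      /* pretend handle for a fresh mkey */
    }

    /* Returns a token for the MR handed out: the index of the entry that
     * had one ready, or the stand-in handle from a synchronous create. */
    int alloc_from_cache(struct ent_model ents[NUM_ENTRIES], int start)
    {
        for (int i = start; i < NUM_ENTRIES; i++) {
            if (ents[i].available_mrs) {
                ents[i].available_mrs--;  /* take one prebuilt MR */
                return i;
            }
            /* empty entry: the driver queues a refill here and then tries
             * the next (larger) order before falling back */
        }
        return create_synchronously();    /* every candidate entry was empty */
    }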