Lines matching refs:pool (identifier search; all hits are in net/rds/ib_rdma.c, the RDS IB MR pool implementation)

194 struct rds_ib_mr *rds_ib_reuse_mr(struct rds_ib_mr_pool *pool)  in rds_ib_reuse_mr()  argument
203 ret = llist_del_first(&pool->clean_list); in rds_ib_reuse_mr()
206 if (pool->pool_type == RDS_IB_MR_8K_POOL) in rds_ib_reuse_mr()
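
Note on the reuse path (lines 194-206): rds_ib_reuse_mr() pops one MR off the pool's lock-free clean list; the pool_type test on line 206 only selects which statistics counter to bump. A minimal sketch of the pop, with struct names shortened and the stats call dropped, so treat the exact shape as an assumption:

    #include <linux/llist.h>

    struct mr {
            struct llist_node llnode;       /* linkage on the pool's clean_list */
    };

    /* Pop one clean MR without taking a lock; NULL when the list is empty. */
    static struct mr *reuse_mr(struct llist_head *clean_list)
    {
            struct llist_node *node = llist_del_first(clean_list);

            return node ? llist_entry(node, struct mr, llnode) : NULL;
    }

llist_del_first() requires concurrent deleters to be serialized with each other, which the surrounding pool code has to guarantee.
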
283 struct rds_ib_mr_pool *pool = ibmr->pool; in rds_ib_teardown_mr() local
285 atomic_sub(pinned, &pool->free_pinned); in rds_ib_teardown_mr()
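
Lines 283-285 are the teardown half of the pinned-page accounting: rds_ib_free_mr() credits the MR's scatterlist length to free_pinned (line 513 below), and teardown debits it once the pages are actually released. Sketched as a pair, with the counter pulled out to file scope purely for illustration:

    #include <linux/atomic.h>

    static atomic_t free_pinned = ATOMIC_INIT(0);   /* pages awaiting release */

    static void mr_queued_for_free(unsigned int sg_len)
    {
            atomic_add(sg_len, &free_pinned);       /* rds_ib_free_mr() side */
    }

    static void mr_torn_down(unsigned int pinned)
    {
            atomic_sub(pinned, &free_pinned);       /* rds_ib_teardown_mr() side */
    }
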
289 static inline unsigned int rds_ib_flush_goal(struct rds_ib_mr_pool *pool, int free_all) in rds_ib_flush_goal() argument
293 item_count = atomic_read(&pool->item_count); in rds_ib_flush_goal()
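
rds_ib_flush_goal() (lines 289-293) turns the pool's current item_count into the number of MRs the flush must actually destroy: all of them when free_all is set (pool teardown, line 595), none otherwise. A sketch inferred from the matched lines and the call sites, so the free_all branch is an assumption:

    #include <linux/atomic.h>

    /* A plain flush only cleans MRs; only a free_all flush destroys them. */
    static unsigned int flush_goal(atomic_t *item_count, int free_all)
    {
            return free_all ? atomic_read(item_count) : 0;
    }
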
327 static void list_to_llist_nodes(struct rds_ib_mr_pool *pool, in list_to_llist_nodes() argument
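
list_to_llist_nodes() (line 327) bridges the two list flavors used here: MRs sit on a regular list_head while being unmapped, then go back onto the lock-free clean_list in one shot. A sketch of that conversion, with shortened names and the member layout assumed from the listing:

    #include <linux/list.h>
    #include <linux/llist.h>

    struct mr {
            struct list_head unmap_list;    /* while on the private unmap list */
            struct llist_node llnode;       /* while on the lock-free pool lists */
    };

    /*
     * Chain the entries' llist_nodes together and hand back head and
     * tail, so the whole run can be spliced onto clean_list with a
     * single llist_add_batch() call (line 436).
     */
    static void to_llist_nodes(struct list_head *list,
                               struct llist_node **head,
                               struct llist_node **tail)
    {
            struct llist_node **nextp = head;
            struct llist_node *cur = NULL;
            struct mr *mr;

            list_for_each_entry(mr, list, unmap_list) {
                    cur = &mr->llnode;
                    *nextp = cur;
                    nextp = &cur->next;
            }
            *nextp = NULL;
            *tail = cur;
    }
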
351 int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool, in rds_ib_flush_mr_pool() argument
361 if (pool->pool_type == RDS_IB_MR_8K_POOL) in rds_ib_flush_mr_pool()
368 while (!mutex_trylock(&pool->flush_lock)) { in rds_ib_flush_mr_pool()
369 ibmr = rds_ib_reuse_mr(pool); in rds_ib_flush_mr_pool()
372 finish_wait(&pool->flush_wait, &wait); in rds_ib_flush_mr_pool()
376 prepare_to_wait(&pool->flush_wait, &wait, in rds_ib_flush_mr_pool()
378 if (llist_empty(&pool->clean_list)) in rds_ib_flush_mr_pool()
381 ibmr = rds_ib_reuse_mr(pool); in rds_ib_flush_mr_pool()
384 finish_wait(&pool->flush_wait, &wait); in rds_ib_flush_mr_pool()
388 finish_wait(&pool->flush_wait, &wait); in rds_ib_flush_mr_pool()
390 mutex_lock(&pool->flush_lock); in rds_ib_flush_mr_pool()
393 ibmr = rds_ib_reuse_mr(pool); in rds_ib_flush_mr_pool()
403 dirty_to_clean = llist_append_to_list(&pool->drop_list, &unmap_list); in rds_ib_flush_mr_pool()
404 dirty_to_clean += llist_append_to_list(&pool->free_list, &unmap_list); in rds_ib_flush_mr_pool()
406 llist_append_to_list(&pool->clean_list, &unmap_list); in rds_ib_flush_mr_pool()
408 free_goal = rds_ib_flush_goal(pool, free_all); in rds_ib_flush_mr_pool()
413 if (pool->use_fastreg) in rds_ib_flush_mr_pool()
430 list_to_llist_nodes(pool, &unmap_list, &clean_nodes, &clean_tail); in rds_ib_flush_mr_pool()
436 llist_add_batch(clean_nodes->next, clean_tail, &pool->clean_list); in rds_ib_flush_mr_pool()
440 atomic_sub(unpinned, &pool->free_pinned); in rds_ib_flush_mr_pool()
441 atomic_sub(dirty_to_clean, &pool->dirty_count); in rds_ib_flush_mr_pool()
442 atomic_sub(nfreed, &pool->item_count); in rds_ib_flush_mr_pool()
445 mutex_unlock(&pool->flush_lock); in rds_ib_flush_mr_pool()
446 if (waitqueue_active(&pool->flush_wait)) in rds_ib_flush_mr_pool()
447 wake_up(&pool->flush_wait); in rds_ib_flush_mr_pool()
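
rds_ib_flush_mr_pool() (lines 351-447) has two distinct phases. Lines 368-390 arbitrate entry: a caller that also wants an MR back does not queue behind an in-flight flush; it keeps trying to steal an MR the flusher has already moved to clean_list, and only sleeps while that list stays empty. A sketch of that arbitration, with shortened names and the return convention as an assumption:

    #include <linux/llist.h>
    #include <linux/mutex.h>
    #include <linux/sched.h>
    #include <linux/types.h>
    #include <linux/wait.h>

    /*
     * Returns true with flush_lock held (caller runs the flush), or
     * false with *reused pointing at a recycled clean-list entry.
     */
    static bool flush_or_reuse(struct mutex *flush_lock,
                               wait_queue_head_t *flush_wait,
                               struct llist_head *clean_list,
                               struct llist_node **reused)
    {
            DEFINE_WAIT(wait);

            *reused = NULL;
            while (!mutex_trylock(flush_lock)) {
                    *reused = llist_del_first(clean_list);
                    if (*reused)
                            break;

                    prepare_to_wait(flush_wait, &wait, TASK_UNINTERRUPTIBLE);
                    if (llist_empty(clean_list))
                            schedule();     /* flusher wakes us via flush_wait */
                    *reused = llist_del_first(clean_list);
                    if (*reused)
                            break;
            }
            finish_wait(flush_wait, &wait);
            return !*reused;
    }

Once the lock is held, lines 403-442 splice drop_list and free_list onto a private unmap_list (clean_list too when free_all), unmap and free MRs toward the flush goal, push the survivors back onto clean_list with one llist_add_batch(), and correct free_pinned, dirty_count and item_count; lines 445-447 unlock and wake any waiters parked in the loop above.
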
452 struct rds_ib_mr *rds_ib_try_reuse_ibmr(struct rds_ib_mr_pool *pool) in rds_ib_try_reuse_ibmr() argument
457 if (atomic_read(&pool->dirty_count) >= pool->max_items_soft / 10) in rds_ib_try_reuse_ibmr()
458 queue_delayed_work(rds_ib_mr_wq, &pool->flush_worker, 10); in rds_ib_try_reuse_ibmr()
461 ibmr = rds_ib_reuse_mr(pool); in rds_ib_try_reuse_ibmr()
465 if (atomic_inc_return(&pool->item_count) <= pool->max_items) in rds_ib_try_reuse_ibmr()
468 atomic_dec(&pool->item_count); in rds_ib_try_reuse_ibmr()
471 if (pool->pool_type == RDS_IB_MR_8K_POOL) in rds_ib_try_reuse_ibmr()
479 if (pool->pool_type == RDS_IB_MR_8K_POOL) in rds_ib_try_reuse_ibmr()
484 rds_ib_flush_mr_pool(pool, 0, &ibmr); in rds_ib_try_reuse_ibmr()
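
rds_ib_try_reuse_ibmr() (lines 452-484) layers three fallbacks: kick the background flusher early once dirty_count reaches a tenth of max_items_soft (lines 457-458), try a clean-list reuse, and otherwise reserve capacity optimistically by bumping item_count and rolling the bump back if it overshot max_items (lines 465-468). The reservation in isolation, as a sketch:

    #include <linux/atomic.h>
    #include <linux/types.h>

    /* Optimistically claim a pool slot; undo the bump on overshoot. */
    static bool reserve_slot(atomic_t *item_count, unsigned int max_items)
    {
            if (atomic_inc_return(item_count) <= max_items)
                    return true;

            atomic_dec(item_count);         /* over the cap: give it back */
            return false;
    }

When even that fails, line 484 runs a synchronous rds_ib_flush_mr_pool(pool, 0, &ibmr) so the caller gets a recycled MR instead of a fresh allocation.
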
494 struct rds_ib_mr_pool *pool = container_of(work, struct rds_ib_mr_pool, flush_worker.work); in rds_ib_mr_pool_flush_worker() local
496 rds_ib_flush_mr_pool(pool, 0, NULL); in rds_ib_mr_pool_flush_worker()
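
The flush worker (lines 494-496) is the standard delayed-work idiom: recover the owning pool from the embedded work item via container_of(), then run an asynchronous, non-freeing flush. Sketch with a shortened struct; the real body calls rds_ib_flush_mr_pool(pool, 0, NULL):

    #include <linux/kernel.h>
    #include <linux/workqueue.h>

    struct mr_pool {
            struct delayed_work flush_worker;
            /* ... lists and counters ... */
    };

    static void pool_flush_worker(struct work_struct *work)
    {
            struct mr_pool *pool = container_of(work, struct mr_pool,
                                                flush_worker.work);

            (void)pool;     /* placeholder for the non-freeing flush call */
    }

Note the .work member hop: the workqueue hands over the inner work_struct, so container_of() must name flush_worker.work, not flush_worker.
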
502 struct rds_ib_mr_pool *pool = ibmr->pool; in rds_ib_free_mr() local
513 atomic_add(ibmr->sg_len, &pool->free_pinned); in rds_ib_free_mr()
514 atomic_inc(&pool->dirty_count); in rds_ib_free_mr()
517 if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned || in rds_ib_free_mr()
518 atomic_read(&pool->dirty_count) >= pool->max_items / 5) in rds_ib_free_mr()
519 queue_delayed_work(rds_ib_mr_wq, &pool->flush_worker, 10); in rds_ib_free_mr()
523 rds_ib_flush_mr_pool(pool, 0, NULL); in rds_ib_free_mr()
529 &pool->flush_worker, 10); in rds_ib_free_mr()
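
rds_ib_free_mr() (lines 502-529) never releases anything in the caller's context: it credits sg_len to free_pinned, marks the MR dirty, and defers the real work. Lines 517-519 gate that deferral so the flusher only runs once pinned memory or the dirty count crosses a pool threshold. The gate as a sketch (struct shortened; the workqueue parameter stands in for rds_ib_mr_wq):

    #include <linux/atomic.h>
    #include <linux/workqueue.h>

    struct mr_pool {
            atomic_t free_pinned, dirty_count;
            unsigned long max_free_pinned, max_items;
            struct delayed_work flush_worker;
    };

    /* Kick the background flusher once either threshold is crossed. */
    static void maybe_kick_flush(struct workqueue_struct *wq,
                                 struct mr_pool *pool)
    {
            if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned ||
                atomic_read(&pool->dirty_count) >= pool->max_items / 5)
                    queue_delayed_work(wq, &pool->flush_worker, 10);
    }
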
592 void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *pool) in rds_ib_destroy_mr_pool() argument
594 cancel_delayed_work_sync(&pool->flush_worker); in rds_ib_destroy_mr_pool()
595 rds_ib_flush_mr_pool(pool, 1, NULL); in rds_ib_destroy_mr_pool()
596 WARN_ON(atomic_read(&pool->item_count)); in rds_ib_destroy_mr_pool()
597 WARN_ON(atomic_read(&pool->free_pinned)); in rds_ib_destroy_mr_pool()
598 kfree(pool); in rds_ib_destroy_mr_pool()
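
Destruction (lines 592-598) is ordering-sensitive: cancel the delayed worker synchronously first so no flush can be re-queued, run one final free_all flush, and only then assert that nothing leaked. A sketch reusing the shortened mr_pool shape; the comment stands in for rds_ib_flush_mr_pool(pool, 1, NULL):

    #include <linux/atomic.h>
    #include <linux/kernel.h>
    #include <linux/slab.h>
    #include <linux/workqueue.h>

    struct mr_pool {
            struct delayed_work flush_worker;
            atomic_t item_count, free_pinned;
    };

    static void destroy_pool(struct mr_pool *pool)
    {
            cancel_delayed_work_sync(&pool->flush_worker);  /* stop re-queues */
            /* final free_all flush of every list would go here */
            WARN_ON(atomic_read(&pool->item_count));        /* MRs leaked?  */
            WARN_ON(atomic_read(&pool->free_pinned));       /* pages leaked? */
            kfree(pool);
    }
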
604 struct rds_ib_mr_pool *pool; in rds_ib_create_mr_pool() local
606 pool = kzalloc(sizeof(*pool), GFP_KERNEL); in rds_ib_create_mr_pool()
607 if (!pool) in rds_ib_create_mr_pool()
610 pool->pool_type = pool_type; in rds_ib_create_mr_pool()
611 init_llist_head(&pool->free_list); in rds_ib_create_mr_pool()
612 init_llist_head(&pool->drop_list); in rds_ib_create_mr_pool()
613 init_llist_head(&pool->clean_list); in rds_ib_create_mr_pool()
614 mutex_init(&pool->flush_lock); in rds_ib_create_mr_pool()
615 init_waitqueue_head(&pool->flush_wait); in rds_ib_create_mr_pool()
616 INIT_DELAYED_WORK(&pool->flush_worker, rds_ib_mr_pool_flush_worker); in rds_ib_create_mr_pool()
620 pool->fmr_attr.max_pages = RDS_MR_1M_MSG_SIZE + 1; in rds_ib_create_mr_pool()
621 pool->max_items = rds_ibdev->max_1m_mrs; in rds_ib_create_mr_pool()
624 pool->fmr_attr.max_pages = RDS_MR_8K_MSG_SIZE + 1; in rds_ib_create_mr_pool()
625 pool->max_items = rds_ibdev->max_8k_mrs; in rds_ib_create_mr_pool()
628 pool->max_free_pinned = pool->max_items * pool->fmr_attr.max_pages / 4; in rds_ib_create_mr_pool()
629 pool->fmr_attr.max_maps = rds_ibdev->fmr_max_remaps; in rds_ib_create_mr_pool()
630 pool->fmr_attr.page_shift = PAGE_SHIFT; in rds_ib_create_mr_pool()
631 pool->max_items_soft = rds_ibdev->max_mrs * 3 / 4; in rds_ib_create_mr_pool()
632 pool->use_fastreg = rds_ibdev->use_fastreg; in rds_ib_create_mr_pool()
634 return pool; in rds_ib_create_mr_pool()
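
Creation (lines 604-634) is the mirror image: zero-allocate, initialize the three llist heads, the flush mutex, waitqueue and delayed work, then size the pool by type; 8K pools serve small transfers, 1M pools large ones, and max_items_soft at three quarters of the device's MR budget leaves headroom before hard depletion. A condensed sketch preserving that initialization order (type constants and sizing inputs are placeholders):

    #include <linux/err.h>
    #include <linux/llist.h>
    #include <linux/mutex.h>
    #include <linux/slab.h>
    #include <linux/wait.h>
    #include <linux/workqueue.h>

    struct mr_pool {
            unsigned int pool_type;
            struct llist_head free_list, drop_list, clean_list;
            struct mutex flush_lock;
            wait_queue_head_t flush_wait;
            struct delayed_work flush_worker;
            unsigned long max_items, max_items_soft;
    };

    static void pool_flush_worker(struct work_struct *work)
    {
            /* deferred non-freeing flush; see the worker sketch above */
    }

    static struct mr_pool *create_pool(unsigned int pool_type,
                                       unsigned long max_mrs)
    {
            struct mr_pool *pool = kzalloc(sizeof(*pool), GFP_KERNEL);

            if (!pool)
                    return ERR_PTR(-ENOMEM);

            pool->pool_type = pool_type;
            init_llist_head(&pool->free_list);
            init_llist_head(&pool->drop_list);
            init_llist_head(&pool->clean_list);
            mutex_init(&pool->flush_lock);
            init_waitqueue_head(&pool->flush_wait);
            INIT_DELAYED_WORK(&pool->flush_worker, pool_flush_worker);
            pool->max_items = max_mrs;
            pool->max_items_soft = max_mrs * 3 / 4; /* soft cap at 75% */
            return pool;
    }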