Lines matching refs: pool (drivers/infiniband/core/fmr_pool.c, the Linux kernel FMR pool). Each entry gives the file line number, the matching source line, and the enclosing function; "argument" and "local" note whether pool is that function's parameter or a local variable.
95 void (*flush_function)(struct ib_fmr_pool *pool,
115 static inline struct ib_pool_fmr *ib_fmr_cache_lookup(struct ib_fmr_pool *pool, in ib_fmr_cache_lookup() argument
123 if (!pool->cache_bucket) in ib_fmr_cache_lookup()
126 bucket = pool->cache_bucket + ib_fmr_hash(*page_list); in ib_fmr_cache_lookup()
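The lookup at lines 115-126 hashes the first page address of the request into the pool's bucket array and walks that bucket for an FMR that already maps the same page list. A minimal userspace sketch of that bucket walk (hypothetical types and hash function, not the kernel's ib_fmr_hash()):

    #include <stdint.h>
    #include <string.h>

    #define FMR_HASH_SIZE 1024              /* assumed bucket count */

    struct pool_fmr {
            struct pool_fmr *next;          /* bucket chain */
            uint64_t        *page_list;     /* mapped page addresses */
            int              page_list_len;
    };

    /* Hash on the first page address, as the kernel does with
     * ib_fmr_hash(*page_list); the mixing constant here is arbitrary. */
    static unsigned int fmr_hash(uint64_t first_page)
    {
            return (unsigned int)((first_page >> 12) * 2654435761u)
                    % FMR_HASH_SIZE;
    }

    static struct pool_fmr *fmr_cache_lookup(struct pool_fmr **buckets,
                                             uint64_t *page_list, int len)
    {
            struct pool_fmr *fmr;

            if (!buckets)                   /* caching disabled for this pool */
                    return NULL;

            for (fmr = buckets[fmr_hash(page_list[0])]; fmr; fmr = fmr->next) {
                    if (fmr->page_list_len == len &&
                        memcmp(fmr->page_list, page_list,
                               len * sizeof(*page_list)) == 0)
                            return fmr;     /* same mapping already exists */
            }
            return NULL;
    }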
138 static void ib_fmr_batch_release(struct ib_fmr_pool *pool) in ib_fmr_batch_release() argument
145 spin_lock_irq(&pool->pool_lock); in ib_fmr_batch_release()
147 list_for_each_entry(fmr, &pool->dirty_list, list) { in ib_fmr_batch_release()
153 list_splice_init(&pool->dirty_list, &unmap_list); in ib_fmr_batch_release()
154 pool->dirty_len = 0; in ib_fmr_batch_release()
156 spin_unlock_irq(&pool->pool_lock); in ib_fmr_batch_release()
166 spin_lock_irq(&pool->pool_lock); in ib_fmr_batch_release()
167 list_splice(&unmap_list, &pool->free_list); in ib_fmr_batch_release()
168 spin_unlock_irq(&pool->pool_lock); in ib_fmr_batch_release()
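Lines 138-168 are the heart of ib_fmr_batch_release(): splice the dirty list onto a private list under the lock, do the expensive unmap with the lock dropped, then splice the clean entries back onto the free list. The same shape in a compact pthread sketch (hypothetical singly-linked lists standing in for the kernel's list_head):

    #include <pthread.h>
    #include <stddef.h>

    struct fmr {
            struct fmr *next;
    };

    struct pool {
            pthread_mutex_t lock;
            struct fmr     *dirty;          /* dirty list */
            struct fmr     *free;           /* free list */
            int             dirty_len;
    };

    static void unmap_hw(struct fmr *list) { (void)list; /* slow HW unmap */ }

    /* Steal the dirty list under the lock, unmap with the lock dropped,
     * then return the entries to the free list. */
    static void batch_release(struct pool *p)
    {
            struct fmr *unmap_list, *f;

            pthread_mutex_lock(&p->lock);
            unmap_list = p->dirty;          /* like list_splice_init() */
            p->dirty = NULL;
            p->dirty_len = 0;
            pthread_mutex_unlock(&p->lock);

            unmap_hw(unmap_list);           /* expensive work, no lock held */

            pthread_mutex_lock(&p->lock);
            while ((f = unmap_list) != NULL) {      /* like list_splice() */
                    unmap_list = f->next;
                    f->next = p->free;
                    p->free = f;
            }
            pthread_mutex_unlock(&p->lock);
    }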
173 struct ib_fmr_pool *pool = container_of(work, struct ib_fmr_pool, work); in ib_fmr_cleanup_func() local
175 ib_fmr_batch_release(pool); in ib_fmr_cleanup_func()
176 atomic_inc(&pool->flush_ser); in ib_fmr_cleanup_func()
177 wake_up_interruptible(&pool->force_wait); in ib_fmr_cleanup_func()
179 if (pool->flush_function) in ib_fmr_cleanup_func()
180 pool->flush_function(pool, pool->flush_arg); in ib_fmr_cleanup_func()
182 if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) in ib_fmr_cleanup_func()
183 kthread_queue_work(pool->worker, &pool->work); in ib_fmr_cleanup_func()
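Lines 173-183 show the worker body: release a batch, advance flush_ser, wake any waiter in ib_flush_fmr_pool(), run the consumer's optional flush_function callback (declared at line 95), and re-queue itself while flush_ser still trails req_ser. The subtraction-then-compare at line 182 is the wraparound-safe way to order two free-running counters; a portable C11 rendering of that test:

    #include <stdatomic.h>
    #include <stdbool.h>

    /* True while completed flushes (flush_ser) trail requested flushes
     * (req_ser), even after the counters wrap. The unsigned subtraction
     * avoids the signed-overflow UB that the kernel sidesteps with its
     * compiler flags. */
    static bool flush_pending(atomic_uint *flush_ser, atomic_uint *req_ser)
    {
            return (int)(atomic_load(flush_ser) - atomic_load(req_ser)) < 0;
    }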
198 struct ib_fmr_pool *pool; in ib_create_fmr_pool() local
218 pool = kmalloc(sizeof *pool, GFP_KERNEL); in ib_create_fmr_pool()
219 if (!pool) in ib_create_fmr_pool()
222 pool->cache_bucket = NULL; in ib_create_fmr_pool()
223 pool->flush_function = params->flush_function; in ib_create_fmr_pool()
224 pool->flush_arg = params->flush_arg; in ib_create_fmr_pool()
226 INIT_LIST_HEAD(&pool->free_list); in ib_create_fmr_pool()
227 INIT_LIST_HEAD(&pool->dirty_list); in ib_create_fmr_pool()
230 pool->cache_bucket = in ib_create_fmr_pool()
232 sizeof(*pool->cache_bucket), in ib_create_fmr_pool()
234 if (!pool->cache_bucket) { in ib_create_fmr_pool()
240 INIT_HLIST_HEAD(pool->cache_bucket + i); in ib_create_fmr_pool()
243 pool->pool_size = 0; in ib_create_fmr_pool()
244 pool->max_pages = params->max_pages_per_fmr; in ib_create_fmr_pool()
245 pool->max_remaps = max_remaps; in ib_create_fmr_pool()
246 pool->dirty_watermark = params->dirty_watermark; in ib_create_fmr_pool()
247 pool->dirty_len = 0; in ib_create_fmr_pool()
248 spin_lock_init(&pool->pool_lock); in ib_create_fmr_pool()
249 atomic_set(&pool->req_ser, 0); in ib_create_fmr_pool()
250 atomic_set(&pool->flush_ser, 0); in ib_create_fmr_pool()
251 init_waitqueue_head(&pool->force_wait); in ib_create_fmr_pool()
253 pool->worker = in ib_create_fmr_pool()
255 if (IS_ERR(pool->worker)) { in ib_create_fmr_pool()
257 ret = PTR_ERR(pool->worker); in ib_create_fmr_pool()
260 kthread_init_work(&pool->work, ib_fmr_cleanup_func); in ib_create_fmr_pool()
266 .max_maps = pool->max_remaps, in ib_create_fmr_pool()
271 if (pool->cache_bucket) in ib_create_fmr_pool()
279 fmr->pool = pool; in ib_create_fmr_pool()
292 list_add_tail(&fmr->list, &pool->free_list); in ib_create_fmr_pool()
293 ++pool->pool_size; in ib_create_fmr_pool()
297 return pool; in ib_create_fmr_pool()
300 kfree(pool->cache_bucket); in ib_create_fmr_pool()
301 kfree(pool); in ib_create_fmr_pool()
306 ib_destroy_fmr_pool(pool); in ib_create_fmr_pool()
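ib_create_fmr_pool() (lines 198-306) is a standard construct-and-unwind: allocate the pool, install the flush callback, initialize the lists, the optional cache buckets, the lock, the serial counters, and the wait queue, spawn the worker, then populate the free list; any failure unwinds whatever was built so far. A stripped-down userspace sketch of that unwinding shape (hypothetical fields; worker and FMR allocation omitted):

    #include <pthread.h>
    #include <stdlib.h>

    struct fmr_pool {
            pthread_mutex_t  lock;
            void           **cache_bucket;  /* NULL when caching is off */
            int              pool_size;
            int              dirty_watermark;
    };

    /* Allocate and initialize a pool; on failure, free in reverse order,
     * mirroring the error labels in ib_create_fmr_pool(). */
    static struct fmr_pool *create_pool(int cache_buckets, int watermark)
    {
            struct fmr_pool *pool;

            pool = malloc(sizeof(*pool));
            if (!pool)
                    return NULL;

            pool->cache_bucket = NULL;
            pool->pool_size = 0;
            pool->dirty_watermark = watermark;

            if (cache_buckets) {
                    pool->cache_bucket = calloc(cache_buckets,
                                                sizeof(*pool->cache_bucket));
                    if (!pool->cache_bucket)
                            goto out_free_pool;
            }

            if (pthread_mutex_init(&pool->lock, NULL) != 0)
                    goto out_free_buckets;

            return pool;

    out_free_buckets:
            free(pool->cache_bucket);
    out_free_pool:
            free(pool);
            return NULL;
    }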
318 void ib_destroy_fmr_pool(struct ib_fmr_pool *pool) in ib_destroy_fmr_pool() argument
325 kthread_destroy_worker(pool->worker); in ib_destroy_fmr_pool()
326 ib_fmr_batch_release(pool); in ib_destroy_fmr_pool()
329 list_for_each_entry_safe(fmr, tmp, &pool->free_list, list) { in ib_destroy_fmr_pool()
341 if (i < pool->pool_size) in ib_destroy_fmr_pool()
343 pool->pool_size - i); in ib_destroy_fmr_pool()
345 kfree(pool->cache_bucket); in ib_destroy_fmr_pool()
346 kfree(pool); in ib_destroy_fmr_pool()
356 int ib_flush_fmr_pool(struct ib_fmr_pool *pool) in ib_flush_fmr_pool() argument
367 spin_lock_irq(&pool->pool_lock); in ib_flush_fmr_pool()
368 list_for_each_entry_safe(fmr, next, &pool->free_list, list) { in ib_flush_fmr_pool()
370 list_move(&fmr->list, &pool->dirty_list); in ib_flush_fmr_pool()
372 spin_unlock_irq(&pool->pool_lock); in ib_flush_fmr_pool()
374 serial = atomic_inc_return(&pool->req_ser); in ib_flush_fmr_pool()
375 kthread_queue_work(pool->worker, &pool->work); in ib_flush_fmr_pool()
377 if (wait_event_interruptible(pool->force_wait, in ib_flush_fmr_pool()
378 atomic_read(&pool->flush_ser) - serial >= 0)) in ib_flush_fmr_pool()
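ib_flush_fmr_pool() (lines 356-378) forces out FMRs that still hold stale mappings: it moves eligible free-list entries to the dirty list, takes a ticket with atomic_inc_return(&req_ser), queues the worker, and sleeps until flush_ser reaches its ticket. A condition-variable version of that ticket handshake (hypothetical names; the kernel uses a wait queue and kthread work instead):

    #include <pthread.h>

    struct flusher {
            pthread_mutex_t lock;
            pthread_cond_t  done;
            int             req_ser;        /* last flush requested */
            int             flush_ser;      /* last flush completed */
    };

    /* Take a ticket and wait for the worker to pass it, the way
     * ib_flush_fmr_pool() waits for flush_ser - serial >= 0. */
    static void flush_and_wait(struct flusher *f)
    {
            int serial;

            pthread_mutex_lock(&f->lock);
            serial = ++f->req_ser;          /* atomic_inc_return(&req_ser) */
            /* a real pool would queue the worker here */
            while (f->flush_ser - serial < 0)
                    pthread_cond_wait(&f->done, &f->lock);
            pthread_mutex_unlock(&f->lock);
    }

    /* Worker side: after each batch release, publish progress and wake
     * any waiters. */
    static void flush_done(struct flusher *f)
    {
            pthread_mutex_lock(&f->lock);
            f->flush_ser++;
            pthread_cond_broadcast(&f->done);
            pthread_mutex_unlock(&f->lock);
    }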
397 struct ib_fmr_pool *pool = pool_handle; in ib_fmr_pool_map_phys() local
402 if (list_len < 1 || list_len > pool->max_pages) in ib_fmr_pool_map_phys()
405 spin_lock_irqsave(&pool->pool_lock, flags); in ib_fmr_pool_map_phys()
406 fmr = ib_fmr_cache_lookup(pool, in ib_fmr_pool_map_phys()
417 spin_unlock_irqrestore(&pool->pool_lock, flags); in ib_fmr_pool_map_phys()
422 if (list_empty(&pool->free_list)) { in ib_fmr_pool_map_phys()
423 spin_unlock_irqrestore(&pool->pool_lock, flags); in ib_fmr_pool_map_phys()
427 fmr = list_entry(pool->free_list.next, struct ib_pool_fmr, list); in ib_fmr_pool_map_phys()
430 spin_unlock_irqrestore(&pool->pool_lock, flags); in ib_fmr_pool_map_phys()
436 spin_lock_irqsave(&pool->pool_lock, flags); in ib_fmr_pool_map_phys()
437 list_add(&fmr->list, &pool->free_list); in ib_fmr_pool_map_phys()
438 spin_unlock_irqrestore(&pool->pool_lock, flags); in ib_fmr_pool_map_phys()
448 if (pool->cache_bucket) { in ib_fmr_pool_map_phys()
453 spin_lock_irqsave(&pool->pool_lock, flags); in ib_fmr_pool_map_phys()
455 pool->cache_bucket + ib_fmr_hash(fmr->page_list[0])); in ib_fmr_pool_map_phys()
456 spin_unlock_irqrestore(&pool->pool_lock, flags); in ib_fmr_pool_map_phys()
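The map path (lines 397-456) first consults the cache; on a miss it pops an FMR off the free list, drops the lock for the hardware map, puts the FMR back on the free list if the map fails, and on success inserts the new mapping into a cache bucket. In outline (hypothetical types; hw_map() stands in for the hardware map call, and cache insertion is omitted):

    #include <pthread.h>
    #include <stddef.h>
    #include <stdint.h>

    struct pfmr {
            struct pfmr *next;
    };

    struct ppool {
            pthread_mutex_t lock;
            struct pfmr    *free_list;
    };

    static struct pfmr *cache_lookup(struct ppool *p, uint64_t *pages, int n)
    { (void)p; (void)pages; (void)n; return NULL; } /* see earlier sketch */

    static int hw_map(struct pfmr *f, uint64_t *pages, int n)
    { (void)f; (void)pages; (void)n; return 0; }    /* hardware map stand-in */

    static struct pfmr *pool_map(struct ppool *p, uint64_t *pages, int n)
    {
            struct pfmr *f;

            pthread_mutex_lock(&p->lock);
            f = cache_lookup(p, pages, n);  /* cache hit: reuse the mapping */
            if (f) {
                    pthread_mutex_unlock(&p->lock);
                    return f;
            }
            f = p->free_list;               /* cache miss: take a free FMR */
            if (!f) {
                    pthread_mutex_unlock(&p->lock);
                    return NULL;            /* pool exhausted */
            }
            p->free_list = f->next;
            pthread_mutex_unlock(&p->lock);

            if (hw_map(f, pages, n) != 0) { /* map with the lock dropped */
                    pthread_mutex_lock(&p->lock);
                    f->next = p->free_list; /* put it back on failure */
                    p->free_list = f;
                    pthread_mutex_unlock(&p->lock);
                    return NULL;
            }
            return f;
    }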
472 struct ib_fmr_pool *pool; in ib_fmr_pool_unmap() local
475 pool = fmr->pool; in ib_fmr_pool_unmap()
477 spin_lock_irqsave(&pool->pool_lock, flags); in ib_fmr_pool_unmap()
481 if (fmr->remap_count < pool->max_remaps) { in ib_fmr_pool_unmap()
482 list_add_tail(&fmr->list, &pool->free_list); in ib_fmr_pool_unmap()
484 list_add_tail(&fmr->list, &pool->dirty_list); in ib_fmr_pool_unmap()
485 if (++pool->dirty_len >= pool->dirty_watermark) { in ib_fmr_pool_unmap()
486 atomic_inc(&pool->req_ser); in ib_fmr_pool_unmap()
487 kthread_queue_work(pool->worker, &pool->work); in ib_fmr_pool_unmap()
492 spin_unlock_irqrestore(&pool->pool_lock, flags); in ib_fmr_pool_unmap()
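Finally, ib_fmr_pool_unmap() (lines 472-492) decides where a released FMR goes: back to the free list while its remap count is under max_remaps, otherwise onto the dirty list; crossing dirty_watermark bumps req_ser and kicks the worker so unmaps stay batched. The decision in isolation (hypothetical fields; list handling elided):

    /* Sketch of the requeue decision: reusable FMRs return to the free
     * list, exhausted ones accumulate on the dirty list until the
     * watermark triggers a batched flush. */
    struct upool {
            int dirty_len;
            int dirty_watermark;
            int max_remaps;
            int req_ser;
    };

    static void pool_unmap(struct upool *p, int remap_count,
                           void (*queue_flush)(struct upool *))
    {
            if (remap_count < p->max_remaps) {
                    /* still remappable: back onto the free list (elided) */
            } else {
                    /* exhausted: onto the dirty list (elided) */
                    if (++p->dirty_len >= p->dirty_watermark) {
                            p->req_ser++;   /* atomic_inc(&req_ser) */
                            queue_flush(p); /* kthread_queue_work() analogue */
                    }
            }
    }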