Lines matching refs:pool (cross-reference listing; the fragments below are from drivers/infiniband/core/fmr_pool.c, the IB FMR pool implementation)

95 	void                     (*flush_function)(struct ib_fmr_pool *pool,
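The flush_function pointer above is the consumer-supplied callback that the cleanup worker invokes after each batch of dirty FMRs has been unmapped (see line 187 below). A minimal sketch of such a callback; only the signature comes from the listing, while "my_ctx" and the counter are hypothetical:

    /* Hypothetical consumer callback matching the flush_function
     * signature; installed via ib_fmr_pool_param.flush_function, with
     * "arg" being the flush_arg supplied at pool creation. */
    struct my_ctx {
    	atomic_t flushes;
    };

    static void my_flush_function(struct ib_fmr_pool *pool, void *arg)
    {
    	struct my_ctx *ctx = arg;

    	atomic_inc(&ctx->flushes);	/* e.g. update stats, wake a waiter */
    }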
115 static inline struct ib_pool_fmr *ib_fmr_cache_lookup(struct ib_fmr_pool *pool, in ib_fmr_cache_lookup() argument
123 if (!pool->cache_bucket) in ib_fmr_cache_lookup()
126 bucket = pool->cache_bucket + ib_fmr_hash(*page_list); in ib_fmr_cache_lookup()
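ib_fmr_cache_lookup() hashes the first page address of the candidate mapping into pool->cache_bucket and walks that bucket comparing the full key (I/O virtual address, page count, page list). A hedged reconstruction built around the fragments above; the jhash-based hash and the power-of-two bucket count follow this file's conventions but should be treated as assumptions:

    static inline u32 ib_fmr_hash(u64 first_page)
    {
    	/* Bucket index from the first page address; the hash size is a
    	 * power of two, so the mask selects a bucket. */
    	return jhash_2words((u32) first_page, (u32) (first_page >> 32), 0) &
    		(IB_FMR_HASH_SIZE - 1);
    }

    /* Caller must hold pool->pool_lock. */
    static inline struct ib_pool_fmr *ib_fmr_cache_lookup(struct ib_fmr_pool *pool,
    						      u64 *page_list,
    						      int  page_list_len,
    						      u64  io_virtual_address)
    {
    	struct hlist_head  *bucket;
    	struct ib_pool_fmr *fmr;

    	if (!pool->cache_bucket)
    		return NULL;

    	bucket = pool->cache_bucket + ib_fmr_hash(*page_list);

    	hlist_for_each_entry(fmr, bucket, cache_node)
    		if (io_virtual_address == fmr->io_virtual_address &&
    		    page_list_len      == fmr->page_list_len      &&
    		    !memcmp(page_list, fmr->page_list,
    			    page_list_len * sizeof *page_list))
    			return fmr;

    	return NULL;
    }

A hit means an identical mapping already exists, so ib_fmr_pool_map_phys() can reuse it without touching the hardware.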
138 static void ib_fmr_batch_release(struct ib_fmr_pool *pool) in ib_fmr_batch_release() argument
145 spin_lock_irq(&pool->pool_lock); in ib_fmr_batch_release()
147 list_for_each_entry(fmr, &pool->dirty_list, list) { in ib_fmr_batch_release()
160 list_splice_init(&pool->dirty_list, &unmap_list); in ib_fmr_batch_release()
161 pool->dirty_len = 0; in ib_fmr_batch_release()
163 spin_unlock_irq(&pool->pool_lock); in ib_fmr_batch_release()
173 spin_lock_irq(&pool->pool_lock); in ib_fmr_batch_release()
174 list_splice(&unmap_list, &pool->free_list); in ib_fmr_batch_release()
175 spin_unlock_irq(&pool->pool_lock); in ib_fmr_batch_release()
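Between draining dirty_list and splicing the entries back onto free_list, the function unmaps the whole batch with a single verbs call. A hedged reconstruction of ib_fmr_batch_release() around the fragments above (the warning text is an assumption):

    static void ib_fmr_batch_release(struct ib_fmr_pool *pool)
    {
    	struct ib_pool_fmr *fmr;
    	LIST_HEAD(unmap_list);
    	LIST_HEAD(fmr_list);
    	int ret;

    	spin_lock_irq(&pool->pool_lock);

    	/* Pull every dirty entry out of the cache, reset its remap
    	 * budget, and collect the underlying struct ib_fmr objects so
    	 * they can all be unmapped at once. */
    	list_for_each_entry(fmr, &pool->dirty_list, list) {
    		hlist_del_init(&fmr->cache_node);
    		fmr->remap_count = 0;
    		list_add_tail(&fmr->fmr->list, &fmr_list);
    	}

    	list_splice_init(&pool->dirty_list, &unmap_list);
    	pool->dirty_len = 0;

    	spin_unlock_irq(&pool->pool_lock);

    	if (list_empty(&unmap_list))
    		return;

    	/* One verbs call unmaps the whole batch. */
    	ret = ib_unmap_fmr(&fmr_list);
    	if (ret)
    		pr_warn("ib_unmap_fmr returned %d\n", ret);

    	spin_lock_irq(&pool->pool_lock);
    	list_splice(&unmap_list, &pool->free_list);
    	spin_unlock_irq(&pool->pool_lock);
    }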
180 struct ib_fmr_pool *pool = container_of(work, struct ib_fmr_pool, work); in ib_fmr_cleanup_func() local
182 ib_fmr_batch_release(pool); in ib_fmr_cleanup_func()
183 atomic_inc(&pool->flush_ser); in ib_fmr_cleanup_func()
184 wake_up_interruptible(&pool->force_wait); in ib_fmr_cleanup_func()
186 if (pool->flush_function) in ib_fmr_cleanup_func()
187 pool->flush_function(pool, pool->flush_arg); in ib_fmr_cleanup_func()
189 if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) in ib_fmr_cleanup_func()
190 kthread_queue_work(pool->worker, &pool->work); in ib_fmr_cleanup_func()
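The req_ser/flush_ser pair at lines 183 and 189 is a serial-number handshake: each flush request bumps req_ser, the worker bumps flush_ser after every batch, and the worker requeues itself while flush_ser is still behind req_ser. The signed subtraction keeps the comparison correct across counter wraparound. A standalone illustration (plain userspace C, not kernel code) of why:

    #include <stdio.h>
    #include <stdint.h>

    /* "Behind" test as used by the cleanup worker: the signed 32-bit
     * difference stays correct even when the counters overflow, provided
     * they remain within 2^31 of each other. */
    static int behind(uint32_t flush_ser, uint32_t req_ser)
    {
    	return (int32_t)(flush_ser - req_ser) < 0;
    }

    int main(void)
    {
    	printf("%d\n", behind(0xfffffffeu, 1u));	/* 1: still behind across the wrap */
    	printf("%d\n", behind(1u, 0xfffffffeu));	/* 0: caught up */
    	return 0;
    }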
205 struct ib_fmr_pool *pool; in ib_create_fmr_pool() local
225 pool = kmalloc(sizeof *pool, GFP_KERNEL); in ib_create_fmr_pool()
226 if (!pool) in ib_create_fmr_pool()
229 pool->cache_bucket = NULL; in ib_create_fmr_pool()
230 pool->flush_function = params->flush_function; in ib_create_fmr_pool()
231 pool->flush_arg = params->flush_arg; in ib_create_fmr_pool()
233 INIT_LIST_HEAD(&pool->free_list); in ib_create_fmr_pool()
234 INIT_LIST_HEAD(&pool->dirty_list); in ib_create_fmr_pool()
237 pool->cache_bucket = in ib_create_fmr_pool()
239 sizeof(*pool->cache_bucket), in ib_create_fmr_pool()
241 if (!pool->cache_bucket) { in ib_create_fmr_pool()
247 INIT_HLIST_HEAD(pool->cache_bucket + i); in ib_create_fmr_pool()
250 pool->pool_size = 0; in ib_create_fmr_pool()
251 pool->max_pages = params->max_pages_per_fmr; in ib_create_fmr_pool()
252 pool->max_remaps = max_remaps; in ib_create_fmr_pool()
253 pool->dirty_watermark = params->dirty_watermark; in ib_create_fmr_pool()
254 pool->dirty_len = 0; in ib_create_fmr_pool()
255 spin_lock_init(&pool->pool_lock); in ib_create_fmr_pool()
256 atomic_set(&pool->req_ser, 0); in ib_create_fmr_pool()
257 atomic_set(&pool->flush_ser, 0); in ib_create_fmr_pool()
258 init_waitqueue_head(&pool->force_wait); in ib_create_fmr_pool()
260 pool->worker = kthread_create_worker(0, "ib_fmr(%s)", device->name); in ib_create_fmr_pool()
261 if (IS_ERR(pool->worker)) { in ib_create_fmr_pool()
263 ret = PTR_ERR(pool->worker); in ib_create_fmr_pool()
266 kthread_init_work(&pool->work, ib_fmr_cleanup_func); in ib_create_fmr_pool()
272 .max_maps = pool->max_remaps, in ib_create_fmr_pool()
277 if (pool->cache_bucket) in ib_create_fmr_pool()
285 fmr->pool = pool; in ib_create_fmr_pool()
298 list_add_tail(&fmr->list, &pool->free_list); in ib_create_fmr_pool()
299 ++pool->pool_size; in ib_create_fmr_pool()
303 return pool; in ib_create_fmr_pool()
306 kfree(pool->cache_bucket); in ib_create_fmr_pool()
307 kfree(pool); in ib_create_fmr_pool()
312 ib_destroy_fmr_pool(pool); in ib_create_fmr_pool()
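From a consumer's point of view, pool creation is just filling in struct ib_fmr_pool_param and calling ib_create_fmr_pool(). A hedged usage sketch modeled on how ULPs such as ib_srp size their pools; the helper name and the sizing constants are assumptions:

    static struct ib_fmr_pool *my_alloc_fmr_pool(struct ib_pd *pd,
    					     int max_pages_per_mr,
    					     int mr_page_shift)
    {
    	struct ib_fmr_pool_param fmr_param;

    	memset(&fmr_param, 0, sizeof(fmr_param));
    	fmr_param.pool_size	    = 1024;	/* FMRs preallocated up front */
    	fmr_param.dirty_watermark   = fmr_param.pool_size / 4;
    	fmr_param.cache		    = 1;	/* enable the mapping cache */
    	fmr_param.max_pages_per_fmr = max_pages_per_mr;
    	fmr_param.page_shift	    = mr_page_shift;
    	fmr_param.access	    = IB_ACCESS_LOCAL_WRITE |
    				      IB_ACCESS_REMOTE_READ |
    				      IB_ACCESS_REMOTE_WRITE;

    	/* Returns a valid pool or an ERR_PTR() value. */
    	return ib_create_fmr_pool(pd, &fmr_param);
    }

Note the two error paths visible above: early failures free the pool directly (lines 306–307), while failures after FMRs have been allocated go through ib_destroy_fmr_pool() (line 312).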
324 void ib_destroy_fmr_pool(struct ib_fmr_pool *pool) in ib_destroy_fmr_pool() argument
331 kthread_destroy_worker(pool->worker); in ib_destroy_fmr_pool()
332 ib_fmr_batch_release(pool); in ib_destroy_fmr_pool()
335 list_for_each_entry_safe(fmr, tmp, &pool->free_list, list) { in ib_destroy_fmr_pool()
347 if (i < pool->pool_size) in ib_destroy_fmr_pool()
349 pool->pool_size - i); in ib_destroy_fmr_pool()
351 kfree(pool->cache_bucket); in ib_destroy_fmr_pool()
352 kfree(pool); in ib_destroy_fmr_pool()
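The loop at line 335 is shown without its body; it releases every remaining FMR, unmapping any that are still mapped before deallocating them. A hedged reconstruction of the elided body (the warning text is an assumption):

    	i = 0;
    	list_for_each_entry_safe(fmr, tmp, &pool->free_list, list) {
    		if (fmr->remap_count) {
    			/* Still mapped: unmap before deallocating. */
    			INIT_LIST_HEAD(&fmr_list);
    			list_add_tail(&fmr->fmr->list, &fmr_list);
    			ib_unmap_fmr(&fmr_list);
    		}

    		ib_dealloc_fmr(fmr->fmr);
    		list_del(&fmr->list);
    		kfree(fmr);
    		++i;
    	}

    	if (i < pool->pool_size)
    		pr_warn("pool still has %d regions registered\n",
    			pool->pool_size - i);

Any FMR still held by a consumer never returns to free_list, so it is skipped here; the resulting count mismatch is what the check at line 347 warns about.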
362 int ib_flush_fmr_pool(struct ib_fmr_pool *pool) in ib_flush_fmr_pool() argument
373 spin_lock_irq(&pool->pool_lock); in ib_flush_fmr_pool()
374 list_for_each_entry_safe(fmr, next, &pool->free_list, list) { in ib_flush_fmr_pool()
376 list_move(&fmr->list, &pool->dirty_list); in ib_flush_fmr_pool()
378 spin_unlock_irq(&pool->pool_lock); in ib_flush_fmr_pool()
380 serial = atomic_inc_return(&pool->req_ser); in ib_flush_fmr_pool()
381 kthread_queue_work(pool->worker, &pool->work); in ib_flush_fmr_pool()
383 if (wait_event_interruptible(pool->force_wait, in ib_flush_fmr_pool()
384 atomic_read(&pool->flush_ser) - serial >= 0)) in ib_flush_fmr_pool()
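ib_flush_fmr_pool() first moves used-but-not-yet-dirty FMRs from free_list to dirty_list (lines 374–376) so the worker reaps them too, then bumps req_ser and sleeps until flush_ser catches up to its serial. A hedged usage sketch; the -EINTR return on an interrupted wait reflects the wait_event_interruptible() call above but should be verified against your kernel:

    /* Force a full unmap cycle, e.g. before tearing down a connection.
     * Returns 0 once the cleanup worker has completed a flush issued
     * after this call, or -EINTR if the wait was interrupted. */
    static int my_quiesce_fmr_pool(struct ib_fmr_pool *pool)
    {
    	int ret = ib_flush_fmr_pool(pool);

    	if (ret)
    		pr_warn("FMR pool flush failed: %d\n", ret);
    	return ret;
    }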
403 struct ib_fmr_pool *pool = pool_handle; in ib_fmr_pool_map_phys() local
408 if (list_len < 1 || list_len > pool->max_pages) in ib_fmr_pool_map_phys()
411 spin_lock_irqsave(&pool->pool_lock, flags); in ib_fmr_pool_map_phys()
412 fmr = ib_fmr_cache_lookup(pool, in ib_fmr_pool_map_phys()
423 spin_unlock_irqrestore(&pool->pool_lock, flags); in ib_fmr_pool_map_phys()
428 if (list_empty(&pool->free_list)) { in ib_fmr_pool_map_phys()
429 spin_unlock_irqrestore(&pool->pool_lock, flags); in ib_fmr_pool_map_phys()
433 fmr = list_entry(pool->free_list.next, struct ib_pool_fmr, list); in ib_fmr_pool_map_phys()
436 spin_unlock_irqrestore(&pool->pool_lock, flags); in ib_fmr_pool_map_phys()
442 spin_lock_irqsave(&pool->pool_lock, flags); in ib_fmr_pool_map_phys()
443 list_add(&fmr->list, &pool->free_list); in ib_fmr_pool_map_phys()
444 spin_unlock_irqrestore(&pool->pool_lock, flags); in ib_fmr_pool_map_phys()
454 if (pool->cache_bucket) { in ib_fmr_pool_map_phys()
459 spin_lock_irqsave(&pool->pool_lock, flags); in ib_fmr_pool_map_phys()
461 pool->cache_bucket + ib_fmr_hash(fmr->page_list[0])); in ib_fmr_pool_map_phys()
462 spin_unlock_irqrestore(&pool->pool_lock, flags); in ib_fmr_pool_map_phys()
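ib_fmr_pool_map_phys() either reuses a cached mapping (lines 412–423) or takes an FMR off free_list and maps it, inserting the result into the cache bucket keyed by the first page (line 461). A hedged usage sketch; "pages" must hold device-page-aligned DMA addresses, and the helper name is an assumption:

    static struct ib_pool_fmr *my_map_pages(struct ib_fmr_pool *pool,
    					u64 *pages, int npages, u64 iova,
    					u32 *rkey)
    {
    	struct ib_pool_fmr *pfmr;

    	/* Fails with ERR_PTR(-EINVAL) for a bad page count (line 408)
    	 * and with an ERR_PTR value, presumably -EAGAIN, when the free
    	 * list is exhausted (line 428). */
    	pfmr = ib_fmr_pool_map_phys(pool, pages, npages, iova);
    	if (IS_ERR(pfmr))
    		return pfmr;

    	/* The underlying verbs FMR carries the keys for this mapping. */
    	*rkey = pfmr->fmr->rkey;
    	return pfmr;
    }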
478 struct ib_fmr_pool *pool; in ib_fmr_pool_unmap() local
481 pool = fmr->pool; in ib_fmr_pool_unmap()
483 spin_lock_irqsave(&pool->pool_lock, flags); in ib_fmr_pool_unmap()
487 if (fmr->remap_count < pool->max_remaps) { in ib_fmr_pool_unmap()
488 list_add_tail(&fmr->list, &pool->free_list); in ib_fmr_pool_unmap()
490 list_add_tail(&fmr->list, &pool->dirty_list); in ib_fmr_pool_unmap()
491 if (++pool->dirty_len >= pool->dirty_watermark) { in ib_fmr_pool_unmap()
492 atomic_inc(&pool->req_ser); in ib_fmr_pool_unmap()
493 kthread_queue_work(pool->worker, &pool->work); in ib_fmr_pool_unmap()
504 spin_unlock_irqrestore(&pool->pool_lock, flags); in ib_fmr_pool_unmap()
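ib_fmr_pool_unmap() does not unmap anything immediately: once the last reference drops, the entry goes back on free_list while it still has remap budget (line 487), or onto dirty_list otherwise; crossing dirty_watermark queues the cleanup worker (lines 491–493). A hedged usage sketch pairing with the map helper above:

    /* Release a mapping obtained from ib_fmr_pool_map_phys(). The pages
     * can stay mapped until the worker's next batch release, so treat
     * the rkey as live until the pool has been flushed. */
    static void my_unmap_pages(struct ib_pool_fmr *pfmr)
    {
    	/* The listing shows no error path, but checking the int return
    	 * is harmless. */
    	int ret = ib_fmr_pool_unmap(pfmr);

    	if (ret)
    		pr_warn("ib_fmr_pool_unmap returned %d\n", ret);
    }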