Lines matching refs: pool (drivers/infiniband/sw/rxe/rxe_pool.c)

static inline const char *pool_name(struct rxe_pool *pool)
{
        return rxe_type_info[pool->type].name;
}

static inline struct kmem_cache *pool_cache(struct rxe_pool *pool)
{
        return rxe_type_info[pool->type].cache;
}

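/*
 * Both helpers index a static per-type descriptor table. A sketch of one
 * entry, reconstructed from the same driver's header; exact members vary
 * across kernel versions, so treat this as illustrative, not definitive.
 */
struct rxe_type_info {
        const char              *name;
        size_t                  size;           /* object size for the cache */
        void                    (*cleanup)(struct rxe_pool_entry *obj);
        enum rxe_pool_flags     flags;          /* RXE_POOL_INDEX, RXE_POOL_KEY, ... */
        u32                     max_index;
        u32                     min_index;
        size_t                  key_offset;     /* offset of the key inside the object */
        size_t                  key_size;
        struct kmem_cache       *cache;
};
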
static int rxe_pool_init_index(struct rxe_pool *pool, u32 max, u32 min)
{
        size_t size;

        /* the index range must be able to cover every element */
        if ((max - min + 1) < pool->max_elem)
                return -EINVAL;

        pool->max_index = max;
        pool->min_index = min;

        size = BITS_TO_LONGS(max - min + 1) * sizeof(long);
        pool->table = kmalloc(size, GFP_KERNEL);
        if (!pool->table)
                return -ENOMEM;

        pool->table_size = size;
        bitmap_zero(pool->table, max - min + 1);
        return 0;
}

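/*
 * Sizing example with assumed numbers: for min = 0x10 and max = 0x2f the
 * range is 32 indices, so on a 64-bit kernel BITS_TO_LONGS(32) = 1 and
 * pool->table occupies a single 8-byte long.
 */
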
int rxe_pool_init(struct rxe_dev *rxe, struct rxe_pool *pool,
                  enum rxe_elem_type type, unsigned int max_elem)
{
        int err = 0;
        size_t size = rxe_type_info[type].size;

        memset(pool, 0, sizeof(*pool));

        pool->rxe = rxe;
        pool->type = type;
        pool->max_elem = max_elem;
        pool->elem_size = ALIGN(size, RXE_POOL_ALIGN);
        pool->flags = rxe_type_info[type].flags;
        pool->tree = RB_ROOT;
        pool->cleanup = rxe_type_info[type].cleanup;

        atomic_set(&pool->num_elem, 0);
        kref_init(&pool->ref_cnt);
        rwlock_init(&pool->pool_lock);

        if (rxe_type_info[type].flags & RXE_POOL_INDEX) {
                err = rxe_pool_init_index(pool,
                                          rxe_type_info[type].max_index,
                                          rxe_type_info[type].min_index);
                if (err)
                        goto out;
        }

        if (rxe_type_info[type].flags & RXE_POOL_KEY) {
                pool->key_offset = rxe_type_info[type].key_offset;
                pool->key_size = rxe_type_info[type].key_size;
        }

        pool->state = RXE_POOL_STATE_VALID;
out:
        return err;
}

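/*
 * Usage sketch, assuming the device-init path: one pool per object type,
 * sized from the device attributes. The exact call site and error label
 * are assumptions, patterned on the driver's init code.
 */
err = rxe_pool_init(rxe, &rxe->qp_pool, RXE_TYPE_QP, rxe->attr.max_qp);
if (err)
        goto err_free_pools;    /* hypothetical cleanup label */
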
static void rxe_pool_release(struct kref *kref)
{
        struct rxe_pool *pool = container_of(kref, struct rxe_pool, ref_cnt);

        pool->state = RXE_POOL_STATE_INVALID;
        kfree(pool->table);
}

static void rxe_pool_put(struct rxe_pool *pool)
{
        kref_put(&pool->ref_cnt, rxe_pool_release);
}

void rxe_pool_cleanup(struct rxe_pool *pool)
{
        unsigned long flags;

        write_lock_irqsave(&pool->pool_lock, flags);
        pool->state = RXE_POOL_STATE_INVALID;
        if (atomic_read(&pool->num_elem) > 0)
                pr_warn("%s pool destroyed with unfree'd elem\n",
                        pool_name(pool));
        write_unlock_irqrestore(&pool->pool_lock, flags);

        rxe_pool_put(pool);
}

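/*
 * Teardown sketch with assumed call sites: the device-teardown path
 * cleans each pool up; the pool kref keeps the index table alive until
 * the last outstanding element drops its reference.
 */
rxe_pool_cleanup(&rxe->qp_pool);
rxe_pool_cleanup(&rxe->mr_pool);
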
static u32 alloc_index(struct rxe_pool *pool)
{
        u32 index;
        u32 range = pool->max_index - pool->min_index + 1;

        /* round-robin scan: resume after the last allocated bit and
         * wrap to the front of the bitmap if the tail is exhausted */
        index = find_next_zero_bit(pool->table, range, pool->last);
        if (index >= range)
                index = find_first_zero_bit(pool->table, range);

        WARN_ON_ONCE(index >= range);
        set_bit(index, pool->table);
        pool->last = index;
        return index + pool->min_index;
}

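/*
 * Worked example with assumed numbers: min_index = 16, max_index = 19,
 * so range = 4. If bits 0 and 1 are set and pool->last = 1, the first
 * scan finds bit 2; set_bit() claims it, last becomes 2, and the caller
 * receives 2 + 16 = 18. Only when every bit from last to range - 1 is
 * taken does the second scan wrap to the front of the bitmap.
 */
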
static void insert_index(struct rxe_pool *pool, struct rxe_pool_entry *new)
{
        struct rb_node **link = &pool->tree.rb_node;
        struct rb_node *parent = NULL;

        /* ... rb-tree descent elided: same shape as insert_key() below,
         * but ordered on elem->index, with duplicates rejected ... */

        rb_link_node(&new->node, parent, link);
        rb_insert_color(&new->node, &pool->tree);
}

static void insert_key(struct rxe_pool *pool, struct rxe_pool_entry *new)
{
        struct rb_node **link = &pool->tree.rb_node;
        struct rb_node *parent = NULL;
        struct rxe_pool_entry *elem;
        int cmp;

        while (*link) {
                parent = *link;
                elem = rb_entry(parent, struct rxe_pool_entry, node);

                cmp = memcmp((u8 *)elem + pool->key_offset,
                             (u8 *)new + pool->key_offset, pool->key_size);
                if (cmp == 0) {
                        pr_warn("key already exists!\n");
                        return;
                }

                link = (cmp > 0) ? &(*link)->rb_left : &(*link)->rb_right;
        }

        rb_link_node(&new->node, parent, link);
        rb_insert_color(&new->node, &pool->tree);
}

void rxe_add_key(void *arg, void *key)
{
        struct rxe_pool_entry *elem = arg;
        struct rxe_pool *pool = elem->pool;
        unsigned long flags;

        write_lock_irqsave(&pool->pool_lock, flags);
        memcpy((u8 *)elem + pool->key_offset, key, pool->key_size);
        insert_key(pool, elem);
        write_unlock_irqrestore(&pool->pool_lock, flags);
}

void rxe_drop_key(void *arg)
{
        struct rxe_pool_entry *elem = arg;
        struct rxe_pool *pool = elem->pool;
        unsigned long flags;

        write_lock_irqsave(&pool->pool_lock, flags);
        rb_erase(&elem->node, &pool->tree);
        write_unlock_irqrestore(&pool->pool_lock, flags);
}

void rxe_add_index(void *arg)
{
        struct rxe_pool_entry *elem = arg;
        struct rxe_pool *pool = elem->pool;
        unsigned long flags;

        write_lock_irqsave(&pool->pool_lock, flags);
        elem->index = alloc_index(pool);
        insert_index(pool, elem);
        write_unlock_irqrestore(&pool->pool_lock, flags);
}

void rxe_drop_index(void *arg)
{
        struct rxe_pool_entry *elem = arg;
        struct rxe_pool *pool = elem->pool;
        unsigned long flags;

        write_lock_irqsave(&pool->pool_lock, flags);
        clear_bit(elem->index - pool->min_index, pool->table);
        rb_erase(&elem->node, &pool->tree);
        write_unlock_irqrestore(&pool->pool_lock, flags);
}

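/*
 * Lifecycle sketch for an indexed object, patterned on the driver's
 * verbs code; the QP call sites shown here are assumptions.
 */
qp = rxe_alloc(&rxe->qp_pool);
rxe_add_index(qp);              /* qp now reachable via rxe_pool_get_index() */
/* ... object in service ... */
rxe_drop_index(qp);             /* release the index bit and rb node */
rxe_drop_ref(qp);               /* last ref frees the memory */
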
void *rxe_alloc(struct rxe_pool *pool)
{
        struct rxe_pool_entry *elem;
        unsigned long flags;

        might_sleep_if(!(pool->flags & RXE_POOL_ATOMIC));

        read_lock_irqsave(&pool->pool_lock, flags);
        if (pool->state != RXE_POOL_STATE_VALID) {
                read_unlock_irqrestore(&pool->pool_lock, flags);
                return NULL;
        }
        kref_get(&pool->ref_cnt);
        read_unlock_irqrestore(&pool->pool_lock, flags);

        if (!ib_device_try_get(&pool->rxe->ib_dev))
                goto out_put_pool;

        if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
                goto out_cnt;

        elem = kmem_cache_zalloc(pool_cache(pool),
                                 (pool->flags & RXE_POOL_ATOMIC) ?
                                 GFP_ATOMIC : GFP_KERNEL);
        if (!elem)
                goto out_cnt;

        elem->pool = pool;
        kref_init(&elem->ref_cnt);

        return elem;

out_cnt:
        atomic_dec(&pool->num_elem);
        ib_device_put(&pool->rxe->ib_dev);
out_put_pool:
        rxe_pool_put(pool);
        return NULL;
}

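/*
 * Caller-side sketch, assuming the MR pool from the same driver. NULL
 * covers both "pool no longer valid" and allocation failure.
 */
struct rxe_mr *mr = rxe_alloc(&rxe->mr_pool);
if (!mr)
        return ERR_PTR(-ENOMEM);
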
int rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_entry *elem)
{
        unsigned long flags;

        might_sleep_if(!(pool->flags & RXE_POOL_ATOMIC));

        read_lock_irqsave(&pool->pool_lock, flags);
        if (pool->state != RXE_POOL_STATE_VALID) {
                read_unlock_irqrestore(&pool->pool_lock, flags);
                return -EINVAL;
        }
        kref_get(&pool->ref_cnt);
        read_unlock_irqrestore(&pool->pool_lock, flags);

        if (!ib_device_try_get(&pool->rxe->ib_dev))
                goto out_put_pool;

        if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
                goto out_cnt;

        elem->pool = pool;
        kref_init(&elem->ref_cnt);

        return 0;

out_cnt:
        atomic_dec(&pool->num_elem);
        ib_device_put(&pool->rxe->ib_dev);
out_put_pool:
        rxe_pool_put(pool);
        return -EINVAL;
}

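/*
 * rxe_add_to_pool() serves the RXE_POOL_NO_ALLOC types whose memory the
 * ib_core layer allocates. A sketch of the PD case, patterned on the
 * driver's alloc_pd verb; helper names are from the same driver.
 */
static int rxe_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
        struct rxe_dev *rxe = to_rdev(ibpd->device);
        struct rxe_pd *pd = to_rpd(ibpd);

        return rxe_add_to_pool(&rxe->pd_pool, &pd->pelem);
}
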
void rxe_elem_release(struct kref *kref)
{
        struct rxe_pool_entry *elem =
                container_of(kref, struct rxe_pool_entry, ref_cnt);
        struct rxe_pool *pool = elem->pool;

        if (pool->cleanup)
                pool->cleanup(elem);

        if (!(pool->flags & RXE_POOL_NO_ALLOC))
                kmem_cache_free(pool_cache(pool), elem);
        atomic_dec(&pool->num_elem);
        ib_device_put(&pool->rxe->ib_dev);
        rxe_pool_put(pool);
}

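/*
 * rxe_elem_release() is only ever invoked as a kref release hook, via
 * the driver's ref-counting macros. Reconstructed from the driver's
 * main header; the exact form may differ by kernel version.
 */
#define rxe_add_ref(obj)  kref_get(&(obj)->pelem.ref_cnt)
#define rxe_drop_ref(obj) kref_put(&(obj)->pelem.ref_cnt, rxe_elem_release)
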
void *rxe_pool_get_index(struct rxe_pool *pool, u32 index)
{
        struct rb_node *node = NULL;
        struct rxe_pool_entry *elem = NULL;
        unsigned long flags;

        read_lock_irqsave(&pool->pool_lock, flags);

        if (pool->state != RXE_POOL_STATE_VALID)
                goto out;

        node = pool->tree.rb_node;

        while (node) {
                elem = rb_entry(node, struct rxe_pool_entry, node);

                if (elem->index > index)
                        node = node->rb_left;
                else if (elem->index < index)
                        node = node->rb_right;
                else {
                        kref_get(&elem->ref_cnt);       /* caller owns a reference */
                        break;
                }
        }

out:
        read_unlock_irqrestore(&pool->pool_lock, flags);
        return node ? elem : NULL;
}

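/*
 * Lookup sketch: the receive path resolves the QP number carried in a
 * packet to a QP object; the surrounding code and labels are assumed.
 * The returned object carries a reference the caller must drop.
 */
qp = rxe_pool_get_index(&rxe->qp_pool, qpn);
if (!qp)
        goto drop;      /* hypothetical label: unknown QPN, drop the packet */
/* ... process the packet ... */
rxe_drop_ref(qp);
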
void *rxe_pool_get_key(struct rxe_pool *pool, void *key)
{
        struct rb_node *node = NULL;
        struct rxe_pool_entry *elem = NULL;
        int cmp;
        unsigned long flags;

        read_lock_irqsave(&pool->pool_lock, flags);

        if (pool->state != RXE_POOL_STATE_VALID)
                goto out;

        node = pool->tree.rb_node;

        while (node) {
                elem = rb_entry(node, struct rxe_pool_entry, node);

                cmp = memcmp((u8 *)elem + pool->key_offset,
                             key, pool->key_size);

                if (cmp > 0)
                        node = node->rb_left;
                else if (cmp < 0)
                        node = node->rb_right;
                else
                        break;
        }

        if (node)
                kref_get(&elem->ref_cnt);

out:
        read_unlock_irqrestore(&pool->pool_lock, flags);
        return node ? elem : NULL;
}

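/*
 * Keyed-lookup sketch: multicast groups are the keyed users, with the
 * group GID as the key. Patterned on the driver's mcast code; variable
 * names here are assumptions.
 */
struct rxe_mc_grp *grp = rxe_pool_get_key(&rxe->mc_grp_pool, mgid);
if (grp)
        rxe_drop_ref(grp);      /* drop the lookup reference when done */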