Lines Matching refs:pool
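These are the lines that reference the identifier pool in the RXE (Soft-RoCE) object pool code; the function names match drivers/infiniband/sw/rxe/rxe_pool.c. Each entry gives the source line number, the matching line, and the enclosing function, with a trailing "argument" or "local" marker showing how pool is bound at that point. The short sketches interleaved below reconstruct the surrounding functions from these fragments only; any detail that does not appear in a matched line (field names, error codes, warning text) is an assumption, not a quote of the source.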
89 static inline const char *pool_name(struct rxe_pool *pool) in pool_name() argument
91 return rxe_type_info[pool->type].name; in pool_name()
94 static int rxe_pool_init_index(struct rxe_pool *pool, u32 max, u32 min) in rxe_pool_init_index() argument
99 if ((max - min + 1) < pool->max_elem) { in rxe_pool_init_index()
105 pool->index.max_index = max; in rxe_pool_init_index()
106 pool->index.min_index = min; in rxe_pool_init_index()
109 pool->index.table = kmalloc(size, GFP_KERNEL); in rxe_pool_init_index()
110 if (!pool->index.table) { in rxe_pool_init_index()
115 pool->index.table_size = size; in rxe_pool_init_index()
116 bitmap_zero(pool->index.table, max - min + 1); in rxe_pool_init_index()
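A sketch of rxe_pool_init_index(), pieced together from the fragments above: it checks that the index range [min, max] is large enough for max_elem objects, records the bounds, and allocates a bitmap with one bit per possible index. The bitmap size computation, warning text, and error codes are not in the listing and are assumed here.

static int rxe_pool_init_index(struct rxe_pool *pool, u32 max, u32 min)
{
	int err = 0;
	size_t size;

	/* the index space must be able to hold every element the pool allows */
	if ((max - min + 1) < pool->max_elem) {
		pr_warn("not enough indices for max_elem\n");	/* assumed message */
		err = -EINVAL;
		goto out;
	}

	pool->index.max_index = max;
	pool->index.min_index = min;

	/* one bit per index in [min, max], rounded up to whole longs (assumed) */
	size = BITS_TO_LONGS(max - min + 1) * sizeof(long);
	pool->index.table = kmalloc(size, GFP_KERNEL);
	if (!pool->index.table) {
		err = -ENOMEM;
		goto out;
	}

	pool->index.table_size = size;
	bitmap_zero(pool->index.table, max - min + 1);

out:
	return err;
}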
124 struct rxe_pool *pool, in rxe_pool_init() argument
131 memset(pool, 0, sizeof(*pool)); in rxe_pool_init()
133 pool->rxe = rxe; in rxe_pool_init()
134 pool->type = type; in rxe_pool_init()
135 pool->max_elem = max_elem; in rxe_pool_init()
136 pool->elem_size = ALIGN(size, RXE_POOL_ALIGN); in rxe_pool_init()
137 pool->flags = rxe_type_info[type].flags; in rxe_pool_init()
138 pool->index.tree = RB_ROOT; in rxe_pool_init()
139 pool->key.tree = RB_ROOT; in rxe_pool_init()
140 pool->cleanup = rxe_type_info[type].cleanup; in rxe_pool_init()
142 atomic_set(&pool->num_elem, 0); in rxe_pool_init()
144 rwlock_init(&pool->pool_lock); in rxe_pool_init()
147 err = rxe_pool_init_index(pool, in rxe_pool_init()
155 pool->key.key_offset = rxe_type_info[type].key_offset; in rxe_pool_init()
156 pool->key.key_size = rxe_type_info[type].key_size; in rxe_pool_init()
163 void rxe_pool_cleanup(struct rxe_pool *pool) in rxe_pool_cleanup() argument
165 if (atomic_read(&pool->num_elem) > 0) in rxe_pool_cleanup()
167 pool_name(pool)); in rxe_pool_cleanup()
169 kfree(pool->index.table); in rxe_pool_cleanup()
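rxe_pool_cleanup() only warns if elements are still outstanding and then frees the index bitmap; the warning text is assumed.

void rxe_pool_cleanup(struct rxe_pool *pool)
{
	if (atomic_read(&pool->num_elem) > 0)
		pr_warn("%s pool destroyed with unfree'd elem\n",	/* assumed message */
			pool_name(pool));

	kfree(pool->index.table);
}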
172 static u32 alloc_index(struct rxe_pool *pool) in alloc_index() argument
175 u32 range = pool->index.max_index - pool->index.min_index + 1; in alloc_index()
177 index = find_next_zero_bit(pool->index.table, range, pool->index.last); in alloc_index()
179 index = find_first_zero_bit(pool->index.table, range); in alloc_index()
182 set_bit(index, pool->index.table); in alloc_index()
183 pool->index.last = index; in alloc_index()
184 return index + pool->index.min_index; in alloc_index()
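alloc_index() is a next-fit allocator over the bitmap: it resumes scanning after the previously allocated bit, falls back to a first-fit scan if that reaches the end of the range, marks the chosen bit, and biases the result by min_index. Everything except the wrap-around test and the sanity check is visible in the listing.

static u32 alloc_index(struct rxe_pool *pool)
{
	u32 index;
	u32 range = pool->index.max_index - pool->index.min_index + 1;

	/* next fit: continue from the last allocated position */
	index = find_next_zero_bit(pool->index.table, range, pool->index.last);
	if (index >= range)
		/* wrap around and take the first free bit */
		index = find_first_zero_bit(pool->index.table, range);

	WARN_ON_ONCE(index >= range);	/* assumed; callers must not overcommit the pool */
	set_bit(index, pool->index.table);
	pool->index.last = index;
	return index + pool->index.min_index;
}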
187 static int rxe_insert_index(struct rxe_pool *pool, struct rxe_pool_entry *new) in rxe_insert_index() argument
189 struct rb_node **link = &pool->index.tree.rb_node; in rxe_insert_index()
209 rb_insert_color(&new->index_node, &pool->index.tree); in rxe_insert_index()
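rxe_insert_index() is a standard rb-tree insertion ordered by elem->index, rejecting duplicates. The loop body between the two lines shown follows the usual rb_link_node()/rb_insert_color() pattern and is assumed.

static int rxe_insert_index(struct rxe_pool *pool, struct rxe_pool_entry *new)
{
	struct rb_node **link = &pool->index.tree.rb_node;
	struct rb_node *parent = NULL;
	struct rxe_pool_entry *elem;

	/* walk down to the insertion point, keeping the tree ordered by index */
	while (*link) {
		parent = *link;
		elem = rb_entry(parent, struct rxe_pool_entry, index_node);

		if (elem->index == new->index) {
			pr_warn("element already exists!\n");	/* assumed message */
			return -EINVAL;
		}

		if (elem->index > new->index)
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}

	rb_link_node(&new->index_node, parent, link);
	rb_insert_color(&new->index_node, &pool->index.tree);

	return 0;
}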
214 static int rxe_insert_key(struct rxe_pool *pool, struct rxe_pool_entry *new) in rxe_insert_key() argument
216 struct rb_node **link = &pool->key.tree.rb_node; in rxe_insert_key()
225 cmp = memcmp((u8 *)elem + pool->key.key_offset, in rxe_insert_key()
226 (u8 *)new + pool->key.key_offset, pool->key.key_size); in rxe_insert_key()
240 rb_insert_color(&new->key_node, &pool->key.tree); in rxe_insert_key()
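rxe_insert_key() is the same pattern, but ordered by memcmp() over key_size bytes at key_offset inside each object; equal keys are rejected. The duplicate check and the left/right decisions are assumptions consistent with the comparison shown.

static int rxe_insert_key(struct rxe_pool *pool, struct rxe_pool_entry *new)
{
	struct rb_node **link = &pool->key.tree.rb_node;
	struct rb_node *parent = NULL;
	struct rxe_pool_entry *elem;
	int cmp;

	while (*link) {
		parent = *link;
		elem = rb_entry(parent, struct rxe_pool_entry, key_node);

		cmp = memcmp((u8 *)elem + pool->key.key_offset,
			     (u8 *)new + pool->key.key_offset, pool->key.key_size);

		if (cmp == 0) {
			pr_warn("key already exists!\n");	/* assumed message */
			return -EINVAL;
		}

		if (cmp > 0)
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}

	rb_link_node(&new->key_node, parent, link);
	rb_insert_color(&new->key_node, &pool->key.tree);

	return 0;
}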
247 struct rxe_pool *pool = elem->pool; in __rxe_add_key_locked() local
250 memcpy((u8 *)elem + pool->key.key_offset, key, pool->key.key_size); in __rxe_add_key_locked()
251 err = rxe_insert_key(pool, elem); in __rxe_add_key_locked()
258 struct rxe_pool *pool = elem->pool; in __rxe_add_key() local
262 write_lock_irqsave(&pool->pool_lock, flags); in __rxe_add_key()
264 write_unlock_irqrestore(&pool->pool_lock, flags); in __rxe_add_key()
271 struct rxe_pool *pool = elem->pool; in __rxe_drop_key_locked() local
273 rb_erase(&elem->key_node, &pool->key.tree); in __rxe_drop_key_locked()
278 struct rxe_pool *pool = elem->pool; in __rxe_drop_key() local
281 write_lock_irqsave(&pool->pool_lock, flags); in __rxe_drop_key()
283 write_unlock_irqrestore(&pool->pool_lock, flags); in __rxe_drop_key()
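The key helpers come in _locked/unlocked pairs: the _locked variants assume the caller already holds pool_lock, and the plain variants just wrap them in write_lock_irqsave()/write_unlock_irqrestore(). Adding copies the caller's key into the object before inserting it into the key tree; dropping erases the node. A sketch of all four, with only declarations and return paths filled in:

int __rxe_add_key_locked(struct rxe_pool_entry *elem, void *key)
{
	struct rxe_pool *pool = elem->pool;
	int err;

	/* copy the key into the object, then index it in the key tree */
	memcpy((u8 *)elem + pool->key.key_offset, key, pool->key.key_size);
	err = rxe_insert_key(pool, elem);

	return err;
}

int __rxe_add_key(struct rxe_pool_entry *elem, void *key)
{
	struct rxe_pool *pool = elem->pool;
	unsigned long flags;
	int err;

	write_lock_irqsave(&pool->pool_lock, flags);
	err = __rxe_add_key_locked(elem, key);
	write_unlock_irqrestore(&pool->pool_lock, flags);

	return err;
}

void __rxe_drop_key_locked(struct rxe_pool_entry *elem)
{
	struct rxe_pool *pool = elem->pool;

	rb_erase(&elem->key_node, &pool->key.tree);
}

void __rxe_drop_key(struct rxe_pool_entry *elem)
{
	struct rxe_pool *pool = elem->pool;
	unsigned long flags;

	write_lock_irqsave(&pool->pool_lock, flags);
	__rxe_drop_key_locked(elem);
	write_unlock_irqrestore(&pool->pool_lock, flags);
}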
288 struct rxe_pool *pool = elem->pool; in __rxe_add_index_locked() local
291 elem->index = alloc_index(pool); in __rxe_add_index_locked()
292 err = rxe_insert_index(pool, elem); in __rxe_add_index_locked()
299 struct rxe_pool *pool = elem->pool; in __rxe_add_index() local
303 write_lock_irqsave(&pool->pool_lock, flags); in __rxe_add_index()
305 write_unlock_irqrestore(&pool->pool_lock, flags); in __rxe_add_index()
312 struct rxe_pool *pool = elem->pool; in __rxe_drop_index_locked() local
314 clear_bit(elem->index - pool->index.min_index, pool->index.table); in __rxe_drop_index_locked()
315 rb_erase(&elem->index_node, &pool->index.tree); in __rxe_drop_index_locked()
320 struct rxe_pool *pool = elem->pool; in __rxe_drop_index() local
323 write_lock_irqsave(&pool->pool_lock, flags); in __rxe_drop_index()
325 write_unlock_irqrestore(&pool->pool_lock, flags); in __rxe_drop_index()
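The index helpers mirror the key helpers: add allocates an index from the bitmap and inserts the element into the index tree; drop clears the bitmap bit (offset back by min_index) and erases the tree node; the unlocked variants take pool_lock around the _locked ones.

int __rxe_add_index_locked(struct rxe_pool_entry *elem)
{
	struct rxe_pool *pool = elem->pool;
	int err;

	elem->index = alloc_index(pool);
	err = rxe_insert_index(pool, elem);

	return err;
}

int __rxe_add_index(struct rxe_pool_entry *elem)
{
	struct rxe_pool *pool = elem->pool;
	unsigned long flags;
	int err;

	write_lock_irqsave(&pool->pool_lock, flags);
	err = __rxe_add_index_locked(elem);
	write_unlock_irqrestore(&pool->pool_lock, flags);

	return err;
}

void __rxe_drop_index_locked(struct rxe_pool_entry *elem)
{
	struct rxe_pool *pool = elem->pool;

	clear_bit(elem->index - pool->index.min_index, pool->index.table);
	rb_erase(&elem->index_node, &pool->index.tree);
}

void __rxe_drop_index(struct rxe_pool_entry *elem)
{
	struct rxe_pool *pool = elem->pool;
	unsigned long flags;

	write_lock_irqsave(&pool->pool_lock, flags);
	__rxe_drop_index_locked(elem);
	write_unlock_irqrestore(&pool->pool_lock, flags);
}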
328 void *rxe_alloc_locked(struct rxe_pool *pool) in rxe_alloc_locked() argument
330 struct rxe_type_info *info = &rxe_type_info[pool->type]; in rxe_alloc_locked()
334 if (atomic_inc_return(&pool->num_elem) > pool->max_elem) in rxe_alloc_locked()
343 elem->pool = pool; in rxe_alloc_locked()
349 atomic_dec(&pool->num_elem); in rxe_alloc_locked()
353 void *rxe_alloc(struct rxe_pool *pool) in rxe_alloc() argument
355 struct rxe_type_info *info = &rxe_type_info[pool->type]; in rxe_alloc()
359 if (atomic_inc_return(&pool->num_elem) > pool->max_elem) in rxe_alloc()
368 elem->pool = pool; in rxe_alloc()
374 atomic_dec(&pool->num_elem); in rxe_alloc()
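rxe_alloc_locked() and rxe_alloc() follow the same shape: bump num_elem first, back it out on any failure, allocate the object, and initialize the embedded pool entry. Only the limit check and the elem->pool assignment appear in the listing; the GFP flags, the info->size and info->elem_offset fields, and the kref are assumptions consistent with the _locked naming and with rxe_elem_release() below.

void *rxe_alloc_locked(struct rxe_pool *pool)
{
	struct rxe_type_info *info = &rxe_type_info[pool->type];
	struct rxe_pool_entry *elem;
	u8 *obj;

	/* reserve a slot; undo if the pool is already full */
	if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
		goto out_cnt;

	obj = kzalloc(info->size, GFP_ATOMIC);	/* field name and GFP flag assumed */
	if (!obj)
		goto out_cnt;

	/* the pool entry is embedded inside the object (offset assumed) */
	elem = (struct rxe_pool_entry *)(obj + info->elem_offset);

	elem->pool = pool;
	kref_init(&elem->ref_cnt);

	return obj;

out_cnt:
	atomic_dec(&pool->num_elem);
	return NULL;
}

/* rxe_alloc() is presumably identical except that it may sleep (GFP_KERNEL). */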
378 int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_entry *elem) in __rxe_add_to_pool() argument
380 if (atomic_inc_return(&pool->num_elem) > pool->max_elem) in __rxe_add_to_pool()
383 elem->pool = pool; in __rxe_add_to_pool()
389 atomic_dec(&pool->num_elem); in __rxe_add_to_pool()
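__rxe_add_to_pool() covers objects whose memory is allocated elsewhere (the RXE_POOL_NO_ALLOC case seen in rxe_elem_release() below): it only accounts for the element and points it back at its pool. The kref and return values are assumed.

int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_entry *elem)
{
	if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
		goto out_cnt;

	elem->pool = pool;
	kref_init(&elem->ref_cnt);

	return 0;

out_cnt:
	atomic_dec(&pool->num_elem);
	return -EINVAL;	/* assumed error code */
}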
397 struct rxe_pool *pool = elem->pool; in rxe_elem_release() local
398 struct rxe_type_info *info = &rxe_type_info[pool->type]; in rxe_elem_release()
401 if (pool->cleanup) in rxe_elem_release()
402 pool->cleanup(elem); in rxe_elem_release()
404 if (!(pool->flags & RXE_POOL_NO_ALLOC)) { in rxe_elem_release()
409 atomic_dec(&pool->num_elem); in rxe_elem_release()
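rxe_elem_release() is the kref release callback that undoes the allocation: it runs the per-type cleanup hook, frees the containing object unless the pool uses caller-provided memory (RXE_POOL_NO_ALLOC), and drops the element count. The container_of() plumbing and the elem_offset arithmetic are assumptions.

void rxe_elem_release(struct kref *kref)
{
	struct rxe_pool_entry *elem =
		container_of(kref, struct rxe_pool_entry, ref_cnt);
	struct rxe_pool *pool = elem->pool;
	struct rxe_type_info *info = &rxe_type_info[pool->type];
	u8 *obj;

	if (pool->cleanup)
		pool->cleanup(elem);

	if (!(pool->flags & RXE_POOL_NO_ALLOC)) {
		/* step back from the embedded entry to the object that was allocated */
		obj = (u8 *)elem - info->elem_offset;	/* field name assumed */
		kfree(obj);
	}

	atomic_dec(&pool->num_elem);
}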
412 void *rxe_pool_get_index_locked(struct rxe_pool *pool, u32 index) in rxe_pool_get_index_locked() argument
414 struct rxe_type_info *info = &rxe_type_info[pool->type]; in rxe_pool_get_index_locked()
419 node = pool->index.tree.rb_node; in rxe_pool_get_index_locked()
442 void *rxe_pool_get_index(struct rxe_pool *pool, u32 index) in rxe_pool_get_index() argument
447 read_lock_irqsave(&pool->pool_lock, flags); in rxe_pool_get_index()
448 obj = rxe_pool_get_index_locked(pool, index); in rxe_pool_get_index()
449 read_unlock_irqrestore(&pool->pool_lock, flags); in rxe_pool_get_index()
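Lookup by index is a plain binary search of the index rb-tree under the read lock; on a hit the element's refcount is taken and the enclosing object is returned. The tree walk and the kref_get() are assumptions consistent with the insert side above.

void *rxe_pool_get_index_locked(struct rxe_pool *pool, u32 index)
{
	struct rxe_type_info *info = &rxe_type_info[pool->type];
	struct rb_node *node;
	struct rxe_pool_entry *elem;
	void *obj;

	node = pool->index.tree.rb_node;

	while (node) {
		elem = rb_entry(node, struct rxe_pool_entry, index_node);

		if (elem->index > index)
			node = node->rb_left;
		else if (elem->index < index)
			node = node->rb_right;
		else
			break;
	}

	if (node) {
		kref_get(&elem->ref_cnt);
		obj = (u8 *)elem - info->elem_offset;	/* field name assumed */
	} else {
		obj = NULL;
	}

	return obj;
}

void *rxe_pool_get_index(struct rxe_pool *pool, u32 index)
{
	unsigned long flags;
	void *obj;

	read_lock_irqsave(&pool->pool_lock, flags);
	obj = rxe_pool_get_index_locked(pool, index);
	read_unlock_irqrestore(&pool->pool_lock, flags);

	return obj;
}

Callers are expected to drop the reference taken here, which eventually funnels into rxe_elem_release().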
454 void *rxe_pool_get_key_locked(struct rxe_pool *pool, void *key) in rxe_pool_get_key_locked() argument
456 struct rxe_type_info *info = &rxe_type_info[pool->type]; in rxe_pool_get_key_locked()
462 node = pool->key.tree.rb_node; in rxe_pool_get_key_locked()
467 cmp = memcmp((u8 *)elem + pool->key.key_offset, in rxe_pool_get_key_locked()
468 key, pool->key.key_size); in rxe_pool_get_key_locked()
488 void *rxe_pool_get_key(struct rxe_pool *pool, void *key) in rxe_pool_get_key() argument
493 read_lock_irqsave(&pool->pool_lock, flags); in rxe_pool_get_key()
494 obj = rxe_pool_get_key_locked(pool, key); in rxe_pool_get_key()
495 read_unlock_irqrestore(&pool->pool_lock, flags); in rxe_pool_get_key()
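Lookup by key walks the key tree with the same memcmp() ordering used by rxe_insert_key(); the unlocked wrapper again only adds the read lock. The branch directions and the reference handling are assumptions.

void *rxe_pool_get_key_locked(struct rxe_pool *pool, void *key)
{
	struct rxe_type_info *info = &rxe_type_info[pool->type];
	struct rb_node *node;
	struct rxe_pool_entry *elem;
	void *obj;
	int cmp;

	node = pool->key.tree.rb_node;

	while (node) {
		elem = rb_entry(node, struct rxe_pool_entry, key_node);

		cmp = memcmp((u8 *)elem + pool->key.key_offset,
			     key, pool->key.key_size);

		if (cmp > 0)
			node = node->rb_left;
		else if (cmp < 0)
			node = node->rb_right;
		else
			break;
	}

	if (node) {
		kref_get(&elem->ref_cnt);
		obj = (u8 *)elem - info->elem_offset;	/* field name assumed */
	} else {
		obj = NULL;
	}

	return obj;
}

void *rxe_pool_get_key(struct rxe_pool *pool, void *key)
{
	unsigned long flags;
	void *obj;

	read_lock_irqsave(&pool->pool_lock, flags);
	obj = rxe_pool_get_key_locked(pool, key);
	read_unlock_irqrestore(&pool->pool_lock, flags);

	return obj;
}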