Lines Matching full:pool

47 * freelists maintained in each pool. NCHUNKS_ORDER of 6 means that the
52 * be 63, or 62, respectively, freelists per pool.
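
A worked example of the arithmetic behind those two numbers, assuming 4 KiB pages (PAGE_SHIFT == 12); the macro names mirror the ones used in this file, but the definitions shown here are an illustrative sketch:

#define NCHUNKS_ORDER	6
#define CHUNK_SHIFT	(PAGE_SHIFT - NCHUNKS_ORDER)	/* 12 - 6 = 6 */
#define CHUNK_SIZE	(1 << CHUNK_SHIFT)		/* 64-byte chunks */
#define TOTAL_CHUNKS	(PAGE_SIZE >> CHUNK_SHIFT)	/* 64 chunks per page */
/* The z3fold header is rounded up to whole chunks: normally it fits in
 * one chunk (63 left over), but with CONFIG_DEBUG_SPINLOCK the larger
 * spinlock pushes it to two (62 left over) -- hence 63 or 62 freelists. */
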
72 int (*evict)(struct z3fold_pool *pool, unsigned long handle);
89 unsigned long pool; /* back link */ member
98 * pool
103 * @pool: pointer to the containing pool
117 struct z3fold_pool *pool; member
129 * struct z3fold_pool - stores metadata for each z3fold pool
130 * @name: pool name
131 * @lock: protects pool unbuddied/lru lists
132 * @stale_lock: protects pool stale page list
139 * @pages_nr: number of z3fold pages in the pool.
142 * pool creation time.
149 * This structure is allocated at pool creation time and maintains metadata
150 * pertaining to a particular z3fold pool.
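
A condensed sketch of the structure those kernel-doc lines describe; the field comments follow the doc above, and the real struct has further members that this search does not match:

struct z3fold_pool {
	const char *name;
	spinlock_t lock;		/* protects pool unbuddied/lru lists */
	spinlock_t stale_lock;		/* protects pool stale page list */
	struct list_head __percpu *unbuddied;	/* per-CPU freelists */
	struct list_head lru;
	struct list_head stale;
	atomic64_t pages_nr;		/* number of z3fold pages in the pool */
	struct kmem_cache *c_handle;	/* slab created at pool creation time */
	/* ... workqueues, ops and zpool back-pointers omitted ... */
};
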
207 static inline struct z3fold_buddy_slots *alloc_slots(struct z3fold_pool *pool, in alloc_slots() argument
210 struct z3fold_buddy_slots *slots = kmem_cache_zalloc(pool->c_handle, in alloc_slots()
216 slots->pool = (unsigned long)pool; in alloc_slots()
225 return (struct z3fold_pool *)(s->pool & ~HANDLE_FLAG_MASK); in slots_to_pool()
305 if (test_bit(HANDLES_NOFREE, &slots->pool)) { in free_handle()
323 struct z3fold_pool *pool = slots_to_pool(slots); in free_handle() local
327 kmem_cache_free(pool->c_handle, slots); in free_handle()
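
The back link above doubles as a flag word: slots objects come from a slab cache, so the pool pointer is aligned and its low bits are free for per-slots flags such as HANDLES_NOFREE. A minimal sketch of the technique, assuming HANDLE_FLAG_MASK covers those low bits:

slots->pool = (unsigned long)pool;		/* aligned => low bits zero */
set_bit(HANDLES_NOFREE, &slots->pool);		/* flag in a spare low bit */
/* mask the flags back off to recover the pointer: */
pool = (struct z3fold_pool *)(slots->pool & ~HANDLE_FLAG_MASK);
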
333 struct z3fold_pool *pool, gfp_t gfp) in init_z3fold_page() argument
348 slots = alloc_slots(pool, gfp); in init_z3fold_page()
357 zhdr->pool = pool; in init_z3fold_page()
382 * Pool lock should be held as this function accesses first_num
448 return zhdr->pool; in zhdr_to_pool()
454 struct z3fold_pool *pool = zhdr_to_pool(zhdr); in __release_z3fold_page() local
459 spin_lock(&pool->lock); in __release_z3fold_page()
462 spin_unlock(&pool->lock); in __release_z3fold_page()
467 spin_lock(&pool->stale_lock); in __release_z3fold_page()
468 list_add(&zhdr->buddy, &pool->stale); in __release_z3fold_page()
469 queue_work(pool->release_wq, &pool->work); in __release_z3fold_page()
470 spin_unlock(&pool->stale_lock); in __release_z3fold_page()
472 atomic64_dec(&pool->pages_nr); in __release_z3fold_page()
487 struct z3fold_pool *pool = zhdr_to_pool(zhdr); in release_z3fold_page_locked_list() local
489 spin_lock(&pool->lock); in release_z3fold_page_locked_list()
491 spin_unlock(&pool->lock); in release_z3fold_page_locked_list()
499 struct z3fold_pool *pool = container_of(w, struct z3fold_pool, work); in free_pages_work() local
501 spin_lock(&pool->stale_lock); in free_pages_work()
502 while (!list_empty(&pool->stale)) { in free_pages_work()
503 struct z3fold_header *zhdr = list_first_entry(&pool->stale, in free_pages_work()
510 spin_unlock(&pool->stale_lock); in free_pages_work()
514 spin_lock(&pool->stale_lock); in free_pages_work()
516 spin_unlock(&pool->stale_lock); in free_pages_work()
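
These hits outline a deferred-release pattern: the fast path only moves a stale header onto pool->stale and queues pool->work, while free_pages_work() drains the list in process context, dropping stale_lock around the actual free because that part may sleep. A sketch of the drain loop; the lines between the matches are assumptions:

spin_lock(&pool->stale_lock);
while (!list_empty(&pool->stale)) {
	struct z3fold_header *zhdr = list_first_entry(&pool->stale,
					struct z3fold_header, buddy);

	list_del(&zhdr->buddy);
	spin_unlock(&pool->stale_lock);		/* the free may sleep */
	free_z3fold_page(virt_to_page(zhdr), false);
	cond_resched();
	spin_lock(&pool->stale_lock);
}
spin_unlock(&pool->stale_lock);
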
544 static inline void add_to_unbuddied(struct z3fold_pool *pool, in add_to_unbuddied() argument
553 unbuddied = this_cpu_ptr(pool->unbuddied); in add_to_unbuddied()
554 spin_lock(&pool->lock); in add_to_unbuddied()
556 spin_unlock(&pool->lock); in add_to_unbuddied()
602 struct z3fold_pool *pool = zhdr_to_pool(zhdr); in compact_single_buddy() local
638 new_zhdr = __z3fold_alloc(pool, sz, false); in compact_single_buddy()
674 add_to_unbuddied(pool, new_zhdr); in compact_single_buddy()
684 add_to_unbuddied(pool, new_zhdr); in compact_single_buddy()
742 struct z3fold_pool *pool = zhdr_to_pool(zhdr); in do_compact_page() local
754 spin_lock(&pool->lock); in do_compact_page()
756 spin_unlock(&pool->lock); in do_compact_page()
777 add_to_unbuddied(pool, zhdr); in do_compact_page()
791 static inline struct z3fold_header *__z3fold_alloc(struct z3fold_pool *pool, in __z3fold_alloc() argument
802 unbuddied = this_cpu_ptr(pool->unbuddied); in __z3fold_alloc()
813 spin_lock(&pool->lock); in __z3fold_alloc()
817 spin_unlock(&pool->lock); in __z3fold_alloc()
826 spin_unlock(&pool->lock); in __z3fold_alloc()
841 * list while pool lock was held, and then we've taken in __z3fold_alloc()
857 unbuddied = per_cpu_ptr(pool->unbuddied, cpu); in __z3fold_alloc()
858 spin_lock(&pool->lock); in __z3fold_alloc()
865 spin_unlock(&pool->lock); in __z3fold_alloc()
871 spin_unlock(&pool->lock); in __z3fold_alloc()
888 zhdr->slots = alloc_slots(pool, GFP_ATOMIC); in __z3fold_alloc()
896 add_to_unbuddied(pool, zhdr); in __z3fold_alloc()
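
The allocation fast path implied by this group: look in the current CPU's unbuddied freelists first, and only scan the other CPUs' lists when that fails, always under pool->lock for the list surgery. Sketch, with first_fit() as a hypothetical lookup helper:

struct list_head *unbuddied = this_cpu_ptr(pool->unbuddied);
struct z3fold_header *zhdr = first_fit(unbuddied, chunks);	/* hypothetical */

if (!zhdr) {
	int cpu;

	for_each_online_cpu(cpu) {
		unbuddied = per_cpu_ptr(pool->unbuddied, cpu);
		spin_lock(&pool->lock);
		zhdr = first_fit(unbuddied, chunks);	/* hypothetical */
		spin_unlock(&pool->lock);
		if (zhdr)
			break;
	}
}
if (zhdr && !zhdr->slots)
	zhdr->slots = alloc_slots(pool, GFP_ATOMIC);	/* atomic: no sleeping here */
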
907 * z3fold_create_pool() - create a new z3fold pool
908 * @name: pool name
909 * @gfp: gfp flags when allocating the z3fold pool structure
910 * @ops: user-defined operations for the z3fold pool
912 * Return: pointer to the new z3fold pool or NULL if the metadata allocation
918 struct z3fold_pool *pool = NULL; in z3fold_create_pool() local
921 pool = kzalloc(sizeof(struct z3fold_pool), gfp); in z3fold_create_pool()
922 if (!pool) in z3fold_create_pool()
924 pool->c_handle = kmem_cache_create("z3fold_handle", in z3fold_create_pool()
927 if (!pool->c_handle) in z3fold_create_pool()
929 spin_lock_init(&pool->lock); in z3fold_create_pool()
930 spin_lock_init(&pool->stale_lock); in z3fold_create_pool()
931 pool->unbuddied = __alloc_percpu(sizeof(struct list_head) * NCHUNKS, in z3fold_create_pool()
933 if (!pool->unbuddied) in z3fold_create_pool()
937 per_cpu_ptr(pool->unbuddied, cpu); in z3fold_create_pool()
941 INIT_LIST_HEAD(&pool->lru); in z3fold_create_pool()
942 INIT_LIST_HEAD(&pool->stale); in z3fold_create_pool()
943 atomic64_set(&pool->pages_nr, 0); in z3fold_create_pool()
944 pool->name = name; in z3fold_create_pool()
945 pool->compact_wq = create_singlethread_workqueue(pool->name); in z3fold_create_pool()
946 if (!pool->compact_wq) in z3fold_create_pool()
948 pool->release_wq = create_singlethread_workqueue(pool->name); in z3fold_create_pool()
949 if (!pool->release_wq) in z3fold_create_pool()
951 INIT_WORK(&pool->work, free_pages_work); in z3fold_create_pool()
952 pool->ops = ops; in z3fold_create_pool()
953 return pool; in z3fold_create_pool()
956 destroy_workqueue(pool->compact_wq); in z3fold_create_pool()
958 free_percpu(pool->unbuddied); in z3fold_create_pool()
960 kmem_cache_destroy(pool->c_handle); in z3fold_create_pool()
962 kfree(pool); in z3fold_create_pool()
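
The failure hits at the tail of z3fold_create_pool() form the standard goto-unwind ladder, releasing resources in reverse order of acquisition; the label names below are assumptions consistent with the cleanup order shown:

out_wq:
	destroy_workqueue(pool->compact_wq);
out_unbuddied:
	free_percpu(pool->unbuddied);
out_pool:
	kmem_cache_destroy(pool->c_handle);
out:
	kfree(pool);
	return NULL;
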
968 * z3fold_destroy_pool() - destroys an existing z3fold pool
969 * @pool: the z3fold pool to be destroyed
971 * The pool should be emptied before this function is called.
973 static void z3fold_destroy_pool(struct z3fold_pool *pool) in z3fold_destroy_pool() argument
975 kmem_cache_destroy(pool->c_handle); in z3fold_destroy_pool()
978 * We need to destroy pool->compact_wq before pool->release_wq, in z3fold_destroy_pool()
979 * as any pending work on pool->compact_wq will call in z3fold_destroy_pool()
980 * queue_work(pool->release_wq, &pool->work). in z3fold_destroy_pool()
986 destroy_workqueue(pool->compact_wq); in z3fold_destroy_pool()
987 destroy_workqueue(pool->release_wq); in z3fold_destroy_pool()
988 free_percpu(pool->unbuddied); in z3fold_destroy_pool()
989 kfree(pool); in z3fold_destroy_pool()
996 * @pool: z3fold pool from which to allocate
998 * @gfp: gfp flags used if the pool needs to grow
1001 * This function will attempt to find a free region in the pool large enough to
1004 * allocated and added to the pool to satisfy the request.
1007 * gfp arguments are invalid or -ENOMEM if the pool was unable to allocate
1010 static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp, in z3fold_alloc() argument
1029 zhdr = __z3fold_alloc(pool, size, can_sleep); in z3fold_alloc()
1050 zhdr = init_z3fold_page(page, bud == HEADLESS, pool, gfp); in z3fold_alloc()
1055 atomic64_inc(&pool->pages_nr); in z3fold_alloc()
1081 add_to_unbuddied(pool, zhdr); in z3fold_alloc()
1084 spin_lock(&pool->lock); in z3fold_alloc()
1089 list_add(&page->lru, &pool->lru); in z3fold_alloc()
1092 spin_unlock(&pool->lock); in z3fold_alloc()
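
Caller-side sketch matching the documented contract of z3fold_alloc() (0 on success, -EINVAL for invalid size/gfp, -ENOMEM when no page could be added to the pool); the surrounding names are hypothetical:

unsigned long handle;
int err = z3fold_alloc(pool, len, GFP_KERNEL, &handle);

if (err)
	return err;		/* -EINVAL or -ENOMEM, per the kernel-doc */
/* ... access the object via z3fold_map()/z3fold_unmap() ... */
z3fold_free(pool, handle);
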
1101 * @pool: pool in which the allocation resided
1109 static void z3fold_free(struct z3fold_pool *pool, unsigned long handle) in z3fold_free() argument
1127 spin_lock(&pool->lock); in z3fold_free()
1129 spin_unlock(&pool->lock); in z3fold_free()
1132 atomic64_dec(&pool->pages_nr); in z3fold_free()
1180 queue_work_on(zhdr->cpu, pool->compact_wq, &zhdr->work); in z3fold_free()
1185 * z3fold_reclaim_page() - evicts allocations from a pool page and frees it
1186 * @pool: pool from which a page will attempt to be evicted
1199 * z3fold_reclaim_page() will remove a z3fold page from the pool LRU list and
1200 * call the user-defined eviction handler with the pool and handle as
1220 static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries) in z3fold_reclaim_page() argument
1230 slots.pool = (unsigned long)pool | (1 << HANDLES_NOFREE); in z3fold_reclaim_page()
1232 spin_lock(&pool->lock); in z3fold_reclaim_page()
1233 if (!pool->ops || !pool->ops->evict || retries == 0) { in z3fold_reclaim_page()
1234 spin_unlock(&pool->lock); in z3fold_reclaim_page()
1238 if (list_empty(&pool->lru)) { in z3fold_reclaim_page()
1239 spin_unlock(&pool->lock); in z3fold_reclaim_page()
1242 list_for_each_prev(pos, &pool->lru) { in z3fold_reclaim_page()
1289 spin_unlock(&pool->lock); in z3fold_reclaim_page()
1322 ret = pool->ops->evict(pool, middle_handle); in z3fold_reclaim_page()
1327 ret = pool->ops->evict(pool, first_handle); in z3fold_reclaim_page()
1332 ret = pool->ops->evict(pool, last_handle); in z3fold_reclaim_page()
1340 atomic64_dec(&pool->pages_nr); in z3fold_reclaim_page()
1343 spin_lock(&pool->lock); in z3fold_reclaim_page()
1344 list_add(&page->lru, &pool->lru); in z3fold_reclaim_page()
1345 spin_unlock(&pool->lock); in z3fold_reclaim_page()
1352 kmem_cache_free(pool->c_handle, slots); in z3fold_reclaim_page()
1357 * free. Take the global pool lock then to be able in z3fold_reclaim_page()
1360 spin_lock(&pool->lock); in z3fold_reclaim_page()
1361 list_add(&page->lru, &pool->lru); in z3fold_reclaim_page()
1362 spin_unlock(&pool->lock); in z3fold_reclaim_page()
1364 add_to_unbuddied(pool, zhdr); in z3fold_reclaim_page()
1369 /* We started off locked so we need to lock the pool back */ in z3fold_reclaim_page()
1370 spin_lock(&pool->lock); in z3fold_reclaim_page()
1372 spin_unlock(&pool->lock); in z3fold_reclaim_page()
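
Sketch of a user-defined eviction handler matching the callback type near the top of the listing (int (*evict)(struct z3fold_pool *pool, unsigned long handle)); the writeback step is hypothetical:

static int my_evict(struct z3fold_pool *pool, unsigned long handle)
{
	void *data = z3fold_map(pool, handle);

	/* hypothetical: write 'data' back to its backing store */
	z3fold_unmap(pool, handle);
	return 0;	/* non-zero makes reclaim put the page back on the LRU */
}
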
1378 * @pool: pool in which the allocation resides
1386 static void *z3fold_map(struct z3fold_pool *pool, unsigned long handle) in z3fold_map() argument
1428 * @pool: pool in which the allocation resides
1431 static void z3fold_unmap(struct z3fold_pool *pool, unsigned long handle) in z3fold_unmap() argument
1451 * z3fold_get_pool_size() - gets the z3fold pool size in pages
1452 * @pool: pool whose size is being queried
1454 * Returns: size in pages of the given pool.
1456 static u64 z3fold_get_pool_size(struct z3fold_pool *pool) in z3fold_get_pool_size() argument
1458 return atomic64_read(&pool->pages_nr); in z3fold_get_pool_size()
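
Usage sketch tying the three accessors together; buf and len are hypothetical:

void *addr = z3fold_map(pool, handle);

memcpy(buf, addr, len);			/* object stays mapped until unmap */
z3fold_unmap(pool, handle);
pr_info("pool holds %llu pages\n", z3fold_get_pool_size(pool));
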
1464 struct z3fold_pool *pool; in z3fold_page_isolate() local
1483 pool = zhdr_to_pool(zhdr); in z3fold_page_isolate()
1484 spin_lock(&pool->lock); in z3fold_page_isolate()
1489 spin_unlock(&pool->lock); in z3fold_page_isolate()
1504 struct z3fold_pool *pool; in z3fold_page_migrate() local
1512 pool = zhdr_to_pool(zhdr); in z3fold_page_migrate()
1549 spin_lock(&pool->lock); in z3fold_page_migrate()
1550 list_add(&newpage->lru, &pool->lru); in z3fold_page_migrate()
1551 spin_unlock(&pool->lock); in z3fold_page_migrate()
1555 queue_work_on(new_zhdr->cpu, pool->compact_wq, &new_zhdr->work); in z3fold_page_migrate()
1566 struct z3fold_pool *pool; in z3fold_page_putback() local
1569 pool = zhdr_to_pool(zhdr); in z3fold_page_putback()
1577 spin_lock(&pool->lock); in z3fold_page_putback()
1578 list_add(&page->lru, &pool->lru); in z3fold_page_putback()
1579 spin_unlock(&pool->lock); in z3fold_page_putback()
1581 add_to_unbuddied(pool, zhdr); in z3fold_page_putback()
1596 static int z3fold_zpool_evict(struct z3fold_pool *pool, unsigned long handle) in z3fold_zpool_evict() argument
1598 if (pool->zpool && pool->zpool_ops && pool->zpool_ops->evict) in z3fold_zpool_evict()
1599 return pool->zpool_ops->evict(pool->zpool, handle); in z3fold_zpool_evict()
1612 struct z3fold_pool *pool; in z3fold_zpool_create() local
1614 pool = z3fold_create_pool(name, gfp, in z3fold_zpool_create()
1616 if (pool) { in z3fold_zpool_create()
1617 pool->zpool = zpool; in z3fold_zpool_create()
1618 pool->zpool_ops = zpool_ops; in z3fold_zpool_create()
1620 return pool; in z3fold_zpool_create()
1623 static void z3fold_zpool_destroy(void *pool) in z3fold_zpool_destroy() argument
1625 z3fold_destroy_pool(pool); in z3fold_zpool_destroy()
1628 static int z3fold_zpool_malloc(void *pool, size_t size, gfp_t gfp, in z3fold_zpool_malloc() argument
1631 return z3fold_alloc(pool, size, gfp, handle); in z3fold_zpool_malloc()
1633 static void z3fold_zpool_free(void *pool, unsigned long handle) in z3fold_zpool_free() argument
1635 z3fold_free(pool, handle); in z3fold_zpool_free()
1638 static int z3fold_zpool_shrink(void *pool, unsigned int pages, in z3fold_zpool_shrink() argument
1645 ret = z3fold_reclaim_page(pool, 8); in z3fold_zpool_shrink()
1657 static void *z3fold_zpool_map(void *pool, unsigned long handle, in z3fold_zpool_map() argument
1660 return z3fold_map(pool, handle); in z3fold_zpool_map()
1662 static void z3fold_zpool_unmap(void *pool, unsigned long handle) in z3fold_zpool_unmap() argument
1664 z3fold_unmap(pool, handle); in z3fold_zpool_unmap()
1667 static u64 z3fold_zpool_total_size(void *pool) in z3fold_zpool_total_size() argument
1669 return z3fold_get_pool_size(pool) * PAGE_SIZE; in z3fold_zpool_total_size()
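
For completeness, clients such as zswap reach all of the above through the zpool glue in this last group; a sketch against the zpool API of the same kernel generation (my_zpool_evict is a hypothetical writeback callback, error handling elided):

static const struct zpool_ops my_zpool_ops = {
	.evict = my_zpool_evict,		/* hypothetical */
};

struct zpool *zp = zpool_create_pool("z3fold", "demo", GFP_KERNEL,
				     &my_zpool_ops);
unsigned long handle;
void *p;

zpool_malloc(zp, len, GFP_KERNEL, &handle);	/* -> z3fold_zpool_malloc() */
p = zpool_map_handle(zp, handle, ZPOOL_MM_RW);	/* -> z3fold_zpool_map() */
/* ... */
zpool_unmap_handle(zp, handle);
zpool_free(zp, handle);				/* -> z3fold_zpool_free() */
zpool_shrink(zp, 1, NULL);			/* -> z3fold_zpool_shrink() */
zpool_destroy_pool(zp);
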