Lines Matching full:pool

51  * freelists maintained in each pool. NCHUNKS_ORDER of 6 means that the
56 * be 63, or 62, respectively, freelists per pool.
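
The arithmetic behind these numbers fits in a few lines. A minimal userspace sketch, assuming a 4 KiB page and the one-chunk header of the non-debug case (the kernel derives the real values from PAGE_SHIFT and the rounded-up header size):

#include <stdio.h>

/* Illustrative stand-ins; the kernel gets these from page.h. */
#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)		/* 4096 */
#define NCHUNKS_ORDER	6				/* 2^6 = 64 chunks per page */
#define CHUNK_SHIFT	(PAGE_SHIFT - NCHUNKS_ORDER)
#define CHUNK_SIZE	(1UL << CHUNK_SHIFT)		/* PAGE_SIZE/64 = 64 bytes */

int main(void)
{
	/* Assume the z3fold header rounds up to one chunk; with
	 * CONFIG_DEBUG_SPINLOCK it can grow to two, which is where
	 * "63, or 62, respectively" comes from. */
	unsigned long zhdr_chunks = 1;
	unsigned long nchunks = (PAGE_SIZE >> CHUNK_SHIFT) - zhdr_chunks;

	printf("chunk size %lu bytes, %lu freelists per pool\n",
	       CHUNK_SIZE, nchunks);
	return 0;
}
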
76 int (*evict)(struct z3fold_pool *pool, unsigned long handle);
93 unsigned long pool; /* back link */ member
102 * pool
107 * @pool: pointer to the containing pool
121 struct z3fold_pool *pool; member
133 * struct z3fold_pool - stores metadata for each z3fold pool
134 * @name: pool name
135 * @lock: protects pool unbuddied/lru lists
136 * @stale_lock: protects pool stale page list
143 * @pages_nr: number of z3fold pages in the pool.
146 * pool creation time.
154 * This structure is allocated at pool creation time and maintains metadata
155 * pertaining to a particular z3fold pool.
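
Condensed, the metadata block documented above looks roughly like this sketch; the field names follow the kerneldoc lines, with userspace stand-ins (a mutex per spinlock, a C11 atomic for atomic64_t), and the real struct carries more members (per-CPU unbuddied lists, workqueues, the migration inode):

#include <pthread.h>

struct z3fold_pool_sketch {
	const char *name;		/* @name: pool name */
	pthread_mutex_t lock;		/* @lock: protects unbuddied/lru lists */
	pthread_mutex_t stale_lock;	/* @stale_lock: protects the stale page list */
	_Atomic long pages_nr;		/* @pages_nr: z3fold pages in the pool */
};
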
212 static inline struct z3fold_buddy_slots *alloc_slots(struct z3fold_pool *pool, in alloc_slots() argument
217 slots = kmem_cache_zalloc(pool->c_handle, in alloc_slots()
223 slots->pool = (unsigned long)pool; in alloc_slots()
232 return (struct z3fold_pool *)(s->pool & ~HANDLE_FLAG_MASK); in slots_to_pool()
310 if (test_bit(HANDLES_NOFREE, &slots->pool)) { in free_handle()
328 struct z3fold_pool *pool = slots_to_pool(slots); in free_handle() local
332 kmem_cache_free(pool->c_handle, slots); in free_handle()
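
The back link at line 93 and the mask at line 232 are a tagged-pointer idiom: the slots allocation is sufficiently aligned, so the low bits of the stored pool address are free to carry flags such as HANDLES_NOFREE (tested at line 310). A minimal sketch of the packing, with illustrative constants:

#include <assert.h>
#include <stdlib.h>

#define HANDLE_FLAG_MASK	((1UL << 2) - 1)	/* low bits reserved for flags */
#define HANDLES_NOFREE		0			/* flag bit number */

struct pool;

struct slots {
	unsigned long pool;	/* back link: pool address | flag bits */
};

static struct pool *slots_to_pool(struct slots *s)
{
	return (struct pool *)(s->pool & ~HANDLE_FLAG_MASK);
}

int main(void)
{
	/* the allocation's alignment guarantees the low bits start out 0 */
	void *p = aligned_alloc(64, 64);
	struct slots s = { .pool = (unsigned long)p | (1UL << HANDLES_NOFREE) };

	assert(slots_to_pool(&s) == (struct pool *)p);
	assert(s.pool & (1UL << HANDLES_NOFREE));	/* cf. test_bit() above */
	free(p);
	return 0;
}
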
365 static int z3fold_register_migration(struct z3fold_pool *pool) in z3fold_register_migration() argument
367 pool->inode = alloc_anon_inode(z3fold_mnt->mnt_sb); in z3fold_register_migration()
368 if (IS_ERR(pool->inode)) { in z3fold_register_migration()
369 pool->inode = NULL; in z3fold_register_migration()
373 pool->inode->i_mapping->private_data = pool; in z3fold_register_migration()
374 pool->inode->i_mapping->a_ops = &z3fold_aops; in z3fold_register_migration()
378 static void z3fold_unregister_migration(struct z3fold_pool *pool) in z3fold_unregister_migration() argument
380 if (pool->inode) in z3fold_unregister_migration()
381 iput(pool->inode); in z3fold_unregister_migration()
386 struct z3fold_pool *pool, gfp_t gfp) in init_z3fold_page() argument
400 slots = alloc_slots(pool, gfp); in init_z3fold_page()
409 zhdr->pool = pool; in init_z3fold_page()
435 * Pool lock should be held as this function accesses first_num
501 return zhdr->pool; in zhdr_to_pool()
507 struct z3fold_pool *pool = zhdr_to_pool(zhdr); in __release_z3fold_page() local
512 spin_lock(&pool->lock); in __release_z3fold_page()
515 spin_unlock(&pool->lock); in __release_z3fold_page()
520 spin_lock(&pool->stale_lock); in __release_z3fold_page()
521 list_add(&zhdr->buddy, &pool->stale); in __release_z3fold_page()
522 queue_work(pool->release_wq, &pool->work); in __release_z3fold_page()
523 spin_unlock(&pool->stale_lock); in __release_z3fold_page()
545 struct z3fold_pool *pool = zhdr_to_pool(zhdr); in release_z3fold_page_locked_list() local
547 spin_lock(&pool->lock); in release_z3fold_page_locked_list()
549 spin_unlock(&pool->lock); in release_z3fold_page_locked_list()
557 struct z3fold_pool *pool = container_of(w, struct z3fold_pool, work); in free_pages_work() local
559 spin_lock(&pool->stale_lock); in free_pages_work()
560 while (!list_empty(&pool->stale)) { in free_pages_work()
561 struct z3fold_header *zhdr = list_first_entry(&pool->stale, in free_pages_work()
568 spin_unlock(&pool->stale_lock); in free_pages_work()
572 spin_lock(&pool->stale_lock); in free_pages_work()
574 spin_unlock(&pool->stale_lock); in free_pages_work()
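
Lines 507-574 are a deferred-free pattern: a page that cannot be released in the current context is parked on the locked stale list and a work item is queued; the worker drains the list, dropping the lock around each actual free just as lines 560-574 do. A userspace sketch of the same shape, with a mutex standing in for the spinlock and a plain function for the queued work:

#include <pthread.h>
#include <stdlib.h>

struct stale_node {
	struct stale_node *next;
};

static pthread_mutex_t stale_lock = PTHREAD_MUTEX_INITIALIZER;
static struct stale_node *stale_list;

static void defer_release(struct stale_node *n)
{
	pthread_mutex_lock(&stale_lock);
	n->next = stale_list;
	stale_list = n;
	/* the kernel queues pool->work on pool->release_wq here */
	pthread_mutex_unlock(&stale_lock);
}

static void free_pages_work_sketch(void)
{
	pthread_mutex_lock(&stale_lock);
	while (stale_list) {
		struct stale_node *n = stale_list;

		stale_list = n->next;
		pthread_mutex_unlock(&stale_lock);	/* never free under the lock */
		free(n);
		pthread_mutex_lock(&stale_lock);
	}
	pthread_mutex_unlock(&stale_lock);
}

int main(void)
{
	struct stale_node *n = malloc(sizeof(*n));

	if (n)
		defer_release(n);
	free_pages_work_sketch();
	return 0;
}
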
602 static inline void add_to_unbuddied(struct z3fold_pool *pool, in add_to_unbuddied() argument
611 unbuddied = this_cpu_ptr(pool->unbuddied); in add_to_unbuddied()
612 spin_lock(&pool->lock); in add_to_unbuddied()
614 spin_unlock(&pool->lock); in add_to_unbuddied()
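
add_to_unbuddied() files a page on a per-CPU freelist indexed by how many free chunks the page still has, so the allocator can later go straight to a list whose pages are known to be large enough. A sketch of that indexing, with illustrative types (the kernel uses struct list_head and this_cpu_ptr()):

#define NCHUNKS	63	/* illustrative; see the header comment near the top */

struct page_hdr {
	struct page_hdr *next;
	int free_chunks;	/* chunks this page can still hand out */
};

struct cpu_lists {
	struct page_hdr *unbuddied[NCHUNKS];
};

static void add_to_unbuddied_sketch(struct cpu_lists *cpu, struct page_hdr *zhdr)
{
	/* file the page under its current free-chunk count */
	zhdr->next = cpu->unbuddied[zhdr->free_chunks];
	cpu->unbuddied[zhdr->free_chunks] = zhdr;
}
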
660 struct z3fold_pool *pool = zhdr_to_pool(zhdr); in compact_single_buddy() local
696 new_zhdr = __z3fold_alloc(pool, sz, false); in compact_single_buddy()
732 add_to_unbuddied(pool, new_zhdr); in compact_single_buddy()
743 atomic64_dec(&pool->pages_nr); in compact_single_buddy()
745 add_to_unbuddied(pool, new_zhdr); in compact_single_buddy()
804 struct z3fold_pool *pool = zhdr_to_pool(zhdr); in do_compact_page() local
816 spin_lock(&pool->lock); in do_compact_page()
818 spin_unlock(&pool->lock); in do_compact_page()
821 atomic64_dec(&pool->pages_nr); in do_compact_page()
834 atomic64_dec(&pool->pages_nr); in do_compact_page()
843 add_to_unbuddied(pool, zhdr); in do_compact_page()
857 static inline struct z3fold_header *__z3fold_alloc(struct z3fold_pool *pool, in __z3fold_alloc() argument
868 unbuddied = this_cpu_ptr(pool->unbuddied); in __z3fold_alloc()
879 spin_lock(&pool->lock); in __z3fold_alloc()
884 spin_unlock(&pool->lock); in __z3fold_alloc()
893 spin_unlock(&pool->lock); in __z3fold_alloc()
908 * list while pool lock was held, and then we've taken in __z3fold_alloc()
924 unbuddied = per_cpu_ptr(pool->unbuddied, cpu); in __z3fold_alloc()
925 spin_lock(&pool->lock); in __z3fold_alloc()
932 spin_unlock(&pool->lock); in __z3fold_alloc()
938 spin_unlock(&pool->lock); in __z3fold_alloc()
955 zhdr->slots = alloc_slots(pool, in __z3fold_alloc()
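
The lookup side visible above is two-phase: __z3fold_alloc() first scans the current CPU's unbuddied lists starting at the requested chunk count (lines 868-893), and only if those all come up empty widens the search to every other CPU's lists (lines 924-938), taking pool->lock around each list manipulation. Continuing the previous sketch (the two compile together), a stand-in for that search order:

#include <stdio.h>

static struct page_hdr *find_unbuddied(struct cpu_lists *cpus, int ncpus,
				       int this_cpu, int chunks)
{
	for (int pass = 0; pass < 2; pass++) {
		for (int cpu = 0; cpu < ncpus; cpu++) {
			if ((pass == 0) != (cpu == this_cpu))
				continue;	/* pass 0: local CPU only */
			for (int i = chunks; i < NCHUNKS; i++) {
				struct page_hdr *zhdr = cpus[cpu].unbuddied[i];

				if (zhdr) {	/* pop and return the first fit */
					cpus[cpu].unbuddied[i] = zhdr->next;
					return zhdr;
				}
			}
		}
	}
	return NULL;	/* caller allocates a fresh page */
}

int main(void)
{
	struct cpu_lists cpus[2] = { 0 };
	struct page_hdr page = { .free_chunks = 10 };

	add_to_unbuddied_sketch(&cpus[1], &page);
	/* CPU 0 finds nothing locally, then steals from CPU 1 */
	printf("found: %p\n", (void *)find_unbuddied(cpus, 2, 0, 8));
	return 0;
}
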
965 * z3fold_create_pool() - create a new z3fold pool
966 * @name: pool name
967 * @gfp: gfp flags when allocating the z3fold pool structure
968 * @ops: user-defined operations for the z3fold pool
970 * Return: pointer to the new z3fold pool or NULL if the metadata allocation
976 struct z3fold_pool *pool = NULL; in z3fold_create_pool() local
979 pool = kzalloc(sizeof(struct z3fold_pool), gfp); in z3fold_create_pool()
980 if (!pool) in z3fold_create_pool()
982 pool->c_handle = kmem_cache_create("z3fold_handle", in z3fold_create_pool()
985 if (!pool->c_handle) in z3fold_create_pool()
987 spin_lock_init(&pool->lock); in z3fold_create_pool()
988 spin_lock_init(&pool->stale_lock); in z3fold_create_pool()
989 pool->unbuddied = __alloc_percpu(sizeof(struct list_head) * NCHUNKS, in z3fold_create_pool()
991 if (!pool->unbuddied) in z3fold_create_pool()
995 per_cpu_ptr(pool->unbuddied, cpu); in z3fold_create_pool()
999 INIT_LIST_HEAD(&pool->lru); in z3fold_create_pool()
1000 INIT_LIST_HEAD(&pool->stale); in z3fold_create_pool()
1001 atomic64_set(&pool->pages_nr, 0); in z3fold_create_pool()
1002 pool->name = name; in z3fold_create_pool()
1003 pool->compact_wq = create_singlethread_workqueue(pool->name); in z3fold_create_pool()
1004 if (!pool->compact_wq) in z3fold_create_pool()
1006 pool->release_wq = create_singlethread_workqueue(pool->name); in z3fold_create_pool()
1007 if (!pool->release_wq) in z3fold_create_pool()
1009 if (z3fold_register_migration(pool)) in z3fold_create_pool()
1011 INIT_WORK(&pool->work, free_pages_work); in z3fold_create_pool()
1012 pool->ops = ops; in z3fold_create_pool()
1013 return pool; in z3fold_create_pool()
1016 destroy_workqueue(pool->release_wq); in z3fold_create_pool()
1018 destroy_workqueue(pool->compact_wq); in z3fold_create_pool()
1020 free_percpu(pool->unbuddied); in z3fold_create_pool()
1022 kmem_cache_destroy(pool->c_handle); in z3fold_create_pool()
1024 kfree(pool); in z3fold_create_pool()
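
The tail of z3fold_create_pool() (lines 1016-1024) is the classic goto-unwind idiom: each setup step that fails jumps to a label that releases everything built so far, in reverse order. A compact sketch of that shape, with plain allocations standing in for the handle cache, per-CPU lists and workqueues:

#include <stdlib.h>

struct pool_sketch {
	void *c_handle, *unbuddied, *compact_wq;
};

static struct pool_sketch *create_pool(void)
{
	struct pool_sketch *pool = calloc(1, sizeof(*pool));

	if (!pool)
		return NULL;
	pool->c_handle = malloc(64);	/* stands in for kmem_cache_create() */
	if (!pool->c_handle)
		goto out_pool;
	pool->unbuddied = malloc(64);	/* stands in for __alloc_percpu() */
	if (!pool->unbuddied)
		goto out_handle;
	pool->compact_wq = malloc(64);	/* ... create_singlethread_workqueue() */
	if (!pool->compact_wq)
		goto out_unbuddied;
	return pool;			/* fully constructed */

out_unbuddied:
	free(pool->unbuddied);
out_handle:
	free(pool->c_handle);
out_pool:
	free(pool);
	return NULL;
}

int main(void)
{
	struct pool_sketch *pool = create_pool();

	if (pool) {	/* teardown mirrors z3fold_destroy_pool() */
		free(pool->compact_wq);
		free(pool->unbuddied);
		free(pool->c_handle);
		free(pool);
	}
	return 0;
}
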
1030 * z3fold_destroy_pool() - destroys an existing z3fold pool
1031 * @pool: the z3fold pool to be destroyed
1033 * The pool should be emptied before this function is called.
1035 static void z3fold_destroy_pool(struct z3fold_pool *pool) in z3fold_destroy_pool() argument
1037 kmem_cache_destroy(pool->c_handle); in z3fold_destroy_pool()
1040 * We need to destroy pool->compact_wq before pool->release_wq, in z3fold_destroy_pool()
1041 * as any pending work on pool->compact_wq will call in z3fold_destroy_pool()
1042 * queue_work(pool->release_wq, &pool->work). in z3fold_destroy_pool()
1048 destroy_workqueue(pool->compact_wq); in z3fold_destroy_pool()
1049 destroy_workqueue(pool->release_wq); in z3fold_destroy_pool()
1050 z3fold_unregister_migration(pool); in z3fold_destroy_pool()
1051 free_percpu(pool->unbuddied); in z3fold_destroy_pool()
1052 kfree(pool); in z3fold_destroy_pool()
1057 * @pool: z3fold pool from which to allocate
1059 * @gfp: gfp flags used if the pool needs to grow
1062 * This function will attempt to find a free region in the pool large enough to
1065 * allocated and added to the pool to satisfy the request.
1068 * as z3fold pool pages.
1071 * gfp arguments are invalid or -ENOMEM if the pool was unable to allocate
1074 static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp, in z3fold_alloc() argument
1093 zhdr = __z3fold_alloc(pool, size, can_sleep); in z3fold_alloc()
1099 atomic64_dec(&pool->pages_nr); in z3fold_alloc()
1114 spin_lock(&pool->stale_lock); in z3fold_alloc()
1115 zhdr = list_first_entry_or_null(&pool->stale, in z3fold_alloc()
1124 spin_unlock(&pool->stale_lock); in z3fold_alloc()
1128 spin_unlock(&pool->stale_lock); in z3fold_alloc()
1137 zhdr = init_z3fold_page(page, bud == HEADLESS, pool, gfp); in z3fold_alloc()
1142 atomic64_inc(&pool->pages_nr); in z3fold_alloc()
1150 __SetPageMovable(page, pool->inode->i_mapping); in z3fold_alloc()
1154 __SetPageMovable(page, pool->inode->i_mapping); in z3fold_alloc()
1169 add_to_unbuddied(pool, zhdr); in z3fold_alloc()
1172 spin_lock(&pool->lock); in z3fold_alloc()
1177 list_add(&page->lru, &pool->lru); in z3fold_alloc()
1180 spin_unlock(&pool->lock); in z3fold_alloc()
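
The kerneldoc above (lines 1057-1074) describes the sizing policy this function implements: the request is rounded up to whole chunks and, if it is too big to share a page with the z3fold header, it gets a page of its own (a headless page); otherwise it is placed as a first, middle or last buddy. A rough sketch of that decision, under the same illustrative constants as the first example:

#include <stdio.h>

#define CHUNK_SHIFT	6
#define CHUNK_SIZE	(1UL << CHUNK_SHIFT)	/* 64 bytes */
#define ZHDR_CHUNKS	1			/* header, rounded up to chunks */
#define TOTAL_CHUNKS	64			/* 4 KiB page / 64-byte chunks */

static int size_to_chunks(size_t size)
{
	return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT;
}

int main(void)
{
	size_t sizes[] = { 500, 3000, 4000 };

	for (int i = 0; i < 3; i++) {
		int chunks = size_to_chunks(sizes[i]);

		/* too big to fit next to the header: page of its own */
		if (chunks > TOTAL_CHUNKS - ZHDR_CHUNKS - 1)
			printf("%4zu bytes: headless page\n", sizes[i]);
		else
			printf("%4zu bytes: %2d chunks as a buddy\n",
			       sizes[i], chunks);
	}
	return 0;
}
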
1189 * @pool: pool in which the allocation resided
1197 static void z3fold_free(struct z3fold_pool *pool, unsigned long handle) in z3fold_free() argument
1215 spin_lock(&pool->lock); in z3fold_free()
1217 spin_unlock(&pool->lock); in z3fold_free()
1220 atomic64_dec(&pool->pages_nr); in z3fold_free()
1248 atomic64_dec(&pool->pages_nr); in z3fold_free()
1262 spin_lock(&pool->lock); in z3fold_free()
1264 spin_unlock(&pool->lock); in z3fold_free()
1273 queue_work_on(zhdr->cpu, pool->compact_wq, &zhdr->work); in z3fold_free()
1278 * z3fold_reclaim_page() - evicts allocations from a pool page and frees it
1279 * @pool: pool from which a page will attempt to be evicted
1292 * z3fold_reclaim_page() will remove a z3fold page from the pool LRU list and
1293 * call the user-defined eviction handler with the pool and handle as
1313 static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries) in z3fold_reclaim_page() argument
1323 slots.pool = (unsigned long)pool | (1 << HANDLES_NOFREE); in z3fold_reclaim_page()
1325 spin_lock(&pool->lock); in z3fold_reclaim_page()
1326 if (!pool->ops || !pool->ops->evict || retries == 0) { in z3fold_reclaim_page()
1327 spin_unlock(&pool->lock); in z3fold_reclaim_page()
1331 if (list_empty(&pool->lru)) { in z3fold_reclaim_page()
1332 spin_unlock(&pool->lock); in z3fold_reclaim_page()
1335 list_for_each_prev(pos, &pool->lru) { in z3fold_reclaim_page()
1363 atomic64_dec(&pool->pages_nr); in z3fold_reclaim_page()
1376 atomic64_dec(&pool->pages_nr); in z3fold_reclaim_page()
1391 spin_unlock(&pool->lock); in z3fold_reclaim_page()
1424 ret = pool->ops->evict(pool, middle_handle); in z3fold_reclaim_page()
1429 ret = pool->ops->evict(pool, first_handle); in z3fold_reclaim_page()
1434 ret = pool->ops->evict(pool, last_handle); in z3fold_reclaim_page()
1442 atomic64_dec(&pool->pages_nr); in z3fold_reclaim_page()
1445 spin_lock(&pool->lock); in z3fold_reclaim_page()
1446 list_add(&page->lru, &pool->lru); in z3fold_reclaim_page()
1447 spin_unlock(&pool->lock); in z3fold_reclaim_page()
1454 kmem_cache_free(pool->c_handle, slots); in z3fold_reclaim_page()
1455 atomic64_dec(&pool->pages_nr); in z3fold_reclaim_page()
1460 * free. Take the global pool lock then to be able in z3fold_reclaim_page()
1463 spin_lock(&pool->lock); in z3fold_reclaim_page()
1464 list_add(&page->lru, &pool->lru); in z3fold_reclaim_page()
1465 spin_unlock(&pool->lock); in z3fold_reclaim_page()
1470 /* We started off locked so we need to lock the pool back */ in z3fold_reclaim_page()
1471 spin_lock(&pool->lock); in z3fold_reclaim_page()
1473 spin_unlock(&pool->lock); in z3fold_reclaim_page()
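
Lines 1278-1473 implement the reclaim protocol the kerneldoc sketches: take a page off the back of the LRU, call the user-supplied evict handler once per live handle, and on failure put the page back on the LRU under the pool lock. A condensed, illustrative stand-in for that contract; the flat handle array is an assumption (the kernel encodes first/middle/last handles and retries across several pages):

#include <stdio.h>

struct rpool;	/* opaque: the handler only passes it back */

struct rpool_ops {
	int (*evict)(struct rpool *pool, unsigned long handle);
};

static int reclaim_page(struct rpool *pool, const struct rpool_ops *ops,
			unsigned long *handles, int nr, unsigned int retries)
{
	if (!ops || !ops->evict || retries == 0)
		return -1;	/* the kernel returns -EINVAL here */

	for (int i = 0; i < nr; i++) {
		int ret = ops->evict(pool, handles[i]);

		if (ret)
			return ret;	/* page would go back on the LRU */
	}
	return 0;	/* all buddies evicted; page can be freed */
}

static int evict_ok(struct rpool *pool, unsigned long handle)
{
	(void)pool;
	printf("evicting handle %lu\n", handle);
	return 0;
}

int main(void)
{
	unsigned long handles[] = { 1, 2, 3 };
	struct rpool_ops ops = { .evict = evict_ok };

	return reclaim_page(NULL, &ops, handles, 3, 8);	/* cf. the shrink shim's 8 */
}
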
1479 * @pool: pool in which the allocation resides
1487 static void *z3fold_map(struct z3fold_pool *pool, unsigned long handle) in z3fold_map() argument
1529 * @pool: pool in which the allocation resides
1532 static void z3fold_unmap(struct z3fold_pool *pool, unsigned long handle) in z3fold_unmap() argument
1552 * z3fold_get_pool_size() - gets the z3fold pool size in pages
1553 * @pool: pool whose size is being queried
1555 * Returns: size in pages of the given pool.
1557 static u64 z3fold_get_pool_size(struct z3fold_pool *pool) in z3fold_get_pool_size() argument
1559 return atomic64_read(&pool->pages_nr); in z3fold_get_pool_size()
1565 struct z3fold_pool *pool; in z3fold_page_isolate() local
1584 pool = zhdr_to_pool(zhdr); in z3fold_page_isolate()
1585 spin_lock(&pool->lock); in z3fold_page_isolate()
1590 spin_unlock(&pool->lock); in z3fold_page_isolate()
1605 struct z3fold_pool *pool; in z3fold_page_migrate() local
1614 pool = zhdr_to_pool(zhdr); in z3fold_page_migrate()
1653 spin_lock(&pool->lock); in z3fold_page_migrate()
1654 list_add(&newpage->lru, &pool->lru); in z3fold_page_migrate()
1655 spin_unlock(&pool->lock); in z3fold_page_migrate()
1659 queue_work_on(new_zhdr->cpu, pool->compact_wq, &new_zhdr->work); in z3fold_page_migrate()
1670 struct z3fold_pool *pool; in z3fold_page_putback() local
1673 pool = zhdr_to_pool(zhdr); in z3fold_page_putback()
1680 atomic64_dec(&pool->pages_nr); in z3fold_page_putback()
1683 spin_lock(&pool->lock); in z3fold_page_putback()
1684 list_add(&page->lru, &pool->lru); in z3fold_page_putback()
1685 spin_unlock(&pool->lock); in z3fold_page_putback()
1700 static int z3fold_zpool_evict(struct z3fold_pool *pool, unsigned long handle) in z3fold_zpool_evict() argument
1702 if (pool->zpool && pool->zpool_ops && pool->zpool_ops->evict) in z3fold_zpool_evict()
1703 return pool->zpool_ops->evict(pool->zpool, handle); in z3fold_zpool_evict()
1716 struct z3fold_pool *pool; in z3fold_zpool_create() local
1718 pool = z3fold_create_pool(name, gfp, in z3fold_zpool_create()
1720 if (pool) { in z3fold_zpool_create()
1721 pool->zpool = zpool; in z3fold_zpool_create()
1722 pool->zpool_ops = zpool_ops; in z3fold_zpool_create()
1724 return pool; in z3fold_zpool_create()
1727 static void z3fold_zpool_destroy(void *pool) in z3fold_zpool_destroy() argument
1729 z3fold_destroy_pool(pool); in z3fold_zpool_destroy()
1732 static int z3fold_zpool_malloc(void *pool, size_t size, gfp_t gfp, in z3fold_zpool_malloc() argument
1735 return z3fold_alloc(pool, size, gfp, handle); in z3fold_zpool_malloc()
1737 static void z3fold_zpool_free(void *pool, unsigned long handle) in z3fold_zpool_free() argument
1739 z3fold_free(pool, handle); in z3fold_zpool_free()
1742 static int z3fold_zpool_shrink(void *pool, unsigned int pages, in z3fold_zpool_shrink() argument
1749 ret = z3fold_reclaim_page(pool, 8); in z3fold_zpool_shrink()
1761 static void *z3fold_zpool_map(void *pool, unsigned long handle, in z3fold_zpool_map() argument
1764 return z3fold_map(pool, handle); in z3fold_zpool_map()
1766 static void z3fold_zpool_unmap(void *pool, unsigned long handle) in z3fold_zpool_unmap() argument
1768 z3fold_unmap(pool, handle); in z3fold_zpool_unmap()
1771 static u64 z3fold_zpool_total_size(void *pool) in z3fold_zpool_total_size() argument
1773 return z3fold_get_pool_size(pool) * PAGE_SIZE; in z3fold_zpool_total_size()
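
Everything in this last block is a thin forwarder: the generic zpool layer calls through a table of function pointers and each z3fold_zpool_* entry delegates to the matching z3fold_* call, with total_size just multiplying the page count by PAGE_SIZE (line 1773). A userspace sketch of that driver-table pattern; the names are illustrative, not the kernel's struct zpool_driver:

#include <stdio.h>

#define SKETCH_PAGE_SIZE	4096ULL

struct backend_ops {
	unsigned long long (*total_size)(void *pool);
};

static unsigned long long backend_pages(void *pool)
{
	(void)pool;
	return 42;	/* stands in for atomic64_read(&pool->pages_nr) */
}

static unsigned long long shim_total_size(void *pool)
{
	/* mirrors z3fold_zpool_total_size(): pages * PAGE_SIZE */
	return backend_pages(pool) * SKETCH_PAGE_SIZE;
}

static const struct backend_ops ops = { .total_size = shim_total_size };

int main(void)
{
	printf("total bytes: %llu\n", ops.total_size(NULL));
	return 0;
}
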