Lines Matching +full:page +full:- +full:size
10 * Released under the terms of 3-clause BSD License
16 * struct page(s) to form a zspage.
18 * Usage of struct page fields:
19 * page->private: points to zspage
20 * page->index: links together all component pages of a zspage
21 * For the huge page, this is always 0, so we use this field
23 * page->page_type: first object offset in a subpage of zspage
25 * Usage of struct page flags:
26 * PG_private: identifies the first component page
27 * PG_owner_priv_1: identifies the huge component page
36 * pool->migrate_lock
37 * class->lock
38 * zspage->lock
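The three lock lines above (pool->migrate_lock, class->lock, zspage->lock) spell out the documented lock ordering. A minimal sketch of how a writer path nests them, modelled loosely on the zs_page_migrate() lines matched further down; the function itself is illustrative and not part of the file:

    /* Illustrative nesting only; the lock names match the hierarchy above. */
    static void example_nested_locking(struct zs_pool *pool,
                                       struct size_class *class,
                                       struct zspage *zspage)
    {
            write_lock(&pool->migrate_lock);   /* 1. pool-wide migration lock */
            spin_lock(&class->lock);           /* 2. per size-class lock      */
            migrate_write_lock(zspage);        /* 3. per-zspage rwlock        */

            /* ... manipulate zspage/class state here ... */

            migrate_write_unlock(zspage);
            spin_unlock(&class->lock);
            write_unlock(&pool->migrate_lock);
    }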
72 * span more than 1 page, which avoids the complex case of mapping 2 pages simply
78 * A single 'zspage' is composed of up to 2^N discontiguous 0-order (single)
107 #define _PFN_BITS (MAX_POSSIBLE_PHYSMEM_BITS - PAGE_SHIFT)
113 * header keeps the handle, which is a 4-byte-aligned address, so we
118 #define OBJ_INDEX_BITS (BITS_PER_LONG - _PFN_BITS - OBJ_TAG_BITS)
119 #define OBJ_INDEX_MASK ((_AC(1, UL) << OBJ_INDEX_BITS) - 1)
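A hedged worked example of the resulting bit budget; the constants below are illustrative only (they depend on the architecture and config), and OBJ_TAG_BITS = 1 is an assumption since its definition did not match the search:

    /*
     * Illustration: BITS_PER_LONG = 64, PAGE_SHIFT = 12,
     * MAX_POSSIBLE_PHYSMEM_BITS = 44, OBJ_TAG_BITS = 1 (assumed):
     *   _PFN_BITS      = 44 - 12          = 32
     *   OBJ_INDEX_BITS = 64 - 32 - 1      = 31
     *   OBJ_INDEX_MASK = (1UL << 31) - 1  = 0x7fffffff
     */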
135 * On systems with 4K page size, this gives 255 size classes! There is a
136 * trade-off here:
137 * - A large number of size classes is potentially wasteful as free pages are
139 * - A small number of size classes causes large internal fragmentation
140 * - Probably it's better to use specific size classes (empirically
148 #define ZS_SIZE_CLASSES (DIV_ROUND_UP(ZS_MAX_ALLOC_SIZE - ZS_MIN_ALLOC_SIZE, \
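A hedged worked example of the class count for the 4K case mentioned at line 135, assuming ZS_MIN_ALLOC_SIZE = 32, ZS_MAX_ALLOC_SIZE = PAGE_SIZE and ZS_SIZE_CLASS_DELTA = 16, and that the macro's continuation line (not matched here) adds one after the division:

    /*
     * DIV_ROUND_UP(4096 - 32, 16) + 1 = 254 + 1 = 255 size classes
     */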
178 * We assign a page to ZS_ALMOST_EMPTY fullness group when:
198 * Size of objects stored in this class. Must be multiple
201 int size; member
212 * For every zspage, zspage->freeobj gives the head of this list.
220 * It's valid for a non-allocated object
250 /* protect page/zspage migration */
264 struct page *first_page;
282 zspage->huge = 1; in SetZsHugePage()
287 return zspage->huge; in ZsHugePage()
314 pool->handle_cachep = kmem_cache_create("zs_handle", ZS_HANDLE_SIZE, in create_cache()
316 if (!pool->handle_cachep) in create_cache()
319 pool->zspage_cachep = kmem_cache_create("zspage", sizeof(struct zspage), in create_cache()
321 if (!pool->zspage_cachep) { in create_cache()
322 kmem_cache_destroy(pool->handle_cachep); in create_cache()
323 pool->handle_cachep = NULL; in create_cache()
332 kmem_cache_destroy(pool->handle_cachep); in destroy_cache()
333 kmem_cache_destroy(pool->zspage_cachep); in destroy_cache()
338 return (unsigned long)kmem_cache_alloc(pool->handle_cachep, in cache_alloc_handle()
344 kmem_cache_free(pool->handle_cachep, (void *)handle); in cache_free_handle()
349 return kmem_cache_zalloc(pool->zspage_cachep, in cache_alloc_zspage()
355 kmem_cache_free(pool->zspage_cachep, zspage); in cache_free_zspage()
358 /* class->lock(which owns the handle) synchronizes races */
385 static int zs_zpool_malloc(void *pool, size_t size, gfp_t gfp, in zs_zpool_malloc() argument
388 *handle = zs_malloc(pool, size, gfp); in zs_zpool_malloc()
442 MODULE_ALIAS("zpool-zsmalloc");
445 /* per-cpu VM mapping areas for zspage accesses that cross page boundaries */
450 static __maybe_unused int is_first_page(struct page *page) in is_first_page() argument
452 return PagePrivate(page); in is_first_page()
455 /* Protected by class->lock */
458 return zspage->inuse; in get_zspage_inuse()
464 zspage->inuse += val; in mod_zspage_inuse()
467 static inline struct page *get_first_page(struct zspage *zspage) in get_first_page()
469 struct page *first_page = zspage->first_page; in get_first_page()
475 static inline unsigned int get_first_obj_offset(struct page *page) in get_first_obj_offset() argument
477 return page->page_type; in get_first_obj_offset()
480 static inline void set_first_obj_offset(struct page *page, unsigned int offset) in set_first_obj_offset() argument
482 page->page_type = offset; in set_first_obj_offset()
487 return zspage->freeobj; in get_freeobj()
492 zspage->freeobj = obj; in set_freeobj()
499 BUG_ON(zspage->magic != ZSPAGE_MAGIC); in get_zspage_mapping()
501 *fullness = zspage->fullness; in get_zspage_mapping()
502 *class_idx = zspage->class; in get_zspage_mapping()
508 return pool->size_class[zspage->class]; in zspage_class()
515 zspage->class = class_idx; in set_zspage_mapping()
516 zspage->fullness = fullness; in set_zspage_mapping()
520 * zsmalloc divides the pool into various size classes where each
523 * classes depending on its size. This function returns the index of the
524 * size class which has chunk size big enough to hold the given size.
526 static int get_size_class_index(int size) in get_size_class_index() argument
530 if (likely(size > ZS_MIN_ALLOC_SIZE)) in get_size_class_index()
531 idx = DIV_ROUND_UP(size - ZS_MIN_ALLOC_SIZE, in get_size_class_index()
534 return min_t(int, ZS_SIZE_CLASSES - 1, idx); in get_size_class_index()
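A brief worked example under the same 4K assumptions (ZS_MIN_ALLOC_SIZE = 32, ZS_SIZE_CLASS_DELTA = 16); the 100-byte request is arbitrary:

    /*
     * get_size_class_index(100):
     *   idx = DIV_ROUND_UP(100 - 32, 16) = DIV_ROUND_UP(68, 16) = 5
     * Index 5 nominally corresponds to a 32 + 5 * 16 = 112 byte chunk, the
     * smallest class size that can hold a 100-byte object (the class actually
     * installed at that index may be a merged neighbour with the same zspage
     * geometry, see can_merge() further down).
     */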
541 class->stats.objs[type] += cnt; in class_stat_inc()
548 class->stats.objs[type] -= cnt; in class_stat_dec()
555 return class->stats.objs[type]; in zs_stat_get()
580 struct zs_pool *pool = s->private; in zs_stats_size_show()
590 "class", "size", "almost_full", "almost_empty", in zs_stats_size_show()
595 class = pool->size_class[i]; in zs_stats_size_show()
597 if (class->index != i) in zs_stats_size_show()
600 spin_lock(&class->lock); in zs_stats_size_show()
606 spin_unlock(&class->lock); in zs_stats_size_show()
608 objs_per_zspage = class->objs_per_zspage; in zs_stats_size_show()
610 class->pages_per_zspage; in zs_stats_size_show()
614 i, class->size, class_almost_full, class_almost_empty, in zs_stats_size_show()
616 class->pages_per_zspage, freeable); in zs_stats_size_show()
643 pool->stat_dentry = debugfs_create_dir(name, zs_stat_root); in zs_pool_stat_create()
645 debugfs_create_file("classes", S_IFREG | 0444, pool->stat_dentry, pool, in zs_pool_stat_create()
651 debugfs_remove_recursive(pool->stat_dentry); in zs_pool_stat_destroy()
674 * For each size class, zspages are divided into different groups
678 * status of the given page.
687 objs_per_zspage = class->objs_per_zspage; in get_fullness_group()
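A short worked example of the rule quoted at line 178 ("n <= N / f"); the threshold fraction did not match the search, so f = 4 is an assumption here:

    /*
     * With N = objs_per_zspage = 32 and an assumed f = 4, a zspage holding
     * n <= 32 / 4 = 8 live objects would be classed as ZS_ALMOST_EMPTY.
     */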
702 * Each size class maintains various freelists and zspages are assigned
714 head = list_first_entry_or_null(&class->fullness_list[fullness], in insert_zspage()
718 * Put pages with higher ->inuse first. in insert_zspage()
721 list_add(&zspage->list, &head->list); in insert_zspage()
723 list_add(&zspage->list, &class->fullness_list[fullness]); in insert_zspage()
734 VM_BUG_ON(list_empty(&class->fullness_list[fullness])); in remove_zspage()
736 list_del_init(&zspage->list); in remove_zspage()
741 * Each size class maintains zspages in different fullness groups depending
743 * objects, the fullness status of the page can change, say, from ALMOST_FULL
745 * a status change has occurred for the given page and accordingly moves the
746 * page from the freelist of the old fullness group to that of the new
769 * to form a zspage for each size class. This is important
773 * usage = Zp - wastage
774 * where Zp = zspage size = k * PAGE_SIZE where k = 1, 2, ...
776 * For example, for size class of 3/8 * PAGE_SIZE, we should
783 /* zspage order which gives maximum used size per KB */ in get_pages_per_zspage()
792 usedpc = (zspage_size - waste) * 100 / zspage_size; in get_pages_per_zspage()
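Completing the example begun in the comment at line 776, a worked calculation for a class size of 3/8 * PAGE_SIZE = 1536 bytes, assuming 4K pages and a maximum of 4 pages per zspage:

    /*
     * usedpc(k) = (k * PAGE_SIZE - waste) * 100 / (k * PAGE_SIZE),
     * waste = (k * PAGE_SIZE) % 1536:
     *   k = 1:  4096 % 1536 = 1024  ->  75%
     *   k = 2:  8192 % 1536 =  512  ->  93%
     *   k = 3: 12288 % 1536 =    0  -> 100%
     *   k = 4: 16384 % 1536 = 1024  ->  93%
     * so get_pages_per_zspage(1536) settles on a 3-page zspage.
     */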
803 static struct zspage *get_zspage(struct page *page) in get_zspage() argument
805 struct zspage *zspage = (struct zspage *)page_private(page); in get_zspage()
807 BUG_ON(zspage->magic != ZSPAGE_MAGIC); in get_zspage()
811 static struct page *get_next_page(struct page *page) in get_next_page() argument
813 struct zspage *zspage = get_zspage(page); in get_next_page()
818 return (struct page *)page->index; in get_next_page()
822 * obj_to_location - get (<page>, <obj_idx>) from encoded object value
824 * @page: page the object resides in
827 static void obj_to_location(unsigned long obj, struct page **page, in obj_to_location() argument
831 *page = pfn_to_page(obj >> OBJ_INDEX_BITS); in obj_to_location()
835 static void obj_to_page(unsigned long obj, struct page **page) in obj_to_page() argument
838 *page = pfn_to_page(obj >> OBJ_INDEX_BITS); in obj_to_page()
842 * location_to_obj - get obj value encoded from (<page>, <obj_idx>)
843 * @page: page the object resides in
846 static unsigned long location_to_obj(struct page *page, unsigned int obj_idx) in location_to_obj() argument
850 obj = page_to_pfn(page) << OBJ_INDEX_BITS; in location_to_obj()
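A hedged sketch of the value these two helpers translate; the final tag shift happens on lines that did not match the search, so treat the low-bit detail as an assumption:

    /*
     *  <------ _PFN_BITS ------><--- OBJ_INDEX_BITS ---><- OBJ_TAG_BITS ->
     * +-------------------------+-----------------------+-----------------+
     * |     PFN of the page     | object index in zspage|  allocated tag  |
     * +-------------------------+-----------------------+-----------------+
     */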
862 static bool obj_allocated(struct page *page, void *obj, unsigned long *phandle) in obj_allocated() argument
865 struct zspage *zspage = get_zspage(page); in obj_allocated()
868 VM_BUG_ON_PAGE(!is_first_page(page), page); in obj_allocated()
869 handle = page->index; in obj_allocated()
880 static void reset_page(struct page *page) in reset_page() argument
882 __ClearPageMovable(page); in reset_page()
883 ClearPagePrivate(page); in reset_page()
884 set_page_private(page, 0); in reset_page()
885 page_mapcount_reset(page); in reset_page()
886 page->index = 0; in reset_page()
891 struct page *cursor, *fail; in trylock_zspage()
913 struct page *page, *next; in __free_zspage() local
919 assert_spin_locked(&class->lock); in __free_zspage()
924 next = page = get_first_page(zspage); in __free_zspage()
926 VM_BUG_ON_PAGE(!PageLocked(page), page); in __free_zspage()
927 next = get_next_page(page); in __free_zspage()
928 reset_page(page); in __free_zspage()
929 unlock_page(page); in __free_zspage()
930 dec_zone_page_state(page, NR_ZSPAGES); in __free_zspage()
931 put_page(page); in __free_zspage()
932 page = next; in __free_zspage()
933 } while (page != NULL); in __free_zspage()
937 class_stat_dec(class, OBJ_ALLOCATED, class->objs_per_zspage); in __free_zspage()
938 atomic_long_sub(class->pages_per_zspage, in __free_zspage()
939 &pool->pages_allocated); in __free_zspage()
946 VM_BUG_ON(list_empty(&zspage->list)); in free_zspage()
950 * lock_page. The page locks that trylock_zspage took will be released in free_zspage
967 struct page *page = get_first_page(zspage); in init_zspage() local
969 while (page) { in init_zspage()
970 struct page *next_page; in init_zspage()
974 set_first_obj_offset(page, off); in init_zspage()
976 vaddr = kmap_atomic(page); in init_zspage()
979 while ((off += class->size) < PAGE_SIZE) { in init_zspage()
980 link->next = freeobj++ << OBJ_TAG_BITS; in init_zspage()
981 link += class->size / sizeof(*link); in init_zspage()
986 * page, which must point to the first object on the next in init_zspage()
987 * page (if present) in init_zspage()
989 next_page = get_next_page(page); in init_zspage()
991 link->next = freeobj++ << OBJ_TAG_BITS; in init_zspage()
997 link->next = -1UL << OBJ_TAG_BITS; in init_zspage()
1000 page = next_page; in init_zspage()
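The loop above threads a singly linked free list through the unused slots. A sketch of the result for a fresh zspage (indices are illustrative):

    /*
     * slot 0 -> slot 1 -> slot 2 -> ... -> slot N-1 -> end marker (-1)
     *
     * Each link->next holds the next free object index shifted left by
     * OBJ_TAG_BITS; the index keeps counting across component pages, and the
     * last slot of the last page stores the -1UL end marker.
     */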
1008 struct page *pages[]) in create_page_chain()
1011 struct page *page; in create_page_chain() local
1012 struct page *prev_page = NULL; in create_page_chain()
1013 int nr_pages = class->pages_per_zspage; in create_page_chain()
1017 * 1. all pages are linked together using page->index in create_page_chain()
1018 * 2. each sub-page points to the zspage using page->private in create_page_chain()
1020 * we set PG_private to identify the first page (i.e. no other sub-page in create_page_chain()
1024 page = pages[i]; in create_page_chain()
1025 set_page_private(page, (unsigned long)zspage); in create_page_chain()
1026 page->index = 0; in create_page_chain()
1028 zspage->first_page = page; in create_page_chain()
1029 SetPagePrivate(page); in create_page_chain()
1030 if (unlikely(class->objs_per_zspage == 1 && in create_page_chain()
1031 class->pages_per_zspage == 1)) in create_page_chain()
1034 prev_page->index = (unsigned long)page; in create_page_chain()
1036 prev_page = page; in create_page_chain()
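A sketch of the chain the loop above builds, matching the struct page field usage listed at the top of the file (lines 19-27):

    /*
     *   zspage->first_page
     *        |
     *        v        page->index              page->index
     *   [ page 0 ] --------------> [ page 1 ] --- ... ---> [ page N-1 ] -> 0
     *   PG_private set
     *
     * Every component page's page->private points back at the zspage; only
     * the first page carries PG_private.
     */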
1041 * Allocate a zspage for the given size class
1048 struct page *pages[ZS_MAX_PAGES_PER_ZSPAGE]; in alloc_zspage()
1054 zspage->magic = ZSPAGE_MAGIC; in alloc_zspage()
1057 for (i = 0; i < class->pages_per_zspage; i++) { in alloc_zspage()
1058 struct page *page; in alloc_zspage() local
1060 page = alloc_page(gfp); in alloc_zspage()
1061 if (!page) { in alloc_zspage()
1062 while (--i >= 0) { in alloc_zspage()
1070 inc_zone_page_state(page, NR_ZSPAGES); in alloc_zspage()
1071 pages[i] = page; in alloc_zspage()
1076 zspage->pool = pool; in alloc_zspage()
1086 for (i = ZS_ALMOST_FULL; i >= ZS_EMPTY; i--) { in find_get_zspage()
1087 zspage = list_first_entry_or_null(&class->fullness_list[i], in find_get_zspage()
1102 if (area->vm_buf) in __zs_cpu_up()
1104 area->vm_buf = kmalloc(ZS_MAX_ALLOC_SIZE, GFP_KERNEL); in __zs_cpu_up()
1105 if (!area->vm_buf) in __zs_cpu_up()
1106 return -ENOMEM; in __zs_cpu_up()
1112 kfree(area->vm_buf); in __zs_cpu_down()
1113 area->vm_buf = NULL; in __zs_cpu_down()
1117 struct page *pages[2], int off, int size) in __zs_map_object() argument
1121 char *buf = area->vm_buf; in __zs_map_object()
1123 /* disable page faults to match kmap_atomic() return conditions */ in __zs_map_object()
1127 if (area->vm_mm == ZS_MM_WO) in __zs_map_object()
1130 sizes[0] = PAGE_SIZE - off; in __zs_map_object()
1131 sizes[1] = size - sizes[0]; in __zs_map_object()
1133 /* copy object to per-cpu buffer */ in __zs_map_object()
1141 return area->vm_buf; in __zs_map_object()
1145 struct page *pages[2], int off, int size) in __zs_unmap_object() argument
1152 if (area->vm_mm == ZS_MM_RO) in __zs_unmap_object()
1155 buf = area->vm_buf; in __zs_unmap_object()
1157 size -= ZS_HANDLE_SIZE; in __zs_unmap_object()
1160 sizes[0] = PAGE_SIZE - off; in __zs_unmap_object()
1161 sizes[1] = size - sizes[0]; in __zs_unmap_object()
1163 /* copy per-cpu buffer to object */ in __zs_unmap_object()
1172 /* enable page faults to match kunmap_atomic() return conditions */ in __zs_unmap_object()
1196 if (prev->pages_per_zspage == pages_per_zspage && in can_merge()
1197 prev->objs_per_zspage == objs_per_zspage) in can_merge()
1205 return get_zspage_inuse(zspage) == class->objs_per_zspage; in zspage_full()
1210 return atomic_long_read(&pool->pages_allocated); in zs_get_total_pages()
1215 * zs_map_object - get address of allocated object from handle.
1227 * This function returns with preemption and page faults disabled.
1233 struct page *page; in zs_map_object() local
1239 struct page *pages[2]; in zs_map_object()
1243 * Because we use per-cpu mapping areas shared among the in zs_map_object()
1250 read_lock(&pool->migrate_lock); in zs_map_object()
1252 obj_to_location(obj, &page, &obj_idx); in zs_map_object()
1253 zspage = get_zspage(page); in zs_map_object()
1256 * migration cannot move any zpages in this zspage. Here, class->lock in zs_map_object()
1262 read_unlock(&pool->migrate_lock); in zs_map_object()
1265 off = (class->size * obj_idx) & ~PAGE_MASK; in zs_map_object()
1269 area->vm_mm = mm; in zs_map_object()
1270 if (off + class->size <= PAGE_SIZE) { in zs_map_object()
1271 /* this object is contained entirely within a page */ in zs_map_object()
1272 area->vm_addr = kmap_atomic(page); in zs_map_object()
1273 ret = area->vm_addr + off; in zs_map_object()
1278 pages[0] = page; in zs_map_object()
1279 pages[1] = get_next_page(page); in zs_map_object()
1282 ret = __zs_map_object(area, pages, off, class->size); in zs_map_object()
1294 struct page *page; in zs_unmap_object() local
1302 obj_to_location(obj, &page, &obj_idx); in zs_unmap_object()
1303 zspage = get_zspage(page); in zs_unmap_object()
1305 off = (class->size * obj_idx) & ~PAGE_MASK; in zs_unmap_object()
1308 if (off + class->size <= PAGE_SIZE) in zs_unmap_object()
1309 kunmap_atomic(area->vm_addr); in zs_unmap_object()
1311 struct page *pages[2]; in zs_unmap_object()
1313 pages[0] = page; in zs_unmap_object()
1314 pages[1] = get_next_page(page); in zs_unmap_object()
1317 __zs_unmap_object(area, pages, off, class->size); in zs_unmap_object()
1326 * zs_huge_class_size() - Returns the size (in bytes) of the first huge
1330 * The function returns the size of the first huge class - any object of equal
1331 * or bigger size will be stored in zspage consisting of a single physical
1332 * page.
1336 * Return: the size (in bytes) of the first huge zsmalloc &size_class.
1352 struct page *m_page; in obj_malloc()
1356 class = pool->size_class[zspage->class]; in obj_malloc()
1360 offset = obj * class->size; in obj_malloc()
1370 set_freeobj(zspage, link->next >> OBJ_TAG_BITS); in obj_malloc()
1373 link->handle = handle; in obj_malloc()
1375 /* record the handle in page->index */ in obj_malloc()
1376 zspage->first_page->index = handle; in obj_malloc()
1388 * zs_malloc - Allocate block of given size from pool.
1390 * @size: size of block to allocate
1395 * Allocation requests with size > ZS_MAX_ALLOC_SIZE will fail.
1397 unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp) in zs_malloc() argument
1404 if (unlikely(!size || size > ZS_MAX_ALLOC_SIZE)) in zs_malloc()
1405 return (unsigned long)ERR_PTR(-EINVAL); in zs_malloc()
1409 return (unsigned long)ERR_PTR(-ENOMEM); in zs_malloc()
1412 size += ZS_HANDLE_SIZE; in zs_malloc()
1413 class = pool->size_class[get_size_class_index(size)]; in zs_malloc()
1415 /* class->lock effectively protects the zpage migration */ in zs_malloc()
1416 spin_lock(&class->lock); in zs_malloc()
1424 spin_unlock(&class->lock); in zs_malloc()
1429 spin_unlock(&class->lock); in zs_malloc()
1434 return (unsigned long)ERR_PTR(-ENOMEM); in zs_malloc()
1437 spin_lock(&class->lock); in zs_malloc()
1441 set_zspage_mapping(zspage, class->index, newfg); in zs_malloc()
1443 atomic_long_add(class->pages_per_zspage, in zs_malloc()
1444 &pool->pages_allocated); in zs_malloc()
1445 class_stat_inc(class, OBJ_ALLOCATED, class->objs_per_zspage); in zs_malloc()
1450 spin_unlock(&class->lock); in zs_malloc()
1460 struct page *f_page; in obj_free()
1474 link->next = get_freeobj(zspage) << OBJ_TAG_BITS; in obj_free()
1476 f_page->index = 0; in obj_free()
1479 mod_zspage_inuse(zspage, -1); in obj_free()
1485 struct page *f_page; in zs_free()
1494 * The pool->migrate_lock protects the race with zpage's migration in zs_free()
1495 * so it's safe to get the page from handle. in zs_free()
1497 read_lock(&pool->migrate_lock); in zs_free()
1502 spin_lock(&class->lock); in zs_free()
1503 read_unlock(&pool->migrate_lock); in zs_free()
1505 obj_free(class->size, obj); in zs_free()
1513 spin_unlock(&class->lock); in zs_free()
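Taken together, zs_malloc(), zs_map_object(), zs_unmap_object() and zs_free() are the external allocation path matched above. A minimal, hedged caller sketch; the error-handling convention follows the ERR_PTR casts visible at lines 1405 and 1409, and the function below is illustrative, not part of the file:

    #include <linux/zsmalloc.h>
    #include <linux/err.h>
    #include <linux/gfp.h>
    #include <linux/string.h>

    static int zsmalloc_usage_sketch(void)
    {
            struct zs_pool *pool;
            unsigned long handle;
            char payload[64] = "example payload";
            void *dst;

            pool = zs_create_pool("example");
            if (!pool)
                    return -ENOMEM;

            handle = zs_malloc(pool, sizeof(payload), GFP_KERNEL);
            if (IS_ERR_VALUE(handle)) {
                    zs_destroy_pool(pool);
                    return PTR_ERR((void *)handle);
            }

            /* Returns with preemption and page faults disabled (see line 1227). */
            dst = zs_map_object(pool, handle, ZS_MM_WO);
            memcpy(dst, payload, sizeof(payload));
            zs_unmap_object(pool, handle);

            zs_free(pool, handle);
            zs_destroy_pool(pool);
            return 0;
    }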
1521 struct page *s_page, *d_page; in zs_object_copy()
1525 int s_size, d_size, size; in zs_object_copy() local
1528 s_size = d_size = class->size; in zs_object_copy()
1533 s_off = (class->size * s_objidx) & ~PAGE_MASK; in zs_object_copy()
1534 d_off = (class->size * d_objidx) & ~PAGE_MASK; in zs_object_copy()
1536 if (s_off + class->size > PAGE_SIZE) in zs_object_copy()
1537 s_size = PAGE_SIZE - s_off; in zs_object_copy()
1539 if (d_off + class->size > PAGE_SIZE) in zs_object_copy()
1540 d_size = PAGE_SIZE - d_off; in zs_object_copy()
1546 size = min(s_size, d_size); in zs_object_copy()
1547 memcpy(d_addr + d_off, s_addr + s_off, size); in zs_object_copy()
1548 written += size; in zs_object_copy()
1550 if (written == class->size) in zs_object_copy()
1553 s_off += size; in zs_object_copy()
1554 s_size -= size; in zs_object_copy()
1555 d_off += size; in zs_object_copy()
1556 d_size -= size; in zs_object_copy()
1571 s_size = class->size - written; in zs_object_copy()
1579 d_size = class->size - written; in zs_object_copy()
1593 struct page *page, int *obj_idx) in find_alloced_obj() argument
1598 void *addr = kmap_atomic(page); in find_alloced_obj()
1600 offset = get_first_obj_offset(page); in find_alloced_obj()
1601 offset += class->size * index; in find_alloced_obj()
1604 if (obj_allocated(page, addr + offset, &handle)) in find_alloced_obj()
1607 offset += class->size; in find_alloced_obj()
1620 struct page *s_page;
1621 /* Destination page for migration which should be a first page
1623 struct page *d_page;
1634 struct page *s_page = cc->s_page; in migrate_zspage()
1635 struct page *d_page = cc->d_page; in migrate_zspage()
1636 int obj_idx = cc->obj_idx; in migrate_zspage()
1651 ret = -ENOMEM; in migrate_zspage()
1660 obj_free(class->size, used_obj); in migrate_zspage()
1664 cc->s_page = s_page; in migrate_zspage()
1665 cc->obj_idx = obj_idx; in migrate_zspage()
1682 zspage = list_first_entry_or_null(&class->fullness_list[fg[i]], in isolate_zspage()
1694 * putback_zspage - add @zspage into the right class's fullness list
1696 * @zspage: target page
1707 set_zspage_mapping(zspage, class->index, fullness); in putback_zspage()
1719 struct page *curr_page, *page; in lock_zspage() local
1724 * lock each page under migrate_read_lock(). Otherwise, the page we lock in lock_zspage()
1726 * the wrong page to unlock, so we must take a reference to the page in lock_zspage()
1731 page = get_first_page(zspage); in lock_zspage()
1732 if (trylock_page(page)) in lock_zspage()
1734 get_page(page); in lock_zspage()
1736 wait_on_page_locked(page); in lock_zspage()
1737 put_page(page); in lock_zspage()
1740 curr_page = page; in lock_zspage()
1741 while ((page = get_next_page(curr_page))) { in lock_zspage()
1742 if (trylock_page(page)) { in lock_zspage()
1743 curr_page = page; in lock_zspage()
1745 get_page(page); in lock_zspage()
1747 wait_on_page_locked(page); in lock_zspage()
1748 put_page(page); in lock_zspage()
1757 rwlock_init(&zspage->lock); in migrate_lock_init()
1760 static void migrate_read_lock(struct zspage *zspage) __acquires(&zspage->lock) in migrate_read_lock()
1762 read_lock(&zspage->lock); in migrate_read_lock()
1765 static void migrate_read_unlock(struct zspage *zspage) __releases(&zspage->lock) in migrate_read_unlock()
1767 read_unlock(&zspage->lock); in migrate_read_unlock()
1772 write_lock(&zspage->lock); in migrate_write_lock()
1777 write_lock_nested(&zspage->lock, SINGLE_DEPTH_NESTING); in migrate_write_lock_nested()
1782 write_unlock(&zspage->lock); in migrate_write_unlock()
1785 /* Number of isolated subpages for *page migration* in this zspage */
1788 zspage->isolated++; in inc_zspage_isolation()
1793 VM_BUG_ON(zspage->isolated == 0); in dec_zspage_isolation()
1794 zspage->isolated--; in dec_zspage_isolation()
1800 struct page *newpage, struct page *oldpage) in replace_sub_page()
1802 struct page *page; in replace_sub_page() local
1803 struct page *pages[ZS_MAX_PAGES_PER_ZSPAGE] = {NULL, }; in replace_sub_page()
1806 page = get_first_page(zspage); in replace_sub_page()
1808 if (page == oldpage) in replace_sub_page()
1811 pages[idx] = page; in replace_sub_page()
1813 } while ((page = get_next_page(page)) != NULL); in replace_sub_page()
1818 newpage->index = oldpage->index; in replace_sub_page()
1822 static bool zs_page_isolate(struct page *page, isolate_mode_t mode) in zs_page_isolate() argument
1827 * Page is locked, so the zspage cannot be destroyed. For details, look at in zs_page_isolate()
1830 VM_BUG_ON_PAGE(!PageMovable(page), page); in zs_page_isolate()
1831 VM_BUG_ON_PAGE(PageIsolated(page), page); in zs_page_isolate()
1833 zspage = get_zspage(page); in zs_page_isolate()
1841 static int zs_page_migrate(struct page *newpage, struct page *page, in zs_page_migrate() argument
1847 struct page *dummy; in zs_page_migrate()
1860 return -EINVAL; in zs_page_migrate()
1862 VM_BUG_ON_PAGE(!PageMovable(page), page); in zs_page_migrate()
1863 VM_BUG_ON_PAGE(!PageIsolated(page), page); in zs_page_migrate()
1865 /* The page is locked, so this pointer must remain valid */ in zs_page_migrate()
1866 zspage = get_zspage(page); in zs_page_migrate()
1867 pool = zspage->pool; in zs_page_migrate()
1873 write_lock(&pool->migrate_lock); in zs_page_migrate()
1879 spin_lock(&class->lock); in zs_page_migrate()
1883 offset = get_first_obj_offset(page); in zs_page_migrate()
1884 s_addr = kmap_atomic(page); in zs_page_migrate()
1894 addr += class->size) { in zs_page_migrate()
1895 if (obj_allocated(page, addr, &handle)) { in zs_page_migrate()
1906 replace_sub_page(class, zspage, newpage, page); in zs_page_migrate()
1911 write_unlock(&pool->migrate_lock); in zs_page_migrate()
1912 spin_unlock(&class->lock); in zs_page_migrate()
1917 if (page_zone(newpage) != page_zone(page)) { in zs_page_migrate()
1918 dec_zone_page_state(page, NR_ZSPAGES); in zs_page_migrate()
1922 reset_page(page); in zs_page_migrate()
1923 put_page(page); in zs_page_migrate()
1928 static void zs_page_putback(struct page *page) in zs_page_putback() argument
1932 VM_BUG_ON_PAGE(!PageMovable(page), page); in zs_page_putback()
1933 VM_BUG_ON_PAGE(!PageIsolated(page), page); in zs_page_putback()
1935 zspage = get_zspage(page); in zs_page_putback()
1963 class = pool->size_class[i]; in async_free_zspage()
1964 if (class->index != i) in async_free_zspage()
1967 spin_lock(&class->lock); in async_free_zspage()
1968 list_splice_init(&class->fullness_list[ZS_EMPTY], &free_pages); in async_free_zspage()
1969 spin_unlock(&class->lock); in async_free_zspage()
1973 list_del(&zspage->list); in async_free_zspage()
1978 class = pool->size_class[class_idx]; in async_free_zspage()
1979 spin_lock(&class->lock); in async_free_zspage()
1981 spin_unlock(&class->lock); in async_free_zspage()
1987 schedule_work(&pool->free_work); in kick_deferred_free()
1992 flush_work(&pool->free_work); in zs_flush_migration()
1997 INIT_WORK(&pool->free_work, async_free_zspage); in init_deferred_free()
2002 struct page *page = get_first_page(zspage); in SetZsPageMovable() local
2005 WARN_ON(!trylock_page(page)); in SetZsPageMovable()
2006 __SetPageMovable(page, &zsmalloc_mops); in SetZsPageMovable()
2007 unlock_page(page); in SetZsPageMovable()
2008 } while ((page = get_next_page(page)) != NULL); in SetZsPageMovable()
2028 obj_wasted = obj_allocated - obj_used; in zs_can_compact()
2029 obj_wasted /= class->objs_per_zspage; in zs_can_compact()
2031 return obj_wasted * class->pages_per_zspage; in zs_can_compact()
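A worked example of the estimate above; the figures are arbitrary:

    /*
     * obj_allocated = 100, obj_used = 70, objs_per_zspage = 10,
     * pages_per_zspage = 2:
     *   obj_wasted = (100 - 70) / 10 = 3   (whole zspages worth of holes)
     *   zs_can_compact() -> 3 * 2 = 6 potentially freeable pages
     */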
2043 write_lock(&pool->migrate_lock); in __zs_compact()
2045 spin_lock(&class->lock); in __zs_compact()
2070 if (rwlock_is_contended(&pool->migrate_lock)) in __zs_compact()
2084 pages_freed += class->pages_per_zspage; in __zs_compact()
2087 spin_unlock(&class->lock); in __zs_compact()
2088 write_unlock(&pool->migrate_lock); in __zs_compact()
2090 write_lock(&pool->migrate_lock); in __zs_compact()
2091 spin_lock(&class->lock); in __zs_compact()
2099 spin_unlock(&class->lock); in __zs_compact()
2100 write_unlock(&pool->migrate_lock); in __zs_compact()
2111 for (i = ZS_SIZE_CLASSES - 1; i >= 0; i--) { in zs_compact()
2112 class = pool->size_class[i]; in zs_compact()
2113 if (class->index != i) in zs_compact()
2117 atomic_long_add(pages_freed, &pool->stats.pages_compacted); in zs_compact()
2125 memcpy(stats, &pool->stats, sizeof(struct zs_pool_stats)); in zs_pool_stats()
2155 for (i = ZS_SIZE_CLASSES - 1; i >= 0; i--) { in zs_shrinker_count()
2156 class = pool->size_class[i]; in zs_shrinker_count()
2157 if (class->index != i) in zs_shrinker_count()
2168 unregister_shrinker(&pool->shrinker); in zs_unregister_shrinker()
2173 pool->shrinker.scan_objects = zs_shrinker_scan; in zs_register_shrinker()
2174 pool->shrinker.count_objects = zs_shrinker_count; in zs_register_shrinker()
2175 pool->shrinker.batch = 0; in zs_register_shrinker()
2176 pool->shrinker.seeks = DEFAULT_SEEKS; in zs_register_shrinker()
2178 return register_shrinker(&pool->shrinker, "mm-zspool:%s", in zs_register_shrinker()
2179 pool->name); in zs_register_shrinker()
2183 * zs_create_pool - Creates an allocation pool to work from.
2203 rwlock_init(&pool->migrate_lock); in zs_create_pool()
2205 pool->name = kstrdup(name, GFP_KERNEL); in zs_create_pool()
2206 if (!pool->name) in zs_create_pool()
2213 * Iterate in reverse, because the size of the size_class that we want to use in zs_create_pool()
2214 * for merging should be larger than or equal to the current size. in zs_create_pool()
2216 for (i = ZS_SIZE_CLASSES - 1; i >= 0; i--) { in zs_create_pool()
2217 int size; in zs_create_pool() local
2223 size = ZS_MIN_ALLOC_SIZE + i * ZS_SIZE_CLASS_DELTA; in zs_create_pool()
2224 if (size > ZS_MAX_ALLOC_SIZE) in zs_create_pool()
2225 size = ZS_MAX_ALLOC_SIZE; in zs_create_pool()
2226 pages_per_zspage = get_pages_per_zspage(size); in zs_create_pool()
2227 objs_per_zspage = pages_per_zspage * PAGE_SIZE / size; in zs_create_pool()
2231 * so huge_class_size holds the size of the first huge in zs_create_pool()
2237 huge_class_size = size; in zs_create_pool()
2241 * unconditionally adds handle size before it performs in zs_create_pool()
2242 * size class search - so object may be smaller than in zs_create_pool()
2243 * huge class size, yet it still can end up in the huge in zs_create_pool()
2247 huge_class_size -= (ZS_HANDLE_SIZE - 1); in zs_create_pool()
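A short arithmetic note, assuming ZS_HANDLE_SIZE is the size of an unsigned long, i.e. 8 bytes on a 64-bit build:

    /*
     * If the first single-object ("huge") class has a raw chunk size of S,
     * the value reported by zs_huge_class_size() is S - 7: zs_malloc() adds
     * the 8-byte handle before the class lookup, so requests slightly
     * smaller than S can still land in the huge class, and the subtraction
     * accounts for that.
     */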
2252 * as alloc/free for that size. Although it is natural that we in zs_create_pool()
2253 * have one size_class for each size, there is a chance that we in zs_create_pool()
2261 pool->size_class[i] = prev_class; in zs_create_pool()
2270 class->size = size; in zs_create_pool()
2271 class->index = i; in zs_create_pool()
2272 class->pages_per_zspage = pages_per_zspage; in zs_create_pool()
2273 class->objs_per_zspage = objs_per_zspage; in zs_create_pool()
2274 spin_lock_init(&class->lock); in zs_create_pool()
2275 pool->size_class[i] = class; in zs_create_pool()
2278 INIT_LIST_HEAD(&class->fullness_list[fullness]); in zs_create_pool()
2312 struct size_class *class = pool->size_class[i]; in zs_destroy_pool()
2317 if (class->index != i) in zs_destroy_pool()
2321 if (!list_empty(&class->fullness_list[fg])) { in zs_destroy_pool()
2322 pr_info("Freeing non-empty class with size %db, fullness group %d\n", in zs_destroy_pool()
2323 class->size, fg); in zs_destroy_pool()
2330 kfree(pool->name); in zs_destroy_pool()