Lines matching full:class (whole-word search hits for "class" in mm/zsmalloc.c)

143  *    determined). NOTE: all those class sizes must be set as multiple of
204 * Size of objects stored in this class. Must be multiple
282 unsigned int class:CLASS_BITS + 1; member
470 /* Protected by class->lock */
517 *class_idx = zspage->class; in get_zspage_mapping()
524 zspage->class = class_idx; in set_zspage_mapping()
530 * class maintains a list of zspages where each zspage is divided
533 * size class which has chunk size big enough to hold the given size.
547 static inline void zs_stat_inc(struct size_class *class, in zs_stat_inc() argument
550 class->stats.objs[type] += cnt; in zs_stat_inc()
554 static inline void zs_stat_dec(struct size_class *class, in zs_stat_dec() argument
557 class->stats.objs[type] -= cnt; in zs_stat_dec()
561 static inline unsigned long zs_stat_get(struct size_class *class, in zs_stat_get() argument
564 return class->stats.objs[type]; in zs_stat_get()
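
The hits above and below touch most of the fields of struct size_class: size, index, objs_per_zspage, pages_per_zspage, the per-class lock, the fullness lists, and the stats counters read by zs_stat_inc()/zs_stat_dec()/zs_stat_get(). A simplified sketch of such a structure, with field names taken from the listing but types and array-size constants assumed rather than copied from mm/zsmalloc.c:

	#include <linux/list.h>
	#include <linux/spinlock.h>

	/* Assumed sizes; the kernel derives both from its fullness/stat enums. */
	#define NR_FULLNESS_GROUPS	4	/* empty, almost_empty, almost_full, full */
	#define NR_STAT_TYPES		6

	struct zs_size_stat {
		unsigned long objs[NR_STAT_TYPES];	/* counters behind zs_stat_inc/dec/get() */
	};

	struct size_class {
		spinlock_t lock;			/* "Protected by class->lock" */
		struct list_head fullness_list[NR_FULLNESS_GROUPS]; /* one list head per group */
		int size;				/* object size, a multiple of ZS_ALIGN */
		int objs_per_zspage;			/* objects that fit into one zspage */
		int pages_per_zspage;			/* 0-order pages chained per zspage */
		unsigned int index;			/* slot in pool->size_class[] */
		struct zs_size_stat stats;
	};
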
584 static unsigned long zs_can_compact(struct size_class *class);
590 struct size_class *class; in zs_stats_size_show() local
599 "class", "size", "almost_full", "almost_empty", in zs_stats_size_show()
604 class = pool->size_class[i]; in zs_stats_size_show()
606 if (class->index != i) in zs_stats_size_show()
609 spin_lock(&class->lock); in zs_stats_size_show()
610 class_almost_full = zs_stat_get(class, CLASS_ALMOST_FULL); in zs_stats_size_show()
611 class_almost_empty = zs_stat_get(class, CLASS_ALMOST_EMPTY); in zs_stats_size_show()
612 obj_allocated = zs_stat_get(class, OBJ_ALLOCATED); in zs_stats_size_show()
613 obj_used = zs_stat_get(class, OBJ_USED); in zs_stats_size_show()
614 freeable = zs_can_compact(class); in zs_stats_size_show()
615 spin_unlock(&class->lock); in zs_stats_size_show()
617 objs_per_zspage = class->objs_per_zspage; in zs_stats_size_show()
619 class->pages_per_zspage; in zs_stats_size_show()
623 i, class->size, class_almost_full, class_almost_empty, in zs_stats_size_show()
625 class->pages_per_zspage, freeable); in zs_stats_size_show()
683 * For each size class, zspages are divided into different groups
689 static enum fullness_group get_fullness_group(struct size_class *class, in get_fullness_group() argument
696 objs_per_zspage = class->objs_per_zspage; in get_fullness_group()
711 * Each size class maintains various freelists and zspages are assigned
714 * identified by <class, fullness_group>.
716 static void insert_zspage(struct size_class *class, in insert_zspage() argument
722 zs_stat_inc(class, fullness, 1); in insert_zspage()
723 head = list_first_entry_or_null(&class->fullness_list[fullness], in insert_zspage()
732 list_add(&zspage->list, &class->fullness_list[fullness]); in insert_zspage()
737 * by <class, fullness_group>.
739 static void remove_zspage(struct size_class *class, in remove_zspage() argument
743 VM_BUG_ON(list_empty(&class->fullness_list[fullness])); in remove_zspage()
747 zs_stat_dec(class, fullness, 1); in remove_zspage()
751 * Each size class maintains zspages in different fullness groups depending
759 static enum fullness_group fix_fullness_group(struct size_class *class, in fix_fullness_group() argument
766 newfg = get_fullness_group(class, zspage); in fix_fullness_group()
771 remove_zspage(class, zspage, currfg); in fix_fullness_group()
772 insert_zspage(class, zspage, newfg); in fix_fullness_group()
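
Taken together, the get_fullness_group(), insert_zspage(), remove_zspage() and fix_fullness_group() hits describe how a zspage gets filed on one of the class's fullness lists: its group is recomputed from the number of in-use objects versus class->objs_per_zspage, and when the group changes the zspage is moved between list heads under class->lock. A simplified, standalone sketch of the classification step; classify_zspage() is a made-up name and the 3/4 boundary is an assumption (the kernel derives it from a threshold constant):

	enum fullness_group {
		ZS_EMPTY,
		ZS_ALMOST_EMPTY,
		ZS_ALMOST_FULL,
		ZS_FULL,
	};

	/* Classify a zspage by how many of its object slots are in use. */
	static enum fullness_group classify_zspage(int inuse, int objs_per_zspage)
	{
		if (inuse == 0)
			return ZS_EMPTY;
		if (inuse == objs_per_zspage)
			return ZS_FULL;
		if (inuse <= 3 * objs_per_zspage / 4)	/* assumed threshold */
			return ZS_ALMOST_EMPTY;
		return ZS_ALMOST_FULL;
	}

fix_fullness_group() only re-files the zspage when the newly computed group differs from the one recorded in the zspage's mapping.
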
783 * to form a zspage for each size class. This is important
790 * For example, for size class of 3/8 * PAGE_SIZE, we should
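
The truncated comment above is making a wastage argument: chaining several 0-order pages into one zspage lets awkward class sizes pack without slack. A small user-space illustration for the 3/8 * PAGE_SIZE example, assuming 4 KiB pages:

	#include <stdio.h>

	#define PAGE_SIZE 4096UL

	int main(void)
	{
		unsigned long size = 3 * PAGE_SIZE / 8;	/* 1536-byte size class */

		for (int pages = 1; pages <= 4; pages++) {
			unsigned long span  = pages * PAGE_SIZE;
			unsigned long objs  = span / size;
			unsigned long waste = span - objs * size;

			printf("pages_per_zspage=%d: %lu objects, %lu bytes wasted\n",
			       pages, objs, waste);
		}
		return 0;
	}

A single page wastes 1024 bytes (25%), while a chain of three pages holds exactly 8 objects with no waste, which is why three pages per zspage is the sensible choice for this class.
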
928 static void __free_zspage(struct zs_pool *pool, struct size_class *class, in __free_zspage() argument
937 assert_spin_locked(&class->lock); in __free_zspage()
955 zs_stat_dec(class, OBJ_ALLOCATED, class->objs_per_zspage); in __free_zspage()
956 atomic_long_sub(class->pages_per_zspage, in __free_zspage()
960 static void free_zspage(struct zs_pool *pool, struct size_class *class, in free_zspage() argument
971 remove_zspage(class, zspage, ZS_EMPTY); in free_zspage()
972 __free_zspage(pool, class, zspage); in free_zspage()
976 static void init_zspage(struct size_class *class, struct zspage *zspage) in init_zspage() argument
992 while ((off += class->size) < PAGE_SIZE) { in init_zspage()
994 link += class->size / sizeof(*link); in init_zspage()
1020 static void create_page_chain(struct size_class *class, struct zspage *zspage, in create_page_chain() argument
1026 int nr_pages = class->pages_per_zspage; in create_page_chain()
1043 if (unlikely(class->objs_per_zspage == 1 && in create_page_chain()
1044 class->pages_per_zspage == 1)) in create_page_chain()
1054 * Allocate a zspage for the given size class
1057 struct size_class *class, in alloc_zspage() argument
1070 for (i = 0; i < class->pages_per_zspage; i++) { in alloc_zspage()
1087 create_page_chain(class, zspage, pages); in alloc_zspage()
1088 init_zspage(class, zspage); in alloc_zspage()
1093 static struct zspage *find_get_zspage(struct size_class *class) in find_get_zspage() argument
1099 zspage = list_first_entry_or_null(&class->fullness_list[i], in find_get_zspage()
1215 static bool zspage_full(struct size_class *class, struct zspage *zspage) in zspage_full() argument
1217 return get_zspage_inuse(zspage) == class->objs_per_zspage; in zspage_full()
1251 struct size_class *class; in zs_map_object() local
1274 class = pool->size_class[class_idx]; in zs_map_object()
1275 off = (class->size * obj_idx) & ~PAGE_MASK; in zs_map_object()
1279 if (off + class->size <= PAGE_SIZE) { in zs_map_object()
1291 ret = __zs_map_object(area, pages, off, class->size); in zs_map_object()
1309 struct size_class *class; in zs_unmap_object() local
1316 class = pool->size_class[class_idx]; in zs_unmap_object()
1317 off = (class->size * obj_idx) & ~PAGE_MASK; in zs_unmap_object()
1320 if (off + class->size <= PAGE_SIZE) in zs_unmap_object()
1329 __zs_unmap_object(area, pages, off, class->size); in zs_unmap_object()
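
The zs_map_object()/zs_unmap_object() hits show the addressing rule: an object's in-page offset is (class->size * obj_idx) & ~PAGE_MASK, and when off + class->size fits within PAGE_SIZE the page can be mapped directly; otherwise the object straddles a page boundary and the __zs_map_object()/__zs_unmap_object() helpers handle the two-page case. A standalone sketch of just the span check, assuming 4 KiB pages (object_spans_two_pages() is a made-up helper, not kernel code):

	#include <stdbool.h>
	#include <stdio.h>

	#define PAGE_SIZE 4096UL
	#define PAGE_MASK (~(PAGE_SIZE - 1))

	/* Does the obj_idx-th object of a class with this size cross a page boundary? */
	static bool object_spans_two_pages(unsigned long size, unsigned long obj_idx)
	{
		unsigned long off = (size * obj_idx) & ~PAGE_MASK;	/* offset within its page */

		return off + size > PAGE_SIZE;
	}

	int main(void)
	{
		printf("%d\n", object_spans_two_pages(1536, 1));	/* 0: fits in one page */
		printf("%d\n", object_spans_two_pages(1536, 2));	/* 1: spills into the next page */
		return 0;
	}
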
1343 * The function returns the size of the first huge class - any object of equal
1357 static unsigned long obj_malloc(struct size_class *class, in obj_malloc() argument
1371 offset = obj * class->size; in obj_malloc()
1391 zs_stat_inc(class, OBJ_USED, 1); in obj_malloc()
1412 struct size_class *class; in zs_malloc() local
1425 class = pool->size_class[get_size_class_index(size)]; in zs_malloc()
1427 spin_lock(&class->lock); in zs_malloc()
1428 zspage = find_get_zspage(class); in zs_malloc()
1430 obj = obj_malloc(class, zspage, handle); in zs_malloc()
1432 fix_fullness_group(class, zspage); in zs_malloc()
1434 spin_unlock(&class->lock); in zs_malloc()
1439 spin_unlock(&class->lock); in zs_malloc()
1441 zspage = alloc_zspage(pool, class, gfp); in zs_malloc()
1447 spin_lock(&class->lock); in zs_malloc()
1448 obj = obj_malloc(class, zspage, handle); in zs_malloc()
1449 newfg = get_fullness_group(class, zspage); in zs_malloc()
1450 insert_zspage(class, zspage, newfg); in zs_malloc()
1451 set_zspage_mapping(zspage, class->index, newfg); in zs_malloc()
1453 atomic_long_add(class->pages_per_zspage, in zs_malloc()
1455 zs_stat_inc(class, OBJ_ALLOCATED, class->objs_per_zspage); in zs_malloc()
1459 spin_unlock(&class->lock); in zs_malloc()
1465 static void obj_free(struct size_class *class, unsigned long obj) in obj_free() argument
1475 f_offset = (class->size * f_objidx) & ~PAGE_MASK; in obj_free()
1486 zs_stat_dec(class, OBJ_USED, 1); in obj_free()
1496 struct size_class *class; in zs_free() local
1511 class = pool->size_class[class_idx]; in zs_free()
1513 spin_lock(&class->lock); in zs_free()
1514 obj_free(class, obj); in zs_free()
1515 fullness = fix_fullness_group(class, zspage); in zs_free()
1525 free_zspage(pool, class, zspage); in zs_free()
1528 spin_unlock(&class->lock); in zs_free()
1534 static void zs_object_copy(struct size_class *class, unsigned long dst, in zs_object_copy() argument
1544 s_size = d_size = class->size; in zs_object_copy()
1549 s_off = (class->size * s_objidx) & ~PAGE_MASK; in zs_object_copy()
1550 d_off = (class->size * d_objidx) & ~PAGE_MASK; in zs_object_copy()
1552 if (s_off + class->size > PAGE_SIZE) in zs_object_copy()
1555 if (d_off + class->size > PAGE_SIZE) in zs_object_copy()
1566 if (written == class->size) in zs_object_copy()
1580 s_size = class->size - written; in zs_object_copy()
1588 d_size = class->size - written; in zs_object_copy()
1601 static unsigned long find_alloced_obj(struct size_class *class, in find_alloced_obj() argument
1611 offset += class->size * index; in find_alloced_obj()
1622 offset += class->size; in find_alloced_obj()
1644 static int migrate_zspage(struct zs_pool *pool, struct size_class *class, in migrate_zspage() argument
1655 handle = find_alloced_obj(class, s_page, &obj_idx); in migrate_zspage()
1665 if (zspage_full(class, get_zspage(d_page))) { in migrate_zspage()
1672 free_obj = obj_malloc(class, get_zspage(d_page), handle); in migrate_zspage()
1673 zs_object_copy(class, free_obj, used_obj); in migrate_zspage()
1684 obj_free(class, used_obj); in migrate_zspage()
1694 static struct zspage *isolate_zspage(struct size_class *class, bool source) in isolate_zspage() argument
1706 zspage = list_first_entry_or_null(&class->fullness_list[fg[i]], in isolate_zspage()
1710 remove_zspage(class, zspage, fg[i]); in isolate_zspage()
1719 * putback_zspage - add @zspage into right class's fullness list
1720 * @class: destination class
1725 static enum fullness_group putback_zspage(struct size_class *class, in putback_zspage() argument
1732 fullness = get_fullness_group(class, zspage); in putback_zspage()
1733 insert_zspage(class, zspage, fullness); in putback_zspage()
1734 set_zspage_mapping(zspage, class->index, fullness); in putback_zspage()
1817 struct size_class *class, in putback_zspage_deferred() argument
1822 fg = putback_zspage(class, zspage); in putback_zspage_deferred()
1834 * checks the isolated count under &class->lock after enqueuing in zs_pool_dec_isolated()
1841 static void replace_sub_page(struct size_class *class, struct zspage *zspage, in replace_sub_page() argument
1857 create_page_chain(class, zspage, pages); in replace_sub_page()
1867 struct size_class *class; in zs_page_isolate() local
1883 * Without class lock, fullness could be stale while class_idx is okay in zs_page_isolate()
1885 * fullness again under class lock. in zs_page_isolate()
1890 class = pool->size_class[class_idx]; in zs_page_isolate()
1892 spin_lock(&class->lock); in zs_page_isolate()
1894 spin_unlock(&class->lock); in zs_page_isolate()
1900 spin_unlock(&class->lock); in zs_page_isolate()
1911 remove_zspage(class, zspage, fullness); in zs_page_isolate()
1915 spin_unlock(&class->lock); in zs_page_isolate()
1924 struct size_class *class; in zs_page_migrate() local
1953 class = pool->size_class[class_idx]; in zs_page_migrate()
1956 spin_lock(&class->lock); in zs_page_migrate()
1974 pos += class->size; in zs_page_migrate()
1985 addr += class->size) { in zs_page_migrate()
2000 replace_sub_page(class, zspage, newpage, page); in zs_page_migrate()
2016 putback_zspage_deferred(pool, class, zspage); in zs_page_migrate()
2032 addr += class->size) { in zs_page_migrate()
2041 spin_unlock(&class->lock); in zs_page_migrate()
2050 struct size_class *class; in zs_page_putback() local
2063 class = pool->size_class[class_idx]; in zs_page_putback()
2065 spin_lock(&class->lock); in zs_page_putback()
2072 putback_zspage_deferred(pool, class, zspage); in zs_page_putback()
2075 spin_unlock(&class->lock); in zs_page_putback()
2138 struct size_class *class; in async_free_zspage() local
2147 class = pool->size_class[i]; in async_free_zspage()
2148 if (class->index != i) in async_free_zspage()
2151 spin_lock(&class->lock); in async_free_zspage()
2152 list_splice_init(&class->fullness_list[ZS_EMPTY], &free_pages); in async_free_zspage()
2153 spin_unlock(&class->lock); in async_free_zspage()
2163 class = pool->size_class[class_idx]; in async_free_zspage()
2164 spin_lock(&class->lock); in async_free_zspage()
2165 __free_zspage(pool, class, zspage); in async_free_zspage()
2166 spin_unlock(&class->lock); in async_free_zspage()
2197 static unsigned long zs_can_compact(struct size_class *class) in zs_can_compact() argument
2200 unsigned long obj_allocated = zs_stat_get(class, OBJ_ALLOCATED); in zs_can_compact()
2201 unsigned long obj_used = zs_stat_get(class, OBJ_USED); in zs_can_compact()
2207 obj_wasted /= class->objs_per_zspage; in zs_can_compact()
2209 return obj_wasted * class->pages_per_zspage; in zs_can_compact()
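
The zs_can_compact() hits contain essentially the whole calculation: slots that are allocated but not used are converted into whole zspages worth of waste, then into pages. A standalone sketch of the same arithmetic (can_compact() is a made-up name, and the early return when nothing is wasted is assumed from context):

	/* Pages compaction could free in one size class; mirrors the hits above. */
	static unsigned long can_compact(unsigned long obj_allocated,
					 unsigned long obj_used,
					 int objs_per_zspage, int pages_per_zspage)
	{
		unsigned long obj_wasted;

		if (obj_allocated <= obj_used)
			return 0;			/* class is fully used, nothing to do */

		obj_wasted = obj_allocated - obj_used;	/* unused object slots */
		obj_wasted /= objs_per_zspage;		/* whole zspages worth of them */

		return obj_wasted * pages_per_zspage;	/* pages those zspages pin */
	}

For example, with objs_per_zspage = 8, pages_per_zspage = 2, obj_allocated = 40 and obj_used = 10, there are 30 unused slots, i.e. 3 whole zspages, so up to 6 pages are reported as freeable.
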
2213 struct size_class *class) in __zs_compact() argument
2220 spin_lock(&class->lock); in __zs_compact()
2221 while ((src_zspage = isolate_zspage(class, true))) { in __zs_compact()
2223 if (!zs_can_compact(class)) in __zs_compact()
2229 while ((dst_zspage = isolate_zspage(class, false))) { in __zs_compact()
2235 if (!migrate_zspage(pool, class, &cc)) in __zs_compact()
2238 putback_zspage(class, dst_zspage); in __zs_compact()
2245 putback_zspage(class, dst_zspage); in __zs_compact()
2246 if (putback_zspage(class, src_zspage) == ZS_EMPTY) { in __zs_compact()
2247 free_zspage(pool, class, src_zspage); in __zs_compact()
2248 pages_freed += class->pages_per_zspage; in __zs_compact()
2250 spin_unlock(&class->lock); in __zs_compact()
2252 spin_lock(&class->lock); in __zs_compact()
2256 putback_zspage(class, src_zspage); in __zs_compact()
2258 spin_unlock(&class->lock); in __zs_compact()
2266 struct size_class *class; in zs_compact() local
2270 class = pool->size_class[i]; in zs_compact()
2271 if (!class) in zs_compact()
2273 if (class->index != i) in zs_compact()
2275 pages_freed += __zs_compact(pool, class); in zs_compact()
2310 struct size_class *class; in zs_shrinker_count() local
2316 class = pool->size_class[i]; in zs_shrinker_count()
2317 if (!class) in zs_shrinker_count()
2319 if (class->index != i) in zs_shrinker_count()
2322 pages_to_free += zs_can_compact(class); in zs_shrinker_count()
2384 struct size_class *class; in zs_create_pool() local
2396 * class. Any object bigger than or equal to that will in zs_create_pool()
2397 * endup in the huge class. in zs_create_pool()
2406 * size class search - so object may be smaller than in zs_create_pool()
2407 * huge class size, yet it still can end up in the huge in zs_create_pool()
2408 * class because it grows by ZS_HANDLE_SIZE extra bytes in zs_create_pool()
2409 * right before class lookup. in zs_create_pool()
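
The zs_create_pool() comment above explains the off-by-ZS_HANDLE_SIZE subtlety: zs_malloc() grows every request by ZS_HANDLE_SIZE bytes right before the class lookup, so a request slightly below the first huge class size can still land in a huge class. Illustrative numbers only: if ZS_HANDLE_SIZE were 8 bytes and the first huge class started at 3264 bytes, a 3260-byte request would be looked up as 3268 bytes and be served from the huge class even though 3260 < 3264.
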
2430 class = kzalloc(sizeof(struct size_class), GFP_KERNEL); in zs_create_pool()
2431 if (!class) in zs_create_pool()
2434 class->size = size; in zs_create_pool()
2435 class->index = i; in zs_create_pool()
2436 class->pages_per_zspage = pages_per_zspage; in zs_create_pool()
2437 class->objs_per_zspage = objs_per_zspage; in zs_create_pool()
2438 spin_lock_init(&class->lock); in zs_create_pool()
2439 pool->size_class[i] = class; in zs_create_pool()
2442 INIT_LIST_HEAD(&class->fullness_list[fullness]); in zs_create_pool()
2444 prev_class = class; in zs_create_pool()
2479 struct size_class *class = pool->size_class[i]; in zs_destroy_pool() local
2481 if (!class) in zs_destroy_pool()
2484 if (class->index != i) in zs_destroy_pool()
2488 if (!list_empty(&class->fullness_list[fg])) { in zs_destroy_pool()
2489 pr_info("Freeing non-empty class with size %db, fullness group %d\n", in zs_destroy_pool()
2490 class->size, fg); in zs_destroy_pool()
2493 kfree(class); in zs_destroy_pool()