Lines Matching full:class

143  *    determined). NOTE: all those class sizes must be set as multiple of
204 * Size of objects stored in this class. Must be multiple
282 unsigned int class:CLASS_BITS + 1; member
470 /* Protected by class->lock */
517 *class_idx = zspage->class; in get_zspage_mapping()
524 zspage->class = class_idx; in set_zspage_mapping()
530 * class maintains a list of zspages where each zspage is divided
533 * size class which has chunk size big enough to hold the given size.
547 static inline void zs_stat_inc(struct size_class *class, in zs_stat_inc() argument
550 class->stats.objs[type] += cnt; in zs_stat_inc()
554 static inline void zs_stat_dec(struct size_class *class, in zs_stat_dec() argument
557 class->stats.objs[type] -= cnt; in zs_stat_dec()
561 static inline unsigned long zs_stat_get(struct size_class *class, in zs_stat_get() argument
564 return class->stats.objs[type]; in zs_stat_get()
584 static unsigned long zs_can_compact(struct size_class *class);
590 struct size_class *class; in zs_stats_size_show() local
599 "class", "size", "almost_full", "almost_empty", in zs_stats_size_show()
604 class = pool->size_class[i]; in zs_stats_size_show()
606 if (class->index != i) in zs_stats_size_show()
609 spin_lock(&class->lock); in zs_stats_size_show()
610 class_almost_full = zs_stat_get(class, CLASS_ALMOST_FULL); in zs_stats_size_show()
611 class_almost_empty = zs_stat_get(class, CLASS_ALMOST_EMPTY); in zs_stats_size_show()
612 obj_allocated = zs_stat_get(class, OBJ_ALLOCATED); in zs_stats_size_show()
613 obj_used = zs_stat_get(class, OBJ_USED); in zs_stats_size_show()
614 freeable = zs_can_compact(class); in zs_stats_size_show()
615 spin_unlock(&class->lock); in zs_stats_size_show()
617 objs_per_zspage = class->objs_per_zspage; in zs_stats_size_show()
619 class->pages_per_zspage; in zs_stats_size_show()
623 i, class->size, class_almost_full, class_almost_empty, in zs_stats_size_show()
625 class->pages_per_zspage, freeable); in zs_stats_size_show()
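
The zs_stats_size_show() hits above read each class's counters under class->lock and then derive the page footprint from objs_per_zspage and pages_per_zspage. A minimal userspace sketch of that arithmetic, with made-up counter values (not taken from the matched file):

    #include <stdio.h>

    int main(void)
    {
            /* hypothetical counters for one size class */
            unsigned long obj_allocated = 96;       /* slots carved out of zspages */
            unsigned long objs_per_zspage = 32;
            unsigned long pages_per_zspage = 3;

            /* zspages are created and freed as whole units, so obj_allocated
             * is a multiple of objs_per_zspage; convert it back to pages */
            unsigned long pages_used = obj_allocated / objs_per_zspage *
                            pages_per_zspage;

            printf("pages_used = %lu\n", pages_used);
            return 0;
    }
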
683 * For each size class, zspages are divided into different groups
689 static enum fullness_group get_fullness_group(struct size_class *class, in get_fullness_group() argument
696 objs_per_zspage = class->objs_per_zspage; in get_fullness_group()
711 * Each size class maintains various freelists and zspages are assigned
714 * identified by <class, fullness_group>.
716 static void insert_zspage(struct size_class *class, in insert_zspage() argument
722 zs_stat_inc(class, fullness, 1); in insert_zspage()
723 head = list_first_entry_or_null(&class->fullness_list[fullness], in insert_zspage()
735 list_add(&zspage->list, &class->fullness_list[fullness]); in insert_zspage()
740 * by <class, fullness_group>.
742 static void remove_zspage(struct size_class *class, in remove_zspage() argument
746 VM_BUG_ON(list_empty(&class->fullness_list[fullness])); in remove_zspage()
750 zs_stat_dec(class, fullness, 1); in remove_zspage()
754 * Each size class maintains zspages in different fullness groups depending
762 static enum fullness_group fix_fullness_group(struct size_class *class, in fix_fullness_group() argument
769 newfg = get_fullness_group(class, zspage); in fix_fullness_group()
774 remove_zspage(class, zspage, currfg); in fix_fullness_group()
775 insert_zspage(class, zspage, newfg); in fix_fullness_group()
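
get_fullness_group() and fix_fullness_group() above sort a zspage into one of the per-class fullness lists by comparing its in-use object count against objs_per_zspage. A standalone sketch of that classification; the 3/4 boundary between ALMOST_EMPTY and ALMOST_FULL is an assumption made here for illustration:

    #include <stdio.h>

    enum fullness_group { ZS_EMPTY, ZS_ALMOST_EMPTY, ZS_ALMOST_FULL, ZS_FULL };

    static enum fullness_group classify(int inuse, int objs_per_zspage)
    {
            if (inuse == 0)
                    return ZS_EMPTY;
            if (inuse == objs_per_zspage)
                    return ZS_FULL;
            /* assumed threshold: up to 3/4 occupancy still counts as "almost empty" */
            if (inuse <= 3 * objs_per_zspage / 4)
                    return ZS_ALMOST_EMPTY;
            return ZS_ALMOST_FULL;
    }

    int main(void)
    {
            static const char *name[] = { "EMPTY", "ALMOST_EMPTY", "ALMOST_FULL", "FULL" };
            int objs_per_zspage = 8;

            for (int inuse = 0; inuse <= objs_per_zspage; inuse++)
                    printf("inuse=%d -> %s\n", inuse,
                           name[classify(inuse, objs_per_zspage)]);
            return 0;
    }
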
786 * to form a zspage for each size class. This is important
793 * For example, for size class of 3/8 * PAGE_SIZE, we should
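
The comment fragments at lines 786 and 793 describe choosing how many pages to chain into a zspage so that as little space as possible is wasted when the zspage is cut into class-sized chunks. A runnable sketch of that search, assuming a 4 KiB page and a cap of 4 pages per zspage; for the 3/8 * PAGE_SIZE class from the comment it picks 3 pages, which wastes nothing:

    #include <stdio.h>

    #define PAGE_SIZE               4096
    #define MAX_PAGES_PER_ZSPAGE    4       /* assumed upper bound for illustration */

    static int pages_per_zspage(int class_size)
    {
            int best_pages = 1, best_used_pct = 0;

            for (int pages = 1; pages <= MAX_PAGES_PER_ZSPAGE; pages++) {
                    int zspage_size = pages * PAGE_SIZE;
                    int waste = zspage_size % class_size;
                    int used_pct = (zspage_size - waste) * 100 / zspage_size;

                    if (used_pct > best_used_pct) {
                            best_used_pct = used_pct;
                            best_pages = pages;
                    }
            }
            return best_pages;
    }

    int main(void)
    {
            int class_size = 3 * PAGE_SIZE / 8;     /* the example from the comment */

            printf("class %d -> %d page(s) per zspage\n",
                   class_size, pages_per_zspage(class_size));
            return 0;
    }
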
931 static void __free_zspage(struct zs_pool *pool, struct size_class *class, in __free_zspage() argument
940 assert_spin_locked(&class->lock); in __free_zspage()
958 zs_stat_dec(class, OBJ_ALLOCATED, class->objs_per_zspage); in __free_zspage()
959 atomic_long_sub(class->pages_per_zspage, in __free_zspage()
963 static void free_zspage(struct zs_pool *pool, struct size_class *class, in free_zspage() argument
974 remove_zspage(class, zspage, ZS_EMPTY); in free_zspage()
975 __free_zspage(pool, class, zspage); in free_zspage()
979 static void init_zspage(struct size_class *class, struct zspage *zspage) in init_zspage() argument
995 while ((off += class->size) < PAGE_SIZE) { in init_zspage()
997 link += class->size / sizeof(*link); in init_zspage()
1023 static void create_page_chain(struct size_class *class, struct zspage *zspage, in create_page_chain() argument
1029 int nr_pages = class->pages_per_zspage; in create_page_chain()
1046 if (unlikely(class->objs_per_zspage == 1 && in create_page_chain()
1047 class->pages_per_zspage == 1)) in create_page_chain()
1057 * Allocate a zspage for the given size class
1060 struct size_class *class, in alloc_zspage() argument
1074 for (i = 0; i < class->pages_per_zspage; i++) { in alloc_zspage()
1091 create_page_chain(class, zspage, pages); in alloc_zspage()
1092 init_zspage(class, zspage); in alloc_zspage()
1097 static struct zspage *find_get_zspage(struct size_class *class) in find_get_zspage() argument
1103 zspage = list_first_entry_or_null(&class->fullness_list[i], in find_get_zspage()
1219 static bool zspage_full(struct size_class *class, struct zspage *zspage) in zspage_full() argument
1221 return get_zspage_inuse(zspage) == class->objs_per_zspage; in zspage_full()
1255 struct size_class *class; in zs_map_object() local
1278 class = pool->size_class[class_idx]; in zs_map_object()
1279 off = (class->size * obj_idx) & ~PAGE_MASK; in zs_map_object()
1283 if (off + class->size <= PAGE_SIZE) { in zs_map_object()
1295 ret = __zs_map_object(area, pages, off, class->size); in zs_map_object()
1313 struct size_class *class; in zs_unmap_object() local
1320 class = pool->size_class[class_idx]; in zs_unmap_object()
1321 off = (class->size * obj_idx) & ~PAGE_MASK; in zs_unmap_object()
1324 if (off + class->size <= PAGE_SIZE) in zs_unmap_object()
1333 __zs_unmap_object(area, pages, off, class->size); in zs_unmap_object()
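
zs_map_object() and zs_unmap_object() above locate an object from class->size and its index, and fall back to a two-page copy path when the object straddles a page boundary. A userspace sketch of that offset test, using a hypothetical 176-byte class:

    #include <stdio.h>

    #define PAGE_SIZE 4096UL
    #define PAGE_MASK (~(PAGE_SIZE - 1))

    int main(void)
    {
            unsigned long size = 176;       /* hypothetical class->size */

            for (unsigned long idx = 20; idx < 26; idx++) {
                    /* byte offset of object idx within its page */
                    unsigned long off = (size * idx) & ~PAGE_MASK;
                    int spans = off + size > PAGE_SIZE;

                    printf("obj %lu: off=%lu %s\n", idx, off,
                           spans ? "(spans two pages)" : "(fits in one page)");
            }
            return 0;
    }
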
1347 * The function returns the size of the first huge class - any object of equal
1361 static unsigned long obj_malloc(struct size_class *class, in obj_malloc() argument
1375 offset = obj * class->size; in obj_malloc()
1395 zs_stat_inc(class, OBJ_USED, 1); in obj_malloc()
1416 struct size_class *class; in zs_malloc() local
1429 class = pool->size_class[get_size_class_index(size)]; in zs_malloc()
1431 spin_lock(&class->lock); in zs_malloc()
1432 zspage = find_get_zspage(class); in zs_malloc()
1434 obj = obj_malloc(class, zspage, handle); in zs_malloc()
1436 fix_fullness_group(class, zspage); in zs_malloc()
1438 spin_unlock(&class->lock); in zs_malloc()
1443 spin_unlock(&class->lock); in zs_malloc()
1445 zspage = alloc_zspage(pool, class, gfp); in zs_malloc()
1451 spin_lock(&class->lock); in zs_malloc()
1452 obj = obj_malloc(class, zspage, handle); in zs_malloc()
1453 newfg = get_fullness_group(class, zspage); in zs_malloc()
1454 insert_zspage(class, zspage, newfg); in zs_malloc()
1455 set_zspage_mapping(zspage, class->index, newfg); in zs_malloc()
1457 atomic_long_add(class->pages_per_zspage, in zs_malloc()
1459 zs_stat_inc(class, OBJ_ALLOCATED, class->objs_per_zspage); in zs_malloc()
1463 spin_unlock(&class->lock); in zs_malloc()
1469 static void obj_free(struct size_class *class, unsigned long obj) in obj_free() argument
1480 f_offset = (class->size * f_objidx) & ~PAGE_MASK; in obj_free()
1491 zs_stat_dec(class, OBJ_USED, 1); in obj_free()
1501 struct size_class *class; in zs_free() local
1516 class = pool->size_class[class_idx]; in zs_free()
1518 spin_lock(&class->lock); in zs_free()
1519 obj_free(class, obj); in zs_free()
1520 fullness = fix_fullness_group(class, zspage); in zs_free()
1530 free_zspage(pool, class, zspage); in zs_free()
1533 spin_unlock(&class->lock); in zs_free()
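
zs_malloc() and zs_free() above are the allocation entry points that pick a size class, take class->lock, and keep the fullness lists up to date. A rough kernel-side usage sketch of the public zsmalloc API those paths implement; the pool name and sizes are arbitrary and error handling is trimmed:

    #include <linux/zsmalloc.h>
    #include <linux/gfp.h>
    #include <linux/errno.h>
    #include <linux/string.h>

    static int zsmalloc_demo(void)
    {
            struct zs_pool *pool;
            unsigned long handle;
            void *vaddr;

            pool = zs_create_pool("demo");          /* illustrative pool name */
            if (!pool)
                    return -ENOMEM;

            /* the request is routed to the size class chosen in zs_malloc() */
            handle = zs_malloc(pool, 128, GFP_KERNEL);
            if (!handle) {
                    zs_destroy_pool(pool);
                    return -ENOMEM;
            }

            /* objects are only reachable through a temporary mapping */
            vaddr = zs_map_object(pool, handle, ZS_MM_WO);
            memset(vaddr, 0, 128);
            zs_unmap_object(pool, handle);

            zs_free(pool, handle);
            zs_destroy_pool(pool);
            return 0;
    }
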
1539 static void zs_object_copy(struct size_class *class, unsigned long dst, in zs_object_copy() argument
1549 s_size = d_size = class->size; in zs_object_copy()
1554 s_off = (class->size * s_objidx) & ~PAGE_MASK; in zs_object_copy()
1555 d_off = (class->size * d_objidx) & ~PAGE_MASK; in zs_object_copy()
1557 if (s_off + class->size > PAGE_SIZE) in zs_object_copy()
1560 if (d_off + class->size > PAGE_SIZE) in zs_object_copy()
1571 if (written == class->size) in zs_object_copy()
1585 s_size = class->size - written; in zs_object_copy()
1593 d_size = class->size - written; in zs_object_copy()
1606 static unsigned long find_alloced_obj(struct size_class *class, in find_alloced_obj() argument
1616 offset += class->size * index; in find_alloced_obj()
1627 offset += class->size; in find_alloced_obj()
1649 static int migrate_zspage(struct zs_pool *pool, struct size_class *class, in migrate_zspage() argument
1660 handle = find_alloced_obj(class, s_page, &obj_idx); in migrate_zspage()
1670 if (zspage_full(class, get_zspage(d_page))) { in migrate_zspage()
1677 free_obj = obj_malloc(class, get_zspage(d_page), handle); in migrate_zspage()
1678 zs_object_copy(class, free_obj, used_obj); in migrate_zspage()
1689 obj_free(class, used_obj); in migrate_zspage()
1699 static struct zspage *isolate_zspage(struct size_class *class, bool source) in isolate_zspage() argument
1711 zspage = list_first_entry_or_null(&class->fullness_list[fg[i]], in isolate_zspage()
1715 remove_zspage(class, zspage, fg[i]); in isolate_zspage()
1724 * putback_zspage - add @zspage into right class's fullness list
1725 * @class: destination class
1730 static enum fullness_group putback_zspage(struct size_class *class, in putback_zspage() argument
1737 fullness = get_fullness_group(class, zspage); in putback_zspage()
1738 insert_zspage(class, zspage, fullness); in putback_zspage()
1739 set_zspage_mapping(zspage, class->index, fullness); in putback_zspage()
1822 struct size_class *class, in putback_zspage_deferred() argument
1827 fg = putback_zspage(class, zspage); in putback_zspage_deferred()
1839 * checks the isolated count under &class->lock after enqueuing in zs_pool_dec_isolated()
1846 static void replace_sub_page(struct size_class *class, struct zspage *zspage, in replace_sub_page() argument
1862 create_page_chain(class, zspage, pages); in replace_sub_page()
1872 struct size_class *class; in zs_page_isolate() local
1888 * Without class lock, fullness could be stale while class_idx is okay in zs_page_isolate()
1890 * fullness again under class lock. in zs_page_isolate()
1895 class = pool->size_class[class_idx]; in zs_page_isolate()
1897 spin_lock(&class->lock); in zs_page_isolate()
1899 spin_unlock(&class->lock); in zs_page_isolate()
1905 spin_unlock(&class->lock); in zs_page_isolate()
1916 remove_zspage(class, zspage, fullness); in zs_page_isolate()
1920 spin_unlock(&class->lock); in zs_page_isolate()
1929 struct size_class *class; in zs_page_migrate() local
1958 class = pool->size_class[class_idx]; in zs_page_migrate()
1961 spin_lock(&class->lock); in zs_page_migrate()
1979 pos += class->size; in zs_page_migrate()
1990 addr += class->size) { in zs_page_migrate()
2006 replace_sub_page(class, zspage, newpage, page); in zs_page_migrate()
2022 putback_zspage_deferred(pool, class, zspage); in zs_page_migrate()
2038 addr += class->size) { in zs_page_migrate()
2048 spin_unlock(&class->lock); in zs_page_migrate()
2057 struct size_class *class; in zs_page_putback() local
2070 class = pool->size_class[class_idx]; in zs_page_putback()
2072 spin_lock(&class->lock); in zs_page_putback()
2079 putback_zspage_deferred(pool, class, zspage); in zs_page_putback()
2082 spin_unlock(&class->lock); in zs_page_putback()
2145 struct size_class *class; in async_free_zspage() local
2154 class = pool->size_class[i]; in async_free_zspage()
2155 if (class->index != i) in async_free_zspage()
2158 spin_lock(&class->lock); in async_free_zspage()
2159 list_splice_init(&class->fullness_list[ZS_EMPTY], &free_pages); in async_free_zspage()
2160 spin_unlock(&class->lock); in async_free_zspage()
2170 class = pool->size_class[class_idx]; in async_free_zspage()
2171 spin_lock(&class->lock); in async_free_zspage()
2173 spin_unlock(&class->lock); in async_free_zspage()
2204 static unsigned long zs_can_compact(struct size_class *class) in zs_can_compact() argument
2207 unsigned long obj_allocated = zs_stat_get(class, OBJ_ALLOCATED); in zs_can_compact()
2208 unsigned long obj_used = zs_stat_get(class, OBJ_USED); in zs_can_compact()
2214 obj_wasted /= class->objs_per_zspage; in zs_can_compact()
2216 return obj_wasted * class->pages_per_zspage; in zs_can_compact()
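
zs_can_compact() above estimates how many pages compacting a class could release: the allocated-but-unused object slots, rounded down to whole zspages and converted to pages. A standalone sketch of the same arithmetic with hypothetical counters:

    #include <stdio.h>

    static unsigned long can_compact(unsigned long obj_allocated,
                                     unsigned long obj_used,
                                     unsigned long objs_per_zspage,
                                     unsigned long pages_per_zspage)
    {
            unsigned long obj_wasted;

            if (obj_allocated <= obj_used)
                    return 0;

            obj_wasted = obj_allocated - obj_used;
            obj_wasted /= objs_per_zspage;          /* whole zspages worth of waste */

            return obj_wasted * pages_per_zspage;   /* pages compaction could free */
    }

    int main(void)
    {
            /* hypothetical class: 32 objects per zspage spread over 3 pages */
            printf("freeable pages: %lu\n", can_compact(320, 200, 32, 3));
            return 0;
    }
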
2219 static void __zs_compact(struct zs_pool *pool, struct size_class *class) in __zs_compact() argument
2225 spin_lock(&class->lock); in __zs_compact()
2226 while ((src_zspage = isolate_zspage(class, true))) { in __zs_compact()
2228 if (!zs_can_compact(class)) in __zs_compact()
2234 while ((dst_zspage = isolate_zspage(class, false))) { in __zs_compact()
2240 if (!migrate_zspage(pool, class, &cc)) in __zs_compact()
2243 putback_zspage(class, dst_zspage); in __zs_compact()
2250 putback_zspage(class, dst_zspage); in __zs_compact()
2251 if (putback_zspage(class, src_zspage) == ZS_EMPTY) { in __zs_compact()
2252 free_zspage(pool, class, src_zspage); in __zs_compact()
2253 pool->stats.pages_compacted += class->pages_per_zspage; in __zs_compact()
2255 spin_unlock(&class->lock); in __zs_compact()
2257 spin_lock(&class->lock); in __zs_compact()
2261 putback_zspage(class, src_zspage); in __zs_compact()
2263 spin_unlock(&class->lock); in __zs_compact()
2269 struct size_class *class; in zs_compact() local
2272 class = pool->size_class[i]; in zs_compact()
2273 if (!class) in zs_compact()
2275 if (class->index != i) in zs_compact()
2277 __zs_compact(pool, class); in zs_compact()
2312 struct size_class *class; in zs_shrinker_count() local
2318 class = pool->size_class[i]; in zs_shrinker_count()
2319 if (!class) in zs_shrinker_count()
2321 if (class->index != i) in zs_shrinker_count()
2324 pages_to_free += zs_can_compact(class); in zs_shrinker_count()
2386 struct size_class *class; in zs_create_pool() local
2398 * class. Any object bigger than or equal to that will in zs_create_pool()
2399 * end up in the huge class. in zs_create_pool()
2408 * size class search - so object may be smaller than in zs_create_pool()
2409 * huge class size, yet it still can end up in the huge in zs_create_pool()
2410 * class because it grows by ZS_HANDLE_SIZE extra bytes in zs_create_pool()
2411 * right before class lookup. in zs_create_pool()
2432 class = kzalloc(sizeof(struct size_class), GFP_KERNEL); in zs_create_pool()
2433 if (!class) in zs_create_pool()
2436 class->size = size; in zs_create_pool()
2437 class->index = i; in zs_create_pool()
2438 class->pages_per_zspage = pages_per_zspage; in zs_create_pool()
2439 class->objs_per_zspage = objs_per_zspage; in zs_create_pool()
2440 spin_lock_init(&class->lock); in zs_create_pool()
2441 pool->size_class[i] = class; in zs_create_pool()
2444 INIT_LIST_HEAD(&class->fullness_list[fullness]); in zs_create_pool()
2446 prev_class = class; in zs_create_pool()
2481 struct size_class *class = pool->size_class[i]; in zs_destroy_pool() local
2483 if (!class) in zs_destroy_pool()
2486 if (class->index != i) in zs_destroy_pool()
2490 if (!list_empty(&class->fullness_list[fg])) { in zs_destroy_pool()
2491 pr_info("Freeing non-empty class with size %db, fullness group %d\n", in zs_destroy_pool()
2492 class->size, fg); in zs_destroy_pool()
2495 kfree(class); in zs_destroy_pool()
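
For context on the zs_create_pool() hits above (size, index, pages_per_zspage, objs_per_zspage and the prev_class bookkeeping), here is a standalone sketch of how the size-class table is laid out and why neighbouring classes can be merged when they would pack objects identically. The constants are assumed typical values, not read from the matched file:

    #include <stdio.h>

    #define PAGE_SIZE               4096
    #define ZS_MIN_ALLOC_SIZE       32
    #define ZS_SIZE_CLASS_DELTA     16      /* assumed step between class sizes */

    static int objs_per_zspage(int size, int pages)
    {
            return pages * PAGE_SIZE / size;
    }

    int main(void)
    {
            int prev_pages = -1, prev_objs = -1;

            /* look at a few classes near 2 KiB, where merging actually happens;
             * the sketch keeps every zspage to a single page for simplicity */
            for (int i = 120; i < 128; i++) {
                    int size = ZS_MIN_ALLOC_SIZE + i * ZS_SIZE_CLASS_DELTA;
                    int pages = 1;
                    int objs = objs_per_zspage(size, pages);
                    int mergeable = (pages == prev_pages && objs == prev_objs);

                    printf("class %d: size=%d objs_per_zspage=%d%s\n",
                           i, size, objs,
                           mergeable ? " (could share previous size_class)" : "");
                    prev_pages = pages;
                    prev_objs = objs;
            }
            return 0;
    }
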