Lines matching references to: s

205 static inline bool kmem_cache_debug(struct kmem_cache *s) in kmem_cache_debug() argument
207 return kmem_cache_debug_flags(s, SLAB_DEBUG_FLAGS); in kmem_cache_debug()
210 static inline bool slub_debug_orig_size(struct kmem_cache *s) in slub_debug_orig_size() argument
212 return (kmem_cache_debug_flags(s, SLAB_STORE_USER) && in slub_debug_orig_size()
213 (s->flags & SLAB_KMALLOC)); in slub_debug_orig_size()
216 void *fixup_red_left(struct kmem_cache *s, void *p) in fixup_red_left() argument
218 if (kmem_cache_debug_flags(s, SLAB_RED_ZONE)) in fixup_red_left()
219 p += s->red_left_pad; in fixup_red_left()
224 static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s) in kmem_cache_has_cpu_partial() argument
227 return !kmem_cache_debug(s); in kmem_cache_has_cpu_partial()
305 static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; } in sysfs_slab_add() argument
306 static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p) in sysfs_slab_alias() argument
313 static inline void debugfs_slab_add(struct kmem_cache *s) { } in debugfs_slab_add() argument
316 static inline void stat(const struct kmem_cache *s, enum stat_item si) in stat() argument
323 raw_cpu_inc(s->cpu_slab->stat[si]); in stat()
349 static inline void *freelist_ptr(const struct kmem_cache *s, void *ptr, in freelist_ptr() argument
363 return (void *)((unsigned long)ptr ^ s->random ^ in freelist_ptr()
371 static inline void *freelist_dereference(const struct kmem_cache *s, in freelist_dereference() argument
374 return freelist_ptr(s, (void *)*(unsigned long *)(ptr_addr), in freelist_dereference()
378 static inline void *get_freepointer(struct kmem_cache *s, void *object) in get_freepointer() argument
381 return freelist_dereference(s, object + s->offset); in get_freepointer()
384 static void prefetch_freepointer(const struct kmem_cache *s, void *object) in prefetch_freepointer() argument
386 prefetchw(object + s->offset); in prefetch_freepointer()
400 static inline void *get_freepointer_safe(struct kmem_cache *s, void *object) in get_freepointer_safe() argument
406 return get_freepointer(s, object); in get_freepointer_safe()
409 freepointer_addr = (unsigned long)object + s->offset; in get_freepointer_safe()
411 return freelist_ptr(s, p, freepointer_addr); in get_freepointer_safe()
414 static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp) in set_freepointer() argument
416 unsigned long freeptr_addr = (unsigned long)object + s->offset; in set_freepointer()
423 *(void **)freeptr_addr = freelist_ptr(s, fp, freeptr_addr); in set_freepointer()
458 static void slub_set_cpu_partial(struct kmem_cache *s, unsigned int nr_objects) in slub_set_cpu_partial() argument
462 s->cpu_partial = nr_objects; in slub_set_cpu_partial()
470 nr_slabs = DIV_ROUND_UP(nr_objects * 2, oo_objects(s->oo)); in slub_set_cpu_partial()
471 s->cpu_partial_slabs = nr_slabs; in slub_set_cpu_partial()
475 slub_set_cpu_partial(struct kmem_cache *s, unsigned int nr_objects) in slub_set_cpu_partial() argument
506 static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct slab *slab, in __cmpxchg_double_slab() argument
515 if (s->flags & __CMPXCHG_DOUBLE) { in __cmpxchg_double_slab()
535 stat(s, CMPXCHG_DOUBLE_FAIL); in __cmpxchg_double_slab()
538 pr_info("%s %s: cmpxchg double redo ", n, s->name); in __cmpxchg_double_slab()
544 static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct slab *slab, in cmpxchg_double_slab() argument
551 if (s->flags & __CMPXCHG_DOUBLE) { in cmpxchg_double_slab()
576 stat(s, CMPXCHG_DOUBLE_FAIL); in cmpxchg_double_slab()
579 pr_info("%s %s: cmpxchg double redo ", n, s->name); in cmpxchg_double_slab()
589 static void __fill_map(unsigned long *obj_map, struct kmem_cache *s, in __fill_map() argument
597 for (p = slab->freelist; p; p = get_freepointer(s, p)) in __fill_map()
598 set_bit(__obj_to_index(s, addr, p), obj_map); in __fill_map()
621 static inline unsigned int size_from_object(struct kmem_cache *s) in size_from_object() argument
623 if (s->flags & SLAB_RED_ZONE) in size_from_object()
624 return s->size - s->red_left_pad; in size_from_object()
626 return s->size; in size_from_object()
629 static inline void *restore_red_left(struct kmem_cache *s, void *p) in restore_red_left() argument
631 if (s->flags & SLAB_RED_ZONE) in restore_red_left()
632 p -= s->red_left_pad; in restore_red_left()
670 static inline int check_valid_pointer(struct kmem_cache *s, in check_valid_pointer() argument
680 object = restore_red_left(s, object); in check_valid_pointer()
681 if (object < base || object >= base + slab->objects * s->size || in check_valid_pointer()
682 (object - base) % s->size) { in check_valid_pointer()
701 static inline bool freeptr_outside_object(struct kmem_cache *s) in freeptr_outside_object() argument
703 return s->offset >= s->inuse; in freeptr_outside_object()
710 static inline unsigned int get_info_end(struct kmem_cache *s) in get_info_end() argument
712 if (freeptr_outside_object(s)) in get_info_end()
713 return s->inuse + sizeof(void *); in get_info_end()
715 return s->inuse; in get_info_end()
718 static struct track *get_track(struct kmem_cache *s, void *object, in get_track() argument
723 p = object + get_info_end(s); in get_track()
747 static void set_track_update(struct kmem_cache *s, void *object, in set_track_update() argument
751 struct track *p = get_track(s, object, alloc); in set_track_update()
762 static __always_inline void set_track(struct kmem_cache *s, void *object, in set_track() argument
767 set_track_update(s, object, alloc, addr, handle); in set_track()
770 static void init_tracking(struct kmem_cache *s, void *object) in init_tracking() argument
774 if (!(s->flags & SLAB_STORE_USER)) in init_tracking()
777 p = get_track(s, object, TRACK_ALLOC); in init_tracking()
781 static void print_track(const char *s, struct track *t, unsigned long pr_time) in print_track() argument
789 s, (void *)t->addr, pr_time - t->when, t->cpu, t->pid); in print_track()
799 void print_tracking(struct kmem_cache *s, void *object) in print_tracking() argument
802 if (!(s->flags & SLAB_STORE_USER)) in print_tracking()
805 print_track("Allocated", get_track(s, object, TRACK_ALLOC), pr_time); in print_tracking()
806 print_track("Freed", get_track(s, object, TRACK_FREE), pr_time); in print_tracking()
824 static inline void set_orig_size(struct kmem_cache *s, in set_orig_size() argument
829 if (!slub_debug_orig_size(s)) in set_orig_size()
832 p += get_info_end(s); in set_orig_size()
838 static inline unsigned int get_orig_size(struct kmem_cache *s, void *object) in get_orig_size() argument
842 if (!slub_debug_orig_size(s)) in get_orig_size()
843 return s->object_size; in get_orig_size()
845 p += get_info_end(s); in get_orig_size()
851 static void slab_bug(struct kmem_cache *s, char *fmt, ...) in slab_bug() argument
860 pr_err("BUG %s (%s): %pV\n", s->name, print_tainted(), &vaf); in slab_bug()
866 static void slab_fix(struct kmem_cache *s, char *fmt, ...) in slab_fix() argument
877 pr_err("FIX %s: %pV\n", s->name, &vaf); in slab_fix()
881 static void print_trailer(struct kmem_cache *s, struct slab *slab, u8 *p) in print_trailer() argument
886 print_tracking(s, p); in print_trailer()
891 p, p - addr, get_freepointer(s, p)); in print_trailer()
893 if (s->flags & SLAB_RED_ZONE) in print_trailer()
894 print_section(KERN_ERR, "Redzone ", p - s->red_left_pad, in print_trailer()
895 s->red_left_pad); in print_trailer()
900 min_t(unsigned int, s->object_size, PAGE_SIZE)); in print_trailer()
901 if (s->flags & SLAB_RED_ZONE) in print_trailer()
902 print_section(KERN_ERR, "Redzone ", p + s->object_size, in print_trailer()
903 s->inuse - s->object_size); in print_trailer()
905 off = get_info_end(s); in print_trailer()
907 if (s->flags & SLAB_STORE_USER) in print_trailer()
910 if (slub_debug_orig_size(s)) in print_trailer()
913 off += kasan_metadata_size(s); in print_trailer()
915 if (off != size_from_object(s)) in print_trailer()
918 size_from_object(s) - off); in print_trailer()
923 static void object_err(struct kmem_cache *s, struct slab *slab, in object_err() argument
929 slab_bug(s, "%s", reason); in object_err()
930 print_trailer(s, slab, object); in object_err()
934 static bool freelist_corrupted(struct kmem_cache *s, struct slab *slab, in freelist_corrupted() argument
937 if ((s->flags & SLAB_CONSISTENCY_CHECKS) && in freelist_corrupted()
938 !check_valid_pointer(s, slab, nextfree) && freelist) { in freelist_corrupted()
939 object_err(s, slab, *freelist, "Freechain corrupt"); in freelist_corrupted()
941 slab_fix(s, "Isolate corrupted freechain"); in freelist_corrupted()
948 static __printf(3, 4) void slab_err(struct kmem_cache *s, struct slab *slab, in slab_err() argument
960 slab_bug(s, "%s", buf); in slab_err()
966 static void init_object(struct kmem_cache *s, void *object, u8 val) in init_object() argument
970 if (s->flags & SLAB_RED_ZONE) in init_object()
971 memset(p - s->red_left_pad, val, s->red_left_pad); in init_object()
973 if (s->flags & __OBJECT_POISON) { in init_object()
974 memset(p, POISON_FREE, s->object_size - 1); in init_object()
975 p[s->object_size - 1] = POISON_END; in init_object()
978 if (s->flags & SLAB_RED_ZONE) in init_object()
979 memset(p + s->object_size, val, s->inuse - s->object_size); in init_object()
982 static void restore_bytes(struct kmem_cache *s, char *message, u8 data, in restore_bytes() argument
985 slab_fix(s, "Restoring %s 0x%p-0x%p=0x%x", message, from, to - 1, data); in restore_bytes()
989 static int check_bytes_and_report(struct kmem_cache *s, struct slab *slab, in check_bytes_and_report() argument
1010 slab_bug(s, "%s overwritten", what); in check_bytes_and_report()
1014 print_trailer(s, slab, object); in check_bytes_and_report()
1018 restore_bytes(s, what, value, fault, end); in check_bytes_and_report()
1061 static int check_pad_bytes(struct kmem_cache *s, struct slab *slab, u8 *p) in check_pad_bytes() argument
1063 unsigned long off = get_info_end(s); /* The end of info */ in check_pad_bytes()
1065 if (s->flags & SLAB_STORE_USER) { in check_pad_bytes()
1069 if (s->flags & SLAB_KMALLOC) in check_pad_bytes()
1073 off += kasan_metadata_size(s); in check_pad_bytes()
1075 if (size_from_object(s) == off) in check_pad_bytes()
1078 return check_bytes_and_report(s, slab, p, "Object padding", in check_pad_bytes()
1079 p + off, POISON_INUSE, size_from_object(s) - off); in check_pad_bytes()
1083 static void slab_pad_check(struct kmem_cache *s, struct slab *slab) in slab_pad_check() argument
1092 if (!(s->flags & SLAB_POISON)) in slab_pad_check()
1098 remainder = length % s->size; in slab_pad_check()
1111 slab_err(s, slab, "Padding overwritten. 0x%p-0x%p @offset=%tu", in slab_pad_check()
1115 restore_bytes(s, "slab padding", POISON_INUSE, fault, end); in slab_pad_check()
1118 static int check_object(struct kmem_cache *s, struct slab *slab, in check_object() argument
1122 u8 *endobject = object + s->object_size; in check_object()
1124 if (s->flags & SLAB_RED_ZONE) { in check_object()
1125 if (!check_bytes_and_report(s, slab, object, "Left Redzone", in check_object()
1126 object - s->red_left_pad, val, s->red_left_pad)) in check_object()
1129 if (!check_bytes_and_report(s, slab, object, "Right Redzone", in check_object()
1130 endobject, val, s->inuse - s->object_size)) in check_object()
1133 if ((s->flags & SLAB_POISON) && s->object_size < s->inuse) { in check_object()
1134 check_bytes_and_report(s, slab, p, "Alignment padding", in check_object()
1136 s->inuse - s->object_size); in check_object()
1140 if (s->flags & SLAB_POISON) { in check_object()
1141 if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON) && in check_object()
1142 (!check_bytes_and_report(s, slab, p, "Poison", p, in check_object()
1143 POISON_FREE, s->object_size - 1) || in check_object()
1144 !check_bytes_and_report(s, slab, p, "End Poison", in check_object()
1145 p + s->object_size - 1, POISON_END, 1))) in check_object()
1150 check_pad_bytes(s, slab, p); in check_object()
1153 if (!freeptr_outside_object(s) && val == SLUB_RED_ACTIVE) in check_object()
1161 if (!check_valid_pointer(s, slab, get_freepointer(s, p))) { in check_object()
1162 object_err(s, slab, p, "Freepointer corrupt"); in check_object()
1168 set_freepointer(s, p, NULL); in check_object()
1174 static int check_slab(struct kmem_cache *s, struct slab *slab) in check_slab() argument
1179 slab_err(s, slab, "Not a valid slab page"); in check_slab()
1183 maxobj = order_objects(slab_order(slab), s->size); in check_slab()
1185 slab_err(s, slab, "objects %u > max %u", in check_slab()
1190 slab_err(s, slab, "inuse %u > max %u", in check_slab()
1195 slab_pad_check(s, slab); in check_slab()
1203 static int on_freelist(struct kmem_cache *s, struct slab *slab, void *search) in on_freelist() argument
1214 if (!check_valid_pointer(s, slab, fp)) { in on_freelist()
1216 object_err(s, slab, object, in on_freelist()
1218 set_freepointer(s, object, NULL); in on_freelist()
1220 slab_err(s, slab, "Freepointer corrupt"); in on_freelist()
1223 slab_fix(s, "Freelist cleared"); in on_freelist()
1229 fp = get_freepointer(s, object); in on_freelist()
1233 max_objects = order_objects(slab_order(slab), s->size); in on_freelist()
1238 slab_err(s, slab, "Wrong number of objects. Found %d but should be %d", in on_freelist()
1241 slab_fix(s, "Number of objects adjusted"); in on_freelist()
1244 slab_err(s, slab, "Wrong object count. Counter is %d but counted were %d", in on_freelist()
1247 slab_fix(s, "Object count adjusted"); in on_freelist()
1252 static void trace(struct kmem_cache *s, struct slab *slab, void *object, in trace() argument
1255 if (s->flags & SLAB_TRACE) { in trace()
1257 s->name, in trace()
1264 s->object_size); in trace()
1273 static void add_full(struct kmem_cache *s, in add_full() argument
1276 if (!(s->flags & SLAB_STORE_USER)) in add_full()
1283 static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct slab *slab) in remove_full() argument
1285 if (!(s->flags & SLAB_STORE_USER)) in remove_full()
1293 static inline unsigned long slabs_node(struct kmem_cache *s, int node) in slabs_node() argument
1295 struct kmem_cache_node *n = get_node(s, node); in slabs_node()
1305 static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects) in inc_slabs_node() argument
1307 struct kmem_cache_node *n = get_node(s, node); in inc_slabs_node()
1320 static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects) in dec_slabs_node() argument
1322 struct kmem_cache_node *n = get_node(s, node); in dec_slabs_node()
1329 static void setup_object_debug(struct kmem_cache *s, void *object) in setup_object_debug() argument
1331 if (!kmem_cache_debug_flags(s, SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON)) in setup_object_debug()
1334 init_object(s, object, SLUB_RED_INACTIVE); in setup_object_debug()
1335 init_tracking(s, object); in setup_object_debug()
1339 void setup_slab_debug(struct kmem_cache *s, struct slab *slab, void *addr) in setup_slab_debug() argument
1341 if (!kmem_cache_debug_flags(s, SLAB_POISON)) in setup_slab_debug()
1349 static inline int alloc_consistency_checks(struct kmem_cache *s, in alloc_consistency_checks() argument
1352 if (!check_slab(s, slab)) in alloc_consistency_checks()
1355 if (!check_valid_pointer(s, slab, object)) { in alloc_consistency_checks()
1356 object_err(s, slab, object, "Freelist Pointer check fails"); in alloc_consistency_checks()
1360 if (!check_object(s, slab, object, SLUB_RED_INACTIVE)) in alloc_consistency_checks()
1366 static noinline int alloc_debug_processing(struct kmem_cache *s, in alloc_debug_processing() argument
1369 if (s->flags & SLAB_CONSISTENCY_CHECKS) { in alloc_debug_processing()
1370 if (!alloc_consistency_checks(s, slab, object)) in alloc_debug_processing()
1375 trace(s, slab, object, 1); in alloc_debug_processing()
1376 set_orig_size(s, object, orig_size); in alloc_debug_processing()
1377 init_object(s, object, SLUB_RED_ACTIVE); in alloc_debug_processing()
1387 slab_fix(s, "Marking all objects used"); in alloc_debug_processing()
1394 static inline int free_consistency_checks(struct kmem_cache *s, in free_consistency_checks() argument
1397 if (!check_valid_pointer(s, slab, object)) { in free_consistency_checks()
1398 slab_err(s, slab, "Invalid object pointer 0x%p", object); in free_consistency_checks()
1402 if (on_freelist(s, slab, object)) { in free_consistency_checks()
1403 object_err(s, slab, object, "Object already free"); in free_consistency_checks()
1407 if (!check_object(s, slab, object, SLUB_RED_ACTIVE)) in free_consistency_checks()
1410 if (unlikely(s != slab->slab_cache)) { in free_consistency_checks()
1412 slab_err(s, slab, "Attempt to free object(0x%p) outside of slab", in free_consistency_checks()
1419 object_err(s, slab, object, in free_consistency_checks()
1640 static inline void setup_object_debug(struct kmem_cache *s, void *object) {} in setup_object_debug() argument
1642 void setup_slab_debug(struct kmem_cache *s, struct slab *slab, void *addr) {} in setup_slab_debug() argument
1644 static inline int alloc_debug_processing(struct kmem_cache *s, in alloc_debug_processing() argument
1648 struct kmem_cache *s, struct slab *slab, in free_debug_processing() argument
1652 static inline void slab_pad_check(struct kmem_cache *s, struct slab *slab) {} in slab_pad_check() argument
1653 static inline int check_object(struct kmem_cache *s, struct slab *slab, in check_object() argument
1655 static inline void set_track(struct kmem_cache *s, void *object, in set_track() argument
1657 static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n, in add_full() argument
1659 static inline void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, in remove_full() argument
1670 static inline unsigned long slabs_node(struct kmem_cache *s, int node) in slabs_node() argument
1674 static inline void inc_slabs_node(struct kmem_cache *s, int node, in inc_slabs_node() argument
1676 static inline void dec_slabs_node(struct kmem_cache *s, int node, in dec_slabs_node() argument
1679 static bool freelist_corrupted(struct kmem_cache *s, struct slab *slab, in freelist_corrupted() argument
1690 static __always_inline bool slab_free_hook(struct kmem_cache *s, in slab_free_hook() argument
1693 kmemleak_free_recursive(x, s->flags); in slab_free_hook()
1694 kmsan_slab_free(s, x); in slab_free_hook()
1696 debug_check_no_locks_freed(x, s->object_size); in slab_free_hook()
1698 if (!(s->flags & SLAB_DEBUG_OBJECTS)) in slab_free_hook()
1699 debug_check_no_obj_freed(x, s->object_size); in slab_free_hook()
1702 if (!(s->flags & SLAB_TYPESAFE_BY_RCU)) in slab_free_hook()
1703 __kcsan_check_access(x, s->object_size, in slab_free_hook()
1718 memset(kasan_reset_tag(x), 0, s->object_size); in slab_free_hook()
1719 rsize = (s->flags & SLAB_RED_ZONE) ? s->red_left_pad : 0; in slab_free_hook()
1720 memset((char *)kasan_reset_tag(x) + s->inuse, 0, in slab_free_hook()
1721 s->size - s->inuse - rsize); in slab_free_hook()
1724 return kasan_slab_free(s, x, init); in slab_free_hook()
1727 static inline bool slab_free_freelist_hook(struct kmem_cache *s, in slab_free_freelist_hook() argument
1737 slab_free_hook(s, next, false); in slab_free_freelist_hook()
1747 next = get_freepointer(s, object); in slab_free_freelist_hook()
1750 if (!slab_free_hook(s, object, slab_want_init_on_free(s))) { in slab_free_freelist_hook()
1752 set_freepointer(s, object, *head); in slab_free_freelist_hook()
1771 static void *setup_object(struct kmem_cache *s, void *object) in setup_object() argument
1773 setup_object_debug(s, object); in setup_object()
1774 object = kasan_init_slab_obj(s, object); in setup_object()
1775 if (unlikely(s->ctor)) { in setup_object()
1776 kasan_unpoison_object_data(s, object); in setup_object()
1777 s->ctor(object); in setup_object()
1778 kasan_poison_object_data(s, object); in setup_object()
1811 static int init_cache_random_seq(struct kmem_cache *s) in init_cache_random_seq() argument
1813 unsigned int count = oo_objects(s->oo); in init_cache_random_seq()
1817 if (s->random_seq) in init_cache_random_seq()
1820 err = cache_random_seq_create(s, count, GFP_KERNEL); in init_cache_random_seq()
1823 s->name); in init_cache_random_seq()
1828 if (s->random_seq) { in init_cache_random_seq()
1832 s->random_seq[i] *= s->size; in init_cache_random_seq()
1840 struct kmem_cache *s; in init_freelist_randomization() local
1844 list_for_each_entry(s, &slab_caches, list) in init_freelist_randomization()
1845 init_cache_random_seq(s); in init_freelist_randomization()
1851 static void *next_freelist_entry(struct kmem_cache *s, struct slab *slab, in next_freelist_entry() argument
1863 idx = s->random_seq[*pos]; in next_freelist_entry()
1873 static bool shuffle_freelist(struct kmem_cache *s, struct slab *slab) in shuffle_freelist() argument
1880 if (slab->objects < 2 || !s->random_seq) in shuffle_freelist()
1883 freelist_count = oo_objects(s->oo); in shuffle_freelist()
1886 page_limit = slab->objects * s->size; in shuffle_freelist()
1887 start = fixup_red_left(s, slab_address(slab)); in shuffle_freelist()
1890 cur = next_freelist_entry(s, slab, &pos, start, page_limit, in shuffle_freelist()
1892 cur = setup_object(s, cur); in shuffle_freelist()
1896 next = next_freelist_entry(s, slab, &pos, start, page_limit, in shuffle_freelist()
1898 next = setup_object(s, next); in shuffle_freelist()
1899 set_freepointer(s, cur, next); in shuffle_freelist()
1902 set_freepointer(s, cur, NULL); in shuffle_freelist()
1907 static inline int init_cache_random_seq(struct kmem_cache *s) in init_cache_random_seq() argument
1912 static inline bool shuffle_freelist(struct kmem_cache *s, struct slab *slab) in shuffle_freelist() argument
1918 static struct slab *allocate_slab(struct kmem_cache *s, gfp_t flags, int node) in allocate_slab() argument
1921 struct kmem_cache_order_objects oo = s->oo; in allocate_slab()
1929 flags |= s->allocflags; in allocate_slab()
1936 if ((alloc_gfp & __GFP_DIRECT_RECLAIM) && oo_order(oo) > oo_order(s->min)) in allocate_slab()
1941 oo = s->min; in allocate_slab()
1950 stat(s, ORDER_FALLBACK); in allocate_slab()
1957 account_slab(slab, oo_order(oo), s, flags); in allocate_slab()
1959 slab->slab_cache = s; in allocate_slab()
1965 setup_slab_debug(s, slab, start); in allocate_slab()
1967 shuffle = shuffle_freelist(s, slab); in allocate_slab()
1970 start = fixup_red_left(s, start); in allocate_slab()
1971 start = setup_object(s, start); in allocate_slab()
1974 next = p + s->size; in allocate_slab()
1975 next = setup_object(s, next); in allocate_slab()
1976 set_freepointer(s, p, next); in allocate_slab()
1979 set_freepointer(s, p, NULL); in allocate_slab()
1985 static struct slab *new_slab(struct kmem_cache *s, gfp_t flags, int node) in new_slab() argument
1990 WARN_ON_ONCE(s->ctor && (flags & __GFP_ZERO)); in new_slab()
1992 return allocate_slab(s, in new_slab()
1996 static void __free_slab(struct kmem_cache *s, struct slab *slab) in __free_slab() argument
2002 if (kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS)) { in __free_slab()
2005 slab_pad_check(s, slab); in __free_slab()
2006 for_each_object(p, s, slab_address(slab), slab->objects) in __free_slab()
2007 check_object(s, slab, p, SLUB_RED_INACTIVE); in __free_slab()
2015 unaccount_slab(slab, order, s); in __free_slab()
2026 static void free_slab(struct kmem_cache *s, struct slab *slab) in free_slab() argument
2028 if (unlikely(s->flags & SLAB_TYPESAFE_BY_RCU)) { in free_slab()
2031 __free_slab(s, slab); in free_slab()
2034 static void discard_slab(struct kmem_cache *s, struct slab *slab) in discard_slab() argument
2036 dec_slabs_node(s, slab_nid(slab), slab->objects); in discard_slab()
2037 free_slab(s, slab); in discard_slab()
2074 static void *alloc_single_from_partial(struct kmem_cache *s, in alloc_single_from_partial() argument
2082 slab->freelist = get_freepointer(s, object); in alloc_single_from_partial()
2085 if (!alloc_debug_processing(s, slab, object, orig_size)) { in alloc_single_from_partial()
2092 add_full(s, n, slab); in alloc_single_from_partial()
2103 static void *alloc_single_from_new_slab(struct kmem_cache *s, in alloc_single_from_new_slab() argument
2107 struct kmem_cache_node *n = get_node(s, nid); in alloc_single_from_new_slab()
2113 slab->freelist = get_freepointer(s, object); in alloc_single_from_new_slab()
2116 if (!alloc_debug_processing(s, slab, object, orig_size)) in alloc_single_from_new_slab()
2127 add_full(s, n, slab); in alloc_single_from_new_slab()
2131 inc_slabs_node(s, nid, slab->objects); in alloc_single_from_new_slab()
2143 static inline void *acquire_slab(struct kmem_cache *s, in acquire_slab() argument
2171 if (!__cmpxchg_double_slab(s, slab, in acquire_slab()
2183 static void put_cpu_partial(struct kmem_cache *s, struct slab *slab, int drain);
2185 static inline void put_cpu_partial(struct kmem_cache *s, struct slab *slab, in put_cpu_partial() argument
2193 static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n, in get_partial_node() argument
2217 if (kmem_cache_debug(s)) { in get_partial_node()
2218 object = alloc_single_from_partial(s, n, slab, in get_partial_node()
2225 t = acquire_slab(s, n, slab, object == NULL); in get_partial_node()
2231 stat(s, ALLOC_FROM_PARTIAL); in get_partial_node()
2234 put_cpu_partial(s, slab, 0); in get_partial_node()
2235 stat(s, CPU_PARTIAL_NODE); in get_partial_node()
2239 if (!kmem_cache_has_cpu_partial(s) in get_partial_node()
2240 || partial_slabs > s->cpu_partial_slabs / 2) in get_partial_node()
2254 static void *get_any_partial(struct kmem_cache *s, struct partial_context *pc) in get_any_partial() argument
2282 if (!s->remote_node_defrag_ratio || in get_any_partial()
2283 get_cycles() % 1024 > s->remote_node_defrag_ratio) in get_any_partial()
2292 n = get_node(s, zone_to_nid(zone)); in get_any_partial()
2295 n->nr_partial > s->min_partial) { in get_any_partial()
2296 object = get_partial_node(s, n, pc); in get_any_partial()
2317 static void *get_partial(struct kmem_cache *s, int node, struct partial_context *pc) in get_partial() argument
2325 object = get_partial_node(s, get_node(s, searchnode), pc); in get_partial()
2329 return get_any_partial(s, pc); in get_partial()
2370 const struct kmem_cache *s, unsigned long tid) in note_cmpxchg_failure() argument
2373 unsigned long actual_tid = __this_cpu_read(s->cpu_slab->tid); in note_cmpxchg_failure()
2375 pr_info("%s %s: cmpxchg redo ", n, s->name); in note_cmpxchg_failure()
2390 stat(s, CMPXCHG_DOUBLE_CPU_FAIL); in note_cmpxchg_failure()
2393 static void init_kmem_cache_cpus(struct kmem_cache *s) in init_kmem_cache_cpus() argument
2399 c = per_cpu_ptr(s->cpu_slab, cpu); in init_kmem_cache_cpus()
2411 static void deactivate_slab(struct kmem_cache *s, struct slab *slab, in deactivate_slab() argument
2415 struct kmem_cache_node *n = get_node(s, slab_nid(slab)); in deactivate_slab()
2425 stat(s, DEACTIVATE_REMOTE_FREES); in deactivate_slab()
2436 nextfree = get_freepointer(s, freelist_iter); in deactivate_slab()
2443 if (freelist_corrupted(s, slab, &freelist_iter, nextfree)) in deactivate_slab()
2474 set_freepointer(s, freelist_tail, old.freelist); in deactivate_slab()
2481 if (!new.inuse && n->nr_partial >= s->min_partial) { in deactivate_slab()
2490 } else if (kmem_cache_debug_flags(s, SLAB_STORE_USER)) { in deactivate_slab()
2503 if (!cmpxchg_double_slab(s, slab, in deactivate_slab()
2516 stat(s, tail); in deactivate_slab()
2518 stat(s, DEACTIVATE_EMPTY); in deactivate_slab()
2519 discard_slab(s, slab); in deactivate_slab()
2520 stat(s, FREE_SLAB); in deactivate_slab()
2522 add_full(s, n, slab); in deactivate_slab()
2524 stat(s, DEACTIVATE_FULL); in deactivate_slab()
2526 stat(s, DEACTIVATE_FULL); in deactivate_slab()
2531 static void __unfreeze_partials(struct kmem_cache *s, struct slab *partial_slab) in __unfreeze_partials() argument
2544 n2 = get_node(s, slab_nid(slab)); in __unfreeze_partials()
2564 } while (!__cmpxchg_double_slab(s, slab, in __unfreeze_partials()
2569 if (unlikely(!new.inuse && n->nr_partial >= s->min_partial)) { in __unfreeze_partials()
2574 stat(s, FREE_ADD_PARTIAL); in __unfreeze_partials()
2585 stat(s, DEACTIVATE_EMPTY); in __unfreeze_partials()
2586 discard_slab(s, slab); in __unfreeze_partials()
2587 stat(s, FREE_SLAB); in __unfreeze_partials()
2594 static void unfreeze_partials(struct kmem_cache *s) in unfreeze_partials() argument
2599 local_lock_irqsave(&s->cpu_slab->lock, flags); in unfreeze_partials()
2600 partial_slab = this_cpu_read(s->cpu_slab->partial); in unfreeze_partials()
2601 this_cpu_write(s->cpu_slab->partial, NULL); in unfreeze_partials()
2602 local_unlock_irqrestore(&s->cpu_slab->lock, flags); in unfreeze_partials()
2605 __unfreeze_partials(s, partial_slab); in unfreeze_partials()
2608 static void unfreeze_partials_cpu(struct kmem_cache *s, in unfreeze_partials_cpu() argument
2617 __unfreeze_partials(s, partial_slab); in unfreeze_partials_cpu()
2627 static void put_cpu_partial(struct kmem_cache *s, struct slab *slab, int drain) in put_cpu_partial() argument
2634 local_lock_irqsave(&s->cpu_slab->lock, flags); in put_cpu_partial()
2636 oldslab = this_cpu_read(s->cpu_slab->partial); in put_cpu_partial()
2639 if (drain && oldslab->slabs >= s->cpu_partial_slabs) { in put_cpu_partial()
2657 this_cpu_write(s->cpu_slab->partial, slab); in put_cpu_partial()
2659 local_unlock_irqrestore(&s->cpu_slab->lock, flags); in put_cpu_partial()
2662 __unfreeze_partials(s, slab_to_unfreeze); in put_cpu_partial()
2663 stat(s, CPU_PARTIAL_DRAIN); in put_cpu_partial()
2669 static inline void unfreeze_partials(struct kmem_cache *s) { } in unfreeze_partials() argument
2670 static inline void unfreeze_partials_cpu(struct kmem_cache *s, in unfreeze_partials_cpu() argument
2675 static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c) in flush_slab() argument
2681 local_lock_irqsave(&s->cpu_slab->lock, flags); in flush_slab()
2690 local_unlock_irqrestore(&s->cpu_slab->lock, flags); in flush_slab()
2693 deactivate_slab(s, slab, freelist); in flush_slab()
2694 stat(s, CPUSLAB_FLUSH); in flush_slab()
2698 static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu) in __flush_cpu_slab() argument
2700 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu); in __flush_cpu_slab()
2709 deactivate_slab(s, slab, freelist); in __flush_cpu_slab()
2710 stat(s, CPUSLAB_FLUSH); in __flush_cpu_slab()
2713 unfreeze_partials_cpu(s, c); in __flush_cpu_slab()
2718 struct kmem_cache *s; member
2729 struct kmem_cache *s; in flush_cpu_slab() local
2735 s = sfw->s; in flush_cpu_slab()
2736 c = this_cpu_ptr(s->cpu_slab); in flush_cpu_slab()
2739 flush_slab(s, c); in flush_cpu_slab()
2741 unfreeze_partials(s); in flush_cpu_slab()
2744 static bool has_cpu_slab(int cpu, struct kmem_cache *s) in has_cpu_slab() argument
2746 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu); in has_cpu_slab()
2754 static void flush_all_cpus_locked(struct kmem_cache *s) in flush_all_cpus_locked() argument
2764 if (!has_cpu_slab(cpu, s)) { in flush_all_cpus_locked()
2770 sfw->s = s; in flush_all_cpus_locked()
2784 static void flush_all(struct kmem_cache *s) in flush_all() argument
2787 flush_all_cpus_locked(s); in flush_all()
2797 struct kmem_cache *s; in slub_cpu_dead() local
2800 list_for_each_entry(s, &slab_caches, list) in slub_cpu_dead()
2801 __flush_cpu_slab(s, cpu); in slub_cpu_dead()
2832 struct kmem_cache *s, struct slab *slab, in free_debug_processing() argument
2836 struct kmem_cache_node *n = get_node(s, slab_nid(slab)); in free_debug_processing()
2844 if (s->flags & SLAB_STORE_USER) in free_debug_processing()
2849 if (s->flags & SLAB_CONSISTENCY_CHECKS) { in free_debug_processing()
2850 if (!check_slab(s, slab)) in free_debug_processing()
2855 slab_err(s, slab, "Slab has %d allocated objects but %d are to be freed\n", in free_debug_processing()
2865 if (s->flags & SLAB_CONSISTENCY_CHECKS) { in free_debug_processing()
2866 if (!free_consistency_checks(s, slab, object, addr)) in free_debug_processing()
2870 if (s->flags & SLAB_STORE_USER) in free_debug_processing()
2871 set_track_update(s, object, TRACK_FREE, addr, handle); in free_debug_processing()
2872 trace(s, slab, object, 0); in free_debug_processing()
2874 init_object(s, object, SLUB_RED_INACTIVE); in free_debug_processing()
2878 object = get_freepointer(s, object); in free_debug_processing()
2885 slab_err(s, slab, "Bulk free expected %d objects but found %d\n", in free_debug_processing()
2894 set_freepointer(s, tail, prior); in free_debug_processing()
2902 if (slab->inuse == 0 && n->nr_partial >= s->min_partial) in free_debug_processing()
2907 remove_full(s, n, slab); in free_debug_processing()
2910 stat(s, FREE_ADD_PARTIAL); in free_debug_processing()
2914 stat(s, FREE_REMOVE_PARTIAL); in free_debug_processing()
2923 dec_slabs_node(s, slab_nid(slab_free), slab_free->objects); in free_debug_processing()
2929 slab_fix(s, "Object at 0x%p not freed", object); in free_debug_processing()
2932 stat(s, FREE_SLAB); in free_debug_processing()
2933 free_slab(s, slab_free); in free_debug_processing()
2955 slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid) in slab_out_of_memory() argument
2969 s->name, s->object_size, s->size, oo_order(s->oo), in slab_out_of_memory()
2970 oo_order(s->min)); in slab_out_of_memory()
2972 if (oo_order(s->min) > get_order(s->object_size)) in slab_out_of_memory()
2974 s->name); in slab_out_of_memory()
2976 for_each_kmem_cache_node(s, node, n) { in slab_out_of_memory()
3007 static inline void *get_freelist(struct kmem_cache *s, struct slab *slab) in get_freelist() argument
3013 lockdep_assert_held(this_cpu_ptr(&s->cpu_slab->lock)); in get_freelist()
3025 } while (!__cmpxchg_double_slab(s, slab, in get_freelist()
3052 static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, in ___slab_alloc() argument
3060 stat(s, ALLOC_SLOWPATH); in ___slab_alloc()
3085 stat(s, ALLOC_NODE_MISMATCH); in ___slab_alloc()
3099 local_lock_irqsave(&s->cpu_slab->lock, flags); in ___slab_alloc()
3101 local_unlock_irqrestore(&s->cpu_slab->lock, flags); in ___slab_alloc()
3108 freelist = get_freelist(s, slab); in ___slab_alloc()
3113 local_unlock_irqrestore(&s->cpu_slab->lock, flags); in ___slab_alloc()
3114 stat(s, DEACTIVATE_BYPASS); in ___slab_alloc()
3118 stat(s, ALLOC_REFILL); in ___slab_alloc()
3122 lockdep_assert_held(this_cpu_ptr(&s->cpu_slab->lock)); in ___slab_alloc()
3130 c->freelist = get_freepointer(s, freelist); in ___slab_alloc()
3132 local_unlock_irqrestore(&s->cpu_slab->lock, flags); in ___slab_alloc()
3137 local_lock_irqsave(&s->cpu_slab->lock, flags); in ___slab_alloc()
3139 local_unlock_irqrestore(&s->cpu_slab->lock, flags); in ___slab_alloc()
3146 local_unlock_irqrestore(&s->cpu_slab->lock, flags); in ___slab_alloc()
3147 deactivate_slab(s, slab, freelist); in ___slab_alloc()
3152 local_lock_irqsave(&s->cpu_slab->lock, flags); in ___slab_alloc()
3154 local_unlock_irqrestore(&s->cpu_slab->lock, flags); in ___slab_alloc()
3158 local_unlock_irqrestore(&s->cpu_slab->lock, flags); in ___slab_alloc()
3165 local_unlock_irqrestore(&s->cpu_slab->lock, flags); in ___slab_alloc()
3166 stat(s, CPU_PARTIAL_ALLOC); in ___slab_alloc()
3175 freelist = get_partial(s, node, &pc); in ___slab_alloc()
3179 slub_put_cpu_ptr(s->cpu_slab); in ___slab_alloc()
3180 slab = new_slab(s, gfpflags, node); in ___slab_alloc()
3181 c = slub_get_cpu_ptr(s->cpu_slab); in ___slab_alloc()
3184 slab_out_of_memory(s, gfpflags, node); in ___slab_alloc()
3188 stat(s, ALLOC_SLAB); in ___slab_alloc()
3190 if (kmem_cache_debug(s)) { in ___slab_alloc()
3191 freelist = alloc_single_from_new_slab(s, slab, orig_size); in ___slab_alloc()
3196 if (s->flags & SLAB_STORE_USER) in ___slab_alloc()
3197 set_track(s, freelist, TRACK_ALLOC, addr); in ___slab_alloc()
3211 inc_slabs_node(s, slab_nid(slab), slab->objects); in ___slab_alloc()
3215 if (kmem_cache_debug(s)) { in ___slab_alloc()
3221 if (s->flags & SLAB_STORE_USER) in ___slab_alloc()
3222 set_track(s, freelist, TRACK_ALLOC, addr); in ___slab_alloc()
3232 deactivate_slab(s, slab, get_freepointer(s, freelist)); in ___slab_alloc()
3238 local_lock_irqsave(&s->cpu_slab->lock, flags); in ___slab_alloc()
3247 local_unlock_irqrestore(&s->cpu_slab->lock, flags); in ___slab_alloc()
3249 deactivate_slab(s, flush_slab, flush_freelist); in ___slab_alloc()
3251 stat(s, CPUSLAB_FLUSH); in ___slab_alloc()
3265 static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, in __slab_alloc() argument
3276 c = slub_get_cpu_ptr(s->cpu_slab); in __slab_alloc()
3279 p = ___slab_alloc(s, gfpflags, node, addr, c, orig_size); in __slab_alloc()
3281 slub_put_cpu_ptr(s->cpu_slab); in __slab_alloc()
3290 static __always_inline void maybe_wipe_obj_freeptr(struct kmem_cache *s, in maybe_wipe_obj_freeptr() argument
3293 if (unlikely(slab_want_init_on_free(s)) && obj) in maybe_wipe_obj_freeptr()
3294 memset((void *)((char *)kasan_reset_tag(obj) + s->offset), in maybe_wipe_obj_freeptr()
3308 static __always_inline void *slab_alloc_node(struct kmem_cache *s, struct list_lru *lru, in slab_alloc_node() argument
3318 s = slab_pre_alloc_hook(s, lru, &objcg, 1, gfpflags); in slab_alloc_node()
3319 if (!s) in slab_alloc_node()
3322 object = kfence_alloc(s, orig_size, gfpflags); in slab_alloc_node()
3339 c = raw_cpu_ptr(s->cpu_slab); in slab_alloc_node()
3364 object = __slab_alloc(s, gfpflags, node, addr, c, orig_size); in slab_alloc_node()
3366 void *next_object = get_freepointer_safe(s, object); in slab_alloc_node()
3383 s->cpu_slab->freelist, s->cpu_slab->tid, in slab_alloc_node()
3387 note_cmpxchg_failure("slab_alloc", s, tid); in slab_alloc_node()
3390 prefetch_freepointer(s, next_object); in slab_alloc_node()
3391 stat(s, ALLOC_FASTPATH); in slab_alloc_node()
3394 maybe_wipe_obj_freeptr(s, object); in slab_alloc_node()
3395 init = slab_want_init_on_alloc(gfpflags, s); in slab_alloc_node()
3398 slab_post_alloc_hook(s, objcg, gfpflags, 1, &object, init); in slab_alloc_node()
3403 static __always_inline void *slab_alloc(struct kmem_cache *s, struct list_lru *lru, in slab_alloc() argument
3406 return slab_alloc_node(s, lru, gfpflags, NUMA_NO_NODE, addr, orig_size); in slab_alloc()
3410 void *__kmem_cache_alloc_lru(struct kmem_cache *s, struct list_lru *lru, in __kmem_cache_alloc_lru() argument
3413 void *ret = slab_alloc(s, lru, gfpflags, _RET_IP_, s->object_size); in __kmem_cache_alloc_lru()
3415 trace_kmem_cache_alloc(_RET_IP_, ret, s, gfpflags, NUMA_NO_NODE); in __kmem_cache_alloc_lru()
3420 void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags) in kmem_cache_alloc() argument
3422 return __kmem_cache_alloc_lru(s, NULL, gfpflags); in kmem_cache_alloc()
3426 void *kmem_cache_alloc_lru(struct kmem_cache *s, struct list_lru *lru, in kmem_cache_alloc_lru() argument
3429 return __kmem_cache_alloc_lru(s, lru, gfpflags); in kmem_cache_alloc_lru()
3433 void *__kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, in __kmem_cache_alloc_node() argument
3437 return slab_alloc_node(s, NULL, gfpflags, node, in __kmem_cache_alloc_node()
3441 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node) in kmem_cache_alloc_node() argument
3443 void *ret = slab_alloc_node(s, NULL, gfpflags, node, _RET_IP_, s->object_size); in kmem_cache_alloc_node()
3445 trace_kmem_cache_alloc(_RET_IP_, ret, s, gfpflags, node); in kmem_cache_alloc_node()
3459 static void __slab_free(struct kmem_cache *s, struct slab *slab, in __slab_free() argument
3471 stat(s, FREE_SLOWPATH); in __slab_free()
3476 if (kmem_cache_debug(s)) { in __slab_free()
3477 free_debug_processing(s, slab, head, tail, cnt, addr); in __slab_free()
3488 set_freepointer(s, tail, prior); in __slab_free()
3494 if (kmem_cache_has_cpu_partial(s) && !prior) { in __slab_free()
3506 n = get_node(s, slab_nid(slab)); in __slab_free()
3520 } while (!cmpxchg_double_slab(s, slab, in __slab_free()
3532 stat(s, FREE_FROZEN); in __slab_free()
3538 put_cpu_partial(s, slab, 1); in __slab_free()
3539 stat(s, CPU_PARTIAL_FREE); in __slab_free()
3545 if (unlikely(!new.inuse && n->nr_partial >= s->min_partial)) in __slab_free()
3552 if (!kmem_cache_has_cpu_partial(s) && unlikely(!prior)) { in __slab_free()
3553 remove_full(s, n, slab); in __slab_free()
3555 stat(s, FREE_ADD_PARTIAL); in __slab_free()
3566 stat(s, FREE_REMOVE_PARTIAL); in __slab_free()
3569 remove_full(s, n, slab); in __slab_free()
3573 stat(s, FREE_SLAB); in __slab_free()
3574 discard_slab(s, slab); in __slab_free()
3592 static __always_inline void do_slab_free(struct kmem_cache *s, in do_slab_free() argument
3608 c = raw_cpu_ptr(s->cpu_slab); in do_slab_free()
3615 __slab_free(s, slab, head, tail_obj, cnt, addr); in do_slab_free()
3622 set_freepointer(s, tail_obj, freelist); in do_slab_free()
3625 s->cpu_slab->freelist, s->cpu_slab->tid, in do_slab_free()
3629 note_cmpxchg_failure("slab_free", s, tid); in do_slab_free()
3634 local_lock(&s->cpu_slab->lock); in do_slab_free()
3635 c = this_cpu_ptr(s->cpu_slab); in do_slab_free()
3637 local_unlock(&s->cpu_slab->lock); in do_slab_free()
3643 set_freepointer(s, tail_obj, freelist); in do_slab_free()
3647 local_unlock(&s->cpu_slab->lock); in do_slab_free()
3649 stat(s, FREE_FASTPATH); in do_slab_free()
3652 static __always_inline void slab_free(struct kmem_cache *s, struct slab *slab, in slab_free() argument
3656 memcg_slab_free_hook(s, slab, p, cnt); in slab_free()
3661 if (slab_free_freelist_hook(s, &head, &tail, &cnt)) in slab_free()
3662 do_slab_free(s, slab, head, tail, cnt, addr); in slab_free()
3672 void __kmem_cache_free(struct kmem_cache *s, void *x, unsigned long caller) in __kmem_cache_free() argument
3674 slab_free(s, virt_to_slab(x), x, NULL, &x, 1, caller); in __kmem_cache_free()
3677 void kmem_cache_free(struct kmem_cache *s, void *x) in kmem_cache_free() argument
3679 s = cache_from_obj(s, x); in kmem_cache_free()
3680 if (!s) in kmem_cache_free()
3682 trace_kmem_cache_free(_RET_IP_, x, s); in kmem_cache_free()
3683 slab_free(s, virt_to_slab(x), x, NULL, &x, 1, _RET_IP_); in kmem_cache_free()
3692 struct kmem_cache *s; member
3708 int build_detached_freelist(struct kmem_cache *s, size_t size, in build_detached_freelist() argument
3718 if (!s) { in build_detached_freelist()
3727 df->s = df->slab->slab_cache; in build_detached_freelist()
3730 df->s = cache_from_obj(s, object); /* Support for memcg */ in build_detached_freelist()
3741 set_freepointer(df->s, object, NULL); in build_detached_freelist()
3749 set_freepointer(df->s, object, df->freelist); in build_detached_freelist()
3767 void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p) in kmem_cache_free_bulk() argument
3775 size = build_detached_freelist(s, size, p, &df); in kmem_cache_free_bulk()
3779 slab_free(df.s, df.slab, df.freelist, df.tail, &p[size], df.cnt, in kmem_cache_free_bulk()
3786 int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, in kmem_cache_alloc_bulk() argument
3794 s = slab_pre_alloc_hook(s, NULL, &objcg, size, flags); in kmem_cache_alloc_bulk()
3795 if (unlikely(!s)) in kmem_cache_alloc_bulk()
3802 c = slub_get_cpu_ptr(s->cpu_slab); in kmem_cache_alloc_bulk()
3803 local_lock_irq(&s->cpu_slab->lock); in kmem_cache_alloc_bulk()
3806 void *object = kfence_alloc(s, s->object_size, flags); in kmem_cache_alloc_bulk()
3824 local_unlock_irq(&s->cpu_slab->lock); in kmem_cache_alloc_bulk()
3830 p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE, in kmem_cache_alloc_bulk()
3831 _RET_IP_, c, s->object_size); in kmem_cache_alloc_bulk()
3835 c = this_cpu_ptr(s->cpu_slab); in kmem_cache_alloc_bulk()
3836 maybe_wipe_obj_freeptr(s, p[i]); in kmem_cache_alloc_bulk()
3838 local_lock_irq(&s->cpu_slab->lock); in kmem_cache_alloc_bulk()
3842 c->freelist = get_freepointer(s, object); in kmem_cache_alloc_bulk()
3844 maybe_wipe_obj_freeptr(s, p[i]); in kmem_cache_alloc_bulk()
3847 local_unlock_irq(&s->cpu_slab->lock); in kmem_cache_alloc_bulk()
3848 slub_put_cpu_ptr(s->cpu_slab); in kmem_cache_alloc_bulk()
3854 slab_post_alloc_hook(s, objcg, flags, size, p, in kmem_cache_alloc_bulk()
3855 slab_want_init_on_alloc(flags, s)); in kmem_cache_alloc_bulk()
3858 slub_put_cpu_ptr(s->cpu_slab); in kmem_cache_alloc_bulk()
3859 slab_post_alloc_hook(s, objcg, flags, i, p, false); in kmem_cache_alloc_bulk()
3860 kmem_cache_free_bulk(s, i, p); in kmem_cache_alloc_bulk()
4017 static inline int alloc_kmem_cache_cpus(struct kmem_cache *s) in alloc_kmem_cache_cpus() argument
4026 s->cpu_slab = __alloc_percpu(sizeof(struct kmem_cache_cpu), in alloc_kmem_cache_cpus()
4029 if (!s->cpu_slab) in alloc_kmem_cache_cpus()
4032 init_kmem_cache_cpus(s); in alloc_kmem_cache_cpus()
4084 static void free_kmem_cache_nodes(struct kmem_cache *s) in free_kmem_cache_nodes() argument
4089 for_each_kmem_cache_node(s, node, n) { in free_kmem_cache_nodes()
4090 s->node[node] = NULL; in free_kmem_cache_nodes()
4095 void __kmem_cache_release(struct kmem_cache *s) in __kmem_cache_release() argument
4097 cache_random_seq_destroy(s); in __kmem_cache_release()
4098 free_percpu(s->cpu_slab); in __kmem_cache_release()
4099 free_kmem_cache_nodes(s); in __kmem_cache_release()
4102 static int init_kmem_cache_nodes(struct kmem_cache *s) in init_kmem_cache_nodes() argument
4117 free_kmem_cache_nodes(s); in init_kmem_cache_nodes()
4122 s->node[node] = n; in init_kmem_cache_nodes()
4127 static void set_cpu_partial(struct kmem_cache *s) in set_cpu_partial() argument
4145 if (!kmem_cache_has_cpu_partial(s)) in set_cpu_partial()
4147 else if (s->size >= PAGE_SIZE) in set_cpu_partial()
4149 else if (s->size >= 1024) in set_cpu_partial()
4151 else if (s->size >= 256) in set_cpu_partial()
4156 slub_set_cpu_partial(s, nr_objects); in set_cpu_partial()
4164 static int calculate_sizes(struct kmem_cache *s) in calculate_sizes() argument
4166 slab_flags_t flags = s->flags; in calculate_sizes()
4167 unsigned int size = s->object_size; in calculate_sizes()
4184 !s->ctor) in calculate_sizes()
4185 s->flags |= __OBJECT_POISON; in calculate_sizes()
4187 s->flags &= ~__OBJECT_POISON; in calculate_sizes()
4195 if ((flags & SLAB_RED_ZONE) && size == s->object_size) in calculate_sizes()
4203 s->inuse = size; in calculate_sizes()
4206 ((flags & SLAB_RED_ZONE) && s->object_size < sizeof(void *)) || in calculate_sizes()
4207 s->ctor) { in calculate_sizes()
4222 s->offset = size; in calculate_sizes()
4230 s->offset = ALIGN_DOWN(s->object_size / 2, sizeof(void *)); in calculate_sizes()
4247 kasan_cache_create(s, &size, &s->flags); in calculate_sizes()
4259 s->red_left_pad = sizeof(void *); in calculate_sizes()
4260 s->red_left_pad = ALIGN(s->red_left_pad, s->align); in calculate_sizes()
4261 size += s->red_left_pad; in calculate_sizes()
4270 size = ALIGN(size, s->align); in calculate_sizes()
4271 s->size = size; in calculate_sizes()
4272 s->reciprocal_size = reciprocal_value(size); in calculate_sizes()
4278 s->allocflags = 0; in calculate_sizes()
4280 s->allocflags |= __GFP_COMP; in calculate_sizes()
4282 if (s->flags & SLAB_CACHE_DMA) in calculate_sizes()
4283 s->allocflags |= GFP_DMA; in calculate_sizes()
4285 if (s->flags & SLAB_CACHE_DMA32) in calculate_sizes()
4286 s->allocflags |= GFP_DMA32; in calculate_sizes()
4288 if (s->flags & SLAB_RECLAIM_ACCOUNT) in calculate_sizes()
4289 s->allocflags |= __GFP_RECLAIMABLE; in calculate_sizes()
4294 s->oo = oo_make(order, size); in calculate_sizes()
4295 s->min = oo_make(get_order(size), size); in calculate_sizes()
4297 return !!oo_objects(s->oo); in calculate_sizes()
4300 static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags) in kmem_cache_open() argument
4302 s->flags = kmem_cache_flags(s->size, flags, s->name); in kmem_cache_open()
4304 s->random = get_random_long(); in kmem_cache_open()
4307 if (!calculate_sizes(s)) in kmem_cache_open()
4314 if (get_order(s->size) > get_order(s->object_size)) { in kmem_cache_open()
4315 s->flags &= ~DEBUG_METADATA_FLAGS; in kmem_cache_open()
4316 s->offset = 0; in kmem_cache_open()
4317 if (!calculate_sizes(s)) in kmem_cache_open()
4324 if (system_has_cmpxchg_double() && (s->flags & SLAB_NO_CMPXCHG) == 0) in kmem_cache_open()
4326 s->flags |= __CMPXCHG_DOUBLE; in kmem_cache_open()
4333 s->min_partial = min_t(unsigned long, MAX_PARTIAL, ilog2(s->size) / 2); in kmem_cache_open()
4334 s->min_partial = max_t(unsigned long, MIN_PARTIAL, s->min_partial); in kmem_cache_open()
4336 set_cpu_partial(s); in kmem_cache_open()
4339 s->remote_node_defrag_ratio = 1000; in kmem_cache_open()
4344 if (init_cache_random_seq(s)) in kmem_cache_open()
4348 if (!init_kmem_cache_nodes(s)) in kmem_cache_open()
4351 if (alloc_kmem_cache_cpus(s)) in kmem_cache_open()
4355 __kmem_cache_release(s); in kmem_cache_open()
4359 static void list_slab_objects(struct kmem_cache *s, struct slab *slab, in list_slab_objects() argument
4366 slab_err(s, slab, text, s->name); in list_slab_objects()
4369 __fill_map(object_map, s, slab); in list_slab_objects()
4371 for_each_object(p, s, addr, slab->objects) { in list_slab_objects()
4373 if (!test_bit(__obj_to_index(s, addr, p), object_map)) { in list_slab_objects()
4375 print_tracking(s, p); in list_slab_objects()
4387 static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n) in free_partial() argument
4399 list_slab_objects(s, slab, in free_partial()
4406 discard_slab(s, slab); in free_partial()
4409 bool __kmem_cache_empty(struct kmem_cache *s) in __kmem_cache_empty() argument
4414 for_each_kmem_cache_node(s, node, n) in __kmem_cache_empty()
4415 if (n->nr_partial || slabs_node(s, node)) in __kmem_cache_empty()
4423 int __kmem_cache_shutdown(struct kmem_cache *s) in __kmem_cache_shutdown() argument
4428 flush_all_cpus_locked(s); in __kmem_cache_shutdown()
4430 for_each_kmem_cache_node(s, node, n) { in __kmem_cache_shutdown()
4431 free_partial(s, n); in __kmem_cache_shutdown()
4432 if (n->nr_partial || slabs_node(s, node)) in __kmem_cache_shutdown()
4446 struct kmem_cache *s = slab->slab_cache; in __kmem_obj_info() local
4451 kpp->kp_slab_cache = s; in __kmem_obj_info()
4455 objp = restore_red_left(s, objp0); in __kmem_obj_info()
4459 objnr = obj_to_index(s, slab, objp); in __kmem_obj_info()
4461 objp = base + s->size * objnr; in __kmem_obj_info()
4463 if (WARN_ON_ONCE(objp < base || objp >= base + slab->objects * s->size in __kmem_obj_info()
4464 || (objp - base) % s->size) || in __kmem_obj_info()
4465 !(s->flags & SLAB_STORE_USER)) in __kmem_obj_info()
4468 objp = fixup_red_left(s, objp); in __kmem_obj_info()
4469 trackp = get_track(s, objp, TRACK_ALLOC); in __kmem_obj_info()
4484 trackp = get_track(s, objp, TRACK_FREE); in __kmem_obj_info()
4541 struct kmem_cache *s; in __check_heap_object() local
4548 s = slab->slab_cache; in __check_heap_object()
4559 offset = (ptr - slab_address(slab)) % s->size; in __check_heap_object()
4562 if (!is_kfence && kmem_cache_debug_flags(s, SLAB_RED_ZONE)) { in __check_heap_object()
4563 if (offset < s->red_left_pad) in __check_heap_object()
4565 s->name, to_user, offset, n); in __check_heap_object()
4566 offset -= s->red_left_pad; in __check_heap_object()
4570 if (offset >= s->useroffset && in __check_heap_object()
4571 offset - s->useroffset <= s->usersize && in __check_heap_object()
4572 n <= s->useroffset - offset + s->usersize) in __check_heap_object()
4575 usercopy_abort("SLUB object", s->name, to_user, offset, n); in __check_heap_object()
4590 static int __kmem_cache_do_shrink(struct kmem_cache *s) in __kmem_cache_do_shrink() argument
4602 for_each_kmem_cache_node(s, node, n) { in __kmem_cache_do_shrink()
4627 dec_slabs_node(s, node, slab->objects); in __kmem_cache_do_shrink()
4643 free_slab(s, slab); in __kmem_cache_do_shrink()
4645 if (slabs_node(s, node)) in __kmem_cache_do_shrink()
4652 int __kmem_cache_shrink(struct kmem_cache *s) in __kmem_cache_shrink() argument
4654 flush_all(s); in __kmem_cache_shrink()
4655 return __kmem_cache_do_shrink(s); in __kmem_cache_shrink()
4660 struct kmem_cache *s; in slab_mem_going_offline_callback() local
4663 list_for_each_entry(s, &slab_caches, list) { in slab_mem_going_offline_callback()
4664 flush_all_cpus_locked(s); in slab_mem_going_offline_callback()
4665 __kmem_cache_do_shrink(s); in slab_mem_going_offline_callback()
4699 struct kmem_cache *s; in slab_mem_going_online_callback() local
4717 list_for_each_entry(s, &slab_caches, list) { in slab_mem_going_online_callback()
4722 if (get_node(s, nid)) in slab_mem_going_online_callback()
4735 s->node[nid] = n; in slab_mem_going_online_callback()
4792 struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT); in bootstrap() local
4795 memcpy(s, static_cache, kmem_cache->object_size); in bootstrap()
4802 __flush_cpu_slab(s, smp_processor_id()); in bootstrap()
4803 for_each_kmem_cache_node(s, node, n) { in bootstrap()
4807 p->slab_cache = s; in bootstrap()
4811 p->slab_cache = s; in bootstrap()
4814 list_add(&s->list, &slab_caches); in bootstrap()
4815 return s; in bootstrap()
4883 struct kmem_cache *s; in __kmem_cache_alias() local
4885 s = find_mergeable(size, align, flags, name, ctor); in __kmem_cache_alias()
4886 if (s) { in __kmem_cache_alias()
4887 if (sysfs_slab_alias(s, name)) in __kmem_cache_alias()
4890 s->refcount++; in __kmem_cache_alias()
4896 s->object_size = max(s->object_size, size); in __kmem_cache_alias()
4897 s->inuse = max(s->inuse, ALIGN(size, sizeof(void *))); in __kmem_cache_alias()
4900 return s; in __kmem_cache_alias()
4903 int __kmem_cache_create(struct kmem_cache *s, slab_flags_t flags) in __kmem_cache_create() argument
4907 err = kmem_cache_open(s, flags); in __kmem_cache_create()
4915 err = sysfs_slab_add(s); in __kmem_cache_create()
4917 __kmem_cache_release(s); in __kmem_cache_create()
4921 if (s->flags & SLAB_STORE_USER) in __kmem_cache_create()
4922 debugfs_slab_add(s); in __kmem_cache_create()
4940 static void validate_slab(struct kmem_cache *s, struct slab *slab, in validate_slab() argument
4946 if (!check_slab(s, slab) || !on_freelist(s, slab, NULL)) in validate_slab()
4950 __fill_map(obj_map, s, slab); in validate_slab()
4951 for_each_object(p, s, addr, slab->objects) { in validate_slab()
4952 u8 val = test_bit(__obj_to_index(s, addr, p), obj_map) ? in validate_slab()
4955 if (!check_object(s, slab, p, val)) in validate_slab()
4960 static int validate_slab_node(struct kmem_cache *s, in validate_slab_node() argument
4970 validate_slab(s, slab, obj_map); in validate_slab_node()
4975 s->name, count, n->nr_partial); in validate_slab_node()
4979 if (!(s->flags & SLAB_STORE_USER)) in validate_slab_node()
4983 validate_slab(s, slab, obj_map); in validate_slab_node()
4988 s->name, count, atomic_long_read(&n->nr_slabs)); in validate_slab_node()
4997 long validate_slab_cache(struct kmem_cache *s) in validate_slab_cache() argument
5004 obj_map = bitmap_alloc(oo_objects(s->oo), GFP_KERNEL); in validate_slab_cache()
5008 flush_all(s); in validate_slab_cache()
5009 for_each_kmem_cache_node(s, node, n) in validate_slab_cache()
5010 count += validate_slab_node(s, n, obj_map); in validate_slab_cache()
5074 static int add_location(struct loc_track *t, struct kmem_cache *s, in add_location() argument
5083 unsigned int waste = s->object_size - orig_size; in add_location()
5166 static void process_slab(struct loc_track *t, struct kmem_cache *s, in process_slab() argument
5174 __fill_map(obj_map, s, slab); in process_slab()
5176 for_each_object(p, s, addr, slab->objects) in process_slab()
5177 if (!test_bit(__obj_to_index(s, addr, p), obj_map)) in process_slab()
5178 add_location(t, s, get_track(s, p, alloc), in process_slab()
5179 is_alloc ? get_orig_size(s, p) : in process_slab()
5180 s->object_size); in process_slab()
5200 static ssize_t show_slab_objects(struct kmem_cache *s, in show_slab_objects() argument
5217 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, in show_slab_objects()
5269 for_each_kmem_cache_node(s, node, n) { in show_slab_objects()
5287 for_each_kmem_cache_node(s, node, n) { in show_slab_objects()
5318 ssize_t (*show)(struct kmem_cache *s, char *buf);
5319 ssize_t (*store)(struct kmem_cache *s, const char *x, size_t count);
5328 static ssize_t slab_size_show(struct kmem_cache *s, char *buf) in slab_size_show() argument
5330 return sysfs_emit(buf, "%u\n", s->size); in slab_size_show()
5334 static ssize_t align_show(struct kmem_cache *s, char *buf) in align_show() argument
5336 return sysfs_emit(buf, "%u\n", s->align); in align_show()
5340 static ssize_t object_size_show(struct kmem_cache *s, char *buf) in object_size_show() argument
5342 return sysfs_emit(buf, "%u\n", s->object_size); in object_size_show()
5346 static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf) in objs_per_slab_show() argument
5348 return sysfs_emit(buf, "%u\n", oo_objects(s->oo)); in objs_per_slab_show()
5352 static ssize_t order_show(struct kmem_cache *s, char *buf) in order_show() argument
5354 return sysfs_emit(buf, "%u\n", oo_order(s->oo)); in order_show()
5358 static ssize_t min_partial_show(struct kmem_cache *s, char *buf) in min_partial_show() argument
5360 return sysfs_emit(buf, "%lu\n", s->min_partial); in min_partial_show()
5363 static ssize_t min_partial_store(struct kmem_cache *s, const char *buf, in min_partial_store() argument
5373 s->min_partial = min; in min_partial_store()
5378 static ssize_t cpu_partial_show(struct kmem_cache *s, char *buf) in cpu_partial_show() argument
5382 nr_partial = s->cpu_partial; in cpu_partial_show()
5388 static ssize_t cpu_partial_store(struct kmem_cache *s, const char *buf, in cpu_partial_store() argument
5397 if (objects && !kmem_cache_has_cpu_partial(s)) in cpu_partial_store()
5400 slub_set_cpu_partial(s, objects); in cpu_partial_store()
5401 flush_all(s); in cpu_partial_store()
5406 static ssize_t ctor_show(struct kmem_cache *s, char *buf) in ctor_show() argument
5408 if (!s->ctor) in ctor_show()
5410 return sysfs_emit(buf, "%pS\n", s->ctor); in ctor_show()
5414 static ssize_t aliases_show(struct kmem_cache *s, char *buf) in aliases_show() argument
5416 return sysfs_emit(buf, "%d\n", s->refcount < 0 ? 0 : s->refcount - 1); in aliases_show()
5420 static ssize_t partial_show(struct kmem_cache *s, char *buf) in partial_show() argument
5422 return show_slab_objects(s, buf, SO_PARTIAL); in partial_show()
5426 static ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf) in cpu_slabs_show() argument
5428 return show_slab_objects(s, buf, SO_CPU); in cpu_slabs_show()
5432 static ssize_t objects_show(struct kmem_cache *s, char *buf) in objects_show() argument
5434 return show_slab_objects(s, buf, SO_ALL|SO_OBJECTS); in objects_show()
5438 static ssize_t objects_partial_show(struct kmem_cache *s, char *buf) in objects_partial_show() argument
5440 return show_slab_objects(s, buf, SO_PARTIAL|SO_OBJECTS); in objects_partial_show()
5444 static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf) in slabs_cpu_partial_show() argument
5455 slab = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu)); in slabs_cpu_partial_show()
5463 objects = (slabs * oo_objects(s->oo)) / 2; in slabs_cpu_partial_show()
5470 slab = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu)); in slabs_cpu_partial_show()
5473 objects = (slabs * oo_objects(s->oo)) / 2; in slabs_cpu_partial_show()
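The object count reported by slabs_cpu_partial_show() is an estimate that assumes each per-cpu partial slab is on average half full: objects = (slabs * oo_objects(s->oo)) / 2. With, say, 32 objects per slab and 4 partial slabs on a CPU, the file would report an estimate of 64 objects for that CPU (figures invented for illustration).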
5485 static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf) in reclaim_account_show() argument
5487 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT)); in reclaim_account_show()
5491 static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf) in hwcache_align_show() argument
5493 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN)); in hwcache_align_show()
5498 static ssize_t cache_dma_show(struct kmem_cache *s, char *buf) in cache_dma_show() argument
5500 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA)); in cache_dma_show()
5505 static ssize_t usersize_show(struct kmem_cache *s, char *buf) in usersize_show() argument
5507 return sysfs_emit(buf, "%u\n", s->usersize); in usersize_show()
5511 static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf) in destroy_by_rcu_show() argument
5513 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_TYPESAFE_BY_RCU)); in destroy_by_rcu_show()
5518 static ssize_t slabs_show(struct kmem_cache *s, char *buf) in slabs_show() argument
5520 return show_slab_objects(s, buf, SO_ALL); in slabs_show()
5524 static ssize_t total_objects_show(struct kmem_cache *s, char *buf) in total_objects_show() argument
5526 return show_slab_objects(s, buf, SO_ALL|SO_TOTAL); in total_objects_show()
5530 static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf) in sanity_checks_show() argument
5532 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_CONSISTENCY_CHECKS)); in sanity_checks_show()
5536 static ssize_t trace_show(struct kmem_cache *s, char *buf) in trace_show() argument
5538 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_TRACE)); in trace_show()
5542 static ssize_t red_zone_show(struct kmem_cache *s, char *buf) in red_zone_show() argument
5544 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE)); in red_zone_show()
5549 static ssize_t poison_show(struct kmem_cache *s, char *buf) in poison_show() argument
5551 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_POISON)); in poison_show()
5556 static ssize_t store_user_show(struct kmem_cache *s, char *buf) in store_user_show() argument
5558 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_STORE_USER)); in store_user_show()
5563 static ssize_t validate_show(struct kmem_cache *s, char *buf) in validate_show() argument
5568 static ssize_t validate_store(struct kmem_cache *s, in validate_store() argument
5573 if (buf[0] == '1' && kmem_cache_debug(s)) { in validate_store()
5574 ret = validate_slab_cache(s); in validate_store()
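validate_store() is what runs when root writes '1' to a cache's validate file, and it only proceeds for caches that actually have debugging enabled (kmem_cache_debug()). A small userspace trigger, assuming sysfs exposes caches under /sys/kernel/slab and using the dentry cache purely as an example name:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/kernel/slab/dentry/validate";	/* example cache */
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, "1", 1) != 1)
		perror("write");
	close(fd);
	return 0;
}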
5585 static ssize_t failslab_show(struct kmem_cache *s, char *buf) in failslab_show() argument
5587 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_FAILSLAB)); in failslab_show()
5592 static ssize_t shrink_show(struct kmem_cache *s, char *buf) in shrink_show() argument
5597 static ssize_t shrink_store(struct kmem_cache *s, in shrink_store() argument
5601 kmem_cache_shrink(s); in shrink_store()
5609 static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf) in remote_node_defrag_ratio_show() argument
5611 return sysfs_emit(buf, "%u\n", s->remote_node_defrag_ratio / 10); in remote_node_defrag_ratio_show()
5614 static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s, in remote_node_defrag_ratio_store() argument
5626 s->remote_node_defrag_ratio = ratio * 10; in remote_node_defrag_ratio_store()
5634 static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si) in show_stat() argument
5645 unsigned x = per_cpu_ptr(s->cpu_slab, cpu)->stat[si]; in show_stat()
5666 static void clear_stat(struct kmem_cache *s, enum stat_item si) in clear_stat() argument
5671 per_cpu_ptr(s->cpu_slab, cpu)->stat[si] = 0; in clear_stat()
5675 static ssize_t text##_show(struct kmem_cache *s, char *buf) \
5677 return show_stat(s, buf, si); \
5679 static ssize_t text##_store(struct kmem_cache *s, \
5684 clear_stat(s, si); \
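The text##_show/text##_store pair above is a token-pasting template stamped out once per enum stat_item, giving each statistic its own sysfs file. Written out by hand for one item it looks roughly like the following; ALLOC_FASTPATH is a real stat_item, but the '0' guard in the store path and the final SLAB_ATTR() registration are recalled from the full source rather than visible in these fragments:

static ssize_t alloc_fastpath_show(struct kmem_cache *s, char *buf)
{
	return show_stat(s, buf, ALLOC_FASTPATH);
}

static ssize_t alloc_fastpath_store(struct kmem_cache *s,
				    const char *buf, size_t length)
{
	if (buf[0] == '0')			/* writing 0 clears the counter */
		clear_stat(s, ALLOC_FASTPATH);
	return length;
}
/* the real macro also emits SLAB_ATTR(alloc_fastpath) to register the pair */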
5718 static ssize_t skip_kfence_show(struct kmem_cache *s, char *buf) in skip_kfence_show() argument
5720 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_SKIP_KFENCE)); in skip_kfence_show()
5723 static ssize_t skip_kfence_store(struct kmem_cache *s, in skip_kfence_store() argument
5729 s->flags &= ~SLAB_SKIP_KFENCE; in skip_kfence_store()
5731 s->flags |= SLAB_SKIP_KFENCE; in skip_kfence_store()
5823 struct kmem_cache *s; in slab_attr_show() local
5826 s = to_slab(kobj); in slab_attr_show()
5831 return attribute->show(s, buf); in slab_attr_show()
5839 struct kmem_cache *s; in slab_attr_store() local
5842 s = to_slab(kobj); in slab_attr_store()
5847 return attribute->store(s, buf, len); in slab_attr_store()
5867 static inline struct kset *cache_kset(struct kmem_cache *s) in cache_kset() argument
5878 static char *create_unique_id(struct kmem_cache *s) in create_unique_id() argument
5894 if (s->flags & SLAB_CACHE_DMA) in create_unique_id()
5896 if (s->flags & SLAB_CACHE_DMA32) in create_unique_id()
5898 if (s->flags & SLAB_RECLAIM_ACCOUNT) in create_unique_id()
5900 if (s->flags & SLAB_CONSISTENCY_CHECKS) in create_unique_id()
5902 if (s->flags & SLAB_ACCOUNT) in create_unique_id()
5906 p += snprintf(p, ID_STR_LENGTH - (p - name), "%07u", s->size); in create_unique_id()
5916 static int sysfs_slab_add(struct kmem_cache *s) in sysfs_slab_add() argument
5920 struct kset *kset = cache_kset(s); in sysfs_slab_add()
5921 int unmergeable = slab_unmergeable(s); in sysfs_slab_add()
5924 kobject_init(&s->kobj, &slab_ktype); in sysfs_slab_add()
5938 sysfs_remove_link(&slab_kset->kobj, s->name); in sysfs_slab_add()
5939 name = s->name; in sysfs_slab_add()
5945 name = create_unique_id(s); in sysfs_slab_add()
5950 s->kobj.kset = kset; in sysfs_slab_add()
5951 err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name); in sysfs_slab_add()
5955 err = sysfs_create_group(&s->kobj, &slab_attr_group); in sysfs_slab_add()
5961 sysfs_slab_alias(s, s->name); in sysfs_slab_add()
5968 kobject_del(&s->kobj); in sysfs_slab_add()
5972 void sysfs_slab_unlink(struct kmem_cache *s) in sysfs_slab_unlink() argument
5975 kobject_del(&s->kobj); in sysfs_slab_unlink()
5978 void sysfs_slab_release(struct kmem_cache *s) in sysfs_slab_release() argument
5981 kobject_put(&s->kobj); in sysfs_slab_release()
5989 struct kmem_cache *s; member
5996 static int sysfs_slab_alias(struct kmem_cache *s, const char *name) in sysfs_slab_alias() argument
6005 return sysfs_create_link(&slab_kset->kobj, &s->kobj, name); in sysfs_slab_alias()
6012 al->s = s; in sysfs_slab_alias()
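sysfs_slab_alias() exposes a merged cache under additional names by creating symlinks in /sys/kernel/slab, or by queueing the alias (struct saved_alias above) until sysfs is up. From userspace an alias can be told apart from a real entry by resolving the link; the cache name below is only an example and may not be merged on a given system:

#include <limits.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char target[PATH_MAX];
	ssize_t n = readlink("/sys/kernel/slab/anon_vma",	/* example name */
			     target, sizeof(target) - 1);

	if (n < 0) {
		perror("readlink (not an alias, or no such cache?)");
		return 1;
	}
	target[n] = '\0';
	printf("alias resolves to %s\n", target);
	return 0;
}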
6022 struct kmem_cache *s; in slab_sysfs_init() local
6036 list_for_each_entry(s, &slab_caches, list) { in slab_sysfs_init()
6037 err = sysfs_slab_add(s); in slab_sysfs_init()
6040 s->name); in slab_sysfs_init()
6047 err = sysfs_slab_alias(al->s, al->name); in slab_sysfs_init()
6177 struct kmem_cache *s = file_inode(filep)->i_private; in slab_debug_trace_open() local
6183 obj_map = bitmap_alloc(oo_objects(s->oo), GFP_KERNEL); in slab_debug_trace_open()
6200 for_each_kmem_cache_node(s, node, n) { in slab_debug_trace_open()
6209 process_slab(t, s, slab, alloc, obj_map); in slab_debug_trace_open()
6211 process_slab(t, s, slab, alloc, obj_map); in slab_debug_trace_open()
6239 static void debugfs_slab_add(struct kmem_cache *s) in debugfs_slab_add() argument
6246 slab_cache_dir = debugfs_create_dir(s->name, slab_debugfs_root); in debugfs_slab_add()
6249 slab_cache_dir, s, &slab_debugfs_fops); in debugfs_slab_add()
6252 slab_cache_dir, s, &slab_debugfs_fops); in debugfs_slab_add()
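debugfs_slab_add() creates the per-cache debugfs files that slab_debug_trace_open() later serves (alloc_traces and free_traces in the full source). Reading them from userspace requires debugfs to be mounted and SLAB_STORE_USER to be active for the cache; the kmalloc-64 name and the /sys/kernel/debug mount point below are assumptions:

#include <stdio.h>

int main(void)
{
	const char *path = "/sys/kernel/debug/slab/kmalloc-64/alloc_traces";
	FILE *f = fopen(path, "r");
	char line[512];

	if (!f) {
		perror("fopen");
		return 1;
	}
	while (fgets(line, sizeof(line), f))	/* one record per call site */
		fputs(line, stdout);
	fclose(f);
	return 0;
}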
6255 void debugfs_slab_release(struct kmem_cache *s) in debugfs_slab_release() argument
6257 debugfs_remove_recursive(debugfs_lookup(s->name, slab_debugfs_root)); in debugfs_slab_release()
6262 struct kmem_cache *s; in slab_debugfs_init() local
6266 list_for_each_entry(s, &slab_caches, list) in slab_debugfs_init()
6267 if (s->flags & SLAB_STORE_USER) in slab_debugfs_init()
6268 debugfs_slab_add(s); in slab_debugfs_init()
6279 void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo) in get_slabinfo() argument
6287 for_each_kmem_cache_node(s, node, n) { in get_slabinfo()
6297 sinfo->objects_per_slab = oo_objects(s->oo); in get_slabinfo()
6298 sinfo->cache_order = oo_order(s->oo); in get_slabinfo()
6301 void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s) in slabinfo_show_stats() argument
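get_slabinfo() is the SLUB backend for /proc/slabinfo, reporting active and total objects plus objects-per-slab and page order for each cache. A small reader that prints just the name and objects-per-slab columns, assuming the conventional slabinfo column layout (name, active_objs, num_objs, objsize, objperslab, pagesperslab) and root privileges:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/slabinfo", "r");
	char line[1024];
	char name[64];
	unsigned long active, total, objsize, objsperslab;

	if (!f) {
		perror("fopen");		/* usually needs root */
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		/* header lines fail the 5-field match and are skipped */
		if (sscanf(line, "%63s %lu %lu %lu %lu",
			   name, &active, &total, &objsize, &objsperslab) == 5)
			printf("%-24s %lu objs/slab\n", name, objsperslab);
	}
	fclose(f);
	return 0;
}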