
Searched refs:slab (Results 1 – 25 of 57) sorted by relevance

/Linux-v4.19/tools/vm/
slabinfo.c:54 struct slabinfo *slab; member
330 if (a->slab == find && in find_one_alias()
1098 a->slab = s; in link_slabs()
1117 if (!show_single_ref && a->slab->refs == 1) in alias()
1122 if (strcmp(a->slab->name, active) == 0) { in alias()
1127 printf("\n%-12s <- %s", a->slab->name, a->name); in alias()
1128 active = a->slab->name; in alias()
1131 printf("%-15s -> %s\n", a->name, a->slab->name); in alias()
1161 static int slab_mismatch(char *slab) in slab_mismatch() argument
1163 return regexec(&pattern, slab, 0, NULL, 0); in slab_mismatch()
[all …]
/Linux-v4.19/Documentation/ABI/testing/
sysfs-kernel-slab:1 What: /sys/kernel/slab
7 The /sys/kernel/slab directory contains a snapshot of the
13 What: /sys/kernel/slab/cache/aliases
22 What: /sys/kernel/slab/cache/align
31 What: /sys/kernel/slab/cache/alloc_calls
42 What: /sys/kernel/slab/cache/alloc_fastpath
53 What: /sys/kernel/slab/cache/alloc_from_partial
59 The alloc_from_partial file shows how many times a cpu slab has
60 been full and it has been refilled by using a slab from the list
65 What: /sys/kernel/slab/cache/alloc_refill
[all …]
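The ABI entries above document per-cache statistics files under /sys/kernel/slab. A minimal userspace sketch of reading one of them, assuming a SLUB kernel; the cache name "kmalloc-64" is only an example and varies per system, and some statistics files only exist when the kernel was built with CONFIG_SLUB_STATS:

    /* Read one SLUB statistics file from sysfs (example cache name). */
    #include <stdio.h>

    int main(void)
    {
        const char *path = "/sys/kernel/slab/kmalloc-64/alloc_from_partial";
        char buf[64];
        FILE *f = fopen(path, "r");

        if (!f) {
            perror(path);
            return 1;
        }
        if (fgets(buf, sizeof(buf), f))
            printf("alloc_from_partial: %s", buf);
        fclose(f);
        return 0;
    }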
/Linux-v4.19/lib/
sg_pool.c:12 struct kmem_cache *slab; member
129 sgp->slab = kmem_cache_create(sgp->name, size, 0, in sg_pool_init()
131 if (!sgp->slab) { in sg_pool_init()
138 sgp->slab); in sg_pool_init()
153 if (sgp->slab) in sg_pool_init()
154 kmem_cache_destroy(sgp->slab); in sg_pool_init()
167 kmem_cache_destroy(sgp->slab); in sg_pool_exit()
stackdepot.c:200 void *slab = stack_slabs[parts.slabindex]; in depot_fetch_stack() local
202 struct stack_record *stack = slab + offset; in depot_fetch_stack()
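sg_pool.c above (and ccid.c further down) follows the standard kmem_cache lifecycle: create the cache, bail out if creation fails, and destroy it on the error path and at teardown. A minimal in-kernel sketch of that pattern, with an illustrative object type and cache name not taken from either file:

    #include <linux/module.h>
    #include <linux/slab.h>

    struct demo_obj {
        int id;
        char payload[56];
    };

    static struct kmem_cache *demo_cache;

    static int __init demo_init(void)
    {
        demo_cache = kmem_cache_create("demo_cache", sizeof(struct demo_obj),
                                       0, SLAB_HWCACHE_ALIGN, NULL);
        if (!demo_cache)
            return -ENOMEM;  /* same bail-out sg_pool_init() performs */
        return 0;
    }

    static void __exit demo_exit(void)
    {
        /* kmem_cache_destroy() tolerates a NULL cache, so no guard needed */
        kmem_cache_destroy(demo_cache);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");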
/Linux-v4.19/Documentation/vm/
slub.rst:9 slab caches. SLUB always includes full debugging but it is off by default.
39 slub_debug=<Debug-Options>,<slab name>
53 caused higher minimum slab orders
67 Red zoning and tracking may realign the slab. We can just apply sanity checks
72 Debugging options may require the minimum possible slab order to increase as
74 sizes). This has a higher likelihood of resulting in slab allocation errors
84 /sys/kernel/slab/<slab name>/
87 corresponding debug option. All options can be set on a slab that does
88 not contain objects. If the slab already contains objects then sanity checks
93 used on the wrong slab.
[all …]
split_page_table_lock.rst:61 Make sure the architecture doesn't use the slab allocator for page table
62 allocation: slab uses page->slab_cache for its pages.
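slub.rst above notes that each debug option is mirrored by a file under /sys/kernel/slab/<slab name>/ and that some options only take effect while the cache holds no objects. A minimal sketch of toggling one such option at runtime, assuming root and a SLUB kernel; the "dentry" cache name is only an example:

    /* Enable SLUB sanity checks for one cache via its sysfs file. */
    #include <stdio.h>

    int main(void)
    {
        FILE *f = fopen("/sys/kernel/slab/dentry/sanity_checks", "w");

        if (!f) {
            perror("sanity_checks");
            return 1;
        }
        fputs("1\n", f);  /* writing "0" turns the checks back off */
        fclose(f);
        return 0;
    }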
/Linux-v4.19/net/dccp/
ccid.c:84 struct kmem_cache *slab; in ccid_kmem_cache_create() local
91 slab = kmem_cache_create(slab_name_fmt, sizeof(struct ccid) + obj_size, 0, in ccid_kmem_cache_create()
93 return slab; in ccid_kmem_cache_create()
96 static void ccid_kmem_cache_destroy(struct kmem_cache *slab) in ccid_kmem_cache_destroy() argument
98 kmem_cache_destroy(slab); in ccid_kmem_cache_destroy()
/Linux-v4.19/tools/perf/Documentation/
perf-kmem.txt:44 Sort the output (default: 'frag,hit,bytes' for slab and 'bytes,hit'
46 pingpong, frag' for slab and 'page, callsite, bytes, hit, order,
48 mode selection options - i.e. --slab, --page, --alloc and/or --caller.
57 --slab::
/Linux-v4.19/include/net/
request_sock.h:34 struct kmem_cache *slab; member
88 req = kmem_cache_alloc(ops->slab, GFP_ATOMIC | __GFP_NOWARN); in reqsk_alloc()
94 kmem_cache_free(ops->slab, req); in reqsk_alloc()
118 kmem_cache_free(req->rsk_ops->slab, req); in reqsk_free()
/Linux-v4.19/block/
bio.c:65 struct kmem_cache *slab; member
77 struct kmem_cache *slab = NULL; in bio_find_or_create_slab() local
88 if (!bslab->slab && entry == -1) in bio_find_or_create_slab()
91 slab = bslab->slab; in bio_find_or_create_slab()
98 if (slab) in bio_find_or_create_slab()
117 slab = kmem_cache_create(bslab->name, sz, ARCH_KMALLOC_MINALIGN, in bio_find_or_create_slab()
119 if (!slab) in bio_find_or_create_slab()
122 bslab->slab = slab; in bio_find_or_create_slab()
127 return slab; in bio_find_or_create_slab()
138 if (bs->bio_slab == bio_slabs[i].slab) { in bio_put_slab()
[all …]
/Linux-v4.19/arch/ia64/include/asm/sn/
geo.h:30 slabid_t slab:4; /* slab (ASIC), 0 .. 15 within slot */ member
115 INVALID_SLAB : g.common.slab; in geo_slab()
/Linux-v4.19/drivers/crypto/chelsio/chtls/
chtls_cm.h:134 chtls_tcp_ops->slab = tcp_prot->rsk_prot->slab; in chtls_init_rsk_ops()
142 kmem_cache_free(req->rsk_ops->slab, req); in chtls_reqsk_free()
/Linux-v4.19/Documentation/core-api/
mm-api.rst:41 .. kernel-doc:: include/linux/slab.h
44 .. kernel-doc:: mm/slab.c
/Linux-v4.19/net/core/
sock.c:1458 struct kmem_cache *slab; in sk_prot_alloc() local
1460 slab = prot->slab; in sk_prot_alloc()
1461 if (slab != NULL) { in sk_prot_alloc()
1462 sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO); in sk_prot_alloc()
1484 if (slab != NULL) in sk_prot_alloc()
1485 kmem_cache_free(slab, sk); in sk_prot_alloc()
1493 struct kmem_cache *slab; in sk_prot_free() local
1497 slab = prot->slab; in sk_prot_free()
1502 if (slab != NULL) in sk_prot_free()
1503 kmem_cache_free(slab, sk); in sk_prot_free()
[all …]
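sk_prot_alloc() and sk_prot_free() above use a protocol's dedicated kmem_cache when one was registered and fall back to kmalloc()/kfree() otherwise. A minimal sketch of that fallback pattern with illustrative names; the real code additionally masks __GFP_ZERO out of the allocation flags:

    #include <linux/slab.h>

    struct fake_proto {
        struct kmem_cache *slab;  /* NULL if no dedicated cache */
        unsigned int obj_size;
    };

    static void *proto_obj_alloc(struct fake_proto *prot, gfp_t priority)
    {
        if (prot->slab != NULL)
            return kmem_cache_alloc(prot->slab, priority);
        return kmalloc(prot->obj_size, priority);
    }

    static void proto_obj_free(struct fake_proto *prot, void *obj)
    {
        if (prot->slab != NULL)
            kmem_cache_free(prot->slab, obj);
        else
            kfree(obj);
    }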
/Linux-v4.19/arch/ia64/sn/kernel/sn2/
sn_hwperf.c:90 int *rack, int *bay, int *slot, int *slab) in sn_hwperf_location_to_bpos() argument
96 rack, &type, bay, slab) == 4) in sn_hwperf_location_to_bpos()
100 rack, &type, bay, slot, slab) != 5) in sn_hwperf_location_to_bpos()
111 int rack, bay, slot, slab; in sn_hwperf_geoid_to_cnode() local
114 if (sn_hwperf_location_to_bpos(location, &rack, &bay, &slot, &slab)) in sn_hwperf_geoid_to_cnode()
129 slot == this_slot && slab == this_slab) { in sn_hwperf_geoid_to_cnode()
/Linux-v4.19/Documentation/translations/zh_CN/
magic-number.txt:99 RED_MAGIC2 0x170fc2a5 (any) mm/slab.c
114 SLAB_C_MAGIC 0x4f17a36d kmem_cache mm/slab.c
121 RED_MAGIC1 0x5a2cf071 (any) mm/slab.c
/Linux-v4.19/Documentation/fault-injection/
fault-injection.txt:12 injects slab allocation failures. (kmalloc(), kmem_cache_alloc(), ...)
183 Note that this file enables all types of faults (slab, futex, etc).
234 o Inject slab allocation failures into module init/exit code
356 Run a command "make -C tools/testing/selftests/ run_tests" with injecting slab
368 Same as above except to inject page allocation failure instead of slab
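fault-injection.txt above configures the failslab injector through debugfs knobs such as probability and times. A minimal userspace sketch that arms it, assuming CONFIG_FAILSLAB, root, and debugfs mounted at /sys/kernel/debug; the values are examples:

    /* Fail roughly 10% of slab allocations, at most 100 times. */
    #include <stdio.h>

    static int write_knob(const char *path, const char *val)
    {
        FILE *f = fopen(path, "w");

        if (!f) {
            perror(path);
            return -1;
        }
        fputs(val, f);
        fclose(f);
        return 0;
    }

    int main(void)
    {
        if (write_knob("/sys/kernel/debug/failslab/probability", "10"))
            return 1;
        return write_knob("/sys/kernel/debug/failslab/times", "100") ? 1 : 0;
    }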
/Linux-v4.19/Documentation/process/
magic-number.rst:105 RED_MAGIC2 0x170fc2a5 (any) ``mm/slab.c``
119 SLAB_C_MAGIC 0x4f17a36d kmem_cache ``mm/slab.c``
126 RED_MAGIC1 0x5a2cf071 (any) ``mm/slab.c``
/Linux-v4.19/Documentation/translations/ja_JP/
SubmitChecklist:97 21: ... at least the cases where slab allocation and page allocation fail
/Linux-v4.19/Documentation/trace/
events-kmem.rst:26 justified, particularly if kmalloc slab pages are getting significantly
42 of writing, no information is available on what slab is being allocated from,
/Linux-v4.19/tools/testing/fault-injection/
failcmd.sh:59 inject slab allocation failures
/Linux-v4.19/mm/
memory-failure.c:830 #define slab (1UL << PG_slab) macro
850 { slab, slab, MF_MSG_SLAB, me_kernel },
879 #undef slab
Makefile:69 obj-$(CONFIG_SLAB) += slab.o
/Linux-v4.19/Documentation/sysctl/
vm.txt:208 reclaimable slab objects like dentries and inodes. Once dropped, their
213 To free reclaimable slab objects (includes dentries and inodes):
215 To free slab objects and pagecache:
459 than this percentage of pages in a zone are reclaimable slab pages.
460 This ensures that the slab growth stays under control even in NUMA
465 Note that slab reclaim is triggered in a per zone / node fashion.
466 The process of reclaiming slab memory is currently not node specific
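vm.txt above says writing 2 to drop_caches frees reclaimable slab objects (dentries and inodes) and 3 frees slab plus pagecache. A minimal sketch of triggering the slab-only variant, assuming root; since the operation cannot free dirty objects, the documentation suggests running sync first:

    /* Drop reclaimable slab objects via /proc/sys/vm/drop_caches. */
    #include <stdio.h>

    int main(void)
    {
        FILE *f = fopen("/proc/sys/vm/drop_caches", "w");

        if (!f) {
            perror("drop_caches");
            return 1;
        }
        fputs("2\n", f);  /* "3" would also drop the pagecache */
        fclose(f);
        return 0;
    }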
/Linux-v4.19/init/
Kconfig:1594 SLUB sysfs support. /sys/slab will not exist and there will be
1602 SLUB creates a directory under /sys/kernel/slab for each
1606 caches under /sys/kernel/slab/CACHE/cgroup but it can lead
1627 This option allows you to select a slab allocator.
1633 The regular slab allocator that is established and known to work
1641 SLUB is a slab allocator that minimizes cache line usage
1646 a slab allocator.
1659 bool "Allow slab caches to be merged"
1662 For reduced kernel memory fragmentation, slab caches can be
1678 security feature reduces the predictability of the kernel slab
[all …]
