// SPDX-License-Identifier: GPL-2.0-only
/*
 * Processor cache information made available to userspace via sysfs;
 * intended to be compatible with x86 intel_cacheinfo implementation.
 *
 * Copyright 2008 IBM Corporation
 * Author: Nathan Lynch
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/list.h>
#include <linux/of.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <asm/smp.h>

#include "cacheinfo.h"
/* per-cpu object for tracking:
 * - a "cache" kobject for the top-level directory
 * - a list of "index" objects representing the cpu's local cache hierarchy
 */
struct cache_dir {
	struct kobject *kobj;          /* bare (not embedded) kobject for cache directory */
	struct cache_index_dir *index; /* list of index objects */
};

/* "index" object: each cpu's cache directory has an index
 * subdirectory corresponding to a cache object associated with the
 * cpu.  This object's lifetime is managed via the embedded kobject.
 */
struct cache_index_dir {
	struct kobject kobj;
	struct cache_index_dir *next; /* next index in parent directory */
	struct cache *cache;
};
/* Template for determining which OF properties to query for a given
 * cache type */
struct cache_type_info {
	const char *name;
	const char *size_prop;

	/* Allow for both [di]-cache-line-size and
	 * [di]-cache-block-size properties. According to the PowerPC
	 * Processor binding, -line-size should be provided if it
	 * differs from the cache block size (that which is operated
	 * on by cache instructions), so we look for -line-size first.
	 * However, current systems tend to provide the -block-size
	 * property if only one is present. */
	const char *line_size_props[2];
	const char *nr_sets_prop;
};
/* These are used to index the cache_type_info array. */
#define CACHE_TYPE_UNIFIED     0 /* cache-size, cache-block-size, etc. */
#define CACHE_TYPE_UNIFIED_D   1 /* d-cache-size, d-cache-block-size, etc. */
#define CACHE_TYPE_INSTRUCTION 2 /* i-cache-size, i-cache-block-size, etc. */
#define CACHE_TYPE_DATA        3 /* d-cache-size, d-cache-block-size, etc. */
static const struct cache_type_info cache_type_info[] = {
	{
		/* Embedded systems that use cache-size, cache-block-size,
		 * etc. for the Unified (typically L2) cache. */
		.name            = "Unified",
		.size_prop       = "cache-size",
		.line_size_props = { "cache-line-size",
				     "cache-block-size", },
		.nr_sets_prop    = "cache-sets",
	},
	{
		/* PowerPC Processor binding says the [di]-cache-*
		 * must be equal on unified caches, so just use the
		 * d-cache properties. */
		.name            = "Unified",
		.size_prop       = "d-cache-size",
		.line_size_props = { "d-cache-line-size",
				     "d-cache-block-size", },
		.nr_sets_prop    = "d-cache-sets",
	},
	{
		.name            = "Instruction",
		.size_prop       = "i-cache-size",
		.line_size_props = { "i-cache-line-size",
				     "i-cache-block-size", },
		.nr_sets_prop    = "i-cache-sets",
	},
	{
		.name            = "Data",
		.size_prop       = "d-cache-size",
		.line_size_props = { "d-cache-line-size",
				     "d-cache-block-size", },
		.nr_sets_prop    = "d-cache-sets",
	},
};
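
/*
 * Illustrative device-tree fragment (made up, not from any real
 * machine) showing the split-L1 properties the table above queries.
 * Only the -block-size variant is present here, so the line-size
 * lookup falls back to it, per the comment in struct cache_type_info:
 *
 *	cpu@0 {
 *		device_type = "cpu";
 *		d-cache-size = <0x8000>;
 *		d-cache-block-size = <128>;
 *		d-cache-sets = <64>;
 *		i-cache-size = <0x8000>;
 *		i-cache-block-size = <128>;
 *		i-cache-sets = <64>;
 *		l2-cache = <&L2_0>;
 *	};
 */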
/* Cache object: each instance of this corresponds to a distinct cache
 * in the system.  There are at most two cache objects per device
 * node; one each for split instruction and data caches.  The cpus
 * using the cache are tracked in the shared_cpu_map of the
 * cache object.  A cache object is released when its shared_cpu_map
 * becomes empty (no more cpus are using it).
 *
 * A cache object is on two lists: an unsorted global list
 * (cache_list) of cache objects; and a singly-linked list
 * representing the local cache hierarchy, which is ordered by level
 * (e.g. L1d -> L1i -> L2 -> L3).
 */
struct cache {
	struct device_node *ofnode;    /* OF node for this cache, may be cpu */
	struct cpumask shared_cpu_map; /* online CPUs using this cache */
	int type;                      /* split cache disambiguation */
	int level;                     /* level not explicit in device tree */
	int group_id;                  /* id of the group of threads that share this cache */
	struct list_head list;         /* global list of cache objects */
	struct cache *next_local;      /* next cache of >= level */
};

static DEFINE_PER_CPU(struct cache_dir *, cache_dir_pcpu);

/* traversal/modification of this list occurs only at cpu hotplug time;
 * access is serialized by cpu hotplug locking
 */
static LIST_HEAD(cache_list);

static struct cache_index_dir *kobj_to_cache_index_dir(struct kobject *k)
{
	return container_of(k, struct cache_index_dir, kobj);
}
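
/*
 * Worked example (hypothetical big-core cpu with split L1 and shared
 * L2/L3): the local chain built for it is
 *
 *	L1d -> L1i -> L2 -> L3
 *
 * while all four objects also sit on the unsorted global cache_list.
 */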
static const char *cache_type_string(const struct cache *cache)
{
	return cache_type_info[cache->type].name;
}
static void cache_init(struct cache *cache, int type, int level,
		       struct device_node *ofnode, int group_id)
{
	cache->type = type;
	cache->level = level;
	cache->ofnode = of_node_get(ofnode);
	cache->group_id = group_id;
	INIT_LIST_HEAD(&cache->list);
	list_add(&cache->list, &cache_list);
}
static struct cache *new_cache(int type, int level,
			       struct device_node *ofnode, int group_id)
{
	struct cache *cache;

	cache = kzalloc(sizeof(*cache), GFP_KERNEL);
	if (cache)
		cache_init(cache, type, level, ofnode, group_id);

	return cache;
}
static void release_cache_debugcheck(struct cache *cache)
{
	struct cache *iter;

	list_for_each_entry(iter, &cache_list, list)
		WARN_ONCE(iter->next_local == cache,
			  "cache for %pOFP(%s) refers to cache for %pOFP(%s)\n",
			  iter->ofnode,
			  cache_type_string(iter),
			  cache->ofnode,
			  cache_type_string(cache));
}
static void release_cache(struct cache *cache)
{
	if (!cache)
		return;

	pr_debug("freeing L%d %s cache for %pOFP\n", cache->level,
		 cache_type_string(cache), cache->ofnode);

	release_cache_debugcheck(cache);
	list_del(&cache->list);
	of_node_put(cache->ofnode);
	kfree(cache);
}
static void cache_cpu_set(struct cache *cache, int cpu)
{
	struct cache *next = cache;

	while (next) {
		WARN_ONCE(cpumask_test_cpu(cpu, &next->shared_cpu_map),
			  "CPU %i already accounted in %pOFP(%s)\n",
			  cpu, next->ofnode,
			  cache_type_string(next));
		cpumask_set_cpu(cpu, &next->shared_cpu_map);
		next = next->next_local;
	}
}
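
/*
 * Illustration (made-up SMT2 core, cpus 0-1): after cache_cpu_set()
 * has run for both cpus, every object on the chain has
 * shared_cpu_map = 0-1.  cache_cpu_clear() below reverses this on
 * offline, and an object is freed once its map becomes empty.
 */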
static int cache_size(const struct cache *cache, unsigned int *ret)
{
	const char *propname;
	const __be32 *cache_size;

	propname = cache_type_info[cache->type].size_prop;

	cache_size = of_get_property(cache->ofnode, propname, NULL);
	if (!cache_size)
		return -ENODEV;

	*ret = of_read_number(cache_size, 1);
	return 0;
}
static int cache_size_kb(const struct cache *cache, unsigned int *ret)
{
	unsigned int size;

	if (cache_size(cache, &size))
		return -ENODEV;

	*ret = size / 1024;
	return 0;
}
/* not cache_line_size() because that's a macro in include/linux/cache.h */
static int cache_get_line_size(const struct cache *cache, unsigned int *ret)
{
	const __be32 *line_size;
	int i, lim;

	lim = ARRAY_SIZE(cache_type_info[cache->type].line_size_props);

	for (i = 0; i < lim; i++) {
		const char *propname;

		propname = cache_type_info[cache->type].line_size_props[i];
		line_size = of_get_property(cache->ofnode, propname, NULL);
		if (line_size)
			break;
	}

	if (!line_size)
		return -ENODEV;

	*ret = of_read_number(line_size, 1);
	return 0;
}
static int cache_nr_sets(const struct cache *cache, unsigned int *ret)
{
	const char *propname;
	const __be32 *nr_sets;

	propname = cache_type_info[cache->type].nr_sets_prop;

	nr_sets = of_get_property(cache->ofnode, propname, NULL);
	if (!nr_sets)
		return -ENODEV;

	*ret = of_read_number(nr_sets, 1);
	return 0;
}
static int cache_associativity(const struct cache *cache, unsigned int *ret)
{
	unsigned int line_size;
	unsigned int nr_sets;
	unsigned int size;

	if (cache_nr_sets(cache, &nr_sets))
		goto err;

	/* If the cache is fully associative, there is no need to
	 * check the other properties.
	 */
	if (nr_sets == 1) {
		*ret = 0;
		return 0;
	}

	if (cache_get_line_size(cache, &line_size))
		goto err;
	if (cache_size(cache, &size))
		goto err;

	if (!(nr_sets > 0 && size > 0 && line_size > 0))
		goto err;

	*ret = (size / nr_sets) / line_size;
	return 0;
err:
	return -ENODEV;
}
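
/*
 * Worked example with made-up numbers: d-cache-size = 32768,
 * d-cache-sets = 64 and d-cache-block-size = 128 give
 * (32768 / 64) / 128 = 4, i.e. a 4-way set-associative cache.
 * nr_sets == 1 instead means fully associative, reported as 0.
 */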
/* helper for dealing with split caches */
static struct cache *cache_find_first_sibling(struct cache *cache)
{
	struct cache *iter;

	if (cache->type == CACHE_TYPE_UNIFIED ||
	    cache->type == CACHE_TYPE_UNIFIED_D)
		return cache;

	list_for_each_entry(iter, &cache_list, list)
		if (iter->ofnode == cache->ofnode &&
		    iter->group_id == cache->group_id &&
		    iter->next_local == cache)
			return iter;

	return cache;
}
/* return the first cache on a local list matching node and thread-group id */
static struct cache *cache_lookup_by_node_group(const struct device_node *node,
						int group_id)
{
	struct cache *cache = NULL;
	struct cache *iter;

	list_for_each_entry(iter, &cache_list, list) {
		if (iter->ofnode != node ||
		    iter->group_id != group_id)
			continue;
		cache = cache_find_first_sibling(iter);
		break;
	}

	return cache;
}
static bool cache_node_is_unified(const struct device_node *np)
{
	return of_get_property(np, "cache-unified", NULL);
}
/*
 * Unified caches can have two different sets of tags.  Most embedded systems
 * use cache-size, etc. for the unified cache size, but open firmware systems
 * use d-cache-size, etc.  Check on initialization for which type we have, and
 * return the appropriate structure type.  Assume it's embedded if it isn't
 * open firmware.  If it's yet a 3rd type, then there will be missing entries
 * in /sys/devices/system/cpu/cpu0/cache/index2/, and this code will need
 * to be extended further.
 */
static int cache_is_unified_d(const struct device_node *np)
{
	return of_get_property(np,
		cache_type_info[CACHE_TYPE_UNIFIED_D].size_prop, NULL) ?
		CACHE_TYPE_UNIFIED_D : CACHE_TYPE_UNIFIED;
}
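
/*
 * Illustrative contrast (made-up nodes): an embedded-style unified L2
 * node carries "cache-unified" plus cache-size/cache-sets, while an
 * open firmware style node carries "cache-unified" plus
 * d-cache-size/d-cache-sets; cache_is_unified_d() above selects which
 * property set to query for such a node.
 */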
static struct cache *cache_do_one_devnode_unified(struct device_node *node, int group_id,
						  int level)
{
	pr_debug("creating L%d ucache for %pOFP\n", level, node);

	return new_cache(cache_is_unified_d(node), level, node, group_id);
}

static struct cache *cache_do_one_devnode_split(struct device_node *node, int group_id,
						int level)
{
	struct cache *dcache, *icache;

	pr_debug("creating L%d dcache and icache for %pOFP\n", level,
		 node);

	dcache = new_cache(CACHE_TYPE_DATA, level, node, group_id);
	icache = new_cache(CACHE_TYPE_INSTRUCTION, level, node, group_id);

	if (!dcache || !icache)
		goto err;

	dcache->next_local = icache;

	return dcache;
err:
	release_cache(dcache);
	release_cache(icache);
	return NULL;
}
static struct cache *cache_do_one_devnode(struct device_node *node, int group_id, int level)
{
	struct cache *cache;

	if (cache_node_is_unified(node))
		cache = cache_do_one_devnode_unified(node, group_id, level);
	else
		cache = cache_do_one_devnode_split(node, group_id, level);

	return cache;
}
static struct cache *cache_lookup_or_instantiate(struct device_node *node,
						 int group_id,
						 int level)
{
	struct cache *cache;

	cache = cache_lookup_by_node_group(node, group_id);

	WARN_ONCE(cache && cache->level != level,
		  "cache level mismatch on lookup (got %d, expected %d)\n",
		  cache->level, level);

	if (!cache)
		cache = cache_do_one_devnode(node, group_id, level);

	return cache;
}
static void link_cache_lists(struct cache *smaller, struct cache *bigger)
{
	while (smaller->next_local) {
		if (smaller->next_local == bigger)
			return; /* already linked */
		smaller = smaller->next_local;
	}

	smaller->next_local = bigger;

	/*
	 * The cache->next_local list sorts by level ascending:
	 * L1d -> L1i -> L2 -> L3 ...
	 */
	WARN_ONCE((smaller->level == 1 && bigger->level > 2) ||
		  (smaller->level > 1 && bigger->level != smaller->level + 1),
		  "linking L%i cache %pOFP to L%i cache %pOFP; skipped a level?\n",
		  smaller->level, smaller->ofnode, bigger->level, bigger->ofnode);
}
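
/*
 * Example: linking a split-L1 chain (L1d -> L1i) to a new L2 walks to
 * the tail (L1i, level 1) and appends, so the hop is level 1 -> 2 and
 * the WARN_ONCE above stays quiet; appending an L3 directly to an L1
 * chain would trip it.
 */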
static void do_subsidiary_caches_debugcheck(struct cache *cache)
{
	WARN_ONCE(cache->level != 1,
		  "instantiating cache chain from L%d %s cache for "
		  "%pOFP instead of an L1\n", cache->level,
		  cache_type_string(cache), cache->ofnode);
	WARN_ONCE(!of_node_is_type(cache->ofnode, "cpu"),
		  "instantiating cache chain from node %pOFP of type '%s' "
		  "instead of a cpu node\n", cache->ofnode,
		  of_node_get_device_type(cache->ofnode));
}
/*
 * If sub-groups of threads in a core containing @cpu_id share the
 * L@level-cache (information obtained via "ibm,thread-groups"
 * device-tree property), then we identify the group by the first
 * thread-sibling in the group. We define this to be the group-id.
 *
 * In the absence of any thread-group information for L@level-cache,
 * this function returns -1.
 */
static int get_group_id(unsigned int cpu_id, int level)
{
	if (has_big_cores && level == 1)
		return cpumask_first(per_cpu(thread_group_l1_cache_map,
					     cpu_id));
	else if (thread_group_shares_l2 && level == 2)
		return cpumask_first(per_cpu(thread_group_l2_cache_map,
					     cpu_id));
	else if (thread_group_shares_l3 && level == 3)
		return cpumask_first(per_cpu(thread_group_l3_cache_map,
					     cpu_id));
	return -1;
}
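
/*
 * Example (made-up SMT8 core, threads 0-7): if "ibm,thread-groups"
 * describes two L1 groups {0,2,4,6} and {1,3,5,7}, then
 * get_group_id(6, 1) == 0 and get_group_id(7, 1) == 1, so the two
 * groups get distinct L1 cache objects even though they share one
 * OF node.
 */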
static void do_subsidiary_caches(struct cache *cache, unsigned int cpu_id)
{
	struct device_node *subcache_node;
	int level = cache->level;

	do_subsidiary_caches_debugcheck(cache);

	while ((subcache_node = of_find_next_cache_node(cache->ofnode))) {
		struct cache *subcache;
		int group_id;

		level++;
		group_id = get_group_id(cpu_id, level);
		subcache = cache_lookup_or_instantiate(subcache_node, group_id, level);
		of_node_put(subcache_node);
		if (!subcache)
			break;

		link_cache_lists(cache, subcache);
		cache = subcache;
	}
}
static struct cache *cache_chain_instantiate(unsigned int cpu_id)
{
	struct device_node *cpu_node;
	struct cache *cpu_cache = NULL;
	int group_id;

	pr_debug("creating cache object(s) for CPU %i\n", cpu_id);

	cpu_node = of_get_cpu_node(cpu_id, NULL);
	WARN_ONCE(!cpu_node, "no OF node found for CPU %i\n", cpu_id);
	if (!cpu_node)
		goto out;

	group_id = get_group_id(cpu_id, 1);
	cpu_cache = cache_lookup_or_instantiate(cpu_node, group_id, 1);
	if (!cpu_cache)
		goto out;

	do_subsidiary_caches(cpu_cache, cpu_id);

	cache_cpu_set(cpu_cache, cpu_id);
out:
	of_node_put(cpu_node);

	return cpu_cache;
}
static struct cache_dir *cacheinfo_create_cache_dir(unsigned int cpu_id)
{
	struct cache_dir *cache_dir;
	struct device *dev;
	struct kobject *kobj = NULL;

	dev = get_cpu_device(cpu_id);
	WARN_ONCE(!dev, "no dev for CPU %i\n", cpu_id);
	if (!dev)
		goto err;

	kobj = kobject_create_and_add("cache", &dev->kobj);
	if (!kobj)
		goto err;

	cache_dir = kzalloc(sizeof(*cache_dir), GFP_KERNEL);
	if (!cache_dir)
		goto err;

	cache_dir->kobj = kobj;

	WARN_ON_ONCE(per_cpu(cache_dir_pcpu, cpu_id) != NULL);

	per_cpu(cache_dir_pcpu, cpu_id) = cache_dir;

	return cache_dir;
err:
	kobject_put(kobj);
	return NULL;
}
566 pr_debug("freeing index directory for L%d %s cache\n", in cache_index_release()
567 index->cache->level, cache_type_string(index->cache)); in cache_index_release()
static ssize_t cache_index_show(struct kobject *k, struct attribute *attr, char *buf)
{
	struct kobj_attribute *kobj_attr;

	kobj_attr = container_of(attr, struct kobj_attribute, attr);

	return kobj_attr->show(k, kobj_attr, buf);
}
static struct cache *index_kobj_to_cache(struct kobject *k)
{
	struct cache_index_dir *index;

	index = kobj_to_cache_index_dir(k);

	return index->cache;
}
static ssize_t size_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	unsigned int size_kb;
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	if (cache_size_kb(cache, &size_kb))
		return -ENODEV;

	return sprintf(buf, "%uK\n", size_kb);
}

static struct kobj_attribute cache_size_attr =
	__ATTR(size, 0444, size_show, NULL);
static ssize_t line_size_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	unsigned int line_size;
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	if (cache_get_line_size(cache, &line_size))
		return -ENODEV;

	return sprintf(buf, "%u\n", line_size);
}

static struct kobj_attribute cache_line_size_attr =
	__ATTR(coherency_line_size, 0444, line_size_show, NULL);
static ssize_t nr_sets_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	unsigned int nr_sets;
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	if (cache_nr_sets(cache, &nr_sets))
		return -ENODEV;

	return sprintf(buf, "%u\n", nr_sets);
}

static struct kobj_attribute cache_nr_sets_attr =
	__ATTR(number_of_sets, 0444, nr_sets_show, NULL);
static ssize_t associativity_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	unsigned int associativity;
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	if (cache_associativity(cache, &associativity))
		return -ENODEV;

	return sprintf(buf, "%u\n", associativity);
}

static struct kobj_attribute cache_assoc_attr =
	__ATTR(ways_of_associativity, 0444, associativity_show, NULL);
static ssize_t type_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	return sprintf(buf, "%s\n", cache_type_string(cache));
}

static struct kobj_attribute cache_type_attr =
	__ATTR(type, 0444, type_show, NULL);
static ssize_t level_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	struct cache_index_dir *index;
	struct cache *cache;

	index = kobj_to_cache_index_dir(k);
	cache = index->cache;

	return sprintf(buf, "%d\n", cache->level);
}

static struct kobj_attribute cache_level_attr =
	__ATTR(level, 0444, level_show, NULL);
static ssize_t
show_shared_cpumap(struct kobject *k, struct kobj_attribute *attr, char *buf, bool list)
{
	struct cache_index_dir *index;
	struct cache *cache;
	const struct cpumask *mask;

	index = kobj_to_cache_index_dir(k);
	cache = index->cache;

	mask = &cache->shared_cpu_map;

	return cpumap_print_to_pagebuf(list, buf, mask);
}

static ssize_t shared_cpu_map_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	return show_shared_cpumap(k, attr, buf, false);
}

static ssize_t shared_cpu_list_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	return show_shared_cpumap(k, attr, buf, true);
}

static struct kobj_attribute cache_shared_cpu_map_attr =
	__ATTR(shared_cpu_map, 0444, shared_cpu_map_show, NULL);

static struct kobj_attribute cache_shared_cpu_list_attr =
	__ATTR(shared_cpu_list, 0444, shared_cpu_list_show, NULL);
/* Attributes which should always be created -- the kobject/sysfs core
 * does this automatically via kobj_type->default_attrs.  This is the
 * minimum data required to uniquely identify a cache.
 */
static struct attribute *cache_index_default_attrs[] = {
	&cache_type_attr.attr,
	&cache_level_attr.attr,
	&cache_shared_cpu_map_attr.attr,
	&cache_shared_cpu_list_attr.attr,
	NULL,
};
/* Attributes which should be created if the cache device node has the
 * right properties -- see cacheinfo_create_index_opt_attrs
 */
static struct kobj_attribute *cache_index_opt_attrs[] = {
	&cache_size_attr,
	&cache_line_size_attr,
	&cache_nr_sets_attr,
	&cache_assoc_attr,
};

static const struct sysfs_ops cache_index_ops = {
	.show = cache_index_show,
};

static struct kobj_type cache_index_type = {
	.release = cache_index_release,
	.sysfs_ops = &cache_index_ops,
	.default_attrs = cache_index_default_attrs,
};
static void cacheinfo_create_index_opt_attrs(struct cache_index_dir *dir)
{
	const char *cache_type;
	struct cache *cache;
	char *buf;
	int i;

	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		return;

	cache = dir->cache;
	cache_type = cache_type_string(cache);

	/* We don't want to create an attribute that can't provide a
	 * meaningful value.  Check the return value of each optional
	 * attribute's ->show method before registering the
	 * attribute.
	 */
	for (i = 0; i < ARRAY_SIZE(cache_index_opt_attrs); i++) {
		struct kobj_attribute *attr;
		ssize_t rc;

		attr = cache_index_opt_attrs[i];

		rc = attr->show(&dir->kobj, attr, buf);
		if (rc <= 0) {
			pr_debug("not creating %s attribute for "
				 "%pOFP(%s) (rc = %zd)\n",
				 attr->attr.name, cache->ofnode,
				 cache_type, rc);
			continue;
		}
		if (sysfs_create_file(&dir->kobj, &attr->attr))
			pr_debug("could not create %s attribute for %pOFP(%s)\n",
				 attr->attr.name, cache->ofnode, cache_type);
	}

	kfree(buf);
}
static void cacheinfo_create_index_dir(struct cache *cache, int index,
				       struct cache_dir *cache_dir)
{
	struct cache_index_dir *index_dir;
	int rc;

	index_dir = kzalloc(sizeof(*index_dir), GFP_KERNEL);
	if (!index_dir)
		return;

	index_dir->cache = cache;

	rc = kobject_init_and_add(&index_dir->kobj, &cache_index_type,
				  cache_dir->kobj, "index%d", index);
	if (rc) {
		kobject_put(&index_dir->kobj);
		return;
	}

	index_dir->next = cache_dir->index;
	cache_dir->index = index_dir;

	cacheinfo_create_index_opt_attrs(index_dir);
}
static void cacheinfo_sysfs_populate(unsigned int cpu_id,
				     struct cache *cache_list)
{
	struct cache_dir *cache_dir;
	struct cache *cache;
	int index = 0;

	cache_dir = cacheinfo_create_cache_dir(cpu_id);
	if (!cache_dir)
		return;

	cache = cache_list;
	while (cache) {
		cacheinfo_create_index_dir(cache, index, cache_dir);
		index++;
		cache = cache->next_local;
	}
}
void cacheinfo_cpu_online(unsigned int cpu_id)
{
	struct cache *cache;

	cache = cache_chain_instantiate(cpu_id);
	if (!cache)
		return;

	cacheinfo_sysfs_populate(cpu_id, cache);
}
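
/*
 * Resulting sysfs layout (illustrative, for a cpu with split L1 and a
 * unified L2):
 *
 *	/sys/devices/system/cpu/cpu0/cache/
 *		index0/		L1d: type, level, size, ...
 *		index1/		L1i
 *		index2/		L2
 */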
/* functions needed to remove cache entry for cpu offline or suspend/resume */

static struct cache *cache_lookup_by_cpu(unsigned int cpu_id)
{
	struct device_node *cpu_node;
	struct cache *cache = NULL;
	int group_id;

	cpu_node = of_get_cpu_node(cpu_id, NULL);
	WARN_ONCE(!cpu_node, "no OF node found for CPU %i\n", cpu_id);
	if (!cpu_node)
		return NULL;

	group_id = get_group_id(cpu_id, 1);
	cache = cache_lookup_by_node_group(cpu_node, group_id);
	of_node_put(cpu_node);

	return cache;
}
static void remove_index_dirs(struct cache_dir *cache_dir)
{
	struct cache_index_dir *index;

	index = cache_dir->index;

	while (index) {
		struct cache_index_dir *next;

		next = index->next;
		kobject_put(&index->kobj);
		index = next;
	}
}
static void remove_cache_dir(struct cache_dir *cache_dir)
{
	remove_index_dirs(cache_dir);

	/* Remove cache dir from sysfs */
	kobject_del(cache_dir->kobj);

	kobject_put(cache_dir->kobj);

	kfree(cache_dir);
}
static void cache_cpu_clear(struct cache *cache, int cpu)
{
	while (cache) {
		struct cache *next = cache->next_local;

		WARN_ONCE(!cpumask_test_cpu(cpu, &cache->shared_cpu_map),
			  "CPU %i not accounted in %pOFP(%s)\n",
			  cpu, cache->ofnode,
			  cache_type_string(cache));

		cpumask_clear_cpu(cpu, &cache->shared_cpu_map);

		/* Release the cache object if all the cpus using it
		 * are offline */
		if (cpumask_empty(&cache->shared_cpu_map))
			release_cache(cache);

		cache = next;
	}
}
void cacheinfo_cpu_offline(unsigned int cpu_id)
{
	struct cache_dir *cache_dir;
	struct cache *cache;

	/* Prevent userspace from seeing inconsistent state - remove
	 * the sysfs hierarchy first */
	cache_dir = per_cpu(cache_dir_pcpu, cpu_id);

	/* careful, sysfs population may have failed */
	if (cache_dir) {
		remove_cache_dir(cache_dir);
		per_cpu(cache_dir_pcpu, cpu_id) = NULL;
	}

	/* clear the CPU's bit in its cache chain, possibly freeing
	 * cache objects */
	cache = cache_lookup_by_cpu(cpu_id);
	if (cache)
		cache_cpu_clear(cache, cpu_id);
}
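
/*
 * Usage sketch (hypothetical caller; the arch hotplug path is expected
 * to pair these calls):
 *
 *	cacheinfo_cpu_online(cpu);	// build chain + sysfs directories
 *	...
 *	cacheinfo_cpu_offline(cpu);	// remove sysfs, drop cpu from maps
 */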