2024-05-10 37f49e37ab4cb5d0bc4c60eb5c6d4dd57db767bb
--- a/kernel/arch/powerpc/kernel/cacheinfo.c
+++ b/kernel/arch/powerpc/kernel/cacheinfo.c
@@ -1,14 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Processor cache information made available to userspace via sysfs;
  * intended to be compatible with x86 intel_cacheinfo implementation.
  *
  * Copyright 2008 IBM Corporation
  * Author: Nathan Lynch
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
  */
+
+#define pr_fmt(fmt) "cacheinfo: " fmt
 
 #include <linux/cpu.h>
 #include <linux/cpumask.h>
@@ -20,6 +19,8 @@
 #include <linux/percpu.h>
 #include <linux/slab.h>
 #include <asm/prom.h>
+#include <asm/cputhreads.h>
+#include <asm/smp.h>
 
 #include "cacheinfo.h"
 
@@ -167,7 +168,7 @@
 
 	list_for_each_entry(iter, &cache_list, list)
 		WARN_ONCE(iter->next_local == cache,
-			  "cache for %pOF(%s) refers to cache for %pOF(%s)\n",
+			  "cache for %pOFP(%s) refers to cache for %pOFP(%s)\n",
 			  iter->ofnode,
 			  cache_type_string(iter),
 			  cache->ofnode,
@@ -179,7 +180,7 @@
 	if (!cache)
 		return;
 
-	pr_debug("freeing L%d %s cache for %pOF\n", cache->level,
+	pr_debug("freeing L%d %s cache for %pOFP\n", cache->level,
 		 cache_type_string(cache), cache->ofnode);
 
 	release_cache_debugcheck(cache);
@@ -194,7 +195,7 @@
 
 	while (next) {
 		WARN_ONCE(cpumask_test_cpu(cpu, &next->shared_cpu_map),
-			  "CPU %i already accounted in %pOF(%s)\n",
+			  "CPU %i already accounted in %pOFP(%s)\n",
 			  cpu, next->ofnode,
 			  cache_type_string(next));
 		cpumask_set_cpu(cpu, &next->shared_cpu_map);
@@ -351,11 +352,9 @@
 			CACHE_TYPE_UNIFIED_D : CACHE_TYPE_UNIFIED;
 }
 
-/*
- */
 static struct cache *cache_do_one_devnode_unified(struct device_node *node, int level)
 {
-	pr_debug("creating L%d ucache for %pOF\n", level, node);
+	pr_debug("creating L%d ucache for %pOFP\n", level, node);
 
 	return new_cache(cache_is_unified_d(node), level, node);
 }
@@ -365,7 +364,7 @@
 {
 	struct cache *dcache, *icache;
 
-	pr_debug("creating L%d dcache and icache for %pOF\n", level,
+	pr_debug("creating L%d dcache and icache for %pOFP\n", level,
 		 node);
 
 	dcache = new_cache(CACHE_TYPE_DATA, level, node);
@@ -421,12 +420,27 @@
 	}
 
 	smaller->next_local = bigger;
+
+	/*
+	 * The cache->next_local list sorts by level ascending:
+	 * L1d -> L1i -> L2 -> L3 ...
+	 */
+	WARN_ONCE((smaller->level == 1 && bigger->level > 2) ||
+		  (smaller->level > 1 && bigger->level != smaller->level + 1),
+		  "linking L%i cache %pOFP to L%i cache %pOFP; skipped a level?\n",
+		  smaller->level, smaller->ofnode, bigger->level, bigger->ofnode);
 }
 
 static void do_subsidiary_caches_debugcheck(struct cache *cache)
 {
-	WARN_ON_ONCE(cache->level != 1);
-	WARN_ON_ONCE(strcmp(cache->ofnode->type, "cpu"));
+	WARN_ONCE(cache->level != 1,
+		  "instantiating cache chain from L%d %s cache for "
+		  "%pOFP instead of an L1\n", cache->level,
+		  cache_type_string(cache), cache->ofnode);
+	WARN_ONCE(!of_node_is_type(cache->ofnode, "cpu"),
+		  "instantiating cache chain from node %pOFP of type '%s' "
+		  "instead of a cpu node\n", cache->ofnode,
+		  of_node_get_device_type(cache->ofnode));
 }
 
 static void do_subsidiary_caches(struct cache *cache)
@@ -627,24 +641,65 @@
 static struct kobj_attribute cache_level_attr =
 	__ATTR(level, 0444, level_show, NULL);
 
-static ssize_t shared_cpu_map_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
+static unsigned int index_dir_to_cpu(struct cache_index_dir *index)
+{
+	struct kobject *index_dir_kobj = &index->kobj;
+	struct kobject *cache_dir_kobj = index_dir_kobj->parent;
+	struct kobject *cpu_dev_kobj = cache_dir_kobj->parent;
+	struct device *dev = kobj_to_dev(cpu_dev_kobj);
+
+	return dev->id;
+}
+
+/*
+ * On big-core systems, each core has two groups of CPUs each of which
+ * has its own L1-cache. The thread-siblings which share l1-cache with
+ * @cpu can be obtained via cpu_smallcore_mask().
+ */
+static const struct cpumask *get_big_core_shared_cpu_map(int cpu, struct cache *cache)
+{
+	if (cache->level == 1)
+		return cpu_smallcore_mask(cpu);
+
+	return &cache->shared_cpu_map;
+}
+
+static ssize_t
+show_shared_cpumap(struct kobject *k, struct kobj_attribute *attr, char *buf, bool list)
 {
 	struct cache_index_dir *index;
 	struct cache *cache;
-	int ret;
+	const struct cpumask *mask;
+	int cpu;
 
 	index = kobj_to_cache_index_dir(k);
 	cache = index->cache;
 
-	ret = scnprintf(buf, PAGE_SIZE - 1, "%*pb\n",
-			cpumask_pr_args(&cache->shared_cpu_map));
-	buf[ret++] = '\n';
-	buf[ret] = '\0';
-	return ret;
+	if (has_big_cores) {
+		cpu = index_dir_to_cpu(index);
+		mask = get_big_core_shared_cpu_map(cpu, cache);
+	} else {
+		mask = &cache->shared_cpu_map;
+	}
+
+	return cpumap_print_to_pagebuf(list, buf, mask);
+}
+
+static ssize_t shared_cpu_map_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
+{
+	return show_shared_cpumap(k, attr, buf, false);
+}
+
+static ssize_t shared_cpu_list_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
+{
+	return show_shared_cpumap(k, attr, buf, true);
 }
 
 static struct kobj_attribute cache_shared_cpu_map_attr =
 	__ATTR(shared_cpu_map, 0444, shared_cpu_map_show, NULL);
+
+static struct kobj_attribute cache_shared_cpu_list_attr =
+	__ATTR(shared_cpu_list, 0444, shared_cpu_list_show, NULL);
 
 /* Attributes which should always be created -- the kobject/sysfs core
  * does this automatically via kobj_type->default_attrs. This is the
@@ -654,6 +709,7 @@
 	&cache_type_attr.attr,
 	&cache_level_attr.attr,
 	&cache_shared_cpu_map_attr.attr,
+	&cache_shared_cpu_list_attr.attr,
 	NULL,
 };
 
@@ -705,13 +761,13 @@
 		rc = attr->show(&dir->kobj, attr, buf);
 		if (rc <= 0) {
 			pr_debug("not creating %s attribute for "
-				 "%pOF(%s) (rc = %zd)\n",
+				 "%pOFP(%s) (rc = %zd)\n",
 				 attr->attr.name, cache->ofnode,
 				 cache_type, rc);
 			continue;
 		}
 		if (sysfs_create_file(&dir->kobj, &attr->attr))
-			pr_debug("could not create %s attribute for %pOF(%s)\n",
+			pr_debug("could not create %s attribute for %pOFP(%s)\n",
 				 attr->attr.name, cache->ofnode, cache_type);
 	}
 
@@ -726,23 +782,21 @@
 
 	index_dir = kzalloc(sizeof(*index_dir), GFP_KERNEL);
 	if (!index_dir)
-		goto err;
+		return;
 
 	index_dir->cache = cache;
 
 	rc = kobject_init_and_add(&index_dir->kobj, &cache_index_type,
 				  cache_dir->kobj, "index%d", index);
-	if (rc)
-		goto err;
+	if (rc) {
+		kobject_put(&index_dir->kobj);
+		return;
+	}
 
 	index_dir->next = cache_dir->index;
 	cache_dir->index = index_dir;
 
 	cacheinfo_create_index_opt_attrs(index_dir);
-
-	return;
-err:
-	kfree(index_dir);
 }
 
 static void cacheinfo_sysfs_populate(unsigned int cpu_id,
@@ -829,7 +883,7 @@
 	struct cache *next = cache->next_local;
 
 	WARN_ONCE(!cpumask_test_cpu(cpu, &cache->shared_cpu_map),
-		  "CPU %i not accounted in %pOF(%s)\n",
+		  "CPU %i not accounted in %pOFP(%s)\n",
 		  cpu, cache->ofnode,
 		  cache_type_string(cache));
 