```diff
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Processor cache information made available to userspace via sysfs;
  * intended to be compatible with x86 intel_cacheinfo implementation.
  *
  * Copyright 2008 IBM Corporation
  * Author: Nathan Lynch
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
  */
+
+#define pr_fmt(fmt) "cacheinfo: " fmt

 #include <linux/cpu.h>
 #include <linux/cpumask.h>
...
 #include <linux/percpu.h>
 #include <linux/slab.h>
 #include <asm/prom.h>
+#include <asm/cputhreads.h>
+#include <asm/smp.h>

 #include "cacheinfo.h"
```
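The new `pr_fmt` definition gives every `pr_*()` call in this file a common "cacheinfo: " prefix. A minimal sketch of the mechanism (the message text is made up for illustration):

```c
/*
 * Illustrative only: pr_fmt() is pasted into the pr_*() macros by
 * <linux/printk.h>, so it must be defined before the first include.
 */
#define pr_fmt(fmt) "cacheinfo: " fmt

#include <linux/printk.h>

static void pr_fmt_demo(void)
{
	/* Logs: "cacheinfo: freeing L2 cache" (hypothetical message) */
	pr_info("freeing L%d cache\n", 2);
}
```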
```diff
 	list_for_each_entry(iter, &cache_list, list)
 		WARN_ONCE(iter->next_local == cache,
-			  "cache for %pOF(%s) refers to cache for %pOF(%s)\n",
+			  "cache for %pOFP(%s) refers to cache for %pOFP(%s)\n",
 			  iter->ofnode,
 			  cache_type_string(iter),
 			  cache->ofnode,
...
 	if (!cache)
 		return;

-	pr_debug("freeing L%d %s cache for %pOF\n", cache->level,
+	pr_debug("freeing L%d %s cache for %pOFP\n", cache->level,
 		 cache_type_string(cache), cache->ofnode);

 	release_cache_debugcheck(cache);
...
 	while (next) {
 		WARN_ONCE(cpumask_test_cpu(cpu, &next->shared_cpu_map),
-			  "CPU %i already accounted in %pOF(%s)\n",
+			  "CPU %i already accounted in %pOFP(%s)\n",
 			  cpu, next->ofnode,
 			  cache_type_string(next));
 		cpumask_set_cpu(cpu, &next->shared_cpu_map);
```
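These `%pOF` to `%pOFP` conversions change how the device-tree node is rendered: per Documentation/core-api/printk-formats.rst, plain `%pOF` prints the node's full path, while the `P` modifier prints the path spec, i.e. the node name plus its unit address (for example `l2-cache@2024` rather than `/cpus/l2-cache@2024`, to use a made-up node). The same conversion is applied to every diagnostic in the rest of the file.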
```diff
 		CACHE_TYPE_UNIFIED_D : CACHE_TYPE_UNIFIED;
 }

-/*
- */
 static struct cache *cache_do_one_devnode_unified(struct device_node *node, int level)
 {
-	pr_debug("creating L%d ucache for %pOF\n", level, node);
+	pr_debug("creating L%d ucache for %pOFP\n", level, node);

 	return new_cache(cache_is_unified_d(node), level, node);
 }
...
 {
 	struct cache *dcache, *icache;

-	pr_debug("creating L%d dcache and icache for %pOF\n", level,
+	pr_debug("creating L%d dcache and icache for %pOFP\n", level,
 		 node);

 	dcache = new_cache(CACHE_TYPE_DATA, level, node);
```
```diff
 	}

 	smaller->next_local = bigger;
+
+	/*
+	 * The cache->next_local list sorts by level ascending:
+	 * L1d -> L1i -> L2 -> L3 ...
+	 */
+	WARN_ONCE((smaller->level == 1 && bigger->level > 2) ||
+		  (smaller->level > 1 && bigger->level != smaller->level + 1),
+		  "linking L%i cache %pOFP to L%i cache %pOFP; skipped a level?\n",
+		  smaller->level, smaller->ofnode, bigger->level, bigger->ofnode);
 }

 static void do_subsidiary_caches_debugcheck(struct cache *cache)
 {
-	WARN_ON_ONCE(cache->level != 1);
-	WARN_ON_ONCE(strcmp(cache->ofnode->type, "cpu"));
+	WARN_ONCE(cache->level != 1,
+		  "instantiating cache chain from L%d %s cache for "
+		  "%pOFP instead of an L1\n", cache->level,
+		  cache_type_string(cache), cache->ofnode);
+	WARN_ONCE(!of_node_is_type(cache->ofnode, "cpu"),
+		  "instantiating cache chain from node %pOFP of type '%s' "
+		  "instead of a cpu node\n", cache->ofnode,
+		  of_node_get_device_type(cache->ofnode));
 }

 static void do_subsidiary_caches(struct cache *cache)
```
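The new warning codifies the comment's ordering rule, including its one asymmetry: a level-1 cache may point at another level-1 cache (L1d -> L1i) or at the L2, but past level 1 every `next_local` link must go to exactly the next level up, so linking, say, an L1 or an L2 straight to an L3 is flagged as a skipped level. The debugcheck rewrite also replaces the raw `cache->ofnode->type` dereference with the `of_node_is_type()` / `of_node_get_device_type()` accessors, which no longer rely on `struct device_node` internals and cope with a missing `device_type` property.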
```diff
 static struct kobj_attribute cache_level_attr =
 	__ATTR(level, 0444, level_show, NULL);

-static ssize_t shared_cpu_map_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
+static unsigned int index_dir_to_cpu(struct cache_index_dir *index)
+{
+	struct kobject *index_dir_kobj = &index->kobj;
+	struct kobject *cache_dir_kobj = index_dir_kobj->parent;
+	struct kobject *cpu_dev_kobj = cache_dir_kobj->parent;
+	struct device *dev = kobj_to_dev(cpu_dev_kobj);
+
+	return dev->id;
+}
+
+/*
+ * On big-core systems, each core has two groups of CPUs each of which
+ * has its own L1-cache. The thread-siblings which share l1-cache with
+ * @cpu can be obtained via cpu_smallcore_mask().
+ */
+static const struct cpumask *get_big_core_shared_cpu_map(int cpu, struct cache *cache)
+{
+	if (cache->level == 1)
+		return cpu_smallcore_mask(cpu);
+
+	return &cache->shared_cpu_map;
+}
+
```
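Background for the helpers above: on POWER9 "big core" systems an SMT8 core is presented as two SMT4 thread groups, each with its own L1, which is what `has_big_cores` and `cpu_smallcore_mask()` (derived from the `ibm,thread-groups` device-tree property by the powerpc SMP code, hence the new `<asm/cputhreads.h>` and `<asm/smp.h>` includes) describe. So for the L1 index directories the siblings reported to userspace must be the small-core mask, while L2 and up keep the full `shared_cpu_map`. `index_dir_to_cpu()` recovers the CPU number by walking up the kobject hierarchy, relying on the sysfs layout `.../cpu/cpuN/cache/indexM`.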
```diff
+static ssize_t
+show_shared_cpumap(struct kobject *k, struct kobj_attribute *attr, char *buf, bool list)
 {
 	struct cache_index_dir *index;
 	struct cache *cache;
-	int ret;
+	const struct cpumask *mask;
+	int cpu;

 	index = kobj_to_cache_index_dir(k);
 	cache = index->cache;

-	ret = scnprintf(buf, PAGE_SIZE - 1, "%*pb\n",
-			cpumask_pr_args(&cache->shared_cpu_map));
-	buf[ret++] = '\n';
-	buf[ret] = '\0';
-	return ret;
+	if (has_big_cores) {
+		cpu = index_dir_to_cpu(index);
+		mask = get_big_core_shared_cpu_map(cpu, cache);
+	} else {
+		mask = &cache->shared_cpu_map;
+	}
+
+	return cpumap_print_to_pagebuf(list, buf, mask);
+}
+
+static ssize_t shared_cpu_map_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
+{
+	return show_shared_cpumap(k, attr, buf, false);
+}
+
+static ssize_t shared_cpu_list_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
+{
+	return show_shared_cpumap(k, attr, buf, true);
 }

```
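`cpumap_print_to_pagebuf()` replaces the hand-rolled `scnprintf()` plus manual newline/terminator dance: it bounds its output to a page, appends the trailing newline itself, and its `list` flag selects between the two classic sysfs encodings. A sketch of those encodings, assuming a hypothetical 8-CPU system with CPUs 0-3 set in the mask:

```c
#include <linux/cpumask.h>

/*
 * Illustrative only: the two formats behind shared_cpu_map and
 * shared_cpu_list for a mask of CPUs 0-3 on an 8-CPU system.
 */
static ssize_t demo_map_show(char *buf, const struct cpumask *mask)
{
	return cpumap_print_to_pagebuf(false, buf, mask);	/* "0f\n" */
}

static ssize_t demo_list_show(char *buf, const struct cpumask *mask)
{
	return cpumap_print_to_pagebuf(true, buf, mask);	/* "0-3\n" */
}
```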
```diff
 static struct kobj_attribute cache_shared_cpu_map_attr =
 	__ATTR(shared_cpu_map, 0444, shared_cpu_map_show, NULL);
+
+static struct kobj_attribute cache_shared_cpu_list_attr =
+	__ATTR(shared_cpu_list, 0444, shared_cpu_list_show, NULL);

 /* Attributes which should always be created -- the kobject/sysfs core
  * does this automatically via kobj_type->default_attrs. This is the
...
 	&cache_type_attr.attr,
 	&cache_level_attr.attr,
 	&cache_shared_cpu_map_attr.attr,
+	&cache_shared_cpu_list_attr.attr,
 	NULL,
 };
```
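Exposing `shared_cpu_list` alongside `shared_cpu_map` matches what the generic `drivers/base/cacheinfo.c` sysfs layout (and the x86 implementation this file aims to be compatible with) already provides, so userspace that parses either file works unchanged on powerpc.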
```diff
 		rc = attr->show(&dir->kobj, attr, buf);
 		if (rc <= 0) {
 			pr_debug("not creating %s attribute for "
-				 "%pOF(%s) (rc = %zd)\n",
+				 "%pOFP(%s) (rc = %zd)\n",
 				 attr->attr.name, cache->ofnode,
 				 cache_type, rc);
 			continue;
 		}
 		if (sysfs_create_file(&dir->kobj, &attr->attr))
-			pr_debug("could not create %s attribute for %pOF(%s)\n",
+			pr_debug("could not create %s attribute for %pOFP(%s)\n",
 				 attr->attr.name, cache->ofnode, cache_type);
 	}

```
```diff
 	index_dir = kzalloc(sizeof(*index_dir), GFP_KERNEL);
 	if (!index_dir)
-		goto err;
+		return;

 	index_dir->cache = cache;

 	rc = kobject_init_and_add(&index_dir->kobj, &cache_index_type,
 				  cache_dir->kobj, "index%d", index);
-	if (rc)
-		goto err;
+	if (rc) {
+		kobject_put(&index_dir->kobj);
+		return;
+	}

 	index_dir->next = cache_dir->index;
 	cache_dir->index = index_dir;

 	cacheinfo_create_index_opt_attrs(index_dir);
-
-	return;
-err:
-	kfree(index_dir);
 }

 static void cacheinfo_sysfs_populate(unsigned int cpu_id,
```
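The reworked error handling follows the standard kobject lifetime rule: a `kzalloc()` failure can simply return, but once `kobject_init_and_add()` has run, the kobject is live even when it returns an error, so the memory must be released through `kobject_put()`, which invokes the ktype's release callback; the old `goto err` path called `kfree()` on an initialized kobject behind the refcount's back. A minimal sketch of the pattern, with hypothetical `example_*` names standing in for the file's `cache_index_*` machinery:

```c
#include <linux/kobject.h>
#include <linux/slab.h>

struct example_dir {			/* stand-in for cache_index_dir */
	struct kobject kobj;
};

static void example_release(struct kobject *kobj)
{
	struct example_dir *dir = container_of(kobj, struct example_dir, kobj);

	kfree(dir);			/* the one place the memory is freed */
}

static struct kobj_type example_ktype = {
	.release = example_release,
};

static void example_create(struct kobject *parent)
{
	struct example_dir *dir;

	dir = kzalloc(sizeof(*dir), GFP_KERNEL);
	if (!dir)
		return;			/* kobject not initialized: plain return is fine */

	if (kobject_init_and_add(&dir->kobj, &example_ktype, parent, "example")) {
		/*
		 * The kobject is live despite the failure: drop the
		 * reference and let ->release() free the structure.
		 */
		kobject_put(&dir->kobj);
	}
}
```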
```diff
 	struct cache *next = cache->next_local;

 	WARN_ONCE(!cpumask_test_cpu(cpu, &cache->shared_cpu_map),
-		  "CPU %i not accounted in %pOF(%s)\n",
+		  "CPU %i not accounted in %pOFP(%s)\n",
 		  cpu, cache->ofnode,
 		  cache_type_string(cache));

```
|---|