.. | .. |
---|
| 1 | +// SPDX-License-Identifier: GPL-2.0-only |
---|
1 | 2 | /* |
---|
2 | 3 | * Copyright (C) 2017 SiFive |
---|
3 | | - * |
---|
4 | | - * This program is free software; you can redistribute it and/or |
---|
5 | | - * modify it under the terms of the GNU General Public License |
---|
6 | | - * as published by the Free Software Foundation, version 2. |
---|
7 | | - * |
---|
8 | | - * This program is distributed in the hope that it will be useful, |
---|
9 | | - * but WITHOUT ANY WARRANTY; without even the implied warranty of |
---|
10 | | - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
---|
11 | | - * GNU General Public License for more details. |
---|
12 | 4 | */ |
---|
13 | 5 | |
---|
14 | | -#include <linux/cacheinfo.h> |
---|
15 | 6 | #include <linux/cpu.h> |
---|
16 | 7 | #include <linux/of.h> |
---|
17 | 8 | #include <linux/of_device.h> |
---|
| 9 | +#include <asm/cacheinfo.h> |
---|
18 | 10 | |
---|
19 | | -static void ci_leaf_init(struct cacheinfo *this_leaf, |
---|
20 | | - struct device_node *node, |
---|
21 | | - enum cache_type type, unsigned int level) |
---|
/* Vendor-registered hooks for exposing extra, private cacheinfo attributes. */
static struct riscv_cacheinfo_ops *rv_cache_ops;

/*
 * riscv_set_cacheinfo_ops() - register a vendor hook table.
 * @ops: hook table later consulted by cache_get_priv_group().
 */
void riscv_set_cacheinfo_ops(struct riscv_cacheinfo_ops *ops)
{
	rv_cache_ops = ops;
}
EXPORT_SYMBOL_GPL(riscv_set_cacheinfo_ops);
---|
| 18 | + |
---|
| 19 | +const struct attribute_group * |
---|
| 20 | +cache_get_priv_group(struct cacheinfo *this_leaf) |
---|
| 21 | +{ |
---|
| 22 | + if (rv_cache_ops && rv_cache_ops->get_priv_group) |
---|
| 23 | + return rv_cache_ops->get_priv_group(this_leaf); |
---|
| 24 | + return NULL; |
---|
| 25 | +} |
---|
| 26 | + |
---|
| 27 | +static struct cacheinfo *get_cacheinfo(u32 level, enum cache_type type) |
---|
| 28 | +{ |
---|
| 29 | + /* |
---|
| 30 | + * Using raw_smp_processor_id() elides a preemptability check, but this |
---|
| 31 | + * is really indicative of a larger problem: the cacheinfo UABI assumes |
---|
| 32 | + * that cores have a homonogenous view of the cache hierarchy. That |
---|
| 33 | + * happens to be the case for the current set of RISC-V systems, but |
---|
| 34 | + * likely won't be true in general. Since there's no way to provide |
---|
| 35 | + * correct information for these systems via the current UABI we're |
---|
| 36 | + * just eliding the check for now. |
---|
| 37 | + */ |
---|
| 38 | + struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(raw_smp_processor_id()); |
---|
| 39 | + struct cacheinfo *this_leaf; |
---|
| 40 | + int index; |
---|
| 41 | + |
---|
| 42 | + for (index = 0; index < this_cpu_ci->num_leaves; index++) { |
---|
| 43 | + this_leaf = this_cpu_ci->info_list + index; |
---|
| 44 | + if (this_leaf->level == level && this_leaf->type == type) |
---|
| 45 | + return this_leaf; |
---|
| 46 | + } |
---|
| 47 | + |
---|
| 48 | + return NULL; |
---|
| 49 | +} |
---|
| 50 | + |
---|
| 51 | +uintptr_t get_cache_size(u32 level, enum cache_type type) |
---|
| 52 | +{ |
---|
| 53 | + struct cacheinfo *this_leaf = get_cacheinfo(level, type); |
---|
| 54 | + |
---|
| 55 | + return this_leaf ? this_leaf->size : 0; |
---|
| 56 | +} |
---|
| 57 | + |
---|
| 58 | +uintptr_t get_cache_geometry(u32 level, enum cache_type type) |
---|
| 59 | +{ |
---|
| 60 | + struct cacheinfo *this_leaf = get_cacheinfo(level, type); |
---|
| 61 | + |
---|
| 62 | + return this_leaf ? (this_leaf->ways_of_associativity << 16 | |
---|
| 63 | + this_leaf->coherency_line_size) : |
---|
| 64 | + 0; |
---|
| 65 | +} |
---|
| 66 | + |
---|
| 67 | +static void ci_leaf_init(struct cacheinfo *this_leaf, enum cache_type type, |
---|
| 68 | + unsigned int level, unsigned int size, |
---|
| 69 | + unsigned int sets, unsigned int line_size) |
---|
22 | 70 | { |
---|
23 | 71 | this_leaf->level = level; |
---|
24 | 72 | this_leaf->type = type; |
---|
25 | | - /* not a sector cache */ |
---|
26 | | - this_leaf->physical_line_partition = 1; |
---|
27 | | - /* TODO: Add to DTS */ |
---|
28 | | - this_leaf->attributes = |
---|
29 | | - CACHE_WRITE_BACK |
---|
30 | | - | CACHE_READ_ALLOCATE |
---|
31 | | - | CACHE_WRITE_ALLOCATE; |
---|
| 73 | + this_leaf->size = size; |
---|
| 74 | + this_leaf->number_of_sets = sets; |
---|
| 75 | + this_leaf->coherency_line_size = line_size; |
---|
| 76 | + |
---|
| 77 | + /* |
---|
| 78 | + * If the cache is fully associative, there is no need to |
---|
| 79 | + * check the other properties. |
---|
| 80 | + */ |
---|
| 81 | + if (sets == 1) |
---|
| 82 | + return; |
---|
| 83 | + |
---|
| 84 | + /* |
---|
| 85 | + * Set the ways number for n-ways associative, make sure |
---|
| 86 | + * all properties are big than zero. |
---|
| 87 | + */ |
---|
| 88 | + if (sets > 0 && size > 0 && line_size > 0) |
---|
| 89 | + this_leaf->ways_of_associativity = (size / sets) / line_size; |
---|
| 90 | +} |
---|
| 91 | + |
---|
| 92 | +static void fill_cacheinfo(struct cacheinfo **this_leaf, |
---|
| 93 | + struct device_node *node, unsigned int level) |
---|
| 94 | +{ |
---|
| 95 | + unsigned int size, sets, line_size; |
---|
| 96 | + |
---|
| 97 | + if (!of_property_read_u32(node, "cache-size", &size) && |
---|
| 98 | + !of_property_read_u32(node, "cache-block-size", &line_size) && |
---|
| 99 | + !of_property_read_u32(node, "cache-sets", &sets)) { |
---|
| 100 | + ci_leaf_init((*this_leaf)++, CACHE_TYPE_UNIFIED, level, size, sets, line_size); |
---|
| 101 | + } |
---|
| 102 | + |
---|
| 103 | + if (!of_property_read_u32(node, "i-cache-size", &size) && |
---|
| 104 | + !of_property_read_u32(node, "i-cache-sets", &sets) && |
---|
| 105 | + !of_property_read_u32(node, "i-cache-block-size", &line_size)) { |
---|
| 106 | + ci_leaf_init((*this_leaf)++, CACHE_TYPE_INST, level, size, sets, line_size); |
---|
| 107 | + } |
---|
| 108 | + |
---|
| 109 | + if (!of_property_read_u32(node, "d-cache-size", &size) && |
---|
| 110 | + !of_property_read_u32(node, "d-cache-sets", &sets) && |
---|
| 111 | + !of_property_read_u32(node, "d-cache-block-size", &line_size)) { |
---|
| 112 | + ci_leaf_init((*this_leaf)++, CACHE_TYPE_DATA, level, size, sets, line_size); |
---|
| 113 | + } |
---|
32 | 114 | } |
---|
33 | 115 | |
---|
34 | 116 | int init_cache_level(unsigned int cpu) |
---|
35 | 117 | { |
---|
36 | 118 | struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); |
---|
37 | 119 | struct device_node *np = of_cpu_device_node_get(cpu); |
---|
| 120 | + struct device_node *prev = NULL; |
---|
38 | 121 | int levels = 0, leaves = 0, level; |
---|
39 | 122 | |
---|
40 | 123 | if (of_property_read_bool(np, "cache-size")) |
---|
.. | .. |
---|
46 | 129 | if (leaves > 0) |
---|
47 | 130 | levels = 1; |
---|
48 | 131 | |
---|
| 132 | + prev = np; |
---|
49 | 133 | while ((np = of_find_next_cache_node(np))) { |
---|
| 134 | + of_node_put(prev); |
---|
| 135 | + prev = np; |
---|
50 | 136 | if (!of_device_is_compatible(np, "cache")) |
---|
51 | 137 | break; |
---|
52 | 138 | if (of_property_read_u32(np, "cache-level", &level)) |
---|
.. | .. |
---|
62 | 148 | levels = level; |
---|
63 | 149 | } |
---|
64 | 150 | |
---|
| 151 | + of_node_put(np); |
---|
65 | 152 | this_cpu_ci->num_levels = levels; |
---|
66 | 153 | this_cpu_ci->num_leaves = leaves; |
---|
| 154 | + |
---|
67 | 155 | return 0; |
---|
68 | 156 | } |
---|
69 | 157 | |
---|
.. | .. |
---|
72 | 160 | struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); |
---|
73 | 161 | struct cacheinfo *this_leaf = this_cpu_ci->info_list; |
---|
74 | 162 | struct device_node *np = of_cpu_device_node_get(cpu); |
---|
| 163 | + struct device_node *prev = NULL; |
---|
75 | 164 | int levels = 1, level = 1; |
---|
76 | 165 | |
---|
77 | | - if (of_property_read_bool(np, "cache-size")) |
---|
78 | | - ci_leaf_init(this_leaf++, np, CACHE_TYPE_UNIFIED, level); |
---|
79 | | - if (of_property_read_bool(np, "i-cache-size")) |
---|
80 | | - ci_leaf_init(this_leaf++, np, CACHE_TYPE_INST, level); |
---|
81 | | - if (of_property_read_bool(np, "d-cache-size")) |
---|
82 | | - ci_leaf_init(this_leaf++, np, CACHE_TYPE_DATA, level); |
---|
| 166 | + /* Level 1 caches in cpu node */ |
---|
| 167 | + fill_cacheinfo(&this_leaf, np, level); |
---|
83 | 168 | |
---|
| 169 | + /* Next level caches in cache nodes */ |
---|
| 170 | + prev = np; |
---|
84 | 171 | while ((np = of_find_next_cache_node(np))) { |
---|
| 172 | + of_node_put(prev); |
---|
| 173 | + prev = np; |
---|
| 174 | + |
---|
85 | 175 | if (!of_device_is_compatible(np, "cache")) |
---|
86 | 176 | break; |
---|
87 | 177 | if (of_property_read_u32(np, "cache-level", &level)) |
---|
88 | 178 | break; |
---|
89 | 179 | if (level <= levels) |
---|
90 | 180 | break; |
---|
91 | | - if (of_property_read_bool(np, "cache-size")) |
---|
92 | | - ci_leaf_init(this_leaf++, np, CACHE_TYPE_UNIFIED, level); |
---|
93 | | - if (of_property_read_bool(np, "i-cache-size")) |
---|
94 | | - ci_leaf_init(this_leaf++, np, CACHE_TYPE_INST, level); |
---|
95 | | - if (of_property_read_bool(np, "d-cache-size")) |
---|
96 | | - ci_leaf_init(this_leaf++, np, CACHE_TYPE_DATA, level); |
---|
| 181 | + |
---|
| 182 | + fill_cacheinfo(&this_leaf, np, level); |
---|
| 183 | + |
---|
97 | 184 | levels = level; |
---|
98 | 185 | } |
---|
| 186 | + of_node_put(np); |
---|
99 | 187 | |
---|
100 | 188 | return 0; |
---|
101 | 189 | } |
---|