hc
2024-05-10 23fa18eaa71266feff7ba8d83022d9e1cc83c65a
kernel/include/linux/topology.h
@@ -27,6 +27,7 @@
 #ifndef _LINUX_TOPOLOGY_H
 #define _LINUX_TOPOLOGY_H
 
+#include <linux/arch_topology.h>
 #include <linux/cpumask.h>
 #include <linux/bitops.h>
 #include <linux/mmzone.h>
@@ -47,6 +48,7 @@
 /* Conform to ACPI 2.0 SLIT distance definitions */
 #define LOCAL_DISTANCE 10
 #define REMOTE_DISTANCE 20
+#define DISTANCE_BITS 8
 #ifndef node_distance
 #define node_distance(from,to) ((from) == (to) ? LOCAL_DISTANCE : REMOTE_DISTANCE)
 #endif
@@ -59,6 +61,20 @@
  */
 #define RECLAIM_DISTANCE 30
 #endif
+
+/*
+ * The following tunable allows platforms to override the default node
+ * reclaim distance (RECLAIM_DISTANCE) if remote memory accesses are
+ * sufficiently fast that the default value actually hurts
+ * performance.
+ *
+ * AMD EPYC machines use this because even though the 2-hop distance
+ * is 32 (3.2x slower than a local memory access) performance actually
+ * *improves* if allowed to reclaim memory and load balance tasks
+ * between NUMA nodes 2-hops apart.
+ */
+extern int __read_mostly node_reclaim_distance;
+
 #ifndef PENALTY_FOR_NODE_WITH_CPUS
 #define PENALTY_FOR_NODE_WITH_CPUS (1)
 #endif
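
The hunk above only exports the tunable; consumers elsewhere in the tree compare a SLIT distance against it (for example, a zone_allows_reclaim()-style check of node_distance(a, b) <= node_reclaim_distance). The stand-alone sketch below models that comparison in user space with a made-up 4-node distance table, to show why a platform whose 2-hop distance is 32 would raise the value above the default of 30. All function names, the table, and the numbers are illustrative assumptions, not part of this patch.

/* Stand-alone model of the node_reclaim_distance check (illustrative only). */
#include <stdio.h>

#define LOCAL_DISTANCE   10
#define REMOTE_DISTANCE  20
#define RECLAIM_DISTANCE 30

/* Runtime tunable declared by the patch; default mirrors RECLAIM_DISTANCE. */
static int node_reclaim_distance = RECLAIM_DISTANCE;

/* Hypothetical 4-node SLIT table: 2-hop neighbours sit at distance 32. */
static const int slit[4][4] = {
	{ 10, 20, 32, 32 },
	{ 20, 10, 32, 32 },
	{ 32, 32, 10, 20 },
	{ 32, 32, 20, 10 },
};

static int node_distance(int from, int to)
{
	return slit[from][to];
}

/* Modelled after the kind of check a reclaim path performs. */
static int allows_reclaim(int local, int remote)
{
	return node_distance(local, remote) <= node_reclaim_distance;
}

int main(void)
{
	printf("default (30): reclaim 0->2 allowed? %d\n", allows_reclaim(0, 2));

	/* An EPYC-like platform could raise the tunable to cover distance 32. */
	node_reclaim_distance = 32;
	printf("raised  (32): reclaim 0->2 allowed? %d\n", allows_reclaim(0, 2));
	return 0;
}
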
@@ -115,20 +131,11 @@
  * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem().
  */
 DECLARE_PER_CPU(int, _numa_mem_);
-extern int _node_numa_mem_[MAX_NUMNODES];
 
 #ifndef set_numa_mem
 static inline void set_numa_mem(int node)
 {
 	this_cpu_write(_numa_mem_, node);
-	_node_numa_mem_[numa_node_id()] = node;
-}
-#endif
-
-#ifndef node_to_mem_node
-static inline int node_to_mem_node(int node)
-{
-	return _node_numa_mem_[node];
 }
 #endif
 
@@ -151,7 +158,6 @@
 static inline void set_cpu_numa_mem(int cpu, int node)
 {
 	per_cpu(_numa_mem_, cpu) = node;
-	_node_numa_mem_[cpu_to_node(cpu)] = node;
 }
 #endif
 
@@ -162,13 +168,6 @@
 static inline int numa_mem_id(void)
 {
 	return numa_node_id();
-}
-#endif
-
-#ifndef node_to_mem_node
-static inline int node_to_mem_node(int node)
-{
-	return node;
 }
 #endif
 
@@ -184,6 +183,9 @@
 #ifndef topology_physical_package_id
 #define topology_physical_package_id(cpu) ((void)(cpu), -1)
 #endif
+#ifndef topology_die_id
+#define topology_die_id(cpu) ((void)(cpu), -1)
+#endif
 #ifndef topology_core_id
 #define topology_core_id(cpu) ((void)(cpu), 0)
 #endif
@@ -193,8 +195,11 @@
 #ifndef topology_core_cpumask
 #define topology_core_cpumask(cpu) cpumask_of(cpu)
 #endif
+#ifndef topology_die_cpumask
+#define topology_die_cpumask(cpu) cpumask_of(cpu)
+#endif
 
-#ifdef CONFIG_SCHED_SMT
+#if defined(CONFIG_SCHED_SMT) && !defined(cpu_smt_mask)
 static inline const struct cpumask *cpu_smt_mask(int cpu)
 {
 	return topology_sibling_cpumask(cpu);
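
The new topology_die_id() and topology_die_cpumask() stubs follow the header's existing convention: if an architecture header included earlier (typically <asm/topology.h>) already defines the macro, the #ifndef fallback is skipped, and the added !defined(cpu_smt_mask) test likewise lets an architecture provide its own SMT mask helper. Below is a minimal stand-alone illustration of that #ifndef-override pattern, using a made-up 8-CPUs-per-die mapping rather than anything from this patch.

/*
 * Stand-alone illustration of the #ifndef fallback pattern (not kernel code).
 * Here the "architecture" defines topology_die_id() first, so the generic
 * stub below is never used; drop the first #define to see the fallback (-1).
 */
#include <stdio.h>

#define topology_die_id(cpu)	((cpu) / 8)	/* hypothetical: 8 CPUs per die */

#ifndef topology_die_id
#define topology_die_id(cpu)	((void)(cpu), -1)	/* generic fallback */
#endif

int main(void)
{
	printf("die of CPU 9: %d\n", topology_die_id(9));	/* prints 1 */
	return 0;
}
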