2024-05-13 9d77db3c730780c8ef5ccd4b66403ff5675cfe4e
kernel/include/linux/sched/topology.h
@@ -4,6 +4,7 @@
 
 #include <linux/topology.h>
 #include <linux/android_kabi.h>
+#include <linux/android_vendor.h>
 
 #include <linux/sched/idle.h>
 
@@ -12,21 +13,29 @@
  */
 #ifdef CONFIG_SMP
 
-#define SD_LOAD_BALANCE		0x0001	/* Do load balancing on this domain. */
-#define SD_BALANCE_NEWIDLE	0x0002	/* Balance when about to become idle */
-#define SD_BALANCE_EXEC		0x0004	/* Balance on exec */
-#define SD_BALANCE_FORK		0x0008	/* Balance on fork, clone */
-#define SD_BALANCE_WAKE		0x0010	/* Balance on wakeup */
-#define SD_WAKE_AFFINE		0x0020	/* Wake task to waking CPU */
-#define SD_ASYM_CPUCAPACITY	0x0040	/* Domain members have different CPU capacities */
-#define SD_SHARE_CPUCAPACITY	0x0080	/* Domain members share CPU capacity */
-#define SD_SHARE_POWERDOMAIN	0x0100	/* Domain members share power domain */
-#define SD_SHARE_PKG_RESOURCES	0x0200	/* Domain members share CPU pkg resources */
-#define SD_SERIALIZE		0x0400	/* Only a single load balancing instance */
-#define SD_ASYM_PACKING		0x0800	/* Place busy groups earlier in the domain */
-#define SD_PREFER_SIBLING	0x1000	/* Prefer to place tasks in a sibling domain */
-#define SD_OVERLAP		0x2000	/* sched_domains of this level overlap */
-#define SD_NUMA			0x4000	/* cross-node balancing */
+/* Generate SD flag indexes */
+#define SD_FLAG(name, mflags) __##name,
+enum {
+	#include <linux/sched/sd_flags.h>
+	__SD_FLAG_CNT,
+};
+#undef SD_FLAG
+/* Generate SD flag bits */
+#define SD_FLAG(name, mflags) name = 1 << __##name,
+enum {
+	#include <linux/sched/sd_flags.h>
+};
+#undef SD_FLAG
+
+#ifdef CONFIG_SCHED_DEBUG
+
+struct sd_flag_debug {
+	unsigned int meta_flags;
+	char *name;
+};
+extern const struct sd_flag_debug sd_flag_debug[];
+
+#endif
 
 #ifdef CONFIG_SCHED_SMT
 static inline int cpu_smt_flags(void)
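
Note: the two SD_FLAG passes above are an x-macro pattern — each pass redefines SD_FLAG() and re-includes <linux/sched/sd_flags.h> to stamp out a different expansion of the same flag list. A minimal sketch of what the preprocessor produces, assuming (hypothetical excerpt) that sd_flags.h opens with entries such as SD_FLAG(SD_BALANCE_NEWIDLE, SDF_SHARED_CHILD) and SD_FLAG(SD_BALANCE_EXEC, SDF_SHARED_CHILD):

/* Pass 1: SD_FLAG(name, mflags) -> __##name, yielding dense indexes */
enum {
	__SD_BALANCE_NEWIDLE,		/* 0, if first entry */
	__SD_BALANCE_EXEC,		/* 1, if second entry */
	/* ... one index per entry in sd_flags.h ... */
	__SD_FLAG_CNT,			/* total number of flags */
};

/* Pass 2: SD_FLAG(name, mflags) -> name = 1 << __##name, yielding bits */
enum {
	SD_BALANCE_NEWIDLE = 1 << __SD_BALANCE_NEWIDLE,	/* 0x1 */
	SD_BALANCE_EXEC    = 1 << __SD_BALANCE_EXEC,	/* 0x2 */
	/* ... */
};

The flag list lives in exactly one place, so a flag's index, bit value, and (under CONFIG_SCHED_DEBUG) its name/metadata can never drift apart.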
@@ -68,25 +77,19 @@
 	atomic_t	nr_busy_cpus;
 	int		has_idle_cores;
 
-	bool		overutilized;
+	ANDROID_VENDOR_DATA(1);
 };
 
 struct sched_domain {
 	/* These fields must be setup */
-	struct sched_domain *parent;	/* top domain must be null terminated */
-	struct sched_domain *child;	/* bottom domain must be null terminated */
+	struct sched_domain __rcu *parent;	/* top domain must be null terminated */
+	struct sched_domain __rcu *child;	/* bottom domain must be null terminated */
 	struct sched_group *groups;	/* the balancing groups of the domain */
 	unsigned long min_interval;	/* Minimum balance interval ms */
 	unsigned long max_interval;	/* Maximum balance interval ms */
 	unsigned int busy_factor;	/* less balancing by factor if busy */
 	unsigned int imbalance_pct;	/* No balance until over watermark */
 	unsigned int cache_nice_tries;	/* Leave cache hot tasks for # tries */
-	unsigned int busy_idx;
-	unsigned int idle_idx;
-	unsigned int newidle_idx;
-	unsigned int wake_idx;
-	unsigned int forkexec_idx;
-	unsigned int smt_gain;
 
 	int nohz_idle;			/* NOHZ IDLE status */
 	int flags;			/* See SD_* */
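
Note: the __rcu annotation on parent/child documents that the domain tree is rebuilt under RCU and may only be traversed inside an RCU read-side critical section. A minimal sketch (hypothetical helper, standard RCU API) of walking up the hierarchy:

#include <linux/rcupdate.h>

static void walk_domains(struct sched_domain __rcu *base)
{
	struct sched_domain *sd;

	rcu_read_lock();
	/* Each sd obtained here stays valid until rcu_read_unlock(). */
	for (sd = rcu_dereference(base); sd; sd = rcu_dereference(sd->parent))
		;
	rcu_read_unlock();
}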
@@ -155,13 +158,17 @@
 	 * by attaching extra space to the end of the structure,
 	 * depending on how many CPUs the kernel has booted up with)
 	 */
-	unsigned long span[0];
+	unsigned long span[];
 };
 
 static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
 {
 	return to_cpumask(sd->span);
 }
+
+extern void partition_sched_domains_locked(int ndoms_new,
+					   cpumask_var_t doms_new[],
+					   struct sched_domain_attr *dattr_new);
 
 extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
 				    struct sched_domain_attr *dattr_new);
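
Note: span[] is a C99 flexible array member, replacing the old span[0] GNU extension; the cpumask storage is carved out by over-allocating the struct, exactly as the comment above it describes. A minimal sketch of the allocation pattern this implies (hypothetical call site; the real topology code sizes and places this differently):

#include <linux/slab.h>
#include <linux/cpumask.h>

static struct sched_domain *alloc_domain(const struct cpumask *span)
{
	struct sched_domain *sd;

	/* One allocation covers the struct plus cpumask_size() bytes of span[]. */
	sd = kzalloc(sizeof(*sd) + cpumask_size(), GFP_KERNEL);
	if (sd)
		cpumask_copy(sched_domain_span(sd), span);
	return sd;
}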
@@ -203,20 +210,15 @@
 # define SD_INIT_NAME(type)
 #endif
 
-#ifndef arch_scale_cpu_capacity
-static __always_inline
-unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
-{
-	if (sd && (sd->flags & SD_SHARE_CPUCAPACITY) && (sd->span_weight > 1))
-		return sd->smt_gain / sd->span_weight;
-
-	return SCHED_CAPACITY_SCALE;
-}
-#endif
-
 #else /* CONFIG_SMP */
 
 struct sched_domain_attr;
+
+static inline void
+partition_sched_domains_locked(int ndoms_new, cpumask_var_t doms_new[],
+			       struct sched_domain_attr *dattr_new)
+{
+}
 
 static inline void
 partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
@@ -229,15 +231,40 @@
 	return true;
 }
 
+#endif /* !CONFIG_SMP */
+
 #ifndef arch_scale_cpu_capacity
+/**
+ * arch_scale_cpu_capacity - get the capacity scale factor of a given CPU.
+ * @cpu: the CPU in question.
+ *
+ * Return: the CPU scale factor normalized against SCHED_CAPACITY_SCALE, i.e.
+ *
+ *             max_perf(cpu)
+ *   ----------------------------- * SCHED_CAPACITY_SCALE
+ *   max(max_perf(c) : c \in CPUs)
+ */
 static __always_inline
-unsigned long arch_scale_cpu_capacity(void __always_unused *sd, int cpu)
+unsigned long arch_scale_cpu_capacity(int cpu)
 {
 	return SCHED_CAPACITY_SCALE;
 }
 #endif
 
-#endif /* !CONFIG_SMP */
+#ifndef arch_scale_thermal_pressure
+static __always_inline
+unsigned long arch_scale_thermal_pressure(int cpu)
+{
+	return 0;
+}
+#endif
+
+#ifndef arch_set_thermal_pressure
+static __always_inline
+void arch_set_thermal_pressure(const struct cpumask *cpus,
+			       unsigned long th_pressure)
+{ }
+#endif
 
 static inline int task_node(const struct task_struct *p)
 {
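
Note: the kernel-doc formula above normalizes each CPU's peak performance against the fastest CPU in the system. Worked example with hypothetical numbers: if little cores peak at half the big cores' performance, a little CPU reports 1/2 * SCHED_CAPACITY_SCALE = 512 while a big CPU reports 1024. A hypothetical arch override (illustration only, not how any real arch wires this up) would then look like:

/*
 * Per-CPU peak performance, pre-normalized so the fastest CPU maps to
 * SCHED_CAPACITY_SCALE (1024); e.g. { 512, 512, 1024, 1024 } on a
 * 2-little/2-big system.
 */
static unsigned long cpu_scale[NR_CPUS];

#define arch_scale_cpu_capacity(cpu)	(cpu_scale[cpu])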