2024-05-10 61598093bbdd283a7edc367d900f223070ead8d2
kernel/kernel/sched/pelt.h
@@ -7,6 +7,26 @@
 int update_rt_rq_load_avg(u64 now, struct rq *rq, int running);
 int update_dl_rq_load_avg(u64 now, struct rq *rq, int running);
 
+#ifdef CONFIG_SCHED_THERMAL_PRESSURE
+int update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity);
+
+static inline u64 thermal_load_avg(struct rq *rq)
+{
+	return READ_ONCE(rq->avg_thermal.load_avg);
+}
+#else
+static inline int
+update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity)
+{
+	return 0;
+}
+
+static inline u64 thermal_load_avg(struct rq *rq)
+{
+	return 0;
+}
+#endif
+
 #ifdef CONFIG_HAVE_SCHED_AVG_IRQ
 int update_irq_load_avg(struct rq *rq, u64 running);
 #else
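For context on the hunk above: update_thermal_load_avg() folds the capped-capacity ("thermal pressure") signal into rq->avg_thermal, and thermal_load_avg() reads the decayed average back. The sketch below shows one plausible caller, loosely modelled on mainline's __update_blocked_others(); arch_scale_thermal_pressure() and rq_clock_thermal() are assumed from mainline and may be named or wired differently in this tree.

/*
 * Sketch only, not part of the patch: a periodic load-balance style
 * caller that refreshes the non-CFS averages, including the new
 * thermal signal.  Helpers other than those declared above are
 * assumptions borrowed from mainline.
 */
static inline bool update_other_load_avgs_sketch(struct rq *rq)
{
	const struct sched_class *curr_class = rq->curr->sched_class;
	unsigned long thermal_pressure = arch_scale_thermal_pressure(cpu_of(rq));
	u64 now = rq_clock_pelt(rq);

	/* Each update helper returns non-zero if its average decayed. */
	return update_rt_rq_load_avg(now, rq, curr_class == &rt_sched_class) |
	       update_dl_rq_load_avg(now, rq, curr_class == &dl_sched_class) |
	       update_thermal_load_avg(rq_clock_thermal(rq), rq, thermal_pressure) |
	       update_irq_load_avg(rq, 0);
}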
@@ -17,14 +37,12 @@
 }
 #endif
 
-/*
- * When a task is dequeued, its estimated utilization should not be update if
- * its util_avg has not been updated at least once.
- * This flag is used to synchronize util_avg updates with util_est updates.
- * We map this information into the LSB bit of the utilization saved at
- * dequeue time (i.e. util_est.dequeued).
- */
-#define UTIL_AVG_UNCHANGED 0x1
+#define PELT_MIN_DIVIDER (LOAD_AVG_MAX - 1024)
+
+static inline u32 get_pelt_divider(struct sched_avg *avg)
+{
+	return PELT_MIN_DIVIDER + avg->period_contrib;
+}
 
 static inline void cfs_se_util_change(struct sched_avg *avg)
 {
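The hunk above centralizes the *_sum to *_avg conversion in get_pelt_divider(). With the stock 32 ms PELT half-life, LOAD_AVG_MAX is 47742, so PELT_MIN_DIVIDER is 46718 and the divider ranges from 46718 to 47741 depending on period_contrib, the 0..1023 us slice of the current, not-yet-complete 1024 us period. The standalone snippet below only works through those bounds; the constant and the fully-busy util_sum value are assumptions for illustration, not values taken from this patch.

/* Standalone illustration (userspace, not kernel code). */
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_LOAD_AVG_MAX	47742			/* assumes 32 ms half-life */
#define EXAMPLE_MIN_DIVIDER	(EXAMPLE_LOAD_AVG_MAX - 1024)

static uint32_t example_pelt_divider(uint32_t period_contrib)
{
	return EXAMPLE_MIN_DIVIDER + period_contrib;	/* 46718..47741 */
}

int main(void)
{
	/* A continuously running task accumulates util_sum close to
	 * LOAD_AVG_MAX * 1024; dividing by the divider bounds util_avg
	 * by SCHED_CAPACITY_SCALE (1024). */
	uint64_t util_sum = (uint64_t)EXAMPLE_LOAD_AVG_MAX * 1024;

	printf("divider: %u..%u\n",
	       example_pelt_divider(0), example_pelt_divider(1023));
	printf("util_avg when fully busy: %llu\n",
	       (unsigned long long)(util_sum / example_pelt_divider(1023)));
	return 0;
}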
@@ -33,7 +51,7 @@
 	if (!sched_feat(UTIL_EST))
 		return;
 
-	/* Avoid store if the flag has been already set */
+	/* Avoid store if the flag has been already reset */
 	enqueued = avg->util_est.enqueued;
 	if (!(enqueued & UTIL_AVG_UNCHANGED))
 		return;
@@ -42,6 +60,8 @@
 	enqueued &= ~UTIL_AVG_UNCHANGED;
 	WRITE_ONCE(avg->util_est.enqueued, enqueued);
 }
+
+extern unsigned int sched_pelt_lshift;
 
 /*
  * The clock_pelt scales the time to reflect the effective amount of
@@ -57,9 +77,13 @@
  */
 static inline void update_rq_clock_pelt(struct rq *rq, s64 delta)
 {
+	delta <<= READ_ONCE(sched_pelt_lshift);
+
+	per_cpu(clock_task_mult, rq->cpu) += delta;
+
 	if (unlikely(is_idle_task(rq->curr))) {
 		/* The rq is idle, we can sync to clock_task */
-		rq->clock_pelt = rq_clock_task(rq);
+		rq->clock_pelt = rq_clock_task_mult(rq);
 		return;
 	}
 
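The two added lines at the top of update_rq_clock_pelt() are the actual boost: every elapsed delta is multiplied by 2^sched_pelt_lshift before it feeds the PELT machinery, and the boosted time is accumulated in a per-CPU clock_task_mult so that the idle-sync path can stay consistent with it (clock_task_mult and rq_clock_task_mult() are defined by other parts of this series, not in this header). The practical effect is a shorter effective half-life, sketched below under the assumption of the stock 32 ms tuning.

/*
 * Sketch only: effect of sched_pelt_lshift on ramp-up/decay speed,
 * assuming the stock 32 ms PELT half-life.  Scaling every delta by
 * 2^lshift makes the geometric series evolve as if time ran that much
 * faster, so the half-life is effectively divided by the same factor.
 */
static inline unsigned int effective_pelt_halflife_ms(unsigned int lshift)
{
	return 32 >> lshift;	/* 0 -> 32 ms, 1 -> 16 ms, 2 -> 8 ms */
}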
@@ -79,7 +103,7 @@
 	 * Scale the elapsed time to reflect the real amount of
 	 * computation
 	 */
-	delta = cap_scale(delta, arch_scale_cpu_capacity(NULL, cpu_of(rq)));
+	delta = cap_scale(delta, arch_scale_cpu_capacity(cpu_of(rq)));
 	delta = cap_scale(delta, arch_scale_freq_capacity(cpu_of(rq)));
 
 	rq->clock_pelt += delta;
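The only change in the hunk above is dropping the stale NULL argument from arch_scale_cpu_capacity(); the two cap_scale() lines are otherwise untouched and are what make PELT time capacity- and frequency-invariant. As a worked example, with cap_scale(v, s) expanding to (v * s) >> SCHED_CAPACITY_SHIFT elsewhere in the scheduler and the capacity values below made up for illustration:

/*
 * Illustration only: a mid-size core with cpu capacity 512 running at
 * half of its maximum frequency (freq scale 512 out of 1024) turns
 * 4 ms of wall-clock time into 1 ms of PELT time.
 */
static inline u64 invariant_delta_sketch(u64 delta_ns)
{
	u64 cpu_scale  = 512;	/* assumed arch_scale_cpu_capacity() result */
	u64 freq_scale = 512;	/* assumed arch_scale_freq_capacity() result */

	delta_ns = (delta_ns * cpu_scale)  >> 10;	/* capacity invariance */
	delta_ns = (delta_ns * freq_scale) >> 10;	/* frequency invariance */
	return delta_ns;	/* e.g. 4000000 ns -> 1000000 ns */
}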
@@ -111,7 +135,8 @@
 	 * rq's clock_task.
 	 */
 	if (util_sum >= divider)
-		rq->lost_idle_time += rq_clock_task(rq) - rq->clock_pelt;
+		rq->lost_idle_time += rq_clock_task_mult(rq) -
+				      rq->clock_pelt;
 }
 
 static inline u64 rq_clock_pelt(struct rq *rq)
@@ -127,9 +152,9 @@
 static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq)
 {
 	if (unlikely(cfs_rq->throttle_count))
-		return cfs_rq->throttled_clock_task - cfs_rq->throttled_clock_task_time;
+		return cfs_rq->throttled_clock_pelt - cfs_rq->throttled_clock_pelt_time;
 
-	return rq_clock_pelt(rq_of(cfs_rq)) - cfs_rq->throttled_clock_task_time;
+	return rq_clock_pelt(rq_of(cfs_rq)) - cfs_rq->throttled_clock_pelt_time;
 }
 #else
 static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq)
@@ -159,6 +184,17 @@
 }
 
 static inline int
+update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity)
+{
+	return 0;
+}
+
+static inline u64 thermal_load_avg(struct rq *rq)
+{
+	return 0;
+}
+
+static inline int
 update_irq_load_avg(struct rq *rq, u64 running)
 {
 	return 0;