```diff
@@ -7,6 +7,26 @@
 int update_rt_rq_load_avg(u64 now, struct rq *rq, int running);
 int update_dl_rq_load_avg(u64 now, struct rq *rq, int running);
 
+#ifdef CONFIG_SCHED_THERMAL_PRESSURE
+int update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity);
+
+static inline u64 thermal_load_avg(struct rq *rq)
+{
+	return READ_ONCE(rq->avg_thermal.load_avg);
+}
+#else
+static inline int
+update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity)
+{
+	return 0;
+}
+
+static inline u64 thermal_load_avg(struct rq *rq)
+{
+	return 0;
+}
+#endif
+
 #ifdef CONFIG_HAVE_SCHED_AVG_IRQ
 int update_irq_load_avg(struct rq *rq, u64 running);
 #else
```
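With CONFIG_SCHED_THERMAL_PRESSURE enabled, the runqueue carries a PELT-averaged thermal pressure signal (rq->avg_thermal) that thermal_load_avg() exposes; without the option both helpers collapse into zero-returning stubs, so callers need no #ifdefs. The sketch below is only an illustration of how a consumer might discount that signal from the CPU's maximum capacity, loosely modeled on what scale_rt_capacity() does upstream with the RT/DL/IRQ averages; the helper name is made up.

```c
/* Hypothetical consumer of the new signal, not part of the patch. */
static unsigned long capacity_after_thermal(struct rq *rq)
{
	unsigned long max = arch_scale_cpu_capacity(cpu_of(rq));
	unsigned long thermal = thermal_load_avg(rq);	/* 0 when the option is off */

	if (unlikely(thermal >= max))
		return 1;	/* never report a zero capacity */

	return max - thermal;
}
```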
---|
```diff
@@ -17,14 +37,12 @@
 }
 #endif
 
-/*
- * When a task is dequeued, its estimated utilization should not be update if
- * its util_avg has not been updated at least once.
- * This flag is used to synchronize util_avg updates with util_est updates.
- * We map this information into the LSB bit of the utilization saved at
- * dequeue time (i.e. util_est.dequeued).
- */
-#define UTIL_AVG_UNCHANGED 0x1
+#define PELT_MIN_DIVIDER (LOAD_AVG_MAX - 1024)
+
+static inline u32 get_pelt_divider(struct sched_avg *avg)
+{
+	return PELT_MIN_DIVIDER + avg->period_contrib;
+}
 
 static inline void cfs_se_util_change(struct sched_avg *avg)
 {
```
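The PELT geometric series converges to LOAD_AVG_MAX for a permanently busy signal, but the not-yet-complete 1024us period only contributes avg->period_contrib of its possible 1024, so the divider used when turning a *_sum into a *_avg is LOAD_AVG_MAX - 1024 + period_contrib. get_pelt_divider() centralizes that expression instead of open-coding it at every conversion site. For illustration, a conversion then looks like the sketch below (the wrapper name is made up; the division mirrors the *_sum / divider pattern in pelt.c):

```c
/* Illustration only: fold a running util_sum into util_avg. */
static inline unsigned long util_avg_of(struct sched_avg *sa)
{
	u32 divider = get_pelt_divider(sa);	/* >= PELT_MIN_DIVIDER */

	return sa->util_sum / divider;
}
```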
---|
```diff
@@ -33,7 +51,7 @@
 	if (!sched_feat(UTIL_EST))
 		return;
 
-	/* Avoid store if the flag has been already set */
+	/* Avoid store if the flag has been already reset */
 	enqueued = avg->util_est.enqueued;
 	if (!(enqueued & UTIL_AVG_UNCHANGED))
 		return;
@@ -42,6 +60,8 @@
 	enqueued &= ~UTIL_AVG_UNCHANGED;
 	WRITE_ONCE(avg->util_est.enqueued, enqueued);
 }
+
+extern unsigned int sched_pelt_lshift;
 
 /*
  * The clock_pelt scales the time to reflect the effective amount of
@@ -57,9 +77,13 @@
  */
 static inline void update_rq_clock_pelt(struct rq *rq, s64 delta)
 {
+	delta <<= READ_ONCE(sched_pelt_lshift);
+
+	per_cpu(clock_task_mult, rq->cpu) += delta;
+
 	if (unlikely(is_idle_task(rq->curr))) {
 		/* The rq is idle, we can sync to clock_task */
-		rq->clock_pelt = rq_clock_task(rq);
+		rq->clock_pelt = rq_clock_task_mult(rq);
 		return;
 	}
 
```
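The sched_pelt_lshift knob scales PELT time itself: the task-clock delta is left-shifted before being fed into the averages and accumulated into a per-CPU clock_task_mult, so PELT time advances 2^shift times faster than wall time and load/utilization ramp up and decay correspondingly faster. Concretely, the standard 32 ms half-life behaves like roughly 16 ms at shift 1 and 8 ms at shift 2; a hypothetical helper (not in the patch) makes the relationship explicit:

```c
/* Hypothetical helper: effective PELT half-life for a given shift. */
static inline unsigned int pelt_halflife_ms(unsigned int lshift)
{
	return 32U >> lshift;	/* 32 ms at 0, 16 ms at 1, 8 ms at 2 */
}
```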
---|
```diff
@@ -79,7 +103,7 @@
 	 * Scale the elapsed time to reflect the real amount of
 	 * computation
 	 */
-	delta = cap_scale(delta, arch_scale_cpu_capacity(NULL, cpu_of(rq)));
+	delta = cap_scale(delta, arch_scale_cpu_capacity(cpu_of(rq)));
 	delta = cap_scale(delta, arch_scale_freq_capacity(cpu_of(rq)));
 
 	rq->clock_pelt += delta;
```
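This hunk also adapts to arch_scale_cpu_capacity() dropping its unused sched_domain argument. Functionally, the delta is made invariant of CPU micro-architecture and current frequency: cap_scale() is the usual "(v * cap) >> SCHED_CAPACITY_SHIFT" fixed-point helper, so the two calls compose into delta * cpu_cap/1024 * freq_cap/1024. As a worked example with illustrative numbers, a CPU of capacity 512 running at half of its maximum frequency turns 8 ms of wall time into 8 * 512/1024 * 512/1024 = 2 ms of PELT time:

```c
/* Illustration only: the composed capacity/frequency scaling. */
static inline s64 pelt_invariant_delta(s64 delta, unsigned long cpu_cap,
				       unsigned long freq_cap)
{
	delta = cap_scale(delta, cpu_cap);	/* CPU (uarch) invariance */
	delta = cap_scale(delta, freq_cap);	/* frequency invariance  */

	return delta;	/* 8 ms -> 2 ms for cpu_cap = freq_cap = 512 */
}
```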
---|
```diff
@@ -111,7 +135,8 @@
 	 * rq's clock_task.
 	 */
 	if (util_sum >= divider)
-		rq->lost_idle_time += rq_clock_task(rq) - rq->clock_pelt;
+		rq->lost_idle_time += rq_clock_task_mult(rq) -
+					rq->clock_pelt;
 }
 
 static inline u64 rq_clock_pelt(struct rq *rq)
@@ -127,9 +152,9 @@
 static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq)
 {
 	if (unlikely(cfs_rq->throttle_count))
-		return cfs_rq->throttled_clock_task - cfs_rq->throttled_clock_task_time;
+		return cfs_rq->throttled_clock_pelt - cfs_rq->throttled_clock_pelt_time;
 
-	return rq_clock_pelt(rq_of(cfs_rq)) - cfs_rq->throttled_clock_task_time;
+	return rq_clock_pelt(rq_of(cfs_rq)) - cfs_rq->throttled_clock_pelt_time;
 }
 #else
 static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq)
```
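The throttled_clock_task* fields are renamed to throttled_clock_pelt* to match the clock they actually snapshot; the behaviour is unchanged: a throttled cfs_rq's PELT clock stands still, and the total time spent throttled is subtracted once it runs again. A worked example with made-up numbers:

```c
/*
 * Suppose the cfs_rq is throttled while rq_clock_pelt() reads 1000 and
 * it has already accumulated 300 of throttled time:
 *
 *	throttled_clock_pelt      = 1000
 *	throttled_clock_pelt_time = 300
 *
 * While throttled, cfs_rq_clock_pelt() keeps returning 1000 - 300 = 700.
 * If this throttle lasts 50, the accumulated time becomes 350 at
 * unthrottle and the clock resumes from rq_clock_pelt() - 350 = 700,
 * so PELT never sees a jump.
 */
```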
---|
```diff
@@ -159,6 +184,17 @@
 }
 
 static inline int
+update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity)
+{
+	return 0;
+}
+
+static inline u64 thermal_load_avg(struct rq *rq)
+{
+	return 0;
+}
+
+static inline int
 update_irq_load_avg(struct rq *rq, u64 running)
 {
 	return 0;
```