hc
2024-02-20 102a0743326a03cd1a1202ceda21e175b7d3575c
kernel/kernel/sched/fair.c
@@ -3938,14 +3938,16 @@
 }
 
 #ifdef CONFIG_UCLAMP_TASK
-static inline unsigned long uclamp_task_util(struct task_struct *p)
+static inline unsigned long uclamp_task_util(struct task_struct *p,
+					     unsigned long uclamp_min,
+					     unsigned long uclamp_max)
 {
-	return clamp(task_util_est(p),
-		     uclamp_eff_value(p, UCLAMP_MIN),
-		     uclamp_eff_value(p, UCLAMP_MAX));
+	return clamp(task_util_est(p), uclamp_min, uclamp_max);
 }
 #else
-static inline unsigned long uclamp_task_util(struct task_struct *p)
+static inline unsigned long uclamp_task_util(struct task_struct *p,
+					     unsigned long uclamp_min,
+					     unsigned long uclamp_max)
 {
 	return task_util_est(p);
 }
@@ -4089,9 +4091,135 @@
 	trace_sched_util_est_se_tp(&p->se);
 }
 
-static inline int task_fits_capacity(struct task_struct *p, long capacity)
+static inline int util_fits_cpu(unsigned long util,
+				unsigned long uclamp_min,
+				unsigned long uclamp_max,
+				int cpu)
 {
-	return fits_capacity(uclamp_task_util(p), capacity);
+	unsigned long capacity_orig, capacity_orig_thermal;
+	unsigned long capacity = capacity_of(cpu);
+	bool fits, uclamp_max_fits;
+
+	/*
+	 * Check if the real util fits without any uclamp boost/cap applied.
+	 */
+	fits = fits_capacity(util, capacity);
+
+	if (!uclamp_is_used())
+		return fits;
+
+	/*
+	 * We must use capacity_orig_of() for comparing against uclamp_min and
+	 * uclamp_max. We only care about capacity pressure (by using
+	 * capacity_of()) for comparing against the real util.
+	 *
+	 * If a task is boosted to 1024 for example, we don't want a tiny
+	 * pressure to skew the check whether it fits a CPU or not.
+	 *
+	 * Similarly if a task is capped to capacity_orig_of(little_cpu), it
+	 * should fit a little cpu even if there's some pressure.
+	 *
+	 * Only exception is for thermal pressure since it has a direct impact
+	 * on available OPP of the system.
+	 *
+	 * We honour it for uclamp_min only as a drop in performance level
+	 * could result in not getting the requested minimum performance level.
+	 *
+	 * For uclamp_max, we can tolerate a drop in performance level as the
+	 * goal is to cap the task. So it's okay if it's getting less.
+	 *
+	 * In case of capacity inversion, which is not handled yet, we should
+	 * honour the inverted capacity for both uclamp_min and uclamp_max all
+	 * the time.
+	 */
+	capacity_orig = capacity_orig_of(cpu);
+	capacity_orig_thermal = capacity_orig - arch_scale_thermal_pressure(cpu);
+
+	/*
+	 * We want to force a task to fit a cpu as implied by uclamp_max.
+	 * But we do have some corner cases to cater for..
+	 *
+	 *
+	 *                                 C=z
+	 *   |                             ___
+	 *   |                  C=y       |   |
+	 *   |_ _ _ _ _ _ _ _ _ ___ _ _ _ | _ | _ _ _ _ _  uclamp_max
+	 *   |      C=x        |   |      |   |
+	 *   |      ___        |   |      |   |
+	 *   |     |   |       |   |      |   |    (util somewhere in this region)
+	 *   |     |   |       |   |      |   |
+	 *   |     |   |       |   |      |   |
+	 *   +----------------------------------------
+	 *         cpu0        cpu1       cpu2
+	 *
+	 * In the above example if a task is capped to a specific performance
+	 * point, y, then when:
+	 *
+	 *   * util = 80% of x then it does not fit on cpu0 and should migrate
+	 *     to cpu1
+	 *   * util = 80% of y then it is forced to fit on cpu1 to honour
+	 *     uclamp_max request.
+	 *
+	 * which is what we're enforcing here. A task always fits if
+	 * uclamp_max <= capacity_orig. But when uclamp_max > capacity_orig,
+	 * the normal upmigration rules should withhold still.
+	 *
+	 * Only exception is when we are on max capacity, then we need to be
+	 * careful not to block overutilized state. This is so because:
+	 *
+	 *   1. There's no concept of capping at max_capacity! We can't go
+	 *      beyond this performance level anyway.
+	 *   2. The system is being saturated when we're operating near
+	 *      max capacity, it doesn't make sense to block overutilized.
+	 */
+	uclamp_max_fits = (capacity_orig == SCHED_CAPACITY_SCALE) && (uclamp_max == SCHED_CAPACITY_SCALE);
+	uclamp_max_fits = !uclamp_max_fits && (uclamp_max <= capacity_orig);
+	fits = fits || uclamp_max_fits;
+
+	/*
+	 *
+	 *                                 C=z
+	 *   |                             ___       (region a, capped, util >= uclamp_max)
+	 *   |                  C=y       |   |
+	 *   |_ _ _ _ _ _ _ _ _ ___ _ _ _ | _ | _ _ _ _ _ uclamp_max
+	 *   |      C=x        |   |      |   |
+	 *   |      ___        |   |      |   |      (region b, uclamp_min <= util <= uclamp_max)
+	 *   |_ _ _|_ _|_ _ _ _| _ | _ _ _| _ | _ _ _ _ _ uclamp_min
+	 *   |     |   |       |   |      |   |
+	 *   |     |   |       |   |      |   |      (region c, boosted, util < uclamp_min)
+	 *   +----------------------------------------
+	 *         cpu0        cpu1       cpu2
+	 *
+	 * a) If util > uclamp_max, then we're capped, we don't care about
+	 *    actual fitness value here. We only care if uclamp_max fits
+	 *    capacity without taking margin/pressure into account.
+	 *    See comment above.
+	 *
+	 * b) If uclamp_min <= util <= uclamp_max, then the normal
+	 *    fits_capacity() rules apply. Except we need to ensure that we
+	 *    enforce we remain within uclamp_max, see comment above.
+	 *
+	 * c) If util < uclamp_min, then we are boosted. Same as (b) but we
+	 *    need to take into account the boosted value fits the CPU without
+	 *    taking margin/pressure into account.
+	 *
+	 * Cases (a) and (b) are handled in the 'fits' variable already. We
+	 * just need to consider an extra check for case (c) after ensuring we
+	 * handle the case uclamp_min > uclamp_max.
+	 */
+	uclamp_min = min(uclamp_min, uclamp_max);
+	if (util < uclamp_min && capacity_orig != SCHED_CAPACITY_SCALE)
+		fits = fits && (uclamp_min <= capacity_orig_thermal);
+
+	return fits;
+}
+
+static inline int task_fits_cpu(struct task_struct *p, int cpu)
+{
+	unsigned long uclamp_min = uclamp_eff_value(p, UCLAMP_MIN);
+	unsigned long uclamp_max = uclamp_eff_value(p, UCLAMP_MAX);
+	unsigned long util = task_util_est(p);
+	return util_fits_cpu(util, uclamp_min, uclamp_max, cpu);
 }
 
 static inline void update_misfit_status(struct task_struct *p, struct rq *rq)
@@ -4107,7 +4235,7 @@
 		return;
 	}
 
-	if (task_fits_capacity(p, capacity_of(cpu_of(rq)))) {
+	if (task_fits_cpu(p, cpu_of(rq))) {
 		rq->misfit_task_load = 0;
 		return;
 	}
@@ -4168,6 +4296,29 @@
 #endif
 }
 
+static inline bool entity_is_long_sleeper(struct sched_entity *se)
+{
+	struct cfs_rq *cfs_rq;
+	u64 sleep_time;
+
+	if (se->exec_start == 0)
+		return false;
+
+	cfs_rq = cfs_rq_of(se);
+
+	sleep_time = rq_clock_task(rq_of(cfs_rq));
+
+	/* Happen while migrating because of clock task divergence */
+	if (sleep_time <= se->exec_start)
+		return false;
+
+	sleep_time -= se->exec_start;
+	if (sleep_time > ((1ULL << 63) / scale_load_down(NICE_0_LOAD)))
+		return true;
+
+	return false;
+}
+
 static void
 place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
 {
@@ -4196,8 +4347,29 @@
 		vruntime -= thresh;
 	}
 
-	/* ensure we never gain time by being placed backwards. */
-	se->vruntime = max_vruntime(se->vruntime, vruntime);
+	/*
+	 * Pull vruntime of the entity being placed to the base level of
+	 * cfs_rq, to prevent boosting it if placed backwards.
+	 * However, min_vruntime can advance much faster than real time, with
+	 * the extreme being when an entity with the minimal weight always runs
+	 * on the cfs_rq. If the waking entity slept for a long time, its
+	 * vruntime difference from min_vruntime may overflow s64 and their
+	 * comparison may get inversed, so ignore the entity's original
+	 * vruntime in that case.
+	 * The maximal vruntime speedup is given by the ratio of normal to
+	 * minimal weight: scale_load_down(NICE_0_LOAD) / MIN_SHARES.
+	 * When placing a migrated waking entity, its exec_start has been set
+	 * from a different rq. In order to take into account a possible
+	 * divergence between new and prev rq's clocks task because of irq and
+	 * stolen time, we take an additional margin.
+	 * So, cutting off on the sleep time of
+	 *     2^63 / scale_load_down(NICE_0_LOAD) ~ 104 days
+	 * should be safe.
+	 */
+	if (entity_is_long_sleeper(se))
+		se->vruntime = vruntime;
+	else
+		se->vruntime = max_vruntime(se->vruntime, vruntime);
 	trace_android_rvh_place_entity(cfs_rq, se, initial, vruntime);
 }
 
@@ -4294,6 +4466,9 @@
 
 	if (flags & ENQUEUE_WAKEUP)
 		place_entity(cfs_rq, se, 0);
+	/* Entity has migrated, no longer consider this task hot */
+	if (flags & ENQUEUE_MIGRATED)
+		se->exec_start = 0;
 
 	check_schedstat_required();
 	update_stats_enqueue(cfs_rq, se, flags);
@@ -4431,7 +4606,7 @@
 	if (skip_preempt)
 		return;
 	if (delta_exec > ideal_runtime) {
-		resched_curr_lazy(rq_of(cfs_rq));
+		resched_curr(rq_of(cfs_rq));
 		/*
 		 * The current task ran long enough, ensure it doesn't get
 		 * re-elected due to buddy favours.
@@ -4455,7 +4630,7 @@
 		return;
 
 	if (delta > ideal_runtime)
-		resched_curr_lazy(rq_of(cfs_rq));
+		resched_curr(rq_of(cfs_rq));
 }
 
 void set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
@@ -4604,7 +4779,7 @@
 	 * validating it and just reschedule.
	 */
 	if (queued) {
-		resched_curr_lazy(rq_of(cfs_rq));
+		resched_curr(rq_of(cfs_rq));
 		return;
 	}
 	/*
@@ -4741,7 +4916,7 @@
 	 * hierarchy can be throttled
 	 */
 	if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
-		resched_curr_lazy(rq_of(cfs_rq));
+		resched_curr(rq_of(cfs_rq));
 }
 
 static __always_inline
@@ -5476,7 +5651,7 @@
 
 	if (delta < 0) {
 		if (rq->curr == p)
-			resched_curr_lazy(rq);
+			resched_curr(rq);
 		return;
 	}
 	hrtick_start(rq, delta);
@@ -5514,13 +5689,15 @@
 
 static inline bool cpu_overutilized(int cpu)
 {
+	unsigned long rq_util_min = uclamp_rq_get(cpu_rq(cpu), UCLAMP_MIN);
+	unsigned long rq_util_max = uclamp_rq_get(cpu_rq(cpu), UCLAMP_MAX);
 	int overutilized = -1;
 
 	trace_android_rvh_cpu_overutilized(cpu, &overutilized);
 	if (overutilized != -1)
 		return overutilized;
 
-	return !fits_capacity(cpu_util(cpu), capacity_of(cpu));
+	return !util_fits_cpu(cpu_util(cpu), rq_util_min, rq_util_max, cpu);
 }
 
 static inline void update_overutilized_status(struct rq *rq)
@@ -6262,21 +6439,23 @@
 static int
 select_idle_capacity(struct task_struct *p, struct sched_domain *sd, int target)
 {
-	unsigned long task_util, best_cap = 0;
+	unsigned long task_util, util_min, util_max, best_cap = 0;
 	int cpu, best_cpu = -1;
 	struct cpumask *cpus;
 
 	cpus = this_cpu_cpumask_var_ptr(select_idle_mask);
 	cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr);
 
-	task_util = uclamp_task_util(p);
+	task_util = task_util_est(p);
+	util_min = uclamp_eff_value(p, UCLAMP_MIN);
+	util_max = uclamp_eff_value(p, UCLAMP_MAX);
 
 	for_each_cpu_wrap(cpu, cpus, target) {
 		unsigned long cpu_cap = capacity_of(cpu);
 
 		if (!available_idle_cpu(cpu) && !sched_idle_cpu(cpu))
 			continue;
-		if (fits_capacity(task_util, cpu_cap))
+		if (util_fits_cpu(task_util, util_min, util_max, cpu))
 			return cpu;
 
 		if (cpu_cap > best_cap) {
@@ -6288,10 +6467,13 @@
 	return best_cpu;
 }
 
-static inline bool asym_fits_capacity(int task_util, int cpu)
+static inline bool asym_fits_cpu(unsigned long util,
+				 unsigned long util_min,
+				 unsigned long util_max,
+				 int cpu)
 {
 	if (static_branch_unlikely(&sched_asym_cpucapacity))
-		return fits_capacity(task_util, capacity_of(cpu));
+		return util_fits_cpu(util, util_min, util_max, cpu);
 
 	return true;
 }
@@ -6302,7 +6484,7 @@
 static int select_idle_sibling(struct task_struct *p, int prev, int target)
 {
 	struct sched_domain *sd;
-	unsigned long task_util;
+	unsigned long task_util, util_min, util_max;
 	int i, recent_used_cpu;
 
 	/*
@@ -6311,11 +6493,13 @@
 	 */
 	if (static_branch_unlikely(&sched_asym_cpucapacity)) {
 		sync_entity_load_avg(&p->se);
-		task_util = uclamp_task_util(p);
+		task_util = task_util_est(p);
+		util_min = uclamp_eff_value(p, UCLAMP_MIN);
+		util_max = uclamp_eff_value(p, UCLAMP_MAX);
 	}
 
 	if ((available_idle_cpu(target) || sched_idle_cpu(target)) &&
-	    asym_fits_capacity(task_util, target))
+	    asym_fits_cpu(task_util, util_min, util_max, target))
 		return target;
 
 	/*
@@ -6323,7 +6507,7 @@
 	 */
 	if (prev != target && cpus_share_cache(prev, target) &&
 	    (available_idle_cpu(prev) || sched_idle_cpu(prev)) &&
-	    asym_fits_capacity(task_util, prev))
+	    asym_fits_cpu(task_util, util_min, util_max, prev))
 		return prev;
 
 	/*
@@ -6338,7 +6522,7 @@
 	    in_task() &&
 	    prev == smp_processor_id() &&
 	    this_rq()->nr_running <= 1 &&
-	    asym_fits_capacity(task_util, prev)) {
+	    asym_fits_cpu(task_util, util_min, util_max, prev)) {
 		return prev;
 	}
 
@@ -6349,7 +6533,7 @@
 	    cpus_share_cache(recent_used_cpu, target) &&
 	    (available_idle_cpu(recent_used_cpu) || sched_idle_cpu(recent_used_cpu)) &&
 	    cpumask_test_cpu(p->recent_used_cpu, p->cpus_ptr) &&
-	    asym_fits_capacity(task_util, recent_used_cpu)) {
+	    asym_fits_cpu(task_util, util_min, util_max, recent_used_cpu)) {
 		/*
 		 * Replace recent_used_cpu with prev as it is a potential
 		 * candidate for the next wake:
@@ -6682,6 +6866,8 @@
 {
 	unsigned long prev_delta = ULONG_MAX, best_delta = ULONG_MAX;
 	unsigned long best_delta2 = ULONG_MAX;
+	unsigned long p_util_min = uclamp_is_used() ? uclamp_eff_value(p, UCLAMP_MIN) : 0;
+	unsigned long p_util_max = uclamp_is_used() ? uclamp_eff_value(p, UCLAMP_MAX) : 1024;
 	struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
 	int max_spare_cap_cpu_ls = prev_cpu, best_idle_cpu = -1;
 	unsigned long max_spare_cap_ls = 0, target_cap;
@@ -6707,7 +6893,7 @@
 	cpu = smp_processor_id();
 	if (sync && cpu_rq(cpu)->nr_running == 1 &&
 	    cpumask_test_cpu(cpu, p->cpus_ptr) &&
-	    task_fits_capacity(p, capacity_of(cpu))) {
+	    task_fits_cpu(p, cpu)) {
 		rcu_read_unlock();
 		return cpu;
 	}
@@ -6722,7 +6908,7 @@
 	if (!sd)
 		goto fail;
 
-	if (!task_util_est(p))
+	if (!uclamp_task_util(p, p_util_min, p_util_max))
 		goto unlock;
 
 	latency_sensitive = uclamp_latency_sensitive(p);
@@ -6731,6 +6917,8 @@
 
 	for (; pd; pd = pd->next) {
 		unsigned long cur_delta, spare_cap, max_spare_cap = 0;
+		unsigned long rq_util_min, rq_util_max;
+		unsigned long util_min, util_max;
 		unsigned long base_energy_pd;
 		int max_spare_cap_cpu = -1;
 
@@ -6754,8 +6942,26 @@
 			 * much capacity we can get out of the CPU; this is
 			 * aligned with schedutil_cpu_util().
 			 */
-			util = uclamp_rq_util_with(cpu_rq(cpu), util, p);
-			if (!fits_capacity(util, cpu_cap))
+			if (uclamp_is_used()) {
+				if (uclamp_rq_is_idle(cpu_rq(cpu))) {
+					util_min = p_util_min;
+					util_max = p_util_max;
+				} else {
+					/*
+					 * Open code uclamp_rq_util_with() except for
+					 * the clamp() part. Ie: apply max aggregation
+					 * only. util_fits_cpu() logic requires to
+					 * operate on non clamped util but must use the
+					 * max-aggregated uclamp_{min, max}.
+					 */
+					rq_util_min = uclamp_rq_get(cpu_rq(cpu), UCLAMP_MIN);
+					rq_util_max = uclamp_rq_get(cpu_rq(cpu), UCLAMP_MAX);
+
+					util_min = max(rq_util_min, p_util_min);
+					util_max = max(rq_util_max, p_util_max);
+				}
+			}
+			if (!util_fits_cpu(util, util_min, util_max, cpu))
 				continue;
 
 			/* Always use prev_cpu as a candidate. */
@@ -7035,9 +7241,6 @@
 	/* Tell new CPU we are migrated */
 	p->se.avg.last_update_time = 0;
 
-	/* We have migrated, no longer consider this task hot */
-	p->se.exec_start = 0;
-
 	update_scan_period(p, new_cpu);
 }
 
@@ -7211,7 +7414,7 @@
 		return;
 
 preempt:
-	resched_curr_lazy(rq);
+	resched_curr(rq);
 	/*
 	 * Only set the backward buddy when the current task is still
 	 * on the rq. This can happen when a wakeup gets interleaved
@@ -7983,7 +8186,7 @@
 
 		case migrate_misfit:
 			/* This is not a misfit task */
-			if (task_fits_capacity(p, capacity_of(env->src_cpu)))
+			if (task_fits_cpu(p, env->src_cpu))
 				goto next;
 
 			env->imbalance = 0;
@@ -8926,6 +9129,10 @@
 
 	memset(sgs, 0, sizeof(*sgs));
 
+	/* Assume that task can't fit any CPU of the group */
+	if (sd->flags & SD_ASYM_CPUCAPACITY)
+		sgs->group_misfit_task_load = 1;
+
 	for_each_cpu(i, sched_group_span(group)) {
 		struct rq *rq = cpu_rq(i);
 		unsigned int local;
@@ -8945,12 +9152,12 @@
 		if (!nr_running && idle_cpu_without(i, p))
 			sgs->idle_cpus++;
 
-	}
+		/* Check if task fits in the CPU */
+		if (sd->flags & SD_ASYM_CPUCAPACITY &&
+		    sgs->group_misfit_task_load &&
+		    task_fits_cpu(p, i))
+			sgs->group_misfit_task_load = 0;
 
-	/* Check if task fits in the group */
-	if (sd->flags & SD_ASYM_CPUCAPACITY &&
-	    !task_fits_capacity(p, group->sgc->max_capacity)) {
-		sgs->group_misfit_task_load = 1;
 	}
 
 	sgs->group_capacity = group->sgc->capacity;
@@ -9395,8 +9602,6 @@
 		local->avg_load = (local->group_load * SCHED_CAPACITY_SCALE) /
 				  local->group_capacity;
 
-		sds->avg_load = (sds->total_load * SCHED_CAPACITY_SCALE) /
-				sds->total_capacity;
 		/*
 		 * If the local group is more loaded than the selected
 		 * busiest group don't try to pull any tasks.
@@ -9405,6 +9610,19 @@
 			env->imbalance = 0;
 			return;
 		}
+
+		sds->avg_load = (sds->total_load * SCHED_CAPACITY_SCALE) /
+				sds->total_capacity;
+
+		/*
+		 * If the local group is more loaded than the average system
+		 * load, don't try to pull any tasks.
+		 */
+		if (local->avg_load >= sds->avg_load) {
+			env->imbalance = 0;
+			return;
+		}
+
 	}
 
 	/*
@@ -9837,7 +10055,7 @@
 		.sd		= sd,
 		.dst_cpu	= this_cpu,
 		.dst_rq		= this_rq,
-		.dst_grpmask    = sched_group_span(sd->groups),
+		.dst_grpmask    = group_balance_mask(sd->groups),
 		.idle		= idle,
 		.loop_break	= sched_nr_migrate_break,
 		.cpus		= cpus,
@@ -11060,7 +11278,7 @@
 		 * 'current' within the tree based on its new key value.
 		 */
 		swap(curr->vruntime, se->vruntime);
-		resched_curr_lazy(rq);
+		resched_curr(rq);
 	}
 
 	se->vruntime -= cfs_rq->min_vruntime;
@@ -11087,7 +11305,7 @@
 	 */
 	if (rq->curr == p) {
 		if (p->prio > oldprio)
-			resched_curr_lazy(rq);
+			resched_curr(rq);
 	} else
 		check_preempt_curr(rq, p, 0);
 }