2023-11-06 e3e12f52b214121840b44c91de5b3e5af5d3eb84
kernel/kernel/sched/fair.c
@@ -1705,7 +1705,7 @@
 	 * be incurred if the tasks were swapped.
 	 */
 	/* Skip this swap candidate if cannot move to the source cpu */
-	if (!cpumask_test_cpu(env->src_cpu, &cur->cpus_allowed))
+	if (!cpumask_test_cpu(env->src_cpu, cur->cpus_ptr))
 		goto unlock;
 
 	/*
@@ -1803,7 +1803,7 @@
 
 	for_each_cpu(cpu, cpumask_of_node(env->dst_nid)) {
 		/* Skip this CPU if the source task cannot migrate */
-		if (!cpumask_test_cpu(cpu, &env->p->cpus_allowed))
+		if (!cpumask_test_cpu(cpu, env->p->cpus_ptr))
 			continue;
 
 		env->dst_cpu = cpu;
@@ -4210,7 +4210,7 @@
 	ideal_runtime = sched_slice(cfs_rq, curr);
 	delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
 	if (delta_exec > ideal_runtime) {
-		resched_curr(rq_of(cfs_rq));
+		resched_curr_lazy(rq_of(cfs_rq));
 		/*
 		 * The current task ran long enough, ensure it doesn't get
 		 * re-elected due to buddy favours.
@@ -4234,7 +4234,7 @@
 		return;
 
 	if (delta > ideal_runtime)
-		resched_curr(rq_of(cfs_rq));
+		resched_curr_lazy(rq_of(cfs_rq));
 }
 
 static void
@@ -4376,7 +4376,7 @@
 	 * validating it and just reschedule.
 	 */
 	if (queued) {
-		resched_curr(rq_of(cfs_rq));
+		resched_curr_lazy(rq_of(cfs_rq));
 		return;
 	}
 	/*
@@ -4510,7 +4510,7 @@
 	 * hierarchy can be throttled
 	 */
 	if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
-		resched_curr(rq_of(cfs_rq));
+		resched_curr_lazy(rq_of(cfs_rq));
 }
 
 static __always_inline
@@ -4703,7 +4703,7 @@
 		struct rq *rq = rq_of(cfs_rq);
 		struct rq_flags rf;
 
-		rq_lock(rq, &rf);
+		rq_lock_irqsave(rq, &rf);
 		if (!cfs_rq_throttled(cfs_rq))
 			goto next;
 
@@ -4722,7 +4722,7 @@
 			unthrottle_cfs_rq(cfs_rq);
 
 next:
-		rq_unlock(rq, &rf);
+		rq_unlock_irqrestore(rq, &rf);
 
 		if (!remaining)
 			break;
@@ -4738,7 +4738,7 @@
  * period the timer is deactivated until scheduling resumes; cfs_b->idle is
  * used to track this state.
  */
-static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
+static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun, unsigned long flags)
 {
 	u64 runtime;
 	int throttled;
@@ -4778,10 +4778,10 @@
 	while (throttled && cfs_b->runtime > 0 && !cfs_b->distribute_running) {
 		runtime = cfs_b->runtime;
 		cfs_b->distribute_running = 1;
-		raw_spin_unlock(&cfs_b->lock);
+		raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
 		/* we can't nest cfs_b->lock while distributing bandwidth */
 		runtime = distribute_cfs_runtime(cfs_b, runtime);
-		raw_spin_lock(&cfs_b->lock);
+		raw_spin_lock_irqsave(&cfs_b->lock, flags);
 
 		cfs_b->distribute_running = 0;
 		throttled = !list_empty(&cfs_b->throttled_cfs_rq);
@@ -4889,16 +4889,17 @@
 static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
 {
 	u64 runtime = 0, slice = sched_cfs_bandwidth_slice();
+	unsigned long flags;
 
 	/* confirm we're still not at a refresh boundary */
-	raw_spin_lock(&cfs_b->lock);
+	raw_spin_lock_irqsave(&cfs_b->lock, flags);
 	if (cfs_b->distribute_running) {
-		raw_spin_unlock(&cfs_b->lock);
+		raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
 		return;
 	}
 
 	if (runtime_refresh_within(cfs_b, min_bandwidth_expiration)) {
-		raw_spin_unlock(&cfs_b->lock);
+		raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
 		return;
 	}
 
@@ -4908,17 +4909,17 @@
 	if (runtime)
 		cfs_b->distribute_running = 1;
 
-	raw_spin_unlock(&cfs_b->lock);
+	raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
 
 	if (!runtime)
 		return;
 
 	runtime = distribute_cfs_runtime(cfs_b, runtime);
 
-	raw_spin_lock(&cfs_b->lock);
+	raw_spin_lock_irqsave(&cfs_b->lock, flags);
 	cfs_b->runtime -= min(runtime, cfs_b->runtime);
 	cfs_b->distribute_running = 0;
-	raw_spin_unlock(&cfs_b->lock);
+	raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
 }
 
 /*
@@ -4998,11 +4999,12 @@
 {
 	struct cfs_bandwidth *cfs_b =
 		container_of(timer, struct cfs_bandwidth, period_timer);
+	unsigned long flags;
 	int overrun;
 	int idle = 0;
 	int count = 0;
 
-	raw_spin_lock(&cfs_b->lock);
+	raw_spin_lock_irqsave(&cfs_b->lock, flags);
 	for (;;) {
 		overrun = hrtimer_forward_now(timer, cfs_b->period);
 		if (!overrun)
@@ -5038,11 +5040,11 @@
 			count = 0;
 		}
 
-		idle = do_sched_cfs_period_timer(cfs_b, overrun);
+		idle = do_sched_cfs_period_timer(cfs_b, overrun, flags);
 	}
 	if (idle)
 		cfs_b->period_active = 0;
-	raw_spin_unlock(&cfs_b->lock);
+	raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
 
 	return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
 }
@@ -5216,7 +5218,7 @@
 
 		if (delta < 0) {
 			if (rq->curr == p)
-				resched_curr(rq);
+				resched_curr_lazy(rq);
 			return;
 		}
 		hrtick_start(rq, delta);
@@ -6045,7 +6047,7 @@
 
 		/* Skip over this group if it has no CPUs allowed */
 		if (!cpumask_intersects(sched_group_span(group),
-					&p->cpus_allowed))
+					p->cpus_ptr))
 			continue;
 
 #ifdef CONFIG_ROCKCHIP_SCHED_PERFORMANCE_BIAS
@@ -6191,7 +6193,7 @@
 		return cpumask_first(sched_group_span(group));
 
 	/* Traverse only the allowed CPUs */
-	for_each_cpu_and(i, sched_group_span(group), &p->cpus_allowed) {
+	for_each_cpu_and(i, sched_group_span(group), p->cpus_ptr) {
 		if (available_idle_cpu(i)) {
 			struct rq *rq = cpu_rq(i);
 			struct cpuidle_state *idle = idle_get_state(rq);
@@ -6231,7 +6233,7 @@
 {
 	int new_cpu = cpu;
 
-	if (!cpumask_intersects(sched_domain_span(sd), &p->cpus_allowed))
+	if (!cpumask_intersects(sched_domain_span(sd), p->cpus_ptr))
 		return prev_cpu;
 
 	/*
@@ -6348,7 +6350,7 @@
 	if (!test_idle_cores(target, false))
 		return -1;
 
-	cpumask_and(cpus, sched_domain_span(sd), &p->cpus_allowed);
+	cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr);
 
 	for_each_cpu_wrap(core, cpus, target) {
 		bool idle = true;
@@ -6382,7 +6384,7 @@
 		return -1;
 
 	for_each_cpu(cpu, cpu_smt_mask(target)) {
-		if (!cpumask_test_cpu(cpu, &p->cpus_allowed))
+		if (!cpumask_test_cpu(cpu, p->cpus_ptr))
 			continue;
 		if (available_idle_cpu(cpu))
 			return cpu;
@@ -6443,7 +6445,7 @@
 
 	time = local_clock();
 
-	cpumask_and(cpus, sched_domain_span(sd), &p->cpus_allowed);
+	cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr);
 
 	for_each_cpu_wrap(cpu, cpus, target) {
 		if (!--nr)
@@ -6483,7 +6485,7 @@
 	    recent_used_cpu != target &&
 	    cpus_share_cache(recent_used_cpu, target) &&
 	    available_idle_cpu(recent_used_cpu) &&
-	    cpumask_test_cpu(p->recent_used_cpu, &p->cpus_allowed)) {
+	    cpumask_test_cpu(p->recent_used_cpu, p->cpus_ptr)) {
 		/*
 		 * Replace recent_used_cpu with prev as it is a potential
 		 * candidate for the next wake:
@@ -6701,7 +6703,7 @@
 	/* Scan CPUs in all SDs */
 	sg = sd->groups;
 	do {
-		for_each_cpu_and(i, &p->cpus_allowed, sched_group_span(sg)) {
+		for_each_cpu_and(i, p->cpus_ptr, sched_group_span(sg)) {
 			unsigned long capacity_curr = capacity_curr_of(i);
 			unsigned long capacity_orig = capacity_orig_of(i);
 			unsigned long wake_util, new_util;
@@ -7119,7 +7121,7 @@
 		max_spare_cap = 0;
 
 		for_each_cpu_and(cpu, perf_domain_span(pd), sched_domain_span(sd)) {
-			if (!cpumask_test_cpu(cpu, &p->cpus_allowed))
+			if (!cpumask_test_cpu(cpu, p->cpus_ptr))
 				continue;
 
 			util = cpu_util_next(cpu, p, cpu);
@@ -7237,7 +7239,7 @@
 
 	if (sysctl_sched_sync_hint_enable && sync) {
 		cpu = smp_processor_id();
-		if (cpumask_test_cpu(cpu, &p->cpus_allowed))
+		if (cpumask_test_cpu(cpu, p->cpus_ptr))
 			return cpu;
 	}
 
@@ -7282,7 +7284,7 @@
 		goto unlock;
 	}
 
-	if (cpumask_test_cpu(prev_cpu, &p->cpus_allowed))
+	if (cpumask_test_cpu(prev_cpu, p->cpus_ptr))
 		prev_energy = best_energy = compute_energy(p, prev_cpu, pd);
 	else
 		prev_energy = best_energy = ULONG_MAX;
@@ -7355,7 +7357,7 @@
 
 		want_affine = !wake_wide(p, sibling_count_hint) &&
			      !wake_cap(p, cpu, prev_cpu) &&
-			      cpumask_test_cpu(cpu, &p->cpus_allowed);
+			      cpumask_test_cpu(cpu, p->cpus_ptr);
 	}
 
 sd_loop:
@@ -7617,7 +7619,7 @@
 		return;
 
 preempt:
-	resched_curr(rq);
+	resched_curr_lazy(rq);
 	/*
 	 * Only set the backward buddy when the current task is still
 	 * on the rq. This can happen when a wakeup gets interleaved
@@ -8113,14 +8115,14 @@
 	/*
 	 * We do not migrate tasks that are:
 	 * 1) throttled_lb_pair, or
-	 * 2) cannot be migrated to this CPU due to cpus_allowed, or
+	 * 2) cannot be migrated to this CPU due to cpus_ptr, or
 	 * 3) running (obviously), or
 	 * 4) are cache-hot on their current CPU.
 	 */
 	if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu))
 		return 0;
 
-	if (!cpumask_test_cpu(env->dst_cpu, &p->cpus_allowed)) {
+	if (!cpumask_test_cpu(env->dst_cpu, p->cpus_ptr)) {
 		int cpu;
 
 		schedstat_inc(p->se.statistics.nr_failed_migrations_affine);
@@ -8140,7 +8142,7 @@
 
 		/* Prevent to re-select dst_cpu via env's CPUs: */
 		for_each_cpu_and(cpu, env->dst_grpmask, env->cpus) {
-			if (cpumask_test_cpu(cpu, &p->cpus_allowed)) {
+			if (cpumask_test_cpu(cpu, p->cpus_ptr)) {
 				env->flags |= LBF_DST_PINNED;
 				env->new_dst_cpu = cpu;
 				break;
@@ -8806,7 +8808,7 @@
 
 /*
  * Group imbalance indicates (and tries to solve) the problem where balancing
- * groups is inadequate due to ->cpus_allowed constraints.
+ * groups is inadequate due to ->cpus_ptr constraints.
  *
  * Imagine a situation of two groups of 4 CPUs each and 4 tasks each with a
  * cpumask covering 1 CPU of the first group and 3 CPUs of the second group.
@@ -9507,7 +9509,7 @@
 	/*
 	 * If the busiest group is imbalanced the below checks don't
 	 * work because they assume all things are equal, which typically
-	 * isn't true due to cpus_allowed constraints and the like.
+	 * isn't true due to cpus_ptr constraints and the like.
 	 */
 	if (busiest->group_type == group_imbalanced)
 		goto force_balance;
@@ -9947,7 +9949,7 @@
 			 * if the curr task on busiest CPU can't be
 			 * moved to this_cpu:
 			 */
-			if (!cpumask_test_cpu(this_cpu, &busiest->curr->cpus_allowed)) {
+			if (!cpumask_test_cpu(this_cpu, busiest->curr->cpus_ptr)) {
 				raw_spin_unlock_irqrestore(&busiest->lock,
							    flags);
 				env.flags |= LBF_ALL_PINNED;
@@ -10943,7 +10945,7 @@
 		 * 'current' within the tree based on its new key value.
 		 */
 		swap(curr->vruntime, se->vruntime);
-		resched_curr(rq);
+		resched_curr_lazy(rq);
 	}
 
 	se->vruntime -= cfs_rq->min_vruntime;
@@ -10967,7 +10969,7 @@
 	 */
 	if (rq->curr == p) {
 		if (p->prio > oldprio)
-			resched_curr(rq);
+			resched_curr_lazy(rq);
 	} else
 		check_preempt_curr(rq, p, 0);
 }
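
Note (not part of the patch): the cfs_b->lock hunks above all apply the same conversion, replacing the plain raw_spin_lock()/raw_spin_unlock() pairs with the interrupt-state-saving variants so the bandwidth timer paths are correct whether or not they are entered with interrupts already disabled. A minimal illustrative sketch of that pairing follows; the names demo_lock and demo_update are hypothetical and exist only for the example.

/* Illustrative sketch of the irqsave locking pattern used above. */
#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(demo_lock);

static void demo_update(void)
{
	unsigned long flags;

	/* Save the caller's IRQ state, disable IRQs, take the lock. */
	raw_spin_lock_irqsave(&demo_lock, flags);

	/* ... critical section protected by demo_lock ... */

	/* Drop the lock and restore whatever IRQ state the caller had. */
	raw_spin_unlock_irqrestore(&demo_lock, flags);
}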