2023-12-11 6778948f9de86c3cfaf36725a7c87dcff9ba247f
kernel/kernel/sched/deadline.c
@@ -565,7 +565,7 @@
 
 static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
 {
-	return rq->online && dl_task(prev);
+	return dl_task(prev);
 }
 
 static DEFINE_PER_CPU(struct callback_head, dl_push_head);
@@ -1922,7 +1922,7 @@
 static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
 {
 	if (!task_running(rq, p) &&
-	    cpumask_test_cpu(cpu, &p->cpus_mask))
+	    cpumask_test_cpu(cpu, p->cpus_ptr))
 		return 1;
 	return 0;
 }
@@ -2012,8 +2012,8 @@
 				return this_cpu;
 			}
 
-			best_cpu = cpumask_any_and_distribute(later_mask,
-							      sched_domain_span(sd));
+			best_cpu = cpumask_first_and(later_mask,
+						     sched_domain_span(sd));
 			/*
 			 * Last chance: if a CPU being in both later_mask
 			 * and current sd span is valid, that becomes our
@@ -2035,7 +2035,7 @@
 	if (this_cpu != -1)
 		return this_cpu;
 
-	cpu = cpumask_any_distribute(later_mask);
+	cpu = cpumask_any(later_mask);
 	if (cpu < nr_cpu_ids)
 		return cpu;
 
@@ -2072,7 +2072,7 @@
 		/* Retry if something changed. */
 		if (double_lock_balance(rq, later_rq)) {
 			if (unlikely(task_rq(task) != rq ||
-				     !cpumask_test_cpu(later_rq->cpu, &task->cpus_mask) ||
+				     !cpumask_test_cpu(later_rq->cpu, task->cpus_ptr) ||
 				     task_running(rq, task) ||
 				     !dl_task(task) ||
 				     !task_on_rq_queued(task))) {
@@ -2139,9 +2139,6 @@
 		return 0;
 
 retry:
-	if (is_migration_disabled(next_task))
-		return 0;
-
 	if (WARN_ON(next_task == rq->curr))
 		return 0;
 
@@ -2219,7 +2216,7 @@
 static void pull_dl_task(struct rq *this_rq)
 {
 	int this_cpu = this_rq->cpu, cpu;
-	struct task_struct *p, *push_task;
+	struct task_struct *p;
 	bool resched = false;
 	struct rq *src_rq;
 	u64 dmin = LONG_MAX;
@@ -2249,7 +2246,6 @@
 			continue;
 
 		/* Might drop this_rq->lock */
-		push_task = NULL;
 		double_lock_balance(this_rq, src_rq);
 
 		/*
@@ -2281,28 +2277,17 @@
 					   src_rq->curr->dl.deadline))
 				goto skip;
 
-			if (is_migration_disabled(p)) {
-				trace_sched_migrate_pull_tp(p);
-				push_task = get_push_task(src_rq);
-			} else {
-				deactivate_task(src_rq, p, 0);
-				set_task_cpu(p, this_cpu);
-				activate_task(this_rq, p, 0);
-				dmin = p->dl.deadline;
-				resched = true;
-			}
+			resched = true;
+
+			deactivate_task(src_rq, p, 0);
+			set_task_cpu(p, this_cpu);
+			activate_task(this_rq, p, 0);
+			dmin = p->dl.deadline;
 
 			/* Is there any other task even earlier? */
 		}
 skip:
 		double_unlock_balance(this_rq, src_rq);
-
-		if (push_task) {
-			raw_spin_unlock(&this_rq->lock);
-			stop_one_cpu_nowait(src_rq->cpu, push_cpu_stop,
-					    push_task, &src_rq->push_work);
-			raw_spin_lock(&this_rq->lock);
-		}
 	}
 
 	if (resched)
@@ -2326,8 +2311,7 @@
 }
 
 static void set_cpus_allowed_dl(struct task_struct *p,
-				const struct cpumask *new_mask,
-				u32 flags)
+				const struct cpumask *new_mask)
 {
 	struct root_domain *src_rd;
 	struct rq *rq;
@@ -2356,7 +2340,7 @@
 		raw_spin_unlock(&src_dl_b->lock);
 	}
 
-	set_cpus_allowed_common(p, new_mask, flags);
+	set_cpus_allowed_common(p, new_mask);
 }
 
 /* Assumes rq->lock is held */
@@ -2554,7 +2538,6 @@
 	.rq_online		= rq_online_dl,
 	.rq_offline		= rq_offline_dl,
 	.task_woken		= task_woken_dl,
-	.find_lock_rq		= find_lock_later_rq,
 #endif
 
 	.task_tick		= task_tick_dl,
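
For context on the hunks at @@ -2012 and @@ -2035, which replace the rotating helpers cpumask_any_and_distribute()/cpumask_any_distribute() with the deterministic cpumask_first_and()/cpumask_any(): below is a minimal userspace sketch of the semantic difference only, not kernel code. pick_first() and pick_distributed() are hypothetical stand-ins introduced for illustration; the kernel's real helpers operate on struct cpumask and keep the rotation point per CPU.

/*
 * Userspace sketch (illustration only, not the kernel implementation).
 * pick_first() mimics cpumask_first_and(): always the lowest set bit in
 * the AND of the two masks. pick_distributed() mimics the behaviour of
 * cpumask_any_and_distribute(): it remembers where it last picked, so
 * repeated calls spread across the eligible bits instead of always
 * returning the lowest one.
 */
#include <stdio.h>

#define NR_CPUS 8	/* assumed small CPU count for the sketch */

/* Lowest CPU set in both masks, or NR_CPUS if the intersection is empty. */
static int pick_first(unsigned int a, unsigned int b)
{
	unsigned int m = a & b;

	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		if (m & (1u << cpu))
			return cpu;
	return NR_CPUS;
}

/* Rotate the search start so successive calls distribute the choice. */
static int pick_distributed(unsigned int a, unsigned int b)
{
	static int start;	/* the kernel keeps this rotation point per CPU */
	unsigned int m = a & b;

	for (int i = 0; i < NR_CPUS; i++) {
		int cpu = (start + i) % NR_CPUS;

		if (m & (1u << cpu)) {
			start = cpu + 1;
			return cpu;
		}
	}
	return NR_CPUS;
}

int main(void)
{
	unsigned int later_mask = 0x36;	/* CPUs 1, 2, 4, 5 have a later deadline */
	unsigned int sd_span    = 0xff;	/* all CPUs in the sched domain */

	for (int i = 0; i < 4; i++)
		printf("first=%d distributed=%d\n",
		       pick_first(later_mask, sd_span),
		       pick_distributed(later_mask, sd_span));
	return 0;
}

With later_mask covering CPUs 1, 2, 4 and 5, the sketch prints first=1 on every call while the distributed pick cycles 1, 2, 4, 5: the _distribute variants spread repeated selections across eligible CPUs, whereas the helpers restored by this diff always prefer the lowest-numbered candidate.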