2024-02-20 102a0743326a03cd1a1202ceda21e175b7d3575c
kernel/kernel/sched/deadline.c
@@ -17,6 +17,7 @@
  */
 #include "sched.h"
 #include "pelt.h"
+#include <linux/cpuset.h>
 
 struct dl_bandwidth def_dl_bandwidth;
 
@@ -565,7 +566,7 @@
 
 static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
 {
-        return rq->online && dl_task(prev);
+        return dl_task(prev);
 }
 
 static DEFINE_PER_CPU(struct callback_head, dl_push_head);
@@ -1847,8 +1848,7 @@
         deadline_queue_push_tasks(rq);
 }
 
-static struct sched_dl_entity *pick_next_dl_entity(struct rq *rq,
-                                                   struct dl_rq *dl_rq)
+static struct sched_dl_entity *pick_next_dl_entity(struct dl_rq *dl_rq)
 {
         struct rb_node *left = rb_first_cached(&dl_rq->root);
 
@@ -1867,7 +1867,7 @@
         if (!sched_dl_runnable(rq))
                 return NULL;
 
-        dl_se = pick_next_dl_entity(rq, dl_rq);
+        dl_se = pick_next_dl_entity(dl_rq);
         BUG_ON(!dl_se);
         p = dl_task_of(dl_se);
         set_next_task_dl(rq, p, true);
@@ -1922,7 +1922,7 @@
 static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
 {
         if (!task_running(rq, p) &&
-            cpumask_test_cpu(cpu, &p->cpus_mask))
+            cpumask_test_cpu(cpu, p->cpus_ptr))
                 return 1;
         return 0;
 }
@@ -2012,8 +2012,8 @@
                                 return this_cpu;
                         }
 
-                        best_cpu = cpumask_any_and_distribute(later_mask,
-                                                              sched_domain_span(sd));
+                        best_cpu = cpumask_first_and(later_mask,
+                                                     sched_domain_span(sd));
                         /*
                          * Last chance: if a CPU being in both later_mask
                          * and current sd span is valid, that becomes our
@@ -2035,7 +2035,7 @@
         if (this_cpu != -1)
                 return this_cpu;
 
-        cpu = cpumask_any_distribute(later_mask);
+        cpu = cpumask_any(later_mask);
         if (cpu < nr_cpu_ids)
                 return cpu;
 
@@ -2072,7 +2072,7 @@
                 /* Retry if something changed. */
                 if (double_lock_balance(rq, later_rq)) {
                         if (unlikely(task_rq(task) != rq ||
-                                     !cpumask_test_cpu(later_rq->cpu, &task->cpus_mask) ||
+                                     !cpumask_test_cpu(later_rq->cpu, task->cpus_ptr) ||
                                      task_running(rq, task) ||
                                      !dl_task(task) ||
                                      !task_on_rq_queued(task))) {
@@ -2139,9 +2139,6 @@
                 return 0;
 
 retry:
-        if (is_migration_disabled(next_task))
-                return 0;
-
         if (WARN_ON(next_task == rq->curr))
                 return 0;
 
@@ -2219,7 +2216,7 @@
 static void pull_dl_task(struct rq *this_rq)
 {
         int this_cpu = this_rq->cpu, cpu;
-        struct task_struct *p, *push_task;
+        struct task_struct *p;
         bool resched = false;
         struct rq *src_rq;
         u64 dmin = LONG_MAX;
@@ -2249,7 +2246,6 @@
                         continue;
 
                 /* Might drop this_rq->lock */
-                push_task = NULL;
                 double_lock_balance(this_rq, src_rq);
 
                 /*
@@ -2281,28 +2277,17 @@
                                            src_rq->curr->dl.deadline))
                                 goto skip;
 
-                        if (is_migration_disabled(p)) {
-                                trace_sched_migrate_pull_tp(p);
-                                push_task = get_push_task(src_rq);
-                        } else {
-                                deactivate_task(src_rq, p, 0);
-                                set_task_cpu(p, this_cpu);
-                                activate_task(this_rq, p, 0);
-                                dmin = p->dl.deadline;
-                                resched = true;
-                        }
+                        resched = true;
+
+                        deactivate_task(src_rq, p, 0);
+                        set_task_cpu(p, this_cpu);
+                        activate_task(this_rq, p, 0);
+                        dmin = p->dl.deadline;
 
                         /* Is there any other task even earlier? */
                 }
 skip:
                 double_unlock_balance(this_rq, src_rq);
-
-                if (push_task) {
-                        raw_spin_unlock(&this_rq->lock);
-                        stop_one_cpu_nowait(src_rq->cpu, push_cpu_stop,
-                                            push_task, &src_rq->push_work);
-                        raw_spin_lock(&this_rq->lock);
-                }
         }
 
         if (resched)
23262311 }
23272312
23282313 static void set_cpus_allowed_dl(struct task_struct *p,
2329
- const struct cpumask *new_mask,
2330
- u32 flags)
2314
+ const struct cpumask *new_mask)
23312315 {
23322316 struct root_domain *src_rd;
23332317 struct rq *rq;
@@ -2356,7 +2340,7 @@
                 raw_spin_unlock(&src_dl_b->lock);
         }
 
-        set_cpus_allowed_common(p, new_mask, flags);
+        set_cpus_allowed_common(p, new_mask);
 }
 
 /* Assumes rq->lock is held */
@@ -2437,6 +2421,12 @@
         if (task_on_rq_queued(p) && p->dl.dl_runtime)
                 task_non_contending(p);
 
+        /*
+         * In case a task is setscheduled out from SCHED_DEADLINE we need to
+         * keep track of that on its cpuset (for correct bandwidth tracking).
+         */
+        dec_dl_tasks_cs(p);
+
         if (!task_on_rq_queued(p)) {
                 /*
                  * Inactive timer is armed. However, p is leaving DEADLINE and
24762466 {
24772467 if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
24782468 put_task_struct(p);
2469
+
2470
+ /*
2471
+ * In case a task is setscheduled to SCHED_DEADLINE we need to keep
2472
+ * track of that on its cpuset (for correct bandwidth tracking).
2473
+ */
2474
+ inc_dl_tasks_cs(p);
24792475
24802476 /* If p is not queued we will update its parameters at next wakeup. */
24812477 if (!task_on_rq_queued(p)) {
@@ -2554,7 +2550,6 @@
         .rq_online              = rq_online_dl,
         .rq_offline             = rq_offline_dl,
         .task_woken             = task_woken_dl,
-        .find_lock_rq           = find_lock_later_rq,
 #endif
 
         .task_tick              = task_tick_dl,
@@ -2866,26 +2861,38 @@
         return ret;
 }
 
-int dl_cpu_busy(int cpu, struct task_struct *p)
+enum dl_bw_request {
+        dl_bw_req_check_overflow = 0,
+        dl_bw_req_alloc,
+        dl_bw_req_free
+};
+
+static int dl_bw_manage(enum dl_bw_request req, int cpu, u64 dl_bw)
 {
-        unsigned long flags, cap;
+        unsigned long flags;
         struct dl_bw *dl_b;
-        bool overflow;
+        bool overflow = 0;
 
         rcu_read_lock_sched();
         dl_b = dl_bw_of(cpu);
         raw_spin_lock_irqsave(&dl_b->lock, flags);
-        cap = dl_bw_capacity(cpu);
-        overflow = __dl_overflow(dl_b, cap, 0, p ? p->dl.dl_bw : 0);
 
-        if (!overflow && p) {
-                /*
-                 * We reserve space for this task in the destination
-                 * root_domain, as we can't fail after this point.
-                 * We will free resources in the source root_domain
-                 * later on (see set_cpus_allowed_dl()).
-                 */
-                __dl_add(dl_b, p->dl.dl_bw, dl_bw_cpus(cpu));
+        if (req == dl_bw_req_free) {
+                __dl_sub(dl_b, dl_bw, dl_bw_cpus(cpu));
+        } else {
+                unsigned long cap = dl_bw_capacity(cpu);
+
+                overflow = __dl_overflow(dl_b, cap, 0, dl_bw);
+
+                if (req == dl_bw_req_alloc && !overflow) {
+                        /*
+                         * We reserve space in the destination
+                         * root_domain, as we can't fail after this point.
+                         * We will free resources in the source root_domain
+                         * later on (see set_cpus_allowed_dl()).
+                         */
+                        __dl_add(dl_b, dl_bw, dl_bw_cpus(cpu));
+                }
         }
 
         raw_spin_unlock_irqrestore(&dl_b->lock, flags);
@@ -2893,6 +2900,21 @@
 
         return overflow ? -EBUSY : 0;
 }
+
+int dl_bw_check_overflow(int cpu)
+{
+        return dl_bw_manage(dl_bw_req_check_overflow, cpu, 0);
+}
+
+int dl_bw_alloc(int cpu, u64 dl_bw)
+{
+        return dl_bw_manage(dl_bw_req_alloc, cpu, dl_bw);
+}
+
+void dl_bw_free(int cpu, u64 dl_bw)
+{
+        dl_bw_manage(dl_bw_req_free, cpu, dl_bw);
+}
 #endif
 
 #ifdef CONFIG_SCHED_DEBUG
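
The last two hunks replace dl_cpu_busy() with the request-based dl_bw_manage() and expose
dl_bw_check_overflow(), dl_bw_alloc() and dl_bw_free(), so that code outside the scheduler
(the cpuset side, hooked above via inc_dl_tasks_cs()/dec_dl_tasks_cs() and <linux/cpuset.h>)
can reserve and release SCHED_DEADLINE bandwidth on a CPU's root domain. Below is a minimal
usage sketch; example_reserve_dl_bw() and example_move_task() are hypothetical illustrations,
only dl_bw_alloc()/dl_bw_free() come from this patch.

/*
 * Hypothetical caller, for illustration only: reserve @p's DEADLINE
 * bandwidth on @dest_cpu before a (made-up) move step, and roll the
 * reservation back if that step fails.
 */
static int example_reserve_dl_bw(struct task_struct *p, int dest_cpu)
{
        int ret;

        /* Admission control: -EBUSY if the destination root domain is full. */
        ret = dl_bw_alloc(dest_cpu, p->dl.dl_bw);
        if (ret)
                return ret;

        ret = example_move_task(p, dest_cpu);           /* hypothetical move step */
        if (ret)
                dl_bw_free(dest_cpu, p->dl.dl_bw);      /* undo the reservation */

        return ret;
}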