2024-05-10 61598093bbdd283a7edc367d900f223070ead8d2
kernel/kernel/sched/deadline.c
@@ -17,6 +17,7 @@
  */
 #include "sched.h"
 #include "pelt.h"
+#include <linux/cpuset.h>
 
 struct dl_bandwidth def_dl_bandwidth;
 
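The new include is what makes the cpuset helpers used further down (inc_dl_tasks_cs()/dec_dl_tasks_cs()) visible to deadline.c. As a rough, assumed sketch of that dependency (exact config guards and stubs depend on the tree and are not part of this diff):

/* Assumed shape of the <linux/cpuset.h> declarations this patch relies on. */
#ifdef CONFIG_CPUSETS
extern void inc_dl_tasks_cs(struct task_struct *task);
extern void dec_dl_tasks_cs(struct task_struct *task);
#else
static inline void inc_dl_tasks_cs(struct task_struct *task) { }
static inline void dec_dl_tasks_cs(struct task_struct *task) { }
#endif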
@@ -1847,8 +1848,7 @@
 	deadline_queue_push_tasks(rq);
 }
 
-static struct sched_dl_entity *pick_next_dl_entity(struct rq *rq,
-						   struct dl_rq *dl_rq)
+static struct sched_dl_entity *pick_next_dl_entity(struct dl_rq *dl_rq)
 {
 	struct rb_node *left = rb_first_cached(&dl_rq->root);
 
@@ -1867,7 +1867,7 @@
 	if (!sched_dl_runnable(rq))
 		return NULL;
 
-	dl_se = pick_next_dl_entity(rq, dl_rq);
+	dl_se = pick_next_dl_entity(dl_rq);
 	BUG_ON(!dl_se);
 	p = dl_task_of(dl_se);
 	set_next_task_dl(rq, p, true);
@@ -2421,6 +2421,12 @@
 	if (task_on_rq_queued(p) && p->dl.dl_runtime)
 		task_non_contending(p);
 
+	/*
+	 * In case a task is setscheduled out from SCHED_DEADLINE we need to
+	 * keep track of that on its cpuset (for correct bandwidth tracking).
+	 */
+	dec_dl_tasks_cs(p);
+
 	if (!task_on_rq_queued(p)) {
 		/*
 		 * Inactive timer is armed. However, p is leaving DEADLINE and
@@ -2460,6 +2466,12 @@
 {
 	if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
 		put_task_struct(p);
+
+	/*
+	 * In case a task is setscheduled to SCHED_DEADLINE we need to keep
+	 * track of that on its cpuset (for correct bandwidth tracking).
+	 */
+	inc_dl_tasks_cs(p);
 
 	/* If p is not queued we will update its parameters at next wakeup. */
 	if (!task_on_rq_queued(p)) {
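The two hunks above mirror each other: switched_from_dl() decrements and switched_to_dl() increments a per-cpuset count of SCHED_DEADLINE tasks, so later cpuset changes can validate DL bandwidth. The counterpart lives outside this file; a minimal sketch of what kernel/cgroup/cpuset.c is assumed to provide (helper and field names follow the mainline series and are illustrative for this tree):

/*
 * Illustrative cpuset-side counterpart (assumed, not part of this diff):
 * keep a per-cpuset count of DEADLINE tasks for bandwidth tracking.
 */
void inc_dl_tasks_cs(struct task_struct *p)
{
	struct cpuset *cs = task_cs(p);

	cs->nr_deadline_tasks++;
}

void dec_dl_tasks_cs(struct task_struct *p)
{
	struct cpuset *cs = task_cs(p);

	cs->nr_deadline_tasks--;
}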
@@ -2849,26 +2861,38 @@
 	return ret;
 }
 
-int dl_cpu_busy(int cpu, struct task_struct *p)
+enum dl_bw_request {
+	dl_bw_req_check_overflow = 0,
+	dl_bw_req_alloc,
+	dl_bw_req_free
+};
+
+static int dl_bw_manage(enum dl_bw_request req, int cpu, u64 dl_bw)
 {
-	unsigned long flags, cap;
+	unsigned long flags;
 	struct dl_bw *dl_b;
-	bool overflow;
+	bool overflow = 0;
 
 	rcu_read_lock_sched();
 	dl_b = dl_bw_of(cpu);
 	raw_spin_lock_irqsave(&dl_b->lock, flags);
-	cap = dl_bw_capacity(cpu);
-	overflow = __dl_overflow(dl_b, cap, 0, p ? p->dl.dl_bw : 0);
 
-	if (!overflow && p) {
-		/*
-		 * We reserve space for this task in the destination
-		 * root_domain, as we can't fail after this point.
-		 * We will free resources in the source root_domain
-		 * later on (see set_cpus_allowed_dl()).
-		 */
-		__dl_add(dl_b, p->dl.dl_bw, dl_bw_cpus(cpu));
+	if (req == dl_bw_req_free) {
+		__dl_sub(dl_b, dl_bw, dl_bw_cpus(cpu));
+	} else {
+		unsigned long cap = dl_bw_capacity(cpu);
+
+		overflow = __dl_overflow(dl_b, cap, 0, dl_bw);
+
+		if (req == dl_bw_req_alloc && !overflow) {
+			/*
+			 * We reserve space in the destination
+			 * root_domain, as we can't fail after this point.
+			 * We will free resources in the source root_domain
+			 * later on (see set_cpus_allowed_dl()).
+			 */
+			__dl_add(dl_b, dl_bw, dl_bw_cpus(cpu));
+		}
 	}
 
 	raw_spin_unlock_irqrestore(&dl_b->lock, flags);
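dl_bw_manage() folds the old dl_cpu_busy() behaviour into three request types: dl_bw_req_check_overflow only tests whether the CPU's root domain would overflow (the former dl_cpu_busy(cpu, NULL) case), dl_bw_req_alloc additionally reserves dl_bw when it fits, and dl_bw_req_free returns dl_bw to the pool. A hedged sketch of how the existing hotplug caller is expected to move over (the call site below is an assumption, not part of this hunk):

/*
 * Sketch of the assumed CPU-hotplug caller after the rename
 * (frozen-tasks branch of the real caller elided).
 */
static int cpuset_cpu_inactive(unsigned int cpu)
{
	if (!cpuhp_tasks_frozen) {
		int ret = dl_bw_check_overflow(cpu);	/* was: dl_cpu_busy(cpu, NULL) */

		if (ret)
			return ret;
		cpuset_update_active_cpus();
	}
	return 0;
}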
@@ -2876,6 +2900,21 @@
 
 	return overflow ? -EBUSY : 0;
 }
+
+int dl_bw_check_overflow(int cpu)
+{
+	return dl_bw_manage(dl_bw_req_check_overflow, cpu, 0);
+}
+
+int dl_bw_alloc(int cpu, u64 dl_bw)
+{
+	return dl_bw_manage(dl_bw_req_alloc, cpu, dl_bw);
+}
+
+void dl_bw_free(int cpu, u64 dl_bw)
+{
+	dl_bw_manage(dl_bw_req_free, cpu, dl_bw);
+}
 #endif
 
 #ifdef CONFIG_SCHED_DEBUG
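dl_bw_check_overflow() keeps the old semantics for hotplug, while dl_bw_alloc()/dl_bw_free() give cpuset code a way to reserve DEADLINE bandwidth on a destination CPU before a migration and to hand it back if the operation is aborted; the prototypes are expected to live in the scheduler/cpuset headers rather than in this file. A minimal usage sketch, assuming a cpuset-attach style caller (cs, sum_migrate_dl_bw and both helpers below are illustrative names, not part of this diff):

/*
 * Illustrative caller (assumed): reserve the combined bandwidth of the
 * DEADLINE tasks being attached on one active CPU of the destination
 * cpuset, and return it if the attach is later cancelled. Since
 * dl_bw_manage() accounts against that CPU's root domain, the alloc and
 * the matching free must target a CPU in the same root domain.
 */
static int reserve_migrate_dl_bw(struct cpuset *cs)
{
	int cpu = cpumask_any_and(cpu_active_mask, cs->effective_cpus);

	return dl_bw_alloc(cpu, cs->sum_migrate_dl_bw);	/* -EBUSY on overflow */
}

static void cancel_migrate_dl_bw(struct cpuset *cs)
{
	int cpu = cpumask_any_and(cpu_active_mask, cs->effective_cpus);

	dl_bw_free(cpu, cs->sum_migrate_dl_bw);
}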