2024-05-10 61598093bbdd283a7edc367d900f223070ead8d2
kernel/kernel/sched/rt.c
@@ -272,7 +272,7 @@
 static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
 {
         /* Try to pull RT tasks here if we lower this rq's prio */
-        return rq->online && rq->rt.highest_prio.curr > prev->prio;
+        return rq->rt.highest_prio.curr > prev->prio;
 }

 static inline int rt_overloaded(struct rq *rq)
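For orientation: need_pull_rt_task() gates the pull attempt that balance_rt() makes before picking the next task, so dropping the rq->online test means a pull may now be attempted even while this runqueue is being taken offline. The caller, paraphrased from mainline 5.10-era code for context only (none of these helper names are introduced by this diff):

    static int balance_rt(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
    {
            if (!on_rt_rq(&p->rt) && need_pull_rt_task(rq, p)) {
                    /* Unpin so pull_rt_task() may drop and retake rq->lock. */
                    rq_unpin_lock(rq, rf);
                    pull_rt_task(rq);
                    rq_repin_lock(rq, rf);
            }

            return sched_stop_runnable(rq) || sched_dl_runnable(rq) ||
                   sched_rt_runnable(rq);
    }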
@@ -1696,8 +1696,7 @@
         rt_queue_push_tasks(rq);
 }

-static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
-                                                   struct rt_rq *rt_rq)
+static struct sched_rt_entity *pick_next_rt_entity(struct rt_rq *rt_rq)
 {
         struct rt_prio_array *array = &rt_rq->active;
         struct sched_rt_entity *next = NULL;
@@ -1708,6 +1707,8 @@
         BUG_ON(idx >= MAX_RT_PRIO);

         queue = array->queue + idx;
+        if (SCHED_WARN_ON(list_empty(queue)))
+                return NULL;
         next = list_entry(queue->next, struct sched_rt_entity, run_list);

         return next;
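The new check closes a real hazard rather than papering over one: list_entry() is container_of(), bare pointer arithmetic with no validity check, and on an empty queue queue->next points back at the list head itself, so the returned "entity" is a bogus pointer into the head that the old code would happily dereference. A minimal userspace model of that failure mode (the list_head/container_of stand-ins below mimic, but are not, the kernel's):

    #include <stdio.h>
    #include <stddef.h>

    struct list_head { struct list_head *next, *prev; };

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))
    #define list_entry(ptr, type, member) container_of(ptr, type, member)

    struct sched_rt_entity { int prio; struct list_head run_list; };

    int main(void)
    {
            /* An empty list points back at itself... */
            struct list_head queue = { &queue, &queue };

            /* ...so list_entry() on it fabricates an "entity" at an address
             * computed from the list head, not from any real object. */
            struct sched_rt_entity *bogus =
                    list_entry(queue.next, struct sched_rt_entity, run_list);

            printf("list head at %p, bogus entity at %p\n",
                   (void *)&queue, (void *)bogus);
            return 0;
    }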
@@ -1719,8 +1720,9 @@
         struct rt_rq *rt_rq = &rq->rt;

         do {
-                rt_se = pick_next_rt_entity(rq, rt_rq);
-                BUG_ON(!rt_se);
+                rt_se = pick_next_rt_entity(rt_rq);
+                if (unlikely(!rt_se))
+                        return NULL;
                 rt_rq = group_rt_rq(rt_se);
         } while (rt_rq);

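Taken together with the previous hunk, this turns the "priority bitmap claims a level is populated but its queue is empty" inconsistency from a fatal BUG_ON() into a one-shot SCHED_WARN_ON(): pick_next_rt_entity() hands back NULL, the loop here propagates it, and the core scheduler simply behaves as if the RT class had nothing runnable instead of panicking.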
@@ -1761,7 +1763,7 @@
 static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
 {
         if (!task_running(rq, p) &&
-            cpumask_test_cpu(cpu, &p->cpus_mask))
+            cpumask_test_cpu(cpu, p->cpus_ptr))
                 return 1;

         return 0;
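Here, and again in find_lock_lowest_rq() further down, the affinity test goes back to reading through p->cpus_ptr. That pointer is the accessor the scheduler is normally meant to consult; cpus_mask is the storage it usually points at, and the migrate-disable machinery being reverted later in this patch is precisely what could make the two diverge by redirecting cpus_ptr at a temporary single-CPU mask. Abridged declarations for context (the exact field set varies by kernel version):

    struct task_struct {
            /* ... */
            const cpumask_t  *cpus_ptr;     /* what schedulers should read */
            cpumask_t        cpus_mask;     /* storage cpus_ptr normally targets */
            /* ... */
    };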
@@ -1864,8 +1866,8 @@
                                 return this_cpu;
                         }

-                        best_cpu = cpumask_any_and_distribute(lowest_mask,
-                                                              sched_domain_span(sd));
+                        best_cpu = cpumask_first_and(lowest_mask,
+                                                     sched_domain_span(sd));
                         if (best_cpu < nr_cpu_ids) {
                                 rcu_read_unlock();
                                 return best_cpu;
@@ -1882,7 +1884,7 @@
         if (this_cpu != -1)
                 return this_cpu;

-        cpu = cpumask_any_distribute(lowest_mask);
+        cpu = cpumask_any(lowest_mask);
         if (cpu < nr_cpu_ids)
                 return cpu;

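Both substitutions in find_lowest_rq() (cpumask_any_and_distribute() to cpumask_first_and() above, cpumask_any_distribute() to cpumask_any() here) trade wakeup spreading for deterministic selection: cpumask_first_and() always returns the lowest-numbered CPU in the intersection, and in this kernel cpumask_any() is, despite its name, an alias for cpumask_first(). The _distribute variants instead resume the search after the previous pick, round-robining across equally eligible CPUs. A userspace model of the two policies over a 64-bit "cpumask" (the kernel keeps the rotation state in a per-CPU variable; it is simplified to a function-local static here, and __builtin_ctzll is a GCC/Clang builtin):

    #include <stdio.h>

    static int mask_first_and(unsigned long long a, unsigned long long b)
    {
            unsigned long long m = a & b;

            return m ? __builtin_ctzll(m) : 64;     /* 64 plays nr_cpu_ids: none */
    }

    static int mask_any_and_distribute(unsigned long long a, unsigned long long b)
    {
            static int prev;                        /* per-CPU in the kernel */
            unsigned long long m = a & b;
            int i;

            for (i = 1; i <= 64; i++) {
                    int bit = (prev + i) % 64;

                    if (m & (1ULL << bit)) {
                            prev = bit;
                            return bit;
                    }
            }
            return 64;
    }

    int main(void)
    {
            unsigned long long lowest = 0xf0, span = 0xff; /* CPUs 4-7 eligible */
            int a, b, c;

            printf("first_and:  %d %d %d\n", mask_first_and(lowest, span),
                   mask_first_and(lowest, span), mask_first_and(lowest, span));

            a = mask_any_and_distribute(lowest, span);
            b = mask_any_and_distribute(lowest, span);
            c = mask_any_and_distribute(lowest, span);
            printf("distribute: %d %d %d\n", a, b, c);      /* prints 4 5 6 */
            return 0;
    }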
@@ -1923,7 +1925,7 @@
                  * Also make sure that it wasn't scheduled on its rq.
                  */
                 if (unlikely(task_rq(task) != rq ||
-                             !cpumask_test_cpu(lowest_rq->cpu, &task->cpus_mask) ||
+                             !cpumask_test_cpu(lowest_rq->cpu, task->cpus_ptr) ||
                              task_running(rq, task) ||
                              !rt_task(task) ||
                              !task_on_rq_queued(task))) {
@@ -1971,7 +1973,7 @@
  * running task can migrate over to a CPU that is running a task
  * of lesser priority.
  */
-static int push_rt_task(struct rq *rq, bool pull)
+static int push_rt_task(struct rq *rq)
 {
         struct task_struct *next_task;
         struct rq *lowest_rq;
@@ -1985,39 +1987,6 @@
                 return 0;

 retry:
-        if (is_migration_disabled(next_task)) {
-                struct task_struct *push_task = NULL;
-                int cpu;
-
-                if (!pull)
-                        return 0;
-
-                trace_sched_migrate_pull_tp(next_task);
-
-                if (rq->push_busy)
-                        return 0;
-
-                cpu = find_lowest_rq(rq->curr);
-                if (cpu == -1 || cpu == rq->cpu)
-                        return 0;
-
-                /*
-                 * Given we found a CPU with lower priority than @next_task,
-                 * therefore it should be running. However we cannot migrate it
-                 * to this other CPU, instead attempt to push the current
-                 * running task on this CPU away.
-                 */
-                push_task = get_push_task(rq);
-                if (push_task) {
-                        raw_spin_unlock(&rq->lock);
-                        stop_one_cpu_nowait(rq->cpu, push_cpu_stop,
-                                            push_task, &rq->push_work);
-                        raw_spin_lock(&rq->lock);
-                }
-
-                return 0;
-        }
-
         if (WARN_ON(next_task == rq->curr))
                 return 0;

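This hunk is the heart of the revert: push_rt_task() loses its pull mode. In the migrate-disable-aware version, the task selected for pushing could be pinned (is_migration_disabled()); since it could not be moved, the code instead asked the stopper thread, via stop_one_cpu_nowait(), to push the currently running task off this CPU (rq->lock is dropped around queuing the stopper work, presumably because waking the stopper while holding this rq's lock could deadlock). With migrate_disable() support gone from this tree's scheduler, that fallback and the push_busy/push_work plumbing it relied on go away, and pushing returns to the simple "move next_task to a lower-priority CPU or give up" model.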
@@ -2072,10 +2041,12 @@
         deactivate_task(rq, next_task, 0);
         set_task_cpu(next_task, lowest_rq->cpu);
         activate_task(lowest_rq, next_task, 0);
-        resched_curr(lowest_rq);
         ret = 1;

+        resched_curr(lowest_rq);
+
         double_unlock_balance(rq, lowest_rq);
+
 out:
         put_task_struct(next_task);

@@ -2085,7 +2056,7 @@
 static void push_rt_tasks(struct rq *rq)
 {
         /* push_rt_task will return true if it moved an RT */
-        while (push_rt_task(rq, false))
+        while (push_rt_task(rq))
                 ;
 }

@@ -2238,8 +2209,7 @@
          */
         if (has_pushable_tasks(rq)) {
                 raw_spin_lock(&rq->lock);
-                while (push_rt_task(rq, true))
-                        ;
+                push_rt_tasks(rq);
                 raw_spin_unlock(&rq->lock);
         }

@@ -2264,7 +2234,7 @@
 {
         int this_cpu = this_rq->cpu, cpu;
         bool resched = false;
-        struct task_struct *p, *push_task;
+        struct task_struct *p;
         struct rq *src_rq;
         int rt_overload_count = rt_overloaded(this_rq);

@@ -2311,7 +2281,6 @@
                  * double_lock_balance, and another CPU could
                  * alter this_rq
                  */
-                push_task = NULL;
                 double_lock_balance(this_rq, src_rq);

                 /*
@@ -2339,15 +2308,11 @@
                         if (p->prio < src_rq->curr->prio)
                                 goto skip;

-                        if (is_migration_disabled(p)) {
-                                trace_sched_migrate_pull_tp(p);
-                                push_task = get_push_task(src_rq);
-                        } else {
-                                deactivate_task(src_rq, p, 0);
-                                set_task_cpu(p, this_cpu);
-                                activate_task(this_rq, p, 0);
-                                resched = true;
-                        }
+                        resched = true;
+
+                        deactivate_task(src_rq, p, 0);
+                        set_task_cpu(p, this_cpu);
+                        activate_task(this_rq, p, 0);
                         /*
                          * We continue with the search, just in
                          * case there's an even higher prio task
@@ -2357,13 +2322,6 @@
                 }
 skip:
                 double_unlock_balance(this_rq, src_rq);
-
-                if (push_task) {
-                        raw_spin_unlock(&this_rq->lock);
-                        stop_one_cpu_nowait(src_rq->cpu, push_cpu_stop,
-                                            push_task, &src_rq->push_work);
-                        raw_spin_lock(&this_rq->lock);
-                }
         }

         if (resched)
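What survives in the pull loop is the canonical cross-runqueue migration sequence, which is only valid with both runqueue locks held; distilled below for reference (eligible() is shorthand invented here for the priority and affinity checks in the loop above, not a real kernel function):

    double_lock_balance(this_rq, src_rq);   /* both rq->locks, deadlock-safe order */
    if (eligible(p)) {
            deactivate_task(src_rq, p, 0);  /* dequeue from the source rq */
            set_task_cpu(p, this_cpu);      /* re-home the task */
            activate_task(this_rq, p, 0);   /* enqueue on the destination rq */
            resched = true;                 /* reschedule once locks are dropped */
    }
    double_unlock_balance(this_rq, src_rq);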
@@ -2612,7 +2570,6 @@
         .rq_offline             = rq_offline_rt,
         .task_woken             = task_woken_rt,
         .switched_from          = switched_from_rt,
-        .find_lock_rq           = find_lock_lowest_rq,
 #endif

         .task_tick              = task_tick_rt,
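Finally, the .find_lock_rq hook comes back out of rt_sched_class. As far as I can tell, it was added by the migrate-disable series so the generic push_cpu_stop() callback could locate and lock a destination runqueue for whichever class owned the task being pushed; with the stopper-based push paths deleted above, nothing in this class would call it any more.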