  ..    ..
 272   272   static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
 273   273   {
 274   274   	/* Try to pull RT tasks here if we lower this rq's prio */
 275         -	return rq->online && rq->rt.highest_prio.curr > prev->prio;
       275   +	return rq->rt.highest_prio.curr > prev->prio;
 276   276   }
 277   277   
 278   278   static inline int rt_overloaded(struct rq *rq)
  ..    ..
1761  1761   static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
1762  1762   {
1763  1763   	if (!task_running(rq, p) &&
1764         -	    cpumask_test_cpu(cpu, &p->cpus_mask))
      1764   +	    cpumask_test_cpu(cpu, p->cpus_ptr))
1765  1765   		return 1;
1766  1766   
1767  1767   	return 0;
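The `pick_rt_task()` hunk above switches the affinity test from `&p->cpus_mask` to `p->cpus_ptr`. On mainline kernels `cpus_ptr` normally points at the task's own `cpus_mask`, so the two checks agree unless something (such as `migrate_disable()` in the PREEMPT_RT patches) has temporarily pointed `cpus_ptr` at a narrower mask. The userspace sketch below is only an analogy for that membership test, using the glibc `cpu_set_t` API rather than kernel cpumasks; `candidate` and `allowed` are invented names for illustration.

```c
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
	cpu_set_t allowed;	/* stand-in for the task's allowed-CPU mask */
	int candidate = 2;	/* stand-in for the CPU we are considering */

	CPU_ZERO(&allowed);
	CPU_SET(0, &allowed);
	CPU_SET(2, &allowed);

	/* Rough analogue of cpumask_test_cpu(cpu, p->cpus_ptr). */
	if (CPU_ISSET(candidate, &allowed))
		printf("CPU %d is inside the allowed mask\n", candidate);
	else
		printf("CPU %d is not allowed\n", candidate);

	return 0;
}
```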
  ..    ..
1864  1864   				return this_cpu;
1865  1865   			}
1866  1866   
1867         -			best_cpu = cpumask_any_and_distribute(lowest_mask,
1868         -							      sched_domain_span(sd));
      1867   +			best_cpu = cpumask_first_and(lowest_mask,
      1868   +						     sched_domain_span(sd));
1869  1869   			if (best_cpu < nr_cpu_ids) {
1870  1870   				rcu_read_unlock();
1871  1871   				return best_cpu;
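In the `find_lowest_rq()` hunk above, `cpumask_any_and_distribute()` is replaced by `cpumask_first_and()`. The first form spreads successive picks across the intersection of the two masks by remembering where the previous pick stopped, while `cpumask_first_and()` always returns the lowest-numbered CPU in the intersection. Below is a minimal userspace sketch of that behavioural difference, with CPU masks modelled as plain bitmaps; the function names and the static `next` cursor are illustrative, not the kernel implementation.

```c
#include <stdio.h>

/* Lowest set bit of (a & b), or nbits if the intersection is empty. */
static int first_and(unsigned long a, unsigned long b, int nbits)
{
	unsigned long both = a & b;

	for (int cpu = 0; cpu < nbits; cpu++)
		if (both & (1UL << cpu))
			return cpu;
	return nbits;			/* plays the role of nr_cpu_ids */
}

/* Like first_and(), but rotate the starting point so repeated calls
 * distribute their answers across the intersection. */
static int any_and_distribute(unsigned long a, unsigned long b, int nbits)
{
	static int next;		/* remembered between calls */
	unsigned long both = a & b;

	for (int i = 0; i < nbits; i++) {
		int cpu = (next + i) % nbits;

		if (both & (1UL << cpu)) {
			next = cpu + 1;
			return cpu;
		}
	}
	return nbits;
}

int main(void)
{
	unsigned long lowest_mask = 0xf;	/* CPUs 0-3 are low-priority candidates */
	unsigned long span = 0xe;		/* the sched domain spans CPUs 1-3 */

	for (int i = 0; i < 4; i++)
		printf("first_and: %d   any_and_distribute: %d\n",
		       first_and(lowest_mask, span, 4),
		       any_and_distribute(lowest_mask, span, 4));
	return 0;
}
```

The distributing variant was presumably there so that concurrent pushes do not all dogpile the lowest-numbered eligible CPU; the reverted code accepts that and takes the first match.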
  ..    ..
1882  1882   	if (this_cpu != -1)
1883  1883   		return this_cpu;
1884  1884   
1885         -	cpu = cpumask_any_distribute(lowest_mask);
      1885   +	cpu = cpumask_any(lowest_mask);
1886  1886   	if (cpu < nr_cpu_ids)
1887  1887   		return cpu;
1888  1888   
  ..    ..
1923  1923   			 * Also make sure that it wasn't scheduled on its rq.
1924  1924   			 */
1925  1925   			if (unlikely(task_rq(task) != rq ||
1926         -				     !cpumask_test_cpu(lowest_rq->cpu, &task->cpus_mask) ||
      1926   +				     !cpumask_test_cpu(lowest_rq->cpu, task->cpus_ptr) ||
1927  1927   				     task_running(rq, task) ||
1928  1928   				     !rt_task(task) ||
1929  1929   				     !task_on_rq_queued(task))) {
  ..    ..
1971  1971    * running task can migrate over to a CPU that is running a task
1972  1972    * of lesser priority.
1973  1973    */
1974         -static int push_rt_task(struct rq *rq, bool pull)
      1974   +static int push_rt_task(struct rq *rq)
1975  1975   {
1976  1976   	struct task_struct *next_task;
1977  1977   	struct rq *lowest_rq;
  ..    ..
1985  1985   		return 0;
1986  1986   
1987  1987   retry:
1988         -	if (is_migration_disabled(next_task)) {
1989         -		struct task_struct *push_task = NULL;
1990         -		int cpu;
1991         -
1992         -		if (!pull)
1993         -			return 0;
1994         -
1995         -		trace_sched_migrate_pull_tp(next_task);
1996         -
1997         -		if (rq->push_busy)
1998         -			return 0;
1999         -
2000         -		cpu = find_lowest_rq(rq->curr);
2001         -		if (cpu == -1 || cpu == rq->cpu)
2002         -			return 0;
2003         -
2004         -		/*
2005         -		 * Given we found a CPU with lower priority than @next_task,
2006         -		 * therefore it should be running. However we cannot migrate it
2007         -		 * to this other CPU, instead attempt to push the current
2008         -		 * running task on this CPU away.
2009         -		 */
2010         -		push_task = get_push_task(rq);
2011         -		if (push_task) {
2012         -			raw_spin_unlock(&rq->lock);
2013         -			stop_one_cpu_nowait(rq->cpu, push_cpu_stop,
2014         -					    push_task, &rq->push_work);
2015         -			raw_spin_lock(&rq->lock);
2016         -		}
2017         -
2018         -		return 0;
2019         -	}
2020         -
2021  1988   	if (WARN_ON(next_task == rq->curr))
2022  1989   		return 0;
2023  1990   
  ..    ..
2072  2039   	deactivate_task(rq, next_task, 0);
2073  2040   	set_task_cpu(next_task, lowest_rq->cpu);
2074  2041   	activate_task(lowest_rq, next_task, 0);
2075         -	resched_curr(lowest_rq);
2076  2042   	ret = 1;
2077  2043   
      2044   +	resched_curr(lowest_rq);
      2045   +
2078  2046   	double_unlock_balance(rq, lowest_rq);
      2047   +
2079  2048   out:
2080  2049   	put_task_struct(next_task);
2081  2050   
  ..    ..
2085  2054   static void push_rt_tasks(struct rq *rq)
2086  2055   {
2087  2056   	/* push_rt_task will return true if it moved an RT */
2088         -	while (push_rt_task(rq, false))
      2057   +	while (push_rt_task(rq))
2089  2058   		;
2090  2059   }
2091  2060   
  ..    ..
2238  2207   	 */
2239  2208   	if (has_pushable_tasks(rq)) {
2240  2209   		raw_spin_lock(&rq->lock);
2241         -		while (push_rt_task(rq, true))
2242         -			;
      2210   +		push_rt_tasks(rq);
2243  2211   		raw_spin_unlock(&rq->lock);
2244  2212   	}
2245  2213   
  ..    ..
2264  2232   {
2265  2233   	int this_cpu = this_rq->cpu, cpu;
2266  2234   	bool resched = false;
2267         -	struct task_struct *p, *push_task;
      2235   +	struct task_struct *p;
2268  2236   	struct rq *src_rq;
2269  2237   	int rt_overload_count = rt_overloaded(this_rq);
2270  2238   
  ..    ..
2311  2279   		 * double_lock_balance, and another CPU could
2312  2280   		 * alter this_rq
2313  2281   		 */
2314         -		push_task = NULL;
2315  2282   		double_lock_balance(this_rq, src_rq);
2316  2283   
2317  2284   		/*
  ..    ..
2339  2306   			if (p->prio < src_rq->curr->prio)
2340  2307   				goto skip;
2341  2308   
2342         -			if (is_migration_disabled(p)) {
2343         -				trace_sched_migrate_pull_tp(p);
2344         -				push_task = get_push_task(src_rq);
2345         -			} else {
2346         -				deactivate_task(src_rq, p, 0);
2347         -				set_task_cpu(p, this_cpu);
2348         -				activate_task(this_rq, p, 0);
2349         -				resched = true;
2350         -			}
      2309   +			resched = true;
      2310   +
      2311   +			deactivate_task(src_rq, p, 0);
      2312   +			set_task_cpu(p, this_cpu);
      2313   +			activate_task(this_rq, p, 0);
2351  2314   			/*
2352  2315   			 * We continue with the search, just in
2353  2316   			 * case there's an even higher prio task
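The pull hunk above reduces the loop body to the plain case: once a pushable task `p` from an overloaded CPU has passed the priority checks, it is simply deactivated there, re-homed with `set_task_cpu()`, and activated here, with one reschedule at the end. The checks rely on the kernel's inverted priority scale, where a numerically lower `->prio` means a more important task: `p` is skipped if it already outranks what its own CPU is running (it will get that CPU shortly anyway), and, via the surrounding test not shown in this hunk, it is only considered at all if it beats the best RT task already queued here. A hedged toy model of that decision; the helper name and parameters are invented for illustration:

```c
#include <stdbool.h>
#include <stdio.h>

/*
 * Lower value = higher priority, as with task_struct::prio.
 * p_prio:          priority of the pushable task on the overloaded CPU
 * this_rq_highest: best RT priority currently queued on the pulling CPU
 * src_curr_prio:   priority of what the overloaded CPU is running now
 */
static bool worth_pulling(int p_prio, int this_rq_highest, int src_curr_prio)
{
	if (p_prio >= this_rq_highest)	/* would not improve the pulling CPU */
		return false;
	if (p_prio < src_curr_prio)	/* it will preempt on its own CPU soon; skip */
		return false;
	return true;
}

int main(void)
{
	/* p at prio 40 beats our best (60) but not what its own CPU runs (20),
	 * so it is stuck there and worth pulling. */
	printf("%s\n", worth_pulling(40, 60, 20) ? "pull" : "skip");
	return 0;
}
```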
  ..    ..
2357  2320   		}
2358  2321   skip:
2359  2322   		double_unlock_balance(this_rq, src_rq);
2360         -
2361         -		if (push_task) {
2362         -			raw_spin_unlock(&this_rq->lock);
2363         -			stop_one_cpu_nowait(src_rq->cpu, push_cpu_stop,
2364         -					    push_task, &src_rq->push_work);
2365         -			raw_spin_lock(&this_rq->lock);
2366         -		}
2367  2323   	}
2368  2324   
2369  2325   	if (resched)
  ..    ..
2612  2568   	.rq_offline = rq_offline_rt,
2613  2569   	.task_woken = task_woken_rt,
2614  2570   	.switched_from = switched_from_rt,
2615         -	.find_lock_rq = find_lock_lowest_rq,
2616  2571   #endif
2617  2572   
2618  2573   	.task_tick = task_tick_rt,