@@ -272,7 +272,7 @@
 static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
 {
 	/* Try to pull RT tasks here if we lower this rq's prio */
-	return rq->online && rq->rt.highest_prio.curr > prev->prio;
+	return rq->rt.highest_prio.curr > prev->prio;
 }
 
 static inline int rt_overloaded(struct rq *rq)
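
A note for reading the hunk above: in the scheduler, a numerically smaller ->prio value means a higher priority, so `rt.highest_prio.curr > prev->prio` asks whether everything still queued on this rq is weaker than the task being switched away from, which is when attempting a pull makes sense. Below is a minimal userspace sketch of just that comparison; the `need_pull()` helper and the priority values are invented for illustration and are not kernel code.

```c
#include <stdbool.h>
#include <stdio.h>

/* Lower ->prio value == higher priority, following the kernel's convention. */
static bool need_pull(int highest_prio_curr, int prev_prio)
{
	/* True when the best remaining task is weaker than the departing one. */
	return highest_prio_curr > prev_prio;
}

int main(void)
{
	/* prev had prio 40; the best RT task left on the rq has prio 60. */
	printf("%d\n", need_pull(60, 40));	/* 1: worth trying to pull    */
	printf("%d\n", need_pull(20, 40));	/* 0: a stronger task remains */
	return 0;
}
```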
@@ -1696,8 +1696,7 @@
 	rt_queue_push_tasks(rq);
 }
 
-static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
-						   struct rt_rq *rt_rq)
+static struct sched_rt_entity *pick_next_rt_entity(struct rt_rq *rt_rq)
 {
 	struct rt_prio_array *array = &rt_rq->active;
 	struct sched_rt_entity *next = NULL;
@@ -1708,6 +1707,8 @@
 	BUG_ON(idx >= MAX_RT_PRIO);
 
 	queue = array->queue + idx;
+	if (SCHED_WARN_ON(list_empty(queue)))
+		return NULL;
 	next = list_entry(queue->next, struct sched_rt_entity, run_list);
 
 	return next;
@@ -1719,8 +1720,9 @@
 	struct rt_rq *rt_rq = &rq->rt;
 
 	do {
-		rt_se = pick_next_rt_entity(rq, rt_rq);
-		BUG_ON(!rt_se);
+		rt_se = pick_next_rt_entity(rt_rq);
+		if (unlikely(!rt_se))
+			return NULL;
 		rt_rq = group_rt_rq(rt_se);
 	} while (rt_rq);
 
@@ -1761,7 +1763,7 @@
 static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
 {
 	if (!task_running(rq, p) &&
-	    cpumask_test_cpu(cpu, &p->cpus_mask))
+	    cpumask_test_cpu(cpu, p->cpus_ptr))
 		return 1;
 
 	return 0;
@@ -1864,8 +1866,8 @@
 				return this_cpu;
 			}
 
-			best_cpu = cpumask_any_and_distribute(lowest_mask,
-							      sched_domain_span(sd));
+			best_cpu = cpumask_first_and(lowest_mask,
+						     sched_domain_span(sd));
 			if (best_cpu < nr_cpu_ids) {
 				rcu_read_unlock();
 				return best_cpu;
@@ -1882,7 +1884,7 @@
 	if (this_cpu != -1)
 		return this_cpu;
 
-	cpu = cpumask_any_distribute(lowest_mask);
+	cpu = cpumask_any(lowest_mask);
 	if (cpu < nr_cpu_ids)
 		return cpu;
 
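
On the two cpumask hunks above: cpumask_first_and() deterministically returns the lowest-numbered CPU set in both masks, and cpumask_any() has traditionally been a plain first-set-bit pick as well, whereas the *_distribute() variants being removed rotate their starting point to spread successive selections across CPUs. Below is a small userspace sketch of the first-CPU-in-the-intersection semantics only; it does not use the kernel cpumask API and the mask values are made up.

```c
#include <stdio.h>

/* Return the lowest-numbered bit set in both masks, or -1 if none. */
static int first_and(unsigned long a, unsigned long b)
{
	unsigned long both = a & b;

	for (unsigned int cpu = 0; cpu < 8 * sizeof(both); cpu++)
		if (both & (1UL << cpu))
			return cpu;
	return -1;	/* no CPU present in both masks */
}

int main(void)
{
	unsigned long lowest_mask = 0x2c;	/* CPUs 2, 3, 5 */
	unsigned long domain_span = 0x38;	/* CPUs 3, 4, 5 */

	/* Always picks CPU 3, the lowest CPU present in both masks. */
	printf("picked cpu %d\n", first_and(lowest_mask, domain_span));
	return 0;
}
```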
@@ -1923,7 +1925,7 @@
 			 * Also make sure that it wasn't scheduled on its rq.
 			 */
 			if (unlikely(task_rq(task) != rq ||
-				     !cpumask_test_cpu(lowest_rq->cpu, &task->cpus_mask) ||
+				     !cpumask_test_cpu(lowest_rq->cpu, task->cpus_ptr) ||
 				     task_running(rq, task) ||
 				     !rt_task(task) ||
 				     !task_on_rq_queued(task))) {
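
The affinity checks above and in pick_rt_task() earlier now read the mask through p->cpus_ptr rather than taking the address of p->cpus_mask. The pointer normally just aliases the task's own cpus_mask, but it can be pointed at a more restrictive mask while a task's affinity is temporarily limited, so code that goes through the pointer always sees the effective affinity. Below is a minimal userspace sketch of that aliasing pattern; struct fake_task and can_run_on() are invented names and this is not kernel code.

```c
#include <stdbool.h>
#include <stdio.h>

struct fake_task {
	unsigned long cpus_mask;	/* affinity chosen by the user        */
	const unsigned long *cpus_ptr;	/* mask the scheduler actually reads */
};

/* Reads through the pointer, like cpumask_test_cpu(cpu, p->cpus_ptr). */
static bool can_run_on(const struct fake_task *p, int cpu)
{
	return *p->cpus_ptr & (1UL << cpu);
}

int main(void)
{
	struct fake_task p = { .cpus_mask = 0xf };	/* CPUs 0-3 allowed */
	unsigned long pinned = 0x1;			/* temporary override: CPU 0 only */

	p.cpus_ptr = &p.cpus_mask;	/* normal case: pointer aliases the mask */
	printf("cpu2 allowed: %d\n", can_run_on(&p, 2));	/* 1 */

	p.cpus_ptr = &pinned;		/* restriction in effect */
	printf("cpu2 allowed: %d\n", can_run_on(&p, 2));	/* 0 */
	return 0;
}
```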
@@ -1971,7 +1973,7 @@
  * running task can migrate over to a CPU that is running a task
  * of lesser priority.
  */
-static int push_rt_task(struct rq *rq, bool pull)
+static int push_rt_task(struct rq *rq)
 {
 	struct task_struct *next_task;
 	struct rq *lowest_rq;
@@ -1985,39 +1987,6 @@
 		return 0;
 
 retry:
-	if (is_migration_disabled(next_task)) {
-		struct task_struct *push_task = NULL;
-		int cpu;
-
-		if (!pull)
-			return 0;
-
-		trace_sched_migrate_pull_tp(next_task);
-
-		if (rq->push_busy)
-			return 0;
-
-		cpu = find_lowest_rq(rq->curr);
-		if (cpu == -1 || cpu == rq->cpu)
-			return 0;
-
-		/*
-		 * Given we found a CPU with lower priority than @next_task,
-		 * therefore it should be running. However we cannot migrate it
-		 * to this other CPU, instead attempt to push the current
-		 * running task on this CPU away.
-		 */
-		push_task = get_push_task(rq);
-		if (push_task) {
-			raw_spin_unlock(&rq->lock);
-			stop_one_cpu_nowait(rq->cpu, push_cpu_stop,
-					    push_task, &rq->push_work);
-			raw_spin_lock(&rq->lock);
-		}
-
-		return 0;
-	}
-
 	if (WARN_ON(next_task == rq->curr))
 		return 0;
 
@@ -2072,10 +2041,12 @@
 	deactivate_task(rq, next_task, 0);
 	set_task_cpu(next_task, lowest_rq->cpu);
 	activate_task(lowest_rq, next_task, 0);
-	resched_curr(lowest_rq);
 	ret = 1;
 
+	resched_curr(lowest_rq);
+
 	double_unlock_balance(rq, lowest_rq);
+
 out:
 	put_task_struct(next_task);
 
@@ -2085,6 +2056,6 @@
 static void push_rt_tasks(struct rq *rq)
 {
 	/* push_rt_task will return true if it moved an RT */
-	while (push_rt_task(rq, false))
+	while (push_rt_task(rq))
 		;
 }
 
@@ -2238,8 +2209,7 @@
 	 */
 	if (has_pushable_tasks(rq)) {
 		raw_spin_lock(&rq->lock);
-		while (push_rt_task(rq, true))
-			;
+		push_rt_tasks(rq);
 		raw_spin_unlock(&rq->lock);
 	}
 
@@ -2264,7 +2234,7 @@
 {
 	int this_cpu = this_rq->cpu, cpu;
 	bool resched = false;
-	struct task_struct *p, *push_task;
+	struct task_struct *p;
 	struct rq *src_rq;
 	int rt_overload_count = rt_overloaded(this_rq);
 
@@ -2311,7 +2281,6 @@
 		 * double_lock_balance, and another CPU could
 		 * alter this_rq
 		 */
-		push_task = NULL;
 		double_lock_balance(this_rq, src_rq);
 
 		/*
@@ -2339,15 +2308,11 @@
 			if (p->prio < src_rq->curr->prio)
 				goto skip;
 
-			if (is_migration_disabled(p)) {
-				trace_sched_migrate_pull_tp(p);
-				push_task = get_push_task(src_rq);
-			} else {
-				deactivate_task(src_rq, p, 0);
-				set_task_cpu(p, this_cpu);
-				activate_task(this_rq, p, 0);
-				resched = true;
-			}
+			resched = true;
+
+			deactivate_task(src_rq, p, 0);
+			set_task_cpu(p, this_cpu);
+			activate_task(this_rq, p, 0);
 			/*
 			 * We continue with the search, just in
 			 * case there's an even higher prio task
@@ -2357,13 +2322,6 @@
 		}
 skip:
 		double_unlock_balance(this_rq, src_rq);
-
-		if (push_task) {
-			raw_spin_unlock(&this_rq->lock);
-			stop_one_cpu_nowait(src_rq->cpu, push_cpu_stop,
-					    push_task, &src_rq->push_work);
-			raw_spin_lock(&this_rq->lock);
-		}
 	}
 
 	if (resched)
@@ -2612,7 +2570,6 @@
 	.rq_offline = rq_offline_rt,
 	.task_woken = task_woken_rt,
 	.switched_from = switched_from_rt,
-	.find_lock_rq = find_lock_lowest_rq,
 #endif
 
 	.task_tick = task_tick_rt,