@@ -565,7 +565,7 @@
 
 static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
 {
-	return rq->online && dl_task(prev);
+	return dl_task(prev);
 }
 
 static DEFINE_PER_CPU(struct callback_head, dl_push_head);
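The dropped rq->online check kept need_pull_dl_task() from triggering pulls onto a runqueue that is going offline. For reference, a simplified sketch of where that flag is maintained (modelled on set_rq_online() in kernel/sched/core.c; treat the details as version-dependent):

static void set_rq_online(struct rq *rq)
{
	if (!rq->online) {
		const struct sched_class *class;

		cpumask_set_cpu(rq->cpu, rq->rd->online);
		rq->online = 1;

		/* Let every scheduling class react, e.g. rq_online_dl(). */
		for_each_class(class) {
			if (class->rq_online)
				class->rq_online(rq);
		}
	}
}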
@@ -1922,7 +1922,7 @@
 static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
 {
 	if (!task_running(rq, p) &&
-	    cpumask_test_cpu(cpu, &p->cpus_mask))
+	    cpumask_test_cpu(cpu, p->cpus_ptr))
 		return 1;
 	return 0;
 }
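This hunk (and the matching one in find_lock_later_rq() further down) switches the affinity test back from the task's raw cpus_mask to the cpus_ptr indirection. The two usually agree: cpus_ptr normally points at the task's own cpus_mask, and the pointer exists only so it can be temporarily aimed at a narrower mask. A minimal sketch of the relationship, simplified from include/linux/sched.h:

struct task_struct {
	/* ... */
	const cpumask_t		*cpus_ptr;	/* what the scheduler consults */
	cpumask_t		cpus_mask;	/* the user-visible affinity */
	/* ... */
};

/* dup_task_struct() re-aims the pointer at the task's own mask: */
/*	tsk->cpus_ptr = &tsk->cpus_mask; */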
@@ -2012,8 +2012,8 @@
 				return this_cpu;
 			}
 
-			best_cpu = cpumask_any_and_distribute(later_mask,
-							      sched_domain_span(sd));
+			best_cpu = cpumask_first_and(later_mask,
+						     sched_domain_span(sd));
 			/*
 			 * Last chance: if a CPU being in both later_mask
 			 * and current sd span is valid, that becomes our
@@ -2035,7 +2035,7 @@
 	if (this_cpu != -1)
 		return this_cpu;
 
-	cpu = cpumask_any_distribute(later_mask);
+	cpu = cpumask_any(later_mask);
 	if (cpu < nr_cpu_ids)
 		return cpu;
 
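Both replacements in find_later_rq() trade the _distribute variants for plain first-fit picks. The _distribute helpers differ only in their starting point: they remember the last bit handed out and resume the scan after it, spreading choices across eligible CPUs instead of always favouring the lowest-numbered one. A hedged sketch of the idea (illustrative names only; the kernel's cpumask_any_distribute() in lib/cpumask.c keeps its cursor in a per-CPU variable):

static int distribute_prev = -1;	/* illustrative cursor, not the kernel's */

static int any_distribute_sketch(const struct cpumask *srcp)
{
	/* Resume scanning after the bit returned last time... */
	int cpu = cpumask_next(distribute_prev, srcp);

	/* ...wrapping to the first set bit when we run off the end. */
	if (cpu >= nr_cpu_ids)
		cpu = cpumask_first(srcp);

	if (cpu < nr_cpu_ids)
		distribute_prev = cpu;

	return cpu;
}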
@@ -2072,7 +2072,7 @@
 		/* Retry if something changed. */
 		if (double_lock_balance(rq, later_rq)) {
 			if (unlikely(task_rq(task) != rq ||
-				     !cpumask_test_cpu(later_rq->cpu, &task->cpus_mask) ||
+				     !cpumask_test_cpu(later_rq->cpu, task->cpus_ptr) ||
 				     task_running(rq, task) ||
 				     !dl_task(task) ||
 				     !task_on_rq_queued(task))) {
@@ -2139,9 +2139,6 @@
 		return 0;
 
 retry:
-	if (is_migration_disabled(next_task))
-		return 0;
-
 	if (WARN_ON(next_task == rq->curr))
 		return 0;
 
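The deleted early-out made push_dl_task() bail when the candidate sat inside a migrate_disable() section. The predicate itself is only a flag test on the task; roughly (a sketch of the usual shape, not necessarily this tree's exact definition):

static inline bool is_migration_disabled(struct task_struct *p)
{
#ifdef CONFIG_SMP
	/* Non-zero while p is inside a migrate_disable() section. */
	return p->migration_disabled;
#else
	return false;
#endif
}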
@@ -2219,7 +2216,7 @@
 static void pull_dl_task(struct rq *this_rq)
 {
 	int this_cpu = this_rq->cpu, cpu;
-	struct task_struct *p, *push_task;
+	struct task_struct *p;
 	bool resched = false;
 	struct rq *src_rq;
 	u64 dmin = LONG_MAX;
@@ -2249,7 +2246,6 @@
 			continue;
 
 		/* Might drop this_rq->lock */
-		push_task = NULL;
 		double_lock_balance(this_rq, src_rq);
 
 		/*
@@ -2281,28 +2277,17 @@
 					   src_rq->curr->dl.deadline))
 				goto skip;
 
-			if (is_migration_disabled(p)) {
-				trace_sched_migrate_pull_tp(p);
-				push_task = get_push_task(src_rq);
-			} else {
-				deactivate_task(src_rq, p, 0);
-				set_task_cpu(p, this_cpu);
-				activate_task(this_rq, p, 0);
-				dmin = p->dl.deadline;
-				resched = true;
-			}
+			resched = true;
+
+			deactivate_task(src_rq, p, 0);
+			set_task_cpu(p, this_cpu);
+			activate_task(this_rq, p, 0);
+			dmin = p->dl.deadline;
 
 			/* Is there any other task even earlier? */
 		}
 skip:
 		double_unlock_balance(this_rq, src_rq);
-
-		if (push_task) {
-			raw_spin_unlock(&this_rq->lock);
-			stop_one_cpu_nowait(src_rq->cpu, push_cpu_stop,
-					    push_task, &src_rq->push_work);
-			raw_spin_lock(&this_rq->lock);
-		}
 	}
 
 	if (resched)
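The removed branch handled the case where the best pull candidate was pinned by migrate_disable(): rather than pulling p, it queued stopper work on the source CPU so that src_rq would push its own current task away, letting p run where it is. The helper it used grabs a reference on src_rq->curr and marks the runqueue busy; a simplified sketch, modelled on get_push_task() from kernel/sched/sched.h in trees that carry this machinery:

static inline struct task_struct *get_push_task(struct rq *rq)
{
	struct task_struct *p = rq->curr;

	lockdep_assert_held(&rq->lock);

	if (rq->push_busy)		/* a push is already in flight */
		return NULL;

	if (p->nr_cpus_allowed == 1)	/* curr has nowhere else to go */
		return NULL;

	rq->push_busy = true;
	return get_task_struct(p);	/* reference dropped by push_cpu_stop() */
}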
@@ -2326,8 +2311,7 @@
 }
 
 static void set_cpus_allowed_dl(struct task_struct *p,
-				const struct cpumask *new_mask,
-				u32 flags)
+				const struct cpumask *new_mask)
 {
 	struct root_domain *src_rd;
 	struct rq *rq;
@@ -2356,7 +2340,7 @@
 		raw_spin_unlock(&src_dl_b->lock);
 	}
 
-	set_cpus_allowed_common(p, new_mask, flags);
+	set_cpus_allowed_common(p, new_mask);
 }
 
 /* Assumes rq->lock is held */
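Together with the signature change above, this restores the two-argument form of the affinity path. The u32 flags parameter existed so the core could tell the classes why a mask was changing (an ordinary sched_setaffinity() versus migrate_disable()-driven updates, via SCA_* flags in trees that have them). The corresponding sched_class hook in each form, sketched:

/* Form removed by this hunk: */
void (*set_cpus_allowed)(struct task_struct *p,
			 const struct cpumask *newmask, u32 flags);

/* Form restored: */
void (*set_cpus_allowed)(struct task_struct *p,
			 const struct cpumask *newmask);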
@@ -2554,7 +2538,6 @@
 	.rq_online		= rq_online_dl,
 	.rq_offline		= rq_offline_dl,
 	.task_woken		= task_woken_dl,
-	.find_lock_rq		= find_lock_later_rq,
 #endif
 
 	.task_tick		= task_tick_dl,
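find_lock_rq was the class-generic "find and lock a runqueue this task can move to" method that the stopper-based push path called; deadline wired it to its existing find_lock_later_rq() helper. With that machinery gone the hook has no caller, so the entry is dropped and find_lock_later_rq() remains a deadline-internal function. Its slot in struct sched_class is simply (sketch):

struct rq *(*find_lock_rq)(struct task_struct *p, struct rq *rq);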