@@ -17,6 +17,7 @@
  */
 #include "sched.h"
 #include "pelt.h"
+#include <linux/cpuset.h>
 
 struct dl_bandwidth def_dl_bandwidth;
 
@@ -565,7 +566,7 @@
 
 static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
 {
-	return rq->online && dl_task(prev);
+	return dl_task(prev);
 }
 
 static DEFINE_PER_CPU(struct callback_head, dl_push_head);
@@ -1847,8 +1848,7 @@
 	deadline_queue_push_tasks(rq);
 }
 
-static struct sched_dl_entity *pick_next_dl_entity(struct rq *rq,
-						   struct dl_rq *dl_rq)
+static struct sched_dl_entity *pick_next_dl_entity(struct dl_rq *dl_rq)
 {
 	struct rb_node *left = rb_first_cached(&dl_rq->root);
 
@@ -1867,7 +1867,7 @@
 	if (!sched_dl_runnable(rq))
 		return NULL;
 
-	dl_se = pick_next_dl_entity(rq, dl_rq);
+	dl_se = pick_next_dl_entity(dl_rq);
 	BUG_ON(!dl_se);
 	p = dl_task_of(dl_se);
 	set_next_task_dl(rq, p, true);
@@ -1922,7 +1922,7 @@
 static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
 {
 	if (!task_running(rq, p) &&
-	    cpumask_test_cpu(cpu, &p->cpus_mask))
+	    cpumask_test_cpu(cpu, p->cpus_ptr))
 		return 1;
 	return 0;
 }
@@ -2012,8 +2012,8 @@
 				return this_cpu;
 			}
 
-			best_cpu = cpumask_any_and_distribute(later_mask,
-							      sched_domain_span(sd));
+			best_cpu = cpumask_first_and(later_mask,
+						     sched_domain_span(sd));
 			/*
 			 * Last chance: if a CPU being in both later_mask
 			 * and current sd span is valid, that becomes our
@@ -2035,7 +2035,7 @@
 	if (this_cpu != -1)
 		return this_cpu;
 
-	cpu = cpumask_any_distribute(later_mask);
+	cpu = cpumask_any(later_mask);
 	if (cpu < nr_cpu_ids)
 		return cpu;
 
@@ -2072,7 +2072,7 @@
 		/* Retry if something changed. */
 		if (double_lock_balance(rq, later_rq)) {
 			if (unlikely(task_rq(task) != rq ||
-				     !cpumask_test_cpu(later_rq->cpu, &task->cpus_mask) ||
+				     !cpumask_test_cpu(later_rq->cpu, task->cpus_ptr) ||
 				     task_running(rq, task) ||
 				     !dl_task(task) ||
 				     !task_on_rq_queued(task))) {
@@ -2139,9 +2139,6 @@
 		return 0;
 
 retry:
-	if (is_migration_disabled(next_task))
-		return 0;
-
 	if (WARN_ON(next_task == rq->curr))
 		return 0;
 
@@ -2219,7 +2216,7 @@
 static void pull_dl_task(struct rq *this_rq)
 {
 	int this_cpu = this_rq->cpu, cpu;
-	struct task_struct *p, *push_task;
+	struct task_struct *p;
 	bool resched = false;
 	struct rq *src_rq;
 	u64 dmin = LONG_MAX;
@@ -2249,7 +2246,6 @@
 			continue;
 
 		/* Might drop this_rq->lock */
-		push_task = NULL;
 		double_lock_balance(this_rq, src_rq);
 
 		/*
@@ -2281,28 +2277,17 @@
 					   src_rq->curr->dl.deadline))
 				goto skip;
 
-			if (is_migration_disabled(p)) {
-				trace_sched_migrate_pull_tp(p);
-				push_task = get_push_task(src_rq);
-			} else {
-				deactivate_task(src_rq, p, 0);
-				set_task_cpu(p, this_cpu);
-				activate_task(this_rq, p, 0);
-				dmin = p->dl.deadline;
-				resched = true;
-			}
+			resched = true;
+
+			deactivate_task(src_rq, p, 0);
+			set_task_cpu(p, this_cpu);
+			activate_task(this_rq, p, 0);
+			dmin = p->dl.deadline;
 
 			/* Is there any other task even earlier? */
 		}
 skip:
 		double_unlock_balance(this_rq, src_rq);
-
-		if (push_task) {
-			raw_spin_unlock(&this_rq->lock);
-			stop_one_cpu_nowait(src_rq->cpu, push_cpu_stop,
-					    push_task, &src_rq->push_work);
-			raw_spin_lock(&this_rq->lock);
-		}
 	}
 
 	if (resched)
@@ -2326,8 +2311,7 @@
 }
 
 static void set_cpus_allowed_dl(struct task_struct *p,
-				const struct cpumask *new_mask,
-				u32 flags)
+				const struct cpumask *new_mask)
 {
 	struct root_domain *src_rd;
 	struct rq *rq;
@@ -2356,7 +2340,7 @@
 		raw_spin_unlock(&src_dl_b->lock);
 	}
 
-	set_cpus_allowed_common(p, new_mask, flags);
+	set_cpus_allowed_common(p, new_mask);
 }
 
 /* Assumes rq->lock is held */
@@ -2437,6 +2421,12 @@
 	if (task_on_rq_queued(p) && p->dl.dl_runtime)
 		task_non_contending(p);
 
+	/*
+	 * In case a task is setscheduled out from SCHED_DEADLINE we need to
+	 * keep track of that on its cpuset (for correct bandwidth tracking).
+	 */
+	dec_dl_tasks_cs(p);
+
 	if (!task_on_rq_queued(p)) {
 		/*
 		 * Inactive timer is armed. However, p is leaving DEADLINE and
@@ -2476,6 +2466,12 @@
 {
 	if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
 		put_task_struct(p);
+
+	/*
+	 * In case a task is setscheduled to SCHED_DEADLINE we need to keep
+	 * track of that on its cpuset (for correct bandwidth tracking).
+	 */
+	inc_dl_tasks_cs(p);
 
 	/* If p is not queued we will update its parameters at next wakeup. */
 	if (!task_on_rq_queued(p)) {
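Note on the two hunks above: dec_dl_tasks_cs() and inc_dl_tasks_cs() come from the newly included <linux/cpuset.h> and are expected to maintain a per-cpuset count of SCHED_DEADLINE tasks, so that cpuset can account DEADLINE bandwidth when its CPU set changes. The cpuset side is not part of this file; below is a minimal sketch of what such helpers could look like, assuming a nr_deadline_tasks counter on struct cpuset and the usual task_cs() lookup.

```c
/*
 * Sketch only -- the real helpers live on the cpuset side, not in this file.
 * Assumes struct cpuset carries an nr_deadline_tasks counter and that the
 * caller holds whatever locking cpuset requires to update it.
 */
void inc_dl_tasks_cs(struct task_struct *p)
{
	struct cpuset *cs = task_cs(p);	/* cpuset the task belongs to */

	cs->nr_deadline_tasks++;
}

void dec_dl_tasks_cs(struct task_struct *p)
{
	struct cpuset *cs = task_cs(p);

	cs->nr_deadline_tasks--;
}
```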
@@ -2554,7 +2550,6 @@
 	.rq_online = rq_online_dl,
 	.rq_offline = rq_offline_dl,
 	.task_woken = task_woken_dl,
-	.find_lock_rq = find_lock_later_rq,
 #endif
 
 	.task_tick = task_tick_dl,
@@ -2866,26 +2861,38 @@
 	return ret;
 }
 
-int dl_cpu_busy(int cpu, struct task_struct *p)
+enum dl_bw_request {
+	dl_bw_req_check_overflow = 0,
+	dl_bw_req_alloc,
+	dl_bw_req_free
+};
+
+static int dl_bw_manage(enum dl_bw_request req, int cpu, u64 dl_bw)
 {
-	unsigned long flags, cap;
+	unsigned long flags;
 	struct dl_bw *dl_b;
-	bool overflow;
+	bool overflow = 0;
 
 	rcu_read_lock_sched();
 	dl_b = dl_bw_of(cpu);
 	raw_spin_lock_irqsave(&dl_b->lock, flags);
-	cap = dl_bw_capacity(cpu);
-	overflow = __dl_overflow(dl_b, cap, 0, p ? p->dl.dl_bw : 0);
 
-	if (!overflow && p) {
-		/*
-		 * We reserve space for this task in the destination
-		 * root_domain, as we can't fail after this point.
-		 * We will free resources in the source root_domain
-		 * later on (see set_cpus_allowed_dl()).
-		 */
-		__dl_add(dl_b, p->dl.dl_bw, dl_bw_cpus(cpu));
+	if (req == dl_bw_req_free) {
+		__dl_sub(dl_b, dl_bw, dl_bw_cpus(cpu));
+	} else {
+		unsigned long cap = dl_bw_capacity(cpu);
+
+		overflow = __dl_overflow(dl_b, cap, 0, dl_bw);
+
+		if (req == dl_bw_req_alloc && !overflow) {
+			/*
+			 * We reserve space in the destination
+			 * root_domain, as we can't fail after this point.
+			 * We will free resources in the source root_domain
+			 * later on (see set_cpus_allowed_dl()).
+			 */
+			__dl_add(dl_b, dl_bw, dl_bw_cpus(cpu));
+		}
 	}
 
 	raw_spin_unlock_irqrestore(&dl_b->lock, flags);
@@ -2893,6 +2900,21 @@
 
 	return overflow ? -EBUSY : 0;
 }
+
+int dl_bw_check_overflow(int cpu)
+{
+	return dl_bw_manage(dl_bw_req_check_overflow, cpu, 0);
+}
+
+int dl_bw_alloc(int cpu, u64 dl_bw)
+{
+	return dl_bw_manage(dl_bw_req_alloc, cpu, dl_bw);
+}
+
+void dl_bw_free(int cpu, u64 dl_bw)
+{
+	dl_bw_manage(dl_bw_req_free, cpu, dl_bw);
+}
 #endif
 
 #ifdef CONFIG_SCHED_DEBUG
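The last two hunks split the old dl_cpu_busy() into an explicit request-based API: dl_bw_check_overflow() only tests whether the current DEADLINE bandwidth fits, dl_bw_alloc() reserves dl_bw on a CPU's root domain (returning -EBUSY on overflow), and dl_bw_free() returns a previous reservation. A hedged sketch of how a caller outside this file might pair them; the function and the move_task_to_cpu() step are illustrative, not taken from this patch.

```c
/*
 * Illustrative caller (not from this patch): reserve DEADLINE bandwidth for
 * task 'p' on 'dest_cpu' before migrating it there, and release the
 * reservation again if the migration step fails.
 */
static int dl_reserve_and_move(struct task_struct *p, int dest_cpu)
{
	int ret;

	/* Fails with -EBUSY if p's bandwidth does not fit on dest_cpu. */
	ret = dl_bw_alloc(dest_cpu, p->dl.dl_bw);
	if (ret)
		return ret;

	ret = move_task_to_cpu(p, dest_cpu);		/* hypothetical migration step */
	if (ret)
		dl_bw_free(dest_cpu, p->dl.dl_bw);	/* roll back the reservation */

	return ret;
}
```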