@@ -17,6 +17,7 @@
  */
 #include "sched.h"
 #include "pelt.h"
+#include <linux/cpuset.h>
 
 struct dl_bandwidth def_dl_bandwidth;
 
@@ -1847,8 +1848,7 @@
         deadline_queue_push_tasks(rq);
 }
 
-static struct sched_dl_entity *pick_next_dl_entity(struct rq *rq,
-                                                   struct dl_rq *dl_rq)
+static struct sched_dl_entity *pick_next_dl_entity(struct dl_rq *dl_rq)
 {
         struct rb_node *left = rb_first_cached(&dl_rq->root);
 
@@ -1867,7 +1867,7 @@
         if (!sched_dl_runnable(rq))
                 return NULL;
 
-        dl_se = pick_next_dl_entity(rq, dl_rq);
+        dl_se = pick_next_dl_entity(dl_rq);
         BUG_ON(!dl_se);
         p = dl_task_of(dl_se);
         set_next_task_dl(rq, p, true);
@@ -2421,6 +2421,12 @@
         if (task_on_rq_queued(p) && p->dl.dl_runtime)
                 task_non_contending(p);
 
+        /*
+         * In case a task is setscheduled out from SCHED_DEADLINE we need to
+         * keep track of that on its cpuset (for correct bandwidth tracking).
+         */
+        dec_dl_tasks_cs(p);
+
         if (!task_on_rq_queued(p)) {
                 /*
                  * Inactive timer is armed. However, p is leaving DEADLINE and
@@ -2460,6 +2466,12 @@
 {
         if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
                 put_task_struct(p);
+
+        /*
+         * In case a task is setscheduled to SCHED_DEADLINE we need to keep
+         * track of that on its cpuset (for correct bandwidth tracking).
+         */
+        inc_dl_tasks_cs(p);
 
         /* If p is not queued we will update its parameters at next wakeup. */
         if (!task_on_rq_queued(p)) {
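
The two hunks above call into new cpuset-side helpers that this diff does not show. A minimal sketch of what they could look like, assuming struct cpuset gains a nr_deadline_tasks counter and task_cs() resolves a task's cpuset (both are assumptions here, not taken from this patch):

/*
 * Sketch only: assumes struct cpuset has a nr_deadline_tasks field and
 * that task_cs(p) maps a task to its cpuset; callers would need to hold
 * whatever lock (e.g. cpuset_mutex) keeps task_cs(p) stable.
 */
void inc_dl_tasks_cs(struct task_struct *p)
{
        struct cpuset *cs = task_cs(p);

        cs->nr_deadline_tasks++;
}

void dec_dl_tasks_cs(struct task_struct *p)
{
        struct cpuset *cs = task_cs(p);

        cs->nr_deadline_tasks--;
}

A bare per-cpuset counter is presumably all the bandwidth tracking needs: the checks care how many DEADLINE tasks a cpuset hosts, not which ones.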
@@ -2849,26 +2861,38 @@
         return ret;
 }
 
-int dl_cpu_busy(int cpu, struct task_struct *p)
+enum dl_bw_request {
+        dl_bw_req_check_overflow = 0,
+        dl_bw_req_alloc,
+        dl_bw_req_free
+};
+
+static int dl_bw_manage(enum dl_bw_request req, int cpu, u64 dl_bw)
 {
-        unsigned long flags, cap;
+        unsigned long flags;
         struct dl_bw *dl_b;
-        bool overflow;
+        bool overflow = 0;
 
         rcu_read_lock_sched();
         dl_b = dl_bw_of(cpu);
         raw_spin_lock_irqsave(&dl_b->lock, flags);
-        cap = dl_bw_capacity(cpu);
-        overflow = __dl_overflow(dl_b, cap, 0, p ? p->dl.dl_bw : 0);
 
-        if (!overflow && p) {
-                /*
-                 * We reserve space for this task in the destination
-                 * root_domain, as we can't fail after this point.
-                 * We will free resources in the source root_domain
-                 * later on (see set_cpus_allowed_dl()).
-                 */
-                __dl_add(dl_b, p->dl.dl_bw, dl_bw_cpus(cpu));
+        if (req == dl_bw_req_free) {
+                __dl_sub(dl_b, dl_bw, dl_bw_cpus(cpu));
+        } else {
+                unsigned long cap = dl_bw_capacity(cpu);
+
+                overflow = __dl_overflow(dl_b, cap, 0, dl_bw);
+
+                if (req == dl_bw_req_alloc && !overflow) {
+                        /*
+                         * We reserve space in the destination
+                         * root_domain, as we can't fail after this point.
+                         * We will free resources in the source root_domain
+                         * later on (see set_cpus_allowed_dl()).
+                         */
+                        __dl_add(dl_b, dl_bw, dl_bw_cpus(cpu));
+                }
         }
 
         raw_spin_unlock_irqrestore(&dl_b->lock, flags);
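
dl_bw_manage() delegates the actual admission decision to __dl_overflow(), which is defined elsewhere in the scheduler. For context, a sketch of that test as commonly implemented (cap_scale() and the -1 "no limit" convention are assumptions here, not shown in this diff):

/*
 * Sketch of the admission test: overflow when the bandwidth already
 * reserved (total_bw), minus what the caller returns (old_bw), plus
 * what it requests (new_bw), would exceed the fraction dl_b->bw of
 * the CPU capacity 'cap'. dl_b->bw == -1 means no limit is enforced.
 */
static inline bool __dl_overflow(struct dl_bw *dl_b, unsigned long cap,
                                 u64 old_bw, u64 new_bw)
{
        return dl_b->bw != -1 &&
               cap_scale(dl_b->bw, cap) < dl_b->total_bw - old_bw + new_bw;
}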
@@ -2876,6 +2900,21 @@
 
         return overflow ? -EBUSY : 0;
 }
+
+int dl_bw_check_overflow(int cpu)
+{
+        return dl_bw_manage(dl_bw_req_check_overflow, cpu, 0);
+}
+
+int dl_bw_alloc(int cpu, u64 dl_bw)
+{
+        return dl_bw_manage(dl_bw_req_alloc, cpu, dl_bw);
+}
+
+void dl_bw_free(int cpu, u64 dl_bw)
+{
+        dl_bw_manage(dl_bw_req_free, cpu, dl_bw);
+}
 #endif
 
 #ifdef CONFIG_SCHED_DEBUG
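
Taken together, the wrappers expose a dry-run check (dl_bw_check_overflow()) plus a reserve/release pair. A hypothetical caller, to show how the pair is meant to be used (every name below except dl_bw_alloc()/dl_bw_free() and p->dl.dl_bw is illustrative, not part of the patch):

/*
 * Illustrative only: reserve bandwidth for a DEADLINE task on a
 * destination CPU, then give it back if the move itself fails.
 */
static int move_dl_task_example(struct task_struct *p, int dest_cpu)
{
        int ret;

        /* Fails with -EBUSY if dest_cpu cannot absorb p's bandwidth. */
        ret = dl_bw_alloc(dest_cpu, p->dl.dl_bw);
        if (ret)
                return ret;

        ret = do_the_actual_move(p, dest_cpu);          /* hypothetical */
        if (ret)
                dl_bw_free(dest_cpu, p->dl.dl_bw);      /* undo reservation */

        return ret;
}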