.. | .. |
---|
165 | 165 | */ |
---|
166 | 166 | int use_parent_ecpus; |
---|
167 | 167 | int child_ecpus_count; |
---|
| 168 | + |
---|
| 169 | + /* |
---|
| 170 | + * number of SCHED_DEADLINE tasks attached to this cpuset, so that we |
---|
| 171 | + * know when to rebuild associated root domain bandwidth information. |
---|
| 172 | + */ |
---|
| 173 | + int nr_deadline_tasks; |
---|
| 174 | + int nr_migrate_dl_tasks; |
---|
| 175 | + u64 sum_migrate_dl_bw; |
---|
168 | 176 | }; |
---|
169 | 177 | |
---|
170 | 178 | /* |
---|
.. | .. |
---|
208 | 216 | static inline struct cpuset *parent_cs(struct cpuset *cs) |
---|
209 | 217 | { |
---|
210 | 218 | return css_cs(cs->css.parent); |
---|
| 219 | +} |
---|
| 220 | + |
---|
/*
 * Account one more SCHED_DEADLINE task in @p's cpuset, so that the root
 * domain bandwidth information can be rebuilt when sched domains change
 * (see nr_deadline_tasks in struct cpuset).
 * NOTE(review): cs->nr_deadline_tasks is updated without an explicit lock
 * here — presumably callers serialize via cpuset_mutex; confirm at the
 * scheduler-side call sites.
 */
void inc_dl_tasks_cs(struct task_struct *p)
{
	struct cpuset *cs = task_cs(p);

	cs->nr_deadline_tasks++;
}
---|
| 227 | + |
---|
| 228 | +void dec_dl_tasks_cs(struct task_struct *p) |
---|
| 229 | +{ |
---|
| 230 | + struct cpuset *cs = task_cs(p); |
---|
| 231 | + |
---|
| 232 | + cs->nr_deadline_tasks--; |
---|
211 | 233 | } |
---|
212 | 234 | |
---|
213 | 235 | /* bits in struct cpuset flags field */ |
---|
.. | .. |
---|
339 | 361 | */ |
---|
340 | 362 | |
---|
341 | 363 | static DEFINE_MUTEX(cpuset_mutex); |
---|
| 364 | + |
---|
/*
 * Take cpuset_mutex on behalf of code outside this file (the mutex itself
 * is static to this translation unit); pairs with cpuset_unlock().
 */
void cpuset_lock(void)
{
	mutex_lock(&cpuset_mutex);
}
---|
| 369 | + |
---|
/*
 * Release cpuset_mutex; counterpart of cpuset_lock() for callers outside
 * this file.
 */
void cpuset_unlock(void)
{
	mutex_unlock(&cpuset_mutex);
}
---|
| 374 | + |
---|
342 | 375 | static DEFINE_SPINLOCK(callback_lock); |
---|
343 | 376 | |
---|
344 | 377 | static struct workqueue_struct *cpuset_migrate_mm_wq; |
---|
.. | .. |
---|
925 | 958 | return ndoms; |
---|
926 | 959 | } |
---|
927 | 960 | |
---|
928 | | -static void update_tasks_root_domain(struct cpuset *cs) |
---|
| 961 | +static void dl_update_tasks_root_domain(struct cpuset *cs) |
---|
929 | 962 | { |
---|
930 | 963 | struct css_task_iter it; |
---|
931 | 964 | struct task_struct *task; |
---|
| 965 | + |
---|
| 966 | + if (cs->nr_deadline_tasks == 0) |
---|
| 967 | + return; |
---|
932 | 968 | |
---|
933 | 969 | css_task_iter_start(&cs->css, 0, &it); |
---|
934 | 970 | |
---|
.. | .. |
---|
938 | 974 | css_task_iter_end(&it); |
---|
939 | 975 | } |
---|
940 | 976 | |
---|
941 | | -static void rebuild_root_domains(void) |
---|
| 977 | +static void dl_rebuild_rd_accounting(void) |
---|
942 | 978 | { |
---|
943 | 979 | struct cpuset *cs = NULL; |
---|
944 | 980 | struct cgroup_subsys_state *pos_css; |
---|
.. | .. |
---|
966 | 1002 | |
---|
967 | 1003 | rcu_read_unlock(); |
---|
968 | 1004 | |
---|
969 | | - update_tasks_root_domain(cs); |
---|
| 1005 | + dl_update_tasks_root_domain(cs); |
---|
970 | 1006 | |
---|
971 | 1007 | rcu_read_lock(); |
---|
972 | 1008 | css_put(&cs->css); |
---|
.. | .. |
---|
980 | 1016 | { |
---|
981 | 1017 | mutex_lock(&sched_domains_mutex); |
---|
982 | 1018 | partition_sched_domains_locked(ndoms_new, doms_new, dattr_new); |
---|
983 | | - rebuild_root_domains(); |
---|
| 1019 | + dl_rebuild_rd_accounting(); |
---|
984 | 1020 | mutex_unlock(&sched_domains_mutex); |
---|
985 | 1021 | } |
---|
986 | 1022 | |
---|
.. | .. |
---|
2171 | 2207 | |
---|
2172 | 2208 | static struct cpuset *cpuset_attach_old_cs; |
---|
2173 | 2209 | |
---|
/*
 * Forget the in-flight SCHED_DEADLINE migration bookkeeping gathered by
 * cpuset_can_attach() (per-attach task count and summed bandwidth).
 * Both call sites in this file invoke it with cpuset_mutex held.
 */
static void reset_migrate_dl_data(struct cpuset *cs)
{
	cs->nr_migrate_dl_tasks = 0;
	cs->sum_migrate_dl_bw = 0;
}
---|
| 2215 | + |
---|
2174 | 2216 | /* Called by cgroups to determine if a cpuset is usable; cpuset_mutex held */ |
---|
2175 | 2217 | static int cpuset_can_attach(struct cgroup_taskset *tset) |
---|
2176 | 2218 | { |
---|
2177 | 2219 | struct cgroup_subsys_state *css; |
---|
2178 | | - struct cpuset *cs; |
---|
| 2220 | + struct cpuset *cs, *oldcs; |
---|
2179 | 2221 | struct task_struct *task; |
---|
2180 | 2222 | int ret; |
---|
2181 | 2223 | |
---|
2182 | 2224 | /* used later by cpuset_attach() */ |
---|
2183 | 2225 | cpuset_attach_old_cs = task_cs(cgroup_taskset_first(tset, &css)); |
---|
| 2226 | + oldcs = cpuset_attach_old_cs; |
---|
2184 | 2227 | cs = css_cs(css); |
---|
2185 | 2228 | |
---|
2186 | 2229 | mutex_lock(&cpuset_mutex); |
---|
.. | .. |
---|
2192 | 2235 | goto out_unlock; |
---|
2193 | 2236 | |
---|
2194 | 2237 | cgroup_taskset_for_each(task, css, tset) { |
---|
2195 | | - ret = task_can_attach(task, cs->effective_cpus); |
---|
| 2238 | + ret = task_can_attach(task); |
---|
2196 | 2239 | if (ret) |
---|
2197 | 2240 | goto out_unlock; |
---|
2198 | 2241 | ret = security_task_setscheduler(task); |
---|
2199 | 2242 | if (ret) |
---|
2200 | 2243 | goto out_unlock; |
---|
| 2244 | + |
---|
| 2245 | + if (dl_task(task)) { |
---|
| 2246 | + cs->nr_migrate_dl_tasks++; |
---|
| 2247 | + cs->sum_migrate_dl_bw += task->dl.dl_bw; |
---|
| 2248 | + } |
---|
2201 | 2249 | } |
---|
2202 | 2250 | |
---|
| 2251 | + if (!cs->nr_migrate_dl_tasks) |
---|
| 2252 | + goto out_success; |
---|
| 2253 | + |
---|
| 2254 | + if (!cpumask_intersects(oldcs->effective_cpus, cs->effective_cpus)) { |
---|
| 2255 | + int cpu = cpumask_any_and(cpu_active_mask, cs->effective_cpus); |
---|
| 2256 | + |
---|
| 2257 | + if (unlikely(cpu >= nr_cpu_ids)) { |
---|
| 2258 | + reset_migrate_dl_data(cs); |
---|
| 2259 | + ret = -EINVAL; |
---|
| 2260 | + goto out_unlock; |
---|
| 2261 | + } |
---|
| 2262 | + |
---|
| 2263 | + ret = dl_bw_alloc(cpu, cs->sum_migrate_dl_bw); |
---|
| 2264 | + if (ret) { |
---|
| 2265 | + reset_migrate_dl_data(cs); |
---|
| 2266 | + goto out_unlock; |
---|
| 2267 | + } |
---|
| 2268 | + } |
---|
| 2269 | + |
---|
| 2270 | +out_success: |
---|
2203 | 2271 | /* |
---|
2204 | 2272 | * Mark attach is in progress. This makes validate_change() fail |
---|
2205 | 2273 | * changes which zero cpus/mems_allowed. |
---|
.. | .. |
---|
/*
 * Undo the preparations made by cpuset_can_attach() when the attach is
 * aborted: drop the in-progress marker (waking waiters, matching the
 * wake_up in cpuset_attach()) and give back any SCHED_DEADLINE bandwidth
 * reserved for the would-be-migrated tasks.
 */
static void cpuset_cancel_attach(struct cgroup_taskset *tset)
{
	struct cgroup_subsys_state *css;
	struct cpuset *cs;

	cgroup_taskset_first(tset, &css);
	cs = css_cs(css);

	mutex_lock(&cpuset_mutex);
	cs->attach_in_progress--;
	if (!cs->attach_in_progress)
		wake_up(&cpuset_attach_wq);

	if (cs->nr_migrate_dl_tasks) {
		/*
		 * NOTE(review): cpuset_can_attach() only calls dl_bw_alloc()
		 * when the old and new effective_cpus do NOT intersect, yet
		 * this path frees whenever nr_migrate_dl_tasks != 0 — verify
		 * the intersecting case cannot reach here with the counters
		 * still set, or bandwidth would be freed without a matching
		 * allocation. Also, allocation picked its CPU with
		 * cpumask_any_and(cpu_active_mask, ...) while this uses plain
		 * cpumask_any(); presumably both resolve to the same root
		 * domain — confirm.
		 */
		int cpu = cpumask_any(cs->effective_cpus);

		dl_bw_free(cpu, cs->sum_migrate_dl_bw);
		reset_migrate_dl_data(cs);
	}

	mutex_unlock(&cpuset_mutex);
}
---|
2224 | 2304 | |
---|
.. | .. |
---|
2291 | 2371 | |
---|
2292 | 2372 | cs->old_mems_allowed = cpuset_attach_nodemask_to; |
---|
2293 | 2373 | |
---|
| 2374 | + if (cs->nr_migrate_dl_tasks) { |
---|
| 2375 | + cs->nr_deadline_tasks += cs->nr_migrate_dl_tasks; |
---|
| 2376 | + oldcs->nr_deadline_tasks -= cs->nr_migrate_dl_tasks; |
---|
| 2377 | + reset_migrate_dl_data(cs); |
---|
| 2378 | + } |
---|
| 2379 | + |
---|
2294 | 2380 | cs->attach_in_progress--; |
---|
2295 | 2381 | if (!cs->attach_in_progress) |
---|
2296 | 2382 | wake_up(&cpuset_attach_wq); |
---|