@@ -165,6 +165,14 @@
 	 */
 	int use_parent_ecpus;
 	int child_ecpus_count;
+
+	/*
+	 * number of SCHED_DEADLINE tasks attached to this cpuset, so that we
+	 * know when to rebuild associated root domain bandwidth information.
+	 */
+	int nr_deadline_tasks;
+	int nr_migrate_dl_tasks;
+	u64 sum_migrate_dl_bw;
 };
 
 /*
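
The new `sum_migrate_dl_bw` field accumulates `task->dl.dl_bw` values, which the scheduler keeps as fixed-point utilizations (runtime/period scaled by the kernel's `BW_SHIFT` of 20 bits). A minimal userspace sketch of that arithmetic, mirroring the shape of the kernel's `to_ratio()` but with illustrative parameters:

```c
#include <stdint.h>
#include <stdio.h>

#define BW_SHIFT 20                 /* kernel's fixed-point shift */
#define BW_UNIT  (1ULL << BW_SHIFT) /* 1.0, i.e. one full CPU */

/* Mirrors the shape of the kernel's to_ratio(): runtime/period in Q20. */
static uint64_t to_ratio(uint64_t period, uint64_t runtime)
{
	return (runtime << BW_SHIFT) / period;
}

int main(void)
{
	/* e.g. two DL tasks: 10ms/100ms and 25ms/100ms (values in ns) */
	uint64_t sum_migrate_dl_bw = 0;

	sum_migrate_dl_bw += to_ratio(100000000ULL, 10000000ULL);
	sum_migrate_dl_bw += to_ratio(100000000ULL, 25000000ULL);

	printf("summed bandwidth = %.2f%% of one CPU\n",
	       100.0 * (double)sum_migrate_dl_bw / BW_UNIT);
	return 0;
}
```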
@@ -208,6 +216,20 @@
 static inline struct cpuset *parent_cs(struct cpuset *cs)
 {
 	return css_cs(cs->css.parent);
+}
+
+void inc_dl_tasks_cs(struct task_struct *p)
+{
+	struct cpuset *cs = task_cs(p);
+
+	cs->nr_deadline_tasks++;
+}
+
+void dec_dl_tasks_cs(struct task_struct *p)
+{
+	struct cpuset *cs = task_cs(p);
+
+	cs->nr_deadline_tasks--;
 }
 
 /* bits in struct cpuset flags field */
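
These two helpers are deliberately non-static: the scheduler core is expected to call them whenever a task switches into or out of SCHED_DEADLINE, so the per-cpuset counter stays in sync. A hedged sketch of the assumed call sites (in the upstream series they live in the deadline class in kernel/sched/deadline.c; treat the exact placement here as illustrative):

```c
/* Sketch of the assumed scheduler-side callers, not part of this hunk.
 * cpuset_mutex (via cpuset_lock()) is expected to be held so the
 * counter cannot race with cpuset attach/detach. */
static void switched_to_dl(struct rq *rq, struct task_struct *p)
{
	/* ... existing class-switch handling ... */
	inc_dl_tasks_cs(p);	/* p's cpuset gains a DL task */
}

static void switched_from_dl(struct rq *rq, struct task_struct *p)
{
	/* ... existing class-switch handling ... */
	dec_dl_tasks_cs(p);	/* p's cpuset loses a DL task */
}
```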
@@ -339,7 +361,18 @@
  */
 
 static DEFINE_MUTEX(cpuset_mutex);
-static DEFINE_RAW_SPINLOCK(callback_lock);
+
+void cpuset_lock(void)
+{
+	mutex_lock(&cpuset_mutex);
+}
+
+void cpuset_unlock(void)
+{
+	mutex_unlock(&cpuset_mutex);
+}
+
+static DEFINE_SPINLOCK(callback_lock);
 
 static struct workqueue_struct *cpuset_migrate_mm_wq;
 
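
Two locking changes land here: `callback_lock` drops from `raw_spinlock_t` back to a plain `spinlock_t`, and `cpuset_mutex` gains exported `cpuset_lock()`/`cpuset_unlock()` wrappers so code outside cpuset.c can stabilize a task's cpuset without reaching into the mutex directly. A hypothetical external-caller sketch, assuming the scheduler's setscheduler path as the consumer (the helper name below is illustrative, not a kernel API):

```c
/* Hypothetical sketch: bracketing a DL admission check with the new
 * wrappers. While cpuset_lock() is held, task_cs(p) and the per-cpuset
 * DL counters cannot change underneath the caller. */
int sched_setscheduler_dl_example(struct task_struct *p)
{
	int ret;

	cpuset_lock();
	ret = do_dl_admission_check(p);	/* hypothetical helper */
	cpuset_unlock();

	return ret;
}
```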
@@ -925,10 +958,13 @@
 	return ndoms;
 }
 
-static void update_tasks_root_domain(struct cpuset *cs)
+static void dl_update_tasks_root_domain(struct cpuset *cs)
 {
 	struct css_task_iter it;
 	struct task_struct *task;
+
+	if (cs->nr_deadline_tasks == 0)
+		return;
 
 	css_task_iter_start(&cs->css, 0, &it);
 
@@ -938,7 +974,7 @@
 	css_task_iter_end(&it);
 }
 
-static void rebuild_root_domains(void)
+static void dl_rebuild_rd_accounting(void)
 {
 	struct cpuset *cs = NULL;
 	struct cgroup_subsys_state *pos_css;
@@ -966,7 +1002,7 @@
 
 		rcu_read_unlock();
 
-		update_tasks_root_domain(cs);
+		dl_update_tasks_root_domain(cs);
 
 		rcu_read_lock();
 		css_put(&cs->css);
@@ -980,7 +1016,7 @@
 {
 	mutex_lock(&sched_domains_mutex);
 	partition_sched_domains_locked(ndoms_new, doms_new, dattr_new);
-	rebuild_root_domains();
+	dl_rebuild_rd_accounting();
 	mutex_unlock(&sched_domains_mutex);
 }
 
@@ -1315,7 +1351,7 @@
 	 * Newly added CPUs will be removed from effective_cpus and
 	 * newly deleted ones will be added back to effective_cpus.
 	 */
-	raw_spin_lock_irq(&callback_lock);
+	spin_lock_irq(&callback_lock);
 	if (adding) {
 		cpumask_or(parent->subparts_cpus,
 			   parent->subparts_cpus, tmp->addmask);
@@ -1337,7 +1373,7 @@
 
 	if (cpuset->partition_root_state != new_prs)
 		cpuset->partition_root_state = new_prs;
-	raw_spin_unlock_irq(&callback_lock);
+	spin_unlock_irq(&callback_lock);
 
 	return cmd == partcmd_update;
 }
@@ -1440,7 +1476,7 @@
 			continue;
 		rcu_read_unlock();
 
-		raw_spin_lock_irq(&callback_lock);
+		spin_lock_irq(&callback_lock);
 
 		cpumask_copy(cp->effective_cpus, tmp->new_cpus);
 		if (cp->nr_subparts_cpus && (new_prs != PRS_ENABLED)) {
@@ -1474,7 +1510,7 @@
 		if (new_prs != cp->partition_root_state)
 			cp->partition_root_state = new_prs;
 
-		raw_spin_unlock_irq(&callback_lock);
+		spin_unlock_irq(&callback_lock);
 
 		WARN_ON(!is_in_v2_mode() &&
 			!cpumask_equal(cp->cpus_allowed, cp->effective_cpus));
@@ -1603,7 +1639,7 @@
 			return -EINVAL;
 	}
 
-	raw_spin_lock_irq(&callback_lock);
+	spin_lock_irq(&callback_lock);
 	cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed);
 	cpumask_copy(cs->cpus_requested, trialcs->cpus_requested);
 
@@ -1614,7 +1650,7 @@
 		cpumask_and(cs->subparts_cpus, cs->subparts_cpus, cs->cpus_allowed);
 		cs->nr_subparts_cpus = cpumask_weight(cs->subparts_cpus);
 	}
-	raw_spin_unlock_irq(&callback_lock);
+	spin_unlock_irq(&callback_lock);
 
 	update_cpumasks_hier(cs, &tmp);
 
@@ -1808,9 +1844,9 @@
 			continue;
 		rcu_read_unlock();
 
-		raw_spin_lock_irq(&callback_lock);
+		spin_lock_irq(&callback_lock);
 		cp->effective_mems = *new_mems;
-		raw_spin_unlock_irq(&callback_lock);
+		spin_unlock_irq(&callback_lock);
 
 		WARN_ON(!is_in_v2_mode() &&
 			!nodes_equal(cp->mems_allowed, cp->effective_mems));
@@ -1878,9 +1914,9 @@
 	if (retval < 0)
 		goto done;
 
-	raw_spin_lock_irq(&callback_lock);
+	spin_lock_irq(&callback_lock);
 	cs->mems_allowed = trialcs->mems_allowed;
-	raw_spin_unlock_irq(&callback_lock);
+	spin_unlock_irq(&callback_lock);
 
 	/* use trialcs->mems_allowed as a temp variable */
 	update_nodemasks_hier(cs, &trialcs->mems_allowed);
@@ -1971,9 +2007,9 @@
 	spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs))
 			|| (is_spread_page(cs) != is_spread_page(trialcs)));
 
-	raw_spin_lock_irq(&callback_lock);
+	spin_lock_irq(&callback_lock);
 	cs->flags = trialcs->flags;
-	raw_spin_unlock_irq(&callback_lock);
+	spin_unlock_irq(&callback_lock);
 
 	if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed)
 		rebuild_sched_domains_locked();
@@ -2059,9 +2095,9 @@
 		rebuild_sched_domains_locked();
 out:
 	if (!err) {
-		raw_spin_lock_irq(&callback_lock);
+		spin_lock_irq(&callback_lock);
 		cs->partition_root_state = new_prs;
-		raw_spin_unlock_irq(&callback_lock);
+		spin_unlock_irq(&callback_lock);
 	}
 
 	free_cpumasks(NULL, &tmpmask);
@@ -2171,16 +2207,23 @@
 
 static struct cpuset *cpuset_attach_old_cs;
 
+static void reset_migrate_dl_data(struct cpuset *cs)
+{
+	cs->nr_migrate_dl_tasks = 0;
+	cs->sum_migrate_dl_bw = 0;
+}
+
 /* Called by cgroups to determine if a cpuset is usable; cpuset_mutex held */
 static int cpuset_can_attach(struct cgroup_taskset *tset)
 {
 	struct cgroup_subsys_state *css;
-	struct cpuset *cs;
+	struct cpuset *cs, *oldcs;
 	struct task_struct *task;
 	int ret;
 
 	/* used later by cpuset_attach() */
 	cpuset_attach_old_cs = task_cs(cgroup_taskset_first(tset, &css));
+	oldcs = cpuset_attach_old_cs;
 	cs = css_cs(css);
 
 	mutex_lock(&cpuset_mutex);
@@ -2192,14 +2235,39 @@
 		goto out_unlock;
 
 	cgroup_taskset_for_each(task, css, tset) {
-		ret = task_can_attach(task, cs->effective_cpus);
+		ret = task_can_attach(task);
 		if (ret)
 			goto out_unlock;
 		ret = security_task_setscheduler(task);
 		if (ret)
 			goto out_unlock;
+
+		if (dl_task(task)) {
+			cs->nr_migrate_dl_tasks++;
+			cs->sum_migrate_dl_bw += task->dl.dl_bw;
+		}
 	}
 
+	if (!cs->nr_migrate_dl_tasks)
+		goto out_success;
+
+	if (!cpumask_intersects(oldcs->effective_cpus, cs->effective_cpus)) {
+		int cpu = cpumask_any_and(cpu_active_mask, cs->effective_cpus);
+
+		if (unlikely(cpu >= nr_cpu_ids)) {
+			reset_migrate_dl_data(cs);
+			ret = -EINVAL;
+			goto out_unlock;
+		}
+
+		ret = dl_bw_alloc(cpu, cs->sum_migrate_dl_bw);
+		if (ret) {
+			reset_migrate_dl_data(cs);
+			goto out_unlock;
+		}
+	}
+
+out_success:
 	/*
 	 * Mark attach is in progress. This makes validate_change() fail
 	 * changes which zero cpus/mems_allowed.
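
Bandwidth is only reserved when the source and destination effective masks do not intersect: root domains partition the CPUs, so intersecting masks imply the same root domain, where the migrating tasks' bandwidth is already accounted. The `dl_bw_alloc()`/`dl_bw_free()` pair comes from the scheduler side of this series; the contract assumed here matches how this hunk uses it:

```c
/*
 * Assumed interface from the companion sched/deadline change:
 *
 * dl_bw_alloc() - try to reserve @dl_bw fixed-point bandwidth in the
 *                 root domain containing @cpu; returns 0 on success or
 *                 a negative errno (e.g. -EBUSY) if the reservation
 *                 would overflow the domain's DL capacity.
 * dl_bw_free()  - return a previously reserved allocation.
 */
int dl_bw_alloc(int cpu, u64 dl_bw);
void dl_bw_free(int cpu, u64 dl_bw);
```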
@@ -2214,11 +2282,23 @@
 static void cpuset_cancel_attach(struct cgroup_taskset *tset)
 {
 	struct cgroup_subsys_state *css;
+	struct cpuset *cs;
 
 	cgroup_taskset_first(tset, &css);
+	cs = css_cs(css);
 
 	mutex_lock(&cpuset_mutex);
-	css_cs(css)->attach_in_progress--;
+	cs->attach_in_progress--;
+	if (!cs->attach_in_progress)
+		wake_up(&cpuset_attach_wq);
+
+	if (cs->nr_migrate_dl_tasks) {
+		int cpu = cpumask_any(cs->effective_cpus);
+
+		dl_bw_free(cpu, cs->sum_migrate_dl_bw);
+		reset_migrate_dl_data(cs);
+	}
+
 	mutex_unlock(&cpuset_mutex);
 }
 
@@ -2290,6 +2370,12 @@
 	}
 
 	cs->old_mems_allowed = cpuset_attach_nodemask_to;
+
+	if (cs->nr_migrate_dl_tasks) {
+		cs->nr_deadline_tasks += cs->nr_migrate_dl_tasks;
+		oldcs->nr_deadline_tasks -= cs->nr_migrate_dl_tasks;
+		reset_migrate_dl_data(cs);
+	}
 
 	cs->attach_in_progress--;
 	if (!cs->attach_in_progress)
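
Taken together, the attach path is now a three-phase reserve/commit/rollback protocol: cpuset_can_attach() sums the migrating tasks' bandwidth and reserves it, cpuset_attach() commits the counters to the destination cpuset, and cpuset_cancel_attach() releases the reservation if the attach aborts. A self-contained userspace model of the flow, with a toy capacity check standing in for the kernel's `dl_bw_alloc()`/`dl_bw_free()` (all names and numbers here are illustrative):

```c
#include <assert.h>
#include <stdint.h>

#define BW_UNIT (1ULL << 20)	/* 1.0 of one CPU, as in the kernel */

/* Toy stand-ins for the kernel objects; fields mirror this patch. */
struct cs { int nr_deadline_tasks, nr_migrate_dl_tasks; uint64_t sum_migrate_dl_bw; };
static uint64_t rd_reserved, rd_capacity = 2 * BW_UNIT;	/* 2 CPUs */

static int dl_bw_alloc(uint64_t bw)	/* model: reserve or fail whole */
{
	if (rd_reserved + bw > rd_capacity)
		return -1;		/* kernel returns -EBUSY here */
	rd_reserved += bw;
	return 0;
}
static void dl_bw_free(uint64_t bw) { rd_reserved -= bw; }

static void reset_migrate_dl_data(struct cs *cs)
{
	cs->nr_migrate_dl_tasks = 0;
	cs->sum_migrate_dl_bw = 0;
}

static int can_attach(struct cs *dst, uint64_t task_bw, int nr)
{					/* phase 1: sum and reserve */
	dst->nr_migrate_dl_tasks = nr;
	dst->sum_migrate_dl_bw = (uint64_t)nr * task_bw;
	if (dl_bw_alloc(dst->sum_migrate_dl_bw)) {
		reset_migrate_dl_data(dst);	/* as in the patch */
		return -1;
	}
	return 0;
}
static void attach(struct cs *dst, struct cs *src)
{					/* phase 2: commit counters */
	dst->nr_deadline_tasks += dst->nr_migrate_dl_tasks;
	src->nr_deadline_tasks -= dst->nr_migrate_dl_tasks;
	reset_migrate_dl_data(dst);
}
static void cancel_attach(struct cs *dst)
{					/* phase 3: roll back; the guard
					 * makes it safe after a failed
					 * can_attach, exactly as above */
	if (dst->nr_migrate_dl_tasks) {
		dl_bw_free(dst->sum_migrate_dl_bw);
		reset_migrate_dl_data(dst);
	}
}

int main(void)
{
	struct cs src = { .nr_deadline_tasks = 2 }, dst = { 0 };

	if (can_attach(&dst, BW_UNIT / 2, 2) == 0)
		attach(&dst, &src);	/* 2 x 50% fits in 2 CPUs */
	assert(dst.nr_deadline_tasks == 2 && src.nr_deadline_tasks == 0);

	if (can_attach(&dst, BW_UNIT, 2) != 0)	/* 2 more CPUs won't fit */
		cancel_attach(&dst);		/* no-op: nothing reserved */
	assert(rd_reserved == BW_UNIT);	/* only the first reservation */
	return 0;
}
```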
@@ -2476,7 +2562,7 @@
 	cpuset_filetype_t type = seq_cft(sf)->private;
 	int ret = 0;
 
-	raw_spin_lock_irq(&callback_lock);
+	spin_lock_irq(&callback_lock);
 
 	switch (type) {
 	case FILE_CPULIST:
@@ -2498,7 +2584,7 @@
 		ret = -EINVAL;
 	}
 
-	raw_spin_unlock_irq(&callback_lock);
+	spin_unlock_irq(&callback_lock);
 	return ret;
 }
 
@@ -2811,14 +2897,14 @@
 
 	cpuset_inc();
 
-	raw_spin_lock_irq(&callback_lock);
+	spin_lock_irq(&callback_lock);
 	if (is_in_v2_mode()) {
 		cpumask_copy(cs->effective_cpus, parent->effective_cpus);
 		cs->effective_mems = parent->effective_mems;
 		cs->use_parent_ecpus = true;
 		parent->child_ecpus_count++;
 	}
-	raw_spin_unlock_irq(&callback_lock);
+	spin_unlock_irq(&callback_lock);
 
 	if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags))
 		goto out_unlock;
@@ -2845,13 +2931,13 @@
 	}
 	rcu_read_unlock();
 
-	raw_spin_lock_irq(&callback_lock);
+	spin_lock_irq(&callback_lock);
 	cs->mems_allowed = parent->mems_allowed;
 	cs->effective_mems = parent->mems_allowed;
 	cpumask_copy(cs->cpus_allowed, parent->cpus_allowed);
 	cpumask_copy(cs->cpus_requested, parent->cpus_requested);
 	cpumask_copy(cs->effective_cpus, parent->cpus_allowed);
-	raw_spin_unlock_irq(&callback_lock);
+	spin_unlock_irq(&callback_lock);
 out_unlock:
 	mutex_unlock(&cpuset_mutex);
 	put_online_cpus();
@@ -2907,7 +2993,7 @@
 static void cpuset_bind(struct cgroup_subsys_state *root_css)
 {
 	mutex_lock(&cpuset_mutex);
-	raw_spin_lock_irq(&callback_lock);
+	spin_lock_irq(&callback_lock);
 
 	if (is_in_v2_mode()) {
 		cpumask_copy(top_cpuset.cpus_allowed, cpu_possible_mask);
@@ -2918,7 +3004,7 @@
 		top_cpuset.mems_allowed = top_cpuset.effective_mems;
 	}
 
-	raw_spin_unlock_irq(&callback_lock);
+	spin_unlock_irq(&callback_lock);
 	mutex_unlock(&cpuset_mutex);
 }
 
@@ -3018,12 +3104,12 @@
 {
 	bool is_empty;
 
-	raw_spin_lock_irq(&callback_lock);
+	spin_lock_irq(&callback_lock);
 	cpumask_copy(cs->cpus_allowed, new_cpus);
 	cpumask_copy(cs->effective_cpus, new_cpus);
 	cs->mems_allowed = *new_mems;
 	cs->effective_mems = *new_mems;
-	raw_spin_unlock_irq(&callback_lock);
+	spin_unlock_irq(&callback_lock);
 
 	/*
 	 * Don't call update_tasks_cpumask() if the cpuset becomes empty,
@@ -3060,10 +3146,10 @@
 	if (nodes_empty(*new_mems))
 		*new_mems = parent_cs(cs)->effective_mems;
 
-	raw_spin_lock_irq(&callback_lock);
+	spin_lock_irq(&callback_lock);
 	cpumask_copy(cs->effective_cpus, new_cpus);
 	cs->effective_mems = *new_mems;
-	raw_spin_unlock_irq(&callback_lock);
+	spin_unlock_irq(&callback_lock);
 
 	if (cpus_updated)
 		update_tasks_cpumask(cs);
@@ -3130,10 +3216,10 @@
 	if (is_partition_root(cs) && (cpumask_empty(&new_cpus) ||
 	   (parent->partition_root_state == PRS_ERROR))) {
 		if (cs->nr_subparts_cpus) {
-			raw_spin_lock_irq(&callback_lock);
+			spin_lock_irq(&callback_lock);
 			cs->nr_subparts_cpus = 0;
 			cpumask_clear(cs->subparts_cpus);
-			raw_spin_unlock_irq(&callback_lock);
+			spin_unlock_irq(&callback_lock);
 			compute_effective_cpumask(&new_cpus, cs, parent);
 		}
 
@@ -3147,9 +3233,9 @@
 		    cpumask_empty(&new_cpus)) {
 			update_parent_subparts_cpumask(cs, partcmd_disable,
 						       NULL, tmp);
-			raw_spin_lock_irq(&callback_lock);
+			spin_lock_irq(&callback_lock);
 			cs->partition_root_state = PRS_ERROR;
-			raw_spin_unlock_irq(&callback_lock);
+			spin_unlock_irq(&callback_lock);
 		}
 		cpuset_force_rebuild();
 	}
@@ -3229,7 +3315,7 @@
 
 	/* synchronize cpus_allowed to cpu_active_mask */
 	if (cpus_updated) {
-		raw_spin_lock_irq(&callback_lock);
+		spin_lock_irq(&callback_lock);
 		if (!on_dfl)
 			cpumask_copy(top_cpuset.cpus_allowed, &new_cpus);
 		/*
@@ -3249,17 +3335,17 @@
 			}
 		}
 		cpumask_copy(top_cpuset.effective_cpus, &new_cpus);
-		raw_spin_unlock_irq(&callback_lock);
+		spin_unlock_irq(&callback_lock);
 		/* we don't mess with cpumasks of tasks in top_cpuset */
 	}
 
 	/* synchronize mems_allowed to N_MEMORY */
 	if (mems_updated) {
-		raw_spin_lock_irq(&callback_lock);
+		spin_lock_irq(&callback_lock);
 		if (!on_dfl)
 			top_cpuset.mems_allowed = new_mems;
 		top_cpuset.effective_mems = new_mems;
-		raw_spin_unlock_irq(&callback_lock);
+		spin_unlock_irq(&callback_lock);
 		update_tasks_nodemask(&top_cpuset);
 	}
 
@@ -3368,11 +3454,11 @@
 {
 	unsigned long flags;
 
-	raw_spin_lock_irqsave(&callback_lock, flags);
+	spin_lock_irqsave(&callback_lock, flags);
 	rcu_read_lock();
 	guarantee_online_cpus(tsk, pmask);
 	rcu_read_unlock();
-	raw_spin_unlock_irqrestore(&callback_lock, flags);
+	spin_unlock_irqrestore(&callback_lock, flags);
 }
 EXPORT_SYMBOL_GPL(cpuset_cpus_allowed);
 /**
@@ -3441,11 +3527,11 @@
 	nodemask_t mask;
 	unsigned long flags;
 
-	raw_spin_lock_irqsave(&callback_lock, flags);
+	spin_lock_irqsave(&callback_lock, flags);
 	rcu_read_lock();
 	guarantee_online_mems(task_cs(tsk), &mask);
 	rcu_read_unlock();
-	raw_spin_unlock_irqrestore(&callback_lock, flags);
+	spin_unlock_irqrestore(&callback_lock, flags);
 
 	return mask;
 }
@@ -3537,14 +3623,14 @@
 		return true;
 
 	/* Not hardwall and node outside mems_allowed: scan up cpusets */
-	raw_spin_lock_irqsave(&callback_lock, flags);
+	spin_lock_irqsave(&callback_lock, flags);
 
 	rcu_read_lock();
 	cs = nearest_hardwall_ancestor(task_cs(current));
 	allowed = node_isset(node, cs->mems_allowed);
 	rcu_read_unlock();
 
-	raw_spin_unlock_irqrestore(&callback_lock, flags);
+	spin_unlock_irqrestore(&callback_lock, flags);
 	return allowed;
 }
 