@@ -289,7 +289,7 @@
  */
 
 static DEFINE_MUTEX(cpuset_mutex);
-static DEFINE_SPINLOCK(callback_lock);
+static DEFINE_RAW_SPINLOCK(callback_lock);
 
 static struct workqueue_struct *cpuset_migrate_mm_wq;
 
@@ -929,9 +929,9 @@
 			continue;
 		rcu_read_unlock();
 
-		spin_lock_irq(&callback_lock);
+		raw_spin_lock_irq(&callback_lock);
 		cpumask_copy(cp->effective_cpus, new_cpus);
-		spin_unlock_irq(&callback_lock);
+		raw_spin_unlock_irq(&callback_lock);
 
 		WARN_ON(!is_in_v2_mode() &&
 			!cpumask_equal(cp->cpus_allowed, cp->effective_cpus));
@@ -997,10 +997,10 @@
 	if (retval < 0)
 		return retval;
 
-	spin_lock_irq(&callback_lock);
+	raw_spin_lock_irq(&callback_lock);
 	cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed);
 	cpumask_copy(cs->cpus_requested, trialcs->cpus_requested);
-	spin_unlock_irq(&callback_lock);
+	raw_spin_unlock_irq(&callback_lock);
 
 	/* use trialcs->cpus_allowed as a temp variable */
 	update_cpumasks_hier(cs, trialcs->cpus_allowed);
@@ -1184,9 +1184,9 @@
 			continue;
 		rcu_read_unlock();
 
-		spin_lock_irq(&callback_lock);
+		raw_spin_lock_irq(&callback_lock);
 		cp->effective_mems = *new_mems;
-		spin_unlock_irq(&callback_lock);
+		raw_spin_unlock_irq(&callback_lock);
 
 		WARN_ON(!is_in_v2_mode() &&
 			!nodes_equal(cp->mems_allowed, cp->effective_mems));
@@ -1254,9 +1254,9 @@
 	if (retval < 0)
 		goto done;
 
-	spin_lock_irq(&callback_lock);
+	raw_spin_lock_irq(&callback_lock);
 	cs->mems_allowed = trialcs->mems_allowed;
-	spin_unlock_irq(&callback_lock);
+	raw_spin_unlock_irq(&callback_lock);
 
 	/* use trialcs->mems_allowed as a temp variable */
 	update_nodemasks_hier(cs, &trialcs->mems_allowed);
@@ -1347,9 +1347,9 @@
 	spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs))
 			|| (is_spread_page(cs) != is_spread_page(trialcs)));
 
-	spin_lock_irq(&callback_lock);
+	raw_spin_lock_irq(&callback_lock);
 	cs->flags = trialcs->flags;
-	spin_unlock_irq(&callback_lock);
+	raw_spin_unlock_irq(&callback_lock);
 
 	if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed)
 		rebuild_sched_domains_locked();
@@ -1766,7 +1766,7 @@
 	cpuset_filetype_t type = seq_cft(sf)->private;
 	int ret = 0;
 
-	spin_lock_irq(&callback_lock);
+	raw_spin_lock_irq(&callback_lock);
 
 	switch (type) {
 	case FILE_CPULIST:
@@ -1785,7 +1785,7 @@
 		ret = -EINVAL;
 	}
 
-	spin_unlock_irq(&callback_lock);
+	raw_spin_unlock_irq(&callback_lock);
 	return ret;
 }
 
@@ -2005,12 +2005,12 @@
 
 	cpuset_inc();
 
-	spin_lock_irq(&callback_lock);
+	raw_spin_lock_irq(&callback_lock);
 	if (is_in_v2_mode()) {
 		cpumask_copy(cs->effective_cpus, parent->effective_cpus);
 		cs->effective_mems = parent->effective_mems;
 	}
-	spin_unlock_irq(&callback_lock);
+	raw_spin_unlock_irq(&callback_lock);
 
 	if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags))
 		goto out_unlock;
@@ -2037,13 +2037,13 @@
 	}
 	rcu_read_unlock();
 
-	spin_lock_irq(&callback_lock);
+	raw_spin_lock_irq(&callback_lock);
 	cs->mems_allowed = parent->mems_allowed;
 	cs->effective_mems = parent->mems_allowed;
 	cpumask_copy(cs->cpus_allowed, parent->cpus_allowed);
 	cpumask_copy(cs->cpus_requested, parent->cpus_requested);
 	cpumask_copy(cs->effective_cpus, parent->cpus_allowed);
-	spin_unlock_irq(&callback_lock);
+	raw_spin_unlock_irq(&callback_lock);
 out_unlock:
 	mutex_unlock(&cpuset_mutex);
 	return 0;
@@ -2083,7 +2083,7 @@
 static void cpuset_bind(struct cgroup_subsys_state *root_css)
 {
 	mutex_lock(&cpuset_mutex);
-	spin_lock_irq(&callback_lock);
+	raw_spin_lock_irq(&callback_lock);
 
 	if (is_in_v2_mode()) {
 		cpumask_copy(top_cpuset.cpus_allowed, cpu_possible_mask);
@@ -2094,7 +2094,7 @@
 		top_cpuset.mems_allowed = top_cpuset.effective_mems;
 	}
 
-	spin_unlock_irq(&callback_lock);
+	raw_spin_unlock_irq(&callback_lock);
 	mutex_unlock(&cpuset_mutex);
 }
 
@@ -2108,7 +2108,7 @@
 	if (task_css_is_root(task, cpuset_cgrp_id))
 		return;
 
-	set_cpus_allowed_ptr(task, &current->cpus_allowed);
+	set_cpus_allowed_ptr(task, current->cpus_ptr);
 	task->mems_allowed = current->mems_allowed;
 }
 
@@ -2194,12 +2194,12 @@
 {
 	bool is_empty;
 
-	spin_lock_irq(&callback_lock);
+	raw_spin_lock_irq(&callback_lock);
 	cpumask_copy(cs->cpus_allowed, new_cpus);
 	cpumask_copy(cs->effective_cpus, new_cpus);
 	cs->mems_allowed = *new_mems;
 	cs->effective_mems = *new_mems;
-	spin_unlock_irq(&callback_lock);
+	raw_spin_unlock_irq(&callback_lock);
 
 	/*
 	 * Don't call update_tasks_cpumask() if the cpuset becomes empty,
@@ -2236,10 +2236,10 @@
 	if (nodes_empty(*new_mems))
 		*new_mems = parent_cs(cs)->effective_mems;
 
-	spin_lock_irq(&callback_lock);
+	raw_spin_lock_irq(&callback_lock);
 	cpumask_copy(cs->effective_cpus, new_cpus);
 	cs->effective_mems = *new_mems;
-	spin_unlock_irq(&callback_lock);
+	raw_spin_unlock_irq(&callback_lock);
 
 	if (cpus_updated)
 		update_tasks_cpumask(cs);
@@ -2332,21 +2332,21 @@
 
 	/* synchronize cpus_allowed to cpu_active_mask */
 	if (cpus_updated) {
-		spin_lock_irq(&callback_lock);
+		raw_spin_lock_irq(&callback_lock);
 		if (!on_dfl)
 			cpumask_copy(top_cpuset.cpus_allowed, &new_cpus);
 		cpumask_copy(top_cpuset.effective_cpus, &new_cpus);
-		spin_unlock_irq(&callback_lock);
+		raw_spin_unlock_irq(&callback_lock);
 		/* we don't mess with cpumasks of tasks in top_cpuset */
 	}
 
 	/* synchronize mems_allowed to N_MEMORY */
 	if (mems_updated) {
-		spin_lock_irq(&callback_lock);
+		raw_spin_lock_irq(&callback_lock);
 		if (!on_dfl)
 			top_cpuset.mems_allowed = new_mems;
 		top_cpuset.effective_mems = new_mems;
-		spin_unlock_irq(&callback_lock);
+		raw_spin_unlock_irq(&callback_lock);
 		update_tasks_nodemask(&top_cpuset);
 	}
 
@@ -2445,11 +2445,11 @@
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&callback_lock, flags);
+	raw_spin_lock_irqsave(&callback_lock, flags);
 	rcu_read_lock();
 	guarantee_online_cpus(task_cs(tsk), pmask);
 	rcu_read_unlock();
-	spin_unlock_irqrestore(&callback_lock, flags);
+	raw_spin_unlock_irqrestore(&callback_lock, flags);
 }
 
 /**
@@ -2510,11 +2510,11 @@
 	nodemask_t mask;
 	unsigned long flags;
 
-	spin_lock_irqsave(&callback_lock, flags);
+	raw_spin_lock_irqsave(&callback_lock, flags);
 	rcu_read_lock();
 	guarantee_online_mems(task_cs(tsk), &mask);
 	rcu_read_unlock();
-	spin_unlock_irqrestore(&callback_lock, flags);
+	raw_spin_unlock_irqrestore(&callback_lock, flags);
 
 	return mask;
 }
@@ -2606,14 +2606,14 @@
 		return true;
 
 	/* Not hardwall and node outside mems_allowed: scan up cpusets */
-	spin_lock_irqsave(&callback_lock, flags);
+	raw_spin_lock_irqsave(&callback_lock, flags);
 
 	rcu_read_lock();
 	cs = nearest_hardwall_ancestor(task_cs(current));
 	allowed = node_isset(node, cs->mems_allowed);
 	rcu_read_unlock();
 
-	spin_unlock_irqrestore(&callback_lock, flags);
+	raw_spin_unlock_irqrestore(&callback_lock, flags);
 	return allowed;
 }
 