@@ -339,7 +339,7 @@
  */
 
 static DEFINE_MUTEX(cpuset_mutex);
-static DEFINE_RAW_SPINLOCK(callback_lock);
+static DEFINE_SPINLOCK(callback_lock);
 
 static struct workqueue_struct *cpuset_migrate_mm_wq;
 
@@ -1315,7 +1315,7 @@
          * Newly added CPUs will be removed from effective_cpus and
          * newly deleted ones will be added back to effective_cpus.
          */
-        raw_spin_lock_irq(&callback_lock);
+        spin_lock_irq(&callback_lock);
         if (adding) {
                 cpumask_or(parent->subparts_cpus,
                            parent->subparts_cpus, tmp->addmask);
@@ -1337,7 +1337,7 @@
 
         if (cpuset->partition_root_state != new_prs)
                 cpuset->partition_root_state = new_prs;
-        raw_spin_unlock_irq(&callback_lock);
+        spin_unlock_irq(&callback_lock);
 
         return cmd == partcmd_update;
 }
@@ -1440,7 +1440,7 @@
                         continue;
                 rcu_read_unlock();
 
-                raw_spin_lock_irq(&callback_lock);
+                spin_lock_irq(&callback_lock);
 
                 cpumask_copy(cp->effective_cpus, tmp->new_cpus);
                 if (cp->nr_subparts_cpus && (new_prs != PRS_ENABLED)) {
@@ -1474,7 +1474,7 @@
                 if (new_prs != cp->partition_root_state)
                         cp->partition_root_state = new_prs;
 
-                raw_spin_unlock_irq(&callback_lock);
+                spin_unlock_irq(&callback_lock);
 
                 WARN_ON(!is_in_v2_mode() &&
                         !cpumask_equal(cp->cpus_allowed, cp->effective_cpus));
@@ -1603,7 +1603,7 @@
                 return -EINVAL;
         }
 
-        raw_spin_lock_irq(&callback_lock);
+        spin_lock_irq(&callback_lock);
         cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed);
         cpumask_copy(cs->cpus_requested, trialcs->cpus_requested);
 
@@ -1614,7 +1614,7 @@
                 cpumask_and(cs->subparts_cpus, cs->subparts_cpus, cs->cpus_allowed);
                 cs->nr_subparts_cpus = cpumask_weight(cs->subparts_cpus);
         }
-        raw_spin_unlock_irq(&callback_lock);
+        spin_unlock_irq(&callback_lock);
 
         update_cpumasks_hier(cs, &tmp);
 
@@ -1808,9 +1808,9 @@
                         continue;
                 rcu_read_unlock();
 
-                raw_spin_lock_irq(&callback_lock);
+                spin_lock_irq(&callback_lock);
                 cp->effective_mems = *new_mems;
-                raw_spin_unlock_irq(&callback_lock);
+                spin_unlock_irq(&callback_lock);
 
                 WARN_ON(!is_in_v2_mode() &&
                         !nodes_equal(cp->mems_allowed, cp->effective_mems));
@@ -1878,9 +1878,9 @@
         if (retval < 0)
                 goto done;
 
-        raw_spin_lock_irq(&callback_lock);
+        spin_lock_irq(&callback_lock);
         cs->mems_allowed = trialcs->mems_allowed;
-        raw_spin_unlock_irq(&callback_lock);
+        spin_unlock_irq(&callback_lock);
 
         /* use trialcs->mems_allowed as a temp variable */
         update_nodemasks_hier(cs, &trialcs->mems_allowed);
@@ -1971,9 +1971,9 @@
         spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs))
                         || (is_spread_page(cs) != is_spread_page(trialcs)));
 
-        raw_spin_lock_irq(&callback_lock);
+        spin_lock_irq(&callback_lock);
         cs->flags = trialcs->flags;
-        raw_spin_unlock_irq(&callback_lock);
+        spin_unlock_irq(&callback_lock);
 
         if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed)
                 rebuild_sched_domains_locked();
@@ -2059,9 +2059,9 @@
                 rebuild_sched_domains_locked();
 out:
         if (!err) {
-                raw_spin_lock_irq(&callback_lock);
+                spin_lock_irq(&callback_lock);
                 cs->partition_root_state = new_prs;
-                raw_spin_unlock_irq(&callback_lock);
+                spin_unlock_irq(&callback_lock);
         }
 
         free_cpumasks(NULL, &tmpmask);
@@ -2476,7 +2476,7 @@
         cpuset_filetype_t type = seq_cft(sf)->private;
         int ret = 0;
 
-        raw_spin_lock_irq(&callback_lock);
+        spin_lock_irq(&callback_lock);
 
         switch (type) {
         case FILE_CPULIST:
@@ -2498,7 +2498,7 @@
                 ret = -EINVAL;
         }
 
-        raw_spin_unlock_irq(&callback_lock);
+        spin_unlock_irq(&callback_lock);
         return ret;
 }
 
@@ -2811,14 +2811,14 @@
 
         cpuset_inc();
 
-        raw_spin_lock_irq(&callback_lock);
+        spin_lock_irq(&callback_lock);
         if (is_in_v2_mode()) {
                 cpumask_copy(cs->effective_cpus, parent->effective_cpus);
                 cs->effective_mems = parent->effective_mems;
                 cs->use_parent_ecpus = true;
                 parent->child_ecpus_count++;
         }
-        raw_spin_unlock_irq(&callback_lock);
+        spin_unlock_irq(&callback_lock);
 
         if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags))
                 goto out_unlock;
@@ -2845,13 +2845,13 @@
         }
         rcu_read_unlock();
 
-        raw_spin_lock_irq(&callback_lock);
+        spin_lock_irq(&callback_lock);
         cs->mems_allowed = parent->mems_allowed;
         cs->effective_mems = parent->mems_allowed;
         cpumask_copy(cs->cpus_allowed, parent->cpus_allowed);
         cpumask_copy(cs->cpus_requested, parent->cpus_requested);
         cpumask_copy(cs->effective_cpus, parent->cpus_allowed);
-        raw_spin_unlock_irq(&callback_lock);
+        spin_unlock_irq(&callback_lock);
 out_unlock:
         mutex_unlock(&cpuset_mutex);
         put_online_cpus();
@@ -2907,7 +2907,7 @@
 static void cpuset_bind(struct cgroup_subsys_state *root_css)
 {
         mutex_lock(&cpuset_mutex);
-        raw_spin_lock_irq(&callback_lock);
+        spin_lock_irq(&callback_lock);
 
         if (is_in_v2_mode()) {
                 cpumask_copy(top_cpuset.cpus_allowed, cpu_possible_mask);
@@ -2918,7 +2918,7 @@
                 top_cpuset.mems_allowed = top_cpuset.effective_mems;
         }
 
-        raw_spin_unlock_irq(&callback_lock);
+        spin_unlock_irq(&callback_lock);
         mutex_unlock(&cpuset_mutex);
 }
 
@@ -3018,12 +3018,12 @@
 {
         bool is_empty;
 
-        raw_spin_lock_irq(&callback_lock);
+        spin_lock_irq(&callback_lock);
         cpumask_copy(cs->cpus_allowed, new_cpus);
         cpumask_copy(cs->effective_cpus, new_cpus);
         cs->mems_allowed = *new_mems;
         cs->effective_mems = *new_mems;
-        raw_spin_unlock_irq(&callback_lock);
+        spin_unlock_irq(&callback_lock);
 
         /*
          * Don't call update_tasks_cpumask() if the cpuset becomes empty,
@@ -3060,10 +3060,10 @@
         if (nodes_empty(*new_mems))
                 *new_mems = parent_cs(cs)->effective_mems;
 
-        raw_spin_lock_irq(&callback_lock);
+        spin_lock_irq(&callback_lock);
         cpumask_copy(cs->effective_cpus, new_cpus);
         cs->effective_mems = *new_mems;
-        raw_spin_unlock_irq(&callback_lock);
+        spin_unlock_irq(&callback_lock);
 
         if (cpus_updated)
                 update_tasks_cpumask(cs);
@@ -3130,10 +3130,10 @@
         if (is_partition_root(cs) && (cpumask_empty(&new_cpus) ||
            (parent->partition_root_state == PRS_ERROR))) {
                 if (cs->nr_subparts_cpus) {
-                        raw_spin_lock_irq(&callback_lock);
+                        spin_lock_irq(&callback_lock);
                         cs->nr_subparts_cpus = 0;
                         cpumask_clear(cs->subparts_cpus);
-                        raw_spin_unlock_irq(&callback_lock);
+                        spin_unlock_irq(&callback_lock);
                         compute_effective_cpumask(&new_cpus, cs, parent);
                 }
 
@@ -3147,9 +3147,9 @@
                     cpumask_empty(&new_cpus)) {
                         update_parent_subparts_cpumask(cs, partcmd_disable,
                                                        NULL, tmp);
-                        raw_spin_lock_irq(&callback_lock);
+                        spin_lock_irq(&callback_lock);
                         cs->partition_root_state = PRS_ERROR;
-                        raw_spin_unlock_irq(&callback_lock);
+                        spin_unlock_irq(&callback_lock);
                 }
                 cpuset_force_rebuild();
         }
@@ -3229,7 +3229,7 @@
 
         /* synchronize cpus_allowed to cpu_active_mask */
         if (cpus_updated) {
-                raw_spin_lock_irq(&callback_lock);
+                spin_lock_irq(&callback_lock);
                 if (!on_dfl)
                         cpumask_copy(top_cpuset.cpus_allowed, &new_cpus);
                 /*
@@ -3249,17 +3249,17 @@
                         }
                 }
                 cpumask_copy(top_cpuset.effective_cpus, &new_cpus);
-                raw_spin_unlock_irq(&callback_lock);
+                spin_unlock_irq(&callback_lock);
                 /* we don't mess with cpumasks of tasks in top_cpuset */
         }
 
         /* synchronize mems_allowed to N_MEMORY */
         if (mems_updated) {
-                raw_spin_lock_irq(&callback_lock);
+                spin_lock_irq(&callback_lock);
                 if (!on_dfl)
                         top_cpuset.mems_allowed = new_mems;
                 top_cpuset.effective_mems = new_mems;
-                raw_spin_unlock_irq(&callback_lock);
+                spin_unlock_irq(&callback_lock);
                 update_tasks_nodemask(&top_cpuset);
         }
 
@@ -3368,11 +3368,11 @@
 {
         unsigned long flags;
 
-        raw_spin_lock_irqsave(&callback_lock, flags);
+        spin_lock_irqsave(&callback_lock, flags);
         rcu_read_lock();
         guarantee_online_cpus(tsk, pmask);
         rcu_read_unlock();
-        raw_spin_unlock_irqrestore(&callback_lock, flags);
+        spin_unlock_irqrestore(&callback_lock, flags);
 }
 EXPORT_SYMBOL_GPL(cpuset_cpus_allowed);
 /**
@@ -3441,11 +3441,11 @@
         nodemask_t mask;
         unsigned long flags;
 
-        raw_spin_lock_irqsave(&callback_lock, flags);
+        spin_lock_irqsave(&callback_lock, flags);
         rcu_read_lock();
         guarantee_online_mems(task_cs(tsk), &mask);
         rcu_read_unlock();
-        raw_spin_unlock_irqrestore(&callback_lock, flags);
+        spin_unlock_irqrestore(&callback_lock, flags);
 
         return mask;
 }
@@ -3537,14 +3537,14 @@
                 return true;
 
         /* Not hardwall and node outside mems_allowed: scan up cpusets */
-        raw_spin_lock_irqsave(&callback_lock, flags);
+        spin_lock_irqsave(&callback_lock, flags);
 
         rcu_read_lock();
         cs = nearest_hardwall_ancestor(task_cs(current));
         allowed = node_isset(node, cs->mems_allowed);
         rcu_read_unlock();
 
-        raw_spin_unlock_irqrestore(&callback_lock, flags);
+        spin_unlock_irqrestore(&callback_lock, flags);
         return allowed;
 }
 
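Note on the lock-type change: DEFINE_RAW_SPINLOCK and DEFINE_SPINLOCK behave identically on a non-RT kernel. The difference only appears under PREEMPT_RT, where a spinlock_t is backed by an rt_mutex and may sleep, while a raw_spinlock_t always busy-waits with preemption disabled (and with interrupts disabled for the _irq/_irqsave variants). The minimal sketch below contrasts the two API families this patch switches between; the demo_* names are hypothetical and are not part of cpuset.c.

/*
 * Minimal sketch of the two locking families touched by this patch.
 * The demo_* identifiers are illustrative only; everything else is
 * the stock <linux/spinlock.h> API.
 */
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);          /* sleeping lock under PREEMPT_RT */
static DEFINE_RAW_SPINLOCK(demo_raw_lock);  /* always a true spinning lock */

static int demo_count;

/* Process context with IRQs known to be enabled: the _irq variant. */
static void demo_update(void)
{
        spin_lock_irq(&demo_lock);
        demo_count++;
        spin_unlock_irq(&demo_lock);
}

/*
 * Callers that may run with IRQs already disabled save and restore
 * the IRQ state instead, the same _irqsave/_irqrestore pattern that
 * cpuset_cpus_allowed() uses in the hunks above.
 */
static void demo_update_any_context(void)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&demo_raw_lock, flags);
        demo_count++;
        raw_spin_unlock_irqrestore(&demo_raw_lock, flags);
}

Why the regular lock suffices here is inferred rather than stated in the diff: every callback_lock critical section above is short and bounded, so the code stays correct even when spinlock_t becomes a sleeping lock on an RT kernel.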