2023-11-06 e3e12f52b214121840b44c91de5b3e5af5d3eb84
kernel/kernel/cgroup/cpuset.c
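The hunks below make two related changes. First, cpuset's callback_lock becomes a raw_spinlock_t, with every acquisition rewritten to the matching raw_spin_* API. On PREEMPT_RT a plain spinlock_t turns into a sleeping lock, while callback_lock is taken in contexts that must not sleep (note the _irqsave callers such as cpuset_cpus_allowed() and __cpuset_node_allowed() further down), so the presumable intent is RT compatibility, matching the long-standing RT-tree conversion of this lock. Second, cpuset_fork() stops taking the address of current->cpus_allowed and follows the current->cpus_ptr indirection instead, in line with the kernel's move to an overridable affinity-mask pointer.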
@@ -289,7 +289,7 @@
  */
 
 static DEFINE_MUTEX(cpuset_mutex);
-static DEFINE_SPINLOCK(callback_lock);
+static DEFINE_RAW_SPINLOCK(callback_lock);
 
 static struct workqueue_struct *cpuset_migrate_mm_wq;
 
@@ -929,9 +929,9 @@
             continue;
         rcu_read_unlock();
 
-        spin_lock_irq(&callback_lock);
+        raw_spin_lock_irq(&callback_lock);
         cpumask_copy(cp->effective_cpus, new_cpus);
-        spin_unlock_irq(&callback_lock);
+        raw_spin_unlock_irq(&callback_lock);
 
         WARN_ON(!is_in_v2_mode() &&
             !cpumask_equal(cp->cpus_allowed, cp->effective_cpus));
@@ -997,10 +997,10 @@
     if (retval < 0)
         return retval;
 
-    spin_lock_irq(&callback_lock);
+    raw_spin_lock_irq(&callback_lock);
     cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed);
     cpumask_copy(cs->cpus_requested, trialcs->cpus_requested);
-    spin_unlock_irq(&callback_lock);
+    raw_spin_unlock_irq(&callback_lock);
 
     /* use trialcs->cpus_allowed as a temp variable */
     update_cpumasks_hier(cs, trialcs->cpus_allowed);
@@ -1184,9 +1184,9 @@
             continue;
         rcu_read_unlock();
 
-        spin_lock_irq(&callback_lock);
+        raw_spin_lock_irq(&callback_lock);
         cp->effective_mems = *new_mems;
-        spin_unlock_irq(&callback_lock);
+        raw_spin_unlock_irq(&callback_lock);
 
         WARN_ON(!is_in_v2_mode() &&
             !nodes_equal(cp->mems_allowed, cp->effective_mems));
@@ -1254,9 +1254,9 @@
     if (retval < 0)
         goto done;
 
-    spin_lock_irq(&callback_lock);
+    raw_spin_lock_irq(&callback_lock);
     cs->mems_allowed = trialcs->mems_allowed;
-    spin_unlock_irq(&callback_lock);
+    raw_spin_unlock_irq(&callback_lock);
 
     /* use trialcs->mems_allowed as a temp variable */
     update_nodemasks_hier(cs, &trialcs->mems_allowed);
@@ -1347,9 +1347,9 @@
     spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs))
             || (is_spread_page(cs) != is_spread_page(trialcs)));
 
-    spin_lock_irq(&callback_lock);
+    raw_spin_lock_irq(&callback_lock);
     cs->flags = trialcs->flags;
-    spin_unlock_irq(&callback_lock);
+    raw_spin_unlock_irq(&callback_lock);
 
     if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed)
         rebuild_sched_domains_locked();
@@ -1766,7 +1766,7 @@
     cpuset_filetype_t type = seq_cft(sf)->private;
     int ret = 0;
 
-    spin_lock_irq(&callback_lock);
+    raw_spin_lock_irq(&callback_lock);
 
     switch (type) {
     case FILE_CPULIST:
@@ -1785,7 +1785,7 @@
         ret = -EINVAL;
     }
 
-    spin_unlock_irq(&callback_lock);
+    raw_spin_unlock_irq(&callback_lock);
     return ret;
 }
 
@@ -2005,12 +2005,12 @@
 
     cpuset_inc();
 
-    spin_lock_irq(&callback_lock);
+    raw_spin_lock_irq(&callback_lock);
     if (is_in_v2_mode()) {
         cpumask_copy(cs->effective_cpus, parent->effective_cpus);
         cs->effective_mems = parent->effective_mems;
     }
-    spin_unlock_irq(&callback_lock);
+    raw_spin_unlock_irq(&callback_lock);
 
     if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags))
         goto out_unlock;
@@ -2037,13 +2037,13 @@
     }
     rcu_read_unlock();
 
-    spin_lock_irq(&callback_lock);
+    raw_spin_lock_irq(&callback_lock);
     cs->mems_allowed = parent->mems_allowed;
     cs->effective_mems = parent->mems_allowed;
     cpumask_copy(cs->cpus_allowed, parent->cpus_allowed);
     cpumask_copy(cs->cpus_requested, parent->cpus_requested);
     cpumask_copy(cs->effective_cpus, parent->cpus_allowed);
-    spin_unlock_irq(&callback_lock);
+    raw_spin_unlock_irq(&callback_lock);
 out_unlock:
     mutex_unlock(&cpuset_mutex);
     return 0;
@@ -2083,7 +2083,7 @@
 static void cpuset_bind(struct cgroup_subsys_state *root_css)
 {
     mutex_lock(&cpuset_mutex);
-    spin_lock_irq(&callback_lock);
+    raw_spin_lock_irq(&callback_lock);
 
     if (is_in_v2_mode()) {
         cpumask_copy(top_cpuset.cpus_allowed, cpu_possible_mask);
@@ -2094,7 +2094,7 @@
         top_cpuset.mems_allowed = top_cpuset.effective_mems;
     }
 
-    spin_unlock_irq(&callback_lock);
+    raw_spin_unlock_irq(&callback_lock);
     mutex_unlock(&cpuset_mutex);
 }
 
@@ -2108,7 +2108,7 @@
     if (task_css_is_root(task, cpuset_cgrp_id))
         return;
 
-    set_cpus_allowed_ptr(task, &current->cpus_allowed);
+    set_cpus_allowed_ptr(task, current->cpus_ptr);
     task->mems_allowed = current->mems_allowed;
 }
 
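For context on the cpus_ptr hunk above: task_struct carries a const struct cpumask *cpus_ptr that normally points at the task's own affinity mask but can be temporarily redirected (for example while migration is disabled on RT trees), so readers are expected to follow the pointer rather than take the address of the field. A minimal sketch of the resulting pattern, with a hypothetical helper name:

#include <linux/sched.h>

/* Hypothetical helper mirroring the cpuset_fork() hunk above: give a
 * child the parent's current affinity.  Following current->cpus_ptr
 * matches the old &current->cpus_allowed in the common case, but also
 * honors any temporary override of the pointer. */
static void inherit_affinity(struct task_struct *child)
{
    set_cpus_allowed_ptr(child, current->cpus_ptr);
}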
@@ -2194,12 +2194,12 @@
 {
     bool is_empty;
 
-    spin_lock_irq(&callback_lock);
+    raw_spin_lock_irq(&callback_lock);
     cpumask_copy(cs->cpus_allowed, new_cpus);
     cpumask_copy(cs->effective_cpus, new_cpus);
     cs->mems_allowed = *new_mems;
     cs->effective_mems = *new_mems;
-    spin_unlock_irq(&callback_lock);
+    raw_spin_unlock_irq(&callback_lock);
 
     /*
      * Don't call update_tasks_cpumask() if the cpuset becomes empty,
@@ -2236,10 +2236,10 @@
     if (nodes_empty(*new_mems))
         *new_mems = parent_cs(cs)->effective_mems;
 
-    spin_lock_irq(&callback_lock);
+    raw_spin_lock_irq(&callback_lock);
     cpumask_copy(cs->effective_cpus, new_cpus);
     cs->effective_mems = *new_mems;
-    spin_unlock_irq(&callback_lock);
+    raw_spin_unlock_irq(&callback_lock);
 
     if (cpus_updated)
         update_tasks_cpumask(cs);
@@ -2332,21 +2332,21 @@
 
     /* synchronize cpus_allowed to cpu_active_mask */
     if (cpus_updated) {
-        spin_lock_irq(&callback_lock);
+        raw_spin_lock_irq(&callback_lock);
         if (!on_dfl)
             cpumask_copy(top_cpuset.cpus_allowed, &new_cpus);
         cpumask_copy(top_cpuset.effective_cpus, &new_cpus);
-        spin_unlock_irq(&callback_lock);
+        raw_spin_unlock_irq(&callback_lock);
         /* we don't mess with cpumasks of tasks in top_cpuset */
     }
 
     /* synchronize mems_allowed to N_MEMORY */
     if (mems_updated) {
-        spin_lock_irq(&callback_lock);
+        raw_spin_lock_irq(&callback_lock);
         if (!on_dfl)
             top_cpuset.mems_allowed = new_mems;
         top_cpuset.effective_mems = new_mems;
-        spin_unlock_irq(&callback_lock);
+        raw_spin_unlock_irq(&callback_lock);
         update_tasks_nodemask(&top_cpuset);
     }
 
@@ -2445,11 +2445,11 @@
 {
     unsigned long flags;
 
-    spin_lock_irqsave(&callback_lock, flags);
+    raw_spin_lock_irqsave(&callback_lock, flags);
     rcu_read_lock();
     guarantee_online_cpus(task_cs(tsk), pmask);
     rcu_read_unlock();
-    spin_unlock_irqrestore(&callback_lock, flags);
+    raw_spin_unlock_irqrestore(&callback_lock, flags);
 }
 
 /**
@@ -2510,11 +2510,11 @@
     nodemask_t mask;
     unsigned long flags;
 
-    spin_lock_irqsave(&callback_lock, flags);
+    raw_spin_lock_irqsave(&callback_lock, flags);
     rcu_read_lock();
     guarantee_online_mems(task_cs(tsk), &mask);
     rcu_read_unlock();
-    spin_unlock_irqrestore(&callback_lock, flags);
+    raw_spin_unlock_irqrestore(&callback_lock, flags);
 
     return mask;
 }
@@ -2606,14 +2606,14 @@
         return true;
 
     /* Not hardwall and node outside mems_allowed: scan up cpusets */
-    spin_lock_irqsave(&callback_lock, flags);
+    raw_spin_lock_irqsave(&callback_lock, flags);
 
     rcu_read_lock();
     cs = nearest_hardwall_ancestor(task_cs(current));
     allowed = node_isset(node, cs->mems_allowed);
     rcu_read_unlock();
 
-    spin_unlock_irqrestore(&callback_lock, flags);
+    raw_spin_unlock_irqrestore(&callback_lock, flags);
     return allowed;
 }
 
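Taken together, a minimal self-contained sketch of the locking pattern the patch converges on (hypothetical lock and function names, not part of the patch): raw_spinlock_t remains a busy-waiting, IRQ-disabling lock on every preemption model, including PREEMPT_RT, which is what makes the _irqsave call sites above safe from atomic context.

#include <linux/cpumask.h>
#include <linux/spinlock.h>

/* Hypothetical stand-in for callback_lock. */
static DEFINE_RAW_SPINLOCK(example_lock);

static void example_snapshot(struct cpumask *dst, const struct cpumask *src)
{
    unsigned long flags;

    /* Spins instead of sleeping, even on PREEMPT_RT, so this is
     * legal where sleeping is forbidden; keep the section short. */
    raw_spin_lock_irqsave(&example_lock, flags);
    cpumask_copy(dst, src);
    raw_spin_unlock_irqrestore(&example_lock, flags);
}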