From 6778948f9de86c3cfaf36725a7c87dcff9ba247f Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Mon, 11 Dec 2023 08:20:59 +0000
Subject: [PATCH] kernel_5.10: cpuset: switch callback_lock back to spinlock_t for non-RT builds

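Switch callback_lock in kernel/cgroup/cpuset.c from raw_spinlock_t back
to a plain spinlock_t, dropping the PREEMPT_RT-specific conversion for
this non-RT build. On a non-RT kernel the two lock types behave
identically; raw_spinlock_t is only required where the lock must keep
true spinning semantics under PREEMPT_RT, where spinlock_t becomes a
sleeping, rtmutex-based lock. All 41 lock/unlock call sites are updated
to the matching spin_lock_irq()/spin_lock_irqsave() variants. No
functional change intended on !PREEMPT_RT kernels.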
---
 kernel/kernel/cgroup/cpuset.c |   82 ++++++++++++++++++++--------------------
 1 file changed, 41 insertions(+), 41 deletions(-)

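Note (not part of the applied patch): the distinction between the two
lock flavours only matters under PREEMPT_RT. A minimal illustrative
sketch, using hypothetical demo_lock/demo_raw_lock names rather than
anything from cpuset.c:

    #include <linux/spinlock.h>

    /* Under PREEMPT_RT, spinlock_t is backed by an rtmutex and may sleep,
     * so spin_lock_irq() neither disables hard IRQs nor busy-waits there.
     * raw_spinlock_t keeps the classic spinning, IRQ-disabling behaviour
     * on both RT and non-RT kernels. On !PREEMPT_RT both map to the same
     * underlying spinlock, which is why this revert is safe for this build.
     */
    static DEFINE_SPINLOCK(demo_lock);         /* sleeping lock on RT */
    static DEFINE_RAW_SPINLOCK(demo_raw_lock); /* always spins        */

    static void demo(void)
    {
            unsigned long flags;

            spin_lock_irq(&demo_lock);      /* RT: may sleep, hard IRQs stay on */
            spin_unlock_irq(&demo_lock);

            raw_spin_lock_irqsave(&demo_raw_lock, flags); /* hard IRQs off, even on RT */
            raw_spin_unlock_irqrestore(&demo_raw_lock, flags);
    }
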
diff --git a/kernel/kernel/cgroup/cpuset.c b/kernel/kernel/cgroup/cpuset.c
index de768d5..6820a0c 100644
--- a/kernel/kernel/cgroup/cpuset.c
+++ b/kernel/kernel/cgroup/cpuset.c
@@ -339,7 +339,7 @@
  */
 
 static DEFINE_MUTEX(cpuset_mutex);
-static DEFINE_RAW_SPINLOCK(callback_lock);
+static DEFINE_SPINLOCK(callback_lock);
 
 static struct workqueue_struct *cpuset_migrate_mm_wq;
 
@@ -1315,7 +1315,7 @@
 	 * Newly added CPUs will be removed from effective_cpus and
 	 * newly deleted ones will be added back to effective_cpus.
 	 */
-	raw_spin_lock_irq(&callback_lock);
+	spin_lock_irq(&callback_lock);
 	if (adding) {
 		cpumask_or(parent->subparts_cpus,
 			   parent->subparts_cpus, tmp->addmask);
@@ -1337,7 +1337,7 @@
 
 	if (cpuset->partition_root_state != new_prs)
 		cpuset->partition_root_state = new_prs;
-	raw_spin_unlock_irq(&callback_lock);
+	spin_unlock_irq(&callback_lock);
 
 	return cmd == partcmd_update;
 }
@@ -1440,7 +1440,7 @@
 			continue;
 		rcu_read_unlock();
 
-		raw_spin_lock_irq(&callback_lock);
+		spin_lock_irq(&callback_lock);
 
 		cpumask_copy(cp->effective_cpus, tmp->new_cpus);
 		if (cp->nr_subparts_cpus && (new_prs != PRS_ENABLED)) {
@@ -1474,7 +1474,7 @@
 		if (new_prs != cp->partition_root_state)
 			cp->partition_root_state = new_prs;
 
-		raw_spin_unlock_irq(&callback_lock);
+		spin_unlock_irq(&callback_lock);
 
 		WARN_ON(!is_in_v2_mode() &&
 			!cpumask_equal(cp->cpus_allowed, cp->effective_cpus));
@@ -1603,7 +1603,7 @@
 			return -EINVAL;
 	}
 
-	raw_spin_lock_irq(&callback_lock);
+	spin_lock_irq(&callback_lock);
 	cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed);
 	cpumask_copy(cs->cpus_requested, trialcs->cpus_requested);
 
@@ -1614,7 +1614,7 @@
 		cpumask_and(cs->subparts_cpus, cs->subparts_cpus, cs->cpus_allowed);
 		cs->nr_subparts_cpus = cpumask_weight(cs->subparts_cpus);
 	}
-	raw_spin_unlock_irq(&callback_lock);
+	spin_unlock_irq(&callback_lock);
 
 	update_cpumasks_hier(cs, &tmp);
 
@@ -1808,9 +1808,9 @@
 			continue;
 		rcu_read_unlock();
 
-		raw_spin_lock_irq(&callback_lock);
+		spin_lock_irq(&callback_lock);
 		cp->effective_mems = *new_mems;
-		raw_spin_unlock_irq(&callback_lock);
+		spin_unlock_irq(&callback_lock);
 
 		WARN_ON(!is_in_v2_mode() &&
 			!nodes_equal(cp->mems_allowed, cp->effective_mems));
@@ -1878,9 +1878,9 @@
 	if (retval < 0)
 		goto done;
 
-	raw_spin_lock_irq(&callback_lock);
+	spin_lock_irq(&callback_lock);
 	cs->mems_allowed = trialcs->mems_allowed;
-	raw_spin_unlock_irq(&callback_lock);
+	spin_unlock_irq(&callback_lock);
 
 	/* use trialcs->mems_allowed as a temp variable */
 	update_nodemasks_hier(cs, &trialcs->mems_allowed);
@@ -1971,9 +1971,9 @@
 	spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs))
 			|| (is_spread_page(cs) != is_spread_page(trialcs)));
 
-	raw_spin_lock_irq(&callback_lock);
+	spin_lock_irq(&callback_lock);
 	cs->flags = trialcs->flags;
-	raw_spin_unlock_irq(&callback_lock);
+	spin_unlock_irq(&callback_lock);
 
 	if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed)
 		rebuild_sched_domains_locked();
@@ -2059,9 +2059,9 @@
 	rebuild_sched_domains_locked();
 out:
 	if (!err) {
-		raw_spin_lock_irq(&callback_lock);
+		spin_lock_irq(&callback_lock);
 		cs->partition_root_state = new_prs;
-		raw_spin_unlock_irq(&callback_lock);
+		spin_unlock_irq(&callback_lock);
 	}
 
 	free_cpumasks(NULL, &tmpmask);
@@ -2476,7 +2476,7 @@
 	cpuset_filetype_t type = seq_cft(sf)->private;
 	int ret = 0;
 
-	raw_spin_lock_irq(&callback_lock);
+	spin_lock_irq(&callback_lock);
 
 	switch (type) {
 	case FILE_CPULIST:
@@ -2498,7 +2498,7 @@
 		ret = -EINVAL;
 	}
 
-	raw_spin_unlock_irq(&callback_lock);
+	spin_unlock_irq(&callback_lock);
 	return ret;
 }
 
@@ -2811,14 +2811,14 @@
 
 	cpuset_inc();
 
-	raw_spin_lock_irq(&callback_lock);
+	spin_lock_irq(&callback_lock);
 	if (is_in_v2_mode()) {
 		cpumask_copy(cs->effective_cpus, parent->effective_cpus);
 		cs->effective_mems = parent->effective_mems;
 		cs->use_parent_ecpus = true;
 		parent->child_ecpus_count++;
 	}
-	raw_spin_unlock_irq(&callback_lock);
+	spin_unlock_irq(&callback_lock);
 
 	if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags))
 		goto out_unlock;
@@ -2845,13 +2845,13 @@
 	}
 	rcu_read_unlock();
 
-	raw_spin_lock_irq(&callback_lock);
+	spin_lock_irq(&callback_lock);
 	cs->mems_allowed = parent->mems_allowed;
 	cs->effective_mems = parent->mems_allowed;
 	cpumask_copy(cs->cpus_allowed, parent->cpus_allowed);
 	cpumask_copy(cs->cpus_requested, parent->cpus_requested);
 	cpumask_copy(cs->effective_cpus, parent->cpus_allowed);
-	raw_spin_unlock_irq(&callback_lock);
+	spin_unlock_irq(&callback_lock);
 out_unlock:
 	mutex_unlock(&cpuset_mutex);
 	put_online_cpus();
@@ -2907,7 +2907,7 @@
 static void cpuset_bind(struct cgroup_subsys_state *root_css)
 {
 	mutex_lock(&cpuset_mutex);
-	raw_spin_lock_irq(&callback_lock);
+	spin_lock_irq(&callback_lock);
 
 	if (is_in_v2_mode()) {
 		cpumask_copy(top_cpuset.cpus_allowed, cpu_possible_mask);
@@ -2918,7 +2918,7 @@
 		top_cpuset.mems_allowed = top_cpuset.effective_mems;
 	}
 
-	raw_spin_unlock_irq(&callback_lock);
+	spin_unlock_irq(&callback_lock);
 	mutex_unlock(&cpuset_mutex);
 }
 
@@ -3018,12 +3018,12 @@
 {
 	bool is_empty;
 
-	raw_spin_lock_irq(&callback_lock);
+	spin_lock_irq(&callback_lock);
 	cpumask_copy(cs->cpus_allowed, new_cpus);
 	cpumask_copy(cs->effective_cpus, new_cpus);
 	cs->mems_allowed = *new_mems;
 	cs->effective_mems = *new_mems;
-	raw_spin_unlock_irq(&callback_lock);
+	spin_unlock_irq(&callback_lock);
 
 	/*
 	 * Don't call update_tasks_cpumask() if the cpuset becomes empty,
@@ -3060,10 +3060,10 @@
 	if (nodes_empty(*new_mems))
 		*new_mems = parent_cs(cs)->effective_mems;
 
-	raw_spin_lock_irq(&callback_lock);
+	spin_lock_irq(&callback_lock);
 	cpumask_copy(cs->effective_cpus, new_cpus);
 	cs->effective_mems = *new_mems;
-	raw_spin_unlock_irq(&callback_lock);
+	spin_unlock_irq(&callback_lock);
 
 	if (cpus_updated)
 		update_tasks_cpumask(cs);
@@ -3130,10 +3130,10 @@
 	if (is_partition_root(cs) && (cpumask_empty(&new_cpus) ||
 	   (parent->partition_root_state == PRS_ERROR))) {
 		if (cs->nr_subparts_cpus) {
-			raw_spin_lock_irq(&callback_lock);
+			spin_lock_irq(&callback_lock);
 			cs->nr_subparts_cpus = 0;
 			cpumask_clear(cs->subparts_cpus);
-			raw_spin_unlock_irq(&callback_lock);
+			spin_unlock_irq(&callback_lock);
 			compute_effective_cpumask(&new_cpus, cs, parent);
 		}
 
@@ -3147,9 +3147,9 @@
 		     cpumask_empty(&new_cpus)) {
 			update_parent_subparts_cpumask(cs, partcmd_disable,
 						       NULL, tmp);
-			raw_spin_lock_irq(&callback_lock);
+			spin_lock_irq(&callback_lock);
 			cs->partition_root_state = PRS_ERROR;
-			raw_spin_unlock_irq(&callback_lock);
+			spin_unlock_irq(&callback_lock);
 		}
 		cpuset_force_rebuild();
 	}
@@ -3229,7 +3229,7 @@
 
 	/* synchronize cpus_allowed to cpu_active_mask */
 	if (cpus_updated) {
-		raw_spin_lock_irq(&callback_lock);
+		spin_lock_irq(&callback_lock);
 		if (!on_dfl)
 			cpumask_copy(top_cpuset.cpus_allowed, &new_cpus);
 		/*
@@ -3249,17 +3249,17 @@
 			}
 		}
 		cpumask_copy(top_cpuset.effective_cpus, &new_cpus);
-		raw_spin_unlock_irq(&callback_lock);
+		spin_unlock_irq(&callback_lock);
 		/* we don't mess with cpumasks of tasks in top_cpuset */
 	}
 
 	/* synchronize mems_allowed to N_MEMORY */
 	if (mems_updated) {
-		raw_spin_lock_irq(&callback_lock);
+		spin_lock_irq(&callback_lock);
 		if (!on_dfl)
 			top_cpuset.mems_allowed = new_mems;
 		top_cpuset.effective_mems = new_mems;
-		raw_spin_unlock_irq(&callback_lock);
+		spin_unlock_irq(&callback_lock);
 		update_tasks_nodemask(&top_cpuset);
 	}
 
@@ -3368,11 +3368,11 @@
 {
 	unsigned long flags;
 
-	raw_spin_lock_irqsave(&callback_lock, flags);
+	spin_lock_irqsave(&callback_lock, flags);
 	rcu_read_lock();
 	guarantee_online_cpus(tsk, pmask);
 	rcu_read_unlock();
-	raw_spin_unlock_irqrestore(&callback_lock, flags);
+	spin_unlock_irqrestore(&callback_lock, flags);
 }
 EXPORT_SYMBOL_GPL(cpuset_cpus_allowed);
 /**
@@ -3441,11 +3441,11 @@
 	nodemask_t mask;
 	unsigned long flags;
 
-	raw_spin_lock_irqsave(&callback_lock, flags);
+	spin_lock_irqsave(&callback_lock, flags);
 	rcu_read_lock();
 	guarantee_online_mems(task_cs(tsk), &mask);
 	rcu_read_unlock();
-	raw_spin_unlock_irqrestore(&callback_lock, flags);
+	spin_unlock_irqrestore(&callback_lock, flags);
 
 	return mask;
 }
@@ -3537,14 +3537,14 @@
 		return true;
 
 	/* Not hardwall and node outside mems_allowed: scan up cpusets */
-	raw_spin_lock_irqsave(&callback_lock, flags);
+	spin_lock_irqsave(&callback_lock, flags);
 
 	rcu_read_lock();
 	cs = nearest_hardwall_ancestor(task_cs(current));
 	allowed = node_isset(node, cs->mems_allowed);
 	rcu_read_unlock();
 
-	raw_spin_unlock_irqrestore(&callback_lock, flags);
+	spin_unlock_irqrestore(&callback_lock, flags);
 	return allowed;
 }
 

--
Gitblit v1.6.2