From 9999e48639b3cecb08ffb37358bcba3b48161b29 Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Fri, 10 May 2024 08:50:17 +0000
Subject: [PATCH] cpuset: track SCHED_DEADLINE tasks and reserve their bandwidth on attach
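
Make the cpuset controller cooperate with SCHED_DEADLINE admission
control when tasks are moved between cpusets:

- Track the number of SCHED_DEADLINE tasks attached to each cpuset
  (nr_deadline_tasks) through new inc_dl_tasks_cs()/dec_dl_tasks_cs()
  helpers, so that root domain bandwidth accounting is rebuilt only
  for cpusets that actually contain DL tasks.

- Provide cpuset_lock()/cpuset_unlock() wrappers around cpuset_mutex
  so that code outside cpuset.c can serialize against cpuset changes.

- Convert callback_lock from a raw_spinlock_t to a spinlock_t.

- In cpuset_can_attach(), count the DL tasks being migrated and, when
  the destination cpuset's effective CPUs do not intersect the
  source's, pre-reserve their summed bandwidth (sum_migrate_dl_bw) on
  a destination CPU with dl_bw_alloc(). Release the reservation with
  dl_bw_free() in cpuset_cancel_attach(), and transfer the per-cpuset
  DL task counts from the source to the destination cpuset once the
  attach commits.

- task_can_attach() no longer takes a cpumask argument.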
---
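Notes:

The inc_dl_tasks_cs()/dec_dl_tasks_cs() helpers added below are only
the cpuset side of the interface. A minimal sketch of how the
scheduler side is expected to drive them; the call sites and the
surrounding function bodies here are illustrative assumptions, not
part of this patch:

	/* kernel/sched/deadline.c (illustrative call sites) */
	static void switched_to_dl(struct rq *rq, struct task_struct *p)
	{
		/* ... existing class-switch handling ... */
		inc_dl_tasks_cs(p);	/* task_cs(p) gains a DL task */
	}

	static void switched_from_dl(struct rq *rq, struct task_struct *p)
	{
		/* ... existing class-switch handling ... */
		dec_dl_tasks_cs(p);	/* task_cs(p) loses a DL task */
	}

Similarly, the new cpuset_lock()/cpuset_unlock() wrappers let
scheduler code serialize against cpuset changes while it performs DL
bandwidth checks, roughly:

	cpuset_lock();
	/* ... DL admission checks against task_cs(p) ... */
	cpuset_unlock();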
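A worked example of the attach-time reservation (the numbers assume
the kernel's usual BW_SHIFT=20 fixed-point encoding of dl_bw):

	Two DL tasks with runtime 10ms / period 40ms each carry
	dl_bw = (10 << 20) / 40 = 262144, i.e. 0.25 of a CPU. Moving
	both into a cpuset whose effective CPUs are disjoint from the
	source's yields sum_migrate_dl_bw = 524288 (0.5 CPU), which
	dl_bw_alloc() must be able to reserve in a destination root
	domain, otherwise cpuset_can_attach() fails and the migration
	is rejected.
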
kernel/kernel/cgroup/cpuset.c | 182 +++++++++++++++++++++++++++++++++------------
 1 file changed, 134 insertions(+), 48 deletions(-)
diff --git a/kernel/kernel/cgroup/cpuset.c b/kernel/kernel/cgroup/cpuset.c
index de768d5..270310e 100644
--- a/kernel/kernel/cgroup/cpuset.c
+++ b/kernel/kernel/cgroup/cpuset.c
@@ -165,6 +165,14 @@
*/
int use_parent_ecpus;
int child_ecpus_count;
+
+ /*
+ * number of SCHED_DEADLINE tasks attached to this cpuset, so that we
+ * know when to rebuild associated root domain bandwidth information.
+ */
+ int nr_deadline_tasks;
+ int nr_migrate_dl_tasks;
+ u64 sum_migrate_dl_bw;
};
/*
@@ -208,6 +216,20 @@
static inline struct cpuset *parent_cs(struct cpuset *cs)
{
return css_cs(cs->css.parent);
+}
+
+void inc_dl_tasks_cs(struct task_struct *p)
+{
+ struct cpuset *cs = task_cs(p);
+
+ cs->nr_deadline_tasks++;
+}
+
+void dec_dl_tasks_cs(struct task_struct *p)
+{
+ struct cpuset *cs = task_cs(p);
+
+ cs->nr_deadline_tasks--;
}
/* bits in struct cpuset flags field */
@@ -339,7 +361,18 @@
*/
static DEFINE_MUTEX(cpuset_mutex);
-static DEFINE_RAW_SPINLOCK(callback_lock);
+
+void cpuset_lock(void)
+{
+ mutex_lock(&cpuset_mutex);
+}
+
+void cpuset_unlock(void)
+{
+ mutex_unlock(&cpuset_mutex);
+}
+
+static DEFINE_SPINLOCK(callback_lock);
static struct workqueue_struct *cpuset_migrate_mm_wq;
@@ -925,10 +958,13 @@
return ndoms;
}
-static void update_tasks_root_domain(struct cpuset *cs)
+static void dl_update_tasks_root_domain(struct cpuset *cs)
{
struct css_task_iter it;
struct task_struct *task;
+
+ if (cs->nr_deadline_tasks == 0)
+ return;
css_task_iter_start(&cs->css, 0, &it);
@@ -938,7 +974,7 @@
css_task_iter_end(&it);
}
-static void rebuild_root_domains(void)
+static void dl_rebuild_rd_accounting(void)
{
struct cpuset *cs = NULL;
struct cgroup_subsys_state *pos_css;
@@ -966,7 +1002,7 @@
rcu_read_unlock();
- update_tasks_root_domain(cs);
+ dl_update_tasks_root_domain(cs);
rcu_read_lock();
css_put(&cs->css);
@@ -980,7 +1016,7 @@
{
mutex_lock(&sched_domains_mutex);
partition_sched_domains_locked(ndoms_new, doms_new, dattr_new);
- rebuild_root_domains();
+ dl_rebuild_rd_accounting();
mutex_unlock(&sched_domains_mutex);
}
@@ -1315,7 +1351,7 @@
* Newly added CPUs will be removed from effective_cpus and
* newly deleted ones will be added back to effective_cpus.
*/
- raw_spin_lock_irq(&callback_lock);
+ spin_lock_irq(&callback_lock);
if (adding) {
cpumask_or(parent->subparts_cpus,
parent->subparts_cpus, tmp->addmask);
@@ -1337,7 +1373,7 @@
if (cpuset->partition_root_state != new_prs)
cpuset->partition_root_state = new_prs;
- raw_spin_unlock_irq(&callback_lock);
+ spin_unlock_irq(&callback_lock);
return cmd == partcmd_update;
}
@@ -1440,7 +1476,7 @@
continue;
rcu_read_unlock();
- raw_spin_lock_irq(&callback_lock);
+ spin_lock_irq(&callback_lock);
cpumask_copy(cp->effective_cpus, tmp->new_cpus);
if (cp->nr_subparts_cpus && (new_prs != PRS_ENABLED)) {
@@ -1474,7 +1510,7 @@
if (new_prs != cp->partition_root_state)
cp->partition_root_state = new_prs;
- raw_spin_unlock_irq(&callback_lock);
+ spin_unlock_irq(&callback_lock);
WARN_ON(!is_in_v2_mode() &&
!cpumask_equal(cp->cpus_allowed, cp->effective_cpus));
@@ -1603,7 +1639,7 @@
return -EINVAL;
}
- raw_spin_lock_irq(&callback_lock);
+ spin_lock_irq(&callback_lock);
cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed);
cpumask_copy(cs->cpus_requested, trialcs->cpus_requested);
@@ -1614,7 +1650,7 @@
cpumask_and(cs->subparts_cpus, cs->subparts_cpus, cs->cpus_allowed);
cs->nr_subparts_cpus = cpumask_weight(cs->subparts_cpus);
}
- raw_spin_unlock_irq(&callback_lock);
+ spin_unlock_irq(&callback_lock);
update_cpumasks_hier(cs, &tmp);
@@ -1808,9 +1844,9 @@
continue;
rcu_read_unlock();
- raw_spin_lock_irq(&callback_lock);
+ spin_lock_irq(&callback_lock);
cp->effective_mems = *new_mems;
- raw_spin_unlock_irq(&callback_lock);
+ spin_unlock_irq(&callback_lock);
WARN_ON(!is_in_v2_mode() &&
!nodes_equal(cp->mems_allowed, cp->effective_mems));
@@ -1878,9 +1914,9 @@
if (retval < 0)
goto done;
- raw_spin_lock_irq(&callback_lock);
+ spin_lock_irq(&callback_lock);
cs->mems_allowed = trialcs->mems_allowed;
- raw_spin_unlock_irq(&callback_lock);
+ spin_unlock_irq(&callback_lock);
/* use trialcs->mems_allowed as a temp variable */
update_nodemasks_hier(cs, &trialcs->mems_allowed);
@@ -1971,9 +2007,9 @@
spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs))
|| (is_spread_page(cs) != is_spread_page(trialcs)));
- raw_spin_lock_irq(&callback_lock);
+ spin_lock_irq(&callback_lock);
cs->flags = trialcs->flags;
- raw_spin_unlock_irq(&callback_lock);
+ spin_unlock_irq(&callback_lock);
if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed)
rebuild_sched_domains_locked();
@@ -2059,9 +2095,9 @@
rebuild_sched_domains_locked();
out:
if (!err) {
- raw_spin_lock_irq(&callback_lock);
+ spin_lock_irq(&callback_lock);
cs->partition_root_state = new_prs;
- raw_spin_unlock_irq(&callback_lock);
+ spin_unlock_irq(&callback_lock);
}
free_cpumasks(NULL, &tmpmask);
@@ -2171,16 +2207,23 @@
static struct cpuset *cpuset_attach_old_cs;
+static void reset_migrate_dl_data(struct cpuset *cs)
+{
+ cs->nr_migrate_dl_tasks = 0;
+ cs->sum_migrate_dl_bw = 0;
+}
+
/* Called by cgroups to determine if a cpuset is usable; cpuset_mutex held */
static int cpuset_can_attach(struct cgroup_taskset *tset)
{
struct cgroup_subsys_state *css;
- struct cpuset *cs;
+ struct cpuset *cs, *oldcs;
struct task_struct *task;
int ret;
/* used later by cpuset_attach() */
cpuset_attach_old_cs = task_cs(cgroup_taskset_first(tset, &css));
+ oldcs = cpuset_attach_old_cs;
cs = css_cs(css);
mutex_lock(&cpuset_mutex);
@@ -2192,14 +2235,39 @@
goto out_unlock;
cgroup_taskset_for_each(task, css, tset) {
- ret = task_can_attach(task, cs->effective_cpus);
+ ret = task_can_attach(task);
if (ret)
goto out_unlock;
ret = security_task_setscheduler(task);
if (ret)
goto out_unlock;
+
+ if (dl_task(task)) {
+ cs->nr_migrate_dl_tasks++;
+ cs->sum_migrate_dl_bw += task->dl.dl_bw;
+ }
}
+ if (!cs->nr_migrate_dl_tasks)
+ goto out_success;
+
+ if (!cpumask_intersects(oldcs->effective_cpus, cs->effective_cpus)) {
+ int cpu = cpumask_any_and(cpu_active_mask, cs->effective_cpus);
+
+ if (unlikely(cpu >= nr_cpu_ids)) {
+ reset_migrate_dl_data(cs);
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ ret = dl_bw_alloc(cpu, cs->sum_migrate_dl_bw);
+ if (ret) {
+ reset_migrate_dl_data(cs);
+ goto out_unlock;
+ }
+ }
+
+out_success:
/*
* Mark attach is in progress. This makes validate_change() fail
* changes which zero cpus/mems_allowed.
@@ -2214,11 +2282,23 @@
static void cpuset_cancel_attach(struct cgroup_taskset *tset)
{
struct cgroup_subsys_state *css;
+ struct cpuset *cs;
cgroup_taskset_first(tset, &css);
+ cs = css_cs(css);
mutex_lock(&cpuset_mutex);
- css_cs(css)->attach_in_progress--;
+ cs->attach_in_progress--;
+ if (!cs->attach_in_progress)
+ wake_up(&cpuset_attach_wq);
+
+ if (cs->nr_migrate_dl_tasks) {
+ int cpu = cpumask_any(cs->effective_cpus);
+
+ dl_bw_free(cpu, cs->sum_migrate_dl_bw);
+ reset_migrate_dl_data(cs);
+ }
+
mutex_unlock(&cpuset_mutex);
}
@@ -2290,6 +2370,12 @@
}
cs->old_mems_allowed = cpuset_attach_nodemask_to;
+
+ if (cs->nr_migrate_dl_tasks) {
+ cs->nr_deadline_tasks += cs->nr_migrate_dl_tasks;
+ oldcs->nr_deadline_tasks -= cs->nr_migrate_dl_tasks;
+ reset_migrate_dl_data(cs);
+ }
cs->attach_in_progress--;
if (!cs->attach_in_progress)
@@ -2476,7 +2562,7 @@
cpuset_filetype_t type = seq_cft(sf)->private;
int ret = 0;
- raw_spin_lock_irq(&callback_lock);
+ spin_lock_irq(&callback_lock);
switch (type) {
case FILE_CPULIST:
@@ -2498,7 +2584,7 @@
ret = -EINVAL;
}
- raw_spin_unlock_irq(&callback_lock);
+ spin_unlock_irq(&callback_lock);
return ret;
}
@@ -2811,14 +2897,14 @@
cpuset_inc();
- raw_spin_lock_irq(&callback_lock);
+ spin_lock_irq(&callback_lock);
if (is_in_v2_mode()) {
cpumask_copy(cs->effective_cpus, parent->effective_cpus);
cs->effective_mems = parent->effective_mems;
cs->use_parent_ecpus = true;
parent->child_ecpus_count++;
}
- raw_spin_unlock_irq(&callback_lock);
+ spin_unlock_irq(&callback_lock);
if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags))
goto out_unlock;
@@ -2845,13 +2931,13 @@
}
rcu_read_unlock();
- raw_spin_lock_irq(&callback_lock);
+ spin_lock_irq(&callback_lock);
cs->mems_allowed = parent->mems_allowed;
cs->effective_mems = parent->mems_allowed;
cpumask_copy(cs->cpus_allowed, parent->cpus_allowed);
cpumask_copy(cs->cpus_requested, parent->cpus_requested);
cpumask_copy(cs->effective_cpus, parent->cpus_allowed);
- raw_spin_unlock_irq(&callback_lock);
+ spin_unlock_irq(&callback_lock);
out_unlock:
mutex_unlock(&cpuset_mutex);
put_online_cpus();
@@ -2907,7 +2993,7 @@
static void cpuset_bind(struct cgroup_subsys_state *root_css)
{
mutex_lock(&cpuset_mutex);
- raw_spin_lock_irq(&callback_lock);
+ spin_lock_irq(&callback_lock);
if (is_in_v2_mode()) {
cpumask_copy(top_cpuset.cpus_allowed, cpu_possible_mask);
@@ -2918,7 +3004,7 @@
top_cpuset.mems_allowed = top_cpuset.effective_mems;
}
- raw_spin_unlock_irq(&callback_lock);
+ spin_unlock_irq(&callback_lock);
mutex_unlock(&cpuset_mutex);
}
@@ -3018,12 +3104,12 @@
{
bool is_empty;
- raw_spin_lock_irq(&callback_lock);
+ spin_lock_irq(&callback_lock);
cpumask_copy(cs->cpus_allowed, new_cpus);
cpumask_copy(cs->effective_cpus, new_cpus);
cs->mems_allowed = *new_mems;
cs->effective_mems = *new_mems;
- raw_spin_unlock_irq(&callback_lock);
+ spin_unlock_irq(&callback_lock);
/*
* Don't call update_tasks_cpumask() if the cpuset becomes empty,
@@ -3060,10 +3146,10 @@
if (nodes_empty(*new_mems))
*new_mems = parent_cs(cs)->effective_mems;
- raw_spin_lock_irq(&callback_lock);
+ spin_lock_irq(&callback_lock);
cpumask_copy(cs->effective_cpus, new_cpus);
cs->effective_mems = *new_mems;
- raw_spin_unlock_irq(&callback_lock);
+ spin_unlock_irq(&callback_lock);
if (cpus_updated)
update_tasks_cpumask(cs);
@@ -3130,10 +3216,10 @@
if (is_partition_root(cs) && (cpumask_empty(&new_cpus) ||
(parent->partition_root_state == PRS_ERROR))) {
if (cs->nr_subparts_cpus) {
- raw_spin_lock_irq(&callback_lock);
+ spin_lock_irq(&callback_lock);
cs->nr_subparts_cpus = 0;
cpumask_clear(cs->subparts_cpus);
- raw_spin_unlock_irq(&callback_lock);
+ spin_unlock_irq(&callback_lock);
compute_effective_cpumask(&new_cpus, cs, parent);
}
@@ -3147,9 +3233,9 @@
cpumask_empty(&new_cpus)) {
update_parent_subparts_cpumask(cs, partcmd_disable,
NULL, tmp);
- raw_spin_lock_irq(&callback_lock);
+ spin_lock_irq(&callback_lock);
cs->partition_root_state = PRS_ERROR;
- raw_spin_unlock_irq(&callback_lock);
+ spin_unlock_irq(&callback_lock);
}
cpuset_force_rebuild();
}
@@ -3229,7 +3315,7 @@
/* synchronize cpus_allowed to cpu_active_mask */
if (cpus_updated) {
- raw_spin_lock_irq(&callback_lock);
+ spin_lock_irq(&callback_lock);
if (!on_dfl)
cpumask_copy(top_cpuset.cpus_allowed, &new_cpus);
/*
@@ -3249,17 +3335,17 @@
}
}
cpumask_copy(top_cpuset.effective_cpus, &new_cpus);
- raw_spin_unlock_irq(&callback_lock);
+ spin_unlock_irq(&callback_lock);
/* we don't mess with cpumasks of tasks in top_cpuset */
}
/* synchronize mems_allowed to N_MEMORY */
if (mems_updated) {
- raw_spin_lock_irq(&callback_lock);
+ spin_lock_irq(&callback_lock);
if (!on_dfl)
top_cpuset.mems_allowed = new_mems;
top_cpuset.effective_mems = new_mems;
- raw_spin_unlock_irq(&callback_lock);
+ spin_unlock_irq(&callback_lock);
update_tasks_nodemask(&top_cpuset);
}
@@ -3368,11 +3454,11 @@
{
unsigned long flags;
- raw_spin_lock_irqsave(&callback_lock, flags);
+ spin_lock_irqsave(&callback_lock, flags);
rcu_read_lock();
guarantee_online_cpus(tsk, pmask);
rcu_read_unlock();
- raw_spin_unlock_irqrestore(&callback_lock, flags);
+ spin_unlock_irqrestore(&callback_lock, flags);
}
EXPORT_SYMBOL_GPL(cpuset_cpus_allowed);
/**
@@ -3441,11 +3527,11 @@
nodemask_t mask;
unsigned long flags;
- raw_spin_lock_irqsave(&callback_lock, flags);
+ spin_lock_irqsave(&callback_lock, flags);
rcu_read_lock();
guarantee_online_mems(task_cs(tsk), &mask);
rcu_read_unlock();
- raw_spin_unlock_irqrestore(&callback_lock, flags);
+ spin_unlock_irqrestore(&callback_lock, flags);
return mask;
}
@@ -3537,14 +3623,14 @@
return true;
/* Not hardwall and node outside mems_allowed: scan up cpusets */
- raw_spin_lock_irqsave(&callback_lock, flags);
+ spin_lock_irqsave(&callback_lock, flags);
rcu_read_lock();
cs = nearest_hardwall_ancestor(task_cs(current));
allowed = node_isset(node, cs->mems_allowed);
rcu_read_unlock();
- raw_spin_unlock_irqrestore(&callback_lock, flags);
+ spin_unlock_irqrestore(&callback_lock, flags);
return allowed;
}
--
Gitblit v1.6.2