From 072de836f53be56a70cecf70b43ae43b7ce17376 Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Mon, 11 Dec 2023 10:08:36 +0000
Subject: [PATCH] sched/cpupri: skip likely non-preemptible CPUs when placing RT tasks
---
kernel/kernel/sched/cpupri.c | 70 ++++++++++++++++++++++++++++++----
 1 file changed, 61 insertions(+), 9 deletions(-)
diff --git a/kernel/kernel/sched/cpupri.c b/kernel/kernel/sched/cpupri.c
index 685ee13..cb11531 100644
--- a/kernel/kernel/sched/cpupri.c
+++ b/kernel/kernel/sched/cpupri.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* kernel/sched/cpupri.c
*
@@ -20,11 +21,6 @@
* searches). For tasks with affinity restrictions, the algorithm has a
* worst case complexity of O(min(102, nr_domcpus)), though the scenario that
* yields the worst case search is fairly contrived.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; version 2
- * of the License.
*/
#include "sched.h"
@@ -45,8 +41,29 @@
return cpupri;
}
+#ifdef CONFIG_RT_SOFTINT_OPTIMIZATION
+/**
+ * drop_nopreempt_cpus - clear likely non-preemptible CPUs from @lowest_mask
+ * @lowest_mask: candidate-CPU mask, filtered in place (must be non-NULL)
+ */
+static void
+drop_nopreempt_cpus(struct cpumask *lowest_mask)
+{
+	unsigned int cpu = cpumask_first(lowest_mask);
+	while (cpu < nr_cpu_ids) {
+		/* unlocked, racy read of rq->curr; a stale value only mis-filters one CPU */
+		struct task_struct *task = READ_ONCE(cpu_rq(cpu)->curr);
+		if (task_may_not_preempt(task, cpu)) {
+			cpumask_clear_cpu(cpu, lowest_mask);
+		}
+		cpu = cpumask_next(cpu, lowest_mask);
+	}
+}
+#endif
+
static inline int __cpupri_find(struct cpupri *cp, struct task_struct *p,
- struct cpumask *lowest_mask, int idx)
+ struct cpumask *lowest_mask, int idx,
+ bool drop_nopreempts)
{
struct cpupri_vec *vec = &cp->pri_to_cpu[idx];
int skip = 0;
@@ -77,11 +94,17 @@
if (skip)
return 0;
- if (cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids)
+ if (cpumask_any_and(p->cpus_ptr, vec->mask) >= nr_cpu_ids)
return 0;
if (lowest_mask) {
- cpumask_and(lowest_mask, &p->cpus_allowed, vec->mask);
+ cpumask_and(lowest_mask, p->cpus_ptr, vec->mask);
+ cpumask_and(lowest_mask, lowest_mask, cpu_active_mask);
+
+#ifdef CONFIG_RT_SOFTINT_OPTIMIZATION
+ if (drop_nopreempts)
+ drop_nopreempt_cpus(lowest_mask);
+#endif
/*
* We have to ensure that we have at least one bit
@@ -127,12 +150,16 @@
{
int task_pri = convert_prio(p->prio);
int idx, cpu;
+ bool drop_nopreempts = task_pri <= MAX_RT_PRIO;
BUG_ON(task_pri >= CPUPRI_NR_PRIORITIES);
+#ifdef CONFIG_RT_SOFTINT_OPTIMIZATION
+retry:
+#endif
for (idx = 0; idx < task_pri; idx++) {
- if (!__cpupri_find(cp, p, lowest_mask, idx))
+ if (!__cpupri_find(cp, p, lowest_mask, idx, drop_nopreempts))
continue;
if (!lowest_mask || !fitness_fn)
@@ -153,6 +180,17 @@
return 1;
}
+
+	/*
+	 * If no preemptible CPU was found, retry including non-preemptible
+	 * CPUs so we still find the lowest-priority target and avoid
+	 * priority inversion.
+	 */
+#ifdef CONFIG_RT_SOFTINT_OPTIMIZATION
+ if (drop_nopreempts) {
+ drop_nopreempts = false;
+ goto retry;
+ }
+#endif
/*
* If we failed to find a fitting lowest_mask, kick off a new search
@@ -176,6 +214,7 @@
return 0;
}
+EXPORT_SYMBOL_GPL(cpupri_find_fitness);
/**
* cpupri_set - update the CPU priority setting
@@ -294,3 +333,16 @@
for (i = 0; i < CPUPRI_NR_PRIORITIES; i++)
free_cpumask_var(cp->pri_to_cpu[i].mask);
}
+
+#ifdef CONFIG_RT_SOFTINT_OPTIMIZATION
+/*
+ * cpupri_check_rt - check whether the current CPU is running an RT task
+ * Must be called from an RCU-sched read-side critical section.
+ */
+bool cpupri_check_rt(void)
+{
+	int cpu = raw_smp_processor_id();
+
+	return cpu_rq(cpu)->rd->cpupri.cpu_to_pri[cpu] > CPUPRI_NORMAL;
+}
+#endif
--
Gitblit v1.6.2