--- a/kernel/sched/cpupri.c
+++ b/kernel/sched/cpupri.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * kernel/sched/cpupri.c
  *
@@ -20,11 +21,6 @@
  * searches). For tasks with affinity restrictions, the algorithm has a
  * worst case complexity of O(min(102, nr_domcpus)), though the scenario that
  * yields the worst case search is fairly contrived.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; version 2
- * of the License.
  */
 #include "sched.h"
 
@@ -45,8 +41,29 @@
         return cpupri;
 }
 
+#ifdef CONFIG_RT_SOFTINT_OPTIMIZATION
+/**
+ * drop_nopreempt_cpus - remove likely non-preemptible CPUs from the mask
+ * @lowest_mask: mask with selected CPUs (non-NULL)
+ */
+static void
+drop_nopreempt_cpus(struct cpumask *lowest_mask)
+{
+        unsigned int cpu = cpumask_first(lowest_mask);
+        while (cpu < nr_cpu_ids) {
+                /* Unlocked read of rq->curr; a stale task is tolerable here. */
+                struct task_struct *task = READ_ONCE(cpu_rq(cpu)->curr);
+                if (task_may_not_preempt(task, cpu)) {
+                        cpumask_clear_cpu(cpu, lowest_mask);
+                }
+                cpu = cpumask_next(cpu, lowest_mask);
+        }
+}
+#endif
+
 static inline int __cpupri_find(struct cpupri *cp, struct task_struct *p,
-                                struct cpumask *lowest_mask, int idx)
+                                struct cpumask *lowest_mask, int idx,
+                                bool drop_nopreempts)
 {
         struct cpupri_vec *vec = &cp->pri_to_cpu[idx];
         int skip = 0;
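The helper above only prunes the mask; the actual preemptibility test,
task_may_not_preempt(), is defined elsewhere in this series (presumably
kernel/sched/rt.c). A minimal sketch of that check follows, assuming the
series also maintains a per-CPU bitmask of softirqs currently being serviced
(active_softirqs) and a LONG_SOFTIRQ_MASK selecting the softirq types
considered slow; both names are assumptions here, as is an architecture that
keeps preempt_count in thread_info (arm64 does, x86 does not):

/*
 * Sketch, not part of this file: treat a CPU as non-preemptible when a
 * "long" softirq is being serviced there and its current task is either
 * ksoftirqd or is itself executing in softirq context. The real series
 * may also fold in the CPU's pending-softirq bits.
 */
bool task_may_not_preempt(struct task_struct *task, int cpu)
{
        __u32 softirqs = per_cpu(active_softirqs, cpu);
        struct task_struct *cpu_ksoftirqd = per_cpu(ksoftirqd, cpu);

        return (softirqs & LONG_SOFTIRQ_MASK) &&
               (task == cpu_ksoftirqd ||
                task_thread_info(task)->preempt_count & SOFTIRQ_MASK);
}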
@@ -77,11 +94,17 @@
         if (skip)
                 return 0;
 
-        if (cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids)
+        if (cpumask_any_and(p->cpus_ptr, vec->mask) >= nr_cpu_ids)
                 return 0;
 
         if (lowest_mask) {
-                cpumask_and(lowest_mask, &p->cpus_allowed, vec->mask);
+                cpumask_and(lowest_mask, p->cpus_ptr, vec->mask);
+                cpumask_and(lowest_mask, lowest_mask, cpu_active_mask);
+
+#ifdef CONFIG_RT_SOFTINT_OPTIMIZATION
+                if (drop_nopreempts)
+                        drop_nopreempt_cpus(lowest_mask);
+#endif
 
                 /*
                  * We have to ensure that we have at least one bit
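The masking in that hunk reads more easily as one helper. The following is
only a restatement of the patched logic for clarity, not code from the patch,
and build_lowest_mask is a hypothetical name: lowest_mask ends up as the
intersection of the task's affinity mask (reached via p->cpus_ptr rather than
&p->cpus_allowed, following the mainline rename), the priority vector's CPU
set, and cpu_active_mask, with the first pass additionally stripping likely
non-preemptible CPUs.

/* Restatement of the patched masking, with a hypothetical helper name. */
static void build_lowest_mask(struct cpumask *lowest_mask,
                              struct task_struct *p,
                              struct cpupri_vec *vec,
                              bool drop_nopreempts)
{
        /* CPUs the task may run on that sit at this priority level. */
        cpumask_and(lowest_mask, p->cpus_ptr, vec->mask);
        /* Never offer CPUs that are on their way offline. */
        cpumask_and(lowest_mask, lowest_mask, cpu_active_mask);
#ifdef CONFIG_RT_SOFTINT_OPTIMIZATION
        /* First pass only: avoid CPUs stuck in long softirqs. */
        if (drop_nopreempts)
                drop_nopreempt_cpus(lowest_mask);
#endif
}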
@@ -127,12 +150,16 @@
 {
         int task_pri = convert_prio(p->prio);
         int idx, cpu;
+        bool drop_nopreempts = task_pri <= MAX_RT_PRIO;
 
         BUG_ON(task_pri >= CPUPRI_NR_PRIORITIES);
 
+#ifdef CONFIG_RT_SOFTINT_OPTIMIZATION
+retry:
+#endif
         for (idx = 0; idx < task_pri; idx++) {
 
-                if (!__cpupri_find(cp, p, lowest_mask, idx))
+                if (!__cpupri_find(cp, p, lowest_mask, idx, drop_nopreempts))
                         continue;
 
                 if (!lowest_mask || !fitness_fn)
@@ -153,6 +180,17 @@
 
                 return 1;
         }
+
+        /*
+         * If dropping the non-preemptible CPUs left us with no candidates,
+         * retry with them included to avoid priority inversion.
+         */
+#ifdef CONFIG_RT_SOFTINT_OPTIMIZATION
+        if (drop_nopreempts) {
+                drop_nopreempts = false;
+                goto retry;
+        }
+#endif
 
         /*
          * If we failed to find a fitting lowest_mask, kick off a new search
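Putting the retry label and this fallback together, the lookup is now
two-pass for RT-priority tasks (drop_nopreempts starts true when
task_pri <= MAX_RT_PRIO). A condensed sketch of the resulting control flow,
with the fitness_fn handling elided and a hypothetical helper name:

/* Condensed sketch of cpupri_find_fitness() with the option enabled. */
static int find_lowest_sketch(struct cpupri *cp, struct task_struct *p,
                              struct cpumask *lowest_mask, int task_pri)
{
        bool drop_nopreempts = task_pri <= MAX_RT_PRIO;
        int idx;

retry:
        for (idx = 0; idx < task_pri; idx++) {
                /* Pass 1 skips CPUs busy in long softirqs. */
                if (__cpupri_find(cp, p, lowest_mask, idx, drop_nopreempts))
                        return 1;
        }
        if (drop_nopreempts) {
                /* Nothing preemptible found: accept any lowest-prio CPU. */
                drop_nopreempts = false;
                goto retry;
        }
        return 0;
}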
@@ -176,6 +214,7 @@
 
         return 0;
 }
+EXPORT_SYMBOL_GPL(cpupri_find_fitness);
 
 /**
  * cpupri_set - update the CPU priority setting
@@ -294,3 +333,16 @@
         for (i = 0; i < CPUPRI_NR_PRIORITIES; i++)
                 free_cpumask_var(cp->pri_to_cpu[i].mask);
 }
+
+#ifdef CONFIG_RT_SOFTINT_OPTIMIZATION
+/*
+ * cpupri_check_rt - check whether the current CPU is running an RT task.
+ * Must be called from an rcu-sched read-side critical section.
+ */
+bool cpupri_check_rt(void)
+{
+        int cpu = raw_smp_processor_id();
+
+        return cpu_rq(cpu)->rd->cpupri.cpu_to_pri[cpu] > CPUPRI_NORMAL;
+}
+#endif
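cpupri_find_fitness() is exported and cpupri_check_rt() is added so that code
outside this file (the softirq path in this series, and potentially modules)
can consult RT state. A sketch of a hypothetical caller, showing one way to
satisfy the rcu-sched requirement; the function name and call site are
assumptions, and a declaration of cpupri_check_rt() is presumed to come from
a header elsewhere in the series:

/* Hypothetical call site: skip extra softirq work on an RT-busy CPU. */
static bool current_cpu_busy_with_rt(void)
{
        bool busy;

        rcu_read_lock_sched();  /* rcu-sched read-side section */
        busy = cpupri_check_rt();
        rcu_read_unlock_sched();

        return busy;
}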