hc
2024-12-19 9370bb92b2d16684ee45cf24e879c93c509162da
kernel/lib/cpumask.c
....@@ -4,7 +4,8 @@
44 #include <linux/bitops.h>
55 #include <linux/cpumask.h>
66 #include <linux/export.h>
7
-#include <linux/bootmem.h>
7
+#include <linux/memblock.h>
8
+#include <linux/numa.h>
89
910 /**
1011 * cpumask_next - get the next cpu in a cpumask
....@@ -163,7 +164,10 @@
163164 */
164165 void __init alloc_bootmem_cpumask_var(cpumask_var_t *mask)
165166 {
166
- *mask = memblock_virt_alloc(cpumask_size(), 0);
167
+ *mask = memblock_alloc(cpumask_size(), SMP_CACHE_BYTES);
168
+ if (!*mask)
169
+ panic("%s: Failed to allocate %u bytes\n", __func__,
170
+ cpumask_size());
167171 }
168172
169173 /**
....@@ -206,7 +210,7 @@
206210 /* Wrap: we always want a cpu. */
207211 i %= num_online_cpus();
208212
209
- if (node == -1) {
213
+ if (node == NUMA_NO_NODE) {
210214 for_each_cpu(cpu, cpu_online_mask)
211215 if (i-- == 0)
212216 return cpu;
....@@ -228,3 +232,32 @@
228232 BUG();
229233 }
230234 EXPORT_SYMBOL(cpumask_local_spread);
235
+
236
+static DEFINE_PER_CPU(int, distribute_cpu_mask_prev);
237
+
238
+/**
239
+ * cpumask_any_and_distribute - Returns an arbitrary cpu within src1p & src2p.
240
+ *
241
+ * Iterated calls using the same src1p and src2p will be distributed within
242
+ * their intersection.
243
+ *
244
+ * Returns >= nr_cpu_ids if the intersection is empty.
245
+ */
246
+int cpumask_any_and_distribute(const struct cpumask *src1p,
247
+ const struct cpumask *src2p)
248
+{
249
+ int next, prev;
250
+
251
+ /* NOTE: our first selection will skip 0. */
252
+ prev = __this_cpu_read(distribute_cpu_mask_prev);
253
+
254
+ next = cpumask_next_and(prev, src1p, src2p);
255
+ if (next >= nr_cpu_ids)
256
+ next = cpumask_first_and(src1p, src2p);
257
+
258
+ if (next < nr_cpu_ids)
259
+ __this_cpu_write(distribute_cpu_mask_prev, next);
260
+
261
+ return next;
262
+}
263
+EXPORT_SYMBOL(cpumask_any_and_distribute);