.. | .. |
10 | 10 | #include <linux/kernel.h> |
11 | 11 | #include <linux/threads.h> |
12 | 12 | #include <linux/bitmap.h> |
| 13 | +#include <linux/atomic.h> |
13 | 14 | #include <linux/bug.h> |
14 | 15 | |
15 | 16 | /* Don't assign or return these: may not be this big! */ |
.. | .. |
95 | 96 | #define cpu_present_mask ((const struct cpumask *)&__cpu_present_mask) |
96 | 97 | #define cpu_active_mask ((const struct cpumask *)&__cpu_active_mask) |
97 | 98 | |
| 99 | +extern atomic_t __num_online_cpus; |
| 100 | + |
98 | 101 | #if NR_CPUS > 1 |
99 | | -#define num_online_cpus() cpumask_weight(cpu_online_mask) |
| 102 | +/** |
| 103 | + * num_online_cpus() - Read the number of online CPUs |
| 104 | + * |
| 105 | + * Despite the fact that __num_online_cpus is of type atomic_t, this |
| 106 | + * interface gives only a momentary snapshot and is not protected against |
| 107 | + * concurrent CPU hotplug operations unless invoked from a cpuhp_lock held |
| 108 | + * region. |
| 109 | + */ |
| 110 | +static inline unsigned int num_online_cpus(void) |
| 111 | +{ |
| 112 | + return atomic_read(&__num_online_cpus); |
| 113 | +} |
100 | 114 | #define num_possible_cpus() cpumask_weight(cpu_possible_mask) |
101 | 115 | #define num_present_cpus() cpumask_weight(cpu_present_mask) |
102 | 116 | #define num_active_cpus() cpumask_weight(cpu_active_mask) |
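Aside (not part of the diff): a minimal usage sketch of the new num_online_cpus(). As the kdoc above notes, the value is only a momentary snapshot; a caller that needs the count to stay stable has to hold the CPU hotplug lock around it. The helper name stable_online_count() below is hypothetical.

```c
#include <linux/cpu.h>
#include <linux/cpumask.h>

/* Hypothetical helper: read an online-CPU count that cannot change underneath us. */
static unsigned int stable_online_count(void)
{
	unsigned int n;

	cpus_read_lock();		/* block CPU hotplug for the duration */
	n = num_online_cpus();		/* now a plain atomic_read(), not a bitmap weight */
	cpus_read_unlock();

	return n;
}
```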
.. | .. |
114 | 128 | #define cpu_present(cpu) ((cpu) == 0) |
115 | 129 | #define cpu_active(cpu) ((cpu) == 0) |
116 | 130 | #endif |
| 131 | + |
| 132 | +extern cpumask_t cpus_booted_once_mask; |
117 | 133 | |
118 | 134 | static inline void cpu_max_bits_warn(unsigned int cpu, unsigned int bits) |
119 | 135 | { |
.. | .. |
178 | 194 | return 0; |
179 | 195 | } |
180 | 196 | |
| 197 | +static inline int cpumask_any_and_distribute(const struct cpumask *src1p, |
| 198 | + const struct cpumask *src2p) { |
| 199 | + return cpumask_next_and(-1, src1p, src2p); |
| 200 | +} |
| 201 | + |
181 | 202 | #define for_each_cpu(cpu, mask) \ |
182 | 203 | for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask) |
183 | 204 | #define for_each_cpu_not(cpu, mask) \ |
184 | 205 | for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask) |
185 | 206 | #define for_each_cpu_wrap(cpu, mask, start) \ |
186 | 207 | for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask, (void)(start)) |
187 | | -#define for_each_cpu_and(cpu, mask, and) \ |
188 | | - for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask, (void)and) |
| 208 | +#define for_each_cpu_and(cpu, mask1, mask2) \ |
| 209 | + for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask1, (void)mask2) |
189 | 210 | #else |
190 | 211 | /** |
191 | 212 | * cpumask_first - get the first cpu in a cpumask |
.. | .. |
229 | 250 | int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *); |
230 | 251 | int cpumask_any_but(const struct cpumask *mask, unsigned int cpu); |
231 | 252 | unsigned int cpumask_local_spread(unsigned int i, int node); |
| 253 | +int cpumask_any_and_distribute(const struct cpumask *src1p, |
| 254 | + const struct cpumask *src2p); |
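Aside (a sketch, not taken from this diff): cpumask_any_and_distribute() returns some CPU in the intersection of the two masks, with the SMP implementation expected to spread successive picks rather than always returning the lowest set bit; the UP stub above simply falls back to cpumask_next_and(-1, ...). The helper name pick_target_cpu() is hypothetical.

```c
#include <linux/cpumask.h>

/* Hypothetical helper: choose a CPU that is both in @allowed and currently online. */
static int pick_target_cpu(const struct cpumask *allowed)
{
	int cpu = cpumask_any_and_distribute(allowed, cpu_online_mask);

	return cpu < nr_cpu_ids ? cpu : -1;	/* -1 if the masks do not intersect */
}
```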
232 | 255 | |
233 | 256 | /** |
234 | 257 | * for_each_cpu - iterate over every cpu in a mask |
.. | .. |
274 | 297 | /** |
275 | 298 | * for_each_cpu_and - iterate over every cpu in both masks |
276 | 299 | * @cpu: the (optionally unsigned) integer iterator |
277 | | - * @mask: the first cpumask pointer |
278 | | - * @and: the second cpumask pointer |
| 300 | + * @mask1: the first cpumask pointer |
| 301 | + * @mask2: the second cpumask pointer |
279 | 302 | * |
280 | 303 | * This saves a temporary CPU mask in many places. It is equivalent to: |
281 | 304 | * struct cpumask tmp; |
282 | | - * cpumask_and(&tmp, &mask, &and); |
| 305 | + * cpumask_and(&tmp, &mask1, &mask2); |
283 | 306 | * for_each_cpu(cpu, &tmp) |
284 | 307 | * ... |
285 | 308 | * |
286 | 309 | * After the loop, cpu is >= nr_cpu_ids. |
287 | 310 | */ |
288 | | -#define for_each_cpu_and(cpu, mask, and) \ |
| 311 | +#define for_each_cpu_and(cpu, mask1, mask2) \ |
289 | 312 | for ((cpu) = -1; \ |
290 | | - (cpu) = cpumask_next_and((cpu), (mask), (and)), \ |
| 313 | + (cpu) = cpumask_next_and((cpu), (mask1), (mask2)), \ |
291 | 314 | (cpu) < nr_cpu_ids;) |
292 | 315 | #endif /* SMP */ |
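Aside (illustrative only): with the parameters renamed to @mask1/@mask2, for_each_cpu_and() reads as a plain intersection walk. A minimal sketch using the hypothetical helper count_present_online():

```c
#include <linux/cpumask.h>

/* Hypothetical helper: count CPUs that are both present and online, with no temporary mask. */
static unsigned int count_present_online(void)
{
	unsigned int cpu, nr = 0;

	for_each_cpu_and(cpu, cpu_present_mask, cpu_online_mask)
		nr++;

	return nr;
}
```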
293 | 316 | |
.. | .. |
474 | 497 | } |
475 | 498 | |
476 | 499 | /** |
| 500 | + * cpumask_or_equal - *src1p | *src2p == *src3p |
| 501 | + * @src1p: the first input |
| 502 | + * @src2p: the second input |
| 503 | + * @src3p: the third input |
| 504 | + */ |
| 505 | +static inline bool cpumask_or_equal(const struct cpumask *src1p, |
| 506 | + const struct cpumask *src2p, |
| 507 | + const struct cpumask *src3p) |
| 508 | +{ |
| 509 | + return bitmap_or_equal(cpumask_bits(src1p), cpumask_bits(src2p), |
| 510 | + cpumask_bits(src3p), nr_cpumask_bits); |
| 511 | +} |
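Aside (illustrative only): cpumask_or_equal() answers "do these two masks together cover exactly this third mask?" without allocating a temporary cpumask for the OR result. The helper name halves_cover_all() is hypothetical.

```c
#include <linux/cpumask.h>

/* Hypothetical helper: true iff (*lo | *hi) == *all, bit for bit. */
static bool halves_cover_all(const struct cpumask *lo,
			     const struct cpumask *hi,
			     const struct cpumask *all)
{
	return cpumask_or_equal(lo, hi, all);
}
```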
| 512 | + |
| 513 | +/** |
477 | 514 | * cpumask_intersects - (*src1p & *src2p) != 0 |
478 | 515 | * @src1p: the first input |
479 | 516 | * @src2p: the second input |
.. | .. |
633 | 670 | */ |
634 | 671 | static inline int cpumask_parse(const char *buf, struct cpumask *dstp) |
635 | 672 | { |
636 | | - char *nl = strchr(buf, '\n'); |
637 | | - unsigned int len = nl ? (unsigned int)(nl - buf) : strlen(buf); |
638 | | - |
639 | | - return bitmap_parse(buf, len, cpumask_bits(dstp), nr_cpumask_bits); |
| 673 | + return bitmap_parse(buf, UINT_MAX, cpumask_bits(dstp), nr_cpumask_bits); |
640 | 674 | } |
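Aside (illustrative only): passing UINT_MAX as the length lets bitmap_parse() find the terminating newline or NUL itself, which is why the strchr()/strlen() length computation removed above is no longer needed in cpumask_parse(). A minimal sketch with a hypothetical caller; the input is the usual comma-separated hex-word mask format.

```c
#include <linux/cpumask.h>

/* Hypothetical example: "0000000f" sets CPUs 0-3; a trailing newline is accepted. */
static int parse_example(struct cpumask *mask)
{
	return cpumask_parse("0000000f\n", mask);	/* 0 on success, negative errno on bad input */
}
```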
641 | 675 | |
642 | 676 | /** |
.. | .. |
806 | 840 | cpumask_clear_cpu(cpu, &__cpu_present_mask); |
807 | 841 | } |
808 | 842 | |
809 | | -static inline void |
810 | | -set_cpu_online(unsigned int cpu, bool online) |
811 | | -{ |
812 | | - if (online) |
813 | | - cpumask_set_cpu(cpu, &__cpu_online_mask); |
814 | | - else |
815 | | - cpumask_clear_cpu(cpu, &__cpu_online_mask); |
816 | | -} |
| 843 | +void set_cpu_online(unsigned int cpu, bool online); |
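Aside (an assumption, not shown in this diff): set_cpu_online() loses its inline body here because the online mask and the cached __num_online_cpus counter now have to be updated together, so the definition moves out of line (into kernel/cpu.c in mainline). Roughly, such a definition has to look like the sketch below; the exact mainline body may differ.

```c
/* Sketch only: keep __cpu_online_mask and __num_online_cpus in sync. */
void set_cpu_online(unsigned int cpu, bool online)
{
	if (online) {
		/* test-and-set keeps the counter correct even on a repeated call */
		if (!cpumask_test_and_set_cpu(cpu, &__cpu_online_mask))
			atomic_inc(&__num_online_cpus);
	} else {
		if (cpumask_test_and_clear_cpu(cpu, &__cpu_online_mask))
			atomic_dec(&__num_online_cpus);
	}
}
```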
817 | 844 | |
818 | 845 | static inline void |
819 | 846 | set_cpu_active(unsigned int cpu, bool active) |