```diff
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
 /*
  * smp.h: PowerPC-specific SMP code.
  *
@@ -6,11 +7,6 @@
  *
  * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
  * Copyright (C) 1996-2001 Cort Dougan <cort@fsmlabs.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
  */
 
 #ifndef _ASM_POWERPC_SMP_H
@@ -32,8 +28,8 @@
 extern int boot_cpuid;
 extern int spinning_secondaries;
 extern u32 *cpu_to_phys_id;
+extern bool coregroup_enabled;
 
-extern void cpu_die(void);
 extern int cpu_to_chip_id(int cpu);
 
 #ifdef CONFIG_SMP
@@ -54,6 +50,9 @@
 	int (*cpu_disable)(void);
 	void (*cpu_die)(unsigned int nr);
 	int (*cpu_bootable)(unsigned int nr);
+#ifdef CONFIG_HOTPLUG_CPU
+	void (*cpu_offline_self)(void);
+#endif
 };
 
 extern int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us);
@@ -83,7 +82,22 @@
 /* 32-bit */
 extern int smp_hw_index[];
 
-#define raw_smp_processor_id() (current_thread_info()->cpu)
+/*
+ * This is particularly ugly: it appears we can't actually get the definition
+ * of task_struct here, but we need access to the CPU this task is running on.
+ * Instead of using task_struct we're using _TASK_CPU which is extracted from
+ * asm-offsets.h by kbuild to get the current processor ID.
+ *
+ * This also needs to be safeguarded when building asm-offsets.s because at
+ * that time _TASK_CPU is not defined yet. It could have been guarded by
+ * _TASK_CPU itself, but we want the build to fail if _TASK_CPU is missing
+ * when building something else than asm-offsets.s
+ */
+#ifdef GENERATING_ASM_OFFSETS
+#define raw_smp_processor_id() (0)
+#else
+#define raw_smp_processor_id() (*(unsigned int *)((void *)current + _TASK_CPU))
+#endif
 #define hard_smp_processor_id() (smp_hw_index[smp_processor_id()])
 
 static inline int get_hard_smp_processor_id(int cpu)
@@ -100,6 +114,7 @@
 DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_map);
 DECLARE_PER_CPU(cpumask_var_t, cpu_l2_cache_map);
 DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
+DECLARE_PER_CPU(cpumask_var_t, cpu_smallcore_map);
 
 static inline struct cpumask *cpu_sibling_mask(int cpu)
 {
@@ -116,7 +131,25 @@
 	return per_cpu(cpu_l2_cache_map, cpu);
 }
 
+static inline struct cpumask *cpu_smallcore_mask(int cpu)
+{
+	return per_cpu(cpu_smallcore_map, cpu);
+}
+
 extern int cpu_to_core_id(int cpu);
+
+extern bool has_big_cores;
+
+#define cpu_smt_mask cpu_smt_mask
+#ifdef CONFIG_SCHED_SMT
+static inline const struct cpumask *cpu_smt_mask(int cpu)
+{
+	if (has_big_cores)
+		return per_cpu(cpu_smallcore_map, cpu);
+
+	return per_cpu(cpu_sibling_map, cpu);
+}
+#endif /* CONFIG_SCHED_SMT */
 
 /* Since OpenPIC has only 4 IPIs, we use slightly different message numbers.
  *
@@ -162,6 +195,11 @@
 static inline void inhibit_secondary_onlining(void) {}
 static inline void uninhibit_secondary_onlining(void) {}
 static inline const struct cpumask *cpu_sibling_mask(int cpu)
+{
+	return cpumask_of(cpu);
+}
+
+static inline const struct cpumask *cpu_smallcore_mask(int cpu)
 {
 	return cpumask_of(cpu);
 }
@@ -221,7 +259,6 @@
  * 64-bit but defining them all here doesn't harm
  */
 extern void generic_secondary_smp_init(void);
-extern void generic_secondary_thread_init(void);
 extern unsigned long __secondary_hold_spinloop;
 extern unsigned long __secondary_hold_acknowledge;
 extern char __secondary_hold;
```
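For context, a minimal userspace sketch of the offset-based field access that the `raw_smp_processor_id()` comment in the diff describes: reading a struct field through an opaque pointer plus a byte offset when the struct definition is not visible at the use site. The names `struct task_stub`, `TASK_CPU_OFFSET`, and `cpu_of()` are illustrative only; in the kernel the offset is the `_TASK_CPU` constant that kbuild generates into asm-offsets.h, and the pointer is `current`.

```c
#include <stdio.h>
#include <stddef.h>

/*
 * Illustrative stand-in for task_struct. In the kernel, this definition is
 * not visible where raw_smp_processor_id() is expanded; only the byte
 * offset of the cpu field (the generated _TASK_CPU constant) is known.
 */
struct task_stub {
	long state;
	unsigned int cpu;	/* CPU this task is running on */
};

/* Computed directly here; kbuild generates the kernel's equivalent constant. */
#define TASK_CPU_OFFSET offsetof(struct task_stub, cpu)

/*
 * Read the cpu field through an opaque pointer plus a byte offset,
 * mirroring (*(unsigned int *)((void *)current + _TASK_CPU)).
 */
static unsigned int cpu_of(const void *task)
{
	return *(const unsigned int *)((const char *)task + TASK_CPU_OFFSET);
}

int main(void)
{
	struct task_stub t = { .state = 0, .cpu = 3 };

	printf("task is running on CPU %u\n", cpu_of(&t));
	return 0;
}
```

The `#ifdef GENERATING_ASM_OFFSETS` guard in the patch exists because the trick is circular while asm-offsets.s itself is being built: `_TASK_CPU` does not exist yet at that point, so the macro temporarily degrades to `(0)`, while any other translation unit missing the constant still fails to build.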