@@ -117,8 +117,10 @@
 	 * cli/sti. Otherwise we use an even cheaper single atomic write
 	 * to the APIC.
 	 */
+	unsigned long flags;
 	unsigned int cfg;
 
+	flags = hard_cond_local_irq_save();
 	/*
 	 * Wait for idle.
 	 */
@@ -137,6 +139,8 @@
 	 * Send the IPI. The write to APIC_ICR fires this off.
 	 */
 	native_apic_mem_write(APIC_ICR, cfg);
+
+	hard_cond_local_irq_restore(flags);
 }
 
 /*
@@ -145,8 +149,10 @@
  */
 void __default_send_IPI_dest_field(unsigned int mask, int vector, unsigned int dest)
 {
+	unsigned long flags;
 	unsigned long cfg;
 
+	flags = hard_cond_local_irq_save();
 	/*
 	 * Wait for idle.
 	 */
@@ -170,16 +176,18 @@
 	 * Send the IPI. The write to APIC_ICR fires this off.
 	 */
 	native_apic_mem_write(APIC_ICR, cfg);
+
+	hard_cond_local_irq_restore(flags);
 }
 
 void default_send_IPI_single_phys(int cpu, int vector)
 {
 	unsigned long flags;
 
-	local_irq_save(flags);
+	flags = hard_local_irq_save();
 	__default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, cpu),
 				      vector, APIC_DEST_PHYSICAL);
-	local_irq_restore(flags);
+	hard_local_irq_restore(flags);
 }
 
 void default_send_IPI_mask_sequence_phys(const struct cpumask *mask, int vector)
@@ -192,12 +200,12 @@
 	 * to an arbitrary mask, so I do a unicast to each CPU instead.
 	 * - mbligh
 	 */
-	local_irq_save(flags);
+	flags = hard_local_irq_save();
 	for_each_cpu(query_cpu, mask) {
 		__default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid,
 				query_cpu), vector, APIC_DEST_PHYSICAL);
 	}
-	local_irq_restore(flags);
+	hard_local_irq_restore(flags);
 }
 
 void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask,
@@ -209,14 +217,14 @@
 
 	/* See Hack comment above */
 
-	local_irq_save(flags);
+	flags = hard_local_irq_save();
 	for_each_cpu(query_cpu, mask) {
 		if (query_cpu == this_cpu)
 			continue;
 		__default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid,
 				query_cpu), vector, APIC_DEST_PHYSICAL);
 	}
-	local_irq_restore(flags);
+	hard_local_irq_restore(flags);
 }
 
 /*
@@ -256,12 +264,12 @@
 	 * should be modified to do 1 message per cluster ID - mbligh
 	 */
 
-	local_irq_save(flags);
+	flags = hard_local_irq_save();
 	for_each_cpu(query_cpu, mask)
 		__default_send_IPI_dest_field(
 			early_per_cpu(x86_cpu_to_logical_apicid, query_cpu),
 			vector, apic->dest_logical);
-	local_irq_restore(flags);
+	hard_local_irq_restore(flags);
 }
 
 void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask,
@@ -273,7 +281,7 @@
 
 	/* See Hack comment above */
 
-	local_irq_save(flags);
+	flags = hard_local_irq_save();
 	for_each_cpu(query_cpu, mask) {
 		if (query_cpu == this_cpu)
 			continue;
@@ -281,7 +289,7 @@
 			early_per_cpu(x86_cpu_to_logical_apicid, query_cpu),
 			vector, apic->dest_logical);
 	}
-	local_irq_restore(flags);
+	hard_local_irq_restore(flags);
 }
 
 /*
@@ -295,10 +303,10 @@
 	if (!mask)
 		return;
 
-	local_irq_save(flags);
+	flags = hard_local_irq_save();
 	WARN_ON(mask & ~cpumask_bits(cpu_online_mask)[0]);
 	__default_send_IPI_dest_field(mask, vector, apic->dest_logical);
-	local_irq_restore(flags);
+	hard_local_irq_restore(flags);
 }
 
 /* must come after the send_IPI functions above for inlining */
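
For readers not tracking the interrupt-pipeline trees: the hunks above swap the plain irqflags helpers for their hard_* counterparts so the ICR write sequence stays protected even when local_irq_save() only stalls the virtual in-band stage. Below is a minimal sketch of the semantics this patch appears to assume, in the Dovetail/I-pipe style; the CONFIG_IRQ_PIPELINE guard and the fallback to arch_local_irq_save()/arch_local_irq_restore() are illustrative assumptions, not the pipeline's actual header definitions.

	/*
	 * Hedged sketch only: assumed behaviour of the hard_* irqflags
	 * helpers used in the hunks above. Real definitions live in the
	 * interrupt-pipeline headers and may differ.
	 */
	#ifdef CONFIG_IRQ_PIPELINE
	/* Pipelined kernel: really mask the CPU, not just the in-band stage. */
	#define hard_local_irq_save()			arch_local_irq_save()
	#define hard_local_irq_restore(flags)		arch_local_irq_restore(flags)
	/* Conditional variants match the hard ones when pipelining is on. */
	#define hard_cond_local_irq_save()		hard_local_irq_save()
	#define hard_cond_local_irq_restore(flags)	hard_local_irq_restore(flags)
	#else
	/*
	 * No pipelining: local_irq_save() already masks the CPU, so the hard
	 * helpers fall back to the arch ops and the conditional ones compile out.
	 */
	#define hard_local_irq_save()			arch_local_irq_save()
	#define hard_local_irq_restore(flags)		arch_local_irq_restore(flags)
	#define hard_cond_local_irq_save()		((unsigned long)0)
	#define hard_cond_local_irq_restore(flags)	((void)(flags))
	#endif

Under that reading, the _cond_ variants in the shortcut/dest_field senders cost nothing on non-pipelined builds, while the unconditional hard_local_irq_save()/hard_local_irq_restore() calls in the mask senders replace the old local_irq_save()/local_irq_restore() pairs one for one.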