2024-11-01 2f529f9b558ca1c1bd74be7437a84e4711743404
kernel/arch/x86/kernel/apic/ipi.c
@@ -117,8 +117,10 @@
 	 * cli/sti. Otherwise we use an even cheaper single atomic write
 	 * to the APIC.
 	 */
+	unsigned long flags;
 	unsigned int cfg;
 
+	flags = hard_cond_local_irq_save();
 	/*
 	 * Wait for idle.
 	 */
@@ -137,6 +139,8 @@
 	 * Send the IPI. The write to APIC_ICR fires this off.
 	 */
 	native_apic_mem_write(APIC_ICR, cfg);
+
+	hard_cond_local_irq_restore(flags);
 }
 
 /*
@@ -145,8 +149,10 @@
  */
 void __default_send_IPI_dest_field(unsigned int mask, int vector, unsigned int dest)
 {
+	unsigned long flags;
 	unsigned long cfg;
 
+	flags = hard_cond_local_irq_save();
 	/*
 	 * Wait for idle.
 	 */
@@ -170,16 +176,18 @@
 	 * Send the IPI. The write to APIC_ICR fires this off.
 	 */
 	native_apic_mem_write(APIC_ICR, cfg);
+
+	hard_cond_local_irq_restore(flags);
 }
 
 void default_send_IPI_single_phys(int cpu, int vector)
 {
 	unsigned long flags;
 
-	local_irq_save(flags);
+	flags = hard_local_irq_save();
 	__default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, cpu),
 				      vector, APIC_DEST_PHYSICAL);
-	local_irq_restore(flags);
+	hard_local_irq_restore(flags);
 }
 
 void default_send_IPI_mask_sequence_phys(const struct cpumask *mask, int vector)
@@ -192,12 +200,12 @@
 	 * to an arbitrary mask, so I do a unicast to each CPU instead.
 	 * - mbligh
 	 */
-	local_irq_save(flags);
+	flags = hard_local_irq_save();
 	for_each_cpu(query_cpu, mask) {
 		__default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid,
 				query_cpu), vector, APIC_DEST_PHYSICAL);
 	}
-	local_irq_restore(flags);
+	hard_local_irq_restore(flags);
 }
 
 void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask,
@@ -209,14 +217,14 @@
 
 	/* See Hack comment above */
 
-	local_irq_save(flags);
+	flags = hard_local_irq_save();
 	for_each_cpu(query_cpu, mask) {
 		if (query_cpu == this_cpu)
 			continue;
 		__default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid,
 				query_cpu), vector, APIC_DEST_PHYSICAL);
 	}
-	local_irq_restore(flags);
+	hard_local_irq_restore(flags);
 }
 
 /*
@@ -256,12 +264,12 @@
 	 * should be modified to do 1 message per cluster ID - mbligh
 	 */
 
-	local_irq_save(flags);
+	flags = hard_local_irq_save();
 	for_each_cpu(query_cpu, mask)
 		__default_send_IPI_dest_field(
 			early_per_cpu(x86_cpu_to_logical_apicid, query_cpu),
 			vector, apic->dest_logical);
-	local_irq_restore(flags);
+	hard_local_irq_restore(flags);
 }
 
 void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask,
@@ -273,7 +281,7 @@
 
 	/* See Hack comment above */
 
-	local_irq_save(flags);
+	flags = hard_local_irq_save();
 	for_each_cpu(query_cpu, mask) {
 		if (query_cpu == this_cpu)
 			continue;
@@ -281,7 +289,7 @@
 			early_per_cpu(x86_cpu_to_logical_apicid, query_cpu),
 			vector, apic->dest_logical);
 	}
-	local_irq_restore(flags);
+	hard_local_irq_restore(flags);
 }
 
 /*
@@ -295,10 +303,10 @@
 	if (!mask)
 		return;
 
-	local_irq_save(flags);
+	flags = hard_local_irq_save();
 	WARN_ON(mask & ~cpumask_bits(cpu_online_mask)[0]);
 	__default_send_IPI_dest_field(mask, vector, apic->dest_logical);
-	local_irq_restore(flags);
+	hard_local_irq_restore(flags);
 }
 
 /* must come after the send_IPI functions above for inlining */
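Every hunk applies the same bracketing pattern: the ICR accesses are wrapped in the pipeline-aware hard_*_irq helpers instead of the virtualized local_irq_save()/local_irq_restore(). The sketch below is illustrative only, assuming Dovetail/I-pipe-style semantics in which the hard_* variants mask interrupts in the CPU itself rather than only the kernel's virtual IRQ flag, and in which the _cond_ variants are used in the low-level senders that may already be entered with hard IRQs off; send_ipi_sketch() is a hypothetical helper, not part of the patch.

/*
 * Sketch only (not part of the patch): the save/restore pattern
 * adopted above, assuming Dovetail/I-pipe-style hard IRQ helpers.
 */
static void send_ipi_sketch(unsigned int cfg)
{
	unsigned long flags;

	/*
	 * Hard-disable interrupts so the wait-for-idle/ICR-write
	 * sequence cannot be preempted, then restore the previous
	 * hard IRQ state. The wrapper functions use the unconditional
	 * form shown here; the shared low-level senders use the
	 * hard_cond_local_irq_save()/restore() form since their
	 * callers may already hold hard IRQs off.
	 */
	flags = hard_local_irq_save();
	/* ... wait for ICR idle, build cfg ... */
	native_apic_mem_write(APIC_ICR, cfg);
	hard_local_irq_restore(flags);
}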