2024-02-20 102a0743326a03cd1a1202ceda21e175b7d3575c
--- a/kernel/arch/powerpc/kernel/irq.c
+++ b/kernel/arch/powerpc/kernel/irq.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * Derived from arch/i386/kernel/irq.c
  * Copyright (C) 1992 Linus Torvalds
@@ -7,11 +8,6 @@
  * Copyright (C) 1996-2001 Cort Dougan
  * Adapted for Power Macintosh by Paul Mackerras
  * Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
  *
  * This file contains the code used by various IRQ handling routines:
  * asking for different IRQ's should be done through these routines
@@ -54,10 +50,11 @@
 #include <linux/debugfs.h>
 #include <linux/of.h>
 #include <linux/of_irq.h>
+#include <linux/vmalloc.h>
+#include <linux/pgtable.h>
 
 #include <linux/uaccess.h>
 #include <asm/io.h>
-#include <asm/pgtable.h>
 #include <asm/irq.h>
 #include <asm/cache.h>
 #include <asm/prom.h>
@@ -73,6 +70,7 @@
 #include <asm/paca.h>
 #include <asm/firmware.h>
 #include <asm/lv1call.h>
+#include <asm/dbell.h>
 #endif
 #define CREATE_TRACE_POINTS
 #include <asm/trace.h>
@@ -81,10 +79,7 @@
 DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
 EXPORT_PER_CPU_SYMBOL(irq_stat);
 
-int __irq_offset_value;
-
 #ifdef CONFIG_PPC32
-EXPORT_SYMBOL(__irq_offset_value);
 atomic_t ppc_n_lost_interrupts;
 
 #ifdef CONFIG_TAU_INT
@@ -107,16 +102,10 @@
 	return happened;
 }
 
-static inline notrace int decrementer_check_overflow(void)
-{
-	u64 now = get_tb_or_rtc();
-	u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);
-
-	return now >= *next_tb;
-}
+#ifdef CONFIG_PPC_BOOK3E
 
 /* This is called whenever we are re-enabling interrupts
- * and returns either 0 (nothing to do) or 500/900/280/a00/e80 if
+ * and returns either 0 (nothing to do) or 500/900/280 if
  * there's an EE, DEC or DBELL to generate.
  *
  * This is called in two contexts: From arch_local_irq_restore()
@@ -145,34 +134,71 @@
 	trace_hardirqs_on();
 	trace_hardirqs_off();
 
-	/*
-	 * We are always hard disabled here, but PACA_IRQ_HARD_DIS may
-	 * not be set, which means interrupts have only just been hard
-	 * disabled as part of the local_irq_restore or interrupt return
-	 * code. In that case, skip the decrementr check becaus it's
-	 * expensive to read the TB.
-	 *
-	 * HARD_DIS then gets cleared here, but it's reconciled later.
-	 * Either local_irq_disable will replay the interrupt and that
-	 * will reconcile state like other hard interrupts. Or interrupt
-	 * retur will replay the interrupt and in that case it sets
-	 * PACA_IRQ_HARD_DIS by hand (see comments in entry_64.S).
-	 */
-	if (happened & PACA_IRQ_HARD_DIS) {
+	if (happened & PACA_IRQ_DEC) {
+		local_paca->irq_happened &= ~PACA_IRQ_DEC;
+		return 0x900;
+	}
+
+	if (happened & PACA_IRQ_EE) {
+		local_paca->irq_happened &= ~PACA_IRQ_EE;
+		return 0x500;
+	}
+
+	if (happened & PACA_IRQ_DBELL) {
+		local_paca->irq_happened &= ~PACA_IRQ_DBELL;
+		return 0x280;
+	}
+
+	if (happened & PACA_IRQ_HARD_DIS)
 		local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
 
-		/*
-		 * We may have missed a decrementer interrupt if hard disabled.
-		 * Check the decrementer register in case we had a rollover
-		 * while hard disabled.
-		 */
-		if (!(happened & PACA_IRQ_DEC)) {
-			if (decrementer_check_overflow()) {
-				local_paca->irq_happened |= PACA_IRQ_DEC;
-				happened |= PACA_IRQ_DEC;
-			}
-		}
-	}
+	/* There should be nothing left ! */
+	BUG_ON(local_paca->irq_happened != 0);
+
+	return 0;
+}
+
+/*
+ * This is specifically called by assembly code to re-enable interrupts
+ * if they are currently disabled. This is typically called before
+ * schedule() or do_signal() when returning to userspace. We do it
+ * in C to avoid the burden of dealing with lockdep etc...
+ *
+ * NOTE: This is called with interrupts hard disabled but not marked
+ * as such in paca->irq_happened, so we need to resync this.
+ */
+void notrace restore_interrupts(void)
+{
+	if (irqs_disabled()) {
+		local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
+		local_irq_enable();
+	} else
+		__hard_irq_enable();
+}
+
+#endif /* CONFIG_PPC_BOOK3E */
+
+void replay_soft_interrupts(void)
+{
+	struct pt_regs regs;
+
+	/*
+	 * Be careful here, calling these interrupt handlers can cause
+	 * softirqs to be raised, which they may run when calling irq_exit,
+	 * which will cause local_irq_enable() to be run, which can then
+	 * recurse into this function. Don't keep any state across
+	 * interrupt handler calls which may change underneath us.
+	 *
+	 * We use local_paca rather than get_paca() to avoid all the
+	 * debug_smp_processor_id() business in this low level function.
+	 */
+
+	ppc_save_regs(&regs);
+	regs.softe = IRQS_ENABLED;
+
+again:
+	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
+		WARN_ON_ONCE(mfmsr() & MSR_EE);
 
 	/*
 	 * Force the delivery of pending soft-disabled interrupts on PS3.
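
Aside: the hunks above and below replace __check_irq_replay()'s old scheme, where a trap vector was returned for assembly to re-execute, with replay_soft_interrupts(), which calls the C handlers directly. A minimal user-space sketch of the underlying lazy-masking idea (note interrupts while soft-masked, replay them on enable); every name below is invented for illustration and nothing here is kernel API:

/* Illustrative user-space model of PPC lazy interrupt masking.
 * Names are hypothetical; this is not kernel code. */
#include <stdio.h>

#define IRQ_DEC 0x1
#define IRQ_EE  0x2

static int soft_masked;        /* analogue of the PACA soft-mask bit */
static unsigned int happened;  /* analogue of local_paca->irq_happened */

/* A "hardware" interrupt arriving while soft-masked is only noted. */
static void hw_interrupt(unsigned int irq)
{
	if (soft_masked) {
		happened |= irq;   /* latch it for later replay */
		return;
	}
	printf("handling irq %#x immediately\n", irq);
}

/* Analogue of replay_soft_interrupts(): run latched handlers in
 * priority order, clearing each flag before calling its handler. */
static void replay(void)
{
	while (happened) {
		if (happened & IRQ_DEC) {
			happened &= ~IRQ_DEC;
			printf("replaying timer irq\n");
		} else if (happened & IRQ_EE) {
			happened &= ~IRQ_EE;
			printf("replaying external irq\n");
		}
	}
}

int main(void)
{
	soft_masked = 1;      /* local_irq_disable() analogue */
	hw_interrupt(IRQ_EE); /* latched, not handled */
	hw_interrupt(IRQ_DEC);
	soft_masked = 0;      /* local_irq_enable() analogue ... */
	replay();             /* ... replays what was latched */
	return 0;
}
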
@@ -188,58 +214,89 @@
 	 * This is a higher priority interrupt than the others, so
 	 * replay it first.
 	 */
-	if (happened & PACA_IRQ_HMI) {
+	if (IS_ENABLED(CONFIG_PPC_BOOK3S) && (local_paca->irq_happened & PACA_IRQ_HMI)) {
 		local_paca->irq_happened &= ~PACA_IRQ_HMI;
-		return 0xe60;
+		regs.trap = 0xe60;
+		handle_hmi_exception(&regs);
+		if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS))
+			hard_irq_disable();
 	}
 
-	if (happened & PACA_IRQ_DEC) {
+	if (local_paca->irq_happened & PACA_IRQ_DEC) {
 		local_paca->irq_happened &= ~PACA_IRQ_DEC;
-		return 0x900;
+		regs.trap = 0x900;
+		timer_interrupt(&regs);
+		if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS))
+			hard_irq_disable();
 	}
 
-	if (happened & PACA_IRQ_PMI) {
-		local_paca->irq_happened &= ~PACA_IRQ_PMI;
-		return 0xf00;
-	}
-
-	if (happened & PACA_IRQ_EE) {
+	if (local_paca->irq_happened & PACA_IRQ_EE) {
 		local_paca->irq_happened &= ~PACA_IRQ_EE;
-		return 0x500;
+		regs.trap = 0x500;
+		do_IRQ(&regs);
+		if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS))
+			hard_irq_disable();
 	}
 
-#ifdef CONFIG_PPC_BOOK3E
-	/*
-	 * Check if an EPR external interrupt happened this bit is typically
-	 * set if we need to handle another "edge" interrupt from within the
-	 * MPIC "EPR" handler.
-	 */
-	if (happened & PACA_IRQ_EE_EDGE) {
-		local_paca->irq_happened &= ~PACA_IRQ_EE_EDGE;
-		return 0x500;
-	}
-
-	if (happened & PACA_IRQ_DBELL) {
+	if (IS_ENABLED(CONFIG_PPC_DOORBELL) && (local_paca->irq_happened & PACA_IRQ_DBELL)) {
 		local_paca->irq_happened &= ~PACA_IRQ_DBELL;
-		return 0x280;
+		if (IS_ENABLED(CONFIG_PPC_BOOK3E))
+			regs.trap = 0x280;
+		else
+			regs.trap = 0xa00;
+		doorbell_exception(&regs);
+		if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS))
+			hard_irq_disable();
 	}
-#else
-	if (happened & PACA_IRQ_DBELL) {
-		local_paca->irq_happened &= ~PACA_IRQ_DBELL;
-		return 0xa00;
+
+	/* Book3E does not support soft-masking PMI interrupts */
+	if (IS_ENABLED(CONFIG_PPC_BOOK3S) && (local_paca->irq_happened & PACA_IRQ_PMI)) {
+		local_paca->irq_happened &= ~PACA_IRQ_PMI;
+		regs.trap = 0xf00;
+		performance_monitor_exception(&regs);
+		if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS))
+			hard_irq_disable();
 	}
-#endif /* CONFIG_PPC_BOOK3E */
 
-	/* There should be nothing left ! */
-	BUG_ON(local_paca->irq_happened != 0);
-
-	return 0;
+	if (local_paca->irq_happened & ~PACA_IRQ_HARD_DIS) {
+		/*
+		 * We are responding to the next interrupt, so interrupt-off
+		 * latencies should be reset here.
+		 */
+		trace_hardirqs_on();
+		trace_hardirqs_off();
+		goto again;
+	}
 }
+
+#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_PPC_KUAP)
+static inline void replay_soft_interrupts_irqrestore(void)
+{
+	unsigned long kuap_state = get_kuap();
+
+	/*
+	 * Check if anything calls local_irq_enable/restore() when KUAP is
+	 * disabled (user access enabled). We handle that case here by saving
+	 * and re-locking AMR but we shouldn't get here in the first place,
+	 * hence the warning.
+	 */
+	kuap_check_amr();
+
+	if (kuap_state != AMR_KUAP_BLOCKED)
+		set_kuap(AMR_KUAP_BLOCKED);
+
+	replay_soft_interrupts();
+
+	if (kuap_state != AMR_KUAP_BLOCKED)
+		set_kuap(kuap_state);
+}
+#else
+#define replay_soft_interrupts_irqrestore() replay_soft_interrupts()
+#endif
 
 notrace void arch_local_irq_restore(unsigned long mask)
 {
 	unsigned char irq_happened;
-	unsigned int replay;
 
 	/* Write the new soft-enabled value */
 	irq_soft_mask_set(mask);
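
Aside: replay_soft_interrupts_irqrestore() in the hunk above wraps the replay in a save/force/restore of the KUAP (Kernel Userspace Access Prevention) state, so replayed handlers never run with user access unlocked. A minimal sketch of that save/force/restore shape, with a plain int standing in for the AMR register; names are illustrative only:

#include <stdio.h>

static int protection_state;   /* stand-in for the AMR/KUAP register */
#define BLOCKED  1
#define UNLOCKED 0

static void replay(void)
{
	printf("replaying with protection_state=%d\n", protection_state);
}

/* Shape of replay_soft_interrupts_irqrestore(): save the current
 * state, force it to BLOCKED around the callback, then restore. */
static void guarded_replay(void)
{
	int saved = protection_state;

	if (saved != BLOCKED)
		protection_state = BLOCKED;

	replay();

	if (saved != BLOCKED)
		protection_state = saved;
}

int main(void)
{
	protection_state = UNLOCKED;  /* caller left user access open */
	guarded_replay();             /* replay still runs BLOCKED */
	printf("restored to %d\n", protection_state);
	return 0;
}
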
@@ -261,31 +318,17 @@
 	 */
 	irq_happened = get_irq_happened();
 	if (!irq_happened) {
-		/*
-		 * FIXME. Here we'd like to be able to do:
-		 *
-		 * #ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
-		 * WARN_ON(!(mfmsr() & MSR_EE));
-		 * #endif
-		 *
-		 * But currently it hits in a few paths, we should fix those and
-		 * enable the warning.
-		 */
+		if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
+			WARN_ON_ONCE(!(mfmsr() & MSR_EE));
 		return;
 	}
 
-	/*
-	 * We need to hard disable to get a trusted value from
-	 * __check_irq_replay(). We also need to soft-disable
-	 * again to avoid warnings in there due to the use of
-	 * per-cpu variables.
-	 */
+	/* We need to hard disable to replay. */
 	if (!(irq_happened & PACA_IRQ_HARD_DIS)) {
-#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
-		WARN_ON(!(mfmsr() & MSR_EE));
-#endif
+		if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
+			WARN_ON_ONCE(!(mfmsr() & MSR_EE));
 		__hard_irq_disable();
-#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
+		local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
 	} else {
 		/*
 		 * We should already be hard disabled here. We had bugs
@@ -293,56 +336,36 @@
 		 * warn if we are wrong. Only do that when IRQ tracing
 		 * is enabled as mfmsr() can be costly.
 		 */
-		if (WARN_ON(mfmsr() & MSR_EE))
-			__hard_irq_disable();
-#endif
+		if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
+			if (WARN_ON_ONCE(mfmsr() & MSR_EE))
+				__hard_irq_disable();
+		}
+
+		if (irq_happened == PACA_IRQ_HARD_DIS) {
+			local_paca->irq_happened = 0;
+			__hard_irq_enable();
+			return;
+		}
 	}
 
+	/*
+	 * Disable preempt here, so that the below preempt_enable will
+	 * perform resched if required (a replayed interrupt may set
+	 * need_resched).
+	 */
+	preempt_disable();
 	irq_soft_mask_set(IRQS_ALL_DISABLED);
 	trace_hardirqs_off();
 
-	/*
-	 * Check if anything needs to be re-emitted. We haven't
-	 * soft-enabled yet to avoid warnings in decrementer_check_overflow
-	 * accessing per-cpu variables
-	 */
-	replay = __check_irq_replay();
+	replay_soft_interrupts_irqrestore();
+	local_paca->irq_happened = 0;
 
-	/* We can soft-enable now */
 	trace_hardirqs_on();
 	irq_soft_mask_set(IRQS_ENABLED);
-
-	/*
-	 * And replay if we have to. This will return with interrupts
-	 * hard-enabled.
-	 */
-	if (replay) {
-		__replay_interrupt(replay);
-		return;
-	}
-
-	/* Finally, let's ensure we are hard enabled */
 	__hard_irq_enable();
+	preempt_enable();
 }
 EXPORT_SYMBOL(arch_local_irq_restore);
-
-/*
- * This is specifically called by assembly code to re-enable interrupts
- * if they are currently disabled. This is typically called before
- * schedule() or do_signal() when returning to userspace. We do it
- * in C to avoid the burden of dealing with lockdep etc...
- *
- * NOTE: This is called with interrupts hard disabled but not marked
- * as such in paca->irq_happened, so we need to resync this.
- */
-void notrace restore_interrupts(void)
-{
-	if (irqs_disabled()) {
-		local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
-		local_irq_enable();
-	} else
-		__hard_irq_enable();
-}
 
 /*
  * This is a helper to use when about to go into idle low-power
@@ -473,6 +496,19 @@
 		return;
 	}
 
+	if (reason == PACA_IRQ_DBELL) {
+		/*
+		 * When doorbell triggers a system reset wakeup, the message
+		 * is not cleared, so if the doorbell interrupt is replayed
+		 * and the IPI handled, the doorbell interrupt would still
+		 * fire when EE is enabled.
+		 *
+		 * To avoid taking the superfluous doorbell interrupt,
+		 * execute a msgclr here before the interrupt is replayed.
+		 */
+		ppc_msgclr(PPC_DBELL_MSGTYPE);
+	}
+
 	/*
 	 * The 0 index (SRR1[42:45]=b0000) must always evaluate to 0,
 	 * so this can be called unconditionally with the SRR1 wake
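
Aside: the block added above runs ppc_msgclr() before a replayed doorbell because a doorbell is a latched message: handling the IPI through the replay path does not clear the latch, so the interrupt would fire again as soon as EE is enabled. A tiny illustrative model of "consume the latch before replaying" (not kernel code; all names invented):

#include <stdio.h>

static int doorbell_latched;   /* the hardware message latch */

static void handle_ipi(void) { printf("IPI handled\n"); }

static void replay_doorbell(void)
{
	doorbell_latched = 0;   /* analogue of ppc_msgclr(PPC_DBELL_MSGTYPE) */
	handle_ipi();
}

int main(void)
{
	doorbell_latched = 1;   /* message arrived during system-reset wakeup */
	replay_doorbell();      /* clear first, then handle */
	if (!doorbell_latched)
		printf("no spurious doorbell when interrupts re-enable\n");
	return 0;
}
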
@@ -554,13 +590,14 @@
 		seq_printf(p, "%10u ", per_cpu(irq_stat, j).mce_exceptions);
 	seq_printf(p, "  Machine check exceptions\n");
 
+#ifdef CONFIG_PPC_BOOK3S_64
 	if (cpu_has_feature(CPU_FTR_HVMODE)) {
 		seq_printf(p, "%*s: ", prec, "HMI");
 		for_each_online_cpu(j)
-			seq_printf(p, "%10u ",
-					per_cpu(irq_stat, j).hmi_exceptions);
+			seq_printf(p, "%10u ", paca_ptrs[j]->hmi_irqs);
 		seq_printf(p, "  Hypervisor Maintenance Interrupts\n");
 	}
+#endif
 
 	seq_printf(p, "%*s: ", prec, "NMI");
 	for_each_online_cpu(j)
@@ -598,7 +635,9 @@
 	sum += per_cpu(irq_stat, cpu).mce_exceptions;
 	sum += per_cpu(irq_stat, cpu).spurious_irqs;
 	sum += per_cpu(irq_stat, cpu).timer_irqs_others;
-	sum += per_cpu(irq_stat, cpu).hmi_exceptions;
+#ifdef CONFIG_PPC_BOOK3S_64
+	sum += paca_ptrs[cpu]->hmi_irqs;
+#endif
 	sum += per_cpu(irq_stat, cpu).sreset_irqs;
 #ifdef CONFIG_PPC_WATCHDOG
 	sum += per_cpu(irq_stat, cpu).soft_nmi_irqs;
@@ -612,18 +651,18 @@
 
 static inline void check_stack_overflow(void)
 {
-#ifdef CONFIG_DEBUG_STACKOVERFLOW
 	long sp;
 
-	sp = current_stack_pointer() & (THREAD_SIZE-1);
+	if (!IS_ENABLED(CONFIG_DEBUG_STACKOVERFLOW))
+		return;
+
+	sp = current_stack_pointer & (THREAD_SIZE - 1);
 
 	/* check for stack overflow: is there less than 2KB free? */
-	if (unlikely(sp < (sizeof(struct thread_info) + 2048))) {
-		pr_err("do_IRQ: stack overflow: %ld\n",
-			sp - sizeof(struct thread_info));
+	if (unlikely(sp < 2048)) {
+		pr_err("do_IRQ: stack overflow: %ld\n", sp);
 		dump_stack();
 	}
-#endif
 }
 
 void __do_irq(struct pt_regs *regs)
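
Aside: this hunk, like several others in the patch, converts an #ifdef block into an if (IS_ENABLED(...)) so the guarded code is always parsed and type-checked, then folded away when the option is off. A simplified runnable demo of the pattern; the kernel's real IS_ENABLED() works on CONFIG_* macros via token pasting, so the plain 0/1 constant below is only a stand-in:

#include <stdio.h>

#define DEBUG_STACKOVERFLOW_ENABLED 0   /* flip to 1 to enable checks */

static void check_stack_overflow(long sp)
{
	/* The compiler sees this branch, so it cannot bit-rot, but with
	 * the constant 0 it is eliminated as dead code. */
	if (!DEBUG_STACKOVERFLOW_ENABLED)
		return;

	if (sp < 2048)
		printf("stack overflow: %ld\n", sp);
}

int main(void)
{
	check_stack_overflow(1024);   /* silent while the option is 0 */
	return 0;
}
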
@@ -658,133 +697,66 @@
 void do_IRQ(struct pt_regs *regs)
 {
 	struct pt_regs *old_regs = set_irq_regs(regs);
-	struct thread_info *curtp, *irqtp, *sirqtp;
+	void *cursp, *irqsp, *sirqsp;
 
 	/* Switch to the irq stack to handle this */
-	curtp = current_thread_info();
-	irqtp = hardirq_ctx[raw_smp_processor_id()];
-	sirqtp = softirq_ctx[raw_smp_processor_id()];
+	cursp = (void *)(current_stack_pointer & ~(THREAD_SIZE - 1));
+	irqsp = hardirq_ctx[raw_smp_processor_id()];
+	sirqsp = softirq_ctx[raw_smp_processor_id()];
 
 	check_stack_overflow();
 
 	/* Already there ? */
-	if (unlikely(curtp == irqtp || curtp == sirqtp)) {
+	if (unlikely(cursp == irqsp || cursp == sirqsp)) {
 		__do_irq(regs);
 		set_irq_regs(old_regs);
 		return;
 	}
-
-	/* Prepare the thread_info in the irq stack */
-	irqtp->task = curtp->task;
-	irqtp->flags = 0;
-
-	/* Copy the preempt_count so that the [soft]irq checks work. */
-	irqtp->preempt_count = curtp->preempt_count;
-
 	/* Switch stack and call */
-	call_do_irq(regs, irqtp);
-
-	/* Restore stack limit */
-	irqtp->task = NULL;
-
-	/* Copy back updates to the thread_info */
-	if (irqtp->flags)
-		set_bits(irqtp->flags, &curtp->flags);
+	call_do_irq(regs, irqsp);
 
 	set_irq_regs(old_regs);
 }
 
-void __init init_IRQ(void)
+static void *__init alloc_vm_stack(void)
 {
-	if (ppc_md.init_IRQ)
-		ppc_md.init_IRQ();
-
-	exc_lvl_ctx_init();
-
-	irq_ctx_init();
+	return __vmalloc_node(THREAD_SIZE, THREAD_ALIGN, THREADINFO_GFP,
			      NUMA_NO_NODE, (void *)_RET_IP_);
 }
 
-#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
-struct thread_info *critirq_ctx[NR_CPUS] __read_mostly;
-struct thread_info *dbgirq_ctx[NR_CPUS] __read_mostly;
-struct thread_info *mcheckirq_ctx[NR_CPUS] __read_mostly;
-
-void exc_lvl_ctx_init(void)
+static void __init vmap_irqstack_init(void)
 {
-	struct thread_info *tp;
-	int i, cpu_nr;
-
-	for_each_possible_cpu(i) {
-#ifdef CONFIG_PPC64
-		cpu_nr = i;
-#else
-#ifdef CONFIG_SMP
-		cpu_nr = get_hard_smp_processor_id(i);
-#else
-		cpu_nr = 0;
-#endif
-#endif
-
-		memset((void *)critirq_ctx[cpu_nr], 0, THREAD_SIZE);
-		tp = critirq_ctx[cpu_nr];
-		tp->cpu = cpu_nr;
-		tp->preempt_count = 0;
-
-#ifdef CONFIG_BOOKE
-		memset((void *)dbgirq_ctx[cpu_nr], 0, THREAD_SIZE);
-		tp = dbgirq_ctx[cpu_nr];
-		tp->cpu = cpu_nr;
-		tp->preempt_count = 0;
-
-		memset((void *)mcheckirq_ctx[cpu_nr], 0, THREAD_SIZE);
-		tp = mcheckirq_ctx[cpu_nr];
-		tp->cpu = cpu_nr;
-		tp->preempt_count = HARDIRQ_OFFSET;
-#endif
-	}
-}
-#endif
-
-struct thread_info *softirq_ctx[NR_CPUS] __read_mostly;
-struct thread_info *hardirq_ctx[NR_CPUS] __read_mostly;
-
-void irq_ctx_init(void)
-{
-	struct thread_info *tp;
 	int i;
 
 	for_each_possible_cpu(i) {
-		memset((void *)softirq_ctx[i], 0, THREAD_SIZE);
-		tp = softirq_ctx[i];
-		tp->cpu = i;
-		klp_init_thread_info(tp);
-
-		memset((void *)hardirq_ctx[i], 0, THREAD_SIZE);
-		tp = hardirq_ctx[i];
-		tp->cpu = i;
-		klp_init_thread_info(tp);
+		softirq_ctx[i] = alloc_vm_stack();
+		hardirq_ctx[i] = alloc_vm_stack();
 	}
 }
 
-#ifndef CONFIG_PREEMPT_RT_FULL
+
+void __init init_IRQ(void)
+{
+	if (IS_ENABLED(CONFIG_VMAP_STACK))
+		vmap_irqstack_init();
+
+	if (ppc_md.init_IRQ)
+		ppc_md.init_IRQ();
+}
+
+#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
+void *critirq_ctx[NR_CPUS] __read_mostly;
+void *dbgirq_ctx[NR_CPUS] __read_mostly;
+void *mcheckirq_ctx[NR_CPUS] __read_mostly;
+#endif
+
+void *softirq_ctx[NR_CPUS] __read_mostly;
+void *hardirq_ctx[NR_CPUS] __read_mostly;
+
 void do_softirq_own_stack(void)
 {
-	struct thread_info *curtp, *irqtp;
-
-	curtp = current_thread_info();
-	irqtp = softirq_ctx[smp_processor_id()];
-	irqtp->task = curtp->task;
-	irqtp->flags = 0;
-	call_do_softirq(irqtp);
-	irqtp->task = NULL;
-
-	/* Set any flag that may have been set on the
-	 * alternate stack
-	 */
-	if (irqtp->flags)
-		set_bits(irqtp->flags, &curtp->flags);
+	call_do_softirq(softirq_ctx[smp_processor_id()]);
 }
-#endif
 
 irq_hw_number_t virq_to_hw(unsigned int virq)
 {
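
Aside: alloc_vm_stack() above moves the softirq/hardirq stacks into vmalloc space when CONFIG_VMAP_STACK is set, which gives each stack a guard page so an overflow faults immediately instead of silently corrupting whatever sits below it. A user-space analogy of the same idea using mmap()/mprotect() (illustrative only; the kernel gets the guard page from vmalloc itself, and the 4 KiB page size is an assumption):

#include <sys/mman.h>
#include <stdio.h>

#define STACK_SIZE (16 * 1024)
#define PAGE_SIZE  4096   /* assumed page size for the sketch */

static void *alloc_guarded_stack(void)
{
	/* One extra page below the stack, left inaccessible. */
	char *p = mmap(NULL, STACK_SIZE + PAGE_SIZE,
		       PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return NULL;
	mprotect(p, PAGE_SIZE, PROT_NONE);  /* guard page */
	return p + PAGE_SIZE;               /* usable stack base */
}

int main(void)
{
	void *stack = alloc_guarded_stack();
	printf("stack at %p; writing below it would fault\n", stack);
	return 0;
}
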
@@ -828,11 +800,6 @@
 	return hard_smp_processor_id();
 }
 #endif
-
-int arch_early_irq_init(void)
-{
-	return 0;
-}
 
 #ifdef CONFIG_PPC64
 static int __init setup_noirqdistrib(char *str)