2024-11-01 2f529f9b558ca1c1bd74be7437a84e4711743404
diff --git a/kernel/arch/arm64/kernel/entry.S b/kernel/arch/arm64/kernel/entry.S
--- a/kernel/arch/arm64/kernel/entry.S
+++ b/kernel/arch/arm64/kernel/entry.S
@@ -39,6 +39,12 @@
 #endif
 	.endm
 
+	.macro user_exit_el0_irq
+#if defined(CONFIG_CONTEXT_TRACKING) || defined(CONFIG_TRACE_IRQFLAGS)
+	bl	enter_el0_irq
+#endif
+	.endm
+
 	.macro user_enter_irqoff
 #if defined(CONFIG_CONTEXT_TRACKING) || defined(CONFIG_TRACE_IRQFLAGS)
 	bl	exit_to_user_mode
@@ -534,6 +540,21 @@
 	mov	x24, scs_sp		// preserve the original shadow stack
 #endif
 
+#ifdef CONFIG_DOVETAIL
+	/*
+	 * When the pipeline is enabled, context switches over the irq
+	 * stack are allowed (for the co-kernel), and more interrupts
+	 * can be taken over sibling stack contexts. So we need a not so
+	 * subtle way of figuring out whether the irq stack was actually
+	 * exited, which cannot depend on the current task pointer.
+	 */
+	adr_this_cpu x25, irq_nesting, x26
+	ldr	w26, [x25]
+	cmp	w26, #0
+	add	w26, w26, #1
+	str	w26, [x25]
+	b.ne	9998f
+#else
 	/*
 	 * Compare sp with the base of the task stack.
 	 * If the top ~(THREAD_SIZE - 1) bits match, we are on a task stack,
@@ -543,6 +564,7 @@
 	eor	x25, x25, x19
 	and	x25, x25, #~(THREAD_SIZE - 1)
 	cbnz	x25, 9998f
+#endif
 
 	ldr_this_cpu x25, irq_stack_ptr, x26
 	mov	x26, #IRQ_STACK_SIZE
....@@ -563,11 +585,18 @@
563585 * The callee-saved regs (x19-x29) should be preserved between
564586 * irq_stack_entry and irq_stack_exit, but note that kernel_entry
565587 * uses x20-x23 to store data for later use.
588
+ * IRQ_PIPELINE: caution, we have to preserve w0.
566589 */
567590 .macro irq_stack_exit
568591 mov sp, x19
569592 #ifdef CONFIG_SHADOW_CALL_STACK
570593 mov scs_sp, x24
594
+#endif
595
+#ifdef CONFIG_DOVETAIL
596
+ adr_this_cpu x1, irq_nesting, x2
597
+ ldr w2, [x1]
598
+ add w2, w2, #-1
599
+ str w2, [x1]
571600 #endif
572601 .endm
573602
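In C terms, the two CONFIG_DOVETAIL hunks above replace the sp-based "are we already on the task stack?" test with a per-CPU nesting counter that is incremented on irq_stack_entry and decremented on irq_stack_exit. A minimal sketch, assuming the generic per-CPU accessors; only the irq_nesting symbol comes from the patch, the helper names are illustrative:

#include <linux/types.h>
#include <linux/percpu.h>

/* Per-CPU IRQ stack nesting depth; the symbol name matches the patch,
 * its actual definition lives elsewhere in the Dovetail tree. */
static DEFINE_PER_CPU(unsigned int, irq_nesting);

/*
 * irq_stack_entry equivalent (hypothetical helper): bump the counter
 * and report whether this is the outermost entry, i.e. whether the
 * switch to the per-CPU IRQ stack should actually happen.
 */
static inline bool dovetail_enter_irq_stack(void)
{
	unsigned int nesting = __this_cpu_read(irq_nesting);

	__this_cpu_write(irq_nesting, nesting + 1);
	return nesting == 0;	/* asm: cmp w26, #0 ... b.ne 9998f */
}

/*
 * irq_stack_exit equivalent (hypothetical helper): drop one nesting
 * level once sp (and scs_sp) have been restored.
 */
static inline void dovetail_exit_irq_stack(void)
{
	__this_cpu_dec(irq_nesting);
}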
@@ -578,7 +607,15 @@
  * Interrupt handling.
  */
 	.macro irq_handler, handler:req
+#ifdef CONFIG_IRQ_PIPELINE
+# .if \handler == handle_arch_irq
+	ldr	x1, =handle_arch_irq_pipelined
+# .else
+# .error "irq_pipeline: cannot handle interrupt"
+# .endif
+#else
 	ldr_l	x1, \handler
+#endif
 	mov	x0, sp
 	irq_stack_entry
 	blr	x1
@@ -616,6 +653,9 @@
 
 	irq_handler	\handler
 
+#ifdef CONFIG_IRQ_PIPELINE
+	cbz	w0, 66f			// skip epilogue if oob or in-band stalled
+#endif
 #ifdef CONFIG_PREEMPTION
 	ldr	x24, [tsk, #TSK_TI_PREEMPT]	// get preempt count
 alternative_if ARM64_HAS_IRQ_PRIO_MASKING
@@ -630,13 +670,13 @@
 	bl	arm64_preempt_schedule_irq	// irq en/disable is done inside
 1:
 #endif
-
+66:
 	mov	x0, sp
 	bl	exit_el1_irq_or_nmi
 	.endm
 
 	.macro el0_interrupt_handler, handler:req
-	user_exit_irqoff
+	user_exit_el0_irq
 	enable_da_f
 
 	tbz	x22, #55, 1f
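The cbz w0 test added after irq_handler relies on the pipelined dispatcher reporting, in w0, whether the in-band epilogue (preemption check, exit_el1_irq_or_nmi at label 66) should run at all. A sketch of that assumed contract, with the return-value semantics inferred from the "skip epilogue if oob or in-band stalled" comment in the patch; the predicate names below are hypothetical stand-ins, not Dovetail APIs:

#include <linux/types.h>

struct pt_regs;

/* Hypothetical predicates standing in for the pipeline core's state checks. */
extern bool irq_taken_by_oob_stage(struct pt_regs *regs);
extern bool inband_stage_is_stalled(void);

/*
 * Assumed contract of handle_arch_irq_pipelined, the symbol loaded by
 * the IRQ_PIPELINE variant of irq_handler above: return zero when the
 * interrupt went to the out-of-band stage or the in-band stage is
 * stalled (the entry code then takes the "cbz w0" branch and skips the
 * epilogue), non-zero when the normal in-band epilogue must run.
 */
static int handle_arch_irq_pipelined_sketch(struct pt_regs *regs)
{
	if (irq_taken_by_oob_stage(regs) || inband_stage_is_stalled())
		return 0;	/* fast path: epilogue skipped */

	return 1;		/* run the usual preemption/exit epilogue */
}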
@@ -815,6 +855,9 @@
 	kernel_entry 0
 el0_irq_naked:
 	el0_interrupt_handler handle_arch_irq
+#ifdef CONFIG_IRQ_PIPELINE
+	cbz	w0, fast_ret_from_el0_irq	// skip epilogue if oob
+#endif
 	b	ret_to_user
 SYM_CODE_END(el0_irq)
 
@@ -846,6 +889,11 @@
 SYM_CODE_START_LOCAL(ret_to_user)
 	disable_daif
 	gic_prio_kentry_setup tmp=x3
+#ifdef CONFIG_IRQ_PIPELINE
+	ldr	x0, [tsk, #TSK_TI_LOCAL_FLAGS]
+	tst	x0, #_TLF_OOB
+	b.ne	fast_ret_to_user
+#endif
 #ifdef CONFIG_TRACE_IRQFLAGS
 	bl	trace_hardirqs_off
 #endif
@@ -854,12 +902,22 @@
 	cbnz	x2, work_pending
 finish_ret_to_user:
 	user_enter_irqoff
+ret_to_user_naked:
 	enable_step_tsk x19, x2
 #ifdef CONFIG_GCC_PLUGIN_STACKLEAK
 	bl	stackleak_erase
 #endif
 	kernel_exit 0
 
+#ifdef CONFIG_IRQ_PIPELINE
+fast_ret_from_el0_irq:
+	disable_daif
+	gic_prio_kentry_setup tmp=x3
+fast_ret_to_user:
+	ldr	x19, [tsk, #TSK_TI_FLAGS]
+	b	ret_to_user_naked
+#endif
+
 /*
  * Ok, we need to do extra processing, enter the slow path.
  */
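The last two hunks wire the fast return path: an EL0 interrupt handled entirely out of band (w0 == 0) branches to fast_ret_from_el0_irq, and ret_to_user itself checks the task's local flags so that a thread currently controlled by the out-of-band stage bypasses trace_hardirqs_off, the work_pending loop and user_enter_irqoff, going straight to ret_to_user_naked. A minimal C sketch of that test, assuming a thread_info local_flags field as implied by TSK_TI_LOCAL_FLAGS; the bit value below is illustrative only:

#include <linux/types.h>

/* Assumed bit position, for illustration; the real _TLF_OOB definition
 * ships with the Dovetail thread_info changes. */
#ifndef _TLF_OOB
#define _TLF_OOB	(1UL << 0)
#endif

/*
 * Equivalent of the IRQ_PIPELINE test at the top of ret_to_user:
 *   ldr  x0, [tsk, #TSK_TI_LOCAL_FLAGS]
 *   tst  x0, #_TLF_OOB
 *   b.ne fast_ret_to_user
 * A set _TLF_OOB means the task is running out of band, so only
 * enable_step_tsk and kernel_exit 0 run on the way back to EL0.
 */
static inline bool take_fast_ret_to_user(unsigned long local_flags)
{
	return (local_flags & _TLF_OOB) != 0;
}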