```diff
@@ -39,6 +39,12 @@
 #endif
 	.endm
 
+	.macro user_exit_el0_irq
+#if defined(CONFIG_CONTEXT_TRACKING) || defined(CONFIG_TRACE_IRQFLAGS)
+	bl	enter_el0_irq
+#endif
+	.endm
+
 	.macro user_enter_irqoff
 #if defined(CONFIG_CONTEXT_TRACKING) || defined(CONFIG_TRACE_IRQFLAGS)
 	bl	exit_to_user_mode
```
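The new `user_exit_el0_irq` macro parallels the existing `user_exit_irqoff`/`user_enter_irqoff` pair, but branches to a dedicated C helper so that EL0 IRQ entry can behave differently under Dovetail. The helper's body is not part of this hunk; as a rough sketch of its likely shape, assuming Dovetail's `running_inband()` predicate and the existing `enter_from_user_mode()` entry hook:

```c
/*
 * Sketch only -- the real enter_el0_irq() lives in the matching
 * entry-common.c change. A dedicated macro is needed because context
 * tracking must be skipped when the IRQ preempts the out-of-band
 * stage, which the generic user_exit_irqoff path cannot know about.
 */
asmlinkage void noinstr enter_el0_irq(void)
{
	if (running_inband())		/* in-band stage only */
		enter_from_user_mode();	/* context tracking + irqflags trace */
}
```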
```diff
@@ -534,6 +540,21 @@
 	mov	x24, scs_sp		// preserve the original shadow stack
 #endif
 
+#ifdef CONFIG_DOVETAIL
+	/*
+	 * When the pipeline is enabled, context switches over the irq
+	 * stack are allowed (for the co-kernel), and more interrupts
+	 * can be taken over sibling stack contexts. So we need an
+	 * explicit way of figuring out whether the irq stack was
+	 * actually exited, one which cannot depend on the current task pointer.
+	 */
+	adr_this_cpu x25, irq_nesting, x26
+	ldr	w26, [x25]
+	cmp	w26, #0
+	add	w26, w26, #1
+	str	w26, [x25]
+	b.ne	9998f
+#else
 	/*
 	 * Compare sp with the base of the task stack.
 	 * If the top ~(THREAD_SIZE - 1) bits match, we are on a task stack,
```
```diff
@@ -543,6 +564,7 @@
 	eor	x25, x25, x19
 	and	x25, x25, #~(THREAD_SIZE - 1)
 	cbnz	x25, 9998f
+#endif
 
 	ldr_this_cpu x25, irq_stack_ptr, x26
 	mov	x26, #IRQ_STACK_SIZE
```
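The `irq_nesting` accounting above replaces the task-stack heuristic: instead of comparing sp against the current task's stack base, the entry path counts per-CPU IRQ entries and only switches to the IRQ stack on the outermost one (the `b.ne 9998f` skips the switch when the previous count was nonzero). In C terms, the entry test and the matching decrement in `irq_stack_exit` below amount to the following; the helper names are illustrative only, `irq_nesting` is the per-CPU variable the asm references through `adr_this_cpu`:

```c
/* Illustrative C rendering of the per-CPU nesting logic, not kernel code. */
DEFINE_PER_CPU(u32, irq_nesting);

static bool need_irq_stack_switch(void)	/* mirrors ldr/cmp/add/str + b.ne */
{
	/* Switch stacks only on the outermost IRQ entry for this CPU. */
	return this_cpu_inc_return(irq_nesting) == 1;
}

static void leave_irq_stack(void)	/* mirrors the decrement in irq_stack_exit */
{
	this_cpu_dec(irq_nesting);
}
```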
```diff
@@ -563,11 +585,18 @@
  * The callee-saved regs (x19-x29) should be preserved between
  * irq_stack_entry and irq_stack_exit, but note that kernel_entry
  * uses x20-x23 to store data for later use.
+ * IRQ_PIPELINE: caution, we have to preserve w0.
  */
 	.macro irq_stack_exit
 	mov	sp, x19
 #ifdef CONFIG_SHADOW_CALL_STACK
 	mov	scs_sp, x24
+#endif
+#ifdef CONFIG_DOVETAIL
+	adr_this_cpu x1, irq_nesting, x2
+	ldr	w2, [x1]
+	add	w2, w2, #-1
+	str	w2, [x1]
 #endif
 	.endm
 
```
```diff
@@ -578,7 +607,15 @@
  * Interrupt handling.
  */
 	.macro irq_handler, handler:req
+#ifdef CONFIG_IRQ_PIPELINE
+#	.if	\handler == handle_arch_irq
+	ldr	x1, =handle_arch_irq_pipelined
+#	.else
+#	.error	"irq_pipeline: cannot handle interrupt"
+#	.endif
+#else
 	ldr_l	x1, \handler
+#endif
 	mov	x0, sp
 	irq_stack_entry
 	blr	x1
```
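Note the asymmetry: `handle_arch_irq` is a function pointer, so the stock path loads its value with `ldr_l`, whereas the pipelined entry point is a fixed function whose address comes from the literal pool (`ldr x1, =...`). Its return value comes back in w0 and drives the `cbz w0, ...` tests in the hunks below, which is why the comment above `irq_stack_exit` warns that w0 must be preserved. A sketch of the contract; the pipeline-core helper name is an assumption:

```c
/*
 * Contract sketch only; the real implementation belongs to the IRQ
 * pipeline core. Nonzero means the in-band kernel should run its usual
 * IRQ epilogue (preemption check etc.); zero means the event was
 * handled out-of-band, or the in-band stage is stalled, so the
 * epilogue must be skipped.
 */
asmlinkage int handle_arch_irq_pipelined(struct pt_regs *regs)
{
	return handle_irq_pipelined(regs);	/* assumed pipeline-core entry */
}
```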
```diff
@@ -616,6 +653,9 @@
 
 	irq_handler	\handler
 
+#ifdef CONFIG_IRQ_PIPELINE
+	cbz	w0, 66f			// skip epilogue if oob or in-band stalled
+#endif
 #ifdef CONFIG_PREEMPTION
 	ldr	x24, [tsk, #TSK_TI_PREEMPT]	// get preempt count
 alternative_if ARM64_HAS_IRQ_PRIO_MASKING
```
```diff
@@ -630,13 +670,13 @@
 	bl	arm64_preempt_schedule_irq	// irq en/disable is done inside
 1:
 #endif
-
+66:
 	mov	x0, sp
 	bl	exit_el1_irq_or_nmi
 	.endm
 
 	.macro el0_interrupt_handler, handler:req
-	user_exit_irqoff
+	user_exit_el0_irq
 	enable_da_f
 
 	tbz	x22, #55, 1f
```
```diff
@@ -815,6 +855,9 @@
 	kernel_entry 0
 el0_irq_naked:
 	el0_interrupt_handler handle_arch_irq
+#ifdef CONFIG_IRQ_PIPELINE
+	cbz	w0, fast_ret_from_el0_irq	// skip epilogue if oob
+#endif
 	b	ret_to_user
 SYM_CODE_END(el0_irq)
 
```
```diff
@@ -846,6 +889,11 @@
 SYM_CODE_START_LOCAL(ret_to_user)
 	disable_daif
 	gic_prio_kentry_setup tmp=x3
+#ifdef CONFIG_IRQ_PIPELINE
+	ldr	x0, [tsk, #TSK_TI_LOCAL_FLAGS]
+	tst	x0, #_TLF_OOB
+	b.ne	fast_ret_to_user
+#endif
 #ifdef CONFIG_TRACE_IRQFLAGS
 	bl	trace_hardirqs_off
 #endif
```
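The `_TLF_OOB` test gates the whole in-band return-to-user machinery: a thread leaving on the out-of-band stage must not run `work_pending` handling (signals, rescheduling, tracing). Reading the asm back into C, with `local_flags` being the thread_info field behind the `TSK_TI_LOCAL_FLAGS` offset:

```c
/* Illustrative only: C shape of the _TLF_OOB gate in ret_to_user. */
static bool returning_oob(void)
{
	return current_thread_info()->local_flags & _TLF_OOB;
}
```

When this is true, control branches to `fast_ret_to_user` below, which reloads the thread flags and rejoins the exit path at `ret_to_user_naked`, past the `work_pending` check and `user_enter_irqoff`.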
```diff
@@ -854,12 +902,22 @@
 	cbnz	x2, work_pending
 finish_ret_to_user:
 	user_enter_irqoff
+ret_to_user_naked:
 	enable_step_tsk x19, x2
 #ifdef CONFIG_GCC_PLUGIN_STACKLEAK
 	bl	stackleak_erase
 #endif
 	kernel_exit 0
 
+#ifdef CONFIG_IRQ_PIPELINE
+fast_ret_from_el0_irq:
+	disable_daif
+	gic_prio_kentry_setup tmp=x3
+fast_ret_to_user:
+	ldr	x19, [tsk, #TSK_TI_FLAGS]
+	b	ret_to_user_naked
+#endif
+
 /*
  * Ok, we need to do extra processing, enter the slow path.
  */
```