.. | .. |
---|
733 | 733 | btl $9, EFLAGS(%rsp) /* were interrupts off? */ |
---|
734 | 734 | jnc 1f |
---|
735 | 735 | 0: cmpl $0, PER_CPU_VAR(__preempt_count) |
---|
| 736 | +#ifndef CONFIG_PREEMPT_LAZY |
---|
736 | 737 | jnz 1f |
---|
| 738 | +#else |
---|
| 739 | + jz do_preempt_schedule_irq |
---|
| 740 | + |
---|
| 741 | + # at least preempt count == 0 ? |
---|
| 742 | + cmpl $_PREEMPT_ENABLED,PER_CPU_VAR(__preempt_count) |
---|
| 743 | + jnz 1f |
---|
| 744 | + |
---|
| 745 | + movq PER_CPU_VAR(current_task), %rcx |
---|
| 746 | + cmpl $0, TASK_TI_preempt_lazy_count(%rcx) |
---|
| 747 | + jnz 1f |
---|
| 748 | + |
---|
| 749 | + btl $TIF_NEED_RESCHED_LAZY,TASK_TI_flags(%rcx) |
---|
| 750 | + jnc 1f |
---|
| 751 | +do_preempt_schedule_irq: |
---|
| 752 | +#endif |
---|
737 | 753 | call preempt_schedule_irq |
---|
738 | 754 | jmp 0b |
---|
739 | 755 | 1: |
---|
.. | .. |
---|
1084 | 1100 | jmp 2b |
---|
1085 | 1101 | .previous |
---|
1086 | 1102 | |
---|
| 1103 | +#ifndef CONFIG_PREEMPT_RT_FULL |
---|
1087 | 1104 | /* Call softirq on interrupt stack. Interrupts are off. */ |
---|
1088 | 1105 | ENTRY(do_softirq_own_stack) |
---|
1089 | 1106 | pushq %rbp |
---|
.. | .. |
---|
1094 | 1111 | leaveq |
---|
1095 | 1112 | ret |
---|
1096 | 1113 | ENDPROC(do_softirq_own_stack) |
---|
| 1114 | +#endif |
---|
1097 | 1115 | |
---|
1098 | 1116 | #ifdef CONFIG_XEN |
---|
1099 | 1117 | idtentry hypervisor_callback xen_do_hypervisor_callback has_error_code=0 |
---|