@@ -2045,6 +2045,7 @@
 	if (cpumask_test_cpu(task_cpu(p), new_mask))
 		goto out;
 
+	inband_migration_notify(p, dest_cpu);
 	if (task_running(rq, p) || p->state == TASK_WAKING) {
 		struct migration_arg arg = { p, dest_cpu };
 		/* Need help from migration thread: drop lock and wait. */
@@ -3065,7 +3066,7 @@
 		 * - we're serialized against set_special_state() by virtue of
 		 *   it disabling IRQs (this allows not taking ->pi_lock).
 		 */
-		if (!(p->state & state))
+		if (!(p->state & state) || task_is_off_stage(p))
 			goto out;
 
 		success = 1;
@@ -3083,7 +3084,7 @@
 	 */
 	raw_spin_lock_irqsave(&p->pi_lock, flags);
 	smp_mb__after_spinlock();
-	if (!(p->state & state))
+	if (!(p->state & state) || task_is_off_stage(p))
 		goto unlock;
 
 #ifdef CONFIG_FREEZER
@@ -3348,6 +3349,9 @@
 	init_numa_balancing(clone_flags, p);
 #ifdef CONFIG_SMP
 	p->wake_entry.u_flags = CSD_TYPE_TTWU;
+#endif
+#ifdef CONFIG_IRQ_PIPELINE
+	init_task_stall_bits(p);
 #endif
 }
 
@@ -3816,6 +3820,13 @@
 	rseq_preempt(prev);
 	fire_sched_out_preempt_notifiers(prev, next);
 	prepare_task(next);
+	prepare_inband_switch(next);
+	/*
+	 * Do not fold the following hard irqs disabling into
+	 * prepare_inband_switch(), this is required when pipelining
+	 * interrupts, not only by alternate scheduling.
+	 */
+	hard_cond_local_irq_disable();
 	prepare_arch_switch(next);
 }
 
@@ -3973,8 +3984,19 @@
 	 * finish_task_switch() will drop rq->lock() and lower preempt_count
 	 * and the preempt_enable() will end up enabling preemption (on
 	 * PREEMPT_COUNT kernels).
+	 *
+	 * If interrupts are pipelined, we may enable hard irqs since
+	 * the in-band stage is stalled. If dovetailing is enabled
+	 * too, schedule_tail() is the place where transitions of
+	 * tasks from the in-band to the oob stage completes. The
+	 * companion core is notified that 'prev' is now suspended in
+	 * the in-band stage, and can be safely resumed in the oob
+	 * stage.
 	 */
 
+	WARN_ON_ONCE(irq_pipeline_debug() && !irqs_disabled());
+	hard_cond_local_irq_enable();
+	oob_trampoline();
 	rq = finish_task_switch(prev);
 	balance_callback(rq);
 	preempt_enable();
@@ -4028,6 +4050,20 @@
 		 */
 		switch_mm_irqs_off(prev->active_mm, next->mm, next);
 
+		/*
+		 * If dovetail is enabled, insert a short window of
+		 * opportunity for preemption by out-of-band IRQs
+		 * before finalizing the context switch.
+		 * dovetail_context_switch() can deal with preempting
+		 * partially switched in-band contexts.
+		 */
+		if (dovetailing()) {
+			struct mm_struct *oldmm = prev->active_mm;
+			prev->active_mm = next->mm;
+			hard_local_irq_sync();
+			prev->active_mm = oldmm;
+		}
+
 		if (!prev->mm) { // from kernel
 			/* will mmdrop() in finish_task_switch(). */
 			rq->prev_mm = prev->active_mm;
@@ -4042,6 +4078,15 @@
 	/* Here we just switch the register state and the stack. */
 	switch_to(prev, next, prev);
 	barrier();
+
+	/*
+	 * If 'next' is on its way to the oob stage, don't run the
+	 * context switch epilogue just yet. We will do that at some
+	 * point later, when the task switches back to the in-band
+	 * stage.
+	 */
+	if (unlikely(inband_switch_tail()))
+		return NULL;
 
 	return finish_task_switch(prev);
 }
@@ -4557,6 +4602,8 @@
 		panic("corrupted shadow stack detected inside scheduler\n");
 #endif
 
+	check_inband_stage();
+
 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
 	if (!preempt && prev->state && prev->non_block_count) {
 		printk(KERN_ERR "BUG: scheduling in a non-blocking section: %s/%d/%i\n",
@@ -4682,7 +4729,7 @@
  *
  * WARNING: must be called with preemption disabled!
  */
-static void __sched notrace __schedule(bool preempt)
+static int __sched notrace __schedule(bool preempt)
 {
 	struct task_struct *prev, *next;
 	unsigned long *switch_count;
@@ -4802,12 +4849,17 @@
 
 		/* Also unlocks the rq: */
 		rq = context_switch(rq, prev, next, &rf);
+		if (dovetailing() && rq == NULL)
+			/* Task moved to the oob stage. */
+			return 1;
 	} else {
 		rq->clock_update_flags &= ~(RQCF_ACT_SKIP|RQCF_REQ_SKIP);
 		rq_unlock_irq(rq, &rf);
 	}
 
 	balance_callback(rq);
+
+	return 0;
 }
 
 void __noreturn do_task_dead(void)
@@ -4879,7 +4931,8 @@
 	sched_submit_work(tsk);
 	do {
 		preempt_disable();
-		__schedule(false);
+		if (__schedule(false))
+			return;
 		sched_preempt_enable_no_resched();
 	} while (need_resched());
 	sched_update_worker(tsk);
@@ -4960,7 +5013,8 @@
 		 */
 		preempt_disable_notrace();
 		preempt_latency_start(1);
-		__schedule(true);
+		if (__schedule(true))
+			return;
 		preempt_latency_stop(1);
 		preempt_enable_no_resched_notrace();
 
@@ -4982,7 +5036,7 @@
 	 * If there is a non-zero preempt_count or interrupts are disabled,
 	 * we do not want to preempt the current task. Just return..
 	 */
-	if (likely(!preemptible()))
+	if (likely(!running_inband() || !preemptible()))
 		return;
 
 	preempt_schedule_common();
@@ -5008,7 +5062,7 @@
 {
 	enum ctx_state prev_ctx;
 
-	if (likely(!preemptible()))
+	if (likely(!running_inband() || !preemptible()))
 		return;
 
 	do {
@@ -5049,23 +5103,41 @@
  * off of irq context.
  * Note, that this is called and return with irqs disabled. This will
  * protect us against recursive calling from irq.
+ *
+ * IRQ pipeline: we are called with hard irqs off, synchronize the
+ * pipeline then return the same way, so that the in-band log is
+ * guaranteed empty and further interrupt delivery is postponed by the
+ * hardware until have exited the kernel.
  */
 asmlinkage __visible void __sched preempt_schedule_irq(void)
 {
 	enum ctx_state prev_state;
+
+	if (irq_pipeline_debug()) {
+		/* Catch any weirdness in pipelined entry code. */
+		if (WARN_ON_ONCE(!running_inband()))
+			return;
+		WARN_ON_ONCE(!hard_irqs_disabled());
+	}
+
+	hard_cond_local_irq_enable();
 
 	/* Catch callers which need to be fixed */
 	BUG_ON(preempt_count() || !irqs_disabled());
 
 	prev_state = exception_enter();
 
-	do {
+	for (;;) {
 		preempt_disable();
 		local_irq_enable();
 		__schedule(true);
+		sync_inband_irqs();
 		local_irq_disable();
 		sched_preempt_enable_no_resched();
-	} while (need_resched());
+		if (!need_resched())
+			break;
+		hard_cond_local_irq_enable();
+	}
 
 	exception_exit(prev_state);
 }
@@ -8892,6 +8964,233 @@
 
 #endif /* CONFIG_CGROUP_SCHED */
 
+#ifdef CONFIG_DOVETAIL
+
+int dovetail_leave_inband(void)
+{
+	struct task_struct *p = current;
+	struct irq_pipeline_data *pd;
+	unsigned long flags;
+
+	preempt_disable();
+
+	pd = raw_cpu_ptr(&irq_pipeline);
+
+	if (WARN_ON_ONCE(dovetail_debug() && pd->task_inflight))
+		goto out;	/* Paranoid. */
+
+	raw_spin_lock_irqsave(&p->pi_lock, flags);
+	pd->task_inflight = p;
+	/*
+	 * The scope of the off-stage state is broader than _TLF_OOB,
+	 * in that it includes the transition path from the in-band
+	 * context to the oob stage.
+	 */
+	set_thread_local_flags(_TLF_OFFSTAGE);
+	set_current_state(TASK_INTERRUPTIBLE);
+	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+	sched_submit_work(p);
+	/*
+	 * The current task is scheduled out from the inband stage,
+	 * before resuming on the oob stage. Since this code stands
+	 * for the scheduling tail of the oob scheduler,
+	 * arch_dovetail_switch_finish() is called to perform
+	 * architecture-specific fixups (e.g. fpu context reload).
+	 */
+	if (likely(__schedule(false))) {
+		arch_dovetail_switch_finish(false);
+		return 0;
+	}
+
+	clear_thread_local_flags(_TLF_OFFSTAGE);
+	pd->task_inflight = NULL;
+out:
+	preempt_enable();
+
+	return -ERESTARTSYS;
+}
+EXPORT_SYMBOL_GPL(dovetail_leave_inband);
+
+void dovetail_resume_inband(void)
+{
+	struct task_struct *p;
+	struct rq *rq;
+
+	p = __this_cpu_read(irq_pipeline.rqlock_owner);
+	if (WARN_ON_ONCE(dovetail_debug() && p == NULL))
+		return;
+
+	if (WARN_ON_ONCE(dovetail_debug() && (preempt_count() & STAGE_MASK)))
+		return;
+
+	rq = finish_task_switch(p);
+	balance_callback(rq);
+	preempt_enable();
+	oob_trampoline();
+}
+EXPORT_SYMBOL_GPL(dovetail_resume_inband);
+
+#ifdef CONFIG_KVM
+
+#include <linux/kvm_host.h>
+
+static inline void notify_guest_preempt(void)
+{
+	struct kvm_oob_notifier *nfy;
+	struct irq_pipeline_data *p;
+
+	p = raw_cpu_ptr(&irq_pipeline);
+	nfy = p->vcpu_notify;
+	if (unlikely(nfy))
+		nfy->handler(nfy);
+}
+#else
+static inline void notify_guest_preempt(void)
+{ }
+#endif
+
+bool dovetail_context_switch(struct dovetail_altsched_context *out,
+			     struct dovetail_altsched_context *in,
+			     bool leave_inband)
+{
+	unsigned long pc __maybe_unused, lockdep_irqs;
+	struct task_struct *next, *prev, *last;
+	struct mm_struct *prev_mm, *next_mm;
+	bool inband_tail = false;
+
+	WARN_ON_ONCE(dovetail_debug() && on_pipeline_entry());
+
+	if (leave_inband) {
+		struct task_struct *tsk = current;
+		/*
+		 * We are about to leave the current inband context
+		 * for switching to an out-of-band task, save the
+		 * preempted context information.
+		 */
+		out->task = tsk;
+		out->active_mm = tsk->active_mm;
+		/*
+		 * Switching out-of-band may require some housekeeping
+		 * from a kernel VM which might currently run guest
+		 * code, notify it about the upcoming preemption.
+		 */
+		notify_guest_preempt();
+	}
+
+	arch_dovetail_switch_prepare(leave_inband);
+
+	next = in->task;
+	prev = out->task;
+	prev_mm = out->active_mm;
+	next_mm = in->active_mm;
+
+	if (next_mm == NULL) {
+		in->active_mm = prev_mm;
+		in->borrowed_mm = true;
+		enter_lazy_tlb(prev_mm, next);
+	} else {
+		switch_oob_mm(prev_mm, next_mm, next);
+		/*
+		 * We might be switching back to the inband context
+		 * which we preempted earlier, shortly after "current"
+		 * dropped its mm context in the do_exit() path
+		 * (next->mm == NULL). In such a case, a lazy TLB
+		 * state is expected when leaving the mm.
+		 */
+		if (next->mm == NULL)
+			enter_lazy_tlb(prev_mm, next);
+	}
+
+	if (out->borrowed_mm) {
+		out->borrowed_mm = false;
+		out->active_mm = NULL;
+	}
+
+	/*
+	 * Tasks running out-of-band may alter the (in-band)
+	 * preemption count as long as they don't trigger an in-band
+	 * rescheduling, which Dovetail properly blocks.
+	 *
+	 * If the preemption count is not stack-based but a global
+	 * per-cpu variable instead, changing it has a globally
+	 * visible side-effect though, which is a problem if the
+	 * out-of-band task is preempted and schedules away before the
+	 * change is rolled back: this may cause the in-band context
+	 * to later resume with a broken preemption count.
+	 *
+	 * For this reason, the preemption count of any context which
+	 * blocks from the out-of-band stage is carried over and
+	 * restored across switches, emulating a stack-based
+	 * storage.
+	 *
+	 * Eventually, the count is reset to FORK_PREEMPT_COUNT upon
+	 * transition from out-of-band to in-band stage, reinstating
+	 * the value in effect when the converse transition happened
+	 * at some point before.
+	 */
+	if (IS_ENABLED(CONFIG_HAVE_PERCPU_PREEMPT_COUNT))
+		pc = preempt_count();
+
+	/*
+	 * Like the preemption count and for the same reason, the irq
+	 * state maintained by lockdep must be preserved across
+	 * switches.
+	 */
+	lockdep_irqs = lockdep_read_irqs_state();
+
+	switch_to(prev, next, last);
+	barrier();
+
+	if (check_hard_irqs_disabled())
+		hard_local_irq_disable();
+
+	/*
+	 * If we entered this routine for switching to an out-of-band
+	 * task but don't have _TLF_OOB set for the current context
+	 * when resuming, this portion of code is the switch tail of
+	 * the inband schedule() routine, finalizing a transition to
+	 * the inband stage for the current task. Update the stage
+	 * level as/if required.
+	 */
+	if (unlikely(!leave_inband && !test_thread_local_flags(_TLF_OOB))) {
+		if (IS_ENABLED(CONFIG_HAVE_PERCPU_PREEMPT_COUNT))
+			preempt_count_set(FORK_PREEMPT_COUNT);
+		else if (unlikely(dovetail_debug() &&
+				  !(preempt_count() & STAGE_MASK)))
+			WARN_ON_ONCE(1);
+		else
+			preempt_count_sub(STAGE_OFFSET);
+
+		lockdep_write_irqs_state(lockdep_irqs);
+
+		/*
+		 * Fixup the interrupt state conversely to what
+		 * inband_switch_tail() does for the opposite stage
+		 * switching direction.
+		 */
+		stall_inband();
+		trace_hardirqs_off();
+		inband_tail = true;
+	} else {
+		if (IS_ENABLED(CONFIG_HAVE_PERCPU_PREEMPT_COUNT))
+			preempt_count_set(pc);
+
+		lockdep_write_irqs_state(lockdep_irqs);
+	}
+
+	arch_dovetail_switch_finish(leave_inband);
+
+	/*
+	 * inband_tail is true whenever we are finalizing a transition
+	 * to the inband stage from the oob context for current. See
+	 * above.
+	 */
+	return inband_tail;
+}
+EXPORT_SYMBOL_GPL(dovetail_context_switch);
+
+#endif /* CONFIG_DOVETAIL */
+
 void dump_cpu_task(int cpu)
 {
 	pr_info("Task dump for CPU %d:\n", cpu);
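
For orientation, the sketch below strings the new entry points together the way their own comments describe the flow: dovetail_leave_inband() parks the caller on the in-band side and returns once the task resumes out-of-band (or -ERESTARTSYS if the transition is broken), dovetail_context_switch() performs the out-of-band switch and returns true when it is in fact finalizing a transition back to the in-band stage, and dovetail_resume_inband() then runs the deferred in-band switch tail. This is an illustrative sketch only, not part of the patch: struct oob_thread, both helper functions, and the exact call site of dovetail_resume_inband() are hypothetical choices made here for clarity; only the three dovetail_* entry points and struct dovetail_altsched_context come from the code above, and the <linux/dovetail.h> include assumes the usual Dovetail header layout.

```c
/*
 * Illustrative sketch only, not part of the patch. Names such as
 * struct oob_thread, oob_thread_enter() and oob_thread_switch() are
 * hypothetical; the dovetail_* calls are the entry points added above.
 */
#include <linux/dovetail.h>

struct oob_thread {
	struct dovetail_altsched_context altsched;
	/* ...companion core specific state... */
};

/* In-band side: move the current task to the out-of-band stage. */
static int oob_thread_enter(void)
{
	/* Returns 0 once running oob, -ERESTARTSYS if the move failed. */
	return dovetail_leave_inband();
}

/* Oob side: switch from @prev to @next within the companion core. */
static void oob_thread_switch(struct oob_thread *prev,
			      struct oob_thread *next,
			      bool leave_inband)
{
	/*
	 * A true return value means the resuming context is in fact
	 * completing an oob -> in-band transition, so the in-band
	 * switch tail deferred by context_switch() must run now.
	 */
	if (dovetail_context_switch(&prev->altsched, &next->altsched,
				    leave_inband))
		dovetail_resume_inband();
}
```

This mirrors the contract spelled out in the hunks above: context_switch() returns NULL and __schedule() returns nonzero when the outgoing task heads out-of-band, so the in-band epilogue (finish_task_switch() and friends) is deferred until dovetail_resume_inband() performs it on behalf of the companion core.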
---|