--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -876,6 +876,15 @@ static int take_cpu_down(void *_param)
 	int err, cpu = smp_processor_id();
 	int ret;
 
+#ifdef CONFIG_PREEMPT_RT_BASE
+	/*
+	 * If any tasks disabled migration before we got here,
+	 * go back and sleep again.
+	 */
+	if (cpu_nr_pinned(cpu))
+		return -EAGAIN;
+#endif
+
 	/* Ensure this CPU doesn't handle any more interrupts. */
 	err = __cpu_disable();
 	if (err < 0)
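
take_cpu_down() runs in stop-machine context on the outgoing CPU, so this early check turns a still-pinned CPU into a clean -EAGAIN instead of pulling the CPU out from under a migrate-disabled task. A minimal sketch of the accounting the check assumes — migrate_disable() bumping a per-runqueue pin count that migrate_enable() drops — where the field name is an assumption, not necessarily the tree's exact implementation:

	/*
	 * Hypothetical sketch: cpu_nr_pinned() only reads the pin count
	 * maintained by migrate_disable()/migrate_enable().
	 */
	int cpu_nr_pinned(int cpu)
	{
		struct rq *rq = cpu_rq(cpu);	/* per-CPU runqueue */

		return rq->nr_pinned;		/* assumed pin counter field */
	}
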
@@ -903,6 +912,10 @@ static int take_cpu_down(void *_param)
 	return 0;
 }
 
+#ifdef CONFIG_PREEMPT_RT_BASE
+struct task_struct *takedown_cpu_task;
+#endif
+
 static int takedown_cpu(unsigned int cpu)
 {
 	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
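
takedown_cpu_task can be a bare global with no locking because hotplug add/remove operations are serialized (cpu_maps_update_begin() takes cpu_add_remove_lock), so at most one takedown is in flight at a time; the WARN_ON_ONCE() in the next hunk sanity-checks exactly that.
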
@@ -917,11 +930,39 @@ static int takedown_cpu(unsigned int cpu)
 	 */
 	irq_lock_sparse();
 
+#ifdef CONFIG_PREEMPT_RT_BASE
+	WARN_ON_ONCE(takedown_cpu_task);
+	takedown_cpu_task = current;
+
+again:
+	/*
+	 * If a task pins this CPU after we pass this check, take_cpu_down
+	 * will return -EAGAIN.
+	 */
+	for (;;) {
+		int nr_pinned;
+
+		set_current_state(TASK_UNINTERRUPTIBLE);
+		nr_pinned = cpu_nr_pinned(cpu);
+		if (nr_pinned == 0)
+			break;
+		schedule();
+	}
+	set_current_state(TASK_RUNNING);
+#endif
+
 	/*
 	 * So now all preempt/rcu users must observe !cpu_active().
 	 */
 	err = stop_machine_cpuslocked(take_cpu_down, NULL, cpumask_of(cpu));
+#ifdef CONFIG_PREEMPT_RT_BASE
+	if (err == -EAGAIN)
+		goto again;
+#endif
 	if (err) {
+#ifdef CONFIG_PREEMPT_RT_BASE
+		takedown_cpu_task = NULL;
+#endif
 		/* CPU refused to die */
 		irq_unlock_sparse();
 		/* Unpark the hotplug thread so we can rollback there */
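
The wait loop is the standard lost-wakeup-safe pattern: set_current_state(TASK_UNINTERRUPTIBLE) is called before the pin count is re-read, so a wake-up arriving between the check and schedule() merely turns schedule() into a no-op instead of being missed. For that handshake to close, the pin-release path has to wake takedown_cpu_task once the last pin on the CPU drops. A sketch of that counterpart, where the helper name and exact hook point (presumably the tail of migrate_enable()) are assumptions:

	/*
	 * Assumed counterpart on the pin-release side: once this CPU has
	 * no pinned tasks left, kick a waiting hotplug thread. The helper
	 * name is hypothetical.
	 */
	static inline void wake_takedown_if_unpinned(int cpu)
	{
		if (!cpu_nr_pinned(cpu) && takedown_cpu_task)
			wake_up_process(takedown_cpu_task);
	}
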
@@ -940,6 +981,9 @@ static int takedown_cpu(unsigned int cpu)
 	wait_for_ap_thread(st, false);
 	BUG_ON(st->state != CPUHP_AP_IDLE_DEAD);
 
+#ifdef CONFIG_PREEMPT_RT_BASE
+	takedown_cpu_task = NULL;
+#endif
 	/* Interrupts are moved away from the dying cpu, reenable alloc/free */
 	irq_unlock_sparse();
 
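
Clearing takedown_cpu_task on both the failure path above and this success path bounds the sleep/wake handshake to a single takedown attempt. The residual race — a task pinning the CPU after the sleeping check succeeds but before the stopper runs — is closed by the cpu_nr_pinned() re-check inside take_cpu_down(), whose -EAGAIN sends takedown_cpu() back to the wait loop via the goto.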