| .. | .. |
|---|
| 61 | 61 | #include <linux/trace_events.h> |
|---|
| 62 | 62 | #include <linux/suspend.h> |
|---|
| 63 | 63 | #include <linux/ftrace.h> |
|---|
| 64 | +#include <linux/delay.h> |
|---|
| 65 | +#include <linux/gfp.h> |
|---|
| 66 | +#include <linux/oom.h> |
|---|
| 67 | +#include <linux/smpboot.h> |
|---|
| 68 | +#include <linux/jiffies.h> |
|---|
| 69 | +#include <linux/sched/isolation.h> |
|---|
| 70 | +#include "../time/tick-internal.h" |
|---|
| 64 | 71 | |
|---|
| 65 | 72 | #include "tree.h" |
|---|
| 66 | 73 | #include "rcu.h" |
|---|
| .. | .. |
|---|
| 245 | 252 | this_cpu_ptr(&rcu_sched_data), true); |
|---|
| 246 | 253 | } |
|---|
| 247 | 254 | |
|---|
| 255 | +#ifdef CONFIG_PREEMPT_RT_FULL |
|---|
| 256 | +static void rcu_preempt_qs(void); |
|---|
| 257 | + |
|---|
| 258 | +void rcu_bh_qs(void) |
|---|
| 259 | +{ |
|---|
| 260 | + unsigned long flags; |
|---|
| 261 | + |
|---|
| 262 | + /* Callers to this function, rcu_preempt_qs(), must disable irqs. */ |
|---|
| 263 | + local_irq_save(flags); |
|---|
| 264 | + rcu_preempt_qs(); |
|---|
| 265 | + local_irq_restore(flags); |
|---|
| 266 | +} |
|---|
| 267 | +#else |
|---|
| 248 | 268 | void rcu_bh_qs(void) |
|---|
| 249 | 269 | { |
|---|
| 250 | 270 | RCU_LOCKDEP_WARN(preemptible(), "rcu_bh_qs() invoked with preemption enabled!!!"); |
|---|
| .. | .. |
|---|
| 255 | 275 | __this_cpu_write(rcu_bh_data.cpu_no_qs.b.norm, false); |
|---|
| 256 | 276 | } |
|---|
| 257 | 277 | } |
|---|
| 278 | +#endif |
|---|
| 258 | 279 | |
|---|
| 259 | 280 | /* |
|---|
| 260 | 281 | * Steal a bit from the bottom of ->dynticks for idle entry/exit |
|---|
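This hunk is the core of merging the RCU-bh flavor into preemptible RCU for PREEMPT_RT_FULL: on RT, softirq (BH) processing runs in preemptible task context, so a BH reader is effectively a preemptible-RCU reader and rcu_bh_qs() can simply report a preemptible-RCU quiescent state (with interrupts disabled, as rcu_preempt_qs() requires). For the merge to be complete, the _bh API also has to be redirected to the preemptible flavor on the header side; that part is not in this file, so the sketch below shows only the assumed shape of that mapping, not verbatim patch content.

```c
/* Sketch only: assumed include/linux/rcupdate.h side of the merge under
 * CONFIG_PREEMPT_RT_FULL, not verbatim kernel code. */
#ifdef CONFIG_PREEMPT_RT_FULL
# define call_rcu_bh            call_rcu
# define rcu_barrier_bh         rcu_barrier
# define synchronize_rcu_bh     synchronize_rcu
#endif
```

With a mapping like that in place, the rcu_bh entry points remaining in this file can simply be compiled out, which is exactly what the `#ifndef CONFIG_PREEMPT_RT_FULL` hunks below do.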
| .. | .. |
|---|
| 569 | 590 | } |
|---|
| 570 | 591 | EXPORT_SYMBOL_GPL(rcu_sched_get_gp_seq); |
|---|
| 571 | 592 | |
|---|
| 593 | +#ifndef CONFIG_PREEMPT_RT_FULL |
|---|
| 572 | 594 | /* |
|---|
| 573 | 595 | * Return the number of RCU-bh GPs completed thus far for debug & stats. |
|---|
| 574 | 596 | */ |
|---|
| .. | .. |
|---|
| 577 | 599 | return READ_ONCE(rcu_bh_state.gp_seq); |
|---|
| 578 | 600 | } |
|---|
| 579 | 601 | EXPORT_SYMBOL_GPL(rcu_bh_get_gp_seq); |
|---|
| 602 | +#endif |
|---|
| 580 | 603 | |
|---|
| 581 | 604 | /* |
|---|
| 582 | 605 | * Return the number of RCU expedited batches completed thus far for |
|---|
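With no separate bh flavor on RT, its grace-period sequence counter has no meaning, so the accessor is compiled out rather than stubbed. Diagnostic code on an RT build can only report the remaining flavors; a purely hypothetical helper, just to show which accessors survive:

```c
/* Hypothetical debug helper, not part of the patch: only the preemptible and
 * sched flavors expose a grace-period sequence on PREEMPT_RT_FULL. */
static void report_gp_seqs(void)
{
        pr_info("rcu gp_seq=%lu rcu_sched gp_seq=%lu\n",
                rcu_get_gp_seq(), rcu_sched_get_gp_seq());
#ifndef CONFIG_PREEMPT_RT_FULL
        pr_info("rcu_bh gp_seq=%lu\n", rcu_bh_get_gp_seq());
#endif
}
```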
| .. | .. |
|---|
| 600 | 623 | } |
|---|
| 601 | 624 | EXPORT_SYMBOL_GPL(rcu_exp_batches_completed_sched); |
|---|
| 602 | 625 | |
|---|
| 626 | +#ifndef CONFIG_PREEMPT_RT_FULL |
|---|
| 603 | 627 | /* |
|---|
| 604 | 628 | * Force a quiescent state. |
|---|
| 605 | 629 | */ |
|---|
| .. | .. |
|---|
| 617 | 641 | force_quiescent_state(&rcu_bh_state); |
|---|
| 618 | 642 | } |
|---|
| 619 | 643 | EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state); |
|---|
| 644 | + |
|---|
| 645 | +#else |
|---|
| 646 | +void rcu_force_quiescent_state(void) |
|---|
| 647 | +{ |
|---|
| 648 | +} |
|---|
| 649 | +EXPORT_SYMBOL_GPL(rcu_force_quiescent_state); |
|---|
| 650 | +#endif |
|---|
| 620 | 651 | |
|---|
| 621 | 652 | /* |
|---|
| 622 | 653 | * Force a quiescent state for RCU-sched. |
|---|
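The force-quiescent-state entry points get the same guard: rcu_bh_force_quiescent_state() disappears entirely, while rcu_force_quiescent_state() is kept as an exported empty stub, presumably so that existing callers still build and link on RT; they just no longer expedite anything. Schematically, for an illustrative caller:

```c
/* Illustrative caller, not part of the patch. */
static void poke_rcu(void)
{
        rcu_force_quiescent_state();            /* no-op on PREEMPT_RT_FULL */
        rcu_sched_force_quiescent_state();      /* still acts on the sched flavor */
        /* rcu_bh_force_quiescent_state() is not built at all on RT */
}
```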
| .. | .. |
|---|
| 675 | 706 | case RCU_FLAVOR: |
|---|
| 676 | 707 | rsp = rcu_state_p; |
|---|
| 677 | 708 | break; |
|---|
| 709 | +#ifndef CONFIG_PREEMPT_RT_FULL |
|---|
| 678 | 710 | case RCU_BH_FLAVOR: |
|---|
| 679 | 711 | rsp = &rcu_bh_state; |
|---|
| 680 | 712 | break; |
|---|
| 713 | +#endif |
|---|
| 681 | 714 | case RCU_SCHED_FLAVOR: |
|---|
| 682 | 715 | rsp = &rcu_sched_state; |
|---|
| 683 | 716 | break; |
|---|
| .. | .. |
|---|
| 1264 | 1297 | !rdp->rcu_iw_pending && rdp->rcu_iw_gp_seq != rnp->gp_seq && |
|---|
| 1265 | 1298 | (rnp->ffmask & rdp->grpmask)) { |
|---|
| 1266 | 1299 | init_irq_work(&rdp->rcu_iw, rcu_iw_handler); |
|---|
| 1300 | + rdp->rcu_iw.flags = IRQ_WORK_HARD_IRQ; |
|---|
| 1267 | 1301 | rdp->rcu_iw_pending = true; |
|---|
| 1268 | 1302 | rdp->rcu_iw_gp_seq = rnp->gp_seq; |
|---|
| 1269 | 1303 | irq_work_queue_on(&rdp->rcu_iw, rdp->cpu); |
|---|
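On PREEMPT_RT, irq_work is normally deferred to a thread rather than run from the hard interrupt. The stall-detection irq_work queued here must fire even when that thread cannot run, so it is flagged IRQ_WORK_HARD_IRQ (an RT-tree flag) to keep it in hard-interrupt context. The same pattern in isolation, with a hypothetical handler and target CPU:

```c
/* Sketch assuming the RT tree's IRQ_WORK_HARD_IRQ flag; the handler name and
 * target CPU are hypothetical. */
#include <linux/irq_work.h>

static void diag_handler(struct irq_work *work)
{
        pr_info("diagnostic irq_work ran on CPU %d\n", smp_processor_id());
}

static struct irq_work diag_work;

static void queue_diag_on(int cpu)
{
        init_irq_work(&diag_work, diag_handler);
        diag_work.flags = IRQ_WORK_HARD_IRQ;    /* do not defer to a thread on RT */
        irq_work_queue_on(&diag_work, cpu);
}
```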
| .. | .. |
|---|
| 2873 | 2907 | /* |
|---|
| 2874 | 2908 | * Do RCU core processing for the current CPU. |
|---|
| 2875 | 2909 | */ |
|---|
| 2876 | | -static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused) |
|---|
| 2910 | +static __latent_entropy void rcu_process_callbacks(void) |
|---|
| 2877 | 2911 | { |
|---|
| 2878 | 2912 | struct rcu_state *rsp; |
|---|
| 2879 | 2913 | |
|---|
| 2880 | 2914 | if (cpu_is_offline(smp_processor_id())) |
|---|
| 2881 | 2915 | return; |
|---|
| 2882 | | - trace_rcu_utilization(TPS("Start RCU core")); |
|---|
| 2883 | 2916 | for_each_rcu_flavor(rsp) |
|---|
| 2884 | 2917 | __rcu_process_callbacks(rsp); |
|---|
| 2885 | | - trace_rcu_utilization(TPS("End RCU core")); |
|---|
| 2886 | 2918 | } |
|---|
| 2887 | 2919 | |
|---|
| 2920 | +static DEFINE_PER_CPU(struct task_struct *, rcu_cpu_kthread_task); |
|---|
| 2888 | 2921 | /* |
|---|
| 2889 | 2922 | * Schedule RCU callback invocation. If the specified type of RCU |
|---|
| 2890 | 2923 | * does not support RCU priority boosting, just do a direct call, |
|---|
| .. | .. |
|---|
| 2896 | 2929 | { |
|---|
| 2897 | 2930 | if (unlikely(!READ_ONCE(rcu_scheduler_fully_active))) |
|---|
| 2898 | 2931 | return; |
|---|
| 2899 | | - if (likely(!rsp->boost)) { |
|---|
| 2900 | | - rcu_do_batch(rsp, rdp); |
|---|
| 2901 | | - return; |
|---|
| 2902 | | - } |
|---|
| 2903 | | - invoke_rcu_callbacks_kthread(); |
|---|
| 2932 | + rcu_do_batch(rsp, rdp); |
|---|
| 2904 | 2933 | } |
|---|
| 2905 | 2934 | |
|---|
| 2935 | +static void rcu_wake_cond(struct task_struct *t, int status) |
|---|
| 2936 | +{ |
|---|
| 2937 | + /* |
|---|
| 2938 | + * If the thread is yielding, only wake it when this |
|---|
| 2939 | + * is invoked from idle |
|---|
| 2940 | + */ |
|---|
| 2941 | + if (t && (status != RCU_KTHREAD_YIELDING || is_idle_task(current))) |
|---|
| 2942 | + wake_up_process(t); |
|---|
| 2943 | +} |
|---|
| 2944 | + |
|---|
| 2945 | +/* |
|---|
| 2946 | + * Wake up this CPU's rcuc kthread to do RCU core processing. |
|---|
| 2947 | + */ |
|---|
| 2906 | 2948 | static void invoke_rcu_core(void) |
|---|
| 2907 | 2949 | { |
|---|
| 2908 | | - if (cpu_online(smp_processor_id())) |
|---|
| 2909 | | - raise_softirq(RCU_SOFTIRQ); |
|---|
| 2950 | + unsigned long flags; |
|---|
| 2951 | + struct task_struct *t; |
|---|
| 2952 | + |
|---|
| 2953 | + if (!cpu_online(smp_processor_id())) |
|---|
| 2954 | + return; |
|---|
| 2955 | + local_irq_save(flags); |
|---|
| 2956 | + __this_cpu_write(rcu_cpu_has_work, 1); |
|---|
| 2957 | + t = __this_cpu_read(rcu_cpu_kthread_task); |
|---|
| 2958 | + if (t != NULL && current != t) |
|---|
| 2959 | + rcu_wake_cond(t, __this_cpu_read(rcu_cpu_kthread_status)); |
|---|
| 2960 | + local_irq_restore(flags); |
|---|
| 2910 | 2961 | } |
|---|
| 2962 | + |
|---|
| 2963 | +static void rcu_cpu_kthread_park(unsigned int cpu) |
|---|
| 2964 | +{ |
|---|
| 2965 | + per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU; |
|---|
| 2966 | +} |
|---|
| 2967 | + |
|---|
| 2968 | +static int rcu_cpu_kthread_should_run(unsigned int cpu) |
|---|
| 2969 | +{ |
|---|
| 2970 | + return __this_cpu_read(rcu_cpu_has_work); |
|---|
| 2971 | +} |
|---|
| 2972 | + |
|---|
| 2973 | +/* |
|---|
| 2974 | + * Per-CPU kernel thread that invokes RCU callbacks. This replaces the |
|---|
| 2975 | + * RCU softirq used in flavors and configurations of RCU that do not |
|---|
| 2976 | + * support RCU priority boosting. |
|---|
| 2977 | + */ |
|---|
| 2978 | +static void rcu_cpu_kthread(unsigned int cpu) |
|---|
| 2979 | +{ |
|---|
| 2980 | + unsigned int *statusp = this_cpu_ptr(&rcu_cpu_kthread_status); |
|---|
| 2981 | + char work, *workp = this_cpu_ptr(&rcu_cpu_has_work); |
|---|
| 2982 | + int spincnt; |
|---|
| 2983 | + |
|---|
| 2984 | + for (spincnt = 0; spincnt < 10; spincnt++) { |
|---|
| 2985 | + trace_rcu_utilization(TPS("Start CPU kthread@rcu_wait")); |
|---|
| 2986 | + local_bh_disable(); |
|---|
| 2987 | + *statusp = RCU_KTHREAD_RUNNING; |
|---|
| 2988 | + this_cpu_inc(rcu_cpu_kthread_loops); |
|---|
| 2989 | + local_irq_disable(); |
|---|
| 2990 | + work = *workp; |
|---|
| 2991 | + *workp = 0; |
|---|
| 2992 | + local_irq_enable(); |
|---|
| 2993 | + if (work) |
|---|
| 2994 | + rcu_process_callbacks(); |
|---|
| 2995 | + local_bh_enable(); |
|---|
| 2996 | + if (*workp == 0) { |
|---|
| 2997 | + trace_rcu_utilization(TPS("End CPU kthread@rcu_wait")); |
|---|
| 2998 | + *statusp = RCU_KTHREAD_WAITING; |
|---|
| 2999 | + return; |
|---|
| 3000 | + } |
|---|
| 3001 | + } |
|---|
| 3002 | + *statusp = RCU_KTHREAD_YIELDING; |
|---|
| 3003 | + trace_rcu_utilization(TPS("Start CPU kthread@rcu_yield")); |
|---|
| 3004 | + schedule_timeout_interruptible(2); |
|---|
| 3005 | + trace_rcu_utilization(TPS("End CPU kthread@rcu_yield")); |
|---|
| 3006 | + *statusp = RCU_KTHREAD_WAITING; |
|---|
| 3007 | +} |
|---|
| 3008 | + |
|---|
| 3009 | +static struct smp_hotplug_thread rcu_cpu_thread_spec = { |
|---|
| 3010 | + .store = &rcu_cpu_kthread_task, |
|---|
| 3011 | + .thread_should_run = rcu_cpu_kthread_should_run, |
|---|
| 3012 | + .thread_fn = rcu_cpu_kthread, |
|---|
| 3013 | + .thread_comm = "rcuc/%u", |
|---|
| 3014 | + .setup = rcu_cpu_kthread_setup, |
|---|
| 3015 | + .park = rcu_cpu_kthread_park, |
|---|
| 3016 | +}; |
|---|
| 3017 | + |
|---|
| 3018 | +/* |
|---|
| 3019 | + * Spawn per-CPU RCU core processing kthreads. |
|---|
| 3020 | + */ |
|---|
| 3021 | +static int __init rcu_spawn_core_kthreads(void) |
|---|
| 3022 | +{ |
|---|
| 3023 | + int cpu; |
|---|
| 3024 | + |
|---|
| 3025 | + for_each_possible_cpu(cpu) |
|---|
| 3026 | + per_cpu(rcu_cpu_has_work, cpu) = 0; |
|---|
| 3027 | + BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec)); |
|---|
| 3028 | + return 0; |
|---|
| 3029 | +} |
|---|
| 3030 | +early_initcall(rcu_spawn_core_kthreads); |
|---|
| 2911 | 3031 | |
|---|
| 2912 | 3032 | /* |
|---|
| 2913 | 3033 | * Handle any core-RCU processing required by a call_rcu() invocation. |
|---|
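This is the structural centerpiece of the patch: RCU core processing moves out of RCU_SOFTIRQ and into per-CPU "rcuc/%u" kthreads managed by the smpboot infrastructure (the matching open_softirq() registration is removed in the rcu_init() hunk at the end of this excerpt, and rcu_process_callbacks() lost its unused softirq_action argument above). invoke_rcu_core() now just sets the per-CPU rcu_cpu_has_work flag with interrupts off and conditionally wakes the kthread, and the kthread loop emits its own Start/End "CPU kthread" utilization tracepoints in place of the removed Start/End "RCU core" ones. The smpboot API being used is generic; a minimal self-contained sketch of the same pattern for a hypothetical per-CPU worker:

```c
/* Minimal sketch of the smpboot per-CPU kthread pattern used above; all
 * "my_worker" names are hypothetical, only the smpboot API itself is real. */
#include <linux/smpboot.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(struct task_struct *, my_worker_task);
static DEFINE_PER_CPU(int, my_worker_has_work);

static int my_worker_should_run(unsigned int cpu)
{
        return __this_cpu_read(my_worker_has_work);
}

static void my_worker_fn(unsigned int cpu)
{
        /* Invoked with preemption enabled whenever should_run() returned true. */
        __this_cpu_write(my_worker_has_work, 0);
        pr_info("per-CPU worker ran on CPU %u\n", cpu);
}

static struct smp_hotplug_thread my_worker_threads = {
        .store                  = &my_worker_task,
        .thread_should_run      = my_worker_should_run,
        .thread_fn              = my_worker_fn,
        .thread_comm            = "myworker/%u",
};

static int __init my_worker_init(void)
{
        /* One kthread per possible CPU, parked/unparked with CPU hotplug,
         * exactly as rcu_spawn_core_kthreads() does for rcuc. */
        return smpboot_register_percpu_thread(&my_worker_threads);
}
early_initcall(my_worker_init);
```

The rcuc variant above additionally tracks a status word and bounds each pass with a spin/yield loop because callback invocation can run long; otherwise the structure is the same.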
| .. | .. |
|---|
| 3060 | 3180 | } |
|---|
| 3061 | 3181 | EXPORT_SYMBOL_GPL(call_rcu_sched); |
|---|
| 3062 | 3182 | |
|---|
| 3183 | +#ifndef CONFIG_PREEMPT_RT_FULL |
|---|
| 3063 | 3184 | /** |
|---|
| 3064 | 3185 | * call_rcu_bh() - Queue an RCU for invocation after a quicker grace period. |
|---|
| 3065 | 3186 | * @head: structure to be used for queueing the RCU updates. |
|---|
| .. | .. |
|---|
| 3087 | 3208 | __call_rcu(head, func, &rcu_bh_state, -1, 0); |
|---|
| 3088 | 3209 | } |
|---|
| 3089 | 3210 | EXPORT_SYMBOL_GPL(call_rcu_bh); |
|---|
| 3211 | +#endif |
|---|
| 3090 | 3212 | |
|---|
| 3091 | 3213 | /* |
|---|
| 3092 | 3214 | * Queue an RCU callback for lazy invocation after a grace period. |
|---|
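call_rcu_bh() is likewise compiled out; on an RT build its callers are expected to end up in call_rcu() through the header-side mapping sketched earlier. For reference, the asynchronous deferred-free pattern it serves, which keeps working unchanged either way (hypothetical data structure):

```c
/* Hypothetical call_rcu_bh() usage; on PREEMPT_RT_FULL the callback is
 * assumed to be queued through call_rcu() instead. */
struct flow_entry {
        int             key;
        struct rcu_head rh;
};

static void flow_entry_free_cb(struct rcu_head *head)
{
        kfree(container_of(head, struct flow_entry, rh));
}

static void flow_entry_release(struct flow_entry *fe)
{
        /* Defer the kfree() until all rcu_read_lock_bh() readers are done. */
        call_rcu_bh(&fe->rh, flow_entry_free_cb);
}
```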
| .. | .. |
|---|
| 3172 | 3294 | } |
|---|
| 3173 | 3295 | EXPORT_SYMBOL_GPL(synchronize_sched); |
|---|
| 3174 | 3296 | |
|---|
| 3297 | +#ifndef CONFIG_PREEMPT_RT_FULL |
|---|
| 3175 | 3298 | /** |
|---|
| 3176 | 3299 | * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed. |
|---|
| 3177 | 3300 | * |
|---|
| .. | .. |
|---|
| 3198 | 3321 | wait_rcu_gp(call_rcu_bh); |
|---|
| 3199 | 3322 | } |
|---|
| 3200 | 3323 | EXPORT_SYMBOL_GPL(synchronize_rcu_bh); |
|---|
| 3324 | +#endif |
|---|
| 3201 | 3325 | |
|---|
| 3202 | 3326 | /** |
|---|
| 3203 | 3327 | * get_state_synchronize_rcu - Snapshot current RCU state |
|---|
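synchronize_rcu_bh() gets the same treatment. Assuming the header-side mapping sketched earlier, RT callers end up waiting for a preemptible-RCU grace period, which also covers plain rcu_read_lock() readers, so the guarantee only gets stronger. A small illustrative reader/updater pair (all names hypothetical):

```c
/* Hypothetical config-swap pattern using the bh flavor; on PREEMPT_RT_FULL
 * the grace period below is assumed to be served by preemptible RCU. */
struct cfg {
        int limit;
};

static struct cfg __rcu *cur_cfg;
static DEFINE_MUTEX(cfg_mutex);

static int read_limit(void)                    /* e.g. from softirq context */
{
        struct cfg *c;
        int limit = 0;

        rcu_read_lock_bh();
        c = rcu_dereference_bh(cur_cfg);
        if (c)
                limit = c->limit;
        rcu_read_unlock_bh();
        return limit;
}

static void set_cfg(struct cfg *newc)
{
        struct cfg *old;

        mutex_lock(&cfg_mutex);
        old = rcu_dereference_protected(cur_cfg, lockdep_is_held(&cfg_mutex));
        rcu_assign_pointer(cur_cfg, newc);
        mutex_unlock(&cfg_mutex);

        synchronize_rcu_bh();   /* wait out all rcu_read_lock_bh() readers */
        kfree(old);
}
```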
| .. | .. |
|---|
| 3505 | 3629 | mutex_unlock(&rsp->barrier_mutex); |
|---|
| 3506 | 3630 | } |
|---|
| 3507 | 3631 | |
|---|
| 3632 | +#ifndef CONFIG_PREEMPT_RT_FULL |
|---|
| 3508 | 3633 | /** |
|---|
| 3509 | 3634 | * rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete. |
|---|
| 3510 | 3635 | */ |
|---|
| .. | .. |
|---|
| 3513 | 3638 | _rcu_barrier(&rcu_bh_state); |
|---|
| 3514 | 3639 | } |
|---|
| 3515 | 3640 | EXPORT_SYMBOL_GPL(rcu_barrier_bh); |
|---|
| 3641 | +#endif |
|---|
| 3516 | 3642 | |
|---|
| 3517 | 3643 | /** |
|---|
| 3518 | 3644 | * rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks. |
|---|
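rcu_barrier_bh() is the flush operation for pending call_rcu_bh() callbacks and goes behind the same guard. The classic place it matters is module teardown: before the module text can go away, every callback the module queued must have executed. Illustrative shape, with hypothetical module names:

```c
/* Hypothetical module-exit pattern; on PREEMPT_RT_FULL the barrier is assumed
 * to resolve to rcu_barrier() via the header-side mapping. */
static void __exit flow_table_exit(void)
{
        flow_table_unregister();        /* stop queueing new call_rcu_bh() callbacks */
        rcu_barrier_bh();               /* wait for every callback already queued */
        kmem_cache_destroy(flow_entry_cache);
}
module_exit(flow_table_exit);
```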
| .. | .. |
|---|
| 3662 | 3788 | rnp->ffmask |= rdp->grpmask; |
|---|
| 3663 | 3789 | raw_spin_unlock_irqrestore_rcu_node(rnp, flags); |
|---|
| 3664 | 3790 | } |
|---|
| 3665 | | - if (IS_ENABLED(CONFIG_TREE_SRCU)) |
|---|
| 3666 | | - srcu_online_cpu(cpu); |
|---|
| 3667 | 3791 | if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE) |
|---|
| 3668 | 3792 | return 0; /* Too early in boot for scheduler work. */ |
|---|
| 3669 | 3793 | sync_sched_exp_online_cleanup(cpu); |
|---|
| .. | .. |
|---|
| 3691 | 3815 | } |
|---|
| 3692 | 3816 | |
|---|
| 3693 | 3817 | rcutree_affinity_setting(cpu, cpu); |
|---|
| 3694 | | - if (IS_ENABLED(CONFIG_TREE_SRCU)) |
|---|
| 3695 | | - srcu_offline_cpu(cpu); |
|---|
| 3696 | 3818 | return 0; |
|---|
| 3697 | 3819 | } |
|---|
| 3698 | 3820 | |
|---|
| .. | .. |
|---|
| 4160 | 4282 | |
|---|
| 4161 | 4283 | rcu_bootup_announce(); |
|---|
| 4162 | 4284 | rcu_init_geometry(); |
|---|
| 4285 | +#ifndef CONFIG_PREEMPT_RT_FULL |
|---|
| 4163 | 4286 | rcu_init_one(&rcu_bh_state); |
|---|
| 4287 | +#endif |
|---|
| 4164 | 4288 | rcu_init_one(&rcu_sched_state); |
|---|
| 4165 | 4289 | if (dump_tree) |
|---|
| 4166 | 4290 | rcu_dump_rcu_node_tree(&rcu_sched_state); |
|---|
| 4167 | 4291 | __rcu_init_preempt(); |
|---|
| 4168 | | - open_softirq(RCU_SOFTIRQ, rcu_process_callbacks); |
|---|
| 4169 | 4292 | |
|---|
| 4170 | 4293 | /* |
|---|
| 4171 | 4294 | * We don't need protection against CPU-hotplug here because |
|---|