--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -1,26 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
 /*
  * Read-Copy Update mechanism for mutual exclusion (tree-based version)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, you can access it online at
- * http://www.gnu.org/licenses/gpl-2.0.html.
  *
  * Copyright IBM Corporation, 2008
  *
  * Author: Dipankar Sarma <dipankar@in.ibm.com>
- *	   Paul E. McKenney <paulmck@linux.vnet.ibm.com> Hierarchical algorithm
+ *	   Paul E. McKenney <paulmck@linux.ibm.com> Hierarchical algorithm
  *
- * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
+ * Based on the original work by Paul McKenney <paulmck@linux.ibm.com>
  * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
  *
  * For detailed explanation of Read-Copy Update mechanism see -
@@ -30,6 +17,7 @@
 #ifndef __LINUX_RCUTREE_H
 #define __LINUX_RCUTREE_H
 
+void rcu_softirq_qs(void);
 void rcu_note_context_switch(bool preempt);
 int rcu_needs_cpu(u64 basem, u64 *nextevt);
 void rcu_cpu_stall_reset(void);
@@ -44,56 +32,41 @@
 	rcu_note_context_switch(false);
 }
 
-void synchronize_rcu_bh(void);
-void synchronize_sched_expedited(void);
 void synchronize_rcu_expedited(void);
-
-void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func);
-
-/**
- * synchronize_rcu_bh_expedited - Brute-force RCU-bh grace period
- *
- * Wait for an RCU-bh grace period to elapse, but use a "big hammer"
- * approach to force the grace period to end quickly. This consumes
- * significant time on all CPUs and is unfriendly to real-time workloads,
- * so is thus not recommended for any sort of common-case code. In fact,
- * if you are using synchronize_rcu_bh_expedited() in a loop, please
- * restructure your code to batch your updates, and then use a single
- * synchronize_rcu_bh() instead.
- *
- * Note that it is illegal to call this function while holding any lock
- * that is acquired by a CPU-hotplug notifier. And yes, it is also illegal
- * to call this function from a CPU-hotplug notifier. Failing to observe
- * these restriction will result in deadlock.
- */
-static inline void synchronize_rcu_bh_expedited(void)
-{
-	synchronize_sched_expedited();
-}
+void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func);
 
 void rcu_barrier(void);
-void rcu_barrier_bh(void);
-void rcu_barrier_sched(void);
 bool rcu_eqs_special_set(int cpu);
+void rcu_momentary_dyntick_idle(void);
+void kfree_rcu_scheduler_running(void);
+bool rcu_gp_might_be_stalled(void);
 unsigned long get_state_synchronize_rcu(void);
 void cond_synchronize_rcu(unsigned long oldstate);
-unsigned long get_state_synchronize_sched(void);
-void cond_synchronize_sched(unsigned long oldstate);
 
 void rcu_idle_enter(void);
 void rcu_idle_exit(void);
 void rcu_irq_enter(void);
 void rcu_irq_exit(void);
+void rcu_irq_exit_preempt(void);
 void rcu_irq_enter_irqson(void);
 void rcu_irq_exit_irqson(void);
+
+#ifdef CONFIG_PROVE_RCU
+void rcu_irq_exit_check_preempt(void);
+#else
+static inline void rcu_irq_exit_check_preempt(void) { }
+#endif
 
 void exit_rcu(void);
 
 void rcu_scheduler_starting(void);
-extern int rcu_scheduler_active __read_mostly;
+extern int rcu_scheduler_active;
 void rcu_end_inkernel_boot(void);
+bool rcu_inkernel_boot_has_ended(void);
 bool rcu_is_watching(void);
+#ifndef CONFIG_PREEMPTION
 void rcu_all_qs(void);
+#endif
 
 /* RCUtree hotplug events */
 int rcutree_prepare_cpu(unsigned int cpu);
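The deletions above track the RCU flavor consolidation: the _bh and _sched grace-period and barrier primitives are gone, and kfree_call_rcu() has become kvfree_call_rcu(). Below is a minimal caller-side sketch of the migration, assuming the consolidated single-flavor semantics; struct foo, foo_slot, foo_update(), and foo_teardown() are hypothetical, and kvfree_rcu()/rcu_replace_pointer() come from <linux/rcupdate.h> rather than this header.

#include <linux/rcupdate.h>
#include <linux/slab.h>

/* Hypothetical example type; not part of rcutree.h. */
struct foo {
	int data;
	struct rcu_head rh;	/* anchor for the deferred-free path */
};

static struct foo __rcu *foo_slot;	/* hypothetical RCU-protected pointer */

/* Publish a new version, then free the old one after a grace period. */
static void foo_update(struct foo *newp)
{
	struct foo *oldp;

	/* Assumes the caller holds the (hypothetical) update-side lock. */
	oldp = rcu_replace_pointer(foo_slot, newp, true);
	if (oldp)
		kvfree_rcu(oldp, rh);	/* old API: kfree_call_rcu() */
}

/* Update-side waits: the flavor-specific calls collapse into one. */
static void foo_teardown(void)
{
	synchronize_rcu();	/* was synchronize_rcu_bh()/synchronize_sched() */
	rcu_barrier();		/* was rcu_barrier_bh()/rcu_barrier_sched() */
}

The advice in the deleted synchronize_rcu_bh_expedited() kernel-doc still applies in spirit: batch updates and pay for a single grace period rather than looping on expedited waits.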
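get_state_synchronize_sched() and cond_synchronize_sched() likewise fold into the surviving get_state_synchronize_rcu()/cond_synchronize_rcu() pair. A hedged sketch of how that pair is typically used; foo_poll_example() is hypothetical:

/* Snapshot grace-period state, do other work, then wait only if needed. */
static void foo_poll_example(void)
{
	unsigned long gp_state;

	gp_state = get_state_synchronize_rcu();	/* snapshot the current GP */

	/* ... unrelated processing; a grace period may elapse meanwhile ... */

	cond_synchronize_rcu(gp_state);	/* no-op if that GP already ended */
}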
|---|