2023-12-09 b22da3d8526a935aa31e086e63f60ff3246cb61c
kernel/include/linux/rcutree.h
@@ -1,26 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
 /*
  * Read-Copy Update mechanism for mutual exclusion (tree-based version)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, you can access it online at
- * http://www.gnu.org/licenses/gpl-2.0.html.
  *
  * Copyright IBM Corporation, 2008
  *
  * Author: Dipankar Sarma <dipankar@in.ibm.com>
- *	   Paul E. McKenney <paulmck@linux.vnet.ibm.com> Hierarchical algorithm
+ *	   Paul E. McKenney <paulmck@linux.ibm.com> Hierarchical algorithm
  *
- * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
+ * Based on the original work by Paul McKenney <paulmck@linux.ibm.com>
  * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
  *
  * For detailed explanation of Read-Copy Update mechanism see -
@@ -30,6 +17,7 @@
 #ifndef __LINUX_RCUTREE_H
 #define __LINUX_RCUTREE_H
 
+void rcu_softirq_qs(void);
 void rcu_note_context_switch(bool preempt);
 int rcu_needs_cpu(u64 basem, u64 *nextevt);
 void rcu_cpu_stall_reset(void);
@@ -44,64 +32,41 @@
 	rcu_note_context_switch(false);
 }
 
-#ifdef CONFIG_PREEMPT_RT_FULL
-# define synchronize_rcu_bh	synchronize_rcu
-#else
-void synchronize_rcu_bh(void);
-#endif
-void synchronize_sched_expedited(void);
 void synchronize_rcu_expedited(void);
-
-void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func);
-
-/**
- * synchronize_rcu_bh_expedited - Brute-force RCU-bh grace period
- *
- * Wait for an RCU-bh grace period to elapse, but use a "big hammer"
- * approach to force the grace period to end quickly. This consumes
- * significant time on all CPUs and is unfriendly to real-time workloads,
- * so is thus not recommended for any sort of common-case code. In fact,
- * if you are using synchronize_rcu_bh_expedited() in a loop, please
- * restructure your code to batch your updates, and then use a single
- * synchronize_rcu_bh() instead.
- *
- * Note that it is illegal to call this function while holding any lock
- * that is acquired by a CPU-hotplug notifier. And yes, it is also illegal
- * to call this function from a CPU-hotplug notifier. Failing to observe
- * these restriction will result in deadlock.
- */
-static inline void synchronize_rcu_bh_expedited(void)
-{
-	synchronize_sched_expedited();
-}
+void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func);
 
 void rcu_barrier(void);
-#ifdef CONFIG_PREEMPT_RT_FULL
-# define rcu_barrier_bh		rcu_barrier
-#else
-void rcu_barrier_bh(void);
-#endif
-void rcu_barrier_sched(void);
 bool rcu_eqs_special_set(int cpu);
+void rcu_momentary_dyntick_idle(void);
+void kfree_rcu_scheduler_running(void);
+bool rcu_gp_might_be_stalled(void);
 unsigned long get_state_synchronize_rcu(void);
 void cond_synchronize_rcu(unsigned long oldstate);
-unsigned long get_state_synchronize_sched(void);
-void cond_synchronize_sched(unsigned long oldstate);
 
 void rcu_idle_enter(void);
 void rcu_idle_exit(void);
 void rcu_irq_enter(void);
 void rcu_irq_exit(void);
+void rcu_irq_exit_preempt(void);
 void rcu_irq_enter_irqson(void);
 void rcu_irq_exit_irqson(void);
+
+#ifdef CONFIG_PROVE_RCU
+void rcu_irq_exit_check_preempt(void);
+#else
+static inline void rcu_irq_exit_check_preempt(void) { }
+#endif
 
 void exit_rcu(void);
 
 void rcu_scheduler_starting(void);
 extern int rcu_scheduler_active __read_mostly;
 void rcu_end_inkernel_boot(void);
+bool rcu_inkernel_boot_has_ended(void);
 bool rcu_is_watching(void);
+#ifndef CONFIG_PREEMPTION
 void rcu_all_qs(void);
+#endif
 
 /* RCUtree hotplug events */
 int rcutree_prepare_cpu(unsigned int cpu);
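Context for the last hunk: the removed declarations (synchronize_rcu_bh(), synchronize_sched_expedited(), rcu_barrier_bh(), rcu_barrier_sched(), get_state_synchronize_sched(), cond_synchronize_sched()) and the kfree_call_rcu() to kvfree_call_rcu() change reflect the consolidation of the RCU-bh and RCU-sched flavors into plain RCU, so callers use the single synchronize_rcu()/rcu_barrier()/kfree_rcu() family. A minimal caller-side sketch follows; it is not part of this patch, and struct demo, demo_head, demo_lock, demo_read(), and demo_update() are hypothetical names chosen for illustration.

/* Sketch: one consolidated RCU flavor on the reader and updater side.
 * Hypothetical example; struct demo and the demo_* symbols are invented. */
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct demo {
	int value;
	struct rcu_head rcu;
};

static struct demo __rcu *demo_head;
static DEFINE_SPINLOCK(demo_lock);

/* Reader: a plain rcu_read_lock() section; with the flavors consolidated,
 * the same grace period also covers softirq and preempt-disabled readers
 * that formerly needed the -bh or -sched variants. */
static int demo_read(void)
{
	struct demo *p;
	int val = -1;

	rcu_read_lock();
	p = rcu_dereference(demo_head);
	if (p)
		val = p->value;
	rcu_read_unlock();
	return val;
}

/* Updater: publish a replacement, then free the old element after a single
 * consolidated grace period. On kernels with this header, kfree_rcu()
 * funnels into kvfree_call_rcu() declared above. */
static void demo_update(int value)
{
	struct demo *newp, *oldp;

	newp = kmalloc(sizeof(*newp), GFP_KERNEL);
	if (!newp)
		return;
	newp->value = value;

	spin_lock(&demo_lock);
	oldp = rcu_dereference_protected(demo_head,
					 lockdep_is_held(&demo_lock));
	rcu_assign_pointer(demo_head, newp);
	spin_unlock(&demo_lock);

	if (oldp)
		kfree_rcu(oldp, rcu);
}

Code that previously called synchronize_rcu_bh_expedited() or synchronize_sched_expedited() would, under the same assumption, call synchronize_rcu_expedited() from this header instead.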