2024-12-19 9370bb92b2d16684ee45cf24e879c93c509162da
kernel/kernel/rcu/tiny.c
@@ -1,23 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, you can access it online at
- * http://www.gnu.org/licenses/gpl-2.0.html.
- *
  * Copyright IBM Corporation, 2008
  *
- * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+ * Author: Paul E. McKenney <paulmck@linux.ibm.com>
  *
  * For detailed explanation of Read-Copy Update mechanism see -
  *	Documentation/RCU
@@ -35,6 +22,8 @@
 #include <linux/time.h>
 #include <linux/cpu.h>
 #include <linux/prefetch.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
 
 #include "rcu.h"
 
@@ -46,69 +35,27 @@
 };
 
 /* Definition for rcupdate control block. */
-static struct rcu_ctrlblk rcu_sched_ctrlblk = {
-	.donetail	= &rcu_sched_ctrlblk.rcucblist,
-	.curtail	= &rcu_sched_ctrlblk.rcucblist,
+static struct rcu_ctrlblk rcu_ctrlblk = {
+	.donetail	= &rcu_ctrlblk.rcucblist,
+	.curtail	= &rcu_ctrlblk.rcucblist,
 };
 
-static struct rcu_ctrlblk rcu_bh_ctrlblk = {
-	.donetail	= &rcu_bh_ctrlblk.rcucblist,
-	.curtail	= &rcu_bh_ctrlblk.rcucblist,
-};
-
-void rcu_barrier_bh(void)
+void rcu_barrier(void)
 {
-	wait_rcu_gp(call_rcu_bh);
+	wait_rcu_gp(call_rcu);
 }
-EXPORT_SYMBOL(rcu_barrier_bh);
+EXPORT_SYMBOL(rcu_barrier);
 
-void rcu_barrier_sched(void)
+/* Record an rcu quiescent state. */
+void rcu_qs(void)
 {
-	wait_rcu_gp(call_rcu_sched);
-}
-EXPORT_SYMBOL(rcu_barrier_sched);
+	unsigned long flags;
 
-/*
- * Helper function for rcu_sched_qs() and rcu_bh_qs().
- * Also irqs are disabled to avoid confusion due to interrupt handlers
- * invoking call_rcu().
- */
-static int rcu_qsctr_help(struct rcu_ctrlblk *rcp)
-{
-	if (rcp->donetail != rcp->curtail) {
-		rcp->donetail = rcp->curtail;
-		return 1;
+	local_irq_save(flags);
+	if (rcu_ctrlblk.donetail != rcu_ctrlblk.curtail) {
+		rcu_ctrlblk.donetail = rcu_ctrlblk.curtail;
+		raise_softirq_irqoff(RCU_SOFTIRQ);
 	}
-
-	return 0;
-}
-
-/*
- * Record an rcu quiescent state. And an rcu_bh quiescent state while we
- * are at it, given that any rcu quiescent state is also an rcu_bh
- * quiescent state. Use "+" instead of "||" to defeat short circuiting.
- */
-void rcu_sched_qs(void)
-{
-	unsigned long flags;
-
-	local_irq_save(flags);
-	if (rcu_qsctr_help(&rcu_sched_ctrlblk) +
-	    rcu_qsctr_help(&rcu_bh_ctrlblk))
-		raise_softirq(RCU_SOFTIRQ);
-	local_irq_restore(flags);
-}
-
-/*
- * Record an rcu_bh quiescent state.
- */
-void rcu_bh_qs(void)
-{
-	unsigned long flags;
-
-	local_irq_save(flags);
-	if (rcu_qsctr_help(&rcu_bh_ctrlblk))
-		raise_softirq(RCU_SOFTIRQ);
 	local_irq_restore(flags);
 }
 
@@ -118,36 +65,60 @@
  * be called from hardirq context. It is normally called from the
  * scheduling-clock interrupt.
  */
-void rcu_check_callbacks(int user)
+void rcu_sched_clock_irq(int user)
 {
-	if (user)
-		rcu_sched_qs();
-	if (user || !in_softirq())
-		rcu_bh_qs();
+	if (user) {
+		rcu_qs();
+	} else if (rcu_ctrlblk.donetail != rcu_ctrlblk.curtail) {
+		set_tsk_need_resched(current);
+		set_preempt_need_resched();
+	}
 }
 
 /*
- * Invoke the RCU callbacks on the specified rcu_ctrlkblk structure
- * whose grace period has elapsed.
+ * Reclaim the specified callback, either by invoking it for non-kfree cases or
+ * freeing it directly (for kfree). Return true if kfreeing, false otherwise.
  */
-static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
+static inline bool rcu_reclaim_tiny(struct rcu_head *head)
+{
+	rcu_callback_t f;
+	unsigned long offset = (unsigned long)head->func;
+
+	rcu_lock_acquire(&rcu_callback_map);
+	if (__is_kvfree_rcu_offset(offset)) {
+		trace_rcu_invoke_kvfree_callback("", head, offset);
+		kvfree((void *)head - offset);
+		rcu_lock_release(&rcu_callback_map);
+		return true;
+	}
+
+	trace_rcu_invoke_callback("", head);
+	f = head->func;
+	WRITE_ONCE(head->func, (rcu_callback_t)0L);
+	f(head);
+	rcu_lock_release(&rcu_callback_map);
+	return false;
+}
+
+/* Invoke the RCU callbacks whose grace period has elapsed. */
+static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused)
 {
 	struct rcu_head *next, *list;
 	unsigned long flags;
 
 	/* Move the ready-to-invoke callbacks to a local list. */
 	local_irq_save(flags);
-	if (rcp->donetail == &rcp->rcucblist) {
+	if (rcu_ctrlblk.donetail == &rcu_ctrlblk.rcucblist) {
 		/* No callbacks ready, so just leave. */
 		local_irq_restore(flags);
 		return;
 	}
-	list = rcp->rcucblist;
-	rcp->rcucblist = *rcp->donetail;
-	*rcp->donetail = NULL;
-	if (rcp->curtail == rcp->donetail)
-		rcp->curtail = &rcp->rcucblist;
-	rcp->donetail = &rcp->rcucblist;
+	list = rcu_ctrlblk.rcucblist;
+	rcu_ctrlblk.rcucblist = *rcu_ctrlblk.donetail;
+	*rcu_ctrlblk.donetail = NULL;
+	if (rcu_ctrlblk.curtail == rcu_ctrlblk.donetail)
+		rcu_ctrlblk.curtail = &rcu_ctrlblk.rcucblist;
+	rcu_ctrlblk.donetail = &rcu_ctrlblk.rcucblist;
 	local_irq_restore(flags);
 
 	/* Invoke the callbacks on the local list. */
@@ -156,43 +127,37 @@
 		prefetch(next);
 		debug_rcu_head_unqueue(list);
 		local_bh_disable();
-		__rcu_reclaim("", list);
+		rcu_reclaim_tiny(list);
 		local_bh_enable();
 		list = next;
 	}
 }
 
-static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused)
-{
-	__rcu_process_callbacks(&rcu_sched_ctrlblk);
-	__rcu_process_callbacks(&rcu_bh_ctrlblk);
-}
-
 /*
  * Wait for a grace period to elapse. But it is illegal to invoke
- * synchronize_sched() from within an RCU read-side critical section.
- * Therefore, any legal call to synchronize_sched() is a quiescent
- * state, and so on a UP system, synchronize_sched() need do nothing.
- * Ditto for synchronize_rcu_bh(). (But Lai Jiangshan points out the
- * benefits of doing might_sleep() to reduce latency.)
+ * synchronize_rcu() from within an RCU read-side critical section.
+ * Therefore, any legal call to synchronize_rcu() is a quiescent
+ * state, and so on a UP system, synchronize_rcu() need do nothing.
+ * (But Lai Jiangshan points out the benefits of doing might_sleep()
+ * to reduce latency.)
 *
 * Cool, huh? (Due to Josh Triplett.)
 */
-void synchronize_sched(void)
+void synchronize_rcu(void)
 {
 	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
 			 lock_is_held(&rcu_lock_map) ||
 			 lock_is_held(&rcu_sched_lock_map),
-			 "Illegal synchronize_sched() in RCU read-side critical section");
+			 "Illegal synchronize_rcu() in RCU read-side critical section");
 }
-EXPORT_SYMBOL_GPL(synchronize_sched);
+EXPORT_SYMBOL_GPL(synchronize_rcu);
 
 /*
- * Helper function for call_rcu() and call_rcu_bh().
+ * Post an RCU callback to be invoked after the end of an RCU grace
+ * period. But since we have but one CPU, that would be after any
+ * quiescent state.
 */
-static void __call_rcu(struct rcu_head *head,
-		       rcu_callback_t func,
-		       struct rcu_ctrlblk *rcp)
+void call_rcu(struct rcu_head *head, rcu_callback_t func)
 {
 	unsigned long flags;
 
@@ -201,39 +166,20 @@
 	head->next = NULL;
 
 	local_irq_save(flags);
-	*rcp->curtail = head;
-	rcp->curtail = &head->next;
+	*rcu_ctrlblk.curtail = head;
+	rcu_ctrlblk.curtail = &head->next;
 	local_irq_restore(flags);
 
 	if (unlikely(is_idle_task(current))) {
-		/* force scheduling for rcu_sched_qs() */
+		/* force scheduling for rcu_qs() */
 		resched_cpu(0);
 	}
 }
-
-/*
- * Post an RCU callback to be invoked after the end of an RCU-sched grace
- * period. But since we have but one CPU, that would be after any
- * quiescent state.
- */
-void call_rcu_sched(struct rcu_head *head, rcu_callback_t func)
-{
-	__call_rcu(head, func, &rcu_sched_ctrlblk);
-}
-EXPORT_SYMBOL_GPL(call_rcu_sched);
-
-/*
- * Post an RCU bottom-half callback to be invoked after any subsequent
- * quiescent state.
- */
-void call_rcu_bh(struct rcu_head *head, rcu_callback_t func)
-{
-	__call_rcu(head, func, &rcu_bh_ctrlblk);
-}
-EXPORT_SYMBOL_GPL(call_rcu_bh);
+EXPORT_SYMBOL_GPL(call_rcu);
 
 void __init rcu_init(void)
 {
 	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
 	rcu_early_boot_tests();
+	srcu_init();
 }
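
For context, a minimal caller-side sketch (not part of this patch) of the consolidated API the hunks above leave behind: with the rcu_bh and rcu_sched flavors folded into the single rcu_ctrlblk, users queue deferred frees with call_rcu(), wait out a grace period with synchronize_rcu(), and drain pending callbacks with rcu_barrier(). The struct foo, foo_release(), foo_retire(), and foo_exit() names are hypothetical.

/* Hypothetical caller, not part of kernel/rcu/tiny.c. */
#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo {
	int data;
	struct rcu_head rcu;	/* queued on rcu_ctrlblk.curtail by call_rcu() */
};

static void foo_release(struct rcu_head *rhp)
{
	/* Runs from rcu_process_callbacks() once a grace period has elapsed. */
	kfree(container_of(rhp, struct foo, rcu));
}

static void foo_retire(struct foo *fp)
{
	/* Asynchronous: queue the callback and return immediately. */
	call_rcu(&fp->rcu, foo_release);
}

static void foo_retire_sync(struct foo *fp)
{
	/* Synchronous: wait for any readers, then free directly. */
	synchronize_rcu();
	kfree(fp);
}

static void foo_exit(void)
{
	/* Before unload, wait for all previously queued callbacks to run. */
	rcu_barrier();
}

The synchronous variant leans on the property noted in the synchronize_rcu() comment above: on this uniprocessor-only build, any legal call to synchronize_rcu() is itself a quiescent state, so the wait is essentially free.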