2024-12-19 9370bb92b2d16684ee45cf24e879c93c509162da
kernel/kernel/rcu/sync.c
@@ -1,19 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * RCU-based infrastructure for lightweight reader-writer locking
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, you can access it online at
- * http://www.gnu.org/licenses/gpl-2.0.html.
  *
  * Copyright (c) 2015, Red Hat, Inc.
  *
@@ -23,65 +10,18 @@
 #include <linux/rcu_sync.h>
 #include <linux/sched.h>
 
-#ifdef CONFIG_PROVE_RCU
-#define __INIT_HELD(func)	.held = func,
-#else
-#define __INIT_HELD(func)
-#endif
-
-static const struct {
-	void (*sync)(void);
-	void (*call)(struct rcu_head *, void (*)(struct rcu_head *));
-	void (*wait)(void);
-#ifdef CONFIG_PROVE_RCU
-	int (*held)(void);
-#endif
-} gp_ops[] = {
-	[RCU_SYNC] = {
-		.sync = synchronize_rcu,
-		.call = call_rcu,
-		.wait = rcu_barrier,
-		__INIT_HELD(rcu_read_lock_held)
-	},
-	[RCU_SCHED_SYNC] = {
-		.sync = synchronize_sched,
-		.call = call_rcu_sched,
-		.wait = rcu_barrier_sched,
-		__INIT_HELD(rcu_read_lock_sched_held)
-	},
-	[RCU_BH_SYNC] = {
-		.sync = synchronize_rcu_bh,
-		.call = call_rcu_bh,
-		.wait = rcu_barrier_bh,
-		__INIT_HELD(rcu_read_lock_bh_held)
-	},
-};
-
-enum { GP_IDLE = 0, GP_PENDING, GP_PASSED };
-enum { CB_IDLE = 0, CB_PENDING, CB_REPLAY };
+enum { GP_IDLE = 0, GP_ENTER, GP_PASSED, GP_EXIT, GP_REPLAY };
 
 #define	rss_lock	gp_wait.lock
-
-#ifdef CONFIG_PROVE_RCU
-void rcu_sync_lockdep_assert(struct rcu_sync *rsp)
-{
-	RCU_LOCKDEP_WARN(!gp_ops[rsp->gp_type].held(),
-			 "suspicious rcu_sync_is_idle() usage");
-}
-
-EXPORT_SYMBOL_GPL(rcu_sync_lockdep_assert);
-#endif
 
 /**
  * rcu_sync_init() - Initialize an rcu_sync structure
  * @rsp: Pointer to rcu_sync structure to be initialized
- * @type: Flavor of RCU with which to synchronize rcu_sync structure
  */
-void rcu_sync_init(struct rcu_sync *rsp, enum rcu_sync_type type)
+void rcu_sync_init(struct rcu_sync *rsp)
 {
 	memset(rsp, 0, sizeof(*rsp));
 	init_waitqueue_head(&rsp->gp_wait);
-	rsp->gp_type = type;
 }
 
 /**
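
Annotation (not part of the patch): the five-value enum above collapses the old
gp_state/cb_state pair into a single state machine. As read from the hunks
below, the transitions are:

    GP_IDLE   --rcu_sync_enter()-------------------------> GP_ENTER
    GP_ENTER  --grace period elapses---------------------> GP_PASSED
    GP_PASSED --last rcu_sync_exit() (callback queued)---> GP_EXIT
    GP_EXIT   --grace period elapses, no new activity----> GP_IDLE
    GP_EXIT   --enter()/exit() pair before the GP ends---> GP_REPLAY
    GP_REPLAY --grace period elapses (callback requeued)-> GP_EXIT
    GP_EXIT or GP_REPLAY --an rcu_sync_enter() is still
                           pending when the callback runs
                           (gp_count != 0)---------------> GP_PASSED
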
@@ -97,6 +37,70 @@
 {
	rsp->gp_count++;
	rsp->gp_state = GP_PASSED;
+}
+
+
+static void rcu_sync_func(struct rcu_head *rhp);
+
+static void rcu_sync_call(struct rcu_sync *rsp)
+{
+	call_rcu(&rsp->cb_head, rcu_sync_func);
+}
+
+/**
+ * rcu_sync_func() - Callback function managing reader access to fastpath
+ * @rhp: Pointer to rcu_head in rcu_sync structure to use for synchronization
+ *
+ * This function is passed to call_rcu() by rcu_sync_enter() and
+ * rcu_sync_exit(), so that it is invoked after a grace period following
+ * that invocation of enter/exit.
+ *
+ * If it is called by rcu_sync_enter() it signals that all the readers were
+ * switched onto the slow path.
+ *
+ * If it is called by rcu_sync_exit() it takes action based on events that
+ * have taken place in the meantime, so that closely spaced rcu_sync_enter()
+ * and rcu_sync_exit() pairs need not wait for a grace period.
+ *
+ * If another rcu_sync_enter() is invoked before the grace period
+ * ended, reset state to allow the next rcu_sync_exit() to let the
+ * readers back onto their fastpaths (after a grace period). If both
+ * another rcu_sync_enter() and its matching rcu_sync_exit() are invoked
+ * before the grace period ended, re-invoke call_rcu() on behalf of that
+ * rcu_sync_exit(). Otherwise, set all state back to idle so that readers
+ * can again use their fastpaths.
+ */
+static void rcu_sync_func(struct rcu_head *rhp)
+{
+	struct rcu_sync *rsp = container_of(rhp, struct rcu_sync, cb_head);
+	unsigned long flags;
+
+	WARN_ON_ONCE(READ_ONCE(rsp->gp_state) == GP_IDLE);
+	WARN_ON_ONCE(READ_ONCE(rsp->gp_state) == GP_PASSED);
+
+	spin_lock_irqsave(&rsp->rss_lock, flags);
+	if (rsp->gp_count) {
+		/*
+		 * We're at least a GP after the GP_IDLE->GP_ENTER transition.
+		 */
+		WRITE_ONCE(rsp->gp_state, GP_PASSED);
+		wake_up_locked(&rsp->gp_wait);
+	} else if (rsp->gp_state == GP_REPLAY) {
+		/*
+		 * A new rcu_sync_exit() has happened; requeue the callback to
+		 * catch a later GP.
+		 */
+		WRITE_ONCE(rsp->gp_state, GP_EXIT);
+		rcu_sync_call(rsp);
+	} else {
+		/*
+		 * We're at least a GP after the last rcu_sync_exit(); everybody
+		 * will now have observed the write side critical section.
+		 * Let 'em rip!
+		 */
+		WRITE_ONCE(rsp->gp_state, GP_IDLE);
+	}
+	spin_unlock_irqrestore(&rsp->rss_lock, flags);
 }
 
 /**
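
Annotation (not part of the patch): the WRITE_ONCE() stores above pair with a
lockless READ_ONCE() on the reader fast path. The matching check lives in
include/linux/rcu_sync.h, which this diff does not show; a sketch of its shape
after this series, assuming the header keeps a lockdep assertion (the
rcu_read_lock_any_held() call is an assumption here, not taken from this diff):

    static inline bool rcu_sync_is_idle(struct rcu_sync *rsp)
    {
            RCU_LOCKDEP_WARN(!rcu_read_lock_any_held(),
                             "suspicious rcu_sync_is_idle() usage");
            /* Pairs with the WRITE_ONCE() stores in rcu_sync_func(). */
            return !READ_ONCE(rsp->gp_state); /* GP_IDLE == 0 */
    }
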
@@ -116,85 +120,43 @@
  */
 void rcu_sync_enter(struct rcu_sync *rsp)
 {
-	bool need_wait, need_sync;
+	int gp_state;
 
 	spin_lock_irq(&rsp->rss_lock);
-	need_wait = rsp->gp_count++;
-	need_sync = rsp->gp_state == GP_IDLE;
-	if (need_sync)
-		rsp->gp_state = GP_PENDING;
+	gp_state = rsp->gp_state;
+	if (gp_state == GP_IDLE) {
+		WRITE_ONCE(rsp->gp_state, GP_ENTER);
+		WARN_ON_ONCE(rsp->gp_count);
+		/*
+		 * Note that we could simply do rcu_sync_call(rsp) here and
+		 * avoid the "if (gp_state == GP_IDLE)" block below.
+		 *
+		 * However, synchronize_rcu() can be faster if rcu_expedited
+		 * or rcu_blocking_is_gp() is true.
+		 *
+		 * Another reason is that we can't wait for an rcu callback if
+		 * we are called at early boot time, but this shouldn't happen.
+		 */
+	}
+	rsp->gp_count++;
 	spin_unlock_irq(&rsp->rss_lock);
 
-	BUG_ON(need_wait && need_sync);
-
-	if (need_sync) {
-		gp_ops[rsp->gp_type].sync();
-		rsp->gp_state = GP_PASSED;
-		wake_up_all(&rsp->gp_wait);
-	} else if (need_wait) {
-		wait_event(rsp->gp_wait, rsp->gp_state == GP_PASSED);
-	} else {
+	if (gp_state == GP_IDLE) {
 		/*
-		 * Possible when there's a pending CB from a rcu_sync_exit().
-		 * Nobody has yet been allowed the 'fast' path and thus we can
-		 * avoid doing any sync(). The callback will get 'dropped'.
+		 * See the comment above; this simply does the "synchronous"
+		 * call_rcu(rcu_sync_func), which does GP_ENTER -> GP_PASSED.
 		 */
-		BUG_ON(rsp->gp_state != GP_PASSED);
+		synchronize_rcu();
+		rcu_sync_func(&rsp->cb_head);
+		/* Not really needed, wait_event() would see GP_PASSED. */
+		return;
 	}
+
+	wait_event(rsp->gp_wait, READ_ONCE(rsp->gp_state) >= GP_PASSED);
 }
 
 /**
- * rcu_sync_func() - Callback function managing reader access to fastpath
- * @rhp: Pointer to rcu_head in rcu_sync structure to use for synchronization
- *
- * This function is passed to one of the call_rcu() functions by
- * rcu_sync_exit(), so that it is invoked after a grace period following the
- * that invocation of rcu_sync_exit(). It takes action based on events that
- * have taken place in the meantime, so that closely spaced rcu_sync_enter()
- * and rcu_sync_exit() pairs need not wait for a grace period.
- *
- * If another rcu_sync_enter() is invoked before the grace period
- * ended, reset state to allow the next rcu_sync_exit() to let the
- * readers back onto their fastpaths (after a grace period). If both
- * another rcu_sync_enter() and its matching rcu_sync_exit() are invoked
- * before the grace period ended, re-invoke call_rcu() on behalf of that
- * rcu_sync_exit(). Otherwise, set all state back to idle so that readers
- * can again use their fastpaths.
- */
-static void rcu_sync_func(struct rcu_head *rhp)
-{
-	struct rcu_sync *rsp = container_of(rhp, struct rcu_sync, cb_head);
-	unsigned long flags;
-
-	BUG_ON(rsp->gp_state != GP_PASSED);
-	BUG_ON(rsp->cb_state == CB_IDLE);
-
-	spin_lock_irqsave(&rsp->rss_lock, flags);
-	if (rsp->gp_count) {
-		/*
-		 * A new rcu_sync_begin() has happened; drop the callback.
-		 */
-		rsp->cb_state = CB_IDLE;
-	} else if (rsp->cb_state == CB_REPLAY) {
-		/*
-		 * A new rcu_sync_exit() has happened; requeue the callback
-		 * to catch a later GP.
-		 */
-		rsp->cb_state = CB_PENDING;
-		gp_ops[rsp->gp_type].call(&rsp->cb_head, rcu_sync_func);
-	} else {
-		/*
-		 * We're at least a GP after rcu_sync_exit(); eveybody will now
-		 * have observed the write side critical section. Let 'em rip!.
-		 */
-		rsp->cb_state = CB_IDLE;
-		rsp->gp_state = GP_IDLE;
-	}
-	spin_unlock_irqrestore(&rsp->rss_lock, flags);
-}
-
-/**
- * rcu_sync_exit() - Allow readers back onto fast patch after grace period
+ * rcu_sync_exit() - Allow readers back onto fast path after grace period
  * @rsp: Pointer to rcu_sync structure to use for synchronization
  *
  * This function is used by updaters who have completed, and can therefore
@@ -205,13 +167,16 @@
  */
 void rcu_sync_exit(struct rcu_sync *rsp)
 {
+	WARN_ON_ONCE(READ_ONCE(rsp->gp_state) == GP_IDLE);
+	WARN_ON_ONCE(READ_ONCE(rsp->gp_count) == 0);
+
 	spin_lock_irq(&rsp->rss_lock);
 	if (!--rsp->gp_count) {
-		if (rsp->cb_state == CB_IDLE) {
-			rsp->cb_state = CB_PENDING;
-			gp_ops[rsp->gp_type].call(&rsp->cb_head, rcu_sync_func);
-		} else if (rsp->cb_state == CB_PENDING) {
-			rsp->cb_state = CB_REPLAY;
+		if (rsp->gp_state == GP_PASSED) {
+			WRITE_ONCE(rsp->gp_state, GP_EXIT);
+			rcu_sync_call(rsp);
+		} else if (rsp->gp_state == GP_EXIT) {
+			WRITE_ONCE(rsp->gp_state, GP_REPLAY);
 		}
 	}
 	spin_unlock_irq(&rsp->rss_lock);
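
Annotation (not part of the patch): a concrete interleaving that exercises the
GP_REPLAY arm above (a writer enters and exits twice in quick succession):

    rcu_sync_enter()   GP_IDLE   -> GP_ENTER, then GP_PASSED once a GP elapses
    rcu_sync_exit()    GP_PASSED -> GP_EXIT, rcu_sync_func() queued
    rcu_sync_enter()   no wait: GP_EXIT >= GP_PASSED, gp_count back to 1
    rcu_sync_exit()    GP_EXIT   -> GP_REPLAY, queued callback must run again
    callback fires     GP_REPLAY -> GP_EXIT, callback requeued for another GP
    callback fires     GP_EXIT   -> GP_IDLE, readers regain the fast path
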
@@ -223,18 +188,19 @@
  */
 void rcu_sync_dtor(struct rcu_sync *rsp)
 {
-	int cb_state;
+	int gp_state;
 
-	BUG_ON(rsp->gp_count);
+	WARN_ON_ONCE(READ_ONCE(rsp->gp_count));
+	WARN_ON_ONCE(READ_ONCE(rsp->gp_state) == GP_PASSED);
 
 	spin_lock_irq(&rsp->rss_lock);
-	if (rsp->cb_state == CB_REPLAY)
-		rsp->cb_state = CB_PENDING;
-	cb_state = rsp->cb_state;
+	if (rsp->gp_state == GP_REPLAY)
+		WRITE_ONCE(rsp->gp_state, GP_EXIT);
+	gp_state = rsp->gp_state;
 	spin_unlock_irq(&rsp->rss_lock);
 
-	if (cb_state != CB_IDLE) {
-		gp_ops[rsp->gp_type].wait();
-		BUG_ON(rsp->cb_state != CB_IDLE);
+	if (gp_state != GP_IDLE) {
+		rcu_barrier();
+		WARN_ON_ONCE(rsp->gp_state != GP_IDLE);
 	}
 }
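
Annotation (not part of the patch): with the gp_type plumbing gone, the API is
RCU-flavor-free. A minimal usage sketch under assumed names (my_sync, reader(),
writer(), and the do_*_thing() helpers are hypothetical; the in-tree user of
this pattern is the percpu-rwsem implementation):

    #include <linux/rcu_sync.h>
    #include <linux/rcupdate.h>

    static struct rcu_sync my_sync;     /* rcu_sync_init(&my_sync) at setup */

    static void reader(void)
    {
            rcu_read_lock();
            if (rcu_sync_is_idle(&my_sync))
                    do_fast_thing();    /* no writer for at least a GP */
            else
                    do_slow_thing();    /* coordinate with the writer */
            rcu_read_unlock();
    }

    static void writer(void)
    {
            rcu_sync_enter(&my_sync);   /* blocks until readers see slow path */
            /* ... write-side critical section ... */
            rcu_sync_exit(&my_sync);    /* fast path restored after a GP */
    }
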