.. | .. |
| 1 | +// SPDX-License-Identifier: GPL-2.0+ |
1 | 2 | /* |
2 | 3 | * Sleepable Read-Copy Update mechanism for mutual exclusion, |
3 | 4 | * tiny version for non-preemptible single-CPU use. |
4 | 5 | * |
5 | | - * This program is free software; you can redistribute it and/or modify |
6 | | - * it under the terms of the GNU General Public License as published by |
7 | | - * the Free Software Foundation; either version 2 of the License, or |
8 | | - * (at your option) any later version. |
9 | | - * |
10 | | - * This program is distributed in the hope that it will be useful, |
11 | | - * but WITHOUT ANY WARRANTY; without even the implied warranty of |
12 | | - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
13 | | - * GNU General Public License for more details. |
14 | | - * |
15 | | - * You should have received a copy of the GNU General Public License |
16 | | - * along with this program; if not, you can access it online at |
17 | | - * http://www.gnu.org/licenses/gpl-2.0.html. |
18 | | - * |
19 | 6 | * Copyright (C) IBM Corporation, 2017 |
20 | 7 | * |
21 | | - * Author: Paul McKenney <paulmck@us.ibm.com> |
| 8 | + * Author: Paul McKenney <paulmck@linux.ibm.com> |
22 | 9 | */ |
23 | 10 | |
24 | 11 | #include <linux/export.h> |
.. | .. |
34 | 21 | #include "rcu.h" |
35 | 22 | |
36 | 23 | int rcu_scheduler_active __read_mostly; |
| 24 | +static LIST_HEAD(srcu_boot_list); |
| 25 | +static bool srcu_init_done; |
37 | 26 | |
38 | | -static int init_srcu_struct_fields(struct srcu_struct *sp) |
| 27 | +static int init_srcu_struct_fields(struct srcu_struct *ssp) |
39 | 28 | { |
40 | | - sp->srcu_lock_nesting[0] = 0; |
41 | | - sp->srcu_lock_nesting[1] = 0; |
42 | | - init_swait_queue_head(&sp->srcu_wq); |
43 | | - sp->srcu_cb_head = NULL; |
44 | | - sp->srcu_cb_tail = &sp->srcu_cb_head; |
45 | | - sp->srcu_gp_running = false; |
46 | | - sp->srcu_gp_waiting = false; |
47 | | - sp->srcu_idx = 0; |
48 | | - INIT_WORK(&sp->srcu_work, srcu_drive_gp); |
| 29 | + ssp->srcu_lock_nesting[0] = 0; |
| 30 | + ssp->srcu_lock_nesting[1] = 0; |
| 31 | + init_swait_queue_head(&ssp->srcu_wq); |
| 32 | + ssp->srcu_cb_head = NULL; |
| 33 | + ssp->srcu_cb_tail = &ssp->srcu_cb_head; |
| 34 | + ssp->srcu_gp_running = false; |
| 35 | + ssp->srcu_gp_waiting = false; |
| 36 | + ssp->srcu_idx = 0; |
| 37 | + ssp->srcu_idx_max = 0; |
| 38 | + INIT_WORK(&ssp->srcu_work, srcu_drive_gp); |
| 39 | + INIT_LIST_HEAD(&ssp->srcu_work.entry); |
49 | 40 | return 0; |
50 | 41 | } |
51 | 42 | |
52 | 43 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
53 | 44 | |
54 | | -int __init_srcu_struct(struct srcu_struct *sp, const char *name, |
| 45 | +int __init_srcu_struct(struct srcu_struct *ssp, const char *name, |
55 | 46 | struct lock_class_key *key) |
56 | 47 | { |
57 | 48 | /* Don't re-initialize a lock while it is held. */ |
58 | | - debug_check_no_locks_freed((void *)sp, sizeof(*sp)); |
59 | | - lockdep_init_map(&sp->dep_map, name, key, 0); |
60 | | - return init_srcu_struct_fields(sp); |
| 49 | + debug_check_no_locks_freed((void *)ssp, sizeof(*ssp)); |
| 50 | + lockdep_init_map(&ssp->dep_map, name, key, 0); |
| 51 | + return init_srcu_struct_fields(ssp); |
61 | 52 | } |
62 | 53 | EXPORT_SYMBOL_GPL(__init_srcu_struct); |
63 | 54 | |
.. | .. |
65 | 56 | |
66 | 57 | /* |
67 | 58 | * init_srcu_struct - initialize a sleep-RCU structure |
68 | | - * @sp: structure to initialize. |
| 59 | + * @ssp: structure to initialize. |
69 | 60 | * |
70 | 61 | * Must invoke this on a given srcu_struct before passing that srcu_struct |
71 | 62 | * to any other function. Each srcu_struct represents a separate domain |
72 | 63 | * of SRCU protection. |
73 | 64 | */ |
74 | | -int init_srcu_struct(struct srcu_struct *sp) |
| 65 | +int init_srcu_struct(struct srcu_struct *ssp) |
75 | 66 | { |
76 | | - return init_srcu_struct_fields(sp); |
| 67 | + return init_srcu_struct_fields(ssp); |
77 | 68 | } |
78 | 69 | EXPORT_SYMBOL_GPL(init_srcu_struct); |
79 | 70 | |
.. | .. |
81 | 72 | |
82 | 73 | /* |
83 | 74 | * cleanup_srcu_struct - deconstruct a sleep-RCU structure |
84 | | - * @sp: structure to clean up. |
| 75 | + * @ssp: structure to clean up. |
85 | 76 | * |
86 | 77 | * Must invoke this after you are finished using a given srcu_struct that |
87 | 78 | * was initialized via init_srcu_struct(), else you leak memory. |
88 | 79 | */ |
89 | | -void _cleanup_srcu_struct(struct srcu_struct *sp, bool quiesced) |
| 80 | +void cleanup_srcu_struct(struct srcu_struct *ssp) |
90 | 81 | { |
91 | | - WARN_ON(sp->srcu_lock_nesting[0] || sp->srcu_lock_nesting[1]); |
92 | | - if (quiesced) |
93 | | - WARN_ON(work_pending(&sp->srcu_work)); |
94 | | - else |
95 | | - flush_work(&sp->srcu_work); |
96 | | - WARN_ON(sp->srcu_gp_running); |
97 | | - WARN_ON(sp->srcu_gp_waiting); |
98 | | - WARN_ON(sp->srcu_cb_head); |
99 | | - WARN_ON(&sp->srcu_cb_head != sp->srcu_cb_tail); |
| 82 | + WARN_ON(ssp->srcu_lock_nesting[0] || ssp->srcu_lock_nesting[1]); |
| 83 | + flush_work(&ssp->srcu_work); |
| 84 | + WARN_ON(ssp->srcu_gp_running); |
| 85 | + WARN_ON(ssp->srcu_gp_waiting); |
| 86 | + WARN_ON(ssp->srcu_cb_head); |
| 87 | + WARN_ON(&ssp->srcu_cb_head != ssp->srcu_cb_tail); |
| 88 | + WARN_ON(ssp->srcu_idx != ssp->srcu_idx_max); |
| 89 | + WARN_ON(ssp->srcu_idx & 0x1); |
100 | 90 | } |
101 | | -EXPORT_SYMBOL_GPL(_cleanup_srcu_struct); |
| 91 | +EXPORT_SYMBOL_GPL(cleanup_srcu_struct); |
102 | 92 | |
103 | 93 | /* |
104 | 94 | * Removes the count for the old reader from the appropriate element of |
105 | 95 | * the srcu_struct. |
106 | 96 | */ |
107 | | -void __srcu_read_unlock(struct srcu_struct *sp, int idx) |
| 97 | +void __srcu_read_unlock(struct srcu_struct *ssp, int idx) |
108 | 98 | { |
109 | | - int newval = sp->srcu_lock_nesting[idx] - 1; |
| 99 | + int newval = ssp->srcu_lock_nesting[idx] - 1; |
110 | 100 | |
111 | | - WRITE_ONCE(sp->srcu_lock_nesting[idx], newval); |
112 | | - if (!newval && READ_ONCE(sp->srcu_gp_waiting)) |
113 | | - swake_up_one(&sp->srcu_wq); |
| 101 | + WRITE_ONCE(ssp->srcu_lock_nesting[idx], newval); |
| 102 | + if (!newval && READ_ONCE(ssp->srcu_gp_waiting)) |
| 103 | + swake_up_one(&ssp->srcu_wq); |
114 | 104 | } |
115 | 105 | EXPORT_SYMBOL_GPL(__srcu_read_unlock); |
116 | 106 | |
117 | 107 | /* |
118 | 108 | * Workqueue handler to drive one grace period and invoke any callbacks |
119 | | - * that become ready as a result. Single-CPU and !PREEMPT operation |
| 109 | + * that become ready as a result. Single-CPU and !PREEMPTION operation |
120 | 110 | * means that we get away with murder on synchronization. ;-) |
121 | 111 | */ |
122 | 112 | void srcu_drive_gp(struct work_struct *wp) |
.. | .. |
124 | 114 | int idx; |
125 | 115 | struct rcu_head *lh; |
126 | 116 | struct rcu_head *rhp; |
127 | | - struct srcu_struct *sp; |
| 117 | + struct srcu_struct *ssp; |
128 | 118 | |
129 | | - sp = container_of(wp, struct srcu_struct, srcu_work); |
130 | | - if (sp->srcu_gp_running || !READ_ONCE(sp->srcu_cb_head)) |
| 119 | + ssp = container_of(wp, struct srcu_struct, srcu_work); |
| 120 | + if (ssp->srcu_gp_running || USHORT_CMP_GE(ssp->srcu_idx, READ_ONCE(ssp->srcu_idx_max))) |
131 | 121 | return; /* Already running or nothing to do. */ |
132 | 122 | |
133 | 123 | /* Remove recently arrived callbacks and wait for readers. */ |
134 | | - WRITE_ONCE(sp->srcu_gp_running, true); |
| 124 | + WRITE_ONCE(ssp->srcu_gp_running, true); |
135 | 125 | local_irq_disable(); |
136 | | - lh = sp->srcu_cb_head; |
137 | | - sp->srcu_cb_head = NULL; |
138 | | - sp->srcu_cb_tail = &sp->srcu_cb_head; |
| 126 | + lh = ssp->srcu_cb_head; |
| 127 | + ssp->srcu_cb_head = NULL; |
| 128 | + ssp->srcu_cb_tail = &ssp->srcu_cb_head; |
139 | 129 | local_irq_enable(); |
140 | | - idx = sp->srcu_idx; |
141 | | - WRITE_ONCE(sp->srcu_idx, !sp->srcu_idx); |
142 | | - WRITE_ONCE(sp->srcu_gp_waiting, true); /* srcu_read_unlock() wakes! */ |
143 | | - swait_event_exclusive(sp->srcu_wq, !READ_ONCE(sp->srcu_lock_nesting[idx])); |
144 | | - WRITE_ONCE(sp->srcu_gp_waiting, false); /* srcu_read_unlock() cheap. */ |
| 130 | + idx = (ssp->srcu_idx & 0x2) / 2; |
| 131 | + WRITE_ONCE(ssp->srcu_idx, ssp->srcu_idx + 1); |
| 132 | + WRITE_ONCE(ssp->srcu_gp_waiting, true); /* srcu_read_unlock() wakes! */ |
| 133 | + swait_event_exclusive(ssp->srcu_wq, !READ_ONCE(ssp->srcu_lock_nesting[idx])); |
| 134 | + WRITE_ONCE(ssp->srcu_gp_waiting, false); /* srcu_read_unlock() cheap. */ |
| 135 | + WRITE_ONCE(ssp->srcu_idx, ssp->srcu_idx + 1); |
145 | 136 | |
146 | 137 | /* Invoke the callbacks we removed above. */ |
147 | 138 | while (lh) { |
.. | .. |
158 | 149 | * at interrupt level, but the ->srcu_gp_running checks will |
159 | 150 | * straighten that out. |
160 | 151 | */ |
161 | | - WRITE_ONCE(sp->srcu_gp_running, false); |
162 | | - if (READ_ONCE(sp->srcu_cb_head)) |
163 | | - schedule_work(&sp->srcu_work); |
| 152 | + WRITE_ONCE(ssp->srcu_gp_running, false); |
| 153 | + if (USHORT_CMP_LT(ssp->srcu_idx, READ_ONCE(ssp->srcu_idx_max))) |
| 154 | + schedule_work(&ssp->srcu_work); |
164 | 155 | } |
165 | 156 | EXPORT_SYMBOL_GPL(srcu_drive_gp); |
| 157 | + |
| 158 | +static void srcu_gp_start_if_needed(struct srcu_struct *ssp) |
| 159 | +{ |
| 160 | + unsigned short cookie; |
| 161 | + |
| 162 | + cookie = get_state_synchronize_srcu(ssp); |
| 163 | + if (USHORT_CMP_GE(READ_ONCE(ssp->srcu_idx_max), cookie)) |
| 164 | + return; |
| 165 | + WRITE_ONCE(ssp->srcu_idx_max, cookie); |
| 166 | + if (!READ_ONCE(ssp->srcu_gp_running)) { |
| 167 | + if (likely(srcu_init_done)) |
| 168 | + schedule_work(&ssp->srcu_work); |
| 169 | + else if (list_empty(&ssp->srcu_work.entry)) |
| 170 | + list_add(&ssp->srcu_work.entry, &srcu_boot_list); |
| 171 | + } |
| 172 | +} |
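Aside (not part of the patch): the widened ->srcu_idx counter above replaces the old boolean index. The expressions in srcu_drive_gp() and the new WARN_ON()s in cleanup_srcu_struct() suggest the encoding sketched below; the helper names are hypothetical and exist only for illustration.

/*
 * Illustration only: apparent encoding of the widened ->srcu_idx
 * counter, inferred from the diff above. Helper names are made up.
 */
static inline int tiny_srcu_reader_slot(unsigned short srcu_idx)
{
	/* Bit 1 picks the srcu_lock_nesting[] slot that srcu_drive_gp() waits on. */
	return (srcu_idx & 0x2) / 2;
}

static inline bool tiny_srcu_gp_in_flight(unsigned short srcu_idx)
{
	/*
	 * Bit 0 is set between the two increments in srcu_drive_gp(),
	 * which is why cleanup_srcu_struct() now warns on an odd value.
	 */
	return srcu_idx & 0x1;
}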
166 | 173 | |
167 | 174 | /* |
168 | 175 | * Enqueue an SRCU callback on the specified srcu_struct structure, |
169 | 176 | * initiating grace-period processing if it is not already running. |
170 | 177 | */ |
171 | | -void call_srcu(struct srcu_struct *sp, struct rcu_head *rhp, |
| 178 | +void call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp, |
172 | 179 | rcu_callback_t func) |
173 | 180 | { |
174 | 181 | unsigned long flags; |
.. | .. |
176 | 183 | rhp->func = func; |
177 | 184 | rhp->next = NULL; |
178 | 185 | local_irq_save(flags); |
179 | | - *sp->srcu_cb_tail = rhp; |
180 | | - sp->srcu_cb_tail = &rhp->next; |
| 186 | + *ssp->srcu_cb_tail = rhp; |
| 187 | + ssp->srcu_cb_tail = &rhp->next; |
181 | 188 | local_irq_restore(flags); |
182 | | - if (!READ_ONCE(sp->srcu_gp_running)) |
183 | | - schedule_work(&sp->srcu_work); |
| 189 | + srcu_gp_start_if_needed(ssp); |
184 | 190 | } |
185 | 191 | EXPORT_SYMBOL_GPL(call_srcu); |
186 | 192 | |
187 | 193 | /* |
188 | 194 | * synchronize_srcu - wait for prior SRCU read-side critical-section completion |
189 | 195 | */ |
190 | | -void synchronize_srcu(struct srcu_struct *sp) |
| 196 | +void synchronize_srcu(struct srcu_struct *ssp) |
191 | 197 | { |
192 | 198 | struct rcu_synchronize rs; |
193 | 199 | |
194 | 200 | init_rcu_head_on_stack(&rs.head); |
195 | 201 | init_completion(&rs.completion); |
196 | | - call_srcu(sp, &rs.head, wakeme_after_rcu); |
| 202 | + call_srcu(ssp, &rs.head, wakeme_after_rcu); |
197 | 203 | wait_for_completion(&rs.completion); |
198 | 204 | destroy_rcu_head_on_stack(&rs.head); |
199 | 205 | } |
200 | 206 | EXPORT_SYMBOL_GPL(synchronize_srcu); |
201 | 207 | |
| 208 | +/* |
| 209 | + * get_state_synchronize_srcu - Provide an end-of-grace-period cookie |
| 210 | + */ |
| 211 | +unsigned long get_state_synchronize_srcu(struct srcu_struct *ssp) |
| 212 | +{ |
| 213 | + unsigned long ret; |
| 214 | + |
| 215 | + barrier(); |
| 216 | + ret = (READ_ONCE(ssp->srcu_idx) + 3) & ~0x1; |
| 217 | + barrier(); |
| 218 | + return ret & USHRT_MAX; |
| 219 | +} |
| 220 | +EXPORT_SYMBOL_GPL(get_state_synchronize_srcu); |
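A worked example of the cookie arithmetic above, derived directly from the expression (illustration only, not part of the patch):

/*
 * ->srcu_idx == 6 (even, no grace period in flight):
 *     (6 + 3) & ~0x1 == 8, the value ->srcu_idx reaches once the next
 *     grace period has both started (7) and ended (8).
 *
 * ->srcu_idx == 7 (odd, grace period in flight):
 *     (7 + 3) & ~0x1 == 10, the end of the grace period after the
 *     current one, presumably because the caller cannot count on the
 *     already-running grace period to cover its pre-existing readers.
 *
 * poll_state_synchronize_srcu() below then reports completion once
 * USHORT_CMP_GE(->srcu_idx, cookie) holds.
 */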
| 221 | + |
| 222 | +/* |
| 223 | + * start_poll_synchronize_srcu - Provide cookie and start grace period |
| 224 | + * |
| 225 | + * The difference between this and get_state_synchronize_srcu() is that |
| 226 | + * this function ensures that the poll_state_synchronize_srcu() will |
| 227 | + * eventually return the value true. |
| 228 | + */ |
| 229 | +unsigned long start_poll_synchronize_srcu(struct srcu_struct *ssp) |
| 230 | +{ |
| 231 | + unsigned long ret = get_state_synchronize_srcu(ssp); |
| 232 | + |
| 233 | + srcu_gp_start_if_needed(ssp); |
| 234 | + return ret; |
| 235 | +} |
| 236 | +EXPORT_SYMBOL_GPL(start_poll_synchronize_srcu); |
| 237 | + |
| 238 | +/* |
| 239 | + * poll_state_synchronize_srcu - Has cookie's grace period ended? |
| 240 | + */ |
| 241 | +bool poll_state_synchronize_srcu(struct srcu_struct *ssp, unsigned long cookie) |
| 242 | +{ |
| 243 | + bool ret = USHORT_CMP_GE(READ_ONCE(ssp->srcu_idx), cookie); |
| 244 | + |
| 245 | + barrier(); |
| 246 | + return ret; |
| 247 | +} |
| 248 | +EXPORT_SYMBOL_GPL(poll_state_synchronize_srcu); |
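For context, a minimal sketch of how an updater might use the new polling interface. This is not from the patch; my_srcu, my_obj, my_retire() and my_try_free() are hypothetical names, and the pattern assumes the caller has some later context from which to retry the free.

#include <linux/slab.h>
#include <linux/srcu.h>

DEFINE_STATIC_SRCU(my_srcu);		/* Hypothetical SRCU domain. */

struct my_obj {
	unsigned long gp_cookie;
	/* ... payload ... */
};

static void my_retire(struct my_obj *p)
{
	/* Grab a cookie and kick off a grace period without blocking. */
	p->gp_cookie = start_poll_synchronize_srcu(&my_srcu);
}

static bool my_try_free(struct my_obj *p)
{
	/* Free only once all pre-existing SRCU readers have finished. */
	if (!poll_state_synchronize_srcu(&my_srcu, p->gp_cookie))
		return false;		/* Grace period still in progress. */
	kfree(p);
	return true;
}

A caller with nowhere to retry from would fall back to call_srcu() or synchronize_srcu() instead.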
| 249 | + |
202 | 250 | /* Lockdep diagnostics. */ |
203 | 251 | void __init rcu_scheduler_starting(void) |
204 | 252 | { |
205 | 253 | rcu_scheduler_active = RCU_SCHEDULER_RUNNING; |
206 | 254 | } |
| 255 | + |
| 256 | +/* |
| 257 | + * Queue work for srcu_struct structures with early boot callbacks. |
| 258 | + * The work won't actually execute until the workqueue initialization |
| 259 | + * phase that takes place after the scheduler starts. |
| 260 | + */ |
| 261 | +void __init srcu_init(void) |
| 262 | +{ |
| 263 | + struct srcu_struct *ssp; |
| 264 | + |
| 265 | + srcu_init_done = true; |
| 266 | + while (!list_empty(&srcu_boot_list)) { |
| 267 | + ssp = list_first_entry(&srcu_boot_list, |
| 268 | + struct srcu_struct, srcu_work.entry); |
| 269 | + list_del_init(&ssp->srcu_work.entry); |
| 270 | + schedule_work(&ssp->srcu_work); |
| 271 | + } |
| 272 | +} |