.. | .. |
---|
6 | 6 | #include "allowedips.h" |
---|
7 | 7 | #include "peer.h" |
---|
8 | 8 | |
---|
/* Worst-case trie traversal depth: one level per bit of an IPv6 address
 * (128) plus one for the root, so fixed-size walk stacks sized to this
 * can never overflow. */
enum { MAX_ALLOWEDIPS_DEPTH = 129 };

/* Slab cache backing all struct allowedips_node allocations; created in
 * wg_allowedips_slab_init() and torn down in wg_allowedips_slab_uninit(). */
static struct kmem_cache *node_cache;
---|
| 12 | + |
---|
9 | 13 | static void swap_endian(u8 *dst, const u8 *src, u8 bits) |
---|
10 | 14 | { |
---|
11 | 15 | if (bits == 32) { |
---|
.. | .. |
---|
28 | 32 | node->bitlen = bits; |
---|
29 | 33 | memcpy(node->bits, src, bits / 8U); |
---|
30 | 34 | } |
---|
31 | | -#define CHOOSE_NODE(parent, key) \ |
---|
32 | | - parent->bit[(key[parent->bit_at_a] >> parent->bit_at_b) & 1] |
---|
| 35 | + |
---|
| 36 | +static inline u8 choose(struct allowedips_node *node, const u8 *key) |
---|
| 37 | +{ |
---|
| 38 | + return (key[node->bit_at_a] >> node->bit_at_b) & 1; |
---|
| 39 | +} |
---|
33 | 40 | |
---|
34 | 41 | static void push_rcu(struct allowedips_node **stack, |
---|
35 | 42 | struct allowedips_node __rcu *p, unsigned int *len) |
---|
36 | 43 | { |
---|
37 | 44 | if (rcu_access_pointer(p)) { |
---|
38 | | - WARN_ON(IS_ENABLED(DEBUG) && *len >= 128); |
---|
| 45 | + if (WARN_ON(IS_ENABLED(DEBUG) && *len >= MAX_ALLOWEDIPS_DEPTH)) |
---|
| 46 | + return; |
---|
39 | 47 | stack[(*len)++] = rcu_dereference_raw(p); |
---|
40 | 48 | } |
---|
41 | 49 | } |
---|
42 | 50 | |
---|
| 51 | +static void node_free_rcu(struct rcu_head *rcu) |
---|
| 52 | +{ |
---|
| 53 | + kmem_cache_free(node_cache, container_of(rcu, struct allowedips_node, rcu)); |
---|
| 54 | +} |
---|
| 55 | + |
---|
43 | 56 | static void root_free_rcu(struct rcu_head *rcu) |
---|
44 | 57 | { |
---|
45 | | - struct allowedips_node *node, *stack[128] = { |
---|
| 58 | + struct allowedips_node *node, *stack[MAX_ALLOWEDIPS_DEPTH] = { |
---|
46 | 59 | container_of(rcu, struct allowedips_node, rcu) }; |
---|
47 | 60 | unsigned int len = 1; |
---|
48 | 61 | |
---|
49 | 62 | while (len > 0 && (node = stack[--len])) { |
---|
50 | 63 | push_rcu(stack, node->bit[0], &len); |
---|
51 | 64 | push_rcu(stack, node->bit[1], &len); |
---|
52 | | - kfree(node); |
---|
| 65 | + kmem_cache_free(node_cache, node); |
---|
53 | 66 | } |
---|
54 | 67 | } |
---|
55 | 68 | |
---|
56 | 69 | static void root_remove_peer_lists(struct allowedips_node *root) |
---|
57 | 70 | { |
---|
58 | | - struct allowedips_node *node, *stack[128] = { root }; |
---|
| 71 | + struct allowedips_node *node, *stack[MAX_ALLOWEDIPS_DEPTH] = { root }; |
---|
59 | 72 | unsigned int len = 1; |
---|
60 | 73 | |
---|
61 | 74 | while (len > 0 && (node = stack[--len])) { |
---|
.. | .. |
---|
64 | 77 | if (rcu_access_pointer(node->peer)) |
---|
65 | 78 | list_del(&node->peer_list); |
---|
66 | 79 | } |
---|
67 | | -} |
---|
68 | | - |
---|
69 | | -static void walk_remove_by_peer(struct allowedips_node __rcu **top, |
---|
70 | | - struct wg_peer *peer, struct mutex *lock) |
---|
71 | | -{ |
---|
72 | | -#define REF(p) rcu_access_pointer(p) |
---|
73 | | -#define DEREF(p) rcu_dereference_protected(*(p), lockdep_is_held(lock)) |
---|
74 | | -#define PUSH(p) ({ \ |
---|
75 | | - WARN_ON(IS_ENABLED(DEBUG) && len >= 128); \ |
---|
76 | | - stack[len++] = p; \ |
---|
77 | | - }) |
---|
78 | | - |
---|
79 | | - struct allowedips_node __rcu **stack[128], **nptr; |
---|
80 | | - struct allowedips_node *node, *prev; |
---|
81 | | - unsigned int len; |
---|
82 | | - |
---|
83 | | - if (unlikely(!peer || !REF(*top))) |
---|
84 | | - return; |
---|
85 | | - |
---|
86 | | - for (prev = NULL, len = 0, PUSH(top); len > 0; prev = node) { |
---|
87 | | - nptr = stack[len - 1]; |
---|
88 | | - node = DEREF(nptr); |
---|
89 | | - if (!node) { |
---|
90 | | - --len; |
---|
91 | | - continue; |
---|
92 | | - } |
---|
93 | | - if (!prev || REF(prev->bit[0]) == node || |
---|
94 | | - REF(prev->bit[1]) == node) { |
---|
95 | | - if (REF(node->bit[0])) |
---|
96 | | - PUSH(&node->bit[0]); |
---|
97 | | - else if (REF(node->bit[1])) |
---|
98 | | - PUSH(&node->bit[1]); |
---|
99 | | - } else if (REF(node->bit[0]) == prev) { |
---|
100 | | - if (REF(node->bit[1])) |
---|
101 | | - PUSH(&node->bit[1]); |
---|
102 | | - } else { |
---|
103 | | - if (rcu_dereference_protected(node->peer, |
---|
104 | | - lockdep_is_held(lock)) == peer) { |
---|
105 | | - RCU_INIT_POINTER(node->peer, NULL); |
---|
106 | | - list_del_init(&node->peer_list); |
---|
107 | | - if (!node->bit[0] || !node->bit[1]) { |
---|
108 | | - rcu_assign_pointer(*nptr, DEREF( |
---|
109 | | - &node->bit[!REF(node->bit[0])])); |
---|
110 | | - kfree_rcu(node, rcu); |
---|
111 | | - node = DEREF(nptr); |
---|
112 | | - } |
---|
113 | | - } |
---|
114 | | - --len; |
---|
115 | | - } |
---|
116 | | - } |
---|
117 | | - |
---|
118 | | -#undef REF |
---|
119 | | -#undef DEREF |
---|
120 | | -#undef PUSH |
---|
121 | 80 | } |
---|
122 | 81 | |
---|
123 | 82 | static unsigned int fls128(u64 a, u64 b) |
---|
.. | .. |
---|
159 | 118 | found = node; |
---|
160 | 119 | if (node->cidr == bits) |
---|
161 | 120 | break; |
---|
162 | | - node = rcu_dereference_bh(CHOOSE_NODE(node, key)); |
---|
| 121 | + node = rcu_dereference_bh(node->bit[choose(node, key)]); |
---|
163 | 122 | } |
---|
164 | 123 | return found; |
---|
165 | 124 | } |
---|
.. | .. |
---|
191 | 150 | u8 cidr, u8 bits, struct allowedips_node **rnode, |
---|
192 | 151 | struct mutex *lock) |
---|
193 | 152 | { |
---|
194 | | - struct allowedips_node *node = rcu_dereference_protected(trie, |
---|
195 | | - lockdep_is_held(lock)); |
---|
| 153 | + struct allowedips_node *node = rcu_dereference_protected(trie, lockdep_is_held(lock)); |
---|
196 | 154 | struct allowedips_node *parent = NULL; |
---|
197 | 155 | bool exact = false; |
---|
198 | 156 | |
---|
.. | .. |
---|
202 | 160 | exact = true; |
---|
203 | 161 | break; |
---|
204 | 162 | } |
---|
205 | | - node = rcu_dereference_protected(CHOOSE_NODE(parent, key), |
---|
206 | | - lockdep_is_held(lock)); |
---|
| 163 | + node = rcu_dereference_protected(parent->bit[choose(parent, key)], lockdep_is_held(lock)); |
---|
207 | 164 | } |
---|
208 | 165 | *rnode = parent; |
---|
209 | 166 | return exact; |
---|
| 167 | +} |
---|
| 168 | + |
---|
/* Publish @node into the trie at the slot @parent points to, first recording
 * in node->parent_bit_packed both the slot's address and @bit: 0 or 1 when
 * @parent is a child slot of another node, 2 when it is a trie root pointer
 * (callers pass 2 for the root case).  The packed value lets
 * wg_allowedips_remove_by_peer() unlink nodes without re-walking the trie;
 * the encoding relies on the slot address leaving the low two bits clear
 * (consumers mask with & ~3UL / & 3).  parent_bit_packed is written before
 * the rcu_assign_pointer() so readers never see the node unpacked. */
static inline void connect_node(struct allowedips_node __rcu **parent, u8 bit, struct allowedips_node *node)
{
	node->parent_bit_packed = (unsigned long)parent | bit;
	rcu_assign_pointer(*parent, node);
}
---|
| 174 | + |
---|
| 175 | +static inline void choose_and_connect_node(struct allowedips_node *parent, struct allowedips_node *node) |
---|
| 176 | +{ |
---|
| 177 | + u8 bit = choose(parent, node->bits); |
---|
| 178 | + connect_node(&parent->bit[bit], bit, node); |
---|
210 | 179 | } |
---|
211 | 180 | |
---|
212 | 181 | static int add(struct allowedips_node __rcu **trie, u8 bits, const u8 *key, |
---|
.. | .. |
---|
218 | 187 | return -EINVAL; |
---|
219 | 188 | |
---|
220 | 189 | if (!rcu_access_pointer(*trie)) { |
---|
221 | | - node = kzalloc(sizeof(*node), GFP_KERNEL); |
---|
| 190 | + node = kmem_cache_zalloc(node_cache, GFP_KERNEL); |
---|
222 | 191 | if (unlikely(!node)) |
---|
223 | 192 | return -ENOMEM; |
---|
224 | 193 | RCU_INIT_POINTER(node->peer, peer); |
---|
225 | 194 | list_add_tail(&node->peer_list, &peer->allowedips_list); |
---|
226 | 195 | copy_and_assign_cidr(node, key, cidr, bits); |
---|
227 | | - rcu_assign_pointer(*trie, node); |
---|
| 196 | + connect_node(trie, 2, node); |
---|
228 | 197 | return 0; |
---|
229 | 198 | } |
---|
230 | 199 | if (node_placement(*trie, key, cidr, bits, &node, lock)) { |
---|
.. | .. |
---|
233 | 202 | return 0; |
---|
234 | 203 | } |
---|
235 | 204 | |
---|
236 | | - newnode = kzalloc(sizeof(*newnode), GFP_KERNEL); |
---|
| 205 | + newnode = kmem_cache_zalloc(node_cache, GFP_KERNEL); |
---|
237 | 206 | if (unlikely(!newnode)) |
---|
238 | 207 | return -ENOMEM; |
---|
239 | 208 | RCU_INIT_POINTER(newnode->peer, peer); |
---|
.. | .. |
---|
243 | 212 | if (!node) { |
---|
244 | 213 | down = rcu_dereference_protected(*trie, lockdep_is_held(lock)); |
---|
245 | 214 | } else { |
---|
246 | | - down = rcu_dereference_protected(CHOOSE_NODE(node, key), |
---|
247 | | - lockdep_is_held(lock)); |
---|
| 215 | + const u8 bit = choose(node, key); |
---|
| 216 | + down = rcu_dereference_protected(node->bit[bit], lockdep_is_held(lock)); |
---|
248 | 217 | if (!down) { |
---|
249 | | - rcu_assign_pointer(CHOOSE_NODE(node, key), newnode); |
---|
| 218 | + connect_node(&node->bit[bit], bit, newnode); |
---|
250 | 219 | return 0; |
---|
251 | 220 | } |
---|
252 | 221 | } |
---|
.. | .. |
---|
254 | 223 | parent = node; |
---|
255 | 224 | |
---|
256 | 225 | if (newnode->cidr == cidr) { |
---|
257 | | - rcu_assign_pointer(CHOOSE_NODE(newnode, down->bits), down); |
---|
| 226 | + choose_and_connect_node(newnode, down); |
---|
258 | 227 | if (!parent) |
---|
259 | | - rcu_assign_pointer(*trie, newnode); |
---|
| 228 | + connect_node(trie, 2, newnode); |
---|
260 | 229 | else |
---|
261 | | - rcu_assign_pointer(CHOOSE_NODE(parent, newnode->bits), |
---|
262 | | - newnode); |
---|
263 | | - } else { |
---|
264 | | - node = kzalloc(sizeof(*node), GFP_KERNEL); |
---|
265 | | - if (unlikely(!node)) { |
---|
266 | | - list_del(&newnode->peer_list); |
---|
267 | | - kfree(newnode); |
---|
268 | | - return -ENOMEM; |
---|
269 | | - } |
---|
270 | | - INIT_LIST_HEAD(&node->peer_list); |
---|
271 | | - copy_and_assign_cidr(node, newnode->bits, cidr, bits); |
---|
272 | | - |
---|
273 | | - rcu_assign_pointer(CHOOSE_NODE(node, down->bits), down); |
---|
274 | | - rcu_assign_pointer(CHOOSE_NODE(node, newnode->bits), newnode); |
---|
275 | | - if (!parent) |
---|
276 | | - rcu_assign_pointer(*trie, node); |
---|
277 | | - else |
---|
278 | | - rcu_assign_pointer(CHOOSE_NODE(parent, node->bits), |
---|
279 | | - node); |
---|
| 230 | + choose_and_connect_node(parent, newnode); |
---|
| 231 | + return 0; |
---|
280 | 232 | } |
---|
| 233 | + |
---|
| 234 | + node = kmem_cache_zalloc(node_cache, GFP_KERNEL); |
---|
| 235 | + if (unlikely(!node)) { |
---|
| 236 | + list_del(&newnode->peer_list); |
---|
| 237 | + kmem_cache_free(node_cache, newnode); |
---|
| 238 | + return -ENOMEM; |
---|
| 239 | + } |
---|
| 240 | + INIT_LIST_HEAD(&node->peer_list); |
---|
| 241 | + copy_and_assign_cidr(node, newnode->bits, cidr, bits); |
---|
| 242 | + |
---|
| 243 | + choose_and_connect_node(node, down); |
---|
| 244 | + choose_and_connect_node(node, newnode); |
---|
| 245 | + if (!parent) |
---|
| 246 | + connect_node(trie, 2, node); |
---|
| 247 | + else |
---|
| 248 | + choose_and_connect_node(parent, node); |
---|
281 | 249 | return 0; |
---|
282 | 250 | } |
---|
283 | 251 | |
---|
.. | .. |
---|
/* Remove every allowedips entry belonging to @peer from @table, splicing the
 * trie closed around each removed node.  Caller must hold @lock (the update
 * mutex protecting the trie); actual freeing is deferred to RCU via
 * node_free_rcu() so concurrent lockless readers stay safe. */
void wg_allowedips_remove_by_peer(struct allowedips *table,
				  struct wg_peer *peer, struct mutex *lock)
{
	struct allowedips_node *node, *child, **parent_bit, *parent, *tmp;
	bool free_parent;

	if (list_empty(&peer->allowedips_list))
		return;
	++table->seq;
	list_for_each_entry_safe(node, tmp, &peer->allowedips_list, peer_list) {
		list_del_init(&node->peer_list);
		RCU_INIT_POINTER(node->peer, NULL);
		/* A node with both children stays as an interior node;
		 * clearing ->peer above is the whole removal for it. */
		if (node->bit[0] && node->bit[1])
			continue;
		/* At most one child exists: the index expression picks
		 * bit[1] when bit[0] is NULL, bit[0] otherwise, so @child is
		 * the sole surviving child (or NULL for a leaf). */
		child = rcu_dereference_protected(node->bit[!rcu_access_pointer(node->bit[0])],
						  lockdep_is_held(lock));
		if (child)
			child->parent_bit_packed = node->parent_bit_packed;
		/* Low two bits of parent_bit_packed encode which slot this
		 * node occupies; masking them off yields the address of that
		 * slot, through which the child is spliced into our place. */
		parent_bit = (struct allowedips_node **)(node->parent_bit_packed & ~3UL);
		*parent_bit = child;
		/* Recover the owning parent node from the slot address via
		 * offsetof; only meaningful when the packed bits are 0 or 1
		 * (a child slot) — bit value 2 tags a trie root pointer,
		 * which the free_parent test below rules out. */
		parent = (void *)parent_bit -
			 offsetof(struct allowedips_node, bit[node->parent_bit_packed & 1]);
		/* The parent becomes removable too if @node was a childless
		 * leaf hanging off a peerless interior node. */
		free_parent = !rcu_access_pointer(node->bit[0]) &&
			      !rcu_access_pointer(node->bit[1]) &&
			      (node->parent_bit_packed & 3) <= 1 &&
			      !rcu_access_pointer(parent->peer);
		if (free_parent)
			child = rcu_dereference_protected(
					parent->bit[!(node->parent_bit_packed & 1)],
					lockdep_is_held(lock));
		call_rcu(&node->rcu, node_free_rcu);
		if (!free_parent)
			continue;
		/* Splice the parent's surviving child up into the
		 * grandparent's slot, then RCU-free the parent as well. */
		if (child)
			child->parent_bit_packed = parent->parent_bit_packed;
		*(struct allowedips_node **)(parent->parent_bit_packed & ~3UL) = child;
		call_rcu(&parent->rcu, node_free_rcu);
	}
}
---|
342 | 342 | |
---|
343 | 343 | int wg_allowedips_read_node(struct allowedips_node *node, u8 ip[16], u8 *cidr) |
---|
.. | .. |
---|
374 | 374 | return NULL; |
---|
375 | 375 | } |
---|
376 | 376 | |
---|
| 377 | +int __init wg_allowedips_slab_init(void) |
---|
| 378 | +{ |
---|
| 379 | + node_cache = KMEM_CACHE(allowedips_node, 0); |
---|
| 380 | + return node_cache ? 0 : -ENOMEM; |
---|
| 381 | +} |
---|
| 382 | + |
---|
/* Destroy the node cache at module unload.  rcu_barrier() first waits for
 * all pending RCU callbacks (node_free_rcu / root_free_rcu) to finish, so
 * no object from node_cache can still be queued for freeing when the cache
 * itself is destroyed. */
void wg_allowedips_slab_uninit(void)
{
	rcu_barrier();
	kmem_cache_destroy(node_cache);
}
---|
| 388 | + |
---|
377 | 389 | #include "selftest/allowedips.c" |
---|