@@ -15,6 +15,7 @@
 #include <linux/rcupdate.h>
 #include <linux/list.h>
 
+static struct kmem_cache *peer_cache;
 static atomic64_t peer_counter = ATOMIC64_INIT(0);
 
 struct wg_peer *wg_peer_create(struct wg_device *wg,
@@ -29,30 +30,25 @@
 	if (wg->num_peers >= MAX_PEERS_PER_DEVICE)
 		return ERR_PTR(ret);
 
-	peer = kzalloc(sizeof(*peer), GFP_KERNEL);
+	peer = kmem_cache_zalloc(peer_cache, GFP_KERNEL);
 	if (unlikely(!peer))
 		return ERR_PTR(ret);
-	peer->device = wg;
+	if (unlikely(dst_cache_init(&peer->endpoint_cache, GFP_KERNEL)))
+		goto err;
 
+	peer->device = wg;
 	wg_noise_handshake_init(&peer->handshake, &wg->static_identity,
 				public_key, preshared_key, peer);
-	if (dst_cache_init(&peer->endpoint_cache, GFP_KERNEL))
-		goto err_1;
-	if (wg_packet_queue_init(&peer->tx_queue, wg_packet_tx_worker, false,
-				 MAX_QUEUED_PACKETS))
-		goto err_2;
-	if (wg_packet_queue_init(&peer->rx_queue, NULL, false,
-				 MAX_QUEUED_PACKETS))
-		goto err_3;
-
 	peer->internal_id = atomic64_inc_return(&peer_counter);
 	peer->serial_work_cpu = nr_cpumask_bits;
 	wg_cookie_init(&peer->latest_cookie);
 	wg_timers_init(peer);
 	wg_cookie_checker_precompute_peer_keys(peer);
 	spin_lock_init(&peer->keypairs.keypair_update_lock);
-	INIT_WORK(&peer->transmit_handshake_work,
-		  wg_packet_handshake_send_worker);
+	INIT_WORK(&peer->transmit_handshake_work, wg_packet_handshake_send_worker);
+	INIT_WORK(&peer->transmit_packet_work, wg_packet_tx_worker);
+	wg_prev_queue_init(&peer->tx_queue);
+	wg_prev_queue_init(&peer->rx_queue);
 	rwlock_init(&peer->endpoint_lock);
 	kref_init(&peer->refcount);
 	skb_queue_head_init(&peer->staged_packet_queue);
@@ -68,12 +64,8 @@
 	pr_debug("%s: Peer %llu created\n", wg->dev->name, peer->internal_id);
 	return peer;
 
-err_3:
-	wg_packet_queue_free(&peer->tx_queue, false);
-err_2:
-	dst_cache_destroy(&peer->endpoint_cache);
-err_1:
-	kfree(peer);
+err:
+	kmem_cache_free(peer_cache, peer);
 	return ERR_PTR(ret);
 }
 
@@ -97,7 +89,7 @@
 	/* Mark as dead, so that we don't allow jumping contexts after. */
 	WRITE_ONCE(peer->is_dead, true);
 
-	/* The caller must now synchronize_rcu() for this to take effect. */
+	/* The caller must now synchronize_net() for this to take effect. */
 }
 
 static void peer_remove_after_dead(struct wg_peer *peer)
@@ -169,7 +161,7 @@
 	lockdep_assert_held(&peer->device->device_update_lock);
 
 	peer_make_dead(peer);
-	synchronize_rcu();
+	synchronize_net();
 	peer_remove_after_dead(peer);
 }
 
@@ -187,7 +179,7 @@
 		peer_make_dead(peer);
 		list_add_tail(&peer->peer_list, &dead_peers);
 	}
-	synchronize_rcu();
+	synchronize_net();
 	list_for_each_entry_safe(peer, temp, &dead_peers, peer_list)
 		peer_remove_after_dead(peer);
 }
@@ -197,13 +189,13 @@
 	struct wg_peer *peer = container_of(rcu, struct wg_peer, rcu);
 
 	dst_cache_destroy(&peer->endpoint_cache);
-	wg_packet_queue_free(&peer->rx_queue, false);
-	wg_packet_queue_free(&peer->tx_queue, false);
+	WARN_ON(wg_prev_queue_peek(&peer->tx_queue) || wg_prev_queue_peek(&peer->rx_queue));
 
 	/* The final zeroing takes care of clearing any remaining handshake key
 	 * material and other potentially sensitive information.
 	 */
-	kzfree(peer);
+	memzero_explicit(peer, sizeof(*peer));
+	kmem_cache_free(peer_cache, peer);
 }
 
 static void kref_release(struct kref *refcount)
@@ -235,3 +227,14 @@
 		return;
 	kref_put(&peer->refcount, kref_release);
 }
+
+int __init wg_peer_init(void)
+{
+	peer_cache = KMEM_CACHE(wg_peer, 0);
+	return peer_cache ? 0 : -ENOMEM;
+}
+
+void wg_peer_uninit(void)
+{
+	kmem_cache_destroy(peer_cache);
+}
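
For context on the new wg_peer_init()/wg_peer_uninit() pair added at the end of the patch: the kmem_cache must exist before any peer can be allocated and is destroyed only once the module is going away. Below is a minimal sketch of how such a constructor/destructor pair is typically wired into a module's init/exit path; the demo_mod_init()/demo_mod_exit() entry points are illustrative placeholders, not the actual WireGuard module hooks.

/* Illustrative sketch only: hooking a slab-cache init/uninit pair like
 * wg_peer_init()/wg_peer_uninit() into module init/exit. The module
 * entry-point names below are hypothetical.
 */
#include <linux/init.h>
#include <linux/module.h>

static int __init demo_mod_init(void)
{
	int ret;

	/* Create the peer slab before anything can allocate a peer. */
	ret = wg_peer_init();
	if (ret < 0)
		return ret;

	/* ... register the rest of the driver here; any later failure
	 * must unwind with wg_peer_uninit() so the cache is not leaked ...
	 */
	return 0;
}

static void __exit demo_mod_exit(void)
{
	/* All peers must be freed by now; kmem_cache_destroy() complains
	 * if objects are still live in the cache.
	 */
	wg_peer_uninit();
}

module_init(demo_mod_init);
module_exit(demo_mod_exit);
MODULE_LICENSE("GPL v2");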