2023-12-09 b22da3d8526a935aa31e086e63f60ff3246cb61c
kernel/net/core/sock_reuseport.c
@@ -16,26 +16,23 @@
 
 DEFINE_SPINLOCK(reuseport_lock);
 
-#define REUSEPORT_MIN_ID 1
 static DEFINE_IDA(reuseport_ida);
 
-int reuseport_get_id(struct sock_reuseport *reuse)
+void reuseport_has_conns_set(struct sock *sk)
 {
-	int id;
+	struct sock_reuseport *reuse;
 
-	if (reuse->reuseport_id)
-		return reuse->reuseport_id;
+	if (!rcu_access_pointer(sk->sk_reuseport_cb))
+		return;
 
-	id = ida_simple_get(&reuseport_ida, REUSEPORT_MIN_ID, 0,
-			    /* Called under reuseport_lock */
-			    GFP_ATOMIC);
-	if (id < 0)
-		return id;
-
-	reuse->reuseport_id = id;
-
-	return reuse->reuseport_id;
+	spin_lock_bh(&reuseport_lock);
+	reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
+					  lockdep_is_held(&reuseport_lock));
+	if (likely(reuse))
+		reuse->has_conns = 1;
+	spin_unlock_bh(&reuseport_lock);
 }
+EXPORT_SYMBOL(reuseport_has_conns_set);
 
 static struct sock_reuseport *__reuseport_alloc(unsigned int max_socks)
 {
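
Note: reuseport_has_conns_set() moves the old lockless "reuse->has_conns = 1"
write under reuseport_lock, so it can no longer race with reuseport_grow()
swapping in a resized group and losing the flag. A minimal sketch of the kind
of connect-path call site this helper serves (modeled loosely on the IPv4
datagram connect path; the example_* name is illustrative, not part of this
patch):

	/* Once a datagram socket becomes connected, flag its reuseport
	 * group so lookup stops treating every member as connectionless.
	 */
	static void example_datagram_connected(struct sock *sk)
	{
		if (sk->sk_prot->rehash)
			sk->sk_prot->rehash(sk);
		reuseport_has_conns_set(sk);	/* takes reuseport_lock */
	}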
@@ -55,6 +52,7 @@
 int reuseport_alloc(struct sock *sk, bool bind_inany)
 {
 	struct sock_reuseport *reuse;
+	int id, ret = 0;
 
 	/* bh lock used since this function call may precede hlist lock in
 	 * soft irq of receive path or setsockopt from process context
@@ -78,10 +76,18 @@
 
 	reuse = __reuseport_alloc(INIT_SOCKS);
 	if (!reuse) {
-		spin_unlock_bh(&reuseport_lock);
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto out;
 	}
 
+	id = ida_alloc(&reuseport_ida, GFP_ATOMIC);
+	if (id < 0) {
+		kfree(reuse);
+		ret = id;
+		goto out;
+	}
+
+	reuse->reuseport_id = id;
 	reuse->socks[0] = sk;
 	reuse->num_socks = 1;
 	reuse->bind_inany = bind_inany;
@@ -90,7 +96,7 @@
 out:
 	spin_unlock_bh(&reuseport_lock);
 
-	return 0;
+	return ret;
 }
 EXPORT_SYMBOL(reuseport_alloc);
 
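
Note: with the ID allocated unconditionally here, "reuseport_id == 0" no
longer means "no ID yet", so REUSEPORT_MIN_ID and the lazy reuseport_get_id()
path can go away, and the legacy ida_simple_get()/ida_simple_remove() calls
become plain ida_alloc()/ida_free(). A self-contained sketch of that API's
contract (illustrative, not from this file):

	static DEFINE_IDA(example_ida);

	static int example_take_id(void)
	{
		/* Returns the smallest free ID >= 0, or a negative errno. */
		return ida_alloc(&example_ida, GFP_KERNEL);
	}

	static void example_drop_id(int id)
	{
		/* Every successful ida_alloc() needs a matching ida_free(). */
		ida_free(&example_ida, id);
	}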
@@ -107,7 +113,6 @@
 	if (!more_reuse)
 		return NULL;
 
-	more_reuse->max_socks = more_socks_size;
 	more_reuse->num_socks = reuse->num_socks;
 	more_reuse->prog = reuse->prog;
 	more_reuse->reuseport_id = reuse->reuseport_id;
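
Note: dropping the max_socks assignment is cleanup, not a behavior change:
__reuseport_alloc(more_socks_size) already records the capacity it allocates,
roughly as below (paraphrased from this file for context, not part of the
patch):

	static struct sock_reuseport *__reuseport_alloc(unsigned int max_socks)
	{
		unsigned int size = sizeof(struct sock_reuseport) +
				    sizeof(struct sock *) * max_socks;
		struct sock_reuseport *reuse = kzalloc(size, GFP_ATOMIC);

		if (!reuse)
			return NULL;

		reuse->max_socks = max_socks;
		return reuse;
	}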
@@ -136,8 +141,7 @@
 
 	reuse = container_of(head, struct sock_reuseport, rcu);
 	sk_reuseport_prog_free(rcu_dereference_protected(reuse->prog, 1));
-	if (reuse->reuseport_id)
-		ida_simple_remove(&reuseport_ida, reuse->reuseport_id);
+	ida_free(&reuseport_ida, reuse->reuseport_id);
 	kfree(reuse);
 }
 
@@ -145,6 +149,8 @@
  * reuseport_add_sock - Add a socket to the reuseport group of another.
  * @sk:  New socket to add to the group.
  * @sk2: Socket belonging to the existing reuseport group.
+ * @bind_inany: Whether or not the group is bound to a local INANY address.
+ *
  * May return ENOMEM and not add socket to group under memory pressure.
  */
 int reuseport_add_sock(struct sock *sk, struct sock *sk2, bool bind_inany)
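
Note: the new @bind_inany line completes the kernel-doc parameter list. The
usual caller pattern, as in the inet hash code, is "join the existing group if
another socket already owns the port, otherwise start one" (sketch; the
example_* name is illustrative):

	static int example_reuseport_join(struct sock *sk, struct sock *owner,
					  bool bind_inany)
	{
		if (owner)	/* port already has a reuseport member */
			return reuseport_add_sock(sk, owner, bind_inany);

		return reuseport_alloc(sk, bind_inany);
	}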
@@ -188,6 +194,7 @@
 	call_rcu(&old_reuse->rcu, reuseport_free_rcu);
 	return 0;
 }
+EXPORT_SYMBOL(reuseport_add_sock);
 
 void reuseport_detach_sock(struct sock *sk)
 {
@@ -198,12 +205,15 @@
 	reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
 					  lockdep_is_held(&reuseport_lock));
 
-	/* At least one of the sk in this reuseport group is added to
-	 * a bpf map.  Notify the bpf side.  The bpf map logic will
-	 * remove the sk if it is indeed added to a bpf map.
+	/* Notify the bpf side. The sk may be added to a sockarray
+	 * map. If so, sockarray logic will remove it from the map.
+	 *
+	 * Other bpf map types that work with reuseport, like sockmap,
+	 * don't need an explicit callback from here. They override sk
+	 * unhash/close ops to remove the sk from the map before we
+	 * get to this point.
 	 */
-	if (reuse->reuseport_id)
-		bpf_sk_reuseport_detach(sk);
+	bpf_sk_reuseport_detach(sk);
 
 	rcu_assign_pointer(sk->sk_reuseport_cb, NULL);
 
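
Note: the "if (reuse->reuseport_id)" guard made sense when an ID was only
allocated once some sk joined a bpf map; now that every group has an ID,
bpf_sk_reuseport_detach() is simply called unconditionally (it is a no-op for
a sk that is in no map). The rewritten comment's sockmap point can be sketched
conceptually like this (names invented; the real logic lives in
net/core/sock_map.c):

	static void example_drop_from_map(struct sock *sk)
	{
		/* stub: delete sk's entry from the bpf map */
	}

	static void (*example_saved_unhash)(struct sock *sk);

	static void example_wrapped_unhash(struct sock *sk)
	{
		example_drop_from_map(sk);	/* map cleanup runs first... */
		example_saved_unhash(sk);	/* ...then the original unhash */
	}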
@@ -341,3 +351,27 @@
 	return 0;
 }
 EXPORT_SYMBOL(reuseport_attach_prog);
+
+int reuseport_detach_prog(struct sock *sk)
+{
+	struct sock_reuseport *reuse;
+	struct bpf_prog *old_prog;
+
+	if (!rcu_access_pointer(sk->sk_reuseport_cb))
+		return sk->sk_reuseport ? -ENOENT : -EINVAL;
+
+	old_prog = NULL;
+	spin_lock_bh(&reuseport_lock);
+	reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
+					  lockdep_is_held(&reuseport_lock));
+	old_prog = rcu_replace_pointer(reuse->prog, old_prog,
+				       lockdep_is_held(&reuseport_lock));
+	spin_unlock_bh(&reuseport_lock);
+
+	if (!old_prog)
+		return -ENOENT;
+
+	sk_reuseport_prog_free(old_prog);
+	return 0;
+}
+EXPORT_SYMBOL(reuseport_detach_prog);
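
Note: reuseport_detach_prog() is the kernel side of SO_DETACH_REUSEPORT_BPF.
A minimal userspace sketch (hypothetical helper; note that sock_setsockopt()
rejects optlen < sizeof(int) even though the value itself is ignored):

	#include <sys/socket.h>

	#ifndef SO_DETACH_REUSEPORT_BPF
	#define SO_DETACH_REUSEPORT_BPF 68
	#endif

	static int detach_reuseport_prog(int fd)
	{
		int dummy = 0;

		return setsockopt(fd, SOL_SOCKET, SO_DETACH_REUSEPORT_BPF,
				  &dummy, sizeof(dummy));
	}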