forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-05-11 04dd17822334871b23ea2862f7798fb0e0007777
kernel/drivers/infiniband/core/netlink.c
@@ -36,27 +36,31 @@
 #include <linux/export.h>
 #include <net/netlink.h>
 #include <net/net_namespace.h>
+#include <net/netns/generic.h>
 #include <net/sock.h>
 #include <rdma/rdma_netlink.h>
 #include <linux/module.h>
 #include "core_priv.h"
 
-static DEFINE_MUTEX(rdma_nl_mutex);
-static struct sock *nls;
 static struct {
-        const struct rdma_nl_cbs *cb_table;
+        const struct rdma_nl_cbs *cb_table;
+        /* Synchronizes between ongoing netlink commands and netlink client
+         * unregistration.
+         */
+        struct rw_semaphore sem;
 } rdma_nl_types[RDMA_NL_NUM_CLIENTS];
 
-int rdma_nl_chk_listeners(unsigned int group)
+bool rdma_nl_chk_listeners(unsigned int group)
 {
-        return (netlink_has_listeners(nls, group)) ? 0 : -1;
+        struct rdma_dev_net *rnet = rdma_net_to_dev_net(&init_net);
+
+        return netlink_has_listeners(rnet->nl_sock, group);
 }
 EXPORT_SYMBOL(rdma_nl_chk_listeners);
 
 static bool is_nl_msg_valid(unsigned int type, unsigned int op)
 {
         static const unsigned int max_num_ops[RDMA_NL_NUM_CLIENTS] = {
-                [RDMA_NL_RDMA_CM] = RDMA_NL_RDMA_CM_NUM_OPS,
                 [RDMA_NL_IWCM] = RDMA_NL_IWPM_NUM_OPS,
                 [RDMA_NL_LS] = RDMA_NL_LS_NUM_OPS,
                 [RDMA_NL_NLDEV] = RDMA_NLDEV_NUM_OPS,
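Note on the hunk above: rdma_nl_chk_listeners() now resolves the netlink socket through the init_net per-namespace state instead of the removed global nls, and its return convention flips from the old 0 / -1 to a plain bool, so callers elsewhere in the tree have to be adjusted in the same series. A hypothetical caller-side check (the -EPERM value is illustrative only, not taken from this patch) would change roughly like this:

        /* Before: 0 meant "a listener is subscribed to the group", -1 meant none. */
        if (rdma_nl_chk_listeners(RDMA_NL_GROUP_LS))
                return -EPERM;  /* nobody listening, give up */

        /* After: the bool answers "is anyone listening?" directly. */
        if (!rdma_nl_chk_listeners(RDMA_NL_GROUP_LS))
                return -EPERM;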
@@ -74,62 +78,53 @@
         return (op < max_num_ops[type]) ? true : false;
 }
 
-static bool is_nl_valid(unsigned int type, unsigned int op)
+static const struct rdma_nl_cbs *
+get_cb_table(const struct sk_buff *skb, unsigned int type, unsigned int op)
 {
         const struct rdma_nl_cbs *cb_table;
 
-        if (!is_nl_msg_valid(type, op))
-                return false;
+        /*
+         * Currently only NLDEV client is supporting netlink commands in
+         * non init_net net namespace.
+         */
+        if (sock_net(skb->sk) != &init_net && type != RDMA_NL_NLDEV)
+                return NULL;
 
-        if (!rdma_nl_types[type].cb_table) {
-                mutex_unlock(&rdma_nl_mutex);
+        cb_table = READ_ONCE(rdma_nl_types[type].cb_table);
+        if (!cb_table) {
+                /*
+                 * Didn't get valid reference of the table, attempt module
+                 * load once.
+                 */
+                up_read(&rdma_nl_types[type].sem);
+
                 request_module("rdma-netlink-subsys-%d", type);
-                mutex_lock(&rdma_nl_mutex);
+
+                down_read(&rdma_nl_types[type].sem);
+                cb_table = READ_ONCE(rdma_nl_types[type].cb_table);
         }
-
-        cb_table = rdma_nl_types[type].cb_table;
-
         if (!cb_table || (!cb_table[op].dump && !cb_table[op].doit))
-                return false;
-        return true;
+                return NULL;
+        return cb_table;
 }
 
 void rdma_nl_register(unsigned int index,
                       const struct rdma_nl_cbs cb_table[])
 {
-        mutex_lock(&rdma_nl_mutex);
-        if (!is_nl_msg_valid(index, 0)) {
-                /*
-                 * All clients are not interesting in success/failure of
-                 * this call. They want to see the print to error log and
-                 * continue their initialization. Print warning for them,
-                 * because it is programmer's error to be here.
-                 */
-                mutex_unlock(&rdma_nl_mutex);
-                WARN(true,
-                     "The not-valid %u index was supplied to RDMA netlink\n",
-                     index);
+        if (WARN_ON(!is_nl_msg_valid(index, 0)) ||
+            WARN_ON(READ_ONCE(rdma_nl_types[index].cb_table)))
                 return;
-        }
 
-        if (rdma_nl_types[index].cb_table) {
-                mutex_unlock(&rdma_nl_mutex);
-                WARN(true,
-                     "The %u index is already registered in RDMA netlink\n",
-                     index);
-                return;
-        }
-
-        rdma_nl_types[index].cb_table = cb_table;
-        mutex_unlock(&rdma_nl_mutex);
+        /* Pairs with the READ_ONCE in is_nl_valid() */
+        smp_store_release(&rdma_nl_types[index].cb_table, cb_table);
 }
 EXPORT_SYMBOL(rdma_nl_register);
 
 void rdma_nl_unregister(unsigned int index)
 {
-        mutex_lock(&rdma_nl_mutex);
+        down_write(&rdma_nl_types[index].sem);
         rdma_nl_types[index].cb_table = NULL;
-        mutex_unlock(&rdma_nl_mutex);
+        up_write(&rdma_nl_types[index].sem);
 }
 EXPORT_SYMBOL(rdma_nl_unregister);
 
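The hunk above replaces the single global rdma_nl_mutex with a per-client scheme: rdma_nl_register() publishes the callback table with smp_store_release(), get_cb_table() reads it with READ_ONCE() while holding that client's semaphore in read mode (dropping it around request_module() so the freshly loaded module can register itself), and rdma_nl_unregister() takes the semaphore in write mode so it waits out any command still dispatching. The userspace C sketch below is only an analogue of that idea, not kernel code: the names (client_ops, clients, dispatch) are hypothetical, pthread_rwlock_t stands in for struct rw_semaphore, C11 release/acquire atomics stand in for smp_store_release()/READ_ONCE(), and the module-autoload retry is omitted.

        /* Hypothetical userspace analogue; compile with: cc -pthread demo.c */
        #include <pthread.h>
        #include <stdatomic.h>
        #include <stdio.h>

        #define NUM_CLIENTS 8

        struct client_ops {
                int (*doit)(int arg);
        };

        static struct {
                _Atomic(const struct client_ops *) cb_table;
                pthread_rwlock_t sem;   /* stands in for struct rw_semaphore */
        } clients[NUM_CLIENTS];

        static void clients_init(void)  /* mirrors the init_rwsem() loop below */
        {
                for (int i = 0; i < NUM_CLIENTS; i++)
                        pthread_rwlock_init(&clients[i].sem, NULL);
        }

        static void client_register(unsigned int idx, const struct client_ops *ops)
        {
                /* Release store pairs with the acquire load in dispatch(). */
                atomic_store_explicit(&clients[idx].cb_table, ops,
                                      memory_order_release);
        }

        static void client_unregister(unsigned int idx)
        {
                /* Write lock waits for every in-flight dispatch() to finish. */
                pthread_rwlock_wrlock(&clients[idx].sem);
                atomic_store_explicit(&clients[idx].cb_table, NULL,
                                      memory_order_relaxed);
                pthread_rwlock_unlock(&clients[idx].sem);
        }

        static int dispatch(unsigned int idx, int arg)
        {
                const struct client_ops *ops;
                int err = -1;

                pthread_rwlock_rdlock(&clients[idx].sem);
                ops = atomic_load_explicit(&clients[idx].cb_table,
                                           memory_order_acquire);
                if (ops && ops->doit)
                        err = ops->doit(arg);   /* table stays valid while held */
                pthread_rwlock_unlock(&clients[idx].sem);
                return err;
        }

        static int demo_doit(int arg) { return printf("doit(%d)\n", arg), 0; }

        int main(void)
        {
                static const struct client_ops ops = { .doit = demo_doit };

                clients_init();
                client_register(3, &ops);
                dispatch(3, 42);                        /* prints doit(42) */
                client_unregister(3);
                return dispatch(3, 42) == -1 ? 0 : 1;   /* now fails cleanly */
        }

The property mirrored here is the one the new struct comment describes: unregistration cannot complete while a command still holds the read side, so a cleared callback table is never used after the client is gone.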
@@ -161,15 +156,21 @@
         unsigned int index = RDMA_NL_GET_CLIENT(type);
         unsigned int op = RDMA_NL_GET_OP(type);
         const struct rdma_nl_cbs *cb_table;
+        int err = -EINVAL;
 
-        if (!is_nl_valid(index, op))
+        if (!is_nl_msg_valid(index, op))
                 return -EINVAL;
 
-        cb_table = rdma_nl_types[index].cb_table;
+        down_read(&rdma_nl_types[index].sem);
+        cb_table = get_cb_table(skb, index, op);
+        if (!cb_table)
+                goto done;
 
         if ((cb_table[op].flags & RDMA_NL_ADMIN_PERM) &&
-            !netlink_capable(skb, CAP_NET_ADMIN))
-                return -EPERM;
+            !netlink_capable(skb, CAP_NET_ADMIN)) {
+                err = -EPERM;
+                goto done;
+        }
 
         /*
          * LS responses overload the 0x100 (NLM_F_ROOT) flag. Don't
@@ -177,24 +178,24 @@
          */
         if (index == RDMA_NL_LS) {
                 if (cb_table[op].doit)
-                        return cb_table[op].doit(skb, nlh, extack);
-                return -EINVAL;
+                        err = cb_table[op].doit(skb, nlh, extack);
+                goto done;
         }
         /* FIXME: Convert IWCM to properly handle doit callbacks */
-        if ((nlh->nlmsg_flags & NLM_F_DUMP) || index == RDMA_NL_RDMA_CM ||
-            index == RDMA_NL_IWCM) {
+        if ((nlh->nlmsg_flags & NLM_F_DUMP) || index == RDMA_NL_IWCM) {
                 struct netlink_dump_control c = {
                         .dump = cb_table[op].dump,
                 };
                 if (c.dump)
-                        return netlink_dump_start(nls, skb, nlh, &c);
-                return -EINVAL;
+                        err = netlink_dump_start(skb->sk, skb, nlh, &c);
+                goto done;
         }
 
         if (cb_table[op].doit)
-                return cb_table[op].doit(skb, nlh, extack);
-
-        return 0;
+                err = cb_table[op].doit(skb, nlh, extack);
+done:
+        up_read(&rdma_nl_types[index].sem);
+        return err;
 }
 
 /*
@@ -255,47 +256,44 @@
 
 static void rdma_nl_rcv(struct sk_buff *skb)
 {
-        mutex_lock(&rdma_nl_mutex);
         rdma_nl_rcv_skb(skb, &rdma_nl_rcv_msg);
-        mutex_unlock(&rdma_nl_mutex);
 }
 
-int rdma_nl_unicast(struct sk_buff *skb, u32 pid)
+int rdma_nl_unicast(struct net *net, struct sk_buff *skb, u32 pid)
 {
+        struct rdma_dev_net *rnet = rdma_net_to_dev_net(net);
         int err;
 
-        err = netlink_unicast(nls, skb, pid, MSG_DONTWAIT);
+        err = netlink_unicast(rnet->nl_sock, skb, pid, MSG_DONTWAIT);
         return (err < 0) ? err : 0;
 }
 EXPORT_SYMBOL(rdma_nl_unicast);
 
-int rdma_nl_unicast_wait(struct sk_buff *skb, __u32 pid)
+int rdma_nl_unicast_wait(struct net *net, struct sk_buff *skb, __u32 pid)
 {
+        struct rdma_dev_net *rnet = rdma_net_to_dev_net(net);
         int err;
 
-        err = netlink_unicast(nls, skb, pid, 0);
+        err = netlink_unicast(rnet->nl_sock, skb, pid, 0);
         return (err < 0) ? err : 0;
 }
 EXPORT_SYMBOL(rdma_nl_unicast_wait);
 
-int rdma_nl_multicast(struct sk_buff *skb, unsigned int group, gfp_t flags)
+int rdma_nl_multicast(struct net *net, struct sk_buff *skb,
+                      unsigned int group, gfp_t flags)
 {
-        return nlmsg_multicast(nls, skb, 0, group, flags);
+        struct rdma_dev_net *rnet = rdma_net_to_dev_net(net);
+
+        return nlmsg_multicast(rnet->nl_sock, skb, 0, group, flags);
 }
 EXPORT_SYMBOL(rdma_nl_multicast);
 
-int __init rdma_nl_init(void)
+void rdma_nl_init(void)
 {
-        struct netlink_kernel_cfg cfg = {
-                .input = rdma_nl_rcv,
-        };
+        int idx;
 
-        nls = netlink_kernel_create(&init_net, NETLINK_RDMA, &cfg);
-        if (!nls)
-                return -ENOMEM;
-
-        nls->sk_sndtimeo = 10 * HZ;
-        return 0;
+        for (idx = 0; idx < RDMA_NL_NUM_CLIENTS; idx++)
+                init_rwsem(&rdma_nl_types[idx].sem);
 }
 
 void rdma_nl_exit(void)
@@ -303,9 +301,31 @@
         int idx;
 
         for (idx = 0; idx < RDMA_NL_NUM_CLIENTS; idx++)
-                rdma_nl_unregister(idx);
+                WARN(rdma_nl_types[idx].cb_table,
+                     "Netlink client %d wasn't released prior to unloading %s\n",
+                     idx, KBUILD_MODNAME);
+}
 
-        netlink_kernel_release(nls);
+int rdma_nl_net_init(struct rdma_dev_net *rnet)
+{
+        struct net *net = read_pnet(&rnet->net);
+        struct netlink_kernel_cfg cfg = {
+                .input = rdma_nl_rcv,
+        };
+        struct sock *nls;
+
+        nls = netlink_kernel_create(net, NETLINK_RDMA, &cfg);
+        if (!nls)
+                return -ENOMEM;
+
+        nls->sk_sndtimeo = 10 * HZ;
+        rnet->nl_sock = nls;
+        return 0;
+}
+
+void rdma_nl_net_exit(struct rdma_dev_net *rnet)
+{
+        netlink_kernel_release(rnet->nl_sock);
 }
 
 MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_RDMA);
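Taken together with rdma_nl_net_init()/rdma_nl_net_exit() above, the patch moves from one global NETLINK_RDMA socket to one socket per network namespace, stored in struct rdma_dev_net and reached through rdma_net_to_dev_net(). The sketch below is an assumption about how such per-namespace state is usually wired up with the generic netns API (pernet_operations plus net_generic()); the real RDMA registration lives outside this file, and every demo_* name and the protocol number here are hypothetical.

        /* Hypothetical module sketch of per-netns private state; not RDMA code. */
        #include <linux/errno.h>
        #include <linux/module.h>
        #include <linux/netlink.h>
        #include <net/net_namespace.h>
        #include <net/netns/generic.h>
        #include <net/sock.h>

        #define DEMO_NETLINK_PROTO 31   /* hypothetical, unused protocol slot */

        static unsigned int demo_net_id;

        struct demo_net {
                struct sock *nl_sock;   /* one kernel netlink socket per netns */
        };

        static __net_init int demo_net_init(struct net *net)
        {
                /* .size in demo_net_ops makes the core allocate this per netns */
                struct demo_net *dn = net_generic(net, demo_net_id);
                struct netlink_kernel_cfg cfg = {};

                dn->nl_sock = netlink_kernel_create(net, DEMO_NETLINK_PROTO, &cfg);
                return dn->nl_sock ? 0 : -ENOMEM;
        }

        static __net_exit void demo_net_exit(struct net *net)
        {
                struct demo_net *dn = net_generic(net, demo_net_id);

                netlink_kernel_release(dn->nl_sock);
        }

        static struct pernet_operations demo_net_ops = {
                .init = demo_net_init,
                .exit = demo_net_exit,
                .id   = &demo_net_id,
                .size = sizeof(struct demo_net),
        };

        static int __init demo_init(void)
        {
                return register_pernet_subsys(&demo_net_ops);
        }

        static void __exit demo_exit(void)
        {
                unregister_pernet_subsys(&demo_net_ops);
        }

        module_init(demo_init);
        module_exit(demo_exit);
        MODULE_LICENSE("GPL");

rdma_nl_net_init() and rdma_nl_net_exit() are presumably called from such a per-namespace init/exit path, with struct rdma_dev_net playing the role of the per-netns private block.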