.. | ..
| 1 | +// SPDX-License-Identifier: GPL-2.0-only
1 | 2 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
2 | 3 |
3 | 4 | #include <linux/workqueue.h>
.. | ..
18 | 19 | #include <linux/net_namespace.h>
19 | 20 | #include <linux/sched/task.h>
20 | 21 | #include <linux/uidgid.h>
| 22 | +#include <linux/cookie.h>
21 | 23 |
22 | 24 | #include <net/sock.h>
23 | 25 | #include <net/netlink.h>
.. | ..
38 | 40 | DECLARE_RWSEM(net_rwsem);
39 | 41 | EXPORT_SYMBOL_GPL(net_rwsem);
40 | 42 |
| 43 | +#ifdef CONFIG_KEYS
| 44 | +static struct key_tag init_net_key_domain = { .usage = REFCOUNT_INIT(1) };
| 45 | +#endif
| 46 | +
41 | 47 | struct net init_net = {
42 | 48 | 	.count = REFCOUNT_INIT(1),
43 | 49 | 	.dev_base_head = LIST_HEAD_INIT(init_net.dev_base_head),
| 50 | +#ifdef CONFIG_KEYS
| 51 | +	.key_domain = &init_net_key_domain,
| 52 | +#endif
44 | 53 | };
45 | 54 | EXPORT_SYMBOL(init_net);
46 | 55 |
.. | ..
60 | 69 | #define INITIAL_NET_GEN_PTRS 13 /* +1 for len +2 for rcu_head */
61 | 70 |
62 | 71 | static unsigned int max_gen_ptrs = INITIAL_NET_GEN_PTRS;
| 72 | +
| 73 | +DEFINE_COOKIE(net_cookie);
| 74 | +
| 75 | +u64 __net_gen_cookie(struct net *net)
| 76 | +{
| 77 | +	while (1) {
| 78 | +		u64 res = atomic64_read(&net->net_cookie);
| 79 | +
| 80 | +		if (res)
| 81 | +			return res;
| 82 | +		res = gen_cookie_next(&net_cookie);
| 83 | +		atomic64_cmpxchg(&net->net_cookie, 0, res);
| 84 | +	}
| 85 | +}
63 | 86 |
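The `__net_gen_cookie()` helper added above assigns each namespace a unique 64-bit cookie lazily: the first caller draws a value from the global `net_cookie` generator and publishes it with `atomic64_cmpxchg()`; a concurrent caller either wins that race or loops and re-reads the winner's value, so every caller sees the same non-zero cookie. Below is a minimal userspace sketch of the same pattern; the `obj` and `next_cookie` names are invented for illustration and are not part of the patch.

```c
/*
 * Minimal userspace sketch of the lazy-cookie pattern used by
 * __net_gen_cookie(); "obj" and "next_cookie" are made-up names,
 * not part of the kernel patch.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint64_t next_cookie = 1;	/* global generator, like net_cookie */

struct obj {
	_Atomic uint64_t cookie;		/* 0 means "not assigned yet" */
};

static uint64_t obj_gen_cookie(struct obj *o)
{
	for (;;) {
		uint64_t res = atomic_load(&o->cookie);
		uint64_t zero = 0;

		if (res)
			return res;		/* already assigned, reuse it */

		res = atomic_fetch_add(&next_cookie, 1);
		/* Only one racing caller wins; losers loop and read the winner's value. */
		atomic_compare_exchange_strong(&o->cookie, &zero, res);
	}
}

int main(void)
{
	struct obj o = { .cookie = 0 };

	printf("cookie=%llu, again=%llu\n",
	       (unsigned long long)obj_gen_cookie(&o),
	       (unsigned long long)obj_gen_cookie(&o));
	return 0;
}
```

The loop only retries when another thread installed a cookie between the read and the compare-and-swap, so the cookie is assigned exactly once and never changes afterwards.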
64 | 87 | static struct net_generic *net_alloc_generic(void)
65 | 88 | {
.. | ..
112 | 135 |
113 | 136 | static int ops_init(const struct pernet_operations *ops, struct net *net)
114 | 137 | {
| 138 | +	struct net_generic *ng;
115 | 139 | 	int err = -ENOMEM;
116 | 140 | 	void *data = NULL;
117 | 141 |
.. | ..
130 | 154 | 	if (!err)
131 | 155 | 		return 0;
132 | 156 |
| 157 | +	if (ops->id && ops->size) {
133 | 158 | cleanup:
| 159 | +		ng = rcu_dereference_protected(net->gen,
| 160 | +					       lockdep_is_held(&pernet_ops_rwsem));
| 161 | +		ng->ptr[*ops->id] = NULL;
| 162 | +	}
| 163 | +
134 | 164 | 	kfree(data);
135 | 165 |
136 | 166 | out:
.. | ..
141 | 171 | {
142 | 172 | 	if (ops->id && ops->size) {
143 | 173 | 		kfree(net_generic(net, *ops->id));
| 174 | +	}
| 175 | +}
| 176 | +
| 177 | +static void ops_pre_exit_list(const struct pernet_operations *ops,
| 178 | +			      struct list_head *net_exit_list)
| 179 | +{
| 180 | +	struct net *net;
| 181 | +
| 182 | +	if (ops->pre_exit) {
| 183 | +		list_for_each_entry(net, net_exit_list, exit_list)
| 184 | +			ops->pre_exit(net);
144 | 185 | 	}
145 | 186 | }
146 | 187 |
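The `ops_pre_exit_list()` helper added above backs the new pre_exit stage of namespace teardown: for each dying namespace the core now calls every subsystem's `->pre_exit()`, issues a single `synchronize_rcu()`, and only then runs `->exit()` (see the `cleanup_net()` and error-path hunks later in this diff). A hedged sketch of a subsystem using the hook follows; the "foo" names are hypothetical and not part of this patch.

```c
/*
 * Hedged sketch of a pernet subsystem using the new hook; "foo" is a
 * hypothetical subsystem, not something added by this patch.
 */
#include <linux/module.h>
#include <net/net_namespace.h>

static int __net_init foo_net_init(struct net *net)
{
	/* allocate per-netns state and publish it with rcu_assign_pointer() */
	return 0;
}

static void __net_exit foo_net_pre_exit(struct net *net)
{
	/* unpublish: clear the RCU-visible pointer, unregister hooks */
}

static void __net_exit foo_net_exit(struct net *net)
{
	/* the core ran synchronize_rcu() after pre_exit, so freeing is safe here */
}

static struct pernet_operations foo_net_ops = {
	.init     = foo_net_init,
	.pre_exit = foo_net_pre_exit,
	.exit     = foo_net_exit,
};
/* registered as usual, e.g. via register_pernet_subsys(&foo_net_ops) */
```

The point of the split is that `->pre_exit()` detaches RCU-visible state while readers may still be running, and the shared grace period lets `->exit()` free that state without every subsystem paying for its own `synchronize_rcu()`.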
.. | ..
194 | 235 | 	return 0;
195 | 236 | }
196 | 237 |
197 | | -/* Must be called from RCU-critical section or with nsid_lock held. If
198 | | - * a new id is assigned, the bool alloc is set to true, thus the
199 | | - * caller knows that the new id must be notified via rtnl.
200 | | - */
201 | | -static int __peernet2id_alloc(struct net *net, struct net *peer, bool *alloc)
| 238 | +/* Must be called from RCU-critical section or with nsid_lock held */
| 239 | +static int __peernet2id(const struct net *net, struct net *peer)
202 | 240 | {
203 | 241 | 	int id = idr_for_each(&net->netns_ids, net_eq_idr, peer);
204 | | -	bool alloc_it = *alloc;
205 | | -
206 | | -	*alloc = false;
207 | 242 |
208 | 243 | 	/* Magic value for id 0. */
209 | 244 | 	if (id == NET_ID_ZERO)
.. | ..
211 | 246 | 	if (id > 0)
212 | 247 | 		return id;
213 | 248 |
214 | | -	if (alloc_it) {
215 | | -		id = alloc_netid(net, peer, -1);
216 | | -		*alloc = true;
217 | | -		return id >= 0 ? id : NETNSA_NSID_NOT_ASSIGNED;
218 | | -	}
219 | | -
220 | 249 | 	return NETNSA_NSID_NOT_ASSIGNED;
221 | 250 | }
222 | 251 |
223 | | -/* Must be called from RCU-critical section or with nsid_lock held */
224 | | -static int __peernet2id(struct net *net, struct net *peer)
225 | | -{
226 | | -	bool no = false;
227 | | -
228 | | -	return __peernet2id_alloc(net, peer, &no);
229 | | -}
230 | | -
231 | | -static void rtnl_net_notifyid(struct net *net, int cmd, int id, gfp_t gfp);
| 252 | +static void rtnl_net_notifyid(struct net *net, int cmd, int id, u32 portid,
| 253 | +			      struct nlmsghdr *nlh, gfp_t gfp);
232 | 254 | /* This function returns the id of a peer netns. If no id is assigned, one will
233 | 255 |  * be allocated and returned.
234 | 256 |  */
235 | 257 | int peernet2id_alloc(struct net *net, struct net *peer, gfp_t gfp)
236 | 258 | {
237 | | -	bool alloc = false, alive = false;
238 | 259 | 	int id;
239 | 260 |
240 | 261 | 	if (refcount_read(&net->count) == 0)
241 | 262 | 		return NETNSA_NSID_NOT_ASSIGNED;
| 263 | +
242 | 264 | 	spin_lock_bh(&net->nsid_lock);
243 | | -	/*
244 | | -	 * When peer is obtained from RCU lists, we may race with
| 265 | +	id = __peernet2id(net, peer);
| 266 | +	if (id >= 0) {
| 267 | +		spin_unlock_bh(&net->nsid_lock);
| 268 | +		return id;
| 269 | +	}
| 270 | +
| 271 | +	/* When peer is obtained from RCU lists, we may race with
245 | 272 | 	 * its cleanup. Check whether it's alive, and this guarantees
246 | 273 | 	 * we never hash a peer back to net->netns_ids, after it has
247 | 274 | 	 * just been idr_remove()'d from there in cleanup_net().
248 | 275 | 	 */
249 | | -	if (maybe_get_net(peer))
250 | | -		alive = alloc = true;
251 | | -	id = __peernet2id_alloc(net, peer, &alloc);
| 276 | +	if (!maybe_get_net(peer)) {
| 277 | +		spin_unlock_bh(&net->nsid_lock);
| 278 | +		return NETNSA_NSID_NOT_ASSIGNED;
| 279 | +	}
| 280 | +
| 281 | +	id = alloc_netid(net, peer, -1);
252 | 282 | 	spin_unlock_bh(&net->nsid_lock);
253 | | -	if (alloc && id >= 0)
254 | | -		rtnl_net_notifyid(net, RTM_NEWNSID, id, gfp);
255 | | -	if (alive)
256 | | -		put_net(peer);
| 283 | +
| 284 | +	put_net(peer);
| 285 | +	if (id < 0)
| 286 | +		return NETNSA_NSID_NOT_ASSIGNED;
| 287 | +
| 288 | +	rtnl_net_notifyid(net, RTM_NEWNSID, id, 0, NULL, gfp);
| 289 | +
257 | 290 | 	return id;
258 | 291 | }
259 | 292 | EXPORT_SYMBOL_GPL(peernet2id_alloc);
260 | 293 |
261 | 294 | /* This function returns, if assigned, the id of a peer netns. */
262 | | -int peernet2id(struct net *net, struct net *peer)
| 295 | +int peernet2id(const struct net *net, struct net *peer)
263 | 296 | {
264 | 297 | 	int id;
265 | 298 |
.. | ..
274 | 307 | /* This function returns true is the peer netns has an id assigned into the
275 | 308 |  * current netns.
276 | 309 |  */
277 | | -bool peernet_has_id(struct net *net, struct net *peer)
| 310 | +bool peernet_has_id(const struct net *net, struct net *peer)
278 | 311 | {
279 | 312 | 	return peernet2id(net, peer) >= 0;
280 | 313 | }
281 | 314 |
282 | | -struct net *get_net_ns_by_id(struct net *net, int id)
| 315 | +struct net *get_net_ns_by_id(const struct net *net, int id)
283 | 316 | {
284 | 317 | 	struct net *peer;
285 | 318 |
.. | ..
331 | 364 | 	 */
332 | 365 | 	list_add(&net->exit_list, &net_exit_list);
333 | 366 | 	saved_ops = ops;
| 367 | +	list_for_each_entry_continue_reverse(ops, &pernet_list, list)
| 368 | +		ops_pre_exit_list(ops, &net_exit_list);
| 369 | +
| 370 | +	synchronize_rcu();
| 371 | +
| 372 | +	ops = saved_ops;
334 | 373 | 	list_for_each_entry_continue_reverse(ops, &pernet_list, list)
335 | 374 | 		ops_exit_list(ops, &net_exit_list);
336 | 375 |
.. | ..
389 | 428 | 	if (!net)
390 | 429 | 		goto out_free;
391 | 430 |
| 431 | +#ifdef CONFIG_KEYS
| 432 | +	net->key_domain = kzalloc(sizeof(struct key_tag), GFP_KERNEL);
| 433 | +	if (!net->key_domain)
| 434 | +		goto out_free_2;
| 435 | +	refcount_set(&net->key_domain->usage, 1);
| 436 | +#endif
| 437 | +
392 | 438 | 	rcu_assign_pointer(net->gen, ng);
393 | 439 | out:
394 | 440 | 	return net;
395 | 441 |
| 442 | +#ifdef CONFIG_KEYS
| 443 | +out_free_2:
| 444 | +	kmem_cache_free(net_cachep, net);
| 445 | +	net = NULL;
| 446 | +#endif
396 | 447 | out_free:
397 | 448 | 	kfree(ng);
398 | 449 | 	goto out;
.. | ..
444 | 495 |
445 | 496 | 	if (rv < 0) {
446 | 497 | put_userns:
| 498 | +#ifdef CONFIG_KEYS
| 499 | +		key_remove_domain(net->key_domain);
| 500 | +#endif
447 | 501 | 		put_user_ns(user_ns);
448 | 502 | 		net_drop_ns(net);
449 | 503 | dec_ucounts:
.. | ..
498 | 552 | 			idr_remove(&tmp->netns_ids, id);
499 | 553 | 		spin_unlock_bh(&tmp->nsid_lock);
500 | 554 | 		if (id >= 0)
501 | | -			rtnl_net_notifyid(tmp, RTM_DELNSID, id,
| 555 | +			rtnl_net_notifyid(tmp, RTM_DELNSID, id, 0, NULL,
502 | 556 | 					  GFP_KERNEL);
503 | 557 | 		if (tmp == last)
504 | 558 | 			break;
.. | ..
544 | 598 | 		list_add_tail(&net->exit_list, &net_exit_list);
545 | 599 | 	}
546 | 600 |
| 601 | +	/* Run all of the network namespace pre_exit methods */
| 602 | +	list_for_each_entry_reverse(ops, &pernet_list, list)
| 603 | +		ops_pre_exit_list(ops, &net_exit_list);
| 604 | +
547 | 605 | 	/*
548 | 606 | 	 * Another CPU might be rcu-iterating the list, wait for it.
549 | 607 | 	 * This needs to be before calling the exit() notifiers, so
550 | 608 | 	 * the rcu_barrier() below isn't sufficient alone.
| 609 | +	 * Also the pre_exit() and exit() methods need this barrier.
551 | 610 | 	 */
552 | 611 | 	synchronize_rcu();
553 | 612 |
.. | ..
570 | 629 | 	list_for_each_entry_safe(net, tmp, &net_exit_list, exit_list) {
571 | 630 | 		list_del_init(&net->exit_list);
572 | 631 | 		dec_net_namespaces(net->ucounts);
| 632 | +#ifdef CONFIG_KEYS
| 633 | +		key_remove_domain(net->key_domain);
| 634 | +#endif
573 | 635 | 		put_user_ns(net->user_ns);
574 | 636 | 		net_drop_ns(net);
575 | 637 | 	}
.. | ..
686 | 748 | 	[NETNSA_NSID] = { .type = NLA_S32 },
687 | 749 | 	[NETNSA_PID] = { .type = NLA_U32 },
688 | 750 | 	[NETNSA_FD] = { .type = NLA_U32 },
| 751 | +	[NETNSA_TARGET_NSID] = { .type = NLA_S32 },
689 | 752 | };
690 | 753 |
691 | 754 | static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh,
.. | ..
697 | 760 | 	struct net *peer;
698 | 761 | 	int nsid, err;
699 | 762 |
700 | | -	err = nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX,
701 | | -			  rtnl_net_policy, extack);
| 763 | +	err = nlmsg_parse_deprecated(nlh, sizeof(struct rtgenmsg), tb,
| 764 | +				     NETNSA_MAX, rtnl_net_policy, extack);
702 | 765 | 	if (err < 0)
703 | 766 | 		return err;
704 | 767 | 	if (!tb[NETNSA_NSID]) {
.. | ..
736 | 799 | 	err = alloc_netid(net, peer, nsid);
737 | 800 | 	spin_unlock_bh(&net->nsid_lock);
738 | 801 | 	if (err >= 0) {
739 | | -		rtnl_net_notifyid(net, RTM_NEWNSID, err, GFP_KERNEL);
| 802 | +		rtnl_net_notifyid(net, RTM_NEWNSID, err, NETLINK_CB(skb).portid,
| 803 | +				  nlh, GFP_KERNEL);
740 | 804 | 		err = 0;
741 | 805 | 	} else if (err == -ENOSPC && nsid >= 0) {
742 | 806 | 		err = -EEXIST;
.. | ..
752 | 816 | {
753 | 817 | 	return NLMSG_ALIGN(sizeof(struct rtgenmsg))
754 | 818 | 	       + nla_total_size(sizeof(s32)) /* NETNSA_NSID */
| 819 | +	       + nla_total_size(sizeof(s32)) /* NETNSA_CURRENT_NSID */
755 | 820 | 	       ;
756 | 821 | }
757 | 822 |
758 | | -static int rtnl_net_fill(struct sk_buff *skb, u32 portid, u32 seq, int flags,
759 | | -			 int cmd, struct net *net, int nsid)
| 823 | +struct net_fill_args {
| 824 | +	u32 portid;
| 825 | +	u32 seq;
| 826 | +	int flags;
| 827 | +	int cmd;
| 828 | +	int nsid;
| 829 | +	bool add_ref;
| 830 | +	int ref_nsid;
| 831 | +};
| 832 | +
| 833 | +static int rtnl_net_fill(struct sk_buff *skb, struct net_fill_args *args)
760 | 834 | {
761 | 835 | 	struct nlmsghdr *nlh;
762 | 836 | 	struct rtgenmsg *rth;
763 | 837 |
764 | | -	nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rth), flags);
| 838 | +	nlh = nlmsg_put(skb, args->portid, args->seq, args->cmd, sizeof(*rth),
| 839 | +			args->flags);
765 | 840 | 	if (!nlh)
766 | 841 | 		return -EMSGSIZE;
767 | 842 |
768 | 843 | 	rth = nlmsg_data(nlh);
769 | 844 | 	rth->rtgen_family = AF_UNSPEC;
770 | 845 |
771 | | -	if (nla_put_s32(skb, NETNSA_NSID, nsid))
| 846 | +	if (nla_put_s32(skb, NETNSA_NSID, args->nsid))
| 847 | +		goto nla_put_failure;
| 848 | +
| 849 | +	if (args->add_ref &&
| 850 | +	    nla_put_s32(skb, NETNSA_CURRENT_NSID, args->ref_nsid))
772 | 851 | 		goto nla_put_failure;
773 | 852 |
774 | 853 | 	nlmsg_end(skb, nlh);
.. | ..
779 | 858 | 	return -EMSGSIZE;
780 | 859 | }
781 | 860 |
| 861 | +static int rtnl_net_valid_getid_req(struct sk_buff *skb,
| 862 | +				    const struct nlmsghdr *nlh,
| 863 | +				    struct nlattr **tb,
| 864 | +				    struct netlink_ext_ack *extack)
| 865 | +{
| 866 | +	int i, err;
| 867 | +
| 868 | +	if (!netlink_strict_get_check(skb))
| 869 | +		return nlmsg_parse_deprecated(nlh, sizeof(struct rtgenmsg),
| 870 | +					      tb, NETNSA_MAX, rtnl_net_policy,
| 871 | +					      extack);
| 872 | +
| 873 | +	err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct rtgenmsg), tb,
| 874 | +					    NETNSA_MAX, rtnl_net_policy,
| 875 | +					    extack);
| 876 | +	if (err)
| 877 | +		return err;
| 878 | +
| 879 | +	for (i = 0; i <= NETNSA_MAX; i++) {
| 880 | +		if (!tb[i])
| 881 | +			continue;
| 882 | +
| 883 | +		switch (i) {
| 884 | +		case NETNSA_PID:
| 885 | +		case NETNSA_FD:
| 886 | +		case NETNSA_NSID:
| 887 | +		case NETNSA_TARGET_NSID:
| 888 | +			break;
| 889 | +		default:
| 890 | +			NL_SET_ERR_MSG(extack, "Unsupported attribute in peer netns getid request");
| 891 | +			return -EINVAL;
| 892 | +		}
| 893 | +	}
| 894 | +
| 895 | +	return 0;
| 896 | +}
| 897 | +
782 | 898 | static int rtnl_net_getid(struct sk_buff *skb, struct nlmsghdr *nlh,
783 | 899 | 			  struct netlink_ext_ack *extack)
784 | 900 | {
785 | 901 | 	struct net *net = sock_net(skb->sk);
786 | 902 | 	struct nlattr *tb[NETNSA_MAX + 1];
| 903 | +	struct net_fill_args fillargs = {
| 904 | +		.portid = NETLINK_CB(skb).portid,
| 905 | +		.seq = nlh->nlmsg_seq,
| 906 | +		.cmd = RTM_NEWNSID,
| 907 | +	};
| 908 | +	struct net *peer, *target = net;
787 | 909 | 	struct nlattr *nla;
788 | 910 | 	struct sk_buff *msg;
789 | | -	struct net *peer;
790 | | -	int err, id;
| 911 | +	int err;
791 | 912 |
792 | | -	err = nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX,
793 | | -			  rtnl_net_policy, extack);
| 913 | +	err = rtnl_net_valid_getid_req(skb, nlh, tb, extack);
794 | 914 | 	if (err < 0)
795 | 915 | 		return err;
796 | 916 | 	if (tb[NETNSA_PID]) {
.. | ..
799 | 919 | 	} else if (tb[NETNSA_FD]) {
800 | 920 | 		peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
801 | 921 | 		nla = tb[NETNSA_FD];
| 922 | +	} else if (tb[NETNSA_NSID]) {
| 923 | +		peer = get_net_ns_by_id(net, nla_get_s32(tb[NETNSA_NSID]));
| 924 | +		if (!peer)
| 925 | +			peer = ERR_PTR(-ENOENT);
| 926 | +		nla = tb[NETNSA_NSID];
802 | 927 | 	} else {
803 | 928 | 		NL_SET_ERR_MSG(extack, "Peer netns reference is missing");
804 | 929 | 		return -EINVAL;
.. | ..
810 | 935 | 		return PTR_ERR(peer);
811 | 936 | 	}
812 | 937 |
| 938 | +	if (tb[NETNSA_TARGET_NSID]) {
| 939 | +		int id = nla_get_s32(tb[NETNSA_TARGET_NSID]);
| 940 | +
| 941 | +		target = rtnl_get_net_ns_capable(NETLINK_CB(skb).sk, id);
| 942 | +		if (IS_ERR(target)) {
| 943 | +			NL_SET_BAD_ATTR(extack, tb[NETNSA_TARGET_NSID]);
| 944 | +			NL_SET_ERR_MSG(extack,
| 945 | +				       "Target netns reference is invalid");
| 946 | +			err = PTR_ERR(target);
| 947 | +			goto out;
| 948 | +		}
| 949 | +		fillargs.add_ref = true;
| 950 | +		fillargs.ref_nsid = peernet2id(net, peer);
| 951 | +	}
| 952 | +
813 | 953 | 	msg = nlmsg_new(rtnl_net_get_size(), GFP_KERNEL);
814 | 954 | 	if (!msg) {
815 | 955 | 		err = -ENOMEM;
816 | 956 | 		goto out;
817 | 957 | 	}
818 | 958 |
819 | | -	id = peernet2id(net, peer);
820 | | -	err = rtnl_net_fill(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
821 | | -			    RTM_NEWNSID, net, id);
| 959 | +	fillargs.nsid = peernet2id(target, peer);
| 960 | +	err = rtnl_net_fill(msg, &fillargs);
822 | 961 | 	if (err < 0)
823 | 962 | 		goto err_out;
824 | 963 |
.. | ..
828 | 967 | err_out:
829 | 968 | 	nlmsg_free(msg);
830 | 969 | out:
| 970 | +	if (fillargs.add_ref)
| 971 | +		put_net(target);
831 | 972 | 	put_net(peer);
832 | 973 | 	return err;
833 | 974 | }
834 | 975 |
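With the hunk above, an RTM_GETNSID request can name the peer by NETNSA_PID, NETNSA_FD or (new here) NETNSA_NSID, and can additionally carry NETNSA_TARGET_NSID to ask for the peer's id as seen from another namespace; the reply then also includes NETNSA_CURRENT_NSID. The following is a hedged, send-only userspace sketch of such a request over a raw NETLINK_ROUTE socket; the id values are arbitrary and reply parsing is omitted.

```c
/*
 * Hedged illustration (not part of the patch): an RTM_GETNSID request using
 * the new NETNSA_TARGET_NSID attribute. Peer id 1 and target id 2 are made up.
 */
#include <linux/net_namespace.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

static void add_s32(struct nlmsghdr *nlh, unsigned short type, int32_t value)
{
	struct rtattr *rta = (struct rtattr *)((char *)nlh + NLMSG_ALIGN(nlh->nlmsg_len));

	rta->rta_type = type;
	rta->rta_len = RTA_LENGTH(sizeof(value));
	memcpy(RTA_DATA(rta), &value, sizeof(value));
	nlh->nlmsg_len = NLMSG_ALIGN(nlh->nlmsg_len) + RTA_ALIGN(rta->rta_len);
}

int main(void)
{
	struct sockaddr_nl dst = { .nl_family = AF_NETLINK };
	char buf[256] = { 0 };
	struct nlmsghdr *nlh = (struct nlmsghdr *)buf;
	struct rtgenmsg *rt;
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	nlh->nlmsg_len = NLMSG_LENGTH(sizeof(*rt));
	nlh->nlmsg_type = RTM_GETNSID;
	nlh->nlmsg_flags = NLM_F_REQUEST;
	rt = NLMSG_DATA(nlh);
	rt->rtgen_family = AF_UNSPEC;

	add_s32(nlh, NETNSA_NSID, 1);		/* the peer, named by its id in our netns */
	add_s32(nlh, NETNSA_TARGET_NSID, 2);	/* answer from the point of view of netns id 2 */

	if (sendto(fd, buf, nlh->nlmsg_len, 0,
		   (struct sockaddr *)&dst, sizeof(dst)) < 0)
		perror("sendto");

	/* The RTM_NEWNSID reply carries NETNSA_NSID (peer's id in the target
	 * netns) and NETNSA_CURRENT_NSID (peer's id in the requester's netns). */
	close(fd);
	return 0;
}
```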
835 | 976 | struct rtnl_net_dump_cb {
836 | | -	struct net *net;
| 977 | +	struct net *tgt_net;
| 978 | +	struct net *ref_net;
837 | 979 | 	struct sk_buff *skb;
838 | | -	struct netlink_callback *cb;
| 980 | +	struct net_fill_args fillargs;
839 | 981 | 	int idx;
840 | 982 | 	int s_idx;
841 | 983 | };
.. | ..
849 | 991 | 	if (net_cb->idx < net_cb->s_idx)
850 | 992 | 		goto cont;
851 | 993 |
852 | | -	ret = rtnl_net_fill(net_cb->skb, NETLINK_CB(net_cb->cb->skb).portid,
853 | | -			    net_cb->cb->nlh->nlmsg_seq, NLM_F_MULTI,
854 | | -			    RTM_NEWNSID, net_cb->net, id);
| 994 | +	net_cb->fillargs.nsid = id;
| 995 | +	if (net_cb->fillargs.add_ref)
| 996 | +		net_cb->fillargs.ref_nsid = __peernet2id(net_cb->ref_net, peer);
| 997 | +	ret = rtnl_net_fill(net_cb->skb, &net_cb->fillargs);
855 | 998 | 	if (ret < 0)
856 | 999 | 		return ret;
857 | 1000 |
.. | ..
860 | 1003 | 	return 0;
861 | 1004 | }
862 | 1005 |
| 1006 | +static int rtnl_valid_dump_net_req(const struct nlmsghdr *nlh, struct sock *sk,
| 1007 | +				   struct rtnl_net_dump_cb *net_cb,
| 1008 | +				   struct netlink_callback *cb)
| 1009 | +{
| 1010 | +	struct netlink_ext_ack *extack = cb->extack;
| 1011 | +	struct nlattr *tb[NETNSA_MAX + 1];
| 1012 | +	int err, i;
| 1013 | +
| 1014 | +	err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct rtgenmsg), tb,
| 1015 | +					    NETNSA_MAX, rtnl_net_policy,
| 1016 | +					    extack);
| 1017 | +	if (err < 0)
| 1018 | +		return err;
| 1019 | +
| 1020 | +	for (i = 0; i <= NETNSA_MAX; i++) {
| 1021 | +		if (!tb[i])
| 1022 | +			continue;
| 1023 | +
| 1024 | +		if (i == NETNSA_TARGET_NSID) {
| 1025 | +			struct net *net;
| 1026 | +
| 1027 | +			net = rtnl_get_net_ns_capable(sk, nla_get_s32(tb[i]));
| 1028 | +			if (IS_ERR(net)) {
| 1029 | +				NL_SET_BAD_ATTR(extack, tb[i]);
| 1030 | +				NL_SET_ERR_MSG(extack,
| 1031 | +					       "Invalid target network namespace id");
| 1032 | +				return PTR_ERR(net);
| 1033 | +			}
| 1034 | +			net_cb->fillargs.add_ref = true;
| 1035 | +			net_cb->ref_net = net_cb->tgt_net;
| 1036 | +			net_cb->tgt_net = net;
| 1037 | +		} else {
| 1038 | +			NL_SET_BAD_ATTR(extack, tb[i]);
| 1039 | +			NL_SET_ERR_MSG(extack,
| 1040 | +				       "Unsupported attribute in dump request");
| 1041 | +			return -EINVAL;
| 1042 | +		}
| 1043 | +	}
| 1044 | +
| 1045 | +	return 0;
| 1046 | +}
| 1047 | +
863 | 1048 | static int rtnl_net_dumpid(struct sk_buff *skb, struct netlink_callback *cb)
864 | 1049 | {
865 | | -	struct net *net = sock_net(skb->sk);
866 | 1050 | 	struct rtnl_net_dump_cb net_cb = {
867 | | -		.net = net,
| 1051 | +		.tgt_net = sock_net(skb->sk),
868 | 1052 | 		.skb = skb,
869 | | -		.cb = cb,
| 1053 | +		.fillargs = {
| 1054 | +			.portid = NETLINK_CB(cb->skb).portid,
| 1055 | +			.seq = cb->nlh->nlmsg_seq,
| 1056 | +			.flags = NLM_F_MULTI,
| 1057 | +			.cmd = RTM_NEWNSID,
| 1058 | +		},
870 | 1059 | 		.idx = 0,
871 | 1060 | 		.s_idx = cb->args[0],
872 | 1061 | 	};
| 1062 | +	int err = 0;
| 1063 | +
| 1064 | +	if (cb->strict_check) {
| 1065 | +		err = rtnl_valid_dump_net_req(cb->nlh, skb->sk, &net_cb, cb);
| 1066 | +		if (err < 0)
| 1067 | +			goto end;
| 1068 | +	}
873 | 1069 |
874 | 1070 | 	rcu_read_lock();
875 | | -	idr_for_each(&net->netns_ids, rtnl_net_dumpid_one, &net_cb);
| 1071 | +	idr_for_each(&net_cb.tgt_net->netns_ids, rtnl_net_dumpid_one, &net_cb);
876 | 1072 | 	rcu_read_unlock();
877 | 1073 |
878 | 1074 | 	cb->args[0] = net_cb.idx;
879 | | -	return skb->len;
| 1075 | +end:
| 1076 | +	if (net_cb.fillargs.add_ref)
| 1077 | +		put_net(net_cb.tgt_net);
| 1078 | +	return err < 0 ? err : skb->len;
880 | 1079 | }
881 | 1080 |
882 | | -static void rtnl_net_notifyid(struct net *net, int cmd, int id, gfp_t gfp)
| 1081 | +static void rtnl_net_notifyid(struct net *net, int cmd, int id, u32 portid,
| 1082 | +			      struct nlmsghdr *nlh, gfp_t gfp)
883 | 1083 | {
| 1084 | +	struct net_fill_args fillargs = {
| 1085 | +		.portid = portid,
| 1086 | +		.seq = nlh ? nlh->nlmsg_seq : 0,
| 1087 | +		.cmd = cmd,
| 1088 | +		.nsid = id,
| 1089 | +	};
884 | 1090 | 	struct sk_buff *msg;
885 | 1091 | 	int err = -ENOMEM;
886 | 1092 |
.. | ..
888 | 1094 | 	if (!msg)
889 | 1095 | 		goto out;
890 | 1096 |
891 | | -	err = rtnl_net_fill(msg, 0, 0, 0, cmd, net, id);
| 1097 | +	err = rtnl_net_fill(msg, &fillargs);
892 | 1098 | 	if (err < 0)
893 | 1099 | 		goto err_out;
894 | 1100 |
895 | | -	rtnl_notify(msg, net, 0, RTNLGRP_NSID, NULL, gfp);
| 1101 | +	rtnl_notify(msg, net, portid, RTNLGRP_NSID, nlh, gfp);
896 | 1102 | 	return;
897 | 1103 |
898 | 1104 | err_out:
.. | ..
921 | 1127 | 		panic("Could not allocate generic netns");
922 | 1128 |
923 | 1129 | 	rcu_assign_pointer(init_net.gen, ng);
| 1130 | +
| 1131 | +	preempt_disable();
| 1132 | +	__net_gen_cookie(&init_net);
| 1133 | +	preempt_enable();
924 | 1134 |
925 | 1135 | 	down_write(&pernet_ops_rwsem);
926 | 1136 | 	if (setup_net(&init_net, &init_user_ns))
.. | ..
967 | 1177 | out_undo:
968 | 1178 | 	/* If I have an error cleanup all namespaces I initialized */
969 | 1179 | 	list_del(&ops->list);
| 1180 | +	ops_pre_exit_list(ops, &net_exit_list);
| 1181 | +	synchronize_rcu();
970 | 1182 | 	ops_exit_list(ops, &net_exit_list);
971 | 1183 | 	ops_free_list(ops, &net_exit_list);
972 | 1184 | 	return error;
.. | ..
981 | 1193 | 	/* See comment in __register_pernet_operations() */
982 | 1194 | 	for_each_net(net)
983 | 1195 | 		list_add_tail(&net->exit_list, &net_exit_list);
| 1196 | +	ops_pre_exit_list(ops, &net_exit_list);
| 1197 | +	synchronize_rcu();
984 | 1198 | 	ops_exit_list(ops, &net_exit_list);
985 | 1199 | 	ops_free_list(ops, &net_exit_list);
986 | 1200 | }
.. | ..
1005 | 1219 | 	} else {
1006 | 1220 | 		LIST_HEAD(net_exit_list);
1007 | 1221 | 		list_add(&init_net.exit_list, &net_exit_list);
| 1222 | +		ops_pre_exit_list(ops, &net_exit_list);
| 1223 | +		synchronize_rcu();
1008 | 1224 | 		ops_exit_list(ops, &net_exit_list);
1009 | 1225 | 		ops_free_list(ops, &net_exit_list);
1010 | 1226 | 	}
.. | ..
1166 | 1382 | 	put_net(to_net_ns(ns));
1167 | 1383 | }
1168 | 1384 |
1169 | | -static int netns_install(struct nsproxy *nsproxy, struct ns_common *ns)
| 1385 | +static int netns_install(struct nsset *nsset, struct ns_common *ns)
1170 | 1386 | {
| 1387 | +	struct nsproxy *nsproxy = nsset->nsproxy;
1171 | 1388 | 	struct net *net = to_net_ns(ns);
1172 | 1389 |
1173 | 1390 | 	if (!ns_capable(net->user_ns, CAP_SYS_ADMIN) ||
1174 | | -	    !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
| 1391 | +	    !ns_capable(nsset->cred->user_ns, CAP_SYS_ADMIN))
1175 | 1392 | 		return -EPERM;
1176 | 1393 |
1177 | 1394 | 	put_net(nsproxy->net_ns);
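The final hunk is part of the nsset conversion: netns_install() now receives the whole nsset and checks CAP_SYS_ADMIN against the credentials captured in nsset->cred rather than current_user_ns(). For context, netns_install() is what runs when a process calls setns() on a network-namespace file descriptor; a minimal userspace counterpart might look like the sketch below, where the /run/netns path is only the usual iproute2 convention, not something introduced by this patch.

```c
/*
 * Minimal userspace counterpart: joining a network namespace by fd, which
 * ends up in netns_install() on the kernel side. The default path is just
 * the common iproute2 convention.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <sched.h>
#include <stdio.h>

int main(int argc, char **argv)
{
	const char *path = argc > 1 ? argv[1] : "/run/netns/test";
	int fd = open(path, O_RDONLY | O_CLOEXEC);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/*
	 * Fails with EPERM unless the caller has CAP_SYS_ADMIN both in the
	 * target netns's user namespace and in the user namespace of the
	 * credentials the nsset captured (previously current_user_ns()).
	 */
	if (setns(fd, CLONE_NEWNET) < 0) {
		perror("setns");
		return 1;
	}
	puts("joined network namespace");
	return 0;
}
```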
---|