#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/workqueue.h>
#include <linux/rtnetlink.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/idr.h>
#include <linux/rculist.h>
#include <linux/nsproxy.h>
#include <linux/fs.h>
#include <linux/proc_ns.h>
#include <linux/file.h>
#include <linux/export.h>
#include <linux/user_namespace.h>
#include <linux/net_namespace.h>
#include <linux/sched/task.h>

#include <net/sock.h>
#include <net/netlink.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

/*
 *	Our network namespace constructor/destructor lists
 */

static LIST_HEAD(pernet_list);
static struct list_head *first_device = &pernet_list;
DEFINE_MUTEX(net_mutex);

LIST_HEAD(net_namespace_list);
EXPORT_SYMBOL_GPL(net_namespace_list);

struct net init_net = {
	.count		= ATOMIC_INIT(1),
	.dev_base_head	= LIST_HEAD_INIT(init_net.dev_base_head),
};
EXPORT_SYMBOL(init_net);

static bool init_net_initialized;

#define MIN_PERNET_OPS_ID	\
	((sizeof(struct net_generic) + sizeof(void *) - 1) / sizeof(void *))

#define INITIAL_NET_GEN_PTRS	13 /* +1 for len +2 for rcu_head */

static unsigned int max_gen_ptrs = INITIAL_NET_GEN_PTRS;
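
/* Each network namespace carries a net_generic array of private pointers,
 * one slot per registered pernet subsystem (indexed by *ops->id).  New
 * namespaces start with max_gen_ptrs slots; max_gen_ptrs grows as
 * subsystems with larger ids are registered.
 */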
static struct net_generic *net_alloc_generic(void)
{
	struct net_generic *ng;
	unsigned int generic_size = offsetof(struct net_generic, ptr[max_gen_ptrs]);

	ng = kzalloc(generic_size, GFP_KERNEL);
	if (ng)
		ng->s.len = max_gen_ptrs;

	return ng;
}

static int net_assign_generic(struct net *net, unsigned int id, void *data)
{
	struct net_generic *ng, *old_ng;

	BUG_ON(!mutex_is_locked(&net_mutex));
	BUG_ON(id < MIN_PERNET_OPS_ID);

	old_ng = rcu_dereference_protected(net->gen,
					   lockdep_is_held(&net_mutex));
	if (old_ng->s.len > id) {
		old_ng->ptr[id] = data;
		return 0;
	}

	ng = net_alloc_generic();
	if (!ng)
		return -ENOMEM;

	/*
	 * Some synchronisation notes:
	 *
	 * The net_generic explores the net->gen array inside rcu
	 * read section. Besides once set the net->gen->ptr[x]
	 * pointer never changes (see rules in netns/generic.h).
	 *
	 * That said, we simply duplicate this array and schedule
	 * the old copy for kfree after a grace period.
	 */

	memcpy(&ng->ptr[MIN_PERNET_OPS_ID], &old_ng->ptr[MIN_PERNET_OPS_ID],
	       (old_ng->s.len - MIN_PERNET_OPS_ID) * sizeof(void *));
	ng->ptr[id] = data;

	rcu_assign_pointer(net->gen, ng);
	kfree_rcu(old_ng, s.rcu);
	return 0;
}
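
/* ops_init() brings one pernet subsystem up in one namespace: if the ops
 * declare a per-netns data size, that data is allocated and hooked into the
 * namespace's net_generic array before ops->init() is called.
 */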
static int ops_init(const struct pernet_operations *ops, struct net *net)
{
	int err = -ENOMEM;
	void *data = NULL;

	if (ops->id && ops->size) {
		data = kzalloc(ops->size, GFP_KERNEL);
		if (!data)
			goto out;

		err = net_assign_generic(net, *ops->id, data);
		if (err)
			goto cleanup;
	}
	err = 0;
	if (ops->init)
		err = ops->init(net);
	if (!err)
		return 0;

cleanup:
	kfree(data);

out:
	return err;
}

static void ops_free(const struct pernet_operations *ops, struct net *net)
{
	if (ops->id && ops->size) {
		kfree(net_generic(net, *ops->id));
	}
}

static void ops_exit_list(const struct pernet_operations *ops,
			  struct list_head *net_exit_list)
{
	struct net *net;
	if (ops->exit) {
		list_for_each_entry(net, net_exit_list, exit_list)
			ops->exit(net);
	}
	if (ops->exit_batch)
		ops->exit_batch(net_exit_list);
}

static void ops_free_list(const struct pernet_operations *ops,
			  struct list_head *net_exit_list)
{
	struct net *net;
	if (ops->size && ops->id) {
		list_for_each_entry(net, net_exit_list, exit_list)
			ops_free(ops, net);
	}
}
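
/* Peer namespace ids (nsids): each namespace keeps an idr that maps small
 * integer ids to the peer namespaces it knows about.  These ids are what
 * RTM_NEWNSID/RTM_GETNSID expose to user space.
 */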
/* should be called with nsid_lock held */
static int alloc_netid(struct net *net, struct net *peer, int reqid)
{
	int min = 0, max = 0;

	if (reqid >= 0) {
		min = reqid;
		max = reqid + 1;
	}

	return idr_alloc(&net->netns_ids, peer, min, max, GFP_ATOMIC);
}

/* This function is used by idr_for_each(). If net is equal to peer, the
 * function returns the id so that idr_for_each() stops. Because we cannot
 * return the id 0 (idr_for_each() will not stop), we return the magic value
 * NET_ID_ZERO (-1) for it.
 */
#define NET_ID_ZERO -1
static int net_eq_idr(int id, void *net, void *peer)
{
	if (net_eq(net, peer))
		return id ? : NET_ID_ZERO;
	return 0;
}

/* Should be called with nsid_lock held. If a new id is assigned, the bool alloc
 * is set to true, thus the caller knows that the new id must be notified via
 * rtnetlink.
 */
static int __peernet2id_alloc(struct net *net, struct net *peer, bool *alloc)
{
	int id = idr_for_each(&net->netns_ids, net_eq_idr, peer);
	bool alloc_it = *alloc;

	*alloc = false;

	/* Magic value for id 0. */
	if (id == NET_ID_ZERO)
		return 0;
	if (id > 0)
		return id;

	if (alloc_it) {
		id = alloc_netid(net, peer, -1);
		*alloc = true;
		return id >= 0 ? id : NETNSA_NSID_NOT_ASSIGNED;
	}

	return NETNSA_NSID_NOT_ASSIGNED;
}

/* should be called with nsid_lock held */
static int __peernet2id(struct net *net, struct net *peer)
{
	bool no = false;

	return __peernet2id_alloc(net, peer, &no);
}

static void rtnl_net_notifyid(struct net *net, int cmd, int id);
/* This function returns the id of a peer netns. If no id is assigned, one will
 * be allocated and returned.
 */
int peernet2id_alloc(struct net *net, struct net *peer)
{
	bool alloc;
	int id;

	if (atomic_read(&net->count) == 0)
		return NETNSA_NSID_NOT_ASSIGNED;
	spin_lock_bh(&net->nsid_lock);
	alloc = atomic_read(&peer->count) == 0 ? false : true;
	id = __peernet2id_alloc(net, peer, &alloc);
	spin_unlock_bh(&net->nsid_lock);
	if (alloc && id >= 0)
		rtnl_net_notifyid(net, RTM_NEWNSID, id);
	return id;
}

/* This function returns, if assigned, the id of a peer netns. */
int peernet2id(struct net *net, struct net *peer)
{
	int id;

	spin_lock_bh(&net->nsid_lock);
	id = __peernet2id(net, peer);
	spin_unlock_bh(&net->nsid_lock);
	return id;
}
EXPORT_SYMBOL(peernet2id);

/* This function returns true if the peer netns has an id assigned into the
 * current netns.
 */
bool peernet_has_id(struct net *net, struct net *peer)
{
	return peernet2id(net, peer) >= 0;
}
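
/* Look up the peer namespace registered under @id in @net's nsid table and
 * return it with a reference held, or NULL if no such id is assigned.
 */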
struct net *get_net_ns_by_id(struct net *net, int id)
{
	struct net *peer;

	if (id < 0)
		return NULL;

	rcu_read_lock();
	spin_lock_bh(&net->nsid_lock);
	peer = idr_find(&net->netns_ids, id);
	if (peer)
		get_net(peer);
	spin_unlock_bh(&net->nsid_lock);
	rcu_read_unlock();

	return peer;
}

/*
 * setup_net runs the initializers for the network namespace object.
 */
static __net_init int setup_net(struct net *net, struct user_namespace *user_ns)
{
	/* Must be called with net_mutex held */
	const struct pernet_operations *ops, *saved_ops;
	int error = 0;
	LIST_HEAD(net_exit_list);

	atomic_set(&net->count, 1);
	atomic_set(&net->passive, 1);
	net->dev_base_seq = 1;
	net->user_ns = user_ns;
	idr_init(&net->netns_ids);
	spin_lock_init(&net->nsid_lock);

	list_for_each_entry(ops, &pernet_list, list) {
		error = ops_init(ops, net);
		if (error < 0)
			goto out_undo;
	}
out:
	return error;

out_undo:
	/* Walk through the list backwards calling the exit functions
	 * for the pernet modules whose init functions did not fail.
	 */
	list_add(&net->exit_list, &net_exit_list);
	saved_ops = ops;
	list_for_each_entry_continue_reverse(ops, &pernet_list, list)
		ops_exit_list(ops, &net_exit_list);

	ops = saved_ops;
	list_for_each_entry_continue_reverse(ops, &pernet_list, list)
		ops_free_list(ops, &net_exit_list);

	rcu_barrier();
	goto out;
}

static int __net_init net_defaults_init_net(struct net *net)
{
	net->core.sysctl_somaxconn = SOMAXCONN;
	return 0;
}

static struct pernet_operations net_defaults_ops = {
	.init = net_defaults_init_net,
};

static __init int net_defaults_init(void)
{
	if (register_pernet_subsys(&net_defaults_ops))
		panic("Cannot initialize net default settings");

	return 0;
}

core_initcall(net_defaults_init);

#ifdef CONFIG_NET_NS
static struct ucounts *inc_net_namespaces(struct user_namespace *ns)
{
	return inc_ucount(ns, current_euid(), UCOUNT_NET_NAMESPACES);
}

static void dec_net_namespaces(struct ucounts *ucounts)
{
	dec_ucount(ucounts, UCOUNT_NET_NAMESPACES);
}

static struct kmem_cache *net_cachep;
static struct workqueue_struct *netns_wq;
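
/* net_alloc() only allocates the struct net and its initial net_generic
 * array; the namespace itself is initialised later by setup_net() under
 * net_mutex.
 */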
static struct net *net_alloc(void)
{
	struct net *net = NULL;
	struct net_generic *ng;

	ng = net_alloc_generic();
	if (!ng)
		goto out;

	net = kmem_cache_zalloc(net_cachep, GFP_KERNEL);
	if (!net)
		goto out_free;

	rcu_assign_pointer(net->gen, ng);
out:
	return net;

out_free:
	kfree(ng);
	goto out;
}

static void net_free(struct net *net)
{
	kfree(rcu_access_pointer(net->gen));
	kmem_cache_free(net_cachep, net);
}
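
/* net->passive counts references to the struct net memory itself, as opposed
 * to net->count which keeps the namespace contents alive.  The structure is
 * freed only when the last passive reference is dropped.
 */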
void net_drop_ns(void *p)
{
	struct net *ns = p;
	if (ns && atomic_dec_and_test(&ns->passive))
		net_free(ns);
}

struct net *copy_net_ns(unsigned long flags,
			struct user_namespace *user_ns, struct net *old_net)
{
	struct ucounts *ucounts;
	struct net *net;
	int rv;

	if (!(flags & CLONE_NEWNET))
		return get_net(old_net);

	ucounts = inc_net_namespaces(user_ns);
	if (!ucounts)
		return ERR_PTR(-ENOSPC);

	net = net_alloc();
	if (!net) {
		dec_net_namespaces(ucounts);
		return ERR_PTR(-ENOMEM);
	}

	get_user_ns(user_ns);

	rv = mutex_lock_killable(&net_mutex);
	if (rv < 0) {
		net_free(net);
		dec_net_namespaces(ucounts);
		put_user_ns(user_ns);
		return ERR_PTR(rv);
	}

	net->ucounts = ucounts;
	rv = setup_net(net, user_ns);
	if (rv == 0) {
		rtnl_lock();
		list_add_tail_rcu(&net->list, &net_namespace_list);
		rtnl_unlock();
	}
	mutex_unlock(&net_mutex);
	if (rv < 0) {
		dec_net_namespaces(ucounts);
		put_user_ns(user_ns);
		net_drop_ns(net);
		return ERR_PTR(rv);
	}
	return net;
}

static DEFINE_SPINLOCK(cleanup_list_lock);
static LIST_HEAD(cleanup_list);  /* Must hold cleanup_list_lock to touch */
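
/* cleanup_net() runs from the netns workqueue.  It unhashes every namespace
 * queued on cleanup_list, withdraws their nsids from all other namespaces,
 * runs the pernet exit methods in reverse registration order and finally
 * frees the structures once RCU readers are done.
 */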
static void cleanup_net(struct work_struct *work)
{
	const struct pernet_operations *ops;
	struct net *net, *tmp;
	struct list_head net_kill_list;
	LIST_HEAD(net_exit_list);

	/* Atomically snapshot the list of namespaces to cleanup */
	spin_lock_irq(&cleanup_list_lock);
	list_replace_init(&cleanup_list, &net_kill_list);
	spin_unlock_irq(&cleanup_list_lock);

	mutex_lock(&net_mutex);

	/* Don't let anyone else find us. */
	rtnl_lock();
	list_for_each_entry(net, &net_kill_list, cleanup_list) {
		list_del_rcu(&net->list);
		list_add_tail(&net->exit_list, &net_exit_list);
		for_each_net(tmp) {
			int id;

			spin_lock_bh(&tmp->nsid_lock);
			id = __peernet2id(tmp, net);
			if (id >= 0)
				idr_remove(&tmp->netns_ids, id);
			spin_unlock_bh(&tmp->nsid_lock);
			if (id >= 0)
				rtnl_net_notifyid(tmp, RTM_DELNSID, id);
		}
		spin_lock_bh(&net->nsid_lock);
		idr_destroy(&net->netns_ids);
		spin_unlock_bh(&net->nsid_lock);
	}
	rtnl_unlock();

	/*
	 * Another CPU might be rcu-iterating the list, wait for it.
	 * This needs to be before calling the exit() notifiers, so
	 * the rcu_barrier() below isn't sufficient alone.
	 */
	synchronize_rcu();

	/* Run all of the network namespace exit methods */
	list_for_each_entry_reverse(ops, &pernet_list, list)
		ops_exit_list(ops, &net_exit_list);

	/* Free the net generic variables */
	list_for_each_entry_reverse(ops, &pernet_list, list)
		ops_free_list(ops, &net_exit_list);

	mutex_unlock(&net_mutex);

	/* Ensure there are no outstanding rcu callbacks using this
	 * network namespace.
	 */
	rcu_barrier();

	/* Finally it is safe to free my network namespace structure */
	list_for_each_entry_safe(net, tmp, &net_exit_list, exit_list) {
		list_del_init(&net->exit_list);
		dec_net_namespaces(net->ucounts);
		put_user_ns(net->user_ns);
		net_drop_ns(net);
	}
}

static DECLARE_WORK(net_cleanup_work, cleanup_net);

void __put_net(struct net *net)
{
	/* Cleanup the network namespace in process context */
	unsigned long flags;

	spin_lock_irqsave(&cleanup_list_lock, flags);
	list_add(&net->cleanup_list, &cleanup_list);
	spin_unlock_irqrestore(&cleanup_list_lock, flags);

	queue_work(netns_wq, &net_cleanup_work);
}
EXPORT_SYMBOL_GPL(__put_net);
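
/* Resolve a file descriptor referring to /proc/<pid>/ns/net to the
 * corresponding struct net, taking a reference on success.
 */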
struct net *get_net_ns_by_fd(int fd)
{
	struct file *file;
	struct ns_common *ns;
	struct net *net;

	file = proc_ns_fget(fd);
	if (IS_ERR(file))
		return ERR_CAST(file);

	ns = get_proc_ns(file_inode(file));
	if (ns->ops == &netns_operations)
		net = get_net(container_of(ns, struct net, ns));
	else
		net = ERR_PTR(-EINVAL);

	fput(file);
	return net;
}

#else
struct net *get_net_ns_by_fd(int fd)
{
	return ERR_PTR(-EINVAL);
}
#endif
EXPORT_SYMBOL_GPL(get_net_ns_by_fd);

struct net *get_net_ns_by_pid(pid_t pid)
{
	struct task_struct *tsk;
	struct net *net;

	/* Lookup the network namespace */
	net = ERR_PTR(-ESRCH);
	rcu_read_lock();
	tsk = find_task_by_vpid(pid);
	if (tsk) {
		struct nsproxy *nsproxy;
		task_lock(tsk);
		nsproxy = tsk->nsproxy;
		if (nsproxy)
			net = get_net(nsproxy->net_ns);
		task_unlock(tsk);
	}
	rcu_read_unlock();
	return net;
}
EXPORT_SYMBOL_GPL(get_net_ns_by_pid);

static __net_init int net_ns_net_init(struct net *net)
{
#ifdef CONFIG_NET_NS
	net->ns.ops = &netns_operations;
#endif
	return ns_alloc_inum(&net->ns);
}

static __net_exit void net_ns_net_exit(struct net *net)
{
	ns_free_inum(&net->ns);
}

static struct pernet_operations __net_initdata net_ns_ops = {
	.init = net_ns_net_init,
	.exit = net_ns_net_exit,
};
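
/* rtnetlink interface for namespace ids: RTM_NEWNSID assigns an nsid to a
 * peer namespace, RTM_GETNSID queries or dumps the assignments.  The peer
 * may be identified by pid (NETNSA_PID) or by namespace fd (NETNSA_FD).
 */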
static const struct nla_policy rtnl_net_policy[NETNSA_MAX + 1] = {
	[NETNSA_NONE]		= { .type = NLA_UNSPEC },
	[NETNSA_NSID]		= { .type = NLA_S32 },
	[NETNSA_PID]		= { .type = NLA_U32 },
	[NETNSA_FD]		= { .type = NLA_U32 },
};

static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tb[NETNSA_MAX + 1];
	struct net *peer;
	int nsid, err;

	err = nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX,
			  rtnl_net_policy, extack);
	if (err < 0)
		return err;
	if (!tb[NETNSA_NSID])
		return -EINVAL;
	nsid = nla_get_s32(tb[NETNSA_NSID]);

	if (tb[NETNSA_PID])
		peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID]));
	else if (tb[NETNSA_FD])
		peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
	else
		return -EINVAL;
	if (IS_ERR(peer))
		return PTR_ERR(peer);

	spin_lock_bh(&net->nsid_lock);
	if (__peernet2id(net, peer) >= 0) {
		spin_unlock_bh(&net->nsid_lock);
		err = -EEXIST;
		goto out;
	}

	err = alloc_netid(net, peer, nsid);
	spin_unlock_bh(&net->nsid_lock);
	if (err >= 0) {
		rtnl_net_notifyid(net, RTM_NEWNSID, err);
		err = 0;
	}
out:
	put_net(peer);
	return err;
}

static int rtnl_net_get_size(void)
{
	return NLMSG_ALIGN(sizeof(struct rtgenmsg))
	       + nla_total_size(sizeof(s32)) /* NETNSA_NSID */
	       ;
}

static int rtnl_net_fill(struct sk_buff *skb, u32 portid, u32 seq, int flags,
			 int cmd, struct net *net, int nsid)
{
	struct nlmsghdr *nlh;
	struct rtgenmsg *rth;

	nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rth), flags);
	if (!nlh)
		return -EMSGSIZE;

	rth = nlmsg_data(nlh);
	rth->rtgen_family = AF_UNSPEC;

	if (nla_put_s32(skb, NETNSA_NSID, nsid))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static int rtnl_net_getid(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tb[NETNSA_MAX + 1];
	struct sk_buff *msg;
	struct net *peer;
	int err, id;

	err = nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX,
			  rtnl_net_policy, extack);
	if (err < 0)
		return err;
	if (tb[NETNSA_PID])
		peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID]));
	else if (tb[NETNSA_FD])
		peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
	else
		return -EINVAL;
	if (IS_ERR(peer))
		return PTR_ERR(peer);

	msg = nlmsg_new(rtnl_net_get_size(), GFP_KERNEL);
	if (!msg) {
		err = -ENOMEM;
		goto out;
	}

	id = peernet2id(net, peer);
	err = rtnl_net_fill(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
			    RTM_NEWNSID, net, id);
	if (err < 0)
		goto err_out;

	err = rtnl_unicast(msg, net, NETLINK_CB(skb).portid);
	goto out;

err_out:
	nlmsg_free(msg);
out:
	put_net(peer);
	return err;
}

struct rtnl_net_dump_cb {
	struct net *net;
	struct sk_buff *skb;
	struct netlink_callback *cb;
	int idx;
	int s_idx;
};

static int rtnl_net_dumpid_one(int id, void *peer, void *data)
{
	struct rtnl_net_dump_cb *net_cb = (struct rtnl_net_dump_cb *)data;
	int ret;

	if (net_cb->idx < net_cb->s_idx)
		goto cont;

	ret = rtnl_net_fill(net_cb->skb, NETLINK_CB(net_cb->cb->skb).portid,
			    net_cb->cb->nlh->nlmsg_seq, NLM_F_MULTI,
			    RTM_NEWNSID, net_cb->net, id);
	if (ret < 0)
		return ret;

cont:
	net_cb->idx++;
	return 0;
}

static int rtnl_net_dumpid(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct rtnl_net_dump_cb net_cb = {
		.net = net,
		.skb = skb,
		.cb = cb,
		.idx = 0,
		.s_idx = cb->args[0],
	};

	spin_lock_bh(&net->nsid_lock);
	idr_for_each(&net->netns_ids, rtnl_net_dumpid_one, &net_cb);
	spin_unlock_bh(&net->nsid_lock);

	cb->args[0] = net_cb.idx;
	return skb->len;
}
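
/* Broadcast an RTM_NEWNSID/RTM_DELNSID notification for @id to the
 * RTNLGRP_NSID multicast group of @net.
 */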
static void rtnl_net_notifyid(struct net *net, int cmd, int id)
{
	struct sk_buff *msg;
	int err = -ENOMEM;

	msg = nlmsg_new(rtnl_net_get_size(), GFP_KERNEL);
	if (!msg)
		goto out;

	err = rtnl_net_fill(msg, 0, 0, 0, cmd, net, id);
	if (err < 0)
		goto err_out;

	rtnl_notify(msg, net, 0, RTNLGRP_NSID, NULL, 0);
	return;

err_out:
	nlmsg_free(msg);
out:
	rtnl_set_sk_err(net, RTNLGRP_NSID, err);
}

static int __init net_ns_init(void)
{
	struct net_generic *ng;

#ifdef CONFIG_NET_NS
	net_cachep = kmem_cache_create("net_namespace", sizeof(struct net),
				       SMP_CACHE_BYTES, SLAB_PANIC, NULL);

	/* Create workqueue for cleanup */
	netns_wq = create_singlethread_workqueue("netns");
	if (!netns_wq)
		panic("Could not create netns workq");
#endif

	ng = net_alloc_generic();
	if (!ng)
		panic("Could not allocate generic netns");

	rcu_assign_pointer(init_net.gen, ng);

	mutex_lock(&net_mutex);
	if (setup_net(&init_net, &init_user_ns))
		panic("Could not setup the initial network namespace");

	init_net_initialized = true;

	rtnl_lock();
	list_add_tail_rcu(&init_net.list, &net_namespace_list);
	rtnl_unlock();

	mutex_unlock(&net_mutex);

	register_pernet_subsys(&net_ns_ops);

	rtnl_register(PF_UNSPEC, RTM_NEWNSID, rtnl_net_newid, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETNSID, rtnl_net_getid, rtnl_net_dumpid,
		      NULL);

	return 0;
}

pure_initcall(net_ns_init);
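
/* With CONFIG_NET_NS the registration helpers below walk every existing
 * namespace; without it only init_net exists, so they reduce to a single
 * ops_init()/ops_exit() on init_net (or to plain list manipulation before
 * init_net has been set up).
 */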

#ifdef CONFIG_NET_NS
static int __register_pernet_operations(struct list_head *list,
					struct pernet_operations *ops)
{
	struct net *net;
	int error;
	LIST_HEAD(net_exit_list);

	list_add_tail(&ops->list, list);
	if (ops->init || (ops->id && ops->size)) {
		for_each_net(net) {
			error = ops_init(ops, net);
			if (error)
				goto out_undo;
			list_add_tail(&net->exit_list, &net_exit_list);
		}
	}
	return 0;

out_undo:
	/* If I have an error cleanup all namespaces I initialized */
	list_del(&ops->list);
	ops_exit_list(ops, &net_exit_list);
	ops_free_list(ops, &net_exit_list);
	return error;
}

static void __unregister_pernet_operations(struct pernet_operations *ops)
{
	struct net *net;
	LIST_HEAD(net_exit_list);

	list_del(&ops->list);
	for_each_net(net)
		list_add_tail(&net->exit_list, &net_exit_list);
	ops_exit_list(ops, &net_exit_list);
	ops_free_list(ops, &net_exit_list);
}

#else

static int __register_pernet_operations(struct list_head *list,
					struct pernet_operations *ops)
{
	if (!init_net_initialized) {
		list_add_tail(&ops->list, list);
		return 0;
	}

	return ops_init(ops, &init_net);
}

static void __unregister_pernet_operations(struct pernet_operations *ops)
{
	if (!init_net_initialized) {
		list_del(&ops->list);
	} else {
		LIST_HEAD(net_exit_list);

		list_add(&init_net.exit_list, &net_exit_list);
		ops_exit_list(ops, &net_exit_list);
		ops_free_list(ops, &net_exit_list);
	}
}

#endif /* CONFIG_NET_NS */

static DEFINE_IDA(net_generic_ids);

static int register_pernet_operations(struct list_head *list,
				      struct pernet_operations *ops)
{
	int error;

	if (ops->id) {
again:
		error = ida_get_new_above(&net_generic_ids, MIN_PERNET_OPS_ID,
					  ops->id);
		if (error < 0) {
			if (error == -EAGAIN) {
				ida_pre_get(&net_generic_ids, GFP_KERNEL);
				goto again;
			}
			return error;
		}
		max_gen_ptrs = max(max_gen_ptrs, *ops->id + 1);
	}
	error = __register_pernet_operations(list, ops);
	if (error) {
		rcu_barrier();
		if (ops->id)
			ida_remove(&net_generic_ids, *ops->id);
	}

	return error;
}

static void unregister_pernet_operations(struct pernet_operations *ops)
{
	__unregister_pernet_operations(ops);
	rcu_barrier();
	if (ops->id)
		ida_remove(&net_generic_ids, *ops->id);
}

/**
 *	register_pernet_subsys - register a network namespace subsystem
 *	@ops:  pernet operations structure for the subsystem
 *
 *	Register a subsystem which has init and exit functions
 *	that are called when network namespaces are created and
 *	destroyed respectively.
 *
 *	When registered all network namespace init functions are
 *	called for every existing network namespace, allowing kernel
 *	modules to have a race-free view of the set of network namespaces.
 *
 *	When a new network namespace is created all of the init
 *	methods are called in the order in which they were registered.
 *
 *	When a network namespace is destroyed all of the exit methods
 *	are called in the reverse of the order with which they were
 *	registered.
 */
int register_pernet_subsys(struct pernet_operations *ops)
{
	int error;
	mutex_lock(&net_mutex);
	error = register_pernet_operations(first_device, ops);
	mutex_unlock(&net_mutex);
	return error;
}
EXPORT_SYMBOL_GPL(register_pernet_subsys);
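
/* Typical usage of the pernet API (illustrative sketch only; the "foo"
 * names are hypothetical and not part of this file).  A subsystem declares
 * per-namespace state and lets ops_init() allocate it via .id/.size:
 *
 *	static unsigned int foo_net_id __read_mostly;
 *
 *	struct foo_net {
 *		int setting;
 *	};
 *
 *	static int __net_init foo_init_net(struct net *net)
 *	{
 *		struct foo_net *fn = net_generic(net, foo_net_id);
 *
 *		fn->setting = 1;
 *		return 0;
 *	}
 *
 *	static struct pernet_operations foo_net_ops = {
 *		.init = foo_init_net,
 *		.id   = &foo_net_id,
 *		.size = sizeof(struct foo_net),
 *	};
 *
 * The subsystem then calls register_pernet_subsys(&foo_net_ops) from its
 * init code and unregister_pernet_subsys(&foo_net_ops) on teardown.
 */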

/**
 *	unregister_pernet_subsys - unregister a network namespace subsystem
 *	@ops: pernet operations structure to manipulate
 *
 *	Remove the pernet operations structure from the list to be
 *	used when network namespaces are created or destroyed.  In
 *	addition run the exit method for all existing network
 *	namespaces.
 */
void unregister_pernet_subsys(struct pernet_operations *ops)
{
	mutex_lock(&net_mutex);
	unregister_pernet_operations(ops);
	mutex_unlock(&net_mutex);
}
EXPORT_SYMBOL_GPL(unregister_pernet_subsys);

/**
 *	register_pernet_device - register a network namespace device
 *	@ops:  pernet operations structure for the subsystem
 *
 *	Register a device which has init and exit functions
 *	that are called when network namespaces are created and
 *	destroyed respectively.
 *
 *	When registered all network namespace init functions are
 *	called for every existing network namespace, allowing kernel
 *	modules to have a race-free view of the set of network namespaces.
 *
 *	When a new network namespace is created all of the init
 *	methods are called in the order in which they were registered.
 *
 *	When a network namespace is destroyed all of the exit methods
 *	are called in the reverse of the order with which they were
 *	registered.
 */
int register_pernet_device(struct pernet_operations *ops)
{
	int error;
	mutex_lock(&net_mutex);
	error = register_pernet_operations(&pernet_list, ops);
	if (!error && (first_device == &pernet_list))
		first_device = &ops->list;
	mutex_unlock(&net_mutex);
	return error;
}
EXPORT_SYMBOL_GPL(register_pernet_device);

/**
 *	unregister_pernet_device - unregister a network namespace netdevice
 *	@ops: pernet operations structure to manipulate
 *
 *	Remove the pernet operations structure from the list to be
 *	used when network namespaces are created or destroyed.  In
 *	addition run the exit method for all existing network
 *	namespaces.
 */
void unregister_pernet_device(struct pernet_operations *ops)
{
	mutex_lock(&net_mutex);
	if (&ops->list == first_device)
		first_device = first_device->next;
	unregister_pernet_operations(ops);
	mutex_unlock(&net_mutex);
}
EXPORT_SYMBOL_GPL(unregister_pernet_device);

#ifdef CONFIG_NET_NS
static struct ns_common *netns_get(struct task_struct *task)
{
	struct net *net = NULL;
	struct nsproxy *nsproxy;

	task_lock(task);
	nsproxy = task->nsproxy;
	if (nsproxy)
		net = get_net(nsproxy->net_ns);
	task_unlock(task);

	return net ? &net->ns : NULL;
}

static inline struct net *to_net_ns(struct ns_common *ns)
{
	return container_of(ns, struct net, ns);
}

static void netns_put(struct ns_common *ns)
{
	put_net(to_net_ns(ns));
}

static int netns_install(struct nsproxy *nsproxy, struct ns_common *ns)
{
	struct net *net = to_net_ns(ns);

	if (!ns_capable(net->user_ns, CAP_SYS_ADMIN) ||
	    !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
		return -EPERM;

	put_net(nsproxy->net_ns);
	nsproxy->net_ns = get_net(net);
	return 0;
}

static struct user_namespace *netns_owner(struct ns_common *ns)
{
	return to_net_ns(ns)->user_ns;
}

const struct proc_ns_operations netns_operations = {
	.name		= "net",
	.type		= CLONE_NEWNET,
	.get		= netns_get,
	.put		= netns_put,
	.install	= netns_install,
	.owner		= netns_owner,
};
#endif