#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/workqueue.h>
#include <linux/rtnetlink.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/idr.h>
#include <linux/rculist.h>
#include <linux/nsproxy.h>
#include <linux/proc_ns.h>
#include <linux/file.h>
#include <linux/export.h>
#include <linux/user_namespace.h>
#include <linux/net_namespace.h>
#include <linux/sched/task.h>

#include <net/netlink.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
/*
 *	Our network namespace constructor/destructor lists
 */
static LIST_HEAD(pernet_list);
static struct list_head *first_device = &pernet_list;
DEFINE_MUTEX(net_mutex);

LIST_HEAD(net_namespace_list);
EXPORT_SYMBOL_GPL(net_namespace_list);
struct net init_net = {
	.count		= ATOMIC_INIT(1),
	.dev_base_head	= LIST_HEAD_INIT(init_net.dev_base_head),
};
EXPORT_SYMBOL(init_net);
static bool init_net_initialized;
#define MIN_PERNET_OPS_ID	\
	((sizeof(struct net_generic) + sizeof(void *) - 1) / sizeof(void *))

#define INITIAL_NET_GEN_PTRS	13 /* +1 for len +2 for rcu_head */

static unsigned int max_gen_ptrs = INITIAL_NET_GEN_PTRS;
static struct net_generic *net_alloc_generic(void)
{
	struct net_generic *ng;
	unsigned int generic_size = offsetof(struct net_generic, ptr[max_gen_ptrs]);

	ng = kzalloc(generic_size, GFP_KERNEL);
	if (ng)
		ng->s.len = max_gen_ptrs;

	return ng;
}
static int net_assign_generic(struct net *net, unsigned int id, void *data)
{
	struct net_generic *ng, *old_ng;

	BUG_ON(!mutex_is_locked(&net_mutex));
	BUG_ON(id < MIN_PERNET_OPS_ID);

	old_ng = rcu_dereference_protected(net->gen,
					   lockdep_is_held(&net_mutex));
	if (old_ng->s.len > id) {
		old_ng->ptr[id] = data;
		return 0;
	}

	ng = net_alloc_generic();
	if (!ng)
		return -ENOMEM;
	/*
	 * Some synchronisation notes:
	 *
	 * net_generic() readers walk the net->gen array inside an RCU
	 * read-side section. Besides, once set, the net->gen->ptr[x]
	 * pointer never changes (see rules in netns/generic.h).
	 *
	 * That said, we simply duplicate this array and schedule
	 * the old copy for kfree after a grace period.
	 */
	memcpy(&ng->ptr[MIN_PERNET_OPS_ID], &old_ng->ptr[MIN_PERNET_OPS_ID],
	       (old_ng->s.len - MIN_PERNET_OPS_ID) * sizeof(void *));
	ng->ptr[id] = data;

	rcu_assign_pointer(net->gen, ng);
	kfree_rcu(old_ng, s.rcu);
	return 0;
}
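/*
 * Illustrative sketch (not part of the original file): how a pernet user is
 * typically expected to read back a pointer published above with
 * net_assign_generic(). The names "example_net_id" and
 * "struct example_pernet_state" are hypothetical; real users receive their
 * slot id from register_pernet_subsys()/register_pernet_device().
 */
#if 0	/* example only, kept out of the build */
static unsigned int example_net_id;

struct example_pernet_state {
	int counter;
};

static int example_read_counter(struct net *net)
{
	/* net_generic() dereferences net->gen under RCU internally; the
	 * slot pointer, once set, never changes (see netns/generic.h).
	 */
	struct example_pernet_state *state = net_generic(net, example_net_id);

	return state->counter;
}
#endif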
static int ops_init(const struct pernet_operations *ops, struct net *net)
{
	int err = -ENOMEM;
	void *data = NULL;

	if (ops->id && ops->size) {
		data = kzalloc(ops->size, GFP_KERNEL);
		if (!data)
			goto out;

		err = net_assign_generic(net, *ops->id, data);
		if (err)
			goto cleanup;
	}
	err = 0;
	if (ops->init)
		err = ops->init(net);
	if (!err)
		return 0;

cleanup:
	kfree(data);

out:
	return err;
}
static void ops_free(const struct pernet_operations *ops, struct net *net)
{
	if (ops->id && ops->size) {
		kfree(net_generic(net, *ops->id));
	}
}
static void ops_exit_list(const struct pernet_operations *ops,
			  struct list_head *net_exit_list)
{
	struct net *net;

	if (ops->exit) {
		list_for_each_entry(net, net_exit_list, exit_list)
			ops->exit(net);
	}
	if (ops->exit_batch)
		ops->exit_batch(net_exit_list);
}
static void ops_free_list(const struct pernet_operations *ops,
			  struct list_head *net_exit_list)
{
	struct net *net;

	if (ops->size && ops->id) {
		list_for_each_entry(net, net_exit_list, exit_list)
			ops_free(ops, net);
	}
}
/* should be called with nsid_lock held */
static int alloc_netid(struct net *net, struct net *peer, int reqid)
{
	int min = 0, max = 0;

	if (reqid >= 0) {
		min = reqid;
		max = reqid + 1;
	}

	return idr_alloc(&net->netns_ids, peer, min, max, GFP_ATOMIC);
}
/* This function is used by idr_for_each(). If net is equal to peer, the
 * function returns the id so that idr_for_each() stops. Because we cannot
 * return the id 0 (idr_for_each() would not stop), we return the magic value
 * NET_ID_ZERO (-1) for it.
 */
#define NET_ID_ZERO -1
static int net_eq_idr(int id, void *net, void *peer)
{
	if (net_eq(net, peer))
		return id ? : NET_ID_ZERO;
	return 0;
}
/* Should be called with nsid_lock held. If a new id is assigned, the bool alloc
 * is set to true, thus the caller knows that the new id must be notified via
 * rtnetlink.
 */
static int __peernet2id_alloc(struct net *net, struct net *peer, bool *alloc)
{
	int id = idr_for_each(&net->netns_ids, net_eq_idr, peer);
	bool alloc_it = *alloc;

	*alloc = false;

	/* Magic value for id 0. */
	if (id == NET_ID_ZERO)
		return 0;
	if (id > 0)
		return id;

	if (alloc_it) {
		id = alloc_netid(net, peer, -1);
		*alloc = id >= 0 ? true : false;

		return id >= 0 ? id : NETNSA_NSID_NOT_ASSIGNED;
	}

	return NETNSA_NSID_NOT_ASSIGNED;
}
/* should be called with nsid_lock held */
static int __peernet2id(struct net *net, struct net *peer)
{
	bool no = false;

	return __peernet2id_alloc(net, peer, &no);
}
static void rtnl_net_notifyid(struct net *net, int cmd, int id);
/* This function returns the id of a peer netns. If no id is assigned, one will
 * be allocated and returned.
 */
int peernet2id_alloc(struct net *net, struct net *peer)
{
	bool alloc;
	int id;

	if (atomic_read(&net->count) == 0)
		return NETNSA_NSID_NOT_ASSIGNED;
	spin_lock_bh(&net->nsid_lock);
	alloc = atomic_read(&peer->count) == 0 ? false : true;
	id = __peernet2id_alloc(net, peer, &alloc);
	spin_unlock_bh(&net->nsid_lock);
	if (alloc && id >= 0)
		rtnl_net_notifyid(net, RTM_NEWNSID, id);
	return id;
}
/* This function returns, if assigned, the id of a peer netns. */
int peernet2id(struct net *net, struct net *peer)
{
	int id;

	spin_lock_bh(&net->nsid_lock);
	id = __peernet2id(net, peer);
	spin_unlock_bh(&net->nsid_lock);
	return id;
}
EXPORT_SYMBOL(peernet2id);
/* This function returns true if the peer netns has an id assigned into the
 * current netns.
 */
bool peernet_has_id(struct net *net, struct net *peer)
{
	return peernet2id(net, peer) >= 0;
}
struct net *get_net_ns_by_id(struct net *net, int id)
{
	struct net *peer;

	if (id < 0)
		return NULL;

	rcu_read_lock();
	spin_lock_bh(&net->nsid_lock);
	peer = idr_find(&net->netns_ids, id);
	if (peer)
		get_net(peer);
	spin_unlock_bh(&net->nsid_lock);
	rcu_read_unlock();

	return peer;
}
/*
 * setup_net runs the initializers for the network namespace object.
 */
static __net_init int setup_net(struct net *net, struct user_namespace *user_ns)
{
	/* Must be called with net_mutex held */
	const struct pernet_operations *ops, *saved_ops;
	int error = 0;
	LIST_HEAD(net_exit_list);

	atomic_set(&net->count, 1);
	atomic_set(&net->passive, 1);
	net->dev_base_seq = 1;
	net->user_ns = user_ns;
	idr_init(&net->netns_ids);
	spin_lock_init(&net->nsid_lock);

	list_for_each_entry(ops, &pernet_list, list) {
		error = ops_init(ops, net);
		if (error < 0)
			goto out_undo;
	}
out:
	return error;

out_undo:
	/* Walk through the list backwards calling the exit functions
	 * for the pernet modules whose init functions did not fail.
	 */
	list_add(&net->exit_list, &net_exit_list);
	saved_ops = ops;
	list_for_each_entry_continue_reverse(ops, &pernet_list, list)
		ops_exit_list(ops, &net_exit_list);

	ops = saved_ops;
	list_for_each_entry_continue_reverse(ops, &pernet_list, list)
		ops_free_list(ops, &net_exit_list);

	rcu_barrier();
	goto out;
}
#ifdef CONFIG_NET_NS
static struct ucounts *inc_net_namespaces(struct user_namespace *ns)
{
	return inc_ucount(ns, current_euid(), UCOUNT_NET_NAMESPACES);
}

static void dec_net_namespaces(struct ucounts *ucounts)
{
	dec_ucount(ucounts, UCOUNT_NET_NAMESPACES);
}
static struct kmem_cache *net_cachep;
static struct workqueue_struct *netns_wq;
static struct net *net_alloc(void)
{
	struct net *net = NULL;
	struct net_generic *ng;

	ng = net_alloc_generic();
	if (!ng)
		goto out;

	net = kmem_cache_zalloc(net_cachep, GFP_KERNEL);
	if (!net)
		goto out_free;

	rcu_assign_pointer(net->gen, ng);
out:
	return net;

out_free:
	kfree(ng);
	goto out;
}
static void net_free(struct net *net)
{
	kfree(rcu_access_pointer(net->gen));
	kmem_cache_free(net_cachep, net);
}
void net_drop_ns(void *p)
{
	struct net *ns = p;

	if (ns && atomic_dec_and_test(&ns->passive))
		net_free(ns);
}
struct net *copy_net_ns(unsigned long flags,
			struct user_namespace *user_ns, struct net *old_net)
{
	struct ucounts *ucounts;
	struct net *net;
	int rv;

	if (!(flags & CLONE_NEWNET))
		return get_net(old_net);

	ucounts = inc_net_namespaces(user_ns);
	if (!ucounts)
		return ERR_PTR(-ENOSPC);

	net = net_alloc();
	if (!net) {
		dec_net_namespaces(ucounts);
		return ERR_PTR(-ENOMEM);
	}

	get_user_ns(user_ns);

	rv = mutex_lock_killable(&net_mutex);
	if (rv < 0) {
		net_free(net);
		dec_net_namespaces(ucounts);
		put_user_ns(user_ns);
		return ERR_PTR(rv);
	}

	net->ucounts = ucounts;
	rv = setup_net(net, user_ns);
	if (rv == 0) {
		rtnl_lock();
		list_add_tail_rcu(&net->list, &net_namespace_list);
		rtnl_unlock();
	}
	mutex_unlock(&net_mutex);
	if (rv < 0) {
		dec_net_namespaces(ucounts);
		put_user_ns(user_ns);
		net_drop_ns(net);
		return ERR_PTR(rv);
	}
	return net;
}
static DEFINE_SPINLOCK(cleanup_list_lock);
static LIST_HEAD(cleanup_list);  /* Must hold cleanup_list_lock to touch */
static void cleanup_net(struct work_struct *work)
{
	const struct pernet_operations *ops;
	struct net *net, *tmp;
	struct list_head net_kill_list;
	LIST_HEAD(net_exit_list);

	/* Atomically snapshot the list of namespaces to cleanup */
	spin_lock_irq(&cleanup_list_lock);
	list_replace_init(&cleanup_list, &net_kill_list);
	spin_unlock_irq(&cleanup_list_lock);

	mutex_lock(&net_mutex);

	/* Don't let anyone else find us. */
	rtnl_lock();
	list_for_each_entry(net, &net_kill_list, cleanup_list) {
		list_del_rcu(&net->list);
		list_add_tail(&net->exit_list, &net_exit_list);
		for_each_net(tmp) {
			int id;

			spin_lock_bh(&tmp->nsid_lock);
			id = __peernet2id(tmp, net);
			if (id >= 0)
				idr_remove(&tmp->netns_ids, id);
			spin_unlock_bh(&tmp->nsid_lock);
			if (id >= 0)
				rtnl_net_notifyid(tmp, RTM_DELNSID, id);
		}
		spin_lock_bh(&net->nsid_lock);
		idr_destroy(&net->netns_ids);
		spin_unlock_bh(&net->nsid_lock);
	}
	rtnl_unlock();

	/*
	 * Another CPU might be rcu-iterating the list, wait for it.
	 * This needs to be before calling the exit() notifiers, so
	 * the rcu_barrier() below isn't sufficient alone.
	 */
	synchronize_rcu();

	/* Run all of the network namespace exit methods */
	list_for_each_entry_reverse(ops, &pernet_list, list)
		ops_exit_list(ops, &net_exit_list);

	/* Free the net generic variables */
	list_for_each_entry_reverse(ops, &pernet_list, list)
		ops_free_list(ops, &net_exit_list);

	mutex_unlock(&net_mutex);

	/* Ensure there are no outstanding rcu callbacks using this
	 * network namespace.
	 */
	rcu_barrier();

	/* Finally it is safe to free my network namespace structure */
	list_for_each_entry_safe(net, tmp, &net_exit_list, exit_list) {
		list_del_init(&net->exit_list);
		dec_net_namespaces(net->ucounts);
		put_user_ns(net->user_ns);
		net_drop_ns(net);
	}
}
static DECLARE_WORK(net_cleanup_work, cleanup_net);
void __put_net(struct net *net)
{
	/* Cleanup the network namespace in process context */
	unsigned long flags;

	spin_lock_irqsave(&cleanup_list_lock, flags);
	list_add(&net->cleanup_list, &cleanup_list);
	spin_unlock_irqrestore(&cleanup_list_lock, flags);

	queue_work(netns_wq, &net_cleanup_work);
}
EXPORT_SYMBOL_GPL(__put_net);
struct net *get_net_ns_by_fd(int fd)
{
	struct net *net;
	struct ns_common *ns;
	struct file *file;

	file = proc_ns_fget(fd);
	if (IS_ERR(file))
		return ERR_CAST(file);

	ns = get_proc_ns(file_inode(file));
	if (ns->ops == &netns_operations)
		net = get_net(container_of(ns, struct net, ns));
	else
		net = ERR_PTR(-EINVAL);

	fput(file);
	return net;
}

#else
struct net *get_net_ns_by_fd(int fd)
{
	return ERR_PTR(-EINVAL);
}
#endif
EXPORT_SYMBOL_GPL(get_net_ns_by_fd);
struct net *get_net_ns_by_pid(pid_t pid)
{
	struct task_struct *tsk;
	struct net *net;

	/* Lookup the network namespace */
	net = ERR_PTR(-ESRCH);
	rcu_read_lock();
	tsk = find_task_by_vpid(pid);
	if (tsk) {
		struct nsproxy *nsproxy;

		task_lock(tsk);
		nsproxy = tsk->nsproxy;
		if (nsproxy)
			net = get_net(nsproxy->net_ns);
		task_unlock(tsk);
	}
	rcu_read_unlock();
	return net;
}
EXPORT_SYMBOL_GPL(get_net_ns_by_pid);
static __net_init int net_ns_net_init(struct net *net)
{
#ifdef CONFIG_NET_NS
	net->ns.ops = &netns_operations;
#endif
	return ns_alloc_inum(&net->ns);
}

static __net_exit void net_ns_net_exit(struct net *net)
{
	ns_free_inum(&net->ns);
}

static struct pernet_operations __net_initdata net_ns_ops = {
	.init = net_ns_net_init,
	.exit = net_ns_net_exit,
};
static const struct nla_policy rtnl_net_policy[NETNSA_MAX + 1] = {
	[NETNSA_NONE]		= { .type = NLA_UNSPEC },
	[NETNSA_NSID]		= { .type = NLA_S32 },
	[NETNSA_PID]		= { .type = NLA_U32 },
	[NETNSA_FD]		= { .type = NLA_U32 },
};
static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tb[NETNSA_MAX + 1];
	struct net *peer;
	int nsid, err;

	err = nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX,
			  rtnl_net_policy, extack);
	if (err < 0)
		return err;
	if (!tb[NETNSA_NSID])
		return -EINVAL;
	nsid = nla_get_s32(tb[NETNSA_NSID]);

	if (tb[NETNSA_PID])
		peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID]));
	else if (tb[NETNSA_FD])
		peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
	else
		return -EINVAL;
	if (IS_ERR(peer))
		return PTR_ERR(peer);

	spin_lock_bh(&net->nsid_lock);
	if (__peernet2id(net, peer) >= 0) {
		spin_unlock_bh(&net->nsid_lock);
		err = -EEXIST;
		goto out;
	}

	err = alloc_netid(net, peer, nsid);
	spin_unlock_bh(&net->nsid_lock);
	if (err >= 0) {
		rtnl_net_notifyid(net, RTM_NEWNSID, err);
		err = 0;
	}
out:
	put_net(peer);
	return err;
}
static int rtnl_net_get_size(void)
{
	return NLMSG_ALIGN(sizeof(struct rtgenmsg))
	       + nla_total_size(sizeof(s32)) /* NETNSA_NSID */
	       ;
}
static int rtnl_net_fill(struct sk_buff *skb, u32 portid, u32 seq, int flags,
			 int cmd, struct net *net, int nsid)
{
	struct nlmsghdr *nlh;
	struct rtgenmsg *rth;

	nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rth), flags);
	if (!nlh)
		return -EMSGSIZE;

	rth = nlmsg_data(nlh);
	rth->rtgen_family = AF_UNSPEC;

	if (nla_put_s32(skb, NETNSA_NSID, nsid))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
static int rtnl_net_getid(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tb[NETNSA_MAX + 1];
	struct sk_buff *msg;
	struct net *peer;
	int err, id;

	err = nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX,
			  rtnl_net_policy, extack);
	if (err < 0)
		return err;
	if (tb[NETNSA_PID])
		peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID]));
	else if (tb[NETNSA_FD])
		peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
	else
		return -EINVAL;

	if (IS_ERR(peer))
		return PTR_ERR(peer);

	msg = nlmsg_new(rtnl_net_get_size(), GFP_KERNEL);
	if (!msg) {
		err = -ENOMEM;
		goto out;
	}

	id = peernet2id(net, peer);
	err = rtnl_net_fill(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
			    RTM_NEWNSID, net, id);
	if (err < 0)
		goto err_out;

	err = rtnl_unicast(msg, net, NETLINK_CB(skb).portid);
	goto out;

err_out:
	nlmsg_free(msg);
out:
	put_net(peer);
	return err;
}
struct rtnl_net_dump_cb {
	struct net *net;
	struct sk_buff *skb;
	struct netlink_callback *cb;
	int idx;
	int s_idx;
};

static int rtnl_net_dumpid_one(int id, void *peer, void *data)
{
	struct rtnl_net_dump_cb *net_cb = (struct rtnl_net_dump_cb *)data;
	int ret;

	if (net_cb->idx < net_cb->s_idx)
		goto cont;

	ret = rtnl_net_fill(net_cb->skb, NETLINK_CB(net_cb->cb->skb).portid,
			    net_cb->cb->nlh->nlmsg_seq, NLM_F_MULTI,
			    RTM_NEWNSID, net_cb->net, id);
	if (ret < 0)
		return ret;

cont:
	net_cb->idx++;
	return 0;
}
static int rtnl_net_dumpid(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct rtnl_net_dump_cb net_cb = {
		.net = net,
		.skb = skb,
		.cb = cb,
		.idx = 0,
		.s_idx = cb->args[0],
	};

	spin_lock_bh(&net->nsid_lock);
	idr_for_each(&net->netns_ids, rtnl_net_dumpid_one, &net_cb);
	spin_unlock_bh(&net->nsid_lock);

	cb->args[0] = net_cb.idx;
	return skb->len;
}
static void rtnl_net_notifyid(struct net *net, int cmd, int id)
{
	struct sk_buff *msg;
	int err = -ENOMEM;

	msg = nlmsg_new(rtnl_net_get_size(), GFP_KERNEL);
	if (!msg)
		goto out;

	err = rtnl_net_fill(msg, 0, 0, 0, cmd, net, id);
	if (err < 0)
		goto err_out;

	rtnl_notify(msg, net, 0, RTNLGRP_NSID, NULL, 0);
	return;

err_out:
	nlmsg_free(msg);
out:
	rtnl_set_sk_err(net, RTNLGRP_NSID, err);
}
static int __init net_ns_init(void)
{
	struct net_generic *ng;

#ifdef CONFIG_NET_NS
	net_cachep = kmem_cache_create("net_namespace", sizeof(struct net),
					SMP_CACHE_BYTES,
					SLAB_PANIC, NULL);

	/* Create workqueue for cleanup */
	netns_wq = create_singlethread_workqueue("netns");
	if (!netns_wq)
		panic("Could not create netns workq");
#endif

	ng = net_alloc_generic();
	if (!ng)
		panic("Could not allocate generic netns");

	rcu_assign_pointer(init_net.gen, ng);

	mutex_lock(&net_mutex);
	if (setup_net(&init_net, &init_user_ns))
		panic("Could not setup the initial network namespace");

	init_net_initialized = true;

	rtnl_lock();
	list_add_tail_rcu(&init_net.list, &net_namespace_list);
	rtnl_unlock();

	mutex_unlock(&net_mutex);

	register_pernet_subsys(&net_ns_ops);

	rtnl_register(PF_UNSPEC, RTM_NEWNSID, rtnl_net_newid, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETNSID, rtnl_net_getid, rtnl_net_dumpid,
		      NULL);

	return 0;
}

pure_initcall(net_ns_init);
#ifdef CONFIG_NET_NS
static int __register_pernet_operations(struct list_head *list,
					struct pernet_operations *ops)
{
	struct net *net;
	int error;
	LIST_HEAD(net_exit_list);

	list_add_tail(&ops->list, list);
	if (ops->init || (ops->id && ops->size)) {
		for_each_net(net) {
			error = ops_init(ops, net);
			if (error)
				goto out_undo;
			list_add_tail(&net->exit_list, &net_exit_list);
		}
	}
	return 0;

out_undo:
	/* If I have an error cleanup all namespaces I initialized */
	list_del(&ops->list);
	ops_exit_list(ops, &net_exit_list);
	ops_free_list(ops, &net_exit_list);
	return error;
}
static void __unregister_pernet_operations(struct pernet_operations *ops)
{
	struct net *net;
	LIST_HEAD(net_exit_list);

	list_del(&ops->list);
	for_each_net(net)
		list_add_tail(&net->exit_list, &net_exit_list);
	ops_exit_list(ops, &net_exit_list);
	ops_free_list(ops, &net_exit_list);
}
#else

static int __register_pernet_operations(struct list_head *list,
					struct pernet_operations *ops)
{
	if (!init_net_initialized) {
		list_add_tail(&ops->list, list);
		return 0;
	}

	return ops_init(ops, &init_net);
}
static void __unregister_pernet_operations(struct pernet_operations *ops)
{
	if (!init_net_initialized) {
		list_del(&ops->list);
	} else {
		LIST_HEAD(net_exit_list);

		list_add(&init_net.exit_list, &net_exit_list);
		ops_exit_list(ops, &net_exit_list);
		ops_free_list(ops, &net_exit_list);
	}
}

#endif /* CONFIG_NET_NS */
static DEFINE_IDA(net_generic_ids);
static int register_pernet_operations(struct list_head *list,
				      struct pernet_operations *ops)
{
	int error;

	if (ops->id) {
again:
		error = ida_get_new_above(&net_generic_ids, MIN_PERNET_OPS_ID, ops->id);
		if (error < 0) {
			if (error == -EAGAIN) {
				ida_pre_get(&net_generic_ids, GFP_KERNEL);
				goto again;
			}
			return error;
		}
		max_gen_ptrs = max(max_gen_ptrs, *ops->id + 1);
	}
	error = __register_pernet_operations(list, ops);
	if (error) {
		rcu_barrier();
		if (ops->id)
			ida_remove(&net_generic_ids, *ops->id);
	}

	return error;
}
static void unregister_pernet_operations(struct pernet_operations *ops)
{
	__unregister_pernet_operations(ops);
	rcu_barrier();
	if (ops->id)
		ida_remove(&net_generic_ids, *ops->id);
}
/**
 *	register_pernet_subsys - register a network namespace subsystem
 *	@ops:  pernet operations structure for the subsystem
 *
 *	Register a subsystem which has init and exit functions
 *	that are called when network namespaces are created and
 *	destroyed respectively.
 *
 *	When registered all network namespace init functions are
 *	called for every existing network namespace, allowing kernel
 *	modules to have a race-free view of the set of network namespaces.
 *
 *	When a new network namespace is created all of the init
 *	methods are called in the order in which they were registered.
 *
 *	When a network namespace is destroyed all of the exit methods
 *	are called in the reverse of the order with which they were
 *	registered.
 */
int register_pernet_subsys(struct pernet_operations *ops)
{
	int error;

	mutex_lock(&net_mutex);
	error = register_pernet_operations(first_device, ops);
	mutex_unlock(&net_mutex);
	return error;
}
EXPORT_SYMBOL_GPL(register_pernet_subsys);
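/*
 * Illustrative sketch (not part of the original file): the usual shape of a
 * pernet subsystem that keeps private per-namespace state, following the
 * init/exit + id/size contract documented above. The names "foo_net_id",
 * "struct foo_net" and foo_net_init/exit are hypothetical.
 */
#if 0	/* example only, kept out of the build */
static unsigned int foo_net_id;

struct foo_net {
	int some_setting;
};

static int __net_init foo_net_init(struct net *net)
{
	/* ops_init() already allocated ->size bytes and published them via
	 * net_assign_generic() before calling this init method.
	 */
	struct foo_net *fn = net_generic(net, foo_net_id);

	fn->some_setting = 1;
	return 0;
}

static void __net_exit foo_net_exit(struct net *net)
{
	/* The per-net storage itself is freed by ops_free(); only extra
	 * resources acquired in foo_net_init() would be released here.
	 */
}

static struct pernet_operations foo_net_ops = {
	.init = foo_net_init,
	.exit = foo_net_exit,
	.id   = &foo_net_id,
	.size = sizeof(struct foo_net),
};

static int __init foo_init(void)
{
	/* Runs foo_net_init() for every existing namespace and arranges
	 * for it to run for namespaces created later.
	 */
	return register_pernet_subsys(&foo_net_ops);
}
#endif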
/**
 *	unregister_pernet_subsys - unregister a network namespace subsystem
 *	@ops: pernet operations structure to manipulate
 *
 *	Remove the pernet operations structure from the list to be
 *	used when network namespaces are created or destroyed.  In
 *	addition run the exit method for all existing network
 *	namespaces.
 */
void unregister_pernet_subsys(struct pernet_operations *ops)
{
	mutex_lock(&net_mutex);
	unregister_pernet_operations(ops);
	mutex_unlock(&net_mutex);
}
EXPORT_SYMBOL_GPL(unregister_pernet_subsys);
/**
 *	register_pernet_device - register a network namespace device
 *	@ops:  pernet operations structure for the subsystem
 *
 *	Register a device which has init and exit functions
 *	that are called when network namespaces are created and
 *	destroyed respectively.
 *
 *	When registered all network namespace init functions are
 *	called for every existing network namespace, allowing kernel
 *	modules to have a race-free view of the set of network namespaces.
 *
 *	When a new network namespace is created all of the init
 *	methods are called in the order in which they were registered.
 *
 *	When a network namespace is destroyed all of the exit methods
 *	are called in the reverse of the order with which they were
 *	registered.
 */
int register_pernet_device(struct pernet_operations *ops)
{
	int error;

	mutex_lock(&net_mutex);
	error = register_pernet_operations(&pernet_list, ops);
	if (!error && (first_device == &pernet_list))
		first_device = &ops->list;
	mutex_unlock(&net_mutex);
	return error;
}
EXPORT_SYMBOL_GPL(register_pernet_device);
/**
 *	unregister_pernet_device - unregister a network namespace netdevice
 *	@ops: pernet operations structure to manipulate
 *
 *	Remove the pernet operations structure from the list to be
 *	used when network namespaces are created or destroyed.  In
 *	addition run the exit method for all existing network
 *	namespaces.
 */
void unregister_pernet_device(struct pernet_operations *ops)
{
	mutex_lock(&net_mutex);
	if (&ops->list == first_device)
		first_device = first_device->next;
	unregister_pernet_operations(ops);
	mutex_unlock(&net_mutex);
}
EXPORT_SYMBOL_GPL(unregister_pernet_device);
#ifdef CONFIG_NET_NS
static struct ns_common *netns_get(struct task_struct *task)
{
	struct net *net = NULL;
	struct nsproxy *nsproxy;

	task_lock(task);
	nsproxy = task->nsproxy;
	if (nsproxy)
		net = get_net(nsproxy->net_ns);
	task_unlock(task);

	return net ? &net->ns : NULL;
}
static inline struct net *to_net_ns(struct ns_common *ns)
{
	return container_of(ns, struct net, ns);
}
static void netns_put(struct ns_common *ns)
{
	put_net(to_net_ns(ns));
}
static int netns_install(struct nsproxy *nsproxy, struct ns_common *ns)
{
	struct net *net = to_net_ns(ns);

	if (!ns_capable(net->user_ns, CAP_SYS_ADMIN) ||
	    !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
		return -EPERM;

	put_net(nsproxy->net_ns);
	nsproxy->net_ns = get_net(net);
	return 0;
}
static struct user_namespace *netns_owner(struct ns_common *ns)
{
	return to_net_ns(ns)->user_ns;
}
const struct proc_ns_operations netns_operations = {
	.name		= "net",
	.type		= CLONE_NEWNET,
	.get		= netns_get,
	.put		= netns_put,
	.install	= netns_install,
	.owner		= netns_owner,
};
#endif