#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/workqueue.h>
#include <linux/rtnetlink.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/idr.h>
#include <linux/rculist.h>
#include <linux/nsproxy.h>
#include <linux/fs.h>
#include <linux/proc_ns.h>
#include <linux/file.h>
#include <linux/export.h>
#include <linux/user_namespace.h>
#include <linux/net_namespace.h>
#include <linux/sched/task.h>

#include <net/sock.h>
#include <net/netlink.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
/*
 * Our network namespace constructor/destructor lists
 */

static LIST_HEAD(pernet_list);
static struct list_head *first_device = &pernet_list;
DEFINE_MUTEX(net_mutex);

LIST_HEAD(net_namespace_list);
EXPORT_SYMBOL_GPL(net_namespace_list);

struct net init_net = {
	.count		= ATOMIC_INIT(1),
	.dev_base_head	= LIST_HEAD_INIT(init_net.dev_base_head),
};
EXPORT_SYMBOL(init_net);

static bool init_net_initialized;

#define MIN_PERNET_OPS_ID	\
	((sizeof(struct net_generic) + sizeof(void *) - 1) / sizeof(void *))

#define INITIAL_NET_GEN_PTRS	13 /* +1 for len +2 for rcu_head */

static unsigned int max_gen_ptrs = INITIAL_NET_GEN_PTRS;
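/* Allocate a zeroed net_generic array sized for the current max_gen_ptrs.
 * The stored length lets net_assign_generic() detect when a larger copy
 * is needed.
 */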
static struct net_generic *net_alloc_generic(void)
{
	struct net_generic *ng;
	unsigned int generic_size = offsetof(struct net_generic, ptr[max_gen_ptrs]);

	ng = kzalloc(generic_size, GFP_KERNEL);
	if (ng)
		ng->s.len = max_gen_ptrs;

	return ng;
}
static int net_assign_generic(struct net *net, unsigned int id, void *data)
{
	struct net_generic *ng, *old_ng;

	BUG_ON(!mutex_is_locked(&net_mutex));
	BUG_ON(id < MIN_PERNET_OPS_ID);

	old_ng = rcu_dereference_protected(net->gen,
					   lockdep_is_held(&net_mutex));
	if (old_ng->s.len > id) {
		old_ng->ptr[id] = data;
		return 0;
	}

	ng = net_alloc_generic();
	if (ng == NULL)
		return -ENOMEM;

	/*
	 * Some synchronisation notes:
	 *
	 * net_generic() walks the net->gen array inside an RCU
	 * read-side section. Besides, once set, the net->gen->ptr[x]
	 * pointer never changes (see rules in netns/generic.h).
	 *
	 * That said, we simply duplicate this array and schedule
	 * the old copy for kfree after a grace period.
	 */

	memcpy(&ng->ptr[MIN_PERNET_OPS_ID], &old_ng->ptr[MIN_PERNET_OPS_ID],
	       (old_ng->s.len - MIN_PERNET_OPS_ID) * sizeof(void *));
	ng->ptr[id] = data;

	rcu_assign_pointer(net->gen, ng);
	kfree_rcu(old_ng, s.rcu);
	return 0;
}
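/* Run one pernet_operations' init path on @net: allocate and register its
 * per-net data when ->id and ->size are set, then call ->init() if present.
 * On failure the freshly allocated data is freed again.
 */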
static int ops_init(const struct pernet_operations *ops, struct net *net)
{
	int err = -ENOMEM;
	void *data = NULL;

	if (ops->id && ops->size) {
		data = kzalloc(ops->size, GFP_KERNEL);
		if (!data)
			goto out;

		err = net_assign_generic(net, *ops->id, data);
		if (err)
			goto cleanup;
	}
	err = 0;
	if (ops->init)
		err = ops->init(net);
	if (!err)
		return 0;

cleanup:
	kfree(data);

out:
	return err;
}
static void ops_free(const struct pernet_operations *ops, struct net *net)
{
	if (ops->id && ops->size) {
		kfree(net_generic(net, *ops->id));
	}
}
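/* Tear-down helpers used on namespace destruction: per-net ->exit() handlers
 * run first for every namespace on the list, then ->exit_batch() gets the
 * whole list at once, and ops_free_list() finally releases the generic data.
 */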
static void ops_exit_list(const struct pernet_operations *ops,
			  struct list_head *net_exit_list)
{
	struct net *net;
	if (ops->exit) {
		list_for_each_entry(net, net_exit_list, exit_list)
			ops->exit(net);
	}
	if (ops->exit_batch)
		ops->exit_batch(net_exit_list);
}

static void ops_free_list(const struct pernet_operations *ops,
			  struct list_head *net_exit_list)
{
	struct net *net;
	if (ops->size && ops->id) {
		list_for_each_entry(net, net_exit_list, exit_list)
			ops_free(ops, net);
	}
}
/* should be called with nsid_lock held */
static int alloc_netid(struct net *net, struct net *peer, int reqid)
{
	int min = 0, max = 0;

	if (reqid >= 0) {
		min = reqid;
		max = reqid + 1;
	}

	return idr_alloc(&net->netns_ids, peer, min, max, GFP_ATOMIC);
}

/* This function is used by idr_for_each(). If net is equal to peer, the
 * function returns the id so that idr_for_each() stops. Because we cannot
 * return the id 0 (idr_for_each() will not stop), we return the magic value
 * NET_ID_ZERO (-1) for it.
 */
#define NET_ID_ZERO -1
static int net_eq_idr(int id, void *net, void *peer)
{
	if (net_eq(net, peer))
		return id ? : NET_ID_ZERO;
	return 0;
}
/* Should be called with nsid_lock held. If a new id is assigned, the bool alloc
 * is set to true, thus the caller knows that the new id must be notified via
 * rtnl.
 */
static int __peernet2id_alloc(struct net *net, struct net *peer, bool *alloc)
{
	int id = idr_for_each(&net->netns_ids, net_eq_idr, peer);
	bool alloc_it = *alloc;

	*alloc = false;

	/* Magic value for id 0. */
	if (id == NET_ID_ZERO)
		return 0;
	if (id > 0)
		return id;

	if (alloc_it) {
		id = alloc_netid(net, peer, -1);
		*alloc = true;
		return id >= 0 ? id : NETNSA_NSID_NOT_ASSIGNED;
	}

	return NETNSA_NSID_NOT_ASSIGNED;
}

/* should be called with nsid_lock held */
static int __peernet2id(struct net *net, struct net *peer)
{
	bool no = false;

	return __peernet2id_alloc(net, peer, &no);
}
static void rtnl_net_notifyid(struct net *net, int cmd, int id);
/* This function returns the id of a peer netns. If no id is assigned, one will
 * be allocated and returned.
 */
int peernet2id_alloc(struct net *net, struct net *peer)
{
	bool alloc;
	int id;

	if (atomic_read(&net->count) == 0)
		return NETNSA_NSID_NOT_ASSIGNED;
	spin_lock_bh(&net->nsid_lock);
	alloc = atomic_read(&peer->count) == 0 ? false : true;
	id = __peernet2id_alloc(net, peer, &alloc);
	spin_unlock_bh(&net->nsid_lock);
	if (alloc && id >= 0)
		rtnl_net_notifyid(net, RTM_NEWNSID, id);
	return id;
}
/* This function returns, if assigned, the id of a peer netns. */
int peernet2id(struct net *net, struct net *peer)
{
	int id;

	spin_lock_bh(&net->nsid_lock);
	id = __peernet2id(net, peer);
	spin_unlock_bh(&net->nsid_lock);
	return id;
}
EXPORT_SYMBOL(peernet2id);

/* This function returns true if the peer netns has an id assigned into the
 * current netns.
 */
bool peernet_has_id(struct net *net, struct net *peer)
{
	return peernet2id(net, peer) >= 0;
}
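/* Look up the peer namespace registered under @id in @net and take a
 * reference on it; returns NULL when the id is negative or unassigned.
 */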
struct net *get_net_ns_by_id(struct net *net, int id)
{
	struct net *peer;

	if (id < 0)
		return NULL;

	rcu_read_lock();
	spin_lock_bh(&net->nsid_lock);
	peer = idr_find(&net->netns_ids, id);
	if (peer)
		get_net(peer);
	spin_unlock_bh(&net->nsid_lock);
	rcu_read_unlock();

	return peer;
}
/*
 * setup_net runs the initializers for the network namespace object.
 */
static __net_init int setup_net(struct net *net, struct user_namespace *user_ns)
{
	/* Must be called with net_mutex held */
	const struct pernet_operations *ops, *saved_ops;
	int error = 0;
	LIST_HEAD(net_exit_list);

	atomic_set(&net->count, 1);
	atomic_set(&net->passive, 1);
	net->dev_base_seq = 1;
	net->user_ns = user_ns;
	idr_init(&net->netns_ids);
	spin_lock_init(&net->nsid_lock);

	list_for_each_entry(ops, &pernet_list, list) {
		error = ops_init(ops, net);
		if (error < 0)
			goto out_undo;
	}
out:
	return error;

out_undo:
	/* Walk through the list backwards calling the exit functions
	 * for the pernet modules whose init functions did not fail.
	 */
	list_add(&net->exit_list, &net_exit_list);
	saved_ops = ops;
	list_for_each_entry_continue_reverse(ops, &pernet_list, list)
		ops_exit_list(ops, &net_exit_list);

	ops = saved_ops;
	list_for_each_entry_continue_reverse(ops, &pernet_list, list)
		ops_free_list(ops, &net_exit_list);

	rcu_barrier();
	goto out;
}
#ifdef CONFIG_NET_NS
static struct ucounts *inc_net_namespaces(struct user_namespace *ns)
{
	return inc_ucount(ns, current_euid(), UCOUNT_NET_NAMESPACES);
}

static void dec_net_namespaces(struct ucounts *ucounts)
{
	dec_ucount(ucounts, UCOUNT_NET_NAMESPACES);
}

static struct kmem_cache *net_cachep;
static struct workqueue_struct *netns_wq;
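/* Allocate a struct net together with its initial net_generic array; if
 * either allocation fails, anything allocated so far is released again.
 */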
static struct net *net_alloc(void)
{
	struct net *net = NULL;
	struct net_generic *ng;

	ng = net_alloc_generic();
	if (!ng)
		goto out;

	net = kmem_cache_zalloc(net_cachep, GFP_KERNEL);
	if (!net)
		goto out_free;

	rcu_assign_pointer(net->gen, ng);
out:
	return net;

out_free:
	kfree(ng);
	goto out;
}

static void net_free(struct net *net)
{
	kfree(rcu_access_pointer(net->gen));
	kmem_cache_free(net_cachep, net);
}

void net_drop_ns(void *p)
{
	struct net *ns = p;
	if (ns && atomic_dec_and_test(&ns->passive))
		net_free(ns);
}
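/* Create a new network namespace when CLONE_NEWNET is set in @flags,
 * charging it against @user_ns's netns ucount; otherwise return the old
 * namespace with an extra reference.
 */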
struct net *copy_net_ns(unsigned long flags,
			struct user_namespace *user_ns, struct net *old_net)
{
	struct ucounts *ucounts;
	struct net *net;
	int rv;

	if (!(flags & CLONE_NEWNET))
		return get_net(old_net);

	ucounts = inc_net_namespaces(user_ns);
	if (!ucounts)
		return ERR_PTR(-ENOSPC);

	net = net_alloc();
	if (!net) {
		dec_net_namespaces(ucounts);
		return ERR_PTR(-ENOMEM);
	}

	get_user_ns(user_ns);

	rv = mutex_lock_killable(&net_mutex);
	if (rv < 0) {
		net_free(net);
		dec_net_namespaces(ucounts);
		put_user_ns(user_ns);
		return ERR_PTR(rv);
	}

	net->ucounts = ucounts;
	rv = setup_net(net, user_ns);
	if (rv == 0) {
		rtnl_lock();
		list_add_tail_rcu(&net->list, &net_namespace_list);
		rtnl_unlock();
	}
	mutex_unlock(&net_mutex);
	if (rv < 0) {
		dec_net_namespaces(ucounts);
		put_user_ns(user_ns);
		net_drop_ns(net);
		return ERR_PTR(rv);
	}
	return net;
}

static DEFINE_SPINLOCK(cleanup_list_lock);
static LIST_HEAD(cleanup_list);  /* Must hold cleanup_list_lock to touch */
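/* Work handler that destroys dead namespaces: it snapshots cleanup_list,
 * unhangs each net from the global list and from every peer's nsid idr,
 * waits for RCU readers, runs the pernet exit methods in reverse
 * registration order and finally drops the remaining references.
 */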
static void cleanup_net(struct work_struct *work)
{
	const struct pernet_operations *ops;
	struct net *net, *tmp;
	struct list_head net_kill_list;
	LIST_HEAD(net_exit_list);

	/* Atomically snapshot the list of namespaces to cleanup */
	spin_lock_irq(&cleanup_list_lock);
	list_replace_init(&cleanup_list, &net_kill_list);
	spin_unlock_irq(&cleanup_list_lock);

	mutex_lock(&net_mutex);

	/* Don't let anyone else find us. */
	rtnl_lock();
	list_for_each_entry(net, &net_kill_list, cleanup_list) {
		list_del_rcu(&net->list);
		list_add_tail(&net->exit_list, &net_exit_list);
		for_each_net(tmp) {
			int id;

			spin_lock_bh(&tmp->nsid_lock);
			id = __peernet2id(tmp, net);
			if (id >= 0)
				idr_remove(&tmp->netns_ids, id);
			spin_unlock_bh(&tmp->nsid_lock);
			if (id >= 0)
				rtnl_net_notifyid(tmp, RTM_DELNSID, id);
		}
		spin_lock_bh(&net->nsid_lock);
		idr_destroy(&net->netns_ids);
		spin_unlock_bh(&net->nsid_lock);
	}
	rtnl_unlock();

	/*
	 * Another CPU might be rcu-iterating the list, wait for it.
	 * This needs to be before calling the exit() notifiers, so
	 * the rcu_barrier() below isn't sufficient alone.
	 */
	synchronize_rcu();

	/* Run all of the network namespace exit methods */
	list_for_each_entry_reverse(ops, &pernet_list, list)
		ops_exit_list(ops, &net_exit_list);

	/* Free the net generic variables */
	list_for_each_entry_reverse(ops, &pernet_list, list)
		ops_free_list(ops, &net_exit_list);

	mutex_unlock(&net_mutex);

	/* Ensure there are no outstanding rcu callbacks using this
	 * network namespace.
	 */
	rcu_barrier();

	/* Finally it is safe to free my network namespace structure */
	list_for_each_entry_safe(net, tmp, &net_exit_list, exit_list) {
		list_del_init(&net->exit_list);
		dec_net_namespaces(net->ucounts);
		put_user_ns(net->user_ns);
		net_drop_ns(net);
	}
}
/**
 * net_ns_barrier - wait until concurrent net_cleanup_work is done
 *
 * cleanup_net runs from work queue and will first remove namespaces
 * from the global list, then run net exit functions.
 *
 * Call this in module exit path to make sure that all netns
 * ->exit ops have been invoked before the function is removed.
 */
void net_ns_barrier(void)
{
	mutex_lock(&net_mutex);
	mutex_unlock(&net_mutex);
}
EXPORT_SYMBOL(net_ns_barrier);

static DECLARE_WORK(net_cleanup_work, cleanup_net);
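/* Called when the last reference to a net is dropped: queue the namespace
 * on cleanup_list so cleanup_net() can destroy it in process context.
 */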
void __put_net(struct net *net)
{
	/* Cleanup the network namespace in process context */
	unsigned long flags;

	spin_lock_irqsave(&cleanup_list_lock, flags);
	list_add(&net->cleanup_list, &cleanup_list);
	spin_unlock_irqrestore(&cleanup_list_lock, flags);

	queue_work(netns_wq, &net_cleanup_work);
}
EXPORT_SYMBOL_GPL(__put_net);
struct net *get_net_ns_by_fd(int fd)
{
	struct file *file;
	struct ns_common *ns;
	struct net *net;

	file = proc_ns_fget(fd);
	if (IS_ERR(file))
		return ERR_CAST(file);

	ns = get_proc_ns(file_inode(file));
	if (ns->ops == &netns_operations)
		net = get_net(container_of(ns, struct net, ns));
	else
		net = ERR_PTR(-EINVAL);

	fput(file);
	return net;
}

#else
struct net *get_net_ns_by_fd(int fd)
{
	return ERR_PTR(-EINVAL);
}
#endif
EXPORT_SYMBOL_GPL(get_net_ns_by_fd);
struct net *get_net_ns_by_pid(pid_t pid)
{
	struct task_struct *tsk;
	struct net *net;

	/* Lookup the network namespace */
	net = ERR_PTR(-ESRCH);
	rcu_read_lock();
	tsk = find_task_by_vpid(pid);
	if (tsk) {
		struct nsproxy *nsproxy;
		task_lock(tsk);
		nsproxy = tsk->nsproxy;
		if (nsproxy)
			net = get_net(nsproxy->net_ns);
		task_unlock(tsk);
	}
	rcu_read_unlock();
	return net;
}
EXPORT_SYMBOL_GPL(get_net_ns_by_pid);
static __net_init int net_ns_net_init(struct net *net)
{
#ifdef CONFIG_NET_NS
	net->ns.ops = &netns_operations;
#endif
	return ns_alloc_inum(&net->ns);
}

static __net_exit void net_ns_net_exit(struct net *net)
{
	ns_free_inum(&net->ns);
}

static struct pernet_operations __net_initdata net_ns_ops = {
	.init = net_ns_net_init,
	.exit = net_ns_net_exit,
};

static const struct nla_policy rtnl_net_policy[NETNSA_MAX + 1] = {
	[NETNSA_NONE]	= { .type = NLA_UNSPEC },
	[NETNSA_NSID]	= { .type = NLA_S32 },
	[NETNSA_PID]	= { .type = NLA_U32 },
	[NETNSA_FD]	= { .type = NLA_U32 },
};
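/* RTM_NEWNSID handler: resolve the peer namespace from NETNSA_PID or
 * NETNSA_FD, refuse a peer that already has an id here (-EEXIST), and
 * otherwise allocate the requested nsid and notify the new mapping.
 */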
static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tb[NETNSA_MAX + 1];
	struct net *peer;
	int nsid, err;

	err = nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX,
			  rtnl_net_policy, extack);
	if (err < 0)
		return err;
	if (!tb[NETNSA_NSID])
		return -EINVAL;
	nsid = nla_get_s32(tb[NETNSA_NSID]);

	if (tb[NETNSA_PID])
		peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID]));
	else if (tb[NETNSA_FD])
		peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
	else
		return -EINVAL;
	if (IS_ERR(peer))
		return PTR_ERR(peer);

	spin_lock_bh(&net->nsid_lock);
	if (__peernet2id(net, peer) >= 0) {
		spin_unlock_bh(&net->nsid_lock);
		err = -EEXIST;
		goto out;
	}

	err = alloc_netid(net, peer, nsid);
	spin_unlock_bh(&net->nsid_lock);
	if (err >= 0) {
		rtnl_net_notifyid(net, RTM_NEWNSID, err);
		err = 0;
	}
out:
	put_net(peer);
	return err;
}
static int rtnl_net_get_size(void)
{
	return NLMSG_ALIGN(sizeof(struct rtgenmsg))
	       + nla_total_size(sizeof(s32)) /* NETNSA_NSID */
	       ;
}
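/* Build one RTM_NEWNSID/RTM_DELNSID message: an AF_UNSPEC rtgenmsg header
 * followed by a single NETNSA_NSID attribute carrying @nsid.
 */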
static int rtnl_net_fill(struct sk_buff *skb, u32 portid, u32 seq, int flags,
			 int cmd, struct net *net, int nsid)
{
	struct nlmsghdr *nlh;
	struct rtgenmsg *rth;

	nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rth), flags);
	if (!nlh)
		return -EMSGSIZE;

	rth = nlmsg_data(nlh);
	rth->rtgen_family = AF_UNSPEC;

	if (nla_put_s32(skb, NETNSA_NSID, nsid))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
static int rtnl_net_getid(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tb[NETNSA_MAX + 1];
	struct sk_buff *msg;
	struct net *peer;
	int err, id;

	err = nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX,
			  rtnl_net_policy, extack);
	if (err < 0)
		return err;
	if (tb[NETNSA_PID])
		peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID]));
	else if (tb[NETNSA_FD])
		peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
	else
		return -EINVAL;

	if (IS_ERR(peer))
		return PTR_ERR(peer);

	msg = nlmsg_new(rtnl_net_get_size(), GFP_KERNEL);
	if (!msg) {
		err = -ENOMEM;
		goto out;
	}

	id = peernet2id(net, peer);
	err = rtnl_net_fill(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
			    RTM_NEWNSID, net, id);
	if (err < 0)
		goto err_out;

	err = rtnl_unicast(msg, net, NETLINK_CB(skb).portid);
	goto out;

err_out:
	nlmsg_free(msg);
out:
	put_net(peer);
	return err;
}
struct rtnl_net_dump_cb {
	struct net *net;
	struct sk_buff *skb;
	struct netlink_callback *cb;
	int idx;
	int s_idx;
};

static int rtnl_net_dumpid_one(int id, void *peer, void *data)
{
	struct rtnl_net_dump_cb *net_cb = (struct rtnl_net_dump_cb *)data;
	int ret;

	if (net_cb->idx < net_cb->s_idx)
		goto cont;

	ret = rtnl_net_fill(net_cb->skb, NETLINK_CB(net_cb->cb->skb).portid,
			    net_cb->cb->nlh->nlmsg_seq, NLM_F_MULTI,
			    RTM_NEWNSID, net_cb->net, id);
	if (ret < 0)
		return ret;

cont:
	net_cb->idx++;
	return 0;
}
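/* RTM_GETNSID dump handler: walk the nsid idr under nsid_lock, emitting one
 * RTM_NEWNSID record per assigned peer and resuming from cb->args[0].
 */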
static int rtnl_net_dumpid(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct rtnl_net_dump_cb net_cb = {
		.net = net,
		.skb = skb,
		.cb = cb,
		.idx = 0,
		.s_idx = cb->args[0],
	};

	spin_lock_bh(&net->nsid_lock);
	idr_for_each(&net->netns_ids, rtnl_net_dumpid_one, &net_cb);
	spin_unlock_bh(&net->nsid_lock);

	cb->args[0] = net_cb.idx;
	return skb->len;
}
static void rtnl_net_notifyid(struct net *net, int cmd, int id)
{
	struct sk_buff *msg;
	int err = -ENOMEM;

	msg = nlmsg_new(rtnl_net_get_size(), GFP_KERNEL);
	if (!msg)
		goto out;

	err = rtnl_net_fill(msg, 0, 0, 0, cmd, net, id);
	if (err < 0)
		goto err_out;

	rtnl_notify(msg, net, 0, RTNLGRP_NSID, NULL, 0);
	return;

err_out:
	nlmsg_free(msg);
out:
	rtnl_set_sk_err(net, RTNLGRP_NSID, err);
}
static int __init net_ns_init(void)
{
	struct net_generic *ng;

#ifdef CONFIG_NET_NS
	net_cachep = kmem_cache_create("net_namespace", sizeof(struct net),
				       SMP_CACHE_BYTES,
				       SLAB_PANIC, NULL);

	/* Create workqueue for cleanup */
	netns_wq = create_singlethread_workqueue("netns");
	if (!netns_wq)
		panic("Could not create netns workq");
#endif

	ng = net_alloc_generic();
	if (!ng)
		panic("Could not allocate generic netns");

	rcu_assign_pointer(init_net.gen, ng);

	mutex_lock(&net_mutex);
	if (setup_net(&init_net, &init_user_ns))
		panic("Could not setup the initial network namespace");

	init_net_initialized = true;

	rtnl_lock();
	list_add_tail_rcu(&init_net.list, &net_namespace_list);
	rtnl_unlock();

	mutex_unlock(&net_mutex);

	register_pernet_subsys(&net_ns_ops);

	rtnl_register(PF_UNSPEC, RTM_NEWNSID, rtnl_net_newid, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETNSID, rtnl_net_getid, rtnl_net_dumpid,
		      NULL);

	return 0;
}

pure_initcall(net_ns_init);
#ifdef CONFIG_NET_NS
static int __register_pernet_operations(struct list_head *list,
					struct pernet_operations *ops)
{
	struct net *net;
	int error;
	LIST_HEAD(net_exit_list);

	list_add_tail(&ops->list, list);
	if (ops->init || (ops->id && ops->size)) {
		for_each_net(net) {
			error = ops_init(ops, net);
			if (error)
				goto out_undo;
			list_add_tail(&net->exit_list, &net_exit_list);
		}
	}
	return 0;

out_undo:
	/* If I have an error cleanup all namespaces I initialized */
	list_del(&ops->list);
	ops_exit_list(ops, &net_exit_list);
	ops_free_list(ops, &net_exit_list);
	return error;
}

static void __unregister_pernet_operations(struct pernet_operations *ops)
{
	struct net *net;
	LIST_HEAD(net_exit_list);

	list_del(&ops->list);
	for_each_net(net)
		list_add_tail(&net->exit_list, &net_exit_list);
	ops_exit_list(ops, &net_exit_list);
	ops_free_list(ops, &net_exit_list);
}

#else

static int __register_pernet_operations(struct list_head *list,
					struct pernet_operations *ops)
{
	if (!init_net_initialized) {
		list_add_tail(&ops->list, list);
		return 0;
	}

	return ops_init(ops, &init_net);
}

static void __unregister_pernet_operations(struct pernet_operations *ops)
{
	if (!init_net_initialized) {
		list_del(&ops->list);
	} else {
		LIST_HEAD(net_exit_list);
		list_add(&init_net.exit_list, &net_exit_list);
		ops_exit_list(ops, &net_exit_list);
		ops_free_list(ops, &net_exit_list);
	}
}

#endif /* CONFIG_NET_NS */

static DEFINE_IDA(net_generic_ids);
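/* Common registration path (called with net_mutex held): reserve a generic
 * id from net_generic_ids when the ops wants one, grow max_gen_ptrs to fit
 * it, then run the ops on the relevant namespaces; on error the id is
 * returned to the ida again.
 */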
static int register_pernet_operations(struct list_head *list,
				      struct pernet_operations *ops)
{
	int error;

	if (ops->id) {
again:
		error = ida_get_new_above(&net_generic_ids, MIN_PERNET_OPS_ID, ops->id);
		if (error < 0) {
			if (error == -EAGAIN) {
				ida_pre_get(&net_generic_ids, GFP_KERNEL);
				goto again;
			}
			return error;
		}
		max_gen_ptrs = max(max_gen_ptrs, *ops->id + 1);
	}
	error = __register_pernet_operations(list, ops);
	if (error) {
		rcu_barrier();
		if (ops->id)
			ida_remove(&net_generic_ids, *ops->id);
	}

	return error;
}

static void unregister_pernet_operations(struct pernet_operations *ops)
{
	__unregister_pernet_operations(ops);
	rcu_barrier();
	if (ops->id)
		ida_remove(&net_generic_ids, *ops->id);
}
/**
 * register_pernet_subsys - register a network namespace subsystem
 * @ops: pernet operations structure for the subsystem
 *
 * Register a subsystem which has init and exit functions
 * that are called when network namespaces are created and
 * destroyed respectively.
 *
 * When registered, all network namespace init functions are
 * called for every existing network namespace, allowing kernel
 * modules to have a race-free view of the set of network namespaces.
 *
 * When a new network namespace is created all of the init
 * methods are called in the order in which they were registered.
 *
 * When a network namespace is destroyed all of the exit methods
 * are called in the reverse of the order with which they were
 * registered.
 */
int register_pernet_subsys(struct pernet_operations *ops)
{
	int error;
	mutex_lock(&net_mutex);
	error = register_pernet_operations(first_device, ops);
	mutex_unlock(&net_mutex);
	return error;
}
EXPORT_SYMBOL_GPL(register_pernet_subsys);

/**
 * unregister_pernet_subsys - unregister a network namespace subsystem
 * @ops: pernet operations structure to manipulate
 *
 * Remove the pernet operations structure from the list to be
 * used when network namespaces are created or destroyed. In
 * addition run the exit method for all existing network
 * namespaces.
 */
void unregister_pernet_subsys(struct pernet_operations *ops)
{
	mutex_lock(&net_mutex);
	unregister_pernet_operations(ops);
	mutex_unlock(&net_mutex);
}
EXPORT_SYMBOL_GPL(unregister_pernet_subsys);
/**
 * register_pernet_device - register a network namespace device
 * @ops: pernet operations structure for the subsystem
 *
 * Register a device which has init and exit functions
 * that are called when network namespaces are created and
 * destroyed respectively.
 *
 * When registered, all network namespace init functions are
 * called for every existing network namespace, allowing kernel
 * modules to have a race-free view of the set of network namespaces.
 *
 * When a new network namespace is created all of the init
 * methods are called in the order in which they were registered.
 *
 * When a network namespace is destroyed all of the exit methods
 * are called in the reverse of the order with which they were
 * registered.
 */
int register_pernet_device(struct pernet_operations *ops)
{
	int error;
	mutex_lock(&net_mutex);
	error = register_pernet_operations(&pernet_list, ops);
	if (!error && (first_device == &pernet_list))
		first_device = &ops->list;
	mutex_unlock(&net_mutex);
	return error;
}
EXPORT_SYMBOL_GPL(register_pernet_device);

/**
 * unregister_pernet_device - unregister a network namespace netdevice
 * @ops: pernet operations structure to manipulate
 *
 * Remove the pernet operations structure from the list to be
 * used when network namespaces are created or destroyed. In
 * addition run the exit method for all existing network
 * namespaces.
 */
void unregister_pernet_device(struct pernet_operations *ops)
{
	mutex_lock(&net_mutex);
	if (&ops->list == first_device)
		first_device = first_device->next;
	unregister_pernet_operations(ops);
	mutex_unlock(&net_mutex);
}
EXPORT_SYMBOL_GPL(unregister_pernet_device);
#ifdef CONFIG_NET_NS
static struct ns_common *netns_get(struct task_struct *task)
{
	struct net *net = NULL;
	struct nsproxy *nsproxy;

	task_lock(task);
	nsproxy = task->nsproxy;
	if (nsproxy)
		net = get_net(nsproxy->net_ns);
	task_unlock(task);

	return net ? &net->ns : NULL;
}

static inline struct net *to_net_ns(struct ns_common *ns)
{
	return container_of(ns, struct net, ns);
}

static void netns_put(struct ns_common *ns)
{
	put_net(to_net_ns(ns));
}
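/* setns() install hook: require CAP_SYS_ADMIN over both the target
 * namespace's owning user namespace and the caller's own user namespace
 * before swapping nsproxy->net_ns to the new net.
 */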
static int netns_install(struct nsproxy *nsproxy, struct ns_common *ns)
{
	struct net *net = to_net_ns(ns);

	if (!ns_capable(net->user_ns, CAP_SYS_ADMIN) ||
	    !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
		return -EPERM;

	put_net(nsproxy->net_ns);
	nsproxy->net_ns = get_net(net);
	return 0;
}

static struct user_namespace *netns_owner(struct ns_common *ns)
{
	return to_net_ns(ns)->user_ns;
}

const struct proc_ns_operations netns_operations = {
	.name		= "net",
	.type		= CLONE_NEWNET,
	.get		= netns_get,
	.put		= netns_put,
	.install	= netns_install,
	.owner		= netns_owner,
};
#endif