#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/workqueue.h>
#include <linux/rtnetlink.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/idr.h>
#include <linux/rculist.h>
#include <linux/nsproxy.h>
#include <linux/fs.h>
#include <linux/proc_ns.h>
#include <linux/file.h>
#include <linux/export.h>
#include <linux/user_namespace.h>
#include <linux/net_namespace.h>
#include <linux/sched/task.h>
#include <linux/uidgid.h>

#include <net/sock.h>
#include <net/netlink.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
/*
 * Our network namespace constructor/destructor lists
 */

static LIST_HEAD(pernet_list);
static struct list_head *first_device = &pernet_list;

LIST_HEAD(net_namespace_list);
EXPORT_SYMBOL_GPL(net_namespace_list);

/* Protects net_namespace_list. Nests inside rtnl_lock() */
DECLARE_RWSEM(net_rwsem);
EXPORT_SYMBOL_GPL(net_rwsem);

struct net init_net = {
        .count          = REFCOUNT_INIT(1),
        .dev_base_head  = LIST_HEAD_INIT(init_net.dev_base_head),
};
EXPORT_SYMBOL(init_net);

static bool init_net_initialized;
/*
 * pernet_ops_rwsem: protects: pernet_list, net_generic_ids,
 * init_net_initialized and the first_device pointer.
 * This is an internal net namespace object. Please do not use it
 * outside.
 */
DECLARE_RWSEM(pernet_ops_rwsem);
EXPORT_SYMBOL_GPL(pernet_ops_rwsem);

#define MIN_PERNET_OPS_ID       \
        ((sizeof(struct net_generic) + sizeof(void *) - 1) / sizeof(void *))

#define INITIAL_NET_GEN_PTRS    13 /* +1 for len +2 for rcu_head */

static unsigned int max_gen_ptrs = INITIAL_NET_GEN_PTRS;

static struct net_generic *net_alloc_generic(void)
{
        struct net_generic *ng;
        unsigned int generic_size = offsetof(struct net_generic, ptr[max_gen_ptrs]);

        ng = kzalloc(generic_size, GFP_KERNEL);
        if (ng)
                ng->s.len = max_gen_ptrs;

        return ng;
}

static int net_assign_generic(struct net *net, unsigned int id, void *data)
{
        struct net_generic *ng, *old_ng;

        BUG_ON(id < MIN_PERNET_OPS_ID);

        old_ng = rcu_dereference_protected(net->gen,
                                           lockdep_is_held(&pernet_ops_rwsem));
        if (old_ng->s.len > id) {
                old_ng->ptr[id] = data;
                return 0;
        }

        ng = net_alloc_generic();
        if (ng == NULL)
                return -ENOMEM;

        /*
         * Some synchronisation notes:
         *
         * net_generic() walks the net->gen array inside an RCU
         * read-side section. Besides, once set, the net->gen->ptr[x]
         * pointer never changes (see the rules in netns/generic.h).
         *
         * That said, we simply duplicate this array and schedule
         * the old copy for kfree after a grace period.
         */

        memcpy(&ng->ptr[MIN_PERNET_OPS_ID], &old_ng->ptr[MIN_PERNET_OPS_ID],
               (old_ng->s.len - MIN_PERNET_OPS_ID) * sizeof(void *));
        ng->ptr[id] = data;

        rcu_assign_pointer(net->gen, ng);
        kfree_rcu(old_ng, s.rcu);
        return 0;
}
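
/*
 * For reference, the lockless reader that net_assign_generic() above
 * synchronises with is net_generic() from <net/netns/generic.h>; a
 * paraphrased sketch (not part of this file) looks roughly like:
 *
 *	static inline void *net_generic(const struct net *net, unsigned int id)
 *	{
 *		struct net_generic *ng;
 *		void *ptr;
 *
 *		rcu_read_lock();
 *		ng = rcu_dereference(net->gen);
 *		ptr = ng->ptr[id];
 *		rcu_read_unlock();
 *
 *		return ptr;
 *	}
 *
 * A reader therefore sees either the old or the new array, never a
 * half-copied one, and the old array is only freed after a grace
 * period via the kfree_rcu() above.
 */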

static int ops_init(const struct pernet_operations *ops, struct net *net)
{
        int err = -ENOMEM;
        void *data = NULL;

        if (ops->id && ops->size) {
                data = kzalloc(ops->size, GFP_KERNEL);
                if (!data)
                        goto out;

                err = net_assign_generic(net, *ops->id, data);
                if (err)
                        goto cleanup;
        }
        err = 0;
        if (ops->init)
                err = ops->init(net);
        if (!err)
                return 0;

cleanup:
        kfree(data);

out:
        return err;
}

static void ops_free(const struct pernet_operations *ops, struct net *net)
{
        if (ops->id && ops->size) {
                kfree(net_generic(net, *ops->id));
        }
}

static void ops_exit_list(const struct pernet_operations *ops,
                          struct list_head *net_exit_list)
{
        struct net *net;
        if (ops->exit) {
                list_for_each_entry(net, net_exit_list, exit_list)
                        ops->exit(net);
        }
        if (ops->exit_batch)
                ops->exit_batch(net_exit_list);
}

static void ops_free_list(const struct pernet_operations *ops,
                          struct list_head *net_exit_list)
{
        struct net *net;
        if (ops->size && ops->id) {
                list_for_each_entry(net, net_exit_list, exit_list)
                        ops_free(ops, net);
        }
}
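
/*
 * Illustrative sketch of how a (hypothetical) subsystem uses the
 * ops_init()/ops_free() machinery above: pointing ->id at a static key
 * and setting ->size makes the core allocate a per-netns area that is
 * later looked up with net_generic(). Names prefixed foo_ are made up
 * for this example.
 *
 *	static unsigned int foo_net_id;
 *
 *	struct foo_pernet {
 *		int counter;
 *	};
 *
 *	static int __net_init foo_init_net(struct net *net)
 *	{
 *		struct foo_pernet *fp = net_generic(net, foo_net_id);
 *
 *		fp->counter = 0;	// area was kzalloc'ed by ops_init()
 *		return 0;
 *	}
 *
 *	static struct pernet_operations foo_net_ops = {
 *		.init = foo_init_net,
 *		.id   = &foo_net_id,
 *		.size = sizeof(struct foo_pernet),
 *	};
 */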

/* should be called with nsid_lock held */
static int alloc_netid(struct net *net, struct net *peer, int reqid)
{
        int min = 0, max = 0;

        if (reqid >= 0) {
                min = reqid;
                max = reqid + 1;
        }

        return idr_alloc(&net->netns_ids, peer, min, max, GFP_ATOMIC);
}

/* This function is used by idr_for_each(). If net is equal to peer, the
 * function returns the id so that idr_for_each() stops. Because we cannot
 * return the id 0 (idr_for_each() will not stop), we return the magic value
 * NET_ID_ZERO (-1) for it.
 */
#define NET_ID_ZERO -1
static int net_eq_idr(int id, void *net, void *peer)
{
        if (net_eq(net, peer))
                return id ? : NET_ID_ZERO;
        return 0;
}

/* Should be called with nsid_lock held. If a new id is assigned, the bool alloc
 * is set to true, thus the caller knows that the new id must be notified via
 * rtnl.
 */
static int __peernet2id_alloc(struct net *net, struct net *peer, bool *alloc)
{
        int id = idr_for_each(&net->netns_ids, net_eq_idr, peer);
        bool alloc_it = *alloc;

        *alloc = false;

        /* Magic value for id 0. */
        if (id == NET_ID_ZERO)
                return 0;
        if (id > 0)
                return id;

        if (alloc_it) {
                id = alloc_netid(net, peer, -1);
                *alloc = true;
                return id >= 0 ? id : NETNSA_NSID_NOT_ASSIGNED;
        }

        return NETNSA_NSID_NOT_ASSIGNED;
}

/* should be called with nsid_lock held */
static int __peernet2id(struct net *net, struct net *peer)
{
        bool no = false;

        return __peernet2id_alloc(net, peer, &no);
}

static void rtnl_net_notifyid(struct net *net, int cmd, int id);
/* This function returns the id of a peer netns. If no id is assigned, one will
 * be allocated and returned.
 */
int peernet2id_alloc(struct net *net, struct net *peer)
{
        bool alloc = false, alive = false;
        int id;

        if (refcount_read(&net->count) == 0)
                return NETNSA_NSID_NOT_ASSIGNED;
        spin_lock_bh(&net->nsid_lock);
        /*
         * When peer is obtained from RCU lists, we may race with
         * its cleanup. Check whether it's alive, and this guarantees
         * we never hash a peer back to net->netns_ids after it has
         * just been idr_remove()'d from there in cleanup_net().
         */
        if (maybe_get_net(peer))
                alive = alloc = true;
        id = __peernet2id_alloc(net, peer, &alloc);
        spin_unlock_bh(&net->nsid_lock);
        if (alloc && id >= 0)
                rtnl_net_notifyid(net, RTM_NEWNSID, id);
        if (alive)
                put_net(peer);
        return id;
}
EXPORT_SYMBOL_GPL(peernet2id_alloc);

/* This function returns, if assigned, the id of a peer netns. */
int peernet2id(struct net *net, struct net *peer)
{
        int id;

        spin_lock_bh(&net->nsid_lock);
        id = __peernet2id(net, peer);
        spin_unlock_bh(&net->nsid_lock);
        return id;
}
EXPORT_SYMBOL(peernet2id);

/* This function returns true if the peer netns has an id assigned into the
 * current netns.
 */
bool peernet_has_id(struct net *net, struct net *peer)
{
        return peernet2id(net, peer) >= 0;
}

struct net *get_net_ns_by_id(struct net *net, int id)
{
        struct net *peer;

        if (id < 0)
                return NULL;

        rcu_read_lock();
        peer = idr_find(&net->netns_ids, id);
        if (peer)
                peer = maybe_get_net(peer);
        rcu_read_unlock();

        return peer;
}
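
/*
 * On success the caller owns a reference on the returned netns (taken
 * via maybe_get_net() above) and must drop it with put_net(). A minimal,
 * illustrative caller sketch:
 *
 *	struct net *peer = get_net_ns_by_id(net, nsid);
 *
 *	if (!peer)
 *		return -ENOENT;
 *	...
 *	put_net(peer);
 */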

/*
 * setup_net runs the initializers for the network namespace object.
 */
static __net_init int setup_net(struct net *net, struct user_namespace *user_ns)
{
        /* Must be called with pernet_ops_rwsem held */
        const struct pernet_operations *ops, *saved_ops;
        int error = 0;
        LIST_HEAD(net_exit_list);

        refcount_set(&net->count, 1);
        refcount_set(&net->passive, 1);
        net->dev_base_seq = 1;
        net->user_ns = user_ns;
        idr_init(&net->netns_ids);
        spin_lock_init(&net->nsid_lock);
        mutex_init(&net->ipv4.ra_mutex);

        list_for_each_entry(ops, &pernet_list, list) {
                error = ops_init(ops, net);
                if (error < 0)
                        goto out_undo;
        }
        down_write(&net_rwsem);
        list_add_tail_rcu(&net->list, &net_namespace_list);
        up_write(&net_rwsem);
out:
        return error;

out_undo:
        /* Walk through the list backwards calling the exit functions
         * for the pernet modules whose init functions did not fail.
         */
        list_add(&net->exit_list, &net_exit_list);
        saved_ops = ops;
        list_for_each_entry_continue_reverse(ops, &pernet_list, list)
                ops_exit_list(ops, &net_exit_list);

        ops = saved_ops;
        list_for_each_entry_continue_reverse(ops, &pernet_list, list)
                ops_free_list(ops, &net_exit_list);

        rcu_barrier();
        goto out;
}

static int __net_init net_defaults_init_net(struct net *net)
{
        net->core.sysctl_somaxconn = SOMAXCONN;
        return 0;
}

static struct pernet_operations net_defaults_ops = {
        .init = net_defaults_init_net,
};

static __init int net_defaults_init(void)
{
        if (register_pernet_subsys(&net_defaults_ops))
                panic("Cannot initialize net default settings");

        return 0;
}

core_initcall(net_defaults_init);

#ifdef CONFIG_NET_NS
static struct ucounts *inc_net_namespaces(struct user_namespace *ns)
{
        return inc_ucount(ns, current_euid(), UCOUNT_NET_NAMESPACES);
}

static void dec_net_namespaces(struct ucounts *ucounts)
{
        dec_ucount(ucounts, UCOUNT_NET_NAMESPACES);
}

static struct kmem_cache *net_cachep __ro_after_init;
static struct workqueue_struct *netns_wq;

static struct net *net_alloc(void)
{
        struct net *net = NULL;
        struct net_generic *ng;

        ng = net_alloc_generic();
        if (!ng)
                goto out;

        net = kmem_cache_zalloc(net_cachep, GFP_KERNEL);
        if (!net)
                goto out_free;

        rcu_assign_pointer(net->gen, ng);
out:
        return net;

out_free:
        kfree(ng);
        goto out;
}

static void net_free(struct net *net)
{
        kfree(rcu_access_pointer(net->gen));
        kmem_cache_free(net_cachep, net);
}

void net_drop_ns(void *p)
{
        struct net *ns = p;
        if (ns && refcount_dec_and_test(&ns->passive))
                net_free(ns);
}

struct net *copy_net_ns(unsigned long flags,
                        struct user_namespace *user_ns, struct net *old_net)
{
        struct ucounts *ucounts;
        struct net *net;
        int rv;

        if (!(flags & CLONE_NEWNET))
                return get_net(old_net);

        ucounts = inc_net_namespaces(user_ns);
        if (!ucounts)
                return ERR_PTR(-ENOSPC);

        net = net_alloc();
        if (!net) {
                rv = -ENOMEM;
                goto dec_ucounts;
        }
        refcount_set(&net->passive, 1);
        net->ucounts = ucounts;
        get_user_ns(user_ns);

        rv = down_read_killable(&pernet_ops_rwsem);
        if (rv < 0)
                goto put_userns;

        rv = setup_net(net, user_ns);

        up_read(&pernet_ops_rwsem);

        if (rv < 0) {
put_userns:
                put_user_ns(user_ns);
                net_drop_ns(net);
dec_ucounts:
                dec_net_namespaces(ucounts);
                return ERR_PTR(rv);
        }
        return net;
}

/**
 * net_ns_get_ownership - get sysfs ownership data for @net
 * @net: network namespace in question (can be NULL)
 * @uid: kernel user ID for sysfs objects
 * @gid: kernel group ID for sysfs objects
 *
 * Returns the uid/gid pair of root in the user namespace associated with the
 * given network namespace.
 */
void net_ns_get_ownership(const struct net *net, kuid_t *uid, kgid_t *gid)
{
        if (net) {
                kuid_t ns_root_uid = make_kuid(net->user_ns, 0);
                kgid_t ns_root_gid = make_kgid(net->user_ns, 0);

                if (uid_valid(ns_root_uid))
                        *uid = ns_root_uid;

                if (gid_valid(ns_root_gid))
                        *gid = ns_root_gid;
        } else {
                *uid = GLOBAL_ROOT_UID;
                *gid = GLOBAL_ROOT_GID;
        }
}
EXPORT_SYMBOL_GPL(net_ns_get_ownership);
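
/*
 * Note that for a non-NULL @net the helper above only overwrites
 * *uid/*gid when the translation is valid, so callers are expected to
 * pre-initialise the pair. An illustrative sketch (dev is a
 * hypothetical struct net_device):
 *
 *	kuid_t uid = GLOBAL_ROOT_UID;
 *	kgid_t gid = GLOBAL_ROOT_GID;
 *
 *	net_ns_get_ownership(dev_net(dev), &uid, &gid);
 */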

static void unhash_nsid(struct net *net, struct net *last)
{
        struct net *tmp;
        /* This function is only called from cleanup_net() work,
         * and this work is the only process that may delete
         * a net from net_namespace_list. So, while the below
         * is executing, the list may only grow. Thus, we do not
         * use for_each_net_rcu() or net_rwsem.
         */
        for_each_net(tmp) {
                int id;

                spin_lock_bh(&tmp->nsid_lock);
                id = __peernet2id(tmp, net);
                if (id >= 0)
                        idr_remove(&tmp->netns_ids, id);
                spin_unlock_bh(&tmp->nsid_lock);
                if (id >= 0)
                        rtnl_net_notifyid(tmp, RTM_DELNSID, id);
                if (tmp == last)
                        break;
        }
        spin_lock_bh(&net->nsid_lock);
        idr_destroy(&net->netns_ids);
        spin_unlock_bh(&net->nsid_lock);
}

static LLIST_HEAD(cleanup_list);

static void cleanup_net(struct work_struct *work)
{
        const struct pernet_operations *ops;
        struct net *net, *tmp, *last;
        struct llist_node *net_kill_list;
        LIST_HEAD(net_exit_list);

        /* Atomically snapshot the list of namespaces to cleanup */
        net_kill_list = llist_del_all(&cleanup_list);

        down_read(&pernet_ops_rwsem);

        /* Don't let anyone else find us. */
        down_write(&net_rwsem);
        llist_for_each_entry(net, net_kill_list, cleanup_list)
                list_del_rcu(&net->list);
        /* Cache the last net. After we drop net_rwsem, no new net
         * added to net_namespace_list can assign an nsid
         * to a net from net_kill_list (see peernet2id_alloc()).
         * So, we skip them in unhash_nsid().
         *
         * Note that unhash_nsid() does not delete nsid links
         * between net_kill_list's nets, as they've already been
         * deleted from net_namespace_list. But this would be
         * useless anyway, as netns_ids are destroyed there.
         */
        last = list_last_entry(&net_namespace_list, struct net, list);
        up_write(&net_rwsem);

        llist_for_each_entry(net, net_kill_list, cleanup_list) {
                unhash_nsid(net, last);
                list_add_tail(&net->exit_list, &net_exit_list);
        }

        /*
         * Another CPU might be rcu-iterating the list, wait for it.
         * This needs to be before calling the exit() notifiers, so
         * the rcu_barrier() below isn't sufficient alone.
         */
        synchronize_rcu();

        /* Run all of the network namespace exit methods */
        list_for_each_entry_reverse(ops, &pernet_list, list)
                ops_exit_list(ops, &net_exit_list);

        /* Free the net generic variables */
        list_for_each_entry_reverse(ops, &pernet_list, list)
                ops_free_list(ops, &net_exit_list);

        up_read(&pernet_ops_rwsem);

        /* Ensure there are no outstanding rcu callbacks using this
         * network namespace.
         */
        rcu_barrier();

        /* Finally it is safe to free my network namespace structure */
        list_for_each_entry_safe(net, tmp, &net_exit_list, exit_list) {
                list_del_init(&net->exit_list);
                dec_net_namespaces(net->ucounts);
                put_user_ns(net->user_ns);
                net_drop_ns(net);
        }
}

/**
 * net_ns_barrier - wait until concurrent net_cleanup_work is done
 *
 * cleanup_net runs from work queue and will first remove namespaces
 * from the global list, then run net exit functions.
 *
 * Call this in module exit path to make sure that all netns
 * ->exit ops have been invoked before the function is removed.
 */
void net_ns_barrier(void)
{
        down_write(&pernet_ops_rwsem);
        up_write(&pernet_ops_rwsem);
}
EXPORT_SYMBOL(net_ns_barrier);

static DECLARE_WORK(net_cleanup_work, cleanup_net);

void __put_net(struct net *net)
{
        /* Cleanup the network namespace in process context */
        if (llist_add(&net->cleanup_list, &cleanup_list))
                queue_work(netns_wq, &net_cleanup_work);
}
EXPORT_SYMBOL_GPL(__put_net);

struct net *get_net_ns_by_fd(int fd)
{
        struct file *file;
        struct ns_common *ns;
        struct net *net;

        file = proc_ns_fget(fd);
        if (IS_ERR(file))
                return ERR_CAST(file);

        ns = get_proc_ns(file_inode(file));
        if (ns->ops == &netns_operations)
                net = get_net(container_of(ns, struct net, ns));
        else
                net = ERR_PTR(-EINVAL);

        fput(file);
        return net;
}

#else
struct net *get_net_ns_by_fd(int fd)
{
        return ERR_PTR(-EINVAL);
}
#endif
EXPORT_SYMBOL_GPL(get_net_ns_by_fd);
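
/*
 * Minimal caller sketch (illustrative): fd is assumed to be an open
 * file descriptor on a /proc/<pid>/ns/net file; the reference obtained
 * here must be dropped with put_net() when the caller is done.
 *
 *	struct net *net = get_net_ns_by_fd(fd);
 *
 *	if (IS_ERR(net))
 *		return PTR_ERR(net);
 *	...
 *	put_net(net);
 */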

struct net *get_net_ns_by_pid(pid_t pid)
{
        struct task_struct *tsk;
        struct net *net;

        /* Lookup the network namespace */
        net = ERR_PTR(-ESRCH);
        rcu_read_lock();
        tsk = find_task_by_vpid(pid);
        if (tsk) {
                struct nsproxy *nsproxy;
                task_lock(tsk);
                nsproxy = tsk->nsproxy;
                if (nsproxy)
                        net = get_net(nsproxy->net_ns);
                task_unlock(tsk);
        }
        rcu_read_unlock();
        return net;
}
EXPORT_SYMBOL_GPL(get_net_ns_by_pid);

static __net_init int net_ns_net_init(struct net *net)
{
#ifdef CONFIG_NET_NS
        net->ns.ops = &netns_operations;
#endif
        return ns_alloc_inum(&net->ns);
}

static __net_exit void net_ns_net_exit(struct net *net)
{
        ns_free_inum(&net->ns);
}

static struct pernet_operations __net_initdata net_ns_ops = {
        .init = net_ns_net_init,
        .exit = net_ns_net_exit,
};

static const struct nla_policy rtnl_net_policy[NETNSA_MAX + 1] = {
        [NETNSA_NONE]           = { .type = NLA_UNSPEC },
        [NETNSA_NSID]           = { .type = NLA_S32 },
        [NETNSA_PID]            = { .type = NLA_U32 },
        [NETNSA_FD]             = { .type = NLA_U32 },
};

static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh,
                          struct netlink_ext_ack *extack)
{
        struct net *net = sock_net(skb->sk);
        struct nlattr *tb[NETNSA_MAX + 1];
        struct nlattr *nla;
        struct net *peer;
        int nsid, err;

        err = nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX,
                          rtnl_net_policy, extack);
        if (err < 0)
                return err;
        if (!tb[NETNSA_NSID]) {
                NL_SET_ERR_MSG(extack, "nsid is missing");
                return -EINVAL;
        }
        nsid = nla_get_s32(tb[NETNSA_NSID]);

        if (tb[NETNSA_PID]) {
                peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID]));
                nla = tb[NETNSA_PID];
        } else if (tb[NETNSA_FD]) {
                peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
                nla = tb[NETNSA_FD];
        } else {
                NL_SET_ERR_MSG(extack, "Peer netns reference is missing");
                return -EINVAL;
        }
        if (IS_ERR(peer)) {
                NL_SET_BAD_ATTR(extack, nla);
                NL_SET_ERR_MSG(extack, "Peer netns reference is invalid");
                return PTR_ERR(peer);
        }

        spin_lock_bh(&net->nsid_lock);
        if (__peernet2id(net, peer) >= 0) {
                spin_unlock_bh(&net->nsid_lock);
                err = -EEXIST;
                NL_SET_BAD_ATTR(extack, nla);
                NL_SET_ERR_MSG(extack,
                               "Peer netns already has a nsid assigned");
                goto out;
        }

        err = alloc_netid(net, peer, nsid);
        spin_unlock_bh(&net->nsid_lock);
        if (err >= 0) {
                rtnl_net_notifyid(net, RTM_NEWNSID, err);
                err = 0;
        } else if (err == -ENOSPC && nsid >= 0) {
                err = -EEXIST;
                NL_SET_BAD_ATTR(extack, tb[NETNSA_NSID]);
                NL_SET_ERR_MSG(extack, "The specified nsid is already used");
        }
out:
        put_net(peer);
        return err;
}
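
/*
 * For orientation (an assumption about userspace, not verified here):
 * iproute2's "ip netns set NAME NSID" is the usual producer of this
 * request, sending RTM_NEWNSID with a NETNSA_FD attribute referring to
 * an open fd on /var/run/netns/NAME plus the requested NETNSA_NSID.
 */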

static int rtnl_net_get_size(void)
{
        return NLMSG_ALIGN(sizeof(struct rtgenmsg))
               + nla_total_size(sizeof(s32)) /* NETNSA_NSID */
               ;
}

static int rtnl_net_fill(struct sk_buff *skb, u32 portid, u32 seq, int flags,
                         int cmd, struct net *net, int nsid)
{
        struct nlmsghdr *nlh;
        struct rtgenmsg *rth;

        nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rth), flags);
        if (!nlh)
                return -EMSGSIZE;

        rth = nlmsg_data(nlh);
        rth->rtgen_family = AF_UNSPEC;

        if (nla_put_s32(skb, NETNSA_NSID, nsid))
                goto nla_put_failure;

        nlmsg_end(skb, nlh);
        return 0;

nla_put_failure:
        nlmsg_cancel(skb, nlh);
        return -EMSGSIZE;
}

static int rtnl_net_getid(struct sk_buff *skb, struct nlmsghdr *nlh,
                          struct netlink_ext_ack *extack)
{
        struct net *net = sock_net(skb->sk);
        struct nlattr *tb[NETNSA_MAX + 1];
        struct nlattr *nla;
        struct sk_buff *msg;
        struct net *peer;
        int err, id;

        err = nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX,
                          rtnl_net_policy, extack);
        if (err < 0)
                return err;
        if (tb[NETNSA_PID]) {
                peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID]));
                nla = tb[NETNSA_PID];
        } else if (tb[NETNSA_FD]) {
                peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
                nla = tb[NETNSA_FD];
        } else {
                NL_SET_ERR_MSG(extack, "Peer netns reference is missing");
                return -EINVAL;
        }

        if (IS_ERR(peer)) {
                NL_SET_BAD_ATTR(extack, nla);
                NL_SET_ERR_MSG(extack, "Peer netns reference is invalid");
                return PTR_ERR(peer);
        }

        msg = nlmsg_new(rtnl_net_get_size(), GFP_KERNEL);
        if (!msg) {
                err = -ENOMEM;
                goto out;
        }

        id = peernet2id(net, peer);
        err = rtnl_net_fill(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
                            RTM_NEWNSID, net, id);
        if (err < 0)
                goto err_out;

        err = rtnl_unicast(msg, net, NETLINK_CB(skb).portid);
        goto out;

err_out:
        nlmsg_free(msg);
out:
        put_net(peer);
        return err;
}

struct rtnl_net_dump_cb {
        struct net *net;
        struct sk_buff *skb;
        struct netlink_callback *cb;
        int idx;
        int s_idx;
};

static int rtnl_net_dumpid_one(int id, void *peer, void *data)
{
        struct rtnl_net_dump_cb *net_cb = (struct rtnl_net_dump_cb *)data;
        int ret;

        if (net_cb->idx < net_cb->s_idx)
                goto cont;

        ret = rtnl_net_fill(net_cb->skb, NETLINK_CB(net_cb->cb->skb).portid,
                            net_cb->cb->nlh->nlmsg_seq, NLM_F_MULTI,
                            RTM_NEWNSID, net_cb->net, id);
        if (ret < 0)
                return ret;

cont:
        net_cb->idx++;
        return 0;
}

static int rtnl_net_dumpid(struct sk_buff *skb, struct netlink_callback *cb)
{
        struct net *net = sock_net(skb->sk);
        struct rtnl_net_dump_cb net_cb = {
                .net = net,
                .skb = skb,
                .cb = cb,
                .idx = 0,
                .s_idx = cb->args[0],
        };

        spin_lock_bh(&net->nsid_lock);
        idr_for_each(&net->netns_ids, rtnl_net_dumpid_one, &net_cb);
        spin_unlock_bh(&net->nsid_lock);

        cb->args[0] = net_cb.idx;
        return skb->len;
}

static void rtnl_net_notifyid(struct net *net, int cmd, int id)
{
        struct sk_buff *msg;
        int err = -ENOMEM;

        msg = nlmsg_new(rtnl_net_get_size(), GFP_KERNEL);
        if (!msg)
                goto out;

        err = rtnl_net_fill(msg, 0, 0, 0, cmd, net, id);
        if (err < 0)
                goto err_out;

        rtnl_notify(msg, net, 0, RTNLGRP_NSID, NULL, 0);
        return;

err_out:
        nlmsg_free(msg);
out:
        rtnl_set_sk_err(net, RTNLGRP_NSID, err);
}

static int __init net_ns_init(void)
{
        struct net_generic *ng;

#ifdef CONFIG_NET_NS
        net_cachep = kmem_cache_create("net_namespace", sizeof(struct net),
                                       SMP_CACHE_BYTES,
                                       SLAB_PANIC|SLAB_ACCOUNT, NULL);

        /* Create workqueue for cleanup */
        netns_wq = create_singlethread_workqueue("netns");
        if (!netns_wq)
                panic("Could not create netns workq");
#endif

        ng = net_alloc_generic();
        if (!ng)
                panic("Could not allocate generic netns");

        rcu_assign_pointer(init_net.gen, ng);

        down_write(&pernet_ops_rwsem);
        if (setup_net(&init_net, &init_user_ns))
                panic("Could not setup the initial network namespace");

        init_net_initialized = true;
        up_write(&pernet_ops_rwsem);

        register_pernet_subsys(&net_ns_ops);

        rtnl_register(PF_UNSPEC, RTM_NEWNSID, rtnl_net_newid, NULL,
                      RTNL_FLAG_DOIT_UNLOCKED);
        rtnl_register(PF_UNSPEC, RTM_GETNSID, rtnl_net_getid, rtnl_net_dumpid,
                      RTNL_FLAG_DOIT_UNLOCKED);

        return 0;
}

pure_initcall(net_ns_init);

#ifdef CONFIG_NET_NS
static int __register_pernet_operations(struct list_head *list,
                                        struct pernet_operations *ops)
{
        struct net *net;
        int error;
        LIST_HEAD(net_exit_list);

        list_add_tail(&ops->list, list);
        if (ops->init || (ops->id && ops->size)) {
                /* We hold pernet_ops_rwsem write-locked, so parallel
                 * setup_net() and cleanup_net() are not possible.
                 */
                for_each_net(net) {
                        error = ops_init(ops, net);
                        if (error)
                                goto out_undo;
                        list_add_tail(&net->exit_list, &net_exit_list);
                }
        }
        return 0;

out_undo:
        /* If I have an error cleanup all namespaces I initialized */
        list_del(&ops->list);
        ops_exit_list(ops, &net_exit_list);
        ops_free_list(ops, &net_exit_list);
        return error;
}

static void __unregister_pernet_operations(struct pernet_operations *ops)
{
        struct net *net;
        LIST_HEAD(net_exit_list);

        list_del(&ops->list);
        /* See comment in __register_pernet_operations() */
        for_each_net(net)
                list_add_tail(&net->exit_list, &net_exit_list);
        ops_exit_list(ops, &net_exit_list);
        ops_free_list(ops, &net_exit_list);
}

#else

static int __register_pernet_operations(struct list_head *list,
                                        struct pernet_operations *ops)
{
        if (!init_net_initialized) {
                list_add_tail(&ops->list, list);
                return 0;
        }

        return ops_init(ops, &init_net);
}

static void __unregister_pernet_operations(struct pernet_operations *ops)
{
        if (!init_net_initialized) {
                list_del(&ops->list);
        } else {
                LIST_HEAD(net_exit_list);
                list_add(&init_net.exit_list, &net_exit_list);
                ops_exit_list(ops, &net_exit_list);
                ops_free_list(ops, &net_exit_list);
        }
}

#endif /* CONFIG_NET_NS */

static DEFINE_IDA(net_generic_ids);

static int register_pernet_operations(struct list_head *list,
                                      struct pernet_operations *ops)
{
        int error;

        if (ops->id) {
                error = ida_alloc_min(&net_generic_ids, MIN_PERNET_OPS_ID,
                                      GFP_KERNEL);
                if (error < 0)
                        return error;
                *ops->id = error;
                max_gen_ptrs = max(max_gen_ptrs, *ops->id + 1);
        }
        error = __register_pernet_operations(list, ops);
        if (error) {
                rcu_barrier();
                if (ops->id)
                        ida_free(&net_generic_ids, *ops->id);
        }

        return error;
}

static void unregister_pernet_operations(struct pernet_operations *ops)
{
        __unregister_pernet_operations(ops);
        rcu_barrier();
        if (ops->id)
                ida_free(&net_generic_ids, *ops->id);
}

/**
 * register_pernet_subsys - register a network namespace subsystem
 * @ops: pernet operations structure for the subsystem
 *
 * Register a subsystem which has init and exit functions
 * that are called when network namespaces are created and
 * destroyed respectively.
 *
 * When registered, all network namespace init functions are
 * called for every existing network namespace, allowing kernel
 * modules to have a race-free view of the set of network namespaces.
 *
 * When a new network namespace is created all of the init
 * methods are called in the order in which they were registered.
 *
 * When a network namespace is destroyed all of the exit methods
 * are called in the reverse of the order with which they were
 * registered.
 */
int register_pernet_subsys(struct pernet_operations *ops)
{
        int error;
        down_write(&pernet_ops_rwsem);
        error = register_pernet_operations(first_device, ops);
        up_write(&pernet_ops_rwsem);
        return error;
}
EXPORT_SYMBOL_GPL(register_pernet_subsys);
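
/*
 * Illustrative module glue (hypothetical, reusing the foo_net_ops
 * sketch from earlier in this file):
 *
 *	static int __init foo_module_init(void)
 *	{
 *		return register_pernet_subsys(&foo_net_ops);
 *	}
 *
 *	static void __exit foo_module_exit(void)
 *	{
 *		unregister_pernet_subsys(&foo_net_ops);
 *	}
 *
 *	module_init(foo_module_init);
 *	module_exit(foo_module_exit);
 */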

/**
 * unregister_pernet_subsys - unregister a network namespace subsystem
 * @ops: pernet operations structure to manipulate
 *
 * Remove the pernet operations structure from the list to be
 * used when network namespaces are created or destroyed. In
 * addition run the exit method for all existing network
 * namespaces.
 */
void unregister_pernet_subsys(struct pernet_operations *ops)
{
        down_write(&pernet_ops_rwsem);
        unregister_pernet_operations(ops);
        up_write(&pernet_ops_rwsem);
}
EXPORT_SYMBOL_GPL(unregister_pernet_subsys);

/**
 * register_pernet_device - register a network namespace device
 * @ops: pernet operations structure for the subsystem
 *
 * Register a device which has init and exit functions
 * that are called when network namespaces are created and
 * destroyed respectively.
 *
 * When registered, all network namespace init functions are
 * called for every existing network namespace, allowing kernel
 * modules to have a race-free view of the set of network namespaces.
 *
 * When a new network namespace is created all of the init
 * methods are called in the order in which they were registered.
 *
 * When a network namespace is destroyed all of the exit methods
 * are called in the reverse of the order with which they were
 * registered.
 */
int register_pernet_device(struct pernet_operations *ops)
{
        int error;
        down_write(&pernet_ops_rwsem);
        error = register_pernet_operations(&pernet_list, ops);
        if (!error && (first_device == &pernet_list))
                first_device = &ops->list;
        up_write(&pernet_ops_rwsem);
        return error;
}
EXPORT_SYMBOL_GPL(register_pernet_device);

/**
 * unregister_pernet_device - unregister a network namespace netdevice
 * @ops: pernet operations structure to manipulate
 *
 * Remove the pernet operations structure from the list to be
 * used when network namespaces are created or destroyed. In
 * addition run the exit method for all existing network
 * namespaces.
 */
void unregister_pernet_device(struct pernet_operations *ops)
{
        down_write(&pernet_ops_rwsem);
        if (&ops->list == first_device)
                first_device = first_device->next;
        unregister_pernet_operations(ops);
        up_write(&pernet_ops_rwsem);
}
EXPORT_SYMBOL_GPL(unregister_pernet_device);

#ifdef CONFIG_NET_NS
static struct ns_common *netns_get(struct task_struct *task)
{
        struct net *net = NULL;
        struct nsproxy *nsproxy;

        task_lock(task);
        nsproxy = task->nsproxy;
        if (nsproxy)
                net = get_net(nsproxy->net_ns);
        task_unlock(task);

        return net ? &net->ns : NULL;
}

static inline struct net *to_net_ns(struct ns_common *ns)
{
        return container_of(ns, struct net, ns);
}

static void netns_put(struct ns_common *ns)
{
        put_net(to_net_ns(ns));
}

static int netns_install(struct nsproxy *nsproxy, struct ns_common *ns)
{
        struct net *net = to_net_ns(ns);

        if (!ns_capable(net->user_ns, CAP_SYS_ADMIN) ||
            !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
                return -EPERM;

        put_net(nsproxy->net_ns);
        nsproxy->net_ns = get_net(net);
        return 0;
}

static struct user_namespace *netns_owner(struct ns_common *ns)
{
        return to_net_ns(ns)->user_ns;
}

const struct proc_ns_operations netns_operations = {
        .name           = "net",
        .type           = CLONE_NEWNET,
        .get            = netns_get,
        .put            = netns_put,
        .install        = netns_install,
        .owner          = netns_owner,
};
#endif