// SPDX-License-Identifier: GPL-2.0-only
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/workqueue.h>
#include <linux/rtnetlink.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/idr.h>
#include <linux/rculist.h>
#include <linux/nsproxy.h>
#include <linux/fs.h>
#include <linux/proc_ns.h>
#include <linux/file.h>
#include <linux/export.h>
#include <linux/user_namespace.h>
#include <linux/net_namespace.h>
#include <linux/sched/task.h>
#include <linux/uidgid.h>
#include <linux/proc_fs.h>
#include <linux/nstree.h>

#include <net/aligned_data.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

/*
 * Our network namespace constructor/destructor lists
 */

static LIST_HEAD(pernet_list);
static struct list_head *first_device = &pernet_list;

LIST_HEAD(net_namespace_list);
EXPORT_SYMBOL_GPL(net_namespace_list);

/* Protects net_namespace_list. Nests inside rtnl_lock(). */
DECLARE_RWSEM(net_rwsem);
EXPORT_SYMBOL_GPL(net_rwsem);

#ifdef CONFIG_KEYS
static struct key_tag init_net_key_domain = { .usage = REFCOUNT_INIT(1) };
#endif

struct net init_net;
EXPORT_SYMBOL(init_net);

static bool init_net_initialized;
/*
 * pernet_ops_rwsem: protects pernet_list, net_generic_ids,
 * init_net_initialized and the first_device pointer.
 * This is an internal net namespace object. Please don't use it
 * outside.
 */
DECLARE_RWSEM(pernet_ops_rwsem);

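/*
 * The net_generic bookkeeping header (the array length plus the
 * rcu_head used to free old copies) shares its allocation with the
 * pointer array itself, so the first slots that would overlap the
 * header are reserved and never handed out as pernet ids.
 */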
#define MIN_PERNET_OPS_ID	\
	((sizeof(struct net_generic) + sizeof(void *) - 1) / sizeof(void *))

#define INITIAL_NET_GEN_PTRS	13 /* +1 for len +2 for rcu_head */

static unsigned int max_gen_ptrs = INITIAL_NET_GEN_PTRS;

static struct net_generic *net_alloc_generic(void)
{
	unsigned int gen_ptrs = READ_ONCE(max_gen_ptrs);
	unsigned int generic_size;
	struct net_generic *ng;

	generic_size = offsetof(struct net_generic, ptr[gen_ptrs]);

	ng = kzalloc(generic_size, GFP_KERNEL);
	if (ng)
		ng->s.len = gen_ptrs;

	return ng;
}

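/*
 * Install @data at slot @id of @net's generic array, growing the array
 * (and publishing the new copy via RCU) when @id is beyond its current
 * length. Must be called with pernet_ops_rwsem held.
 */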
static int net_assign_generic(struct net *net, unsigned int id, void *data)
{
	struct net_generic *ng, *old_ng;

	BUG_ON(id < MIN_PERNET_OPS_ID);

	old_ng = rcu_dereference_protected(net->gen,
					   lockdep_is_held(&pernet_ops_rwsem));
	if (old_ng->s.len > id) {
		old_ng->ptr[id] = data;
		return 0;
	}

	ng = net_alloc_generic();
	if (!ng)
		return -ENOMEM;

	/*
	 * Some synchronisation notes:
	 *
	 * net_generic() explores the net->gen array inside an RCU
	 * read-side section. Besides, once set, the net->gen->ptr[x]
	 * pointer never changes (see rules in netns/generic.h).
	 *
	 * That said, we simply duplicate this array and schedule
	 * the old copy for kfree after a grace period.
	 */

	memcpy(&ng->ptr[MIN_PERNET_OPS_ID], &old_ng->ptr[MIN_PERNET_OPS_ID],
	       (old_ng->s.len - MIN_PERNET_OPS_ID) * sizeof(void *));
	ng->ptr[id] = data;

	rcu_assign_pointer(net->gen, ng);
	kfree_rcu(old_ng, s.rcu);
	return 0;
}

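/*
 * Lookup-side sketch (editor's illustration; the "foo" names are
 * hypothetical): a subsystem registered with { .id = &foo_net_id,
 * .size = sizeof(struct foo_net) } fetches its per-namespace data via
 *
 *	struct foo_net *fn = net_generic(net, foo_net_id);
 *
 * net_generic() dereferences net->gen under rcu_read_lock(), which is
 * why the resize above publishes the new array with
 * rcu_assign_pointer() and defers freeing the old one to kfree_rcu().
 */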
static int ops_init(const struct pernet_operations *ops, struct net *net)
{
	struct net_generic *ng;
	int err = -ENOMEM;
	void *data = NULL;

	if (ops->id) {
		data = kzalloc(ops->size, GFP_KERNEL);
		if (!data)
			goto out;

		err = net_assign_generic(net, *ops->id, data);
		if (err)
			goto cleanup;
	}
	err = 0;
	if (ops->init)
		err = ops->init(net);
	if (!err)
		return 0;

	if (ops->id) {
		ng = rcu_dereference_protected(net->gen,
					       lockdep_is_held(&pernet_ops_rwsem));
		ng->ptr[*ops->id] = NULL;
	}

cleanup:
	kfree(data);

out:
	return err;
}

static void ops_pre_exit_list(const struct pernet_operations *ops,
			      struct list_head *net_exit_list)
{
	struct net *net;

	if (ops->pre_exit) {
		list_for_each_entry(net, net_exit_list, exit_list)
			ops->pre_exit(net);
	}
}

static void ops_exit_rtnl_list(const struct list_head *ops_list,
			       const struct pernet_operations *ops,
			       struct list_head *net_exit_list)
{
	const struct pernet_operations *saved_ops = ops;
	LIST_HEAD(dev_kill_list);
	struct net *net;

	rtnl_lock();

	list_for_each_entry(net, net_exit_list, exit_list) {
		__rtnl_net_lock(net);

		ops = saved_ops;
		list_for_each_entry_continue_reverse(ops, ops_list, list) {
			if (ops->exit_rtnl)
				ops->exit_rtnl(net, &dev_kill_list);
		}

		__rtnl_net_unlock(net);
	}

	unregister_netdevice_many(&dev_kill_list);

	rtnl_unlock();
}

static void ops_exit_list(const struct pernet_operations *ops,
			  struct list_head *net_exit_list)
{
	if (ops->exit) {
		struct net *net;

		list_for_each_entry(net, net_exit_list, exit_list) {
			ops->exit(net);
			cond_resched();
		}
	}

	if (ops->exit_batch)
		ops->exit_batch(net_exit_list);
}

static void ops_free_list(const struct pernet_operations *ops,
			  struct list_head *net_exit_list)
{
	struct net *net;

	if (ops->id) {
		list_for_each_entry(net, net_exit_list, exit_list)
			kfree(net_generic(net, *ops->id));
	}
}

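/*
 * Undo (a tail of) an ops list for the namespaces on @net_exit_list,
 * in reverse registration order and in phases: every ->pre_exit()
 * first, then one RCU grace period, then the rtnl-locked ->exit_rtnl()
 * hooks, then ->exit()/->exit_batch(), and finally the generic
 * per-ops data is freed.
 */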
static void ops_undo_list(const struct list_head *ops_list,
			  const struct pernet_operations *ops,
			  struct list_head *net_exit_list,
			  bool expedite_rcu)
{
	const struct pernet_operations *saved_ops;
	bool hold_rtnl = false;

	if (!ops)
		ops = list_entry(ops_list, typeof(*ops), list);

	saved_ops = ops;

	list_for_each_entry_continue_reverse(ops, ops_list, list) {
		hold_rtnl |= !!ops->exit_rtnl;
		ops_pre_exit_list(ops, net_exit_list);
	}

	/* Another CPU might be rcu-iterating the list; wait for it.
	 * This needs to happen before calling the exit() notifiers, so
	 * the rcu_barrier() after ops_undo_list() isn't sufficient alone.
	 * The pre_exit() and exit() methods need this barrier as well.
	 */
	if (expedite_rcu)
		synchronize_rcu_expedited();
	else
		synchronize_rcu();

	if (hold_rtnl)
		ops_exit_rtnl_list(ops_list, saved_ops, net_exit_list);

	ops = saved_ops;
	list_for_each_entry_continue_reverse(ops, ops_list, list)
		ops_exit_list(ops, net_exit_list);

	ops = saved_ops;
	list_for_each_entry_continue_reverse(ops, ops_list, list)
		ops_free_list(ops, net_exit_list);
}

static void ops_undo_single(struct pernet_operations *ops,
			    struct list_head *net_exit_list)
{
	LIST_HEAD(ops_list);

	list_add(&ops->list, &ops_list);
	ops_undo_list(&ops_list, NULL, net_exit_list, false);
	list_del(&ops->list);
}

/* should be called with nsid_lock held */
static int alloc_netid(struct net *net, struct net *peer, int reqid)
{
	int min = 0, max = 0;

	if (reqid >= 0) {
		min = reqid;
		max = reqid + 1;
	}

	return idr_alloc(&net->netns_ids, peer, min, max, GFP_ATOMIC);
}

/* This function is used by idr_for_each(). If net is equal to peer, the
 * function returns the id so that idr_for_each() stops. Because we cannot
 * return the id 0 (idr_for_each() would not stop), we return the magic
 * value NET_ID_ZERO (-1) for it.
 */
#define NET_ID_ZERO -1
static int net_eq_idr(int id, void *net, void *peer)
{
	if (net_eq(net, peer))
		return id ? : NET_ID_ZERO;
	return 0;
}

/* Must be called from RCU-critical section or with nsid_lock held */
static int __peernet2id(const struct net *net, struct net *peer)
{
	int id = idr_for_each(&net->netns_ids, net_eq_idr, peer);

	/* Magic value for id 0. */
	if (id == NET_ID_ZERO)
		return 0;
	if (id > 0)
		return id;

	return NETNSA_NSID_NOT_ASSIGNED;
}

static void rtnl_net_notifyid(struct net *net, int cmd, int id, u32 portid,
			      struct nlmsghdr *nlh, gfp_t gfp);
/* This function returns the id of a peer netns. If no id is assigned, one will
 * be allocated and returned.
 */
int peernet2id_alloc(struct net *net, struct net *peer, gfp_t gfp)
{
	int id;

	if (!check_net(net))
		return NETNSA_NSID_NOT_ASSIGNED;

	spin_lock(&net->nsid_lock);
	id = __peernet2id(net, peer);
	if (id >= 0) {
		spin_unlock(&net->nsid_lock);
		return id;
	}

	/* When peer is obtained from RCU lists, we may race with
	 * its cleanup. Check whether it's alive: this guarantees
	 * we never hash a peer back into net->netns_ids after it has
	 * just been idr_remove()'d from there in cleanup_net().
	 */
	if (!maybe_get_net(peer)) {
		spin_unlock(&net->nsid_lock);
		return NETNSA_NSID_NOT_ASSIGNED;
	}

	id = alloc_netid(net, peer, -1);
	spin_unlock(&net->nsid_lock);

	put_net(peer);
	if (id < 0)
		return NETNSA_NSID_NOT_ASSIGNED;

	rtnl_net_notifyid(net, RTM_NEWNSID, id, 0, NULL, gfp);

	return id;
}
EXPORT_SYMBOL_GPL(peernet2id_alloc);

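/*
 * Editor's note: rtnetlink uses ids obtained this way to fill
 * attributes such as IFLA_LINK_NETNSID, so a peer namespace is
 * assigned an id lazily, the first time userspace needs to refer
 * to it.
 */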
/* This function returns, if assigned, the id of a peer netns. */
int peernet2id(const struct net *net, struct net *peer)
{
	int id;

	rcu_read_lock();
	id = __peernet2id(net, peer);
	rcu_read_unlock();

	return id;
}
EXPORT_SYMBOL(peernet2id);

/* This function returns true if the peer netns has an id assigned in the
 * current netns.
 */
bool peernet_has_id(const struct net *net, struct net *peer)
{
	return peernet2id(net, peer) >= 0;
}

struct net *get_net_ns_by_id(const struct net *net, int id)
{
	struct net *peer;

	if (id < 0)
		return NULL;

	rcu_read_lock();
	peer = idr_find(&net->netns_ids, id);
	if (peer)
		peer = maybe_get_net(peer);
	rcu_read_unlock();

	return peer;
}
EXPORT_SYMBOL_GPL(get_net_ns_by_id);

static __net_init void preinit_net_sysctl(struct net *net)
{
	net->core.sysctl_somaxconn = SOMAXCONN;
	/* Limits per socket sk_omem_alloc usage.
	 * TCP zerocopy regular usage needs 128 KB.
	 */
	net->core.sysctl_optmem_max = 128 * 1024;
	net->core.sysctl_txrehash = SOCK_TXREHASH_ENABLED;
	net->core.sysctl_tstamp_allow_data = 1;
	net->core.sysctl_txq_reselection = msecs_to_jiffies(1000);
}

/* init code that must occur even if setup_net() is not called. */
static __net_init int preinit_net(struct net *net, struct user_namespace *user_ns)
{
	int ret;

	ret = ns_common_init(net);
	if (ret)
		return ret;

	refcount_set(&net->passive, 1);
	ref_tracker_dir_init(&net->refcnt_tracker, 128, "net_refcnt");
	ref_tracker_dir_init(&net->notrefcnt_tracker, 128, "net_notrefcnt");

	get_random_bytes(&net->hash_mix, sizeof(u32));
	net->dev_base_seq = 1;
	net->user_ns = user_ns;

	idr_init(&net->netns_ids);
	spin_lock_init(&net->nsid_lock);
	mutex_init(&net->ipv4.ra_mutex);

#ifdef CONFIG_DEBUG_NET_SMALL_RTNL
	mutex_init(&net->rtnl_mutex);
	lock_set_cmp_fn(&net->rtnl_mutex, rtnl_net_lock_cmp_fn, NULL);
#endif

	INIT_LIST_HEAD(&net->ptype_all);
	INIT_LIST_HEAD(&net->ptype_specific);
	preinit_net_sysctl(net);
	return 0;
}

/*
 * setup_net runs the initializers for the network namespace object.
 */
static __net_init int setup_net(struct net *net)
{
	/* Must be called with pernet_ops_rwsem held */
	const struct pernet_operations *ops;
	LIST_HEAD(net_exit_list);
	int error = 0;

	net->net_cookie = ns_tree_gen_id(net);

	list_for_each_entry(ops, &pernet_list, list) {
		error = ops_init(ops, net);
		if (error < 0)
			goto out_undo;
	}
	down_write(&net_rwsem);
	list_add_tail_rcu(&net->list, &net_namespace_list);
	up_write(&net_rwsem);
	ns_tree_add_raw(net);
out:
	return error;

out_undo:
	/* Walk through the list backwards calling the exit functions
	 * for the pernet modules whose init functions did not fail.
	 */
	list_add(&net->exit_list, &net_exit_list);
	ops_undo_list(&pernet_list, ops, &net_exit_list, false);
	rcu_barrier();
	goto out;
}

#ifdef CONFIG_NET_NS
static struct ucounts *inc_net_namespaces(struct user_namespace *ns)
{
	return inc_ucount(ns, current_euid(), UCOUNT_NET_NAMESPACES);
}

static void dec_net_namespaces(struct ucounts *ucounts)
{
	dec_ucount(ucounts, UCOUNT_NET_NAMESPACES);
}

static struct kmem_cache *net_cachep __ro_after_init;
static struct workqueue_struct *netns_wq;

static struct net *net_alloc(void)
{
	struct net *net = NULL;
	struct net_generic *ng;

	ng = net_alloc_generic();
	if (!ng)
		goto out;

	net = kmem_cache_zalloc(net_cachep, GFP_KERNEL);
	if (!net)
		goto out_free;

#ifdef CONFIG_KEYS
	net->key_domain = kzalloc(sizeof(struct key_tag), GFP_KERNEL);
	if (!net->key_domain)
		goto out_free_2;
	refcount_set(&net->key_domain->usage, 1);
#endif

	rcu_assign_pointer(net->gen, ng);
out:
	return net;

#ifdef CONFIG_KEYS
out_free_2:
	kmem_cache_free(net_cachep, net);
	net = NULL;
#endif
out_free:
	kfree(ng);
	goto out;
}

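/*
 * Dying struct net objects are not freed directly from
 * net_passive_dec(): they are parked on defer_free_list and released
 * on a later cleanup_net() pass, after one more rcu_barrier(), so no
 * RCU reader can still hold a pointer into the object being freed.
 */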
static LLIST_HEAD(defer_free_list);

static void net_complete_free(void)
{
	struct llist_node *kill_list;
	struct net *net, *next;

	/* Get the list of namespaces to free from last round. */
	kill_list = llist_del_all(&defer_free_list);

	llist_for_each_entry_safe(net, next, kill_list, defer_free_list)
		kmem_cache_free(net_cachep, net);
}

void net_passive_dec(struct net *net)
{
	if (refcount_dec_and_test(&net->passive)) {
		kfree(rcu_access_pointer(net->gen));

		/* There should not be any trackers left there. */
		ref_tracker_dir_exit(&net->notrefcnt_tracker);

		/* Wait for an extra rcu_barrier() before final free. */
		llist_add(&net->defer_free_list, &defer_free_list);
	}
}

void net_drop_ns(void *p)
{
	struct net *net = (struct net *)p;

	if (net)
		net_passive_dec(net);
}

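/*
 * Editor's note: this is the entry point for clone(2)/unshare(2) with
 * CLONE_NEWNET; e.g. `ip netns add` ends up here via
 * unshare(CLONE_NEWNET).
 */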
struct net *copy_net_ns(u64 flags,
			struct user_namespace *user_ns, struct net *old_net)
{
	struct ucounts *ucounts;
	struct net *net;
	int rv;

	if (!(flags & CLONE_NEWNET))
		return get_net(old_net);

	ucounts = inc_net_namespaces(user_ns);
	if (!ucounts)
		return ERR_PTR(-ENOSPC);

	net = net_alloc();
	if (!net) {
		rv = -ENOMEM;
		goto dec_ucounts;
	}

	rv = preinit_net(net, user_ns);
	if (rv < 0)
		goto dec_ucounts;
	net->ucounts = ucounts;
	get_user_ns(user_ns);

	rv = down_read_killable(&pernet_ops_rwsem);
	if (rv < 0)
		goto put_userns;

	rv = setup_net(net);

	up_read(&pernet_ops_rwsem);

	if (rv < 0) {
put_userns:
		ns_common_free(net);
#ifdef CONFIG_KEYS
		key_remove_domain(net->key_domain);
#endif
		put_user_ns(user_ns);
		net_passive_dec(net);
dec_ucounts:
		dec_net_namespaces(ucounts);
		return ERR_PTR(rv);
	}
	return net;
}

/**
 * net_ns_get_ownership - get sysfs ownership data for @net
 * @net: network namespace in question (can be NULL)
 * @uid: kernel user ID for sysfs objects
 * @gid: kernel group ID for sysfs objects
 *
 * Returns the uid/gid pair of root in the user namespace associated with the
 * given network namespace.
 */
void net_ns_get_ownership(const struct net *net, kuid_t *uid, kgid_t *gid)
{
	if (net) {
		kuid_t ns_root_uid = make_kuid(net->user_ns, 0);
		kgid_t ns_root_gid = make_kgid(net->user_ns, 0);

		if (uid_valid(ns_root_uid))
			*uid = ns_root_uid;

		if (gid_valid(ns_root_gid))
			*gid = ns_root_gid;
	} else {
		*uid = GLOBAL_ROOT_UID;
		*gid = GLOBAL_ROOT_GID;
	}
}
EXPORT_SYMBOL_GPL(net_ns_get_ownership);

static void unhash_nsid(struct net *net, struct net *last)
{
	struct net *tmp;
	/* This function is only called from the cleanup_net() work,
	 * and that work is the only place that may delete a net from
	 * net_namespace_list. So, while the loop below is executing,
	 * the list may only grow. Thus, we do not need
	 * for_each_net_rcu() or net_rwsem here.
	 */
	for_each_net(tmp) {
		int id;

		spin_lock(&tmp->nsid_lock);
		id = __peernet2id(tmp, net);
		if (id >= 0)
			idr_remove(&tmp->netns_ids, id);
		spin_unlock(&tmp->nsid_lock);
		if (id >= 0)
			rtnl_net_notifyid(tmp, RTM_DELNSID, id, 0, NULL,
					  GFP_KERNEL);
		if (tmp == last)
			break;
	}
	spin_lock(&net->nsid_lock);
	idr_destroy(&net->netns_ids);
	spin_unlock(&net->nsid_lock);
}

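/*
 * Dying namespaces are queued on this lock-free llist by __put_net()
 * (which may run from any context) and reaped in batches by the
 * cleanup_net() work item, so the heavy pernet exit work always runs
 * in process context.
 */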
static LLIST_HEAD(cleanup_list);

struct task_struct *cleanup_net_task;

static void cleanup_net(struct work_struct *work)
{
	struct llist_node *net_kill_list;
	struct net *net, *tmp, *last;
	LIST_HEAD(net_exit_list);

	WRITE_ONCE(cleanup_net_task, current);

	/* Atomically snapshot the list of namespaces to cleanup */
	net_kill_list = llist_del_all(&cleanup_list);

	down_read(&pernet_ops_rwsem);

	/* Don't let anyone else find us. */
	down_write(&net_rwsem);
	llist_for_each_entry(net, net_kill_list, cleanup_list) {
		ns_tree_remove(net);
		list_del_rcu(&net->list);
	}
	/* Cache the last net. After we unlock net_rwsem, no new net
	 * added to net_namespace_list can assign an nsid to a net from
	 * net_kill_list (see peernet2id_alloc()), so we skip the newer
	 * nets in unhash_nsid().
	 *
	 * Note that unhash_nsid() does not delete nsid links between
	 * net_kill_list's nets, as they have already been deleted from
	 * net_namespace_list. That would be useless anyway, as their
	 * netns_ids are destroyed there.
	 */
	last = list_last_entry(&net_namespace_list, struct net, list);
	up_write(&net_rwsem);

	llist_for_each_entry(net, net_kill_list, cleanup_list) {
		unhash_nsid(net, last);
		list_add_tail(&net->exit_list, &net_exit_list);
	}

	ops_undo_list(&pernet_list, NULL, &net_exit_list, true);

	up_read(&pernet_ops_rwsem);

	/* Ensure there are no outstanding rcu callbacks using this
	 * network namespace.
	 */
	rcu_barrier();

	net_complete_free();

	/* Finally it is safe to free my network namespace structure */
	list_for_each_entry_safe(net, tmp, &net_exit_list, exit_list) {
		list_del_init(&net->exit_list);
		ns_common_free(net);
		dec_net_namespaces(net->ucounts);
#ifdef CONFIG_KEYS
		key_remove_domain(net->key_domain);
#endif
		put_user_ns(net->user_ns);
		net_passive_dec(net);
	}
	WRITE_ONCE(cleanup_net_task, NULL);
}

/**
 * net_ns_barrier - wait until concurrent net_cleanup_work is done
 *
 * cleanup_net runs from work queue and will first remove namespaces
 * from the global list, then run net exit functions.
 *
 * Call this in module exit path to make sure that all netns
 * ->exit ops have been invoked before the function is removed.
 */
void net_ns_barrier(void)
{
	down_write(&pernet_ops_rwsem);
	up_write(&pernet_ops_rwsem);
}
EXPORT_SYMBOL(net_ns_barrier);

static DECLARE_WORK(net_cleanup_work, cleanup_net);

void __put_net(struct net *net)
{
	ref_tracker_dir_exit(&net->refcnt_tracker);
	/* Cleanup the network namespace in process context */
	if (llist_add(&net->cleanup_list, &cleanup_list))
		queue_work(netns_wq, &net_cleanup_work);
}
EXPORT_SYMBOL_GPL(__put_net);

/**
 * get_net_ns - increment the refcount of the network namespace
 * @ns: common namespace (net)
 *
 * Returns the net's common namespace or ERR_PTR() if ref is zero.
 */
struct ns_common *get_net_ns(struct ns_common *ns)
{
	struct net *net;

	net = maybe_get_net(container_of(ns, struct net, ns));
	if (net)
		return &net->ns;
	return ERR_PTR(-EINVAL);
}
EXPORT_SYMBOL_GPL(get_net_ns);

struct net *get_net_ns_by_fd(int fd)
{
	CLASS(fd, f)(fd);

	if (fd_empty(f))
		return ERR_PTR(-EBADF);

	if (proc_ns_file(fd_file(f))) {
		struct ns_common *ns = get_proc_ns(file_inode(fd_file(f)));

		if (ns->ops == &netns_operations)
			return get_net(container_of(ns, struct net, ns));
	}

	return ERR_PTR(-EINVAL);
}
EXPORT_SYMBOL_GPL(get_net_ns_by_fd);
#endif

struct net *get_net_ns_by_pid(pid_t pid)
{
	struct task_struct *tsk;
	struct net *net;

	/* Lookup the network namespace */
	net = ERR_PTR(-ESRCH);
	rcu_read_lock();
	tsk = find_task_by_vpid(pid);
	if (tsk) {
		struct nsproxy *nsproxy;

		task_lock(tsk);
		nsproxy = tsk->nsproxy;
		if (nsproxy)
			net = get_net(nsproxy->net_ns);
		task_unlock(tsk);
	}
	rcu_read_unlock();
	return net;
}
EXPORT_SYMBOL_GPL(get_net_ns_by_pid);

#ifdef CONFIG_NET_NS_REFCNT_TRACKER
static void net_ns_net_debugfs(struct net *net)
{
	ref_tracker_dir_symlink(&net->refcnt_tracker, "netns-%llx-%u-refcnt",
				net->net_cookie, net->ns.inum);
	ref_tracker_dir_symlink(&net->notrefcnt_tracker, "netns-%llx-%u-notrefcnt",
				net->net_cookie, net->ns.inum);
}

static int __init init_net_debugfs(void)
{
	ref_tracker_dir_debugfs(&init_net.refcnt_tracker);
	ref_tracker_dir_debugfs(&init_net.notrefcnt_tracker);
	net_ns_net_debugfs(&init_net);
	return 0;
}
late_initcall(init_net_debugfs);
#else
static void net_ns_net_debugfs(struct net *net)
{
}
#endif

static __net_init int net_ns_net_init(struct net *net)
{
	net_ns_net_debugfs(net);
	return 0;
}

static struct pernet_operations __net_initdata net_ns_ops = {
	.init = net_ns_net_init,
};

static const struct nla_policy rtnl_net_policy[NETNSA_MAX + 1] = {
	[NETNSA_NONE]		= { .type = NLA_UNSPEC },
	[NETNSA_NSID]		= { .type = NLA_S32 },
	[NETNSA_PID]		= { .type = NLA_U32 },
	[NETNSA_FD]		= { .type = NLA_U32 },
	[NETNSA_TARGET_NSID]	= { .type = NLA_S32 },
};

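/*
 * Editor's note: RTM_NEWNSID is what `ip netns set NAME NSID` sends.
 * The peer namespace is identified by NETNSA_PID or NETNSA_FD, and a
 * requested nsid of -1 lets the kernel pick a free one.
 */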
static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tb[NETNSA_MAX + 1];
	struct nlattr *nla;
	struct net *peer;
	int nsid, err;

	err = nlmsg_parse_deprecated(nlh, sizeof(struct rtgenmsg), tb,
				     NETNSA_MAX, rtnl_net_policy, extack);
	if (err < 0)
		return err;
	if (!tb[NETNSA_NSID]) {
		NL_SET_ERR_MSG(extack, "nsid is missing");
		return -EINVAL;
	}
	nsid = nla_get_s32(tb[NETNSA_NSID]);

	if (tb[NETNSA_PID]) {
		peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID]));
		nla = tb[NETNSA_PID];
	} else if (tb[NETNSA_FD]) {
		peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
		nla = tb[NETNSA_FD];
	} else {
		NL_SET_ERR_MSG(extack, "Peer netns reference is missing");
		return -EINVAL;
	}
	if (IS_ERR(peer)) {
		NL_SET_BAD_ATTR(extack, nla);
		NL_SET_ERR_MSG(extack, "Peer netns reference is invalid");
		return PTR_ERR(peer);
	}

	spin_lock(&net->nsid_lock);
	if (__peernet2id(net, peer) >= 0) {
		spin_unlock(&net->nsid_lock);
		err = -EEXIST;
		NL_SET_BAD_ATTR(extack, nla);
		NL_SET_ERR_MSG(extack,
			       "Peer netns already has a nsid assigned");
		goto out;
	}

	err = alloc_netid(net, peer, nsid);
	spin_unlock(&net->nsid_lock);
	if (err >= 0) {
		rtnl_net_notifyid(net, RTM_NEWNSID, err, NETLINK_CB(skb).portid,
				  nlh, GFP_KERNEL);
		err = 0;
	} else if (err == -ENOSPC && nsid >= 0) {
		err = -EEXIST;
		NL_SET_BAD_ATTR(extack, tb[NETNSA_NSID]);
		NL_SET_ERR_MSG(extack, "The specified nsid is already used");
	}
out:
	put_net(peer);
	return err;
}

static int rtnl_net_get_size(void)
{
	return NLMSG_ALIGN(sizeof(struct rtgenmsg))
	       + nla_total_size(sizeof(s32)) /* NETNSA_NSID */
	       + nla_total_size(sizeof(s32)) /* NETNSA_CURRENT_NSID */
	       ;
}

struct net_fill_args {
	u32 portid;
	u32 seq;
	int flags;
	int cmd;
	int nsid;
	bool add_ref;
	int ref_nsid;
};

static int rtnl_net_fill(struct sk_buff *skb, struct net_fill_args *args)
{
	struct nlmsghdr *nlh;
	struct rtgenmsg *rth;

	nlh = nlmsg_put(skb, args->portid, args->seq, args->cmd, sizeof(*rth),
			args->flags);
	if (!nlh)
		return -EMSGSIZE;

	rth = nlmsg_data(nlh);
	rth->rtgen_family = AF_UNSPEC;

	if (nla_put_s32(skb, NETNSA_NSID, args->nsid))
		goto nla_put_failure;

	if (args->add_ref &&
	    nla_put_s32(skb, NETNSA_CURRENT_NSID, args->ref_nsid))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

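/*
 * The message built above is a bare struct rtgenmsg followed by
 * NETNSA_NSID and, when the request named a different target
 * namespace, NETNSA_CURRENT_NSID carrying the peer's id in the
 * caller's own namespace.
 */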
static int rtnl_net_valid_getid_req(struct sk_buff *skb,
				    const struct nlmsghdr *nlh,
				    struct nlattr **tb,
				    struct netlink_ext_ack *extack)
{
	int i, err;

	if (!netlink_strict_get_check(skb))
		return nlmsg_parse_deprecated(nlh, sizeof(struct rtgenmsg),
					      tb, NETNSA_MAX, rtnl_net_policy,
					      extack);

	err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct rtgenmsg), tb,
					    NETNSA_MAX, rtnl_net_policy,
					    extack);
	if (err)
		return err;

	for (i = 0; i <= NETNSA_MAX; i++) {
		if (!tb[i])
			continue;

		switch (i) {
		case NETNSA_PID:
		case NETNSA_FD:
		case NETNSA_NSID:
		case NETNSA_TARGET_NSID:
			break;
		default:
			NL_SET_ERR_MSG(extack, "Unsupported attribute in peer netns getid request");
			return -EINVAL;
		}
	}

	return 0;
}

static int rtnl_net_getid(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tb[NETNSA_MAX + 1];
	struct net_fill_args fillargs = {
		.portid = NETLINK_CB(skb).portid,
		.seq = nlh->nlmsg_seq,
		.cmd = RTM_NEWNSID,
	};
	struct net *peer, *target = net;
	struct nlattr *nla;
	struct sk_buff *msg;
	int err;

	err = rtnl_net_valid_getid_req(skb, nlh, tb, extack);
	if (err < 0)
		return err;
	if (tb[NETNSA_PID]) {
		peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID]));
		nla = tb[NETNSA_PID];
	} else if (tb[NETNSA_FD]) {
		peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
		nla = tb[NETNSA_FD];
	} else if (tb[NETNSA_NSID]) {
		peer = get_net_ns_by_id(net, nla_get_s32(tb[NETNSA_NSID]));
		if (!peer)
			peer = ERR_PTR(-ENOENT);
		nla = tb[NETNSA_NSID];
	} else {
		NL_SET_ERR_MSG(extack, "Peer netns reference is missing");
		return -EINVAL;
	}

	if (IS_ERR(peer)) {
		NL_SET_BAD_ATTR(extack, nla);
		NL_SET_ERR_MSG(extack, "Peer netns reference is invalid");
		return PTR_ERR(peer);
	}

	if (tb[NETNSA_TARGET_NSID]) {
		int id = nla_get_s32(tb[NETNSA_TARGET_NSID]);

		target = rtnl_get_net_ns_capable(NETLINK_CB(skb).sk, id);
		if (IS_ERR(target)) {
			NL_SET_BAD_ATTR(extack, tb[NETNSA_TARGET_NSID]);
			NL_SET_ERR_MSG(extack,
				       "Target netns reference is invalid");
			err = PTR_ERR(target);
			goto out;
		}
		fillargs.add_ref = true;
		fillargs.ref_nsid = peernet2id(net, peer);
	}

	msg = nlmsg_new(rtnl_net_get_size(), GFP_KERNEL);
	if (!msg) {
		err = -ENOMEM;
		goto out;
	}

	fillargs.nsid = peernet2id(target, peer);
	err = rtnl_net_fill(msg, &fillargs);
	if (err < 0)
		goto err_out;

	err = rtnl_unicast(msg, net, NETLINK_CB(skb).portid);
	goto out;

err_out:
	nlmsg_free(msg);
out:
	if (fillargs.add_ref)
		put_net(target);
	put_net(peer);
	return err;
}

struct rtnl_net_dump_cb {
	struct net *tgt_net;
	struct net *ref_net;
	struct sk_buff *skb;
	struct net_fill_args fillargs;
	int idx;
	int s_idx;
};

/* Runs in RCU-critical section. */
static int rtnl_net_dumpid_one(int id, void *peer, void *data)
{
	struct rtnl_net_dump_cb *net_cb = (struct rtnl_net_dump_cb *)data;
	int ret;

	if (net_cb->idx < net_cb->s_idx)
		goto cont;

	net_cb->fillargs.nsid = id;
	if (net_cb->fillargs.add_ref)
		net_cb->fillargs.ref_nsid = __peernet2id(net_cb->ref_net, peer);
	ret = rtnl_net_fill(net_cb->skb, &net_cb->fillargs);
	if (ret < 0)
		return ret;

cont:
	net_cb->idx++;
	return 0;
}

static int rtnl_valid_dump_net_req(const struct nlmsghdr *nlh, struct sock *sk,
				   struct rtnl_net_dump_cb *net_cb,
				   struct netlink_callback *cb)
{
	struct netlink_ext_ack *extack = cb->extack;
	struct nlattr *tb[NETNSA_MAX + 1];
	int err, i;

	err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct rtgenmsg), tb,
					    NETNSA_MAX, rtnl_net_policy,
					    extack);
	if (err < 0)
		return err;

	for (i = 0; i <= NETNSA_MAX; i++) {
		if (!tb[i])
			continue;

		if (i == NETNSA_TARGET_NSID) {
			struct net *net;

			net = rtnl_get_net_ns_capable(sk, nla_get_s32(tb[i]));
			if (IS_ERR(net)) {
				NL_SET_BAD_ATTR(extack, tb[i]);
				NL_SET_ERR_MSG(extack,
					       "Invalid target network namespace id");
				return PTR_ERR(net);
			}
			net_cb->fillargs.add_ref = true;
			net_cb->ref_net = net_cb->tgt_net;
			net_cb->tgt_net = net;
		} else {
			NL_SET_BAD_ATTR(extack, tb[i]);
			NL_SET_ERR_MSG(extack,
				       "Unsupported attribute in dump request");
			return -EINVAL;
		}
	}

	return 0;
}

static int rtnl_net_dumpid(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct rtnl_net_dump_cb net_cb = {
		.tgt_net = sock_net(skb->sk),
		.skb = skb,
		.fillargs = {
			.portid = NETLINK_CB(cb->skb).portid,
			.seq = cb->nlh->nlmsg_seq,
			.flags = NLM_F_MULTI,
			.cmd = RTM_NEWNSID,
		},
		.idx = 0,
		.s_idx = cb->args[0],
	};
	int err = 0;

	if (cb->strict_check) {
		err = rtnl_valid_dump_net_req(cb->nlh, skb->sk, &net_cb, cb);
		if (err < 0)
			goto end;
	}

	rcu_read_lock();
	idr_for_each(&net_cb.tgt_net->netns_ids, rtnl_net_dumpid_one, &net_cb);
	rcu_read_unlock();

	cb->args[0] = net_cb.idx;
end:
	if (net_cb.fillargs.add_ref)
		put_net(net_cb.tgt_net);
	return err;
}

static void rtnl_net_notifyid(struct net *net, int cmd, int id, u32 portid,
			      struct nlmsghdr *nlh, gfp_t gfp)
{
	struct net_fill_args fillargs = {
		.portid = portid,
		.seq = nlh ? nlh->nlmsg_seq : 0,
		.cmd = cmd,
		.nsid = id,
	};
	struct sk_buff *msg;
	int err = -ENOMEM;

	msg = nlmsg_new(rtnl_net_get_size(), gfp);
	if (!msg)
		goto out;

	err = rtnl_net_fill(msg, &fillargs);
	if (err < 0)
		goto err_out;

	rtnl_notify(msg, net, portid, RTNLGRP_NSID, nlh, gfp);
	return;

err_out:
	nlmsg_free(msg);
out:
	rtnl_set_sk_err(net, RTNLGRP_NSID, err);
}

#ifdef CONFIG_NET_NS
static void __init netns_ipv4_struct_check(void)
{
	/* TX readonly hotpath cache lines */
	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
				      sysctl_tcp_early_retrans);
	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
				      sysctl_tcp_tso_win_divisor);
	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
				      sysctl_tcp_tso_rtt_log);
	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
				      sysctl_tcp_autocorking);
	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
				      sysctl_tcp_min_snd_mss);
	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
				      sysctl_tcp_notsent_lowat);
	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
				      sysctl_tcp_limit_output_bytes);
	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
				      sysctl_tcp_min_rtt_wlen);
	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
				      sysctl_tcp_wmem);
	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx,
				      sysctl_ip_fwd_use_pmtu);

	/* RX readonly hotpath cache line */
	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_rx,
				      sysctl_tcp_moderate_rcvbuf);
	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_rx,
				      sysctl_tcp_rcvbuf_low_rtt);
	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_rx,
				      sysctl_ip_early_demux);
	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_rx,
				      sysctl_tcp_early_demux);
	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_rx,
				      sysctl_tcp_l3mdev_accept);
	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_rx,
				      sysctl_tcp_reordering);
	CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_rx,
				      sysctl_tcp_rmem);
}
#endif

static const struct rtnl_msg_handler net_ns_rtnl_msg_handlers[] __initconst = {
	{.msgtype = RTM_NEWNSID, .doit = rtnl_net_newid,
	 .flags = RTNL_FLAG_DOIT_UNLOCKED},
	{.msgtype = RTM_GETNSID, .doit = rtnl_net_getid,
	 .dumpit = rtnl_net_dumpid,
	 .flags = RTNL_FLAG_DOIT_UNLOCKED | RTNL_FLAG_DUMP_UNLOCKED},
};

void __init net_ns_init(void)
{
	struct net_generic *ng;

#ifdef CONFIG_NET_NS
	netns_ipv4_struct_check();
	net_cachep = kmem_cache_create("net_namespace", sizeof(struct net),
				       SMP_CACHE_BYTES,
				       SLAB_PANIC|SLAB_ACCOUNT, NULL);

	/* Create workqueue for cleanup */
	netns_wq = create_singlethread_workqueue("netns");
	if (!netns_wq)
		panic("Could not create netns workq");
#endif

	ng = net_alloc_generic();
	if (!ng)
		panic("Could not allocate generic netns");

	rcu_assign_pointer(init_net.gen, ng);

#ifdef CONFIG_KEYS
	init_net.key_domain = &init_net_key_domain;
#endif
	/*
	 * This currently cannot fail as the initial network namespace
	 * has a static inode number.
	 */
	if (preinit_net(&init_net, &init_user_ns))
		panic("Could not preinitialize the initial network namespace");

	down_write(&pernet_ops_rwsem);
	if (setup_net(&init_net))
		panic("Could not setup the initial network namespace");

	init_net_initialized = true;
	up_write(&pernet_ops_rwsem);

	if (register_pernet_subsys(&net_ns_ops))
		panic("Could not register network namespace subsystems");

	rtnl_register_many(net_ns_rtnl_msg_handlers);
}

#ifdef CONFIG_NET_NS
static int __register_pernet_operations(struct list_head *list,
					struct pernet_operations *ops)
{
	LIST_HEAD(net_exit_list);
	struct net *net;
	int error;

	list_add_tail(&ops->list, list);
	if (ops->init || ops->id) {
		/* We hold pernet_ops_rwsem write-locked, so parallel
		 * setup_net() and cleanup_net() are not possible.
		 */
		for_each_net(net) {
			error = ops_init(ops, net);
			if (error)
				goto out_undo;
			list_add_tail(&net->exit_list, &net_exit_list);
		}
	}
	return 0;

out_undo:
	/* If we hit an error, clean up every namespace we initialized */
	list_del(&ops->list);
	ops_undo_single(ops, &net_exit_list);
	return error;
}

static void __unregister_pernet_operations(struct pernet_operations *ops)
{
	LIST_HEAD(net_exit_list);
	struct net *net;

	/* See comment in __register_pernet_operations() */
	for_each_net(net)
		list_add_tail(&net->exit_list, &net_exit_list);

	list_del(&ops->list);
	ops_undo_single(ops, &net_exit_list);
}

#else

static int __register_pernet_operations(struct list_head *list,
					struct pernet_operations *ops)
{
	if (!init_net_initialized) {
		list_add_tail(&ops->list, list);
		return 0;
	}

	return ops_init(ops, &init_net);
}

static void __unregister_pernet_operations(struct pernet_operations *ops)
{
	if (!init_net_initialized) {
		list_del(&ops->list);
	} else {
		LIST_HEAD(net_exit_list);

		list_add(&init_net.exit_list, &net_exit_list);
		ops_undo_single(ops, &net_exit_list);
	}
}

#endif /* CONFIG_NET_NS */

static DEFINE_IDA(net_generic_ids);

static int register_pernet_operations(struct list_head *list,
				      struct pernet_operations *ops)
{
	int error;

	if (WARN_ON(!!ops->id ^ !!ops->size))
		return -EINVAL;

	if (ops->id) {
		error = ida_alloc_min(&net_generic_ids, MIN_PERNET_OPS_ID,
				      GFP_KERNEL);
		if (error < 0)
			return error;
		*ops->id = error;
		/* This does not require READ_ONCE as writers already hold
		 * pernet_ops_rwsem. But WRITE_ONCE is needed to pair with
		 * the READ_ONCE() in net_alloc_generic().
		 */
		WRITE_ONCE(max_gen_ptrs, max(max_gen_ptrs, *ops->id + 1));
	}
	error = __register_pernet_operations(list, ops);
	if (error) {
		rcu_barrier();
		if (ops->id)
			ida_free(&net_generic_ids, *ops->id);
	}

	return error;
}

static void unregister_pernet_operations(struct pernet_operations *ops)
{
	__unregister_pernet_operations(ops);
	rcu_barrier();
	if (ops->id)
		ida_free(&net_generic_ids, *ops->id);
}

/**
 * register_pernet_subsys - register a network namespace subsystem
 * @ops: pernet operations structure for the subsystem
 *
 * Register a subsystem which has init and exit functions
 * that are called when network namespaces are created and
 * destroyed respectively.
 *
 * When registered, all network namespace init functions are
 * called for every existing network namespace, allowing kernel
 * modules a race-free view of the set of network namespaces.
 *
 * When a new network namespace is created all of the init
 * methods are called in the order in which they were registered.
 *
 * When a network namespace is destroyed all of the exit methods
 * are called in the reverse of the order with which they were
 * registered.
 */
int register_pernet_subsys(struct pernet_operations *ops)
{
	int error;

	down_write(&pernet_ops_rwsem);
	error = register_pernet_operations(first_device, ops);
	up_write(&pernet_ops_rwsem);
	return error;
}
EXPORT_SYMBOL_GPL(register_pernet_subsys);

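/*
 * Usage sketch (editor's illustration; the "foo" identifiers are
 * hypothetical):
 *
 *	static unsigned int foo_net_id;
 *
 *	struct foo_net {
 *		struct list_head entries;
 *	};
 *
 *	static int __net_init foo_net_init(struct net *net)
 *	{
 *		struct foo_net *fn = net_generic(net, foo_net_id);
 *
 *		INIT_LIST_HEAD(&fn->entries);
 *		return 0;
 *	}
 *
 *	static struct pernet_operations foo_net_ops = {
 *		.init = foo_net_init,
 *		.id   = &foo_net_id,
 *		.size = sizeof(struct foo_net),
 *	};
 *
 * registered once with register_pernet_subsys(&foo_net_ops). The
 * .id/.size pair makes ops_init() allocate and zero the per-namespace
 * struct before ->init() runs.
 */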
/**
 * unregister_pernet_subsys - unregister a network namespace subsystem
 * @ops: pernet operations structure to manipulate
 *
 * Remove the pernet operations structure from the list to be
 * used when network namespaces are created or destroyed. In
 * addition run the exit method for all existing network
 * namespaces.
 */
void unregister_pernet_subsys(struct pernet_operations *ops)
{
	down_write(&pernet_ops_rwsem);
	unregister_pernet_operations(ops);
	up_write(&pernet_ops_rwsem);
}
EXPORT_SYMBOL_GPL(unregister_pernet_subsys);

/**
 * register_pernet_device - register a network namespace device
 * @ops: pernet operations structure for the subsystem
 *
 * Register a device which has init and exit functions
 * that are called when network namespaces are created and
 * destroyed respectively.
 *
 * When registered, all network namespace init functions are
 * called for every existing network namespace, allowing kernel
 * modules a race-free view of the set of network namespaces.
 *
 * When a new network namespace is created all of the init
 * methods are called in the order in which they were registered.
 *
 * When a network namespace is destroyed all of the exit methods
 * are called in the reverse of the order with which they were
 * registered.
 */
int register_pernet_device(struct pernet_operations *ops)
{
	int error;

	down_write(&pernet_ops_rwsem);
	error = register_pernet_operations(&pernet_list, ops);
	if (!error && (first_device == &pernet_list))
		first_device = &ops->list;
	up_write(&pernet_ops_rwsem);
	return error;
}
EXPORT_SYMBOL_GPL(register_pernet_device);

/**
 * unregister_pernet_device - unregister a network namespace netdevice
 * @ops: pernet operations structure to manipulate
 *
 * Remove the pernet operations structure from the list to be
 * used when network namespaces are created or destroyed. In
 * addition run the exit method for all existing network
 * namespaces.
 */
void unregister_pernet_device(struct pernet_operations *ops)
{
	down_write(&pernet_ops_rwsem);
	if (&ops->list == first_device)
		first_device = first_device->next;
	unregister_pernet_operations(ops);
	up_write(&pernet_ops_rwsem);
}
EXPORT_SYMBOL_GPL(unregister_pernet_device);

#ifdef CONFIG_NET_NS
static struct ns_common *netns_get(struct task_struct *task)
{
	struct net *net = NULL;
	struct nsproxy *nsproxy;

	task_lock(task);
	nsproxy = task->nsproxy;
	if (nsproxy)
		net = get_net(nsproxy->net_ns);
	task_unlock(task);

	return net ? &net->ns : NULL;
}

static void netns_put(struct ns_common *ns)
{
	put_net(to_net_ns(ns));
}

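/*
 * Editor's note: this is the setns(2) backend for /proc/<pid>/ns/net
 * fds; switching requires CAP_SYS_ADMIN over both the target
 * namespace's owning user namespace and the caller's own.
 */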
static int netns_install(struct nsset *nsset, struct ns_common *ns)
{
	struct nsproxy *nsproxy = nsset->nsproxy;
	struct net *net = to_net_ns(ns);

	if (!ns_capable(net->user_ns, CAP_SYS_ADMIN) ||
	    !ns_capable(nsset->cred->user_ns, CAP_SYS_ADMIN))
		return -EPERM;

	put_net(nsproxy->net_ns);
	nsproxy->net_ns = get_net(net);
	return 0;
}

static struct user_namespace *netns_owner(struct ns_common *ns)
{
	return to_net_ns(ns)->user_ns;
}

const struct proc_ns_operations netns_operations = {
	.name		= "net",
	.get		= netns_get,
	.put		= netns_put,
	.install	= netns_install,
	.owner		= netns_owner,
};
#endif
1549#endif