Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Operations on the network namespace
 */
#ifndef __NET_NET_NAMESPACE_H
#define __NET_NET_NAMESPACE_H

#include <linux/atomic.h>
#include <linux/refcount.h>
#include <linux/workqueue.h>
#include <linux/list.h>
#include <linux/sysctl.h>
#include <linux/uidgid.h>

#include <net/flow.h>
#include <net/netns/core.h>
#include <net/netns/mib.h>
#include <net/netns/unix.h>
#include <net/netns/packet.h>
#include <net/netns/ipv4.h>
#include <net/netns/ipv6.h>
#include <net/netns/nexthop.h>
#include <net/netns/ieee802154_6lowpan.h>
#include <net/netns/sctp.h>
#include <net/netns/netfilter.h>
#include <net/netns/x_tables.h>
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
#include <net/netns/conntrack.h>
#endif
#include <net/netns/nftables.h>
#include <net/netns/xfrm.h>
#include <net/netns/mpls.h>
#include <net/netns/can.h>
#include <net/netns/xdp.h>
#include <net/netns/bpf.h>
#include <linux/ns_common.h>
#include <linux/idr.h>
#include <linux/skbuff.h>
#include <linux/notifier.h>

struct user_namespace;
struct proc_dir_entry;
struct net_device;
struct sock;
struct ctl_table_header;
struct net_generic;
struct uevent_sock;
struct netns_ipvs;
struct bpf_prog;


#define NETDEV_HASHBITS		8
#define NETDEV_HASHENTRIES	(1 << NETDEV_HASHBITS)

struct net {
	/* The first cache line is often dirtied.
	 * Do not place read-mostly fields here.
	 */
	refcount_t		passive;	/* To decide when the network
						 * namespace should be freed.
						 */
	spinlock_t		rules_mod_lock;

	unsigned int		dev_unreg_count;

	unsigned int		dev_base_seq;	/* protected by rtnl_mutex */
	int			ifindex;

	spinlock_t		nsid_lock;
	atomic_t		fnhe_genid;

	struct list_head	list;		/* list of network namespaces */
	struct list_head	exit_list;	/* Linked so that pernet exit
						 * methods can be called on a
						 * dead net (pernet_ops_rwsem
						 * read locked), or to
						 * unregister pernet ops
						 * (pernet_ops_rwsem write
						 * locked).
						 */
	struct llist_node	cleanup_list;	/* namespaces on death row */

#ifdef CONFIG_KEYS
	struct key_tag		*key_domain;	/* Key domain of operation tag */
#endif
	struct user_namespace	*user_ns;	/* Owning user namespace */
	struct ucounts		*ucounts;
	struct idr		netns_ids;

	struct ns_common	ns;

	struct list_head	dev_base_head;
	struct proc_dir_entry	*proc_net;
	struct proc_dir_entry	*proc_net_stat;

#ifdef CONFIG_SYSCTL
	struct ctl_table_set	sysctls;
#endif

	struct sock		*rtnl;		/* rtnetlink socket */
	struct sock		*genl_sock;

	struct uevent_sock	*uevent_sock;	/* uevent socket */

	struct hlist_head	*dev_name_head;
	struct hlist_head	*dev_index_head;
	struct raw_notifier_head	netdev_chain;

	/* Note that @hash_mix can be read millions of times per second,
	 * so it is critical that it sits on a read-mostly cache line.
	 */
	u32			hash_mix;

	struct net_device	*loopback_dev;	/* The loopback */

	/* core fib_rules */
	struct list_head	rules_ops;

	struct netns_core	core;
	struct netns_mib	mib;
	struct netns_packet	packet;
	struct netns_unix	unx;
	struct netns_nexthop	nexthop;
	struct netns_ipv4	ipv4;
#if IS_ENABLED(CONFIG_IPV6)
	struct netns_ipv6	ipv6;
#endif
#if IS_ENABLED(CONFIG_IEEE802154_6LOWPAN)
	struct netns_ieee802154_lowpan	ieee802154_lowpan;
#endif
#if defined(CONFIG_IP_SCTP) || defined(CONFIG_IP_SCTP_MODULE)
	struct netns_sctp	sctp;
#endif
#ifdef CONFIG_NETFILTER
	struct netns_nf		nf;
	struct netns_xt		xt;
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	struct netns_ct		ct;
#endif
#if defined(CONFIG_NF_TABLES) || defined(CONFIG_NF_TABLES_MODULE)
	struct netns_nftables	nft;
#endif
#endif
#ifdef CONFIG_WEXT_CORE
	struct sk_buff_head	wext_nlevents;
#endif
	struct net_generic __rcu	*gen;

	/* Used to store attached BPF programs */
	struct netns_bpf	bpf;

	/* Note: the following structs are cache-line aligned */
#ifdef CONFIG_XFRM
	struct netns_xfrm	xfrm;
#endif

	u64			net_cookie;	/* written once */

#if IS_ENABLED(CONFIG_IP_VS)
	struct netns_ipvs	*ipvs;
#endif
#if IS_ENABLED(CONFIG_MPLS)
	struct netns_mpls	mpls;
#endif
#if IS_ENABLED(CONFIG_CAN)
	struct netns_can	can;
#endif
#ifdef CONFIG_XDP_SOCKETS
	struct netns_xdp	xdp;
#endif
#if IS_ENABLED(CONFIG_CRYPTO_USER)
	struct sock		*crypto_nlsk;
#endif
	struct sock		*diag_nlsk;
} __randomize_layout;

#include <linux/seq_file_net.h>

/* Init's network namespace */
extern struct net init_net;

#ifdef CONFIG_NET_NS
struct net *copy_net_ns(unsigned long flags, struct user_namespace *user_ns,
			struct net *old_net);

void net_ns_get_ownership(const struct net *net, kuid_t *uid, kgid_t *gid);

void net_ns_barrier(void);

struct ns_common *get_net_ns(struct ns_common *ns);
struct net *get_net_ns_by_fd(int fd);
#else /* CONFIG_NET_NS */
#include <linux/sched.h>
#include <linux/nsproxy.h>
static inline struct net *copy_net_ns(unsigned long flags,
	struct user_namespace *user_ns, struct net *old_net)
{
	if (flags & CLONE_NEWNET)
		return ERR_PTR(-EINVAL);
	return old_net;
}

static inline void net_ns_get_ownership(const struct net *net,
					kuid_t *uid, kgid_t *gid)
{
	*uid = GLOBAL_ROOT_UID;
	*gid = GLOBAL_ROOT_GID;
}

static inline void net_ns_barrier(void) {}

static inline struct ns_common *get_net_ns(struct ns_common *ns)
{
	return ERR_PTR(-EINVAL);
}

static inline struct net *get_net_ns_by_fd(int fd)
{
	return ERR_PTR(-EINVAL);
}
#endif /* CONFIG_NET_NS */
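
/*
 * Illustrative sketch (not part of the original header): resolving a
 * namespace from a userspace-supplied /proc/<pid>/ns/net fd. Error
 * handling is minimal; on success the returned net carries a reference
 * that the caller must drop with put_net().
 *
 *	struct net *net = get_net_ns_by_fd(fd);
 *
 *	if (IS_ERR(net))
 *		return PTR_ERR(net);
 *	... operate on @net ...
 *	put_net(net);
 */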


extern struct list_head net_namespace_list;

struct net *get_net_ns_by_pid(pid_t pid);

#ifdef CONFIG_SYSCTL
void ipx_register_sysctl(void);
void ipx_unregister_sysctl(void);
#else
#define ipx_register_sysctl()
#define ipx_unregister_sysctl()
#endif

#ifdef CONFIG_NET_NS
void __put_net(struct net *net);

static inline struct net *get_net(struct net *net)
{
	refcount_inc(&net->ns.count);
	return net;
}

static inline struct net *maybe_get_net(struct net *net)
{
	/* Used when we know the struct net exists but are not
	 * guaranteed that a reference is already held. If the
	 * reference count is zero, this function fails and
	 * returns NULL.
	 */
	if (!refcount_inc_not_zero(&net->ns.count))
		net = NULL;
	return net;
}

static inline void put_net(struct net *net)
{
	if (refcount_dec_and_test(&net->ns.count))
		__put_net(net);
}
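
/*
 * Illustrative sketch (not part of the original header): the typical
 * maybe_get_net() pattern for code that holds only a weak pointer to a
 * namespace (e.g. from an RCU-protected lookup) and must not resurrect
 * a net whose reference count has already reached zero.
 *
 *	struct net *net = maybe_get_net(weak_net);
 *
 *	if (!net)
 *		return;		... namespace is being torn down ...
 *	... use @net; the taken reference keeps it alive ...
 *	put_net(net);
 */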

static inline
int net_eq(const struct net *net1, const struct net *net2)
{
	return net1 == net2;
}

static inline int check_net(const struct net *net)
{
	return refcount_read(&net->ns.count) != 0;
}

void net_drop_ns(void *);

#else

static inline struct net *get_net(struct net *net)
{
	return net;
}

static inline void put_net(struct net *net)
{
}

static inline struct net *maybe_get_net(struct net *net)
{
	return net;
}

static inline
int net_eq(const struct net *net1, const struct net *net2)
{
	return 1;
}

static inline int check_net(const struct net *net)
{
	return 1;
}

#define net_drop_ns NULL
#endif


typedef struct {
#ifdef CONFIG_NET_NS
	struct net *net;
#endif
} possible_net_t;

static inline void write_pnet(possible_net_t *pnet, struct net *net)
{
#ifdef CONFIG_NET_NS
	pnet->net = net;
#endif
}

static inline struct net *read_pnet(const possible_net_t *pnet)
{
#ifdef CONFIG_NET_NS
	return pnet->net;
#else
	return &init_net;
#endif
}

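/*
 * Illustrative sketch (not part of the original header): possible_net_t
 * lets a structure record its owning namespace at zero size cost when
 * CONFIG_NET_NS is disabled. The struct below is hypothetical.
 *
 *	struct my_table {
 *		possible_net_t pnet;
 *		...
 *	};
 *
 *	write_pnet(&tbl->pnet, net);		... at creation time ...
 *	struct net *net = read_pnet(&tbl->pnet);  ... at lookup time ...
 */
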
/* Protected by net_rwsem */
#define for_each_net(VAR)				\
	list_for_each_entry(VAR, &net_namespace_list, list)
#define for_each_net_continue_reverse(VAR)		\
	list_for_each_entry_continue_reverse(VAR, &net_namespace_list, list)
#define for_each_net_rcu(VAR)				\
	list_for_each_entry_rcu(VAR, &net_namespace_list, list)

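/*
 * Illustrative sketch (not part of the original header): walking all
 * namespaces from a context that cannot take net_rwsem. The RCU variant
 * only guarantees the list linkage; take a reference via maybe_get_net()
 * before using a net outside the read-side critical section.
 *
 *	struct net *net;
 *
 *	rcu_read_lock();
 *	for_each_net_rcu(net) {
 *		... inspect @net; do not sleep here ...
 *	}
 *	rcu_read_unlock();
 */
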
#ifdef CONFIG_NET_NS
#define __net_init
#define __net_exit
#define __net_initdata
#define __net_initconst
#else
#define __net_init	__init
#define __net_exit	__ref
#define __net_initdata	__initdata
#define __net_initconst	__initconst
#endif

int peernet2id_alloc(struct net *net, struct net *peer, gfp_t gfp);
int peernet2id(const struct net *net, struct net *peer);
bool peernet_has_id(const struct net *net, struct net *peer);
struct net *get_net_ns_by_id(const struct net *net, int id);

struct pernet_operations {
	struct list_head list;
	/*
	 * The methods below are called without any exclusive locks:
	 * more than one net may be constructed and destructed in
	 * parallel on several CPUs. Every pernet_operations must
	 * therefore take all other pernet_operations into account
	 * and introduce locking if they share common resources.
	 *
	 * The only time they are called with an exclusive lock held
	 * is from register_pernet_subsys(), unregister_pernet_subsys(),
	 * register_pernet_device() and unregister_pernet_device().
	 *
	 * Exit methods that use blocking RCU primitives, such as
	 * synchronize_rcu(), should be implemented via exit_batch.
	 * Destruction of a group of nets then requires a single
	 * synchronize_rcu() for these pernet_operations, instead of
	 * a separate synchronize_rcu() for every net.
	 * Avoid synchronize_rcu() altogether wherever possible.
	 *
	 * Note that a combination of pre_exit() and exit() can
	 * be used, since a synchronize_rcu() is guaranteed between
	 * the calls.
	 */
	int (*init)(struct net *net);
	void (*pre_exit)(struct net *net);
	void (*exit)(struct net *net);
	void (*exit_batch)(struct list_head *net_exit_list);
	unsigned int *id;
	size_t size;
};

/*
 * Use these carefully. If you implement a network device and it
 * needs per-network-namespace operations, use device pernet operations;
 * otherwise use pernet subsys operations.
 *
 * Network interfaces need to be removed from a dying netns _before_
 * subsys notifiers can be called, as most of the network code cleanup
 * (which is done from subsys notifiers) runs with the assumption that
 * dev_remove_pack has been called, so no new packets will arrive during
 * and after the cleanup functions have been called. dev_remove_pack
 * is not per namespace, so instead the guarantee of no more packets
 * arriving in a network namespace is provided by ensuring that all
 * network devices and all sockets have left the network namespace
 * before the cleanup methods are called.
 *
 * For the longest time the ipv4 icmp code was registered as a pernet
 * device, which caused kernel oopses and panics during network
 * namespace cleanup. So please don't get this wrong.
 */
int register_pernet_subsys(struct pernet_operations *);
void unregister_pernet_subsys(struct pernet_operations *);
int register_pernet_device(struct pernet_operations *);
void unregister_pernet_device(struct pernet_operations *);
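
/*
 * Illustrative sketch (not part of the original header): a minimal
 * pernet subsystem. All names below are hypothetical; @id and @size
 * are left unset, so no per-net storage is allocated via net_generic.
 * Registration also runs .init for every already-existing namespace.
 *
 *	static int __net_init my_net_init(struct net *net)
 *	{
 *		... allocate/initialize per-namespace state for @net ...
 *		return 0;
 *	}
 *
 *	static void __net_exit my_net_exit(struct net *net)
 *	{
 *		... tear down per-namespace state for @net ...
 *	}
 *
 *	static struct pernet_operations my_net_ops = {
 *		.init = my_net_init,
 *		.exit = my_net_exit,
 *	};
 *
 *	err = register_pernet_subsys(&my_net_ops);
 */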

struct ctl_table;

#ifdef CONFIG_SYSCTL
int net_sysctl_init(void);
struct ctl_table_header *register_net_sysctl(struct net *net, const char *path,
					     struct ctl_table *table);
void unregister_net_sysctl_table(struct ctl_table_header *header);
#else
static inline int net_sysctl_init(void) { return 0; }
static inline struct ctl_table_header *register_net_sysctl(struct net *net,
	const char *path, struct ctl_table *table)
{
	return NULL;
}
static inline void unregister_net_sysctl_table(struct ctl_table_header *header)
{
}
#endif
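
/*
 * Illustrative sketch (not part of the original header): registering a
 * per-namespace sysctl. The table, path and value are hypothetical;
 * the returned header must be passed to unregister_net_sysctl_table()
 * on teardown.
 *
 *	static struct ctl_table my_table[] = {
 *		{
 *			.procname	= "my_knob",
 *			.data		= &my_value,
 *			.maxlen		= sizeof(int),
 *			.mode		= 0644,
 *			.proc_handler	= proc_dointvec,
 *		},
 *		{ }
 *	};
 *
 *	hdr = register_net_sysctl(net, "net/my_subsys", my_table);
 *	if (!hdr)
 *		return -ENOMEM;
 */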

static inline int rt_genid_ipv4(const struct net *net)
{
	return atomic_read(&net->ipv4.rt_genid);
}

#if IS_ENABLED(CONFIG_IPV6)
static inline int rt_genid_ipv6(const struct net *net)
{
	return atomic_read(&net->ipv6.fib6_sernum);
}
#endif

static inline void rt_genid_bump_ipv4(struct net *net)
{
	atomic_inc(&net->ipv4.rt_genid);
}

extern void (*__fib6_flush_trees)(struct net *net);
static inline void rt_genid_bump_ipv6(struct net *net)
{
	if (__fib6_flush_trees)
		__fib6_flush_trees(net);
}

#if IS_ENABLED(CONFIG_IEEE802154_6LOWPAN)
static inline struct netns_ieee802154_lowpan *
net_ieee802154_lowpan(struct net *net)
{
	return &net->ieee802154_lowpan;
}
#endif

/* For callers who don't really care about whether it's IPv4 or IPv6 */
static inline void rt_genid_bump_all(struct net *net)
{
	rt_genid_bump_ipv4(net);
	rt_genid_bump_ipv6(net);
}
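
/*
 * Illustrative sketch (not part of the original header): the generation
 * counter pattern these helpers support. A cached object snapshots the
 * genid when it is created and is treated as stale once the
 * namespace-wide counter has been bumped. @entry is hypothetical.
 *
 *	entry->genid = rt_genid_ipv4(net);	... at cache-fill time ...
 *	...
 *	if (entry->genid != rt_genid_ipv4(net))
 *		... entry is stale: discard and rebuild it ...
 */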

static inline int fnhe_genid(const struct net *net)
{
	return atomic_read(&net->fnhe_genid);
}

static inline void fnhe_genid_bump(struct net *net)
{
	atomic_inc(&net->fnhe_genid);
}

#endif /* __NET_NET_NAMESPACE_H */