/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Operations on the network namespace
 */
#ifndef __NET_NET_NAMESPACE_H
#define __NET_NET_NAMESPACE_H

#include <linux/atomic.h>
#include <linux/refcount.h>
#include <linux/workqueue.h>
#include <linux/list.h>
#include <linux/sysctl.h>
#include <linux/uidgid.h>

#include <net/flow.h>
#include <net/netns/core.h>
#include <net/netns/mib.h>
#include <net/netns/unix.h>
#include <net/netns/packet.h>
#include <net/netns/ipv4.h>
#include <net/netns/ipv6.h>
#include <net/netns/ieee802154_6lowpan.h>
#include <net/netns/sctp.h>
#include <net/netns/dccp.h>
#include <net/netns/netfilter.h>
#include <net/netns/x_tables.h>
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
#include <net/netns/conntrack.h>
#endif
#include <net/netns/nftables.h>
#include <net/netns/xfrm.h>
#include <net/netns/mpls.h>
#include <net/netns/can.h>
#include <linux/ns_common.h>
#include <linux/idr.h>
#include <linux/skbuff.h>

struct user_namespace;
struct proc_dir_entry;
struct net_device;
struct sock;
struct ctl_table_header;
struct net_generic;
struct uevent_sock;
struct netns_ipvs;


#define NETDEV_HASHBITS		8
#define NETDEV_HASHENTRIES	(1 << NETDEV_HASHBITS)

struct net {
	refcount_t		passive;	/* To decide when the network
						 * namespace should be freed.
						 */
	refcount_t		count;		/* To decide when the network
						 * namespace should be shut down.
						 */
	spinlock_t		rules_mod_lock;

	u32			hash_mix;
	atomic64_t		cookie_gen;

	struct list_head	list;		/* list of network namespaces */
	struct list_head	exit_list;	/* Used to link dead nets so the
						 * pernet exit methods can be
						 * called on them (pernet_ops_rwsem
						 * read locked), or to unregister
						 * pernet ops (pernet_ops_rwsem
						 * write locked).
						 */
	struct llist_node	cleanup_list;	/* namespaces on death row */

	struct user_namespace	*user_ns;	/* Owning user namespace */
	struct ucounts		*ucounts;
	spinlock_t		nsid_lock;
	struct idr		netns_ids;

	struct ns_common	ns;

	struct proc_dir_entry	*proc_net;
	struct proc_dir_entry	*proc_net_stat;

#ifdef CONFIG_SYSCTL
	struct ctl_table_set	sysctls;
#endif

	struct sock		*rtnl;		/* rtnetlink socket */
	struct sock		*genl_sock;

	struct uevent_sock	*uevent_sock;	/* uevent socket */

	struct list_head	dev_base_head;
	struct hlist_head	*dev_name_head;
	struct hlist_head	*dev_index_head;
	unsigned int		dev_base_seq;	/* protected by rtnl_mutex */
	int			ifindex;
	unsigned int		dev_unreg_count;

	/* core fib_rules */
	struct list_head	rules_ops;

	struct list_head	fib_notifier_ops;	/* Populated by
							 * register_pernet_subsys()
							 */
	struct net_device	*loopback_dev;		/* The loopback */
	struct netns_core	core;
	struct netns_mib	mib;
	struct netns_packet	packet;
	struct netns_unix	unx;
	struct netns_ipv4	ipv4;
#if IS_ENABLED(CONFIG_IPV6)
	struct netns_ipv6	ipv6;
#endif
#if IS_ENABLED(CONFIG_IEEE802154_6LOWPAN)
	struct netns_ieee802154_lowpan	ieee802154_lowpan;
#endif
#if defined(CONFIG_IP_SCTP) || defined(CONFIG_IP_SCTP_MODULE)
	struct netns_sctp	sctp;
#endif
#if defined(CONFIG_IP_DCCP) || defined(CONFIG_IP_DCCP_MODULE)
	struct netns_dccp	dccp;
#endif
#ifdef CONFIG_NETFILTER
	struct netns_nf		nf;
	struct netns_xt		xt;
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	struct netns_ct		ct;
#endif
#if defined(CONFIG_NF_TABLES) || defined(CONFIG_NF_TABLES_MODULE)
	struct netns_nftables	nft;
#endif
#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
	struct netns_nf_frag	nf_frag;
	struct ctl_table_header	*nf_frag_frags_hdr;
#endif
	struct sock		*nfnl;
	struct sock		*nfnl_stash;
#if IS_ENABLED(CONFIG_NETFILTER_NETLINK_ACCT)
	struct list_head	nfnl_acct_list;
#endif
#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
	struct list_head	nfct_timeout_list;
#endif
#endif
#ifdef CONFIG_WEXT_CORE
	struct sk_buff_head	wext_nlevents;
#endif
	struct net_generic __rcu	*gen;

	/* Note: the following structs are cache-line aligned */
#ifdef CONFIG_XFRM
	struct netns_xfrm	xfrm;
#endif
#if IS_ENABLED(CONFIG_IP_VS)
	struct netns_ipvs	*ipvs;
#endif
#if IS_ENABLED(CONFIG_MPLS)
	struct netns_mpls	mpls;
#endif
#if IS_ENABLED(CONFIG_CAN)
	struct netns_can	can;
#endif
	struct sock		*diag_nlsk;
	atomic_t		fnhe_genid;
} __randomize_layout;

#include <linux/seq_file_net.h>

/* Init's network namespace */
extern struct net init_net;

#ifdef CONFIG_NET_NS
struct net *copy_net_ns(unsigned long flags, struct user_namespace *user_ns,
			struct net *old_net);

void net_ns_get_ownership(const struct net *net, kuid_t *uid, kgid_t *gid);

void net_ns_barrier(void);

struct ns_common *get_net_ns(struct ns_common *ns);
#else /* CONFIG_NET_NS */
#include <linux/sched.h>
#include <linux/nsproxy.h>
static inline struct net *copy_net_ns(unsigned long flags,
	struct user_namespace *user_ns, struct net *old_net)
{
	if (flags & CLONE_NEWNET)
		return ERR_PTR(-EINVAL);
	return old_net;
}

static inline void net_ns_get_ownership(const struct net *net,
					kuid_t *uid, kgid_t *gid)
{
	*uid = GLOBAL_ROOT_UID;
	*gid = GLOBAL_ROOT_GID;
}

static inline void net_ns_barrier(void) {}

static inline struct ns_common *get_net_ns(struct ns_common *ns)
{
	return ERR_PTR(-EINVAL);
}
#endif /* CONFIG_NET_NS */


extern struct list_head net_namespace_list;

struct net *get_net_ns_by_pid(pid_t pid);
struct net *get_net_ns_by_fd(int fd);

#ifdef CONFIG_SYSCTL
void ipx_register_sysctl(void);
void ipx_unregister_sysctl(void);
#else
#define ipx_register_sysctl()
#define ipx_unregister_sysctl()
#endif

#ifdef CONFIG_NET_NS
void __put_net(struct net *net);

static inline struct net *get_net(struct net *net)
{
	refcount_inc(&net->count);
	return net;
}

static inline struct net *maybe_get_net(struct net *net)
{
	/* Used when we know the struct net exists but we are
	 * not guaranteed that a reference is already held.
	 * If the reference count is zero this function fails
	 * and returns NULL.  See the usage sketch after this
	 * #ifdef/#else block.
	 */
	if (!refcount_inc_not_zero(&net->count))
		net = NULL;
	return net;
}

static inline void put_net(struct net *net)
{
	if (refcount_dec_and_test(&net->count))
		__put_net(net);
}

static inline
int net_eq(const struct net *net1, const struct net *net2)
{
	return net1 == net2;
}

static inline int check_net(const struct net *net)
{
	return refcount_read(&net->count) != 0;
}

void net_drop_ns(void *);

#else

static inline struct net *get_net(struct net *net)
{
	return net;
}

static inline void put_net(struct net *net)
{
}

static inline struct net *maybe_get_net(struct net *net)
{
	return net;
}

static inline
int net_eq(const struct net *net1, const struct net *net2)
{
	return 1;
}

static inline int check_net(const struct net *net)
{
	return 1;
}

#define net_drop_ns NULL
#endif
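
/*
 * Illustrative sketch only (not part of this header): an object that
 * caches a struct net pointer must pair get_net()/put_net(), and use
 * maybe_get_net() when the namespace may already be on its way out.
 * All names below (struct foo, foo_attach_net, foo_attach_net_weak,
 * foo_release_net) are hypothetical.
 *
 *	struct foo {
 *		struct net *net;
 *	};
 *
 *	static int foo_attach_net(struct foo *f, struct net *net)
 *	{
 *		// The caller is known to hold a reference on @net,
 *		// so a plain get_net() cannot fail.
 *		f->net = get_net(net);
 *		return 0;
 *	}
 *
 *	static int foo_attach_net_weak(struct foo *f, struct net *net)
 *	{
 *		// @net was found without holding a reference; its count
 *		// may already be zero, in which case we must not use it.
 *		f->net = maybe_get_net(net);
 *		return f->net ? 0 : -ENOENT;
 *	}
 *
 *	static void foo_release_net(struct foo *f)
 *	{
 *		put_net(f->net);	// drop the reference taken above
 *		f->net = NULL;
 *	}
 */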


typedef struct {
#ifdef CONFIG_NET_NS
	struct net *net;
#endif
} possible_net_t;

static inline void write_pnet(possible_net_t *pnet, struct net *net)
{
#ifdef CONFIG_NET_NS
	pnet->net = net;
#endif
}

static inline struct net *read_pnet(const possible_net_t *pnet)
{
#ifdef CONFIG_NET_NS
	return pnet->net;
#else
	return &init_net;
#endif
}
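
/*
 * Illustrative sketch only: an object that must remember which namespace
 * it belongs to embeds a possible_net_t and accesses it exclusively via
 * write_pnet()/read_pnet(), so the field costs no space when
 * CONFIG_NET_NS is disabled.  struct bar and its helpers are
 * hypothetical names.
 *
 *	struct bar {
 *		possible_net_t	pnet;
 *	};
 *
 *	static inline void bar_net_set(struct bar *b, struct net *net)
 *	{
 *		write_pnet(&b->pnet, net);
 *	}
 *
 *	static inline struct net *bar_net(const struct bar *b)
 *	{
 *		// Always returns &init_net when CONFIG_NET_NS=n.
 *		return read_pnet(&b->pnet);
 *	}
 */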

/* Protected by net_rwsem */
#define for_each_net(VAR)				\
	list_for_each_entry(VAR, &net_namespace_list, list)

#define for_each_net_rcu(VAR)				\
	list_for_each_entry_rcu(VAR, &net_namespace_list, list)
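
/*
 * Illustrative sketch only: walking every namespace under RCU protection.
 * count_netns() is a hypothetical helper; the non-RCU for_each_net()
 * would instead require holding net_rwsem.
 *
 *	static unsigned int count_netns(void)
 *	{
 *		struct net *net;
 *		unsigned int n = 0;
 *
 *		rcu_read_lock();
 *		for_each_net_rcu(net)
 *			n++;
 *		rcu_read_unlock();
 *		return n;
 *	}
 */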

#ifdef CONFIG_NET_NS
#define __net_init
#define __net_exit
#define __net_initdata
#define __net_initconst
#else
#define __net_init	__init
#define __net_exit	__ref
#define __net_initdata	__initdata
#define __net_initconst	__initconst
#endif

int peernet2id_alloc(struct net *net, struct net *peer, gfp_t gfp);
int peernet2id(struct net *net, struct net *peer);
bool peernet_has_id(struct net *net, struct net *peer);
struct net *get_net_ns_by_id(struct net *net, int id);

struct pernet_operations {
	struct list_head list;
	/*
	 * The methods below are called without any exclusive locks.
	 * More than one net may be constructed and destroyed in
	 * parallel on several CPUs. Every pernet_operations instance
	 * must therefore take all other pernet_operations into account
	 * and introduce locking if they share common resources.
	 *
	 * The only time they are called with an exclusive lock held is
	 * from register_pernet_subsys(), unregister_pernet_subsys(),
	 * register_pernet_device() and unregister_pernet_device().
	 *
	 * Exit methods that use blocking RCU primitives, such as
	 * synchronize_rcu(), should be implemented via exit_batch.
	 * Destroying a group of nets then requires only a single
	 * synchronize_rcu() for these pernet_operations, instead of
	 * a separate synchronize_rcu() for every net.
	 * Please avoid synchronize_rcu() altogether where possible.
	 * (An illustrative sketch follows this struct definition.)
	 */
	int (*init)(struct net *net);
	void (*exit)(struct net *net);
	void (*exit_batch)(struct list_head *net_exit_list);
	unsigned int *id;
	size_t size;
};
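
/*
 * Illustrative sketch only: a minimal pernet_operations definition.  All
 * foo_* names are hypothetical.  The optional .id/.size pair asks the
 * core to allocate a zeroed per-namespace private area of .size bytes,
 * retrievable with net_generic(net, foo_net_id) (see
 * <net/netns/generic.h>).  Exit paths that need synchronize_rcu()
 * should use .exit_batch so one grace period covers the whole batch of
 * dying namespaces.
 *
 *	static unsigned int foo_net_id;
 *
 *	struct foo_net {
 *		struct list_head entries;
 *	};
 *
 *	static int __net_init foo_net_init(struct net *net)
 *	{
 *		struct foo_net *fn = net_generic(net, foo_net_id);
 *
 *		INIT_LIST_HEAD(&fn->entries);
 *		return 0;
 *	}
 *
 *	static void __net_exit foo_net_exit_batch(struct list_head *net_exit_list)
 *	{
 *		struct net *net;
 *
 *		list_for_each_entry(net, net_exit_list, exit_list)
 *			foo_net_teardown(net);	// hypothetical per-net teardown
 *		synchronize_rcu();		// one grace period for the batch
 *	}
 *
 *	static struct pernet_operations foo_net_ops = {
 *		.init		= foo_net_init,
 *		.exit_batch	= foo_net_exit_batch,
 *		.id		= &foo_net_id,
 *		.size		= sizeof(struct foo_net),
 *	};
 */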

/*
 * Use these carefully. If you implement a network device and it
 * needs per-network-namespace operations, use device pernet operations;
 * otherwise use pernet subsys operations.
 *
 * Network interfaces need to be removed from a dying netns _before_
 * subsys notifiers can be called, as most of the network code cleanup
 * (which is done from subsys notifiers) runs with the assumption that
 * dev_remove_pack has been called so no new packets will arrive during
 * and after the cleanup functions have been called.  dev_remove_pack
 * is not per namespace, so instead the guarantee of no more packets
 * arriving in a network namespace is provided by ensuring that all
 * network devices and all sockets have left the network namespace
 * before the cleanup methods are called.
 *
 * For the longest time the ipv4 icmp code was registered as a pernet
 * device, which caused kernel oopses and panics during network
 * namespace cleanup.  So please don't get this wrong.
 * (A registration sketch follows the prototypes below.)
 */
int register_pernet_subsys(struct pernet_operations *);
void unregister_pernet_subsys(struct pernet_operations *);
int register_pernet_device(struct pernet_operations *);
void unregister_pernet_device(struct pernet_operations *);
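
/*
 * Illustrative sketch only, continuing the hypothetical foo_net_ops
 * above: an ordinary subsystem registers with register_pernet_subsys();
 * only code that implements a network device should use
 * register_pernet_device().
 *
 *	static int __init foo_init(void)
 *	{
 *		// ->init() is called for every existing namespace before
 *		// this returns, and for each namespace created afterwards.
 *		return register_pernet_subsys(&foo_net_ops);
 *	}
 *
 *	static void __exit foo_exit(void)
 *	{
 *		// ->exit()/->exit_batch() run for every remaining namespace.
 *		unregister_pernet_subsys(&foo_net_ops);
 *	}
 *
 *	module_init(foo_init);
 *	module_exit(foo_exit);
 */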

struct ctl_table;
struct ctl_table_header;

#ifdef CONFIG_SYSCTL
int net_sysctl_init(void);
struct ctl_table_header *register_net_sysctl(struct net *net, const char *path,
					     struct ctl_table *table);
void unregister_net_sysctl_table(struct ctl_table_header *header);
#else
static inline int net_sysctl_init(void) { return 0; }
static inline struct ctl_table_header *register_net_sysctl(struct net *net,
	const char *path, struct ctl_table *table)
{
	return NULL;
}
static inline void unregister_net_sysctl_table(struct ctl_table_header *header)
{
}
#endif
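
/*
 * Illustrative sketch only: registering a per-namespace sysctl.  The
 * usual pattern is to kmemdup() a template table, point each .data at
 * the matching field inside this namespace, and remember the returned
 * header so the pernet exit method can unregister_net_sysctl_table()
 * it and kfree() the duplicated table.  All foo_* names are
 * hypothetical; assume struct foo_net (from the sketch above) also has
 * an "int enabled" and a "struct ctl_table_header *sysctl_hdr" member.
 *
 *	static struct ctl_table foo_sysctl_template[] = {
 *		{
 *			.procname	= "foo_enabled",
 *			.maxlen		= sizeof(int),
 *			.mode		= 0644,
 *			.proc_handler	= proc_dointvec,
 *		},
 *		{ }	// terminating entry
 *	};
 *
 *	static int __net_init foo_sysctl_net_init(struct net *net)
 *	{
 *		struct foo_net *fn = net_generic(net, foo_net_id);
 *		struct ctl_table *tbl;
 *
 *		tbl = kmemdup(foo_sysctl_template, sizeof(foo_sysctl_template),
 *			      GFP_KERNEL);
 *		if (!tbl)
 *			return -ENOMEM;
 *		tbl[0].data = &fn->enabled;	// this namespace's copy of the knob
 *		fn->sysctl_hdr = register_net_sysctl(net, "net/foo", tbl);
 *		if (!fn->sysctl_hdr) {
 *			kfree(tbl);
 *			return -ENOMEM;
 *		}
 *		return 0;
 *	}
 */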

static inline int rt_genid_ipv4(struct net *net)
{
	return atomic_read(&net->ipv4.rt_genid);
}

static inline void rt_genid_bump_ipv4(struct net *net)
{
	atomic_inc(&net->ipv4.rt_genid);
}

extern void (*__fib6_flush_trees)(struct net *net);
static inline void rt_genid_bump_ipv6(struct net *net)
{
	if (__fib6_flush_trees)
		__fib6_flush_trees(net);
}

#if IS_ENABLED(CONFIG_IEEE802154_6LOWPAN)
static inline struct netns_ieee802154_lowpan *
net_ieee802154_lowpan(struct net *net)
{
	return &net->ieee802154_lowpan;
}
#endif

/* For callers who don't really care about whether it's IPv4 or IPv6 */
static inline void rt_genid_bump_all(struct net *net)
{
	rt_genid_bump_ipv4(net);
	rt_genid_bump_ipv6(net);
}

static inline int fnhe_genid(struct net *net)
{
	return atomic_read(&net->fnhe_genid);
}

static inline void fnhe_genid_bump(struct net *net)
{
	atomic_inc(&net->fnhe_genid);
}

#endif /* __NET_NET_NAMESPACE_H */