#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/workqueue.h>
#include <linux/rtnetlink.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/idr.h>
#include <linux/rculist.h>
#include <linux/nsproxy.h>
#include <linux/fs.h>
#include <linux/proc_ns.h>
#include <linux/file.h>
#include <linux/export.h>
#include <linux/user_namespace.h>
#include <linux/net_namespace.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
/*
 *	Our network namespace constructor/destructor lists
 */

static LIST_HEAD(pernet_list);
static struct list_head *first_device = &pernet_list;
DEFINE_MUTEX(net_mutex);

LIST_HEAD(net_namespace_list);
EXPORT_SYMBOL_GPL(net_namespace_list);
struct net init_net = {
	.dev_base_head = LIST_HEAD_INIT(init_net.dev_base_head),
};
EXPORT_SYMBOL(init_net);
#define INITIAL_NET_GEN_PTRS	13 /* +1 for len +2 for rcu_head */

static unsigned int max_gen_ptrs = INITIAL_NET_GEN_PTRS;
static struct net_generic *net_alloc_generic(void)
{
	struct net_generic *ng;
	size_t generic_size = offsetof(struct net_generic, ptr[max_gen_ptrs]);

	ng = kzalloc(generic_size, GFP_KERNEL);
	if (ng)
		ng->len = max_gen_ptrs;

	return ng;
}
static int net_assign_generic(struct net *net, int id, void *data)
{
	struct net_generic *ng, *old_ng;

	BUG_ON(!mutex_is_locked(&net_mutex));
	BUG_ON(id == 0);

	old_ng = rcu_dereference_protected(net->gen,
					   lockdep_is_held(&net_mutex));
	ng = old_ng;
	if (old_ng->len >= id)
		goto assign;

	ng = net_alloc_generic();
	if (ng == NULL)
		return -ENOMEM;

	/*
	 * Some synchronisation notes:
	 *
	 * The net_generic explores the net->gen array inside rcu
	 * read section. Besides once set the net->gen->ptr[x]
	 * pointer never changes (see rules in netns/generic.h).
	 *
	 * That said, we simply duplicate this array and schedule
	 * the old copy for kfree after a grace period.
	 */

	memcpy(&ng->ptr, &old_ng->ptr, old_ng->len * sizeof(void *));

	rcu_assign_pointer(net->gen, ng);
	kfree_rcu(old_ng, rcu);
assign:
	ng->ptr[id - 1] = data;
	return 0;
}
static int ops_init(const struct pernet_operations *ops, struct net *net)
{
	int err = -ENOMEM;
	void *data = NULL;

	if (ops->id && ops->size) {
		data = kzalloc(ops->size, GFP_KERNEL);
		if (!data)
			goto out;

		err = net_assign_generic(net, *ops->id, data);
		if (err)
			goto cleanup;
	}
	err = 0;
	if (ops->init)
		err = ops->init(net);
	if (!err)
		return 0;

cleanup:
	kfree(data);
out:
	return err;
}
static void ops_free(const struct pernet_operations *ops, struct net *net)
{
	if (ops->id && ops->size) {
		int id = *ops->id;

		kfree(net_generic(net, id));
	}
}
static void ops_exit_list(const struct pernet_operations *ops,
			  struct list_head *net_exit_list)
{
	struct net *net;

	if (ops->exit) {
		list_for_each_entry(net, net_exit_list, exit_list)
			ops->exit(net);
	}
	if (ops->exit_batch)
		ops->exit_batch(net_exit_list);
}
static void ops_free_list(const struct pernet_operations *ops,
			  struct list_head *net_exit_list)
{
	struct net *net;

	if (ops->size && ops->id) {
		list_for_each_entry(net, net_exit_list, exit_list)
			ops_free(ops, net);
	}
}
static void rtnl_net_notifyid(struct net *net, int cmd, int id);
static int alloc_netid(struct net *net, struct net *peer, int reqid)
{
	int min = 0, max = 0, id;

	if (reqid >= 0) {
		min = reqid;
		max = reqid + 1;
	}

	id = idr_alloc(&net->netns_ids, peer, min, max, GFP_KERNEL);
	if (id >= 0)
		rtnl_net_notifyid(net, RTM_NEWNSID, id);

	return id;
}
/* This function is used by idr_for_each(). If net is equal to peer, the
 * function returns the id so that idr_for_each() stops. Because we cannot
 * return the id 0 (idr_for_each() would not stop), we return the magic
 * value NET_ID_ZERO (-1) for it.
 */
#define NET_ID_ZERO -1
static int net_eq_idr(int id, void *net, void *peer)
{
	if (net_eq(net, peer))
		return id ? : NET_ID_ZERO;
	return 0;
}
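
/* For reference: the idr_for_each() contract relied on above is
 *
 *	int idr_for_each(struct idr *idr,
 *			 int (*fn)(int id, void *p, void *data), void *data);
 *
 * iteration stops at the first callback returning nonzero, and that value
 * becomes idr_for_each()'s return value. A match on id 0 would therefore
 * be indistinguishable from "no match" (0), hence NET_ID_ZERO.
 */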
static int __peernet2id(struct net *net, struct net *peer, bool alloc)
{
	int id = idr_for_each(&net->netns_ids, net_eq_idr, peer);

	/* Magic value for id 0. */
	if (id == NET_ID_ZERO)
		return 0;
	if (id > 0)
		return id;

	if (alloc) {
		id = alloc_netid(net, peer, -1);
		return id >= 0 ? id : NETNSA_NSID_NOT_ASSIGNED;
	}

	return NETNSA_NSID_NOT_ASSIGNED;
}
/* This function returns the id of a peer netns. If no id is assigned, one
 * will be allocated and returned.
 */
int peernet2id_alloc(struct net *net, struct net *peer)
{
	/* Only allocate an id while the peer is still refcounted (alive). */
	bool alloc = atomic_read(&peer->count) != 0;

	return __peernet2id(net, peer, alloc);
}
EXPORT_SYMBOL(peernet2id_alloc);
struct net *get_net_ns_by_id(struct net *net, int id)
{
	struct net *peer;

	if (id < 0)
		return NULL;

	rcu_read_lock();
	peer = idr_find(&net->netns_ids, id);
	if (peer)
		get_net(peer);
	rcu_read_unlock();

	return peer;
}
/*
 * setup_net runs the initializers for the network namespace object.
 */
static __net_init int setup_net(struct net *net, struct user_namespace *user_ns)
{
	/* Must be called with net_mutex held */
	const struct pernet_operations *ops, *saved_ops;
	int error = 0;
	LIST_HEAD(net_exit_list);

	atomic_set(&net->count, 1);
	atomic_set(&net->passive, 1);
	net->dev_base_seq = 1;
	net->user_ns = user_ns;
	idr_init(&net->netns_ids);

	list_for_each_entry(ops, &pernet_list, list) {
		error = ops_init(ops, net);
		if (error < 0)
			goto out_undo;
	}
out:
	return error;

out_undo:
	/* Walk through the list backwards calling the exit functions
	 * for the pernet modules whose init functions did not fail.
	 */
	list_add(&net->exit_list, &net_exit_list);
	saved_ops = ops;
	list_for_each_entry_continue_reverse(ops, &pernet_list, list)
		ops_exit_list(ops, &net_exit_list);

	ops = saved_ops;
	list_for_each_entry_continue_reverse(ops, &pernet_list, list)
		ops_free_list(ops, &net_exit_list);

	rcu_barrier();
	goto out;
}
#ifdef CONFIG_NET_NS
static struct kmem_cache *net_cachep;
static struct workqueue_struct *netns_wq;
static struct net *net_alloc(void)
{
	struct net *net = NULL;
	struct net_generic *ng;

	ng = net_alloc_generic();
	if (!ng)
		goto out;

	net = kmem_cache_zalloc(net_cachep, GFP_KERNEL);
	if (!net)
		goto out_free;

	rcu_assign_pointer(net->gen, ng);
out:
	return net;

out_free:
	kfree(ng);
	goto out;
}
static void net_free(struct net *net)
{
	kfree(rcu_access_pointer(net->gen));
	kmem_cache_free(net_cachep, net);
}
void net_drop_ns(void *p)
{
	struct net *ns = p;

	if (ns && atomic_dec_and_test(&ns->passive))
		net_free(ns);
}
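
/* Editorial note: struct net carries two refcounts. net->count is the
 * user reference dropped via put_net() and decides when the namespace is
 * shut down (cleanup_net()); net->passive only pins the struct net memory
 * for lockless readers, and the final net_free() happens here.
 */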
struct net *copy_net_ns(unsigned long flags,
			struct user_namespace *user_ns, struct net *old_net)
{
	struct net *net;
	int rv;

	if (!(flags & CLONE_NEWNET))
		return get_net(old_net);

	net = net_alloc();
	if (!net)
		return ERR_PTR(-ENOMEM);

	get_user_ns(user_ns);

	mutex_lock(&net_mutex);
	rv = setup_net(net, user_ns);
	if (rv == 0) {
		rtnl_lock();
		list_add_tail_rcu(&net->list, &net_namespace_list);
		rtnl_unlock();
	}
	mutex_unlock(&net_mutex);
	if (rv < 0) {
		put_user_ns(user_ns);
		net_drop_ns(net);
		return ERR_PTR(rv);
	}
	return net;
}
static DEFINE_SPINLOCK(cleanup_list_lock);
static LIST_HEAD(cleanup_list);	/* Must hold cleanup_list_lock to touch */
static void cleanup_net(struct work_struct *work)
{
	const struct pernet_operations *ops;
	struct net *net, *tmp;
	struct list_head net_kill_list;
	LIST_HEAD(net_exit_list);

	/* Atomically snapshot the list of namespaces to cleanup */
	spin_lock_irq(&cleanup_list_lock);
	list_replace_init(&cleanup_list, &net_kill_list);
	spin_unlock_irq(&cleanup_list_lock);

	mutex_lock(&net_mutex);

	/* Don't let anyone else find us. */
	rtnl_lock();
	list_for_each_entry(net, &net_kill_list, cleanup_list) {
		list_del_rcu(&net->list);
		list_add_tail(&net->exit_list, &net_exit_list);
		for_each_net(tmp) {
			int id = __peernet2id(tmp, net, false);

			if (id >= 0) {
				rtnl_net_notifyid(tmp, RTM_DELNSID, id);
				idr_remove(&tmp->netns_ids, id);
			}
		}
		idr_destroy(&net->netns_ids);
	}
	rtnl_unlock();

	/*
	 * Another CPU might be rcu-iterating the list, wait for it.
	 * This needs to be before calling the exit() notifiers, so
	 * the rcu_barrier() below isn't sufficient alone.
	 */
	synchronize_rcu();

	/* Run all of the network namespace exit methods */
	list_for_each_entry_reverse(ops, &pernet_list, list)
		ops_exit_list(ops, &net_exit_list);

	/* Free the net generic variables */
	list_for_each_entry_reverse(ops, &pernet_list, list)
		ops_free_list(ops, &net_exit_list);

	mutex_unlock(&net_mutex);

	/* Ensure there are no outstanding rcu callbacks using this
	 * network namespace.
	 */
	rcu_barrier();

	/* Finally it is safe to free my network namespace structure */
	list_for_each_entry_safe(net, tmp, &net_exit_list, exit_list) {
		list_del_init(&net->exit_list);
		put_user_ns(net->user_ns);
		net_drop_ns(net);
	}
}
static DECLARE_WORK(net_cleanup_work, cleanup_net);
void __put_net(struct net *net)
{
	/* Cleanup the network namespace in process context */
	unsigned long flags;

	spin_lock_irqsave(&cleanup_list_lock, flags);
	list_add(&net->cleanup_list, &cleanup_list);
	spin_unlock_irqrestore(&cleanup_list_lock, flags);

	queue_work(netns_wq, &net_cleanup_work);
}
EXPORT_SYMBOL_GPL(__put_net);
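
/* For reference: the matching fast path is an inline in
 * include/net/net_namespace.h; put_net() decrements net->count and calls
 * __put_net() only when it hits zero, so the heavyweight teardown in
 * cleanup_net() always runs from the "netns" workqueue rather than the
 * caller's (possibly atomic) context.
 */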
struct net *get_net_ns_by_fd(int fd)
{
	struct file *file;
	struct ns_common *ns;
	struct net *net;

	file = proc_ns_fget(fd);
	if (IS_ERR(file))
		return ERR_CAST(file);

	ns = get_proc_ns(file_inode(file));
	if (ns->ops == &netns_operations)
		net = get_net(container_of(ns, struct net, ns));
	else
		net = ERR_PTR(-EINVAL);

	fput(file);
	return net;
}

#else

struct net *get_net_ns_by_fd(int fd)
{
	return ERR_PTR(-EINVAL);
}

#endif
EXPORT_SYMBOL_GPL(get_net_ns_by_fd);
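
/* Userspace sketch (illustrative only): the fd accepted above usually
 * comes from opening a namespace file, the same descriptor setns(2)
 * takes for CLONE_NEWNET:
 *
 *	int fd = open("/proc/self/ns/net", O_RDONLY);
 *	setns(fd, CLONE_NEWNET);
 */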
struct net *get_net_ns_by_pid(pid_t pid)
{
	struct task_struct *tsk;
	struct net *net;

	/* Lookup the network namespace */
	net = ERR_PTR(-ESRCH);
	rcu_read_lock();
	tsk = find_task_by_vpid(pid);
	if (tsk) {
		struct nsproxy *nsproxy;

		task_lock(tsk);
		nsproxy = tsk->nsproxy;
		if (nsproxy)
			net = get_net(nsproxy->net_ns);
		task_unlock(tsk);
	}
	rcu_read_unlock();
	return net;
}
EXPORT_SYMBOL_GPL(get_net_ns_by_pid);
static __net_init int net_ns_net_init(struct net *net)
{
#ifdef CONFIG_NET_NS
	net->ns.ops = &netns_operations;
#endif
	return ns_alloc_inum(&net->ns);
}

static __net_exit void net_ns_net_exit(struct net *net)
{
	ns_free_inum(&net->ns);
}
static struct pernet_operations __net_initdata net_ns_ops = {
	.init = net_ns_net_init,
	.exit = net_ns_net_exit,
};
static struct nla_policy rtnl_net_policy[NETNSA_MAX + 1] = {
	[NETNSA_NONE]	= { .type = NLA_UNSPEC },
	[NETNSA_NSID]	= { .type = NLA_S32 },
	[NETNSA_PID]	= { .type = NLA_U32 },
	[NETNSA_FD]	= { .type = NLA_U32 },
};
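
/* For reference: RTM_NEWNSID/RTM_GETNSID messages consist of a struct
 * rtgenmsg header followed by the attributes validated above. The peer
 * namespace is designated by NETNSA_PID or NETNSA_FD, and NETNSA_NSID
 * carries the signed id; iproute2's "ip netns set NAME NSID" is one
 * sender of RTM_NEWNSID (assumption based on contemporary iproute2).
 */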
static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tb[NETNSA_MAX + 1];
	struct net *peer;
	int nsid, err;

	err = nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX,
			  rtnl_net_policy);
	if (err < 0)
		return err;
	if (!tb[NETNSA_NSID])
		return -EINVAL;
	nsid = nla_get_s32(tb[NETNSA_NSID]);

	if (tb[NETNSA_PID])
		peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID]));
	else if (tb[NETNSA_FD])
		peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
	else
		return -EINVAL;
	if (IS_ERR(peer))
		return PTR_ERR(peer);

	if (__peernet2id(net, peer, false) >= 0) {
		err = -EEXIST;
		goto out;
	}

	err = alloc_netid(net, peer, nsid);
	if (err > 0)
		err = 0;
out:
	put_net(peer);
	return err;
}
static int rtnl_net_get_size(void)
{
	return NLMSG_ALIGN(sizeof(struct rtgenmsg))
	       + nla_total_size(sizeof(s32)) /* NETNSA_NSID */
	       ;
}
static int rtnl_net_fill(struct sk_buff *skb, u32 portid, u32 seq, int flags,
			 int cmd, struct net *net, int nsid)
{
	struct nlmsghdr *nlh;
	struct rtgenmsg *rth;

	nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rth), flags);
	if (!nlh)
		return -EMSGSIZE;

	rth = nlmsg_data(nlh);
	rth->rtgen_family = AF_UNSPEC;

	if (nla_put_s32(skb, NETNSA_NSID, nsid))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
static int rtnl_net_getid(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tb[NETNSA_MAX + 1];
	struct sk_buff *msg;
	struct net *peer;
	int err, id;

	err = nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX,
			  rtnl_net_policy);
	if (err < 0)
		return err;
	if (tb[NETNSA_PID])
		peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID]));
	else if (tb[NETNSA_FD])
		peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
	else
		return -EINVAL;
	if (IS_ERR(peer))
		return PTR_ERR(peer);

	msg = nlmsg_new(rtnl_net_get_size(), GFP_KERNEL);
	if (!msg) {
		err = -ENOMEM;
		goto out;
	}

	id = __peernet2id(net, peer, false);
	err = rtnl_net_fill(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
			    RTM_GETNSID, net, id);
	if (err < 0)
		goto err_out;

	err = rtnl_unicast(msg, net, NETLINK_CB(skb).portid);
	goto out;

err_out:
	nlmsg_free(msg);
out:
	put_net(peer);
	return err;
}
struct rtnl_net_dump_cb {
	struct net *net;
	struct sk_buff *skb;
	struct netlink_callback *cb;
	int idx;
	int s_idx;
};

static int rtnl_net_dumpid_one(int id, void *peer, void *data)
{
	struct rtnl_net_dump_cb *net_cb = (struct rtnl_net_dump_cb *)data;
	int ret;

	if (net_cb->idx < net_cb->s_idx)
		goto cont;

	ret = rtnl_net_fill(net_cb->skb, NETLINK_CB(net_cb->cb->skb).portid,
			    net_cb->cb->nlh->nlmsg_seq, NLM_F_MULTI,
			    RTM_NEWNSID, net_cb->net, id);
	if (ret < 0)
		return ret;

cont:
	net_cb->idx++;
	return 0;
}
static int rtnl_net_dumpid(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct rtnl_net_dump_cb net_cb = {
		.net = net,
		.skb = skb,
		.cb = cb,
		.idx = 0,
		.s_idx = cb->args[0],
	};

	idr_for_each(&net->netns_ids, rtnl_net_dumpid_one, &net_cb);

	cb->args[0] = net_cb.idx;
	return skb->len;
}
static void rtnl_net_notifyid(struct net *net, int cmd, int id)
{
	struct sk_buff *msg;
	int err = -ENOMEM;

	msg = nlmsg_new(rtnl_net_get_size(), GFP_KERNEL);
	if (!msg)
		goto out;

	err = rtnl_net_fill(msg, 0, 0, 0, cmd, net, id);
	if (err < 0)
		goto err_out;

	rtnl_notify(msg, net, 0, RTNLGRP_NSID, NULL, 0);
	return;

err_out:
	nlmsg_free(msg);
out:
	rtnl_set_sk_err(net, RTNLGRP_NSID, err);
}
static int __init net_ns_init(void)
{
	struct net_generic *ng;

#ifdef CONFIG_NET_NS
	net_cachep = kmem_cache_create("net_namespace", sizeof(struct net),
				       SMP_CACHE_BYTES,
				       SLAB_PANIC, NULL);

	/* Create workqueue for cleanup */
	netns_wq = create_singlethread_workqueue("netns");
	if (!netns_wq)
		panic("Could not create netns workq");
#endif

	ng = net_alloc_generic();
	if (!ng)
		panic("Could not allocate generic netns");

	rcu_assign_pointer(init_net.gen, ng);

	mutex_lock(&net_mutex);
	if (setup_net(&init_net, &init_user_ns))
		panic("Could not setup the initial network namespace");

	rtnl_lock();
	list_add_tail_rcu(&init_net.list, &net_namespace_list);
	rtnl_unlock();

	mutex_unlock(&net_mutex);

	register_pernet_subsys(&net_ns_ops);

	rtnl_register(PF_UNSPEC, RTM_NEWNSID, rtnl_net_newid, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETNSID, rtnl_net_getid, rtnl_net_dumpid,
		      NULL);

	return 0;
}

pure_initcall(net_ns_init);
#ifdef CONFIG_NET_NS
static int __register_pernet_operations(struct list_head *list,
					struct pernet_operations *ops)
{
	struct net *net;
	int error;
	LIST_HEAD(net_exit_list);

	list_add_tail(&ops->list, list);
	if (ops->init || (ops->id && ops->size)) {
		for_each_net(net) {
			error = ops_init(ops, net);
			if (error)
				goto out_undo;
			list_add_tail(&net->exit_list, &net_exit_list);
		}
	}
	return 0;

out_undo:
	/* If I have an error cleanup all namespaces I initialized */
	list_del(&ops->list);
	ops_exit_list(ops, &net_exit_list);
	ops_free_list(ops, &net_exit_list);
	return error;
}
static void __unregister_pernet_operations(struct pernet_operations *ops)
{
	struct net *net;
	LIST_HEAD(net_exit_list);

	list_del(&ops->list);
	for_each_net(net)
		list_add_tail(&net->exit_list, &net_exit_list);
	ops_exit_list(ops, &net_exit_list);
	ops_free_list(ops, &net_exit_list);
}
#else

static int __register_pernet_operations(struct list_head *list,
					struct pernet_operations *ops)
{
	return ops_init(ops, &init_net);
}

static void __unregister_pernet_operations(struct pernet_operations *ops)
{
	LIST_HEAD(net_exit_list);

	list_add(&init_net.exit_list, &net_exit_list);
	ops_exit_list(ops, &net_exit_list);
	ops_free_list(ops, &net_exit_list);
}

#endif /* CONFIG_NET_NS */
static DEFINE_IDA(net_generic_ids);
static int register_pernet_operations(struct list_head *list,
				      struct pernet_operations *ops)
{
	int error;

	if (ops->id) {
again:
		error = ida_get_new_above(&net_generic_ids, 1, ops->id);
		if (error < 0) {
			if (error == -EAGAIN) {
				ida_pre_get(&net_generic_ids, GFP_KERNEL);
				goto again;
			}
			return error;
		}
		max_gen_ptrs = max_t(unsigned int, max_gen_ptrs, *ops->id);
	}
	error = __register_pernet_operations(list, ops);
	if (error) {
		rcu_barrier();
		if (ops->id)
			ida_remove(&net_generic_ids, *ops->id);
	}

	return error;
}
static void unregister_pernet_operations(struct pernet_operations *ops)
{
	__unregister_pernet_operations(ops);
	rcu_barrier();
	if (ops->id)
		ida_remove(&net_generic_ids, *ops->id);
}
/**
 *	register_pernet_subsys - register a network namespace subsystem
 *	@ops: pernet operations structure for the subsystem
 *
 *	Register a subsystem which has init and exit functions
 *	that are called when network namespaces are created and
 *	destroyed respectively.
 *
 *	When registered all network namespace init functions are
 *	called for every existing network namespace, allowing kernel
 *	modules to have a race-free view of the set of network namespaces.
 *
 *	When a new network namespace is created all of the init
 *	methods are called in the order in which they were registered.
 *
 *	When a network namespace is destroyed all of the exit methods
 *	are called in the reverse of the order with which they were
 *	registered.
 */
int register_pernet_subsys(struct pernet_operations *ops)
{
	int error;

	mutex_lock(&net_mutex);
	error = register_pernet_operations(first_device, ops);
	mutex_unlock(&net_mutex);
	return error;
}
EXPORT_SYMBOL_GPL(register_pernet_subsys);
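
/* Usage sketch (illustrative only, not part of this file): a minimal
 * subsystem that keeps per-namespace state via the ops->id/ops->size
 * mechanism above. The foo_* names are hypothetical. With .id and .size
 * set, ops_init() kzalloc()s the per-net struct and hangs it off
 * net->gen, and net_generic(net, foo_net_id) retrieves it later.
 */
#if 0
static int foo_net_id __read_mostly;

struct foo_net {
	int counter;		/* example per-namespace state */
};

static int __net_init foo_net_init(struct net *net)
{
	struct foo_net *fn = net_generic(net, foo_net_id);

	fn->counter = 0;	/* storage already zeroed by ops_init() */
	return 0;
}

static void __net_exit foo_net_exit(struct net *net)
{
	/* the per-net struct itself is freed by ops_free() */
}

static struct pernet_operations foo_net_ops = {
	.init = foo_net_init,
	.exit = foo_net_exit,
	.id   = &foo_net_id,
	.size = sizeof(struct foo_net),
};

static int __init foo_init(void)
{
	return register_pernet_subsys(&foo_net_ops);
}
#endif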
/**
 *	unregister_pernet_subsys - unregister a network namespace subsystem
 *	@ops: pernet operations structure to manipulate
 *
 *	Remove the pernet operations structure from the list to be
 *	used when network namespaces are created or destroyed.  In
 *	addition run the exit method for all existing network
 *	namespaces.
 */
void unregister_pernet_subsys(struct pernet_operations *ops)
{
	mutex_lock(&net_mutex);
	unregister_pernet_operations(ops);
	mutex_unlock(&net_mutex);
}
EXPORT_SYMBOL_GPL(unregister_pernet_subsys);
/**
 *	register_pernet_device - register a network namespace device
 *	@ops: pernet operations structure for the subsystem
 *
 *	Register a device which has init and exit functions
 *	that are called when network namespaces are created and
 *	destroyed respectively.
 *
 *	When registered all network namespace init functions are
 *	called for every existing network namespace, allowing kernel
 *	modules to have a race-free view of the set of network namespaces.
 *
 *	When a new network namespace is created all of the init
 *	methods are called in the order in which they were registered.
 *
 *	When a network namespace is destroyed all of the exit methods
 *	are called in the reverse of the order with which they were
 *	registered.
 */
int register_pernet_device(struct pernet_operations *ops)
{
	int error;

	mutex_lock(&net_mutex);
	error = register_pernet_operations(&pernet_list, ops);
	if (!error && (first_device == &pernet_list))
		first_device = &ops->list;
	mutex_unlock(&net_mutex);
	return error;
}
EXPORT_SYMBOL_GPL(register_pernet_device);
/**
 *	unregister_pernet_device - unregister a network namespace netdevice
 *	@ops: pernet operations structure to manipulate
 *
 *	Remove the pernet operations structure from the list to be
 *	used when network namespaces are created or destroyed.  In
 *	addition run the exit method for all existing network
 *	namespaces.
 */
void unregister_pernet_device(struct pernet_operations *ops)
{
	mutex_lock(&net_mutex);
	if (&ops->list == first_device)
		first_device = first_device->next;
	unregister_pernet_operations(ops);
	mutex_unlock(&net_mutex);
}
EXPORT_SYMBOL_GPL(unregister_pernet_device);
#ifdef CONFIG_NET_NS
static struct ns_common *netns_get(struct task_struct *task)
{
	struct net *net = NULL;
	struct nsproxy *nsproxy;

	task_lock(task);
	nsproxy = task->nsproxy;
	if (nsproxy)
		net = get_net(nsproxy->net_ns);
	task_unlock(task);

	return net ? &net->ns : NULL;
}
static inline struct net *to_net_ns(struct ns_common *ns)
{
	return container_of(ns, struct net, ns);
}
static void netns_put(struct ns_common *ns)
{
	put_net(to_net_ns(ns));
}
static int netns_install(struct nsproxy *nsproxy, struct ns_common *ns)
{
	struct net *net = to_net_ns(ns);

	if (!ns_capable(net->user_ns, CAP_SYS_ADMIN) ||
	    !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
		return -EPERM;

	put_net(nsproxy->net_ns);
	nsproxy->net_ns = get_net(net);
	return 0;
}

const struct proc_ns_operations netns_operations = {
	.name		= "net",
	.type		= CLONE_NEWNET,
	.get		= netns_get,
	.put		= netns_put,
	.install	= netns_install,
};
#endif