/*
 * NETLINK	Kernel-user communication protocol.
 *
 * 		Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>
 * 				Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 * 				Patrick McHardy <kaber@trash.net>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Tue Jun 26 14:36:48 MEST 2001 Herbert "herp" Rosmanith
 *                               added netlink_proto_exit
 * Tue Jan 22 18:32:44 BRST 2002 Arnaldo C. de Melo <acme@conectiva.com.br>
 * 				 use nlk_sk, as sk->protinfo is on a diet 8)
 * Fri Jul 22 19:51:12 MEST 2005 Harald Welte <laforge@gnumonks.org>
 * 				- inc module use count of module that owns
 * 				  the kernel socket in case userspace opens
 * 				  socket of same protocol
 * 				- remove all module support, since netlink is
 * 				  mandatory if CONFIG_NET=y these days
 */

#include <linux/module.h>

#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/fcntl.h>
#include <linux/termios.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/security.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/random.h>
#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/audit.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <linux/if_arp.h>
#include <linux/rhashtable.h>
#include <asm/cacheflush.h>
#include <linux/hash.h>
#include <linux/genetlink.h>

#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/scm.h>
#include <net/netlink.h>

#include "af_netlink.h"

struct listeners {
	struct rcu_head		rcu;
	unsigned long		masks[0];
};

/* state bits */
#define NETLINK_S_CONGESTED		0x0

/* flags */
#define NETLINK_F_KERNEL_SOCKET	0x1
#define NETLINK_F_RECV_PKTINFO		0x2
#define NETLINK_F_BROADCAST_SEND_ERROR	0x4
#define NETLINK_F_RECV_NO_ENOBUFS	0x8
#define NETLINK_F_LISTEN_ALL_NSID	0x10
#define NETLINK_F_CAP_ACK		0x20

static inline int netlink_is_kernel(struct sock *sk)
{
	return nlk_sk(sk)->flags & NETLINK_F_KERNEL_SOCKET;
}

struct netlink_table *nl_table __read_mostly;
EXPORT_SYMBOL_GPL(nl_table);

static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);

static int netlink_dump(struct sock *sk);
static void netlink_skb_destructor(struct sk_buff *skb);

/* nl_table locking explained:
 * Lookup and traversal are protected with an RCU read-side lock. Insertion
 * and removal are protected with per bucket lock while using RCU list
 * modification primitives and may run in parallel to RCU protected lookups.
 * Destruction of the Netlink socket may only occur *after* nl_table_lock has
 * been acquired - either during or after the socket has been removed from
 * the list and after an RCU grace period.
 */
DEFINE_RWLOCK(nl_table_lock);
EXPORT_SYMBOL_GPL(nl_table_lock);
static atomic_t nl_table_users = ATOMIC_INIT(0);

#define nl_deref_protected(X) rcu_dereference_protected(X, lockdep_is_held(&nl_table_lock))
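/* Editor's sketch (not part of the original source): the read side of the
 * scheme described above, as netlink_lookup() later in this file uses it -
 * traversal under rcu_read_lock(), taking a reference before the read-side
 * critical section ends:
 *
 *	rcu_read_lock();
 *	sk = __netlink_lookup(table, portid, net);
 *	if (sk)
 *		sock_hold(sk);
 *	rcu_read_unlock();
 */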
static ATOMIC_NOTIFIER_HEAD(netlink_chain);

static DEFINE_SPINLOCK(netlink_tap_lock);
static struct list_head netlink_tap_all __read_mostly;

static const struct rhashtable_params netlink_rhashtable_params;

static inline u32 netlink_group_mask(u32 group)
{
	return group ? 1 << (group - 1) : 0;
}

int netlink_add_tap(struct netlink_tap *nt)
{
	if (unlikely(nt->dev->type != ARPHRD_NETLINK))
		return -EINVAL;

	spin_lock(&netlink_tap_lock);
	list_add_rcu(&nt->list, &netlink_tap_all);
	spin_unlock(&netlink_tap_lock);

	__module_get(nt->module);

	return 0;
}
EXPORT_SYMBOL_GPL(netlink_add_tap);

static int __netlink_remove_tap(struct netlink_tap *nt)
{
	bool found = false;
	struct netlink_tap *tmp;

	spin_lock(&netlink_tap_lock);

	list_for_each_entry(tmp, &netlink_tap_all, list) {
		if (nt == tmp) {
			list_del_rcu(&nt->list);
			found = true;
			goto out;
		}
	}

	pr_warn("__netlink_remove_tap: %p not found\n", nt);
out:
	spin_unlock(&netlink_tap_lock);

	if (found)
		module_put(nt->module);

	return found ? 0 : -ENODEV;
}

int netlink_remove_tap(struct netlink_tap *nt)
{
	int ret;

	ret = __netlink_remove_tap(nt);
	synchronize_net();

	return ret;
}
EXPORT_SYMBOL_GPL(netlink_remove_tap);

static bool netlink_filter_tap(const struct sk_buff *skb)
{
	struct sock *sk = skb->sk;

	/* We take the more conservative approach and
	 * whitelist socket protocols that may pass.
	 */
	switch (sk->sk_protocol) {
	case NETLINK_ROUTE:
	case NETLINK_USERSOCK:
	case NETLINK_SOCK_DIAG:
	case NETLINK_NFLOG:
	case NETLINK_XFRM:
	case NETLINK_FIB_LOOKUP:
	case NETLINK_NETFILTER:
	case NETLINK_GENERIC:
		return true;
	}

	return false;
}

static int __netlink_deliver_tap_skb(struct sk_buff *skb,
				     struct net_device *dev)
{
	struct sk_buff *nskb;
	struct sock *sk = skb->sk;
	int ret = -ENOMEM;

	dev_hold(dev);
	nskb = skb_clone(skb, GFP_ATOMIC);
	if (nskb) {
		nskb->dev = dev;
		nskb->protocol = htons((u16) sk->sk_protocol);
		nskb->pkt_type = netlink_is_kernel(sk) ?
				 PACKET_KERNEL : PACKET_USER;
		skb_reset_network_header(nskb);
		ret = dev_queue_xmit(nskb);
		if (unlikely(ret > 0))
			ret = net_xmit_errno(ret);
	}

	dev_put(dev);
	return ret;
}

static void __netlink_deliver_tap(struct sk_buff *skb)
{
	int ret;
	struct netlink_tap *tmp;

	if (!netlink_filter_tap(skb))
		return;

	list_for_each_entry_rcu(tmp, &netlink_tap_all, list) {
		ret = __netlink_deliver_tap_skb(skb, tmp->dev);
		if (unlikely(ret))
			break;
	}
}

static void netlink_deliver_tap(struct sk_buff *skb)
{
	rcu_read_lock();

	if (unlikely(!list_empty(&netlink_tap_all)))
		__netlink_deliver_tap(skb);

	rcu_read_unlock();
}

static void netlink_deliver_tap_kernel(struct sock *dst, struct sock *src,
				       struct sk_buff *skb)
{
	if (!(netlink_is_kernel(dst) && netlink_is_kernel(src)))
		netlink_deliver_tap(skb);
}

static void netlink_overrun(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (!(nlk->flags & NETLINK_F_RECV_NO_ENOBUFS)) {
		if (!test_and_set_bit(NETLINK_S_CONGESTED,
				      &nlk_sk(sk)->state)) {
			sk->sk_err = ENOBUFS;
			sk->sk_error_report(sk);
		}
	}
	atomic_inc(&sk->sk_drops);
}

static void netlink_rcv_wake(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (skb_queue_empty(&sk->sk_receive_queue))
		clear_bit(NETLINK_S_CONGESTED, &nlk->state);
	if (!test_bit(NETLINK_S_CONGESTED, &nlk->state))
		wake_up_interruptible(&nlk->wait);
}
#ifdef CONFIG_NETLINK_MMAP
static bool netlink_skb_is_mmaped(const struct sk_buff *skb)
{
	return NETLINK_CB(skb).flags & NETLINK_SKB_MMAPED;
}

static bool netlink_rx_is_mmaped(struct sock *sk)
{
	return nlk_sk(sk)->rx_ring.pg_vec != NULL;
}

static bool netlink_tx_is_mmaped(struct sock *sk)
{
	return nlk_sk(sk)->tx_ring.pg_vec != NULL;
}

static __pure struct page *pgvec_to_page(const void *addr)
{
	if (is_vmalloc_addr(addr))
		return vmalloc_to_page(addr);
	else
		return virt_to_page(addr);
}

static void free_pg_vec(void **pg_vec, unsigned int order, unsigned int len)
{
	unsigned int i;

	for (i = 0; i < len; i++) {
		if (pg_vec[i] != NULL) {
			if (is_vmalloc_addr(pg_vec[i]))
				vfree(pg_vec[i]);
			else
				free_pages((unsigned long)pg_vec[i], order);
		}
	}
	kfree(pg_vec);
}

static void *alloc_one_pg_vec_page(unsigned long order)
{
	void *buffer;
	gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP | __GFP_ZERO |
			  __GFP_NOWARN | __GFP_NORETRY;

	buffer = (void *)__get_free_pages(gfp_flags, order);
	if (buffer != NULL)
		return buffer;

	buffer = vzalloc((1 << order) * PAGE_SIZE);
	if (buffer != NULL)
		return buffer;

	gfp_flags &= ~__GFP_NORETRY;
	return (void *)__get_free_pages(gfp_flags, order);
}

static void **alloc_pg_vec(struct netlink_sock *nlk,
			   struct nl_mmap_req *req, unsigned int order)
{
	unsigned int block_nr = req->nm_block_nr;
	unsigned int i;
	void **pg_vec;

	pg_vec = kcalloc(block_nr, sizeof(void *), GFP_KERNEL);
	if (pg_vec == NULL)
		return NULL;

	for (i = 0; i < block_nr; i++) {
		pg_vec[i] = alloc_one_pg_vec_page(order);
		if (pg_vec[i] == NULL)
			goto err1;
	}

	return pg_vec;
err1:
	free_pg_vec(pg_vec, order, block_nr);
	return NULL;
}

static void
__netlink_set_ring(struct sock *sk, struct nl_mmap_req *req, bool tx_ring, void **pg_vec,
		   unsigned int order)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	struct sk_buff_head *queue;
	struct netlink_ring *ring;

	queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
	ring  = tx_ring ? &nlk->tx_ring : &nlk->rx_ring;

	spin_lock_bh(&queue->lock);

	ring->frame_max		= req->nm_frame_nr - 1;
	ring->head		= 0;
	ring->frame_size	= req->nm_frame_size;
	ring->pg_vec_pages	= req->nm_block_size / PAGE_SIZE;

	swap(ring->pg_vec_len, req->nm_block_nr);
	swap(ring->pg_vec_order, order);
	swap(ring->pg_vec, pg_vec);

	__skb_queue_purge(queue);
	spin_unlock_bh(&queue->lock);

	WARN_ON(atomic_read(&nlk->mapped));

	if (pg_vec)
		free_pg_vec(pg_vec, order, req->nm_block_nr);
}

static int netlink_set_ring(struct sock *sk, struct nl_mmap_req *req,
			    bool tx_ring)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	struct netlink_ring *ring;
	void **pg_vec = NULL;
	unsigned int order = 0;

	ring  = tx_ring ? &nlk->tx_ring : &nlk->rx_ring;

	if (atomic_read(&nlk->mapped))
		return -EBUSY;
	if (atomic_read(&ring->pending))
		return -EBUSY;

	if (req->nm_block_nr) {
		if (ring->pg_vec != NULL)
			return -EBUSY;

		if ((int)req->nm_block_size <= 0)
			return -EINVAL;
		if (!PAGE_ALIGNED(req->nm_block_size))
			return -EINVAL;
		if (req->nm_frame_size < NL_MMAP_HDRLEN)
			return -EINVAL;
		if (!IS_ALIGNED(req->nm_frame_size, NL_MMAP_MSG_ALIGNMENT))
			return -EINVAL;

		ring->frames_per_block = req->nm_block_size /
					 req->nm_frame_size;
		if (ring->frames_per_block == 0)
			return -EINVAL;
		if (ring->frames_per_block * req->nm_block_nr !=
		    req->nm_frame_nr)
			return -EINVAL;

		order = get_order(req->nm_block_size);
		pg_vec = alloc_pg_vec(nlk, req, order);
		if (pg_vec == NULL)
			return -ENOMEM;
	} else {
		if (req->nm_frame_nr)
			return -EINVAL;
	}

	mutex_lock(&nlk->pg_vec_lock);
	if (atomic_read(&nlk->mapped) == 0) {
		__netlink_set_ring(sk, req, tx_ring, pg_vec, order);
		mutex_unlock(&nlk->pg_vec_lock);
		return 0;
	}

	mutex_unlock(&nlk->pg_vec_lock);

	if (pg_vec)
		free_pg_vec(pg_vec, order, req->nm_block_nr);

	return -EBUSY;
}
static void netlink_mm_open(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct socket *sock = file->private_data;
	struct sock *sk = sock->sk;

	if (sk)
		atomic_inc(&nlk_sk(sk)->mapped);
}

static void netlink_mm_close(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct socket *sock = file->private_data;
	struct sock *sk = sock->sk;

	if (sk)
		atomic_dec(&nlk_sk(sk)->mapped);
}

static const struct vm_operations_struct netlink_mmap_ops = {
	.open	= netlink_mm_open,
	.close	= netlink_mm_close,
};

static int netlink_mmap(struct file *file, struct socket *sock,
			struct vm_area_struct *vma)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	struct netlink_ring *ring;
	unsigned long start, size, expected;
	unsigned int i;
	int err = -EINVAL;

	if (vma->vm_pgoff)
		return -EINVAL;

	mutex_lock(&nlk->pg_vec_lock);

	expected = 0;
	for (ring = &nlk->rx_ring; ring <= &nlk->tx_ring; ring++) {
		if (ring->pg_vec == NULL)
			continue;
		expected += ring->pg_vec_len * ring->pg_vec_pages * PAGE_SIZE;
	}

	if (expected == 0)
		goto out;

	size = vma->vm_end - vma->vm_start;
	if (size != expected)
		goto out;

	start = vma->vm_start;
	for (ring = &nlk->rx_ring; ring <= &nlk->tx_ring; ring++) {
		if (ring->pg_vec == NULL)
			continue;

		for (i = 0; i < ring->pg_vec_len; i++) {
			struct page *page;
			void *kaddr = ring->pg_vec[i];
			unsigned int pg_num;

			for (pg_num = 0; pg_num < ring->pg_vec_pages; pg_num++) {
				page = pgvec_to_page(kaddr);
				err = vm_insert_page(vma, start, page);
				if (err < 0)
					goto out;
				start += PAGE_SIZE;
				kaddr += PAGE_SIZE;
			}
		}
	}

	atomic_inc(&nlk->mapped);
	vma->vm_ops = &netlink_mmap_ops;
	err = 0;
out:
	mutex_unlock(&nlk->pg_vec_lock);
	return err;
}

static void netlink_frame_flush_dcache(const struct nl_mmap_hdr *hdr, unsigned int nm_len)
{
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
	struct page *p_start, *p_end;

	/* First page is flushed through netlink_{get,set}_status */
	p_start = pgvec_to_page(hdr + PAGE_SIZE);
	p_end   = pgvec_to_page((void *)hdr + NL_MMAP_HDRLEN + nm_len - 1);
	while (p_start <= p_end) {
		flush_dcache_page(p_start);
		p_start++;
	}
#endif
}

static enum nl_mmap_status netlink_get_status(const struct nl_mmap_hdr *hdr)
{
	smp_rmb();
	flush_dcache_page(pgvec_to_page(hdr));
	return hdr->nm_status;
}

static void netlink_set_status(struct nl_mmap_hdr *hdr,
			       enum nl_mmap_status status)
{
	smp_mb();
	hdr->nm_status = status;
	flush_dcache_page(pgvec_to_page(hdr));
}

static struct nl_mmap_hdr *
__netlink_lookup_frame(const struct netlink_ring *ring, unsigned int pos)
{
	unsigned int pg_vec_pos, frame_off;

	pg_vec_pos = pos / ring->frames_per_block;
	frame_off  = pos % ring->frames_per_block;

	return ring->pg_vec[pg_vec_pos] + (frame_off * ring->frame_size);
}

static struct nl_mmap_hdr *
netlink_lookup_frame(const struct netlink_ring *ring, unsigned int pos,
		     enum nl_mmap_status status)
{
	struct nl_mmap_hdr *hdr;

	hdr = __netlink_lookup_frame(ring, pos);
	if (netlink_get_status(hdr) != status)
		return NULL;

	return hdr;
}

static struct nl_mmap_hdr *
netlink_current_frame(const struct netlink_ring *ring,
		      enum nl_mmap_status status)
{
	return netlink_lookup_frame(ring, ring->head, status);
}

static void netlink_increment_head(struct netlink_ring *ring)
{
	ring->head = ring->head != ring->frame_max ? ring->head + 1 : 0;
}

static void netlink_forward_ring(struct netlink_ring *ring)
{
	unsigned int head = ring->head;
	const struct nl_mmap_hdr *hdr;

	do {
		hdr = __netlink_lookup_frame(ring, ring->head);
		if (hdr->nm_status == NL_MMAP_STATUS_UNUSED)
			break;
		if (hdr->nm_status != NL_MMAP_STATUS_SKIP)
			break;
		netlink_increment_head(ring);
	} while (ring->head != head);
}

static bool netlink_has_valid_frame(struct netlink_ring *ring)
{
	unsigned int head = ring->head, pos = head;
	const struct nl_mmap_hdr *hdr;

	do {
		hdr = __netlink_lookup_frame(ring, pos);
		if (hdr->nm_status == NL_MMAP_STATUS_VALID)
			return true;
		pos = pos != 0 ? pos - 1 : ring->frame_max;
	} while (pos != head);

	return false;
}

static bool netlink_dump_space(struct netlink_sock *nlk)
{
	struct netlink_ring *ring = &nlk->rx_ring;
	struct nl_mmap_hdr *hdr;
	unsigned int n;

	hdr = netlink_current_frame(ring, NL_MMAP_STATUS_UNUSED);
	if (hdr == NULL)
		return false;

	n = ring->head + ring->frame_max / 2;
	if (n > ring->frame_max)
		n -= ring->frame_max;

	hdr = __netlink_lookup_frame(ring, n);

	return hdr->nm_status == NL_MMAP_STATUS_UNUSED;
}

static unsigned int netlink_poll(struct file *file, struct socket *sock,
				 poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	unsigned int mask;
	int err;

	if (nlk->rx_ring.pg_vec != NULL) {
		/* Memory mapped sockets don't call recvmsg(), so flow control
		 * for dumps is performed here. A dump is allowed to continue
		 * if at least half the ring is unused.
		 */
		while (nlk->cb_running && netlink_dump_space(nlk)) {
			err = netlink_dump(sk);
			if (err < 0) {
				sk->sk_err = -err;
				sk->sk_error_report(sk);
				break;
			}
		}
		netlink_rcv_wake(sk);
	}

	mask = datagram_poll(file, sock, wait);

	/* We could already have received frames in the normal receive
	 * queue, that will show up as NL_MMAP_STATUS_COPY in the ring,
	 * so if mask contains pollin/etc already, there's no point
	 * walking the ring.
	 */
	if ((mask & (POLLIN | POLLRDNORM)) != (POLLIN | POLLRDNORM)) {
		spin_lock_bh(&sk->sk_receive_queue.lock);
		if (nlk->rx_ring.pg_vec) {
			if (netlink_has_valid_frame(&nlk->rx_ring))
				mask |= POLLIN | POLLRDNORM;
		}
		spin_unlock_bh(&sk->sk_receive_queue.lock);
	}

	spin_lock_bh(&sk->sk_write_queue.lock);
	if (nlk->tx_ring.pg_vec) {
		if (netlink_current_frame(&nlk->tx_ring, NL_MMAP_STATUS_UNUSED))
			mask |= POLLOUT | POLLWRNORM;
	}
	spin_unlock_bh(&sk->sk_write_queue.lock);

	return mask;
}
static struct nl_mmap_hdr *netlink_mmap_hdr(struct sk_buff *skb)
{
	return (struct nl_mmap_hdr *)(skb->head - NL_MMAP_HDRLEN);
}

static void netlink_ring_setup_skb(struct sk_buff *skb, struct sock *sk,
				   struct netlink_ring *ring,
				   struct nl_mmap_hdr *hdr)
{
	unsigned int size;
	void *data;

	size = ring->frame_size - NL_MMAP_HDRLEN;
	data = (void *)hdr + NL_MMAP_HDRLEN;

	skb->head	= data;
	skb->data	= data;
	skb_reset_tail_pointer(skb);
	skb->end	= skb->tail + size;
	skb->len	= 0;

	skb->destructor	= netlink_skb_destructor;
	NETLINK_CB(skb).flags |= NETLINK_SKB_MMAPED;
	NETLINK_CB(skb).sk = sk;
}

static int netlink_mmap_sendmsg(struct sock *sk, struct msghdr *msg,
				u32 dst_portid, u32 dst_group,
				struct scm_cookie *scm)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	struct netlink_ring *ring;
	struct nl_mmap_hdr *hdr;
	struct sk_buff *skb;
	unsigned int maxlen;
	int err = 0, len = 0;

	mutex_lock(&nlk->pg_vec_lock);

	ring   = &nlk->tx_ring;
	maxlen = ring->frame_size - NL_MMAP_HDRLEN;

	do {
		unsigned int nm_len;

		hdr = netlink_current_frame(ring, NL_MMAP_STATUS_VALID);
		if (hdr == NULL) {
			if (!(msg->msg_flags & MSG_DONTWAIT) &&
			    atomic_read(&nlk->tx_ring.pending))
				schedule();
			continue;
		}

		nm_len = ACCESS_ONCE(hdr->nm_len);
		if (nm_len > maxlen) {
			err = -EINVAL;
			goto out;
		}

		netlink_frame_flush_dcache(hdr, nm_len);

		skb = alloc_skb(nm_len, GFP_KERNEL);
		if (skb == NULL) {
			err = -ENOBUFS;
			goto out;
		}
		__skb_put(skb, nm_len);
		memcpy(skb->data, (void *)hdr + NL_MMAP_HDRLEN, nm_len);
		netlink_set_status(hdr, NL_MMAP_STATUS_UNUSED);

		netlink_increment_head(ring);

		NETLINK_CB(skb).portid	  = nlk->portid;
		NETLINK_CB(skb).dst_group = dst_group;
		NETLINK_CB(skb).creds	  = scm->creds;

		err = security_netlink_send(sk, skb);
		if (err) {
			kfree_skb(skb);
			goto out;
		}

		if (unlikely(dst_group)) {
			atomic_inc(&skb->users);
			netlink_broadcast(sk, skb, dst_portid, dst_group,
					  GFP_KERNEL);
		}
		err = netlink_unicast(sk, skb, dst_portid,
				      msg->msg_flags & MSG_DONTWAIT);
		if (err < 0)
			goto out;
		len += err;

	} while (hdr != NULL ||
		 (!(msg->msg_flags & MSG_DONTWAIT) &&
		  atomic_read(&nlk->tx_ring.pending)));

	if (len > 0)
		err = len;
out:
	mutex_unlock(&nlk->pg_vec_lock);
	return err;
}

static void netlink_queue_mmaped_skb(struct sock *sk, struct sk_buff *skb)
{
	struct nl_mmap_hdr *hdr;

	hdr = netlink_mmap_hdr(skb);
	hdr->nm_len	= skb->len;
	hdr->nm_group	= NETLINK_CB(skb).dst_group;
	hdr->nm_pid	= NETLINK_CB(skb).creds.pid;
	hdr->nm_uid	= from_kuid(sk_user_ns(sk), NETLINK_CB(skb).creds.uid);
	hdr->nm_gid	= from_kgid(sk_user_ns(sk), NETLINK_CB(skb).creds.gid);
	netlink_frame_flush_dcache(hdr, hdr->nm_len);
	netlink_set_status(hdr, NL_MMAP_STATUS_VALID);

	NETLINK_CB(skb).flags |= NETLINK_SKB_DELIVERED;
	kfree_skb(skb);
}

static void netlink_ring_set_copied(struct sock *sk, struct sk_buff *skb)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	struct netlink_ring *ring = &nlk->rx_ring;
	struct nl_mmap_hdr *hdr;

	spin_lock_bh(&sk->sk_receive_queue.lock);
	hdr = netlink_current_frame(ring, NL_MMAP_STATUS_UNUSED);
	if (hdr == NULL) {
		spin_unlock_bh(&sk->sk_receive_queue.lock);
		kfree_skb(skb);
		netlink_overrun(sk);
		return;
	}
	netlink_increment_head(ring);
	__skb_queue_tail(&sk->sk_receive_queue, skb);
	spin_unlock_bh(&sk->sk_receive_queue.lock);

	hdr->nm_len	= skb->len;
	hdr->nm_group	= NETLINK_CB(skb).dst_group;
	hdr->nm_pid	= NETLINK_CB(skb).creds.pid;
	hdr->nm_uid	= from_kuid(sk_user_ns(sk), NETLINK_CB(skb).creds.uid);
	hdr->nm_gid	= from_kgid(sk_user_ns(sk), NETLINK_CB(skb).creds.gid);
	netlink_set_status(hdr, NL_MMAP_STATUS_COPY);
}

#else /* CONFIG_NETLINK_MMAP */
#define netlink_skb_is_mmaped(skb)	false
#define netlink_rx_is_mmaped(sk)	false
#define netlink_tx_is_mmaped(sk)	false
#define netlink_mmap			sock_no_mmap
#define netlink_poll			datagram_poll
#define netlink_mmap_sendmsg(sk, msg, dst_portid, dst_group, scm)	0
#endif /* CONFIG_NETLINK_MMAP */
static void netlink_skb_destructor(struct sk_buff *skb)
{
#ifdef CONFIG_NETLINK_MMAP
	struct nl_mmap_hdr *hdr;
	struct netlink_ring *ring;
	struct sock *sk;

	/* If a packet from the kernel to userspace was freed because of an
	 * error without being delivered to userspace, the kernel must reset
	 * the status. In the direction userspace to kernel, the status is
	 * always reset here after the packet was processed and freed.
	 */
	if (netlink_skb_is_mmaped(skb)) {
		hdr = netlink_mmap_hdr(skb);
		sk = NETLINK_CB(skb).sk;

		if (NETLINK_CB(skb).flags & NETLINK_SKB_TX) {
			netlink_set_status(hdr, NL_MMAP_STATUS_UNUSED);
			ring = &nlk_sk(sk)->tx_ring;
		} else {
			if (!(NETLINK_CB(skb).flags & NETLINK_SKB_DELIVERED)) {
				hdr->nm_len = 0;
				netlink_set_status(hdr, NL_MMAP_STATUS_VALID);
			}
			ring = &nlk_sk(sk)->rx_ring;
		}

		WARN_ON(atomic_read(&ring->pending) == 0);
		atomic_dec(&ring->pending);
		sock_put(sk);

		skb->head = NULL;
	}
#endif
	if (is_vmalloc_addr(skb->head)) {
		if (!skb->cloned ||
		    !atomic_dec_return(&(skb_shinfo(skb)->dataref)))
			vfree(skb->head);

		skb->head = NULL;
	}
	if (skb->sk != NULL)
		sock_rfree(skb);
}

static void netlink_skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
{
	WARN_ON(skb->sk != NULL);
	skb->sk = sk;
	skb->destructor = netlink_skb_destructor;
	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
	sk_mem_charge(sk, skb->truesize);
}

static void netlink_sock_destruct(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (nlk->cb_running) {
		if (nlk->cb.done)
			nlk->cb.done(&nlk->cb);

		module_put(nlk->cb.module);
		kfree_skb(nlk->cb.skb);
	}

	skb_queue_purge(&sk->sk_receive_queue);
#ifdef CONFIG_NETLINK_MMAP
	{
		struct nl_mmap_req req;

		memset(&req, 0, sizeof(req));
		if (nlk->rx_ring.pg_vec)
			__netlink_set_ring(sk, &req, false, NULL, 0);
		memset(&req, 0, sizeof(req));
		if (nlk->tx_ring.pg_vec)
			__netlink_set_ring(sk, &req, true, NULL, 0);
	}
#endif /* CONFIG_NETLINK_MMAP */

	if (!sock_flag(sk, SOCK_DEAD)) {
		printk(KERN_ERR "Freeing alive netlink socket %p\n", sk);
		return;
	}

	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
	WARN_ON(atomic_read(&sk->sk_wmem_alloc));
	WARN_ON(nlk_sk(sk)->groups);
}

/* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it is _very_ bad on
 * SMP. Look, when several writers sleep and reader wakes them up, all but one
 * immediately hit write lock and grab all the cpus. Exclusive sleep solves
 * this, _but_ remember, it adds useless work on UP machines.
 */

void netlink_table_grab(void)
	__acquires(nl_table_lock)
{
	might_sleep();

	write_lock_irq(&nl_table_lock);

	if (atomic_read(&nl_table_users)) {
		DECLARE_WAITQUEUE(wait, current);

		add_wait_queue_exclusive(&nl_table_wait, &wait);
		for (;;) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (atomic_read(&nl_table_users) == 0)
				break;
			write_unlock_irq(&nl_table_lock);
			schedule();
			write_lock_irq(&nl_table_lock);
		}

		__set_current_state(TASK_RUNNING);
		remove_wait_queue(&nl_table_wait, &wait);
	}
}

void netlink_table_ungrab(void)
	__releases(nl_table_lock)
{
	write_unlock_irq(&nl_table_lock);
	wake_up(&nl_table_wait);
}

static inline void
netlink_lock_table(void)
{
	/* read_lock() synchronizes us to netlink_table_grab */

	read_lock(&nl_table_lock);
	atomic_inc(&nl_table_users);
	read_unlock(&nl_table_lock);
}

static inline void
netlink_unlock_table(void)
{
	if (atomic_dec_and_test(&nl_table_users))
		wake_up(&nl_table_wait);
}
struct netlink_compare_arg
{
	possible_net_t pnet;
	u32 portid;
};

/* Doing sizeof directly may yield 4 extra bytes on 64-bit. */
#define netlink_compare_arg_len \
	(offsetof(struct netlink_compare_arg, portid) + sizeof(u32))
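/* Editor's note (not in the original source): on a 64-bit build where
 * possible_net_t holds a pointer, sizeof(struct netlink_compare_arg) rounds
 * up to 16 bytes (8 + 4, padded to an 8-byte multiple), while only the 12
 * bytes up to and including portid are meaningful; the offsetof() form above
 * keeps the uninitialized padding out of the hash key.
 */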
static inline int netlink_compare(struct rhashtable_compare_arg *arg,
				  const void *ptr)
{
	const struct netlink_compare_arg *x = arg->key;
	const struct netlink_sock *nlk = ptr;

	return nlk->portid != x->portid ||
	       !net_eq(sock_net(&nlk->sk), read_pnet(&x->pnet));
}

static void netlink_compare_arg_init(struct netlink_compare_arg *arg,
				     struct net *net, u32 portid)
{
	memset(arg, 0, sizeof(*arg));
	write_pnet(&arg->pnet, net);
	arg->portid = portid;
}

static struct sock *__netlink_lookup(struct netlink_table *table, u32 portid,
				     struct net *net)
{
	struct netlink_compare_arg arg;

	netlink_compare_arg_init(&arg, net, portid);
	return rhashtable_lookup_fast(&table->hash, &arg,
				      netlink_rhashtable_params);
}

static int __netlink_insert(struct netlink_table *table, struct sock *sk)
{
	struct netlink_compare_arg arg;

	netlink_compare_arg_init(&arg, sock_net(sk), nlk_sk(sk)->portid);
	return rhashtable_lookup_insert_key(&table->hash, &arg,
					    &nlk_sk(sk)->node,
					    netlink_rhashtable_params);
}

static struct sock *netlink_lookup(struct net *net, int protocol, u32 portid)
{
	struct netlink_table *table = &nl_table[protocol];
	struct sock *sk;

	rcu_read_lock();
	sk = __netlink_lookup(table, portid, net);
	if (sk)
		sock_hold(sk);
	rcu_read_unlock();

	return sk;
}

static const struct proto_ops netlink_ops;

static void
netlink_update_listeners(struct sock *sk)
{
	struct netlink_table *tbl = &nl_table[sk->sk_protocol];
	unsigned long mask;
	unsigned int i;
	struct listeners *listeners;

	listeners = nl_deref_protected(tbl->listeners);
	if (!listeners)
		return;

	for (i = 0; i < NLGRPLONGS(tbl->groups); i++) {
		mask = 0;
		sk_for_each_bound(sk, &tbl->mc_list) {
			if (i < NLGRPLONGS(nlk_sk(sk)->ngroups))
				mask |= nlk_sk(sk)->groups[i];
		}
		listeners->masks[i] = mask;
	}
	/* this function is only called with the netlink table "grabbed", which
	 * makes sure updates are visible before bind or setsockopt return. */
}

static int netlink_insert(struct sock *sk, u32 portid)
{
	struct netlink_table *table = &nl_table[sk->sk_protocol];
	int err;

	lock_sock(sk);

	err = -EBUSY;
	if (nlk_sk(sk)->portid)
		goto err;

	err = -ENOMEM;
	if (BITS_PER_LONG > 32 &&
	    unlikely(atomic_read(&table->hash.nelems) >= UINT_MAX))
		goto err;

	nlk_sk(sk)->portid = portid;
	sock_hold(sk);

	err = __netlink_insert(table, sk);
	if (err) {
		/* In case the hashtable backend returns with -EBUSY
		 * from here, it must not escape to the caller.
		 */
		if (unlikely(err == -EBUSY))
			err = -EOVERFLOW;
		if (err == -EEXIST)
			err = -EADDRINUSE;
		nlk_sk(sk)->portid = 0;
		sock_put(sk);
	}

err:
	release_sock(sk);
	return err;
}

static void netlink_remove(struct sock *sk)
{
	struct netlink_table *table;

	table = &nl_table[sk->sk_protocol];
	if (!rhashtable_remove_fast(&table->hash, &nlk_sk(sk)->node,
				    netlink_rhashtable_params)) {
		WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
		__sock_put(sk);
	}

	netlink_table_grab();
	if (nlk_sk(sk)->subscriptions) {
		__sk_del_bind_node(sk);
		netlink_update_listeners(sk);
	}
	if (sk->sk_protocol == NETLINK_GENERIC)
		atomic_inc(&genl_sk_destructing_cnt);
	netlink_table_ungrab();
}

static struct proto netlink_proto = {
	.name	  = "NETLINK",
	.owner	  = THIS_MODULE,
	.obj_size = sizeof(struct netlink_sock),
};

static int __netlink_create(struct net *net, struct socket *sock,
			    struct mutex *cb_mutex, int protocol,
			    int kern)
{
	struct sock *sk;
	struct netlink_sock *nlk;

	sock->ops = &netlink_ops;

	sk = sk_alloc(net, PF_NETLINK, GFP_KERNEL, &netlink_proto, kern);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);

	nlk = nlk_sk(sk);
	if (cb_mutex) {
		nlk->cb_mutex = cb_mutex;
	} else {
		nlk->cb_mutex = &nlk->cb_def_mutex;
		mutex_init(nlk->cb_mutex);
	}
	init_waitqueue_head(&nlk->wait);
#ifdef CONFIG_NETLINK_MMAP
	mutex_init(&nlk->pg_vec_lock);
#endif

	sk->sk_destruct = netlink_sock_destruct;
	sk->sk_protocol = protocol;
	return 0;
}
static int netlink_create(struct net *net, struct socket *sock, int protocol,
			  int kern)
{
	struct module *module = NULL;
	struct mutex *cb_mutex;
	struct netlink_sock *nlk;
	int (*bind)(struct net *net, int group);
	void (*unbind)(struct net *net, int group);
	int err = 0;

	sock->state = SS_UNCONNECTED;

	if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
		return -ESOCKTNOSUPPORT;

	if (protocol < 0 || protocol >= MAX_LINKS)
		return -EPROTONOSUPPORT;

	netlink_lock_table();
#ifdef CONFIG_MODULES
	if (!nl_table[protocol].registered) {
		netlink_unlock_table();
		request_module("net-pf-%d-proto-%d", PF_NETLINK, protocol);
		netlink_lock_table();
	}
#endif
	if (nl_table[protocol].registered &&
	    try_module_get(nl_table[protocol].module))
		module = nl_table[protocol].module;
	else
		err = -EPROTONOSUPPORT;
	cb_mutex = nl_table[protocol].cb_mutex;
	bind = nl_table[protocol].bind;
	unbind = nl_table[protocol].unbind;
	netlink_unlock_table();

	if (err < 0)
		goto out;

	err = __netlink_create(net, sock, cb_mutex, protocol, kern);
	if (err < 0)
		goto out_module;

	local_bh_disable();
	sock_prot_inuse_add(net, &netlink_proto, 1);
	local_bh_enable();

	nlk = nlk_sk(sock->sk);
	nlk->module = module;
	nlk->netlink_bind = bind;
	nlk->netlink_unbind = unbind;
out:
	return err;

out_module:
	module_put(module);
	goto out;
}

static void deferred_put_nlk_sk(struct rcu_head *head)
{
	struct netlink_sock *nlk = container_of(head, struct netlink_sock, rcu);

	sock_put(&nlk->sk);
}

static int netlink_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk;

	if (!sk)
		return 0;

	netlink_remove(sk);
	sock_orphan(sk);
	nlk = nlk_sk(sk);

	/*
	 * OK. Socket is unlinked, any packets that arrive now
	 * will be purged.
	 */

	/* must not acquire netlink_table_lock in any way again before unbind
	 * and notifying genetlink is done as otherwise it might deadlock
	 */
	if (nlk->netlink_unbind) {
		int i;

		for (i = 0; i < nlk->ngroups; i++)
			if (test_bit(i, nlk->groups))
				nlk->netlink_unbind(sock_net(sk), i + 1);
	}
	if (sk->sk_protocol == NETLINK_GENERIC &&
	    atomic_dec_return(&genl_sk_destructing_cnt) == 0)
		wake_up(&genl_sk_destructing_waitq);

	sock->sk = NULL;
	wake_up_interruptible_all(&nlk->wait);

	skb_queue_purge(&sk->sk_write_queue);

	if (nlk->portid) {
		struct netlink_notify n = {
						.net = sock_net(sk),
						.protocol = sk->sk_protocol,
						.portid = nlk->portid,
					  };
		atomic_notifier_call_chain(&netlink_chain,
				NETLINK_URELEASE, &n);
	}

	module_put(nlk->module);

	if (netlink_is_kernel(sk)) {
		netlink_table_grab();
		BUG_ON(nl_table[sk->sk_protocol].registered == 0);
		if (--nl_table[sk->sk_protocol].registered == 0) {
			struct listeners *old;

			old = nl_deref_protected(nl_table[sk->sk_protocol].listeners);
			RCU_INIT_POINTER(nl_table[sk->sk_protocol].listeners, NULL);
			kfree_rcu(old, rcu);
			nl_table[sk->sk_protocol].module = NULL;
			nl_table[sk->sk_protocol].bind = NULL;
			nl_table[sk->sk_protocol].unbind = NULL;
			nl_table[sk->sk_protocol].flags = 0;
			nl_table[sk->sk_protocol].registered = 0;
		}
		netlink_table_ungrab();
	}

	kfree(nlk->groups);
	nlk->groups = NULL;

	local_bh_disable();
	sock_prot_inuse_add(sock_net(sk), &netlink_proto, -1);
	local_bh_enable();
	call_rcu(&nlk->rcu, deferred_put_nlk_sk);
	return 0;
}

static int netlink_autobind(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct netlink_table *table = &nl_table[sk->sk_protocol];
	s32 portid = task_tgid_vnr(current);
	int err;
	static s32 rover = -4096;
	bool ok;

retry:
	cond_resched();
	rcu_read_lock();
	ok = !__netlink_lookup(table, portid, net);
	rcu_read_unlock();
	if (!ok) {
		/* Bind collision, search negative portid values. */
		if (rover == -4096)
			/* rover will be in range [S32_MIN, -4097] */
			rover = S32_MIN + prandom_u32_max(-4096 - S32_MIN);
		else if (rover >= -4096)
			rover = -4097;
		portid = rover--;
		goto retry;
	}

	err = netlink_insert(sk, portid);
	if (err == -EADDRINUSE)
		goto retry;

	/* If 2 threads race to autobind, that is fine. */
	if (err == -EBUSY)
		err = 0;

	return err;
}
/**
 * __netlink_ns_capable - General netlink message capability test
 * @nsp: NETLINK_CB of the socket buffer holding a netlink command from userspace.
 * @user_ns: The user namespace of the capability to use
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket we received the message
 * from had, when the netlink socket was created, and the sender of the
 * message has, the capability @cap in the user namespace @user_ns.
 */
bool __netlink_ns_capable(const struct netlink_skb_parms *nsp,
			struct user_namespace *user_ns, int cap)
{
	return ((nsp->flags & NETLINK_SKB_DST) ||
		file_ns_capable(nsp->sk->sk_socket->file, user_ns, cap)) &&
		ns_capable(user_ns, cap);
}
EXPORT_SYMBOL(__netlink_ns_capable);

/**
 * netlink_ns_capable - General netlink message capability test
 * @skb: socket buffer holding a netlink command from userspace
 * @user_ns: The user namespace of the capability to use
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket we received the message
 * from had, when the netlink socket was created, and the sender of the
 * message has, the capability @cap in the user namespace @user_ns.
 */
bool netlink_ns_capable(const struct sk_buff *skb,
			struct user_namespace *user_ns, int cap)
{
	return __netlink_ns_capable(&NETLINK_CB(skb), user_ns, cap);
}
EXPORT_SYMBOL(netlink_ns_capable);

/**
 * netlink_capable - Netlink global message capability test
 * @skb: socket buffer holding a netlink command from userspace
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket we received the message
 * from had, when the netlink socket was created, and the sender of the
 * message has, the capability @cap in all user namespaces.
 */
bool netlink_capable(const struct sk_buff *skb, int cap)
{
	return netlink_ns_capable(skb, &init_user_ns, cap);
}
EXPORT_SYMBOL(netlink_capable);

/**
 * netlink_net_capable - Netlink network namespace message capability test
 * @skb: socket buffer holding a netlink command from userspace
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket we received the message
 * from had, when the netlink socket was created, and the sender of the
 * message has, the capability @cap over the network namespace of
 * the socket we received the message from.
 */
bool netlink_net_capable(const struct sk_buff *skb, int cap)
{
	return netlink_ns_capable(skb, sock_net(skb->sk)->user_ns, cap);
}
EXPORT_SYMBOL(netlink_net_capable);
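/* Editor's sketch (hypothetical handler, not part of the original source):
 * a typical caller guards a privileged command with one of the tests above,
 * e.g.:
 *
 *	static int my_doit(struct sk_buff *skb, struct nlmsghdr *nlh)
 *	{
 *		if (!netlink_net_capable(skb, CAP_NET_ADMIN))
 *			return -EPERM;
 *		...
 *	}
 */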
static inline int netlink_allowed(const struct socket *sock, unsigned int flag)
{
	return (nl_table[sock->sk->sk_protocol].flags & flag) ||
		ns_capable(sock_net(sock->sk)->user_ns, CAP_NET_ADMIN);
}

static void
netlink_update_subscriptions(struct sock *sk, unsigned int subscriptions)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (nlk->subscriptions && !subscriptions)
		__sk_del_bind_node(sk);
	else if (!nlk->subscriptions && subscriptions)
		sk_add_bind_node(sk, &nl_table[sk->sk_protocol].mc_list);
	nlk->subscriptions = subscriptions;
}

static int netlink_realloc_groups(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	unsigned int groups;
	unsigned long *new_groups;
	int err = 0;

	netlink_table_grab();

	groups = nl_table[sk->sk_protocol].groups;
	if (!nl_table[sk->sk_protocol].registered) {
		err = -ENOENT;
		goto out_unlock;
	}

	if (nlk->ngroups >= groups)
		goto out_unlock;

	new_groups = krealloc(nlk->groups, NLGRPSZ(groups), GFP_ATOMIC);
	if (new_groups == NULL) {
		err = -ENOMEM;
		goto out_unlock;
	}
	memset((char *)new_groups + NLGRPSZ(nlk->ngroups), 0,
	       NLGRPSZ(groups) - NLGRPSZ(nlk->ngroups));

	nlk->groups = new_groups;
	nlk->ngroups = groups;
 out_unlock:
	netlink_table_ungrab();
	return err;
}

static void netlink_undo_bind(int group, long unsigned int groups,
			      struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	int undo;

	if (!nlk->netlink_unbind)
		return;

	for (undo = 0; undo < group; undo++)
		if (test_bit(undo, &groups))
			nlk->netlink_unbind(sock_net(sk), undo + 1);
}

static int netlink_bind(struct socket *sock, struct sockaddr *addr,
			int addr_len)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct netlink_sock *nlk = nlk_sk(sk);
	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
	int err;
	long unsigned int groups = nladdr->nl_groups;

	if (addr_len < sizeof(struct sockaddr_nl))
		return -EINVAL;

	if (nladdr->nl_family != AF_NETLINK)
		return -EINVAL;

	/* Only superuser is allowed to listen multicasts */
	if (groups) {
		if (!netlink_allowed(sock, NL_CFG_F_NONROOT_RECV))
			return -EPERM;
		err = netlink_realloc_groups(sk);
		if (err)
			return err;
	}

	if (nlk->portid)
		if (nladdr->nl_pid != nlk->portid)
			return -EINVAL;

	if (nlk->netlink_bind && groups) {
		int group;

		for (group = 0; group < nlk->ngroups; group++) {
			if (!test_bit(group, &groups))
				continue;
			err = nlk->netlink_bind(net, group + 1);
			if (!err)
				continue;
			netlink_undo_bind(group, groups, sk);
			return err;
		}
	}

	if (!nlk->portid) {
		err = nladdr->nl_pid ?
			netlink_insert(sk, nladdr->nl_pid) :
			netlink_autobind(sock);
		if (err) {
			netlink_undo_bind(nlk->ngroups, groups, sk);
			return err;
		}
	}

	if (!groups && (nlk->groups == NULL || !(u32)nlk->groups[0]))
		return 0;

	netlink_table_grab();
	netlink_update_subscriptions(sk, nlk->subscriptions +
					 hweight32(groups) -
					 hweight32(nlk->groups[0]));
	nlk->groups[0] = (nlk->groups[0] & ~0xffffffffUL) | groups;
	netlink_update_listeners(sk);
	netlink_table_ungrab();

	return 0;
}
static int netlink_connect(struct socket *sock, struct sockaddr *addr,
			   int alen, int flags)
{
	int err = 0;
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;

	if (alen < sizeof(addr->sa_family))
		return -EINVAL;

	if (addr->sa_family == AF_UNSPEC) {
		sk->sk_state	= NETLINK_UNCONNECTED;
		nlk->dst_portid	= 0;
		nlk->dst_group	= 0;
		return 0;
	}
	if (addr->sa_family != AF_NETLINK)
		return -EINVAL;

	if ((nladdr->nl_groups || nladdr->nl_pid) &&
	    !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND))
		return -EPERM;

	if (!nlk->portid)
		err = netlink_autobind(sock);

	if (err == 0) {
		sk->sk_state	= NETLINK_CONNECTED;
		nlk->dst_portid = nladdr->nl_pid;
		nlk->dst_group	= ffs(nladdr->nl_groups);
	}

	return err;
}

static int netlink_getname(struct socket *sock, struct sockaddr *addr,
			   int *addr_len, int peer)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_nl *, nladdr, addr);

	nladdr->nl_family = AF_NETLINK;
	nladdr->nl_pad = 0;
	*addr_len = sizeof(*nladdr);

	if (peer) {
		nladdr->nl_pid = nlk->dst_portid;
		nladdr->nl_groups = netlink_group_mask(nlk->dst_group);
	} else {
		nladdr->nl_pid = nlk->portid;
		nladdr->nl_groups = nlk->groups ? nlk->groups[0] : 0;
	}
	return 0;
}

static struct sock *netlink_getsockbyportid(struct sock *ssk, u32 portid)
{
	struct sock *sock;
	struct netlink_sock *nlk;

	sock = netlink_lookup(sock_net(ssk), ssk->sk_protocol, portid);
	if (!sock)
		return ERR_PTR(-ECONNREFUSED);

	/* Don't bother queuing skb if kernel socket has no input function */
	nlk = nlk_sk(sock);
	if (sock->sk_state == NETLINK_CONNECTED &&
	    nlk->dst_portid != nlk_sk(ssk)->portid) {
		sock_put(sock);
		return ERR_PTR(-ECONNREFUSED);
	}
	return sock;
}

struct sock *netlink_getsockbyfilp(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	struct sock *sock;

	if (!S_ISSOCK(inode->i_mode))
		return ERR_PTR(-ENOTSOCK);

	sock = SOCKET_I(inode)->sk;
	if (sock->sk_family != AF_NETLINK)
		return ERR_PTR(-EINVAL);

	sock_hold(sock);
	return sock;
}

static struct sk_buff *netlink_alloc_large_skb(unsigned int size,
					       int broadcast)
{
	struct sk_buff *skb;
	void *data;

	if (size <= NLMSG_GOODSIZE || broadcast)
		return alloc_skb(size, GFP_KERNEL);

	size = SKB_DATA_ALIGN(size) +
	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	data = vmalloc(size);
	if (data == NULL)
		return NULL;

	skb = __build_skb(data, size);
	if (skb == NULL)
		vfree(data);
	else
		skb->destructor = netlink_skb_destructor;

	return skb;
}

/*
 * Attach a skb to a netlink socket.
 * The caller must hold a reference to the destination socket. On error, the
 * reference is dropped. The skb is not sent to the destination, just all
 * error checks are performed and memory in the queue is reserved.
 * Return values:
 * < 0: error. skb freed, reference to sock dropped.
 * 0: continue
 * 1: repeat lookup - reference dropped while waiting for socket memory.
 */
int netlink_attachskb(struct sock *sk, struct sk_buff *skb,
		      long *timeo, struct sock *ssk)
{
	struct netlink_sock *nlk;

	nlk = nlk_sk(sk);

	if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
	     test_bit(NETLINK_S_CONGESTED, &nlk->state)) &&
	    !netlink_skb_is_mmaped(skb)) {
		DECLARE_WAITQUEUE(wait, current);
		if (!*timeo) {
			if (!ssk || netlink_is_kernel(ssk))
				netlink_overrun(sk);
			sock_put(sk);
			kfree_skb(skb);
			return -EAGAIN;
		}

		__set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&nlk->wait, &wait);

		if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
		     test_bit(NETLINK_S_CONGESTED, &nlk->state)) &&
		    !sock_flag(sk, SOCK_DEAD))
			*timeo = schedule_timeout(*timeo);

		__set_current_state(TASK_RUNNING);
		remove_wait_queue(&nlk->wait, &wait);
		sock_put(sk);

		if (signal_pending(current)) {
			kfree_skb(skb);
			return sock_intr_errno(*timeo);
		}
		return 1;
	}
	netlink_skb_set_owner_r(skb, sk);
	return 0;
}

static int __netlink_sendskb(struct sock *sk, struct sk_buff *skb)
{
	int len = skb->len;

	netlink_deliver_tap(skb);

#ifdef CONFIG_NETLINK_MMAP
	if (netlink_skb_is_mmaped(skb))
		netlink_queue_mmaped_skb(sk, skb);
	else if (netlink_rx_is_mmaped(sk))
		netlink_ring_set_copied(sk, skb);
	else
#endif /* CONFIG_NETLINK_MMAP */
		skb_queue_tail(&sk->sk_receive_queue, skb);
	sk->sk_data_ready(sk);
	return len;
}

int netlink_sendskb(struct sock *sk, struct sk_buff *skb)
{
	int len = __netlink_sendskb(sk, skb);

	sock_put(sk);
	return len;
}

void netlink_detachskb(struct sock *sk, struct sk_buff *skb)
{
	kfree_skb(skb);
	sock_put(sk);
}

static struct sk_buff *netlink_trim(struct sk_buff *skb, gfp_t allocation)
{
	int delta;

	WARN_ON(skb->sk != NULL);
	if (netlink_skb_is_mmaped(skb))
		return skb;

	delta = skb->end - skb->tail;
	if (is_vmalloc_addr(skb->head) || delta * 2 < skb->truesize)
		return skb;

	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, allocation);
		if (!nskb)
			return skb;
		consume_skb(skb);
		skb = nskb;
	}

	if (!pskb_expand_head(skb, 0, -delta, allocation))
		skb->truesize -= delta;

	return skb;
}

static int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb,
				  struct sock *ssk)
{
	int ret;
	struct netlink_sock *nlk = nlk_sk(sk);

	ret = -ECONNREFUSED;
	if (nlk->netlink_rcv != NULL) {
		ret = skb->len;
		netlink_skb_set_owner_r(skb, sk);
		NETLINK_CB(skb).sk = ssk;
		netlink_deliver_tap_kernel(sk, ssk, skb);
		nlk->netlink_rcv(skb);
		consume_skb(skb);
	} else {
		kfree_skb(skb);
	}
	sock_put(sk);
	return ret;
}

int netlink_unicast(struct sock *ssk, struct sk_buff *skb,
		    u32 portid, int nonblock)
{
	struct sock *sk;
	int err;
	long timeo;

	skb = netlink_trim(skb, gfp_any());

	timeo = sock_sndtimeo(ssk, nonblock);
retry:
	sk = netlink_getsockbyportid(ssk, portid);
	if (IS_ERR(sk)) {
		kfree_skb(skb);
		return PTR_ERR(sk);
	}
	if (netlink_is_kernel(sk))
		return netlink_unicast_kernel(sk, skb, ssk);

	if (sk_filter(sk, skb)) {
		err = skb->len;
		kfree_skb(skb);
		sock_put(sk);
		return err;
	}

	err = netlink_attachskb(sk, skb, &timeo, ssk);
	if (err == 1)
		goto retry;
	if (err)
		return err;

	return netlink_sendskb(sk, skb);
}
EXPORT_SYMBOL(netlink_unicast);
struct sk_buff *__netlink_alloc_skb(struct sock *ssk, unsigned int size,
				    unsigned int ldiff, u32 dst_portid,
				    gfp_t gfp_mask)
{
#ifdef CONFIG_NETLINK_MMAP
	unsigned int maxlen, linear_size;
	struct sock *sk = NULL;
	struct sk_buff *skb;
	struct netlink_ring *ring;
	struct nl_mmap_hdr *hdr;

	sk = netlink_getsockbyportid(ssk, dst_portid);
	if (IS_ERR(sk))
		goto out;

	ring = &nlk_sk(sk)->rx_ring;
	/* fast-path without atomic ops for common case: non-mmaped receiver */
	if (ring->pg_vec == NULL)
		goto out_put;

	/* We need to account the full linear size needed as a ring
	 * slot cannot have non-linear parts.
	 */
	linear_size = size + ldiff;
	if (ring->frame_size - NL_MMAP_HDRLEN < linear_size)
		goto out_put;

	skb = alloc_skb_head(gfp_mask);
	if (skb == NULL)
		goto err1;

	spin_lock_bh(&sk->sk_receive_queue.lock);
	/* check again under lock */
	if (ring->pg_vec == NULL)
		goto out_free;

	/* check again under lock */
	maxlen = ring->frame_size - NL_MMAP_HDRLEN;
	if (maxlen < linear_size)
		goto out_free;

	netlink_forward_ring(ring);
	hdr = netlink_current_frame(ring, NL_MMAP_STATUS_UNUSED);
	if (hdr == NULL)
		goto err2;

	netlink_ring_setup_skb(skb, sk, ring, hdr);
	netlink_set_status(hdr, NL_MMAP_STATUS_RESERVED);
	atomic_inc(&ring->pending);
	netlink_increment_head(ring);

	spin_unlock_bh(&sk->sk_receive_queue.lock);
	return skb;

err2:
	kfree_skb(skb);
	spin_unlock_bh(&sk->sk_receive_queue.lock);
	netlink_overrun(sk);
err1:
	sock_put(sk);
	return NULL;

out_free:
	kfree_skb(skb);
	spin_unlock_bh(&sk->sk_receive_queue.lock);
out_put:
	sock_put(sk);
out:
#endif
	return alloc_skb(size, gfp_mask);
}
EXPORT_SYMBOL_GPL(__netlink_alloc_skb);

int netlink_has_listeners(struct sock *sk, unsigned int group)
{
	int res = 0;
	struct listeners *listeners;

	BUG_ON(!netlink_is_kernel(sk));

	rcu_read_lock();
	listeners = rcu_dereference(nl_table[sk->sk_protocol].listeners);

	if (listeners && group - 1 < nl_table[sk->sk_protocol].groups)
		res = test_bit(group - 1, listeners->masks);

	rcu_read_unlock();

	return res;
}
EXPORT_SYMBOL_GPL(netlink_has_listeners);

static int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
	    !test_bit(NETLINK_S_CONGESTED, &nlk->state)) {
		netlink_skb_set_owner_r(skb, sk);
		__netlink_sendskb(sk, skb);
		return atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1);
	}
	return -1;
}
struct netlink_broadcast_data {
	struct sock *exclude_sk;
	struct net *net;
	u32 portid;
	u32 group;
	int failure;
	int delivery_failure;
	int congested;
	int delivered;
	gfp_t allocation;
	struct sk_buff *skb, *skb2;
	int (*tx_filter)(struct sock *dsk, struct sk_buff *skb, void *data);
	void *tx_data;
};

static void do_one_broadcast(struct sock *sk,
			     struct netlink_broadcast_data *p)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	int val;

	if (p->exclude_sk == sk)
		return;

	if (nlk->portid == p->portid || p->group - 1 >= nlk->ngroups ||
	    !test_bit(p->group - 1, nlk->groups))
		return;

	if (!net_eq(sock_net(sk), p->net)) {
		if (!(nlk->flags & NETLINK_F_LISTEN_ALL_NSID))
			return;

		if (!peernet_has_id(sock_net(sk), p->net))
			return;

		if (!file_ns_capable(sk->sk_socket->file, p->net->user_ns,
				     CAP_NET_BROADCAST))
			return;
	}

	if (p->failure) {
		netlink_overrun(sk);
		return;
	}

	sock_hold(sk);
	if (p->skb2 == NULL) {
		if (skb_shared(p->skb)) {
			p->skb2 = skb_clone(p->skb, p->allocation);
		} else {
			p->skb2 = skb_get(p->skb);
			/*
			 * skb ownership may have been set when
			 * delivered to a previous socket.
			 */
			skb_orphan(p->skb2);
		}
	}
	if (p->skb2 == NULL) {
		netlink_overrun(sk);
		/* Clone failed. Notify ALL listeners. */
		p->failure = 1;
		if (nlk->flags & NETLINK_F_BROADCAST_SEND_ERROR)
			p->delivery_failure = 1;
		goto out;
	}
	if (p->tx_filter && p->tx_filter(sk, p->skb2, p->tx_data)) {
		kfree_skb(p->skb2);
		p->skb2 = NULL;
		goto out;
	}
	if (sk_filter(sk, p->skb2)) {
		kfree_skb(p->skb2);
		p->skb2 = NULL;
		goto out;
	}
	NETLINK_CB(p->skb2).nsid = peernet2id(sock_net(sk), p->net);
	NETLINK_CB(p->skb2).nsid_is_set = true;
	val = netlink_broadcast_deliver(sk, p->skb2);
	if (val < 0) {
		netlink_overrun(sk);
		if (nlk->flags & NETLINK_F_BROADCAST_SEND_ERROR)
			p->delivery_failure = 1;
	} else {
		p->congested |= val;
		p->delivered = 1;
		p->skb2 = NULL;
	}
out:
	sock_put(sk);
}

int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, u32 portid,
	u32 group, gfp_t allocation,
	int (*filter)(struct sock *dsk, struct sk_buff *skb, void *data),
	void *filter_data)
{
	struct net *net = sock_net(ssk);
	struct netlink_broadcast_data info;
	struct sock *sk;

	skb = netlink_trim(skb, allocation);

	info.exclude_sk = ssk;
	info.net = net;
	info.portid = portid;
	info.group = group;
	info.failure = 0;
	info.delivery_failure = 0;
	info.congested = 0;
	info.delivered = 0;
	info.allocation = allocation;
	info.skb = skb;
	info.skb2 = NULL;
	info.tx_filter = filter;
	info.tx_data = filter_data;

	/* While we sleep in clone, do not allow to change socket list */

	netlink_lock_table();

	sk_for_each_bound(sk, &nl_table[ssk->sk_protocol].mc_list)
		do_one_broadcast(sk, &info);

	consume_skb(skb);

	netlink_unlock_table();

	if (info.delivery_failure) {
		kfree_skb(info.skb2);
		return -ENOBUFS;
	}
	consume_skb(info.skb2);

	if (info.delivered) {
		if (info.congested && (allocation & __GFP_WAIT))
			yield();
		return 0;
	}
	return -ESRCH;
}
EXPORT_SYMBOL(netlink_broadcast_filtered);

int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 portid,
		      u32 group, gfp_t allocation)
{
	return netlink_broadcast_filtered(ssk, skb, portid, group, allocation,
					  NULL, NULL);
}
EXPORT_SYMBOL(netlink_broadcast);
struct netlink_set_err_data {
	struct sock *exclude_sk;
	u32 portid;
	u32 group;
	int code;
};

static int do_one_set_err(struct sock *sk, struct netlink_set_err_data *p)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	int ret = 0;

	if (sk == p->exclude_sk)
		goto out;

	if (!net_eq(sock_net(sk), sock_net(p->exclude_sk)))
		goto out;

	if (nlk->portid == p->portid || p->group - 1 >= nlk->ngroups ||
	    !test_bit(p->group - 1, nlk->groups))
		goto out;

	if (p->code == ENOBUFS && nlk->flags & NETLINK_F_RECV_NO_ENOBUFS) {
		ret = 1;
		goto out;
	}

	sk->sk_err = p->code;
	sk->sk_error_report(sk);
out:
	return ret;
}

/**
 * netlink_set_err - report error to broadcast listeners
 * @ssk: the kernel netlink socket, as returned by netlink_kernel_create()
 * @portid: the PORTID of a process that we want to skip (if any)
 * @group: the broadcast group that will notice the error
 * @code: error code, must be negative (as usual in kernelspace)
 *
 * This function returns the number of broadcast listeners that have set the
 * NETLINK_NO_ENOBUFS socket option.
 */
int netlink_set_err(struct sock *ssk, u32 portid, u32 group, int code)
{
	struct netlink_set_err_data info;
	struct sock *sk;
	int ret = 0;

	info.exclude_sk = ssk;
	info.portid = portid;
	info.group = group;
	/* sk->sk_err wants a positive error value */
	info.code = -code;

	read_lock(&nl_table_lock);

	sk_for_each_bound(sk, &nl_table[ssk->sk_protocol].mc_list)
		ret += do_one_set_err(sk, &info);

	read_unlock(&nl_table_lock);
	return ret;
}
EXPORT_SYMBOL(netlink_set_err);
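/* Editor's sketch (socket and group constant hypothetical, not part of the
 * original source): a subsystem that failed to allocate a notification could
 * tell all listeners of its multicast group, passing a negative errno as
 * documented above:
 *
 *	netlink_set_err(my_kernel_sk, 0, MY_MCAST_GRP, -ENOBUFS);
 */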
/* must be called with netlink table grabbed */
static void netlink_update_socket_mc(struct netlink_sock *nlk,
				     unsigned int group,
				     int is_new)
{
	int old, new = !!is_new, subscriptions;

	old = test_bit(group - 1, nlk->groups);
	subscriptions = nlk->subscriptions - old + new;
	if (new)
		__set_bit(group - 1, nlk->groups);
	else
		__clear_bit(group - 1, nlk->groups);
	netlink_update_subscriptions(&nlk->sk, subscriptions);
	netlink_update_listeners(&nlk->sk);
}

static int netlink_setsockopt(struct socket *sock, int level, int optname,
			      char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	unsigned int val = 0;
	int err;

	if (level != SOL_NETLINK)
		return -ENOPROTOOPT;

	if (optname != NETLINK_RX_RING && optname != NETLINK_TX_RING &&
	    optlen >= sizeof(int) &&
	    get_user(val, (unsigned int __user *)optval))
		return -EFAULT;

	switch (optname) {
	case NETLINK_PKTINFO:
		if (val)
			nlk->flags |= NETLINK_F_RECV_PKTINFO;
		else
			nlk->flags &= ~NETLINK_F_RECV_PKTINFO;
		err = 0;
		break;
	case NETLINK_ADD_MEMBERSHIP:
	case NETLINK_DROP_MEMBERSHIP: {
		if (!netlink_allowed(sock, NL_CFG_F_NONROOT_RECV))
			return -EPERM;
		err = netlink_realloc_groups(sk);
		if (err)
			return err;
		if (!val || val - 1 >= nlk->ngroups)
			return -EINVAL;
		if (optname == NETLINK_ADD_MEMBERSHIP && nlk->netlink_bind) {
			err = nlk->netlink_bind(sock_net(sk), val);
			if (err)
				return err;
		}
		netlink_table_grab();
		netlink_update_socket_mc(nlk, val,
					 optname == NETLINK_ADD_MEMBERSHIP);
		netlink_table_ungrab();
		if (optname == NETLINK_DROP_MEMBERSHIP && nlk->netlink_unbind)
			nlk->netlink_unbind(sock_net(sk), val);

		err = 0;
		break;
	}
	case NETLINK_BROADCAST_ERROR:
		if (val)
			nlk->flags |= NETLINK_F_BROADCAST_SEND_ERROR;
		else
			nlk->flags &= ~NETLINK_F_BROADCAST_SEND_ERROR;
		err = 0;
		break;
	case NETLINK_NO_ENOBUFS:
		if (val) {
			nlk->flags |= NETLINK_F_RECV_NO_ENOBUFS;
			clear_bit(NETLINK_S_CONGESTED, &nlk->state);
			wake_up_interruptible(&nlk->wait);
		} else {
			nlk->flags &= ~NETLINK_F_RECV_NO_ENOBUFS;
		}
		err = 0;
		break;
#ifdef CONFIG_NETLINK_MMAP
	case NETLINK_RX_RING:
	case NETLINK_TX_RING: {
		struct nl_mmap_req req;

		/* Rings might consume more memory than queue limits, require
		 * CAP_NET_ADMIN.
		 */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (optlen < sizeof(req))
			return -EINVAL;
		if (copy_from_user(&req, optval, sizeof(req)))
			return -EFAULT;
		err = netlink_set_ring(sk, &req,
				       optname == NETLINK_TX_RING);
		break;
	}
#endif /* CONFIG_NETLINK_MMAP */
	case NETLINK_LISTEN_ALL_NSID:
		if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_BROADCAST))
			return -EPERM;

		if (val)
			nlk->flags |= NETLINK_F_LISTEN_ALL_NSID;
		else
			nlk->flags &= ~NETLINK_F_LISTEN_ALL_NSID;
		err = 0;
		break;
	case NETLINK_CAP_ACK:
		if (val)
			nlk->flags |= NETLINK_F_CAP_ACK;
		else
			nlk->flags &= ~NETLINK_F_CAP_ACK;
		err = 0;
		break;
	default:
		err = -ENOPROTOOPT;
	}
	return err;
}

static int netlink_getsockopt(struct socket *sock, int level, int optname,
			      char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	int len, val, err;

	if (level != SOL_NETLINK)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case NETLINK_PKTINFO:
		if (len < sizeof(int))
			return -EINVAL;
		len = sizeof(int);
		val = nlk->flags & NETLINK_F_RECV_PKTINFO ? 1 : 0;
		if (put_user(len, optlen) ||
		    put_user(val, optval))
			return -EFAULT;
		err = 0;
		break;
	case NETLINK_BROADCAST_ERROR:
		if (len < sizeof(int))
			return -EINVAL;
		len = sizeof(int);
		val = nlk->flags & NETLINK_F_BROADCAST_SEND_ERROR ? 1 : 0;
		if (put_user(len, optlen) ||
		    put_user(val, optval))
			return -EFAULT;
		err = 0;
		break;
	case NETLINK_NO_ENOBUFS:
		if (len < sizeof(int))
			return -EINVAL;
		len = sizeof(int);
		val = nlk->flags & NETLINK_F_RECV_NO_ENOBUFS ? 1 : 0;
		if (put_user(len, optlen) ||
		    put_user(val, optval))
			return -EFAULT;
		err = 0;
		break;
	case NETLINK_LIST_MEMBERSHIPS: {
		int pos, idx, shift;

		err = 0;
		netlink_table_grab();
		for (pos = 0; pos * 8 < nlk->ngroups; pos += sizeof(u32)) {
			if (len - pos < sizeof(u32))
				break;

			idx = pos / sizeof(unsigned long);
			shift = (pos % sizeof(unsigned long)) * 8;
			if (put_user((u32)(nlk->groups[idx] >> shift),
				     (u32 __user *)(optval + pos))) {
				err = -EFAULT;
				break;
			}
		}
		if (put_user(ALIGN(nlk->ngroups / 8, sizeof(u32)), optlen))
			err = -EFAULT;
		netlink_table_ungrab();
		break;
	}
	case NETLINK_CAP_ACK:
		if (len < sizeof(int))
			return -EINVAL;
		len = sizeof(int);
		val = nlk->flags & NETLINK_F_CAP_ACK ? 1 : 0;
		if (put_user(len, optlen) ||
		    put_user(val, optval))
			return -EFAULT;
		err = 0;
		break;
	default:
		err = -ENOPROTOOPT;
	}
	return err;
}
static void netlink_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb)
{
	struct nl_pktinfo info;

	info.group = NETLINK_CB(skb).dst_group;
	put_cmsg(msg, SOL_NETLINK, NETLINK_PKTINFO, sizeof(info), &info);
}

static void netlink_cmsg_listen_all_nsid(struct sock *sk, struct msghdr *msg,
					 struct sk_buff *skb)
{
	if (!NETLINK_CB(skb).nsid_is_set)
		return;

	put_cmsg(msg, SOL_NETLINK, NETLINK_LISTEN_ALL_NSID, sizeof(int),
		 &NETLINK_CB(skb).nsid);
}

static int netlink_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_nl *, addr, msg->msg_name);
	u32 dst_portid;
	u32 dst_group;
	struct sk_buff *skb;
	int err;
	struct scm_cookie scm;
	u32 netlink_skb_flags = 0;

	if (msg->msg_flags&MSG_OOB)
		return -EOPNOTSUPP;

	err = scm_send(sock, msg, &scm, true);
	if (err < 0)
		return err;

	if (msg->msg_namelen) {
		err = -EINVAL;
		if (addr->nl_family != AF_NETLINK)
			goto out;
		dst_portid = addr->nl_pid;
		dst_group = ffs(addr->nl_groups);
		err = -EPERM;
		if ((dst_group || dst_portid) &&
		    !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND))
			goto out;
		netlink_skb_flags |= NETLINK_SKB_DST;
	} else {
		dst_portid = nlk->dst_portid;
		dst_group = nlk->dst_group;
	}

	if (!nlk->portid) {
		err = netlink_autobind(sock);
		if (err)
			goto out;
	}

	/* It's a really convoluted way for userland to ask for mmaped
	 * sendmsg(), but that's what we've got...
	 */
	if (netlink_tx_is_mmaped(sk) &&
	    iter_is_iovec(&msg->msg_iter) &&
	    msg->msg_iter.nr_segs == 1 &&
	    msg->msg_iter.iov->iov_base == NULL) {
		err = netlink_mmap_sendmsg(sk, msg, dst_portid, dst_group,
					   &scm);
		goto out;
	}

	err = -EMSGSIZE;
	if (len > sk->sk_sndbuf - 32)
		goto out;
	err = -ENOBUFS;
	skb = netlink_alloc_large_skb(len, dst_group);
	if (skb == NULL)
		goto out;

	NETLINK_CB(skb).portid	= nlk->portid;
	NETLINK_CB(skb).dst_group = dst_group;
	NETLINK_CB(skb).creds	= scm.creds;
	NETLINK_CB(skb).flags	= netlink_skb_flags;

	err = -EFAULT;
	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
		kfree_skb(skb);
		goto out;
	}

	err = security_netlink_send(sk, skb);
	if (err) {
		kfree_skb(skb);
		goto out;
	}

	if (dst_group) {
		atomic_inc(&skb->users);
		netlink_broadcast(sk, skb, dst_portid, dst_group, GFP_KERNEL);
	}
	err = netlink_unicast(sk, skb, dst_portid, msg->msg_flags&MSG_DONTWAIT);

out:
	scm_destroy(&scm);
	return err;
}

static int netlink_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
			   int flags)
{
	struct scm_cookie scm;
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	int noblock = flags&MSG_DONTWAIT;
	size_t copied;
	struct sk_buff *skb, *data_skb;
	int err, ret;

	if (flags&MSG_OOB)
		return -EOPNOTSUPP;

	copied = 0;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (skb == NULL)
		goto out;

	data_skb = skb;

#ifdef CONFIG_COMPAT_NETLINK_MESSAGES
	if (unlikely(skb_shinfo(skb)->frag_list)) {
		/*
		 * If this skb has a frag_list, then here that means that we
		 * will have to use the frag_list skb's data for compat tasks
		 * and the regular skb's data for normal (non-compat) tasks.
		 *
		 * If we need to send the compat skb, assign it to the
		 * 'data_skb' variable so that it will be used below for data
		 * copying. We keep 'skb' for everything else, including
		 * freeing both later.
		 */
		if (flags & MSG_CMSG_COMPAT)
			data_skb = skb_shinfo(skb)->frag_list;
	}
#endif

	/* Record the max length of recvmsg() calls for future allocations */
	nlk->max_recvmsg_len = max(nlk->max_recvmsg_len, len);
	nlk->max_recvmsg_len = min_t(size_t, nlk->max_recvmsg_len,
				     SKB_WITH_OVERHEAD(32768));

	copied = data_skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(data_skb);
	err = skb_copy_datagram_msg(data_skb, 0, msg, copied);

	if (msg->msg_name) {
		DECLARE_SOCKADDR(struct sockaddr_nl *, addr, msg->msg_name);
		addr->nl_family = AF_NETLINK;
		addr->nl_pad    = 0;
		addr->nl_pid	= NETLINK_CB(skb).portid;
		addr->nl_groups	= netlink_group_mask(NETLINK_CB(skb).dst_group);
		msg->msg_namelen = sizeof(*addr);
	}

	if (nlk->flags & NETLINK_F_RECV_PKTINFO)
		netlink_cmsg_recv_pktinfo(msg, skb);
	if (nlk->flags & NETLINK_F_LISTEN_ALL_NSID)
		netlink_cmsg_listen_all_nsid(sk, msg, skb);

	memset(&scm, 0, sizeof(scm));
	scm.creds = *NETLINK_CREDS(skb);
	if (flags & MSG_TRUNC)
		copied = data_skb->len;

	skb_free_datagram(sk, skb);

	if (nlk->cb_running &&
	    atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) {
		ret = netlink_dump(sk);
		if (ret) {
			sk->sk_err = -ret;
			sk->sk_error_report(sk);
		}
	}

	scm_recv(sock, msg, &scm, flags);
out:
	netlink_rcv_wake(sk);
	return err ? : copied;
}
2572 static void netlink_data_ready(struct sock
*sk
)
/*
 *	We export these functions to other modules. They provide a
 *	complete set of kernel non-blocking support for message
 *	queueing.
 */

struct sock *
__netlink_kernel_create(struct net *net, int unit, struct module *module,
			struct netlink_kernel_cfg *cfg)
{
	struct socket *sock;
	struct sock *sk;
	struct netlink_sock *nlk;
	struct listeners *listeners = NULL;
	struct mutex *cb_mutex = cfg ? cfg->cb_mutex : NULL;
	unsigned int groups;

	BUG_ON(!nl_table);

	if (unit < 0 || unit >= MAX_LINKS)
		return NULL;

	if (sock_create_lite(PF_NETLINK, SOCK_DGRAM, unit, &sock))
		return NULL;

	if (__netlink_create(net, sock, cb_mutex, unit, 1) < 0)
		goto out_sock_release_nosk;

	sk = sock->sk;

	if (!cfg || cfg->groups < 32)
		groups = 32;
	else
		groups = cfg->groups;

	listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL);
	if (!listeners)
		goto out_sock_release;

	sk->sk_data_ready = netlink_data_ready;
	if (cfg && cfg->input)
		nlk_sk(sk)->netlink_rcv = cfg->input;

	if (netlink_insert(sk, 0))
		goto out_sock_release;

	nlk = nlk_sk(sk);
	nlk->flags |= NETLINK_F_KERNEL_SOCKET;

	netlink_table_grab();
	if (!nl_table[unit].registered) {
		nl_table[unit].groups = groups;
		rcu_assign_pointer(nl_table[unit].listeners, listeners);
		nl_table[unit].cb_mutex = cb_mutex;
		nl_table[unit].module = module;
		if (cfg) {
			nl_table[unit].bind = cfg->bind;
			nl_table[unit].unbind = cfg->unbind;
			nl_table[unit].flags = cfg->flags;
			if (cfg->compare)
				nl_table[unit].compare = cfg->compare;
		}
		nl_table[unit].registered = 1;
	} else {
		kfree(listeners);
		nl_table[unit].registered++;
	}
	netlink_table_ungrab();
	return sk;

out_sock_release:
	kfree(listeners);
	netlink_kernel_release(sk);
	return NULL;

out_sock_release_nosk:
	sock_release(sock);
	return NULL;
}
EXPORT_SYMBOL(__netlink_kernel_create);
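/* Illustrative sketch (not compiled): how another subsystem typically
 * consumes the factory above, via the netlink_kernel_create() wrapper.
 * MY_NETLINK_PROTO and my_input() are hypothetical names, not part of
 * this file; a real caller would use one of the NETLINK_* units.
 */
#if 0
static void my_input(struct sk_buff *skb)
{
	/* runs for every skb unicast to the kernel socket */
}

static struct sock *my_create(struct net *net)
{
	struct netlink_kernel_cfg cfg = {
		.groups	= 32,		/* multicast groups to provide */
		.input	= my_input,	/* stored in nlk->netlink_rcv above */
	};

	return netlink_kernel_create(net, MY_NETLINK_PROTO, &cfg);
}
#endif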
void
netlink_kernel_release(struct sock *sk)
{
	if (sk == NULL || sk->sk_socket == NULL)
		return;

	sock_release(sk->sk_socket);
}
EXPORT_SYMBOL(netlink_kernel_release);
int __netlink_change_ngroups(struct sock *sk, unsigned int groups)
{
	struct listeners *new, *old;
	struct netlink_table *tbl = &nl_table[sk->sk_protocol];

	if (groups < 32)
		groups = 32;

	if (NLGRPSZ(tbl->groups) < NLGRPSZ(groups)) {
		new = kzalloc(sizeof(*new) + NLGRPSZ(groups), GFP_ATOMIC);
		if (!new)
			return -ENOMEM;
		old = nl_deref_protected(tbl->listeners);
		memcpy(new->masks, old->masks, NLGRPSZ(tbl->groups));
		rcu_assign_pointer(tbl->listeners, new);

		kfree_rcu(old, rcu);
	}
	tbl->groups = groups;

	return 0;
}
/**
 * netlink_change_ngroups - change number of multicast groups
 *
 * This changes the number of multicast groups that are available
 * on a certain netlink family. Note that it is not possible to
 * change the number of groups to below 32. Also note that it does
 * not implicitly call netlink_clear_multicast_users() when the
 * number of groups is reduced.
 *
 * @sk: The kernel netlink socket, as returned by netlink_kernel_create().
 * @groups: The new number of groups.
 */
int netlink_change_ngroups(struct sock *sk, unsigned int groups)
{
	int err;

	netlink_table_grab();
	err = __netlink_change_ngroups(sk, groups);
	netlink_table_ungrab();

	return err;
}
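/* Illustrative sketch (not compiled): a family that discovers it needs
 * more multicast groups after creation can grow the per-family
 * listeners bitmap with netlink_change_ngroups(). The names used here
 * are hypothetical.
 */
#if 0
static int my_add_mcast_groups(struct sock *kernel_sk, unsigned int total)
{
	/* grows (never shrinks below 32) the group count and the RCU'd
	 * listeners array, under the table lock */
	return netlink_change_ngroups(kernel_sk, total);
}
#endif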
void __netlink_clear_multicast_users(struct sock *ksk, unsigned int group)
{
	struct sock *sk;
	struct netlink_table *tbl = &nl_table[ksk->sk_protocol];

	sk_for_each_bound(sk, &tbl->mc_list)
		netlink_update_socket_mc(nlk_sk(sk), group, 0);
}
struct nlmsghdr *
__nlmsg_put(struct sk_buff *skb, u32 portid, u32 seq, int type, int len, int flags)
{
	struct nlmsghdr *nlh;
	int size = nlmsg_msg_size(len);

	nlh = (struct nlmsghdr *)skb_put(skb, NLMSG_ALIGN(size));
	nlh->nlmsg_type = type;
	nlh->nlmsg_len = size;
	nlh->nlmsg_flags = flags;
	nlh->nlmsg_pid = portid;
	nlh->nlmsg_seq = seq;
	if (!__builtin_constant_p(size) || NLMSG_ALIGN(size) - size != 0)
		memset(nlmsg_data(nlh) + len, 0, NLMSG_ALIGN(size) - size);
	return nlh;
}
EXPORT_SYMBOL(__nlmsg_put);
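/* Illustrative sketch (not compiled): building a message with the
 * nlmsg_put() wrapper, which adds tailroom checking on top of
 * __nlmsg_put() above. MY_MSG_TYPE and struct my_payload are
 * hypothetical.
 */
#if 0
static int my_fill(struct sk_buff *skb, u32 portid, u32 seq)
{
	struct nlmsghdr *nlh;
	struct my_payload *p;

	nlh = nlmsg_put(skb, portid, seq, MY_MSG_TYPE,
			sizeof(struct my_payload), NLM_F_MULTI);
	if (!nlh)
		return -EMSGSIZE;
	p = nlmsg_data(nlh);
	/* ... fill *p ... */
	nlmsg_end(skb, nlh);
	return 0;
}
#endif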
/*
 * It looks a bit ugly.
 * It would be better to create kernel thread.
 */

static int netlink_dump(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	struct netlink_callback *cb;
	struct sk_buff *skb = NULL;
	struct nlmsghdr *nlh;
	int len, err = -ENOBUFS;
	int alloc_size;

	mutex_lock(nlk->cb_mutex);
	if (!nlk->cb_running) {
		err = -EINVAL;
		goto errout_skb;
	}

	cb = &nlk->cb;
	alloc_size = max_t(int, cb->min_dump_alloc, NLMSG_GOODSIZE);

	if (!netlink_rx_is_mmaped(sk) &&
	    atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
		goto errout_skb;

	/* NLMSG_GOODSIZE is small to avoid high order allocations being
	 * required, but it makes sense to _attempt_ a 16K bytes allocation
	 * to reduce number of system calls on dump operations, if user
	 * ever provided a big enough buffer.
	 */
	if (alloc_size < nlk->max_recvmsg_len) {
		skb = netlink_alloc_skb(sk,
					nlk->max_recvmsg_len,
					nlk->portid,
					GFP_KERNEL |
					__GFP_NOWARN |
					__GFP_NORETRY);
		/* available room should be exact amount to avoid MSG_TRUNC */
		if (skb)
			skb_reserve(skb, skb_tailroom(skb) -
					 nlk->max_recvmsg_len);
	}
	if (!skb)
		skb = netlink_alloc_skb(sk, alloc_size, nlk->portid,
					GFP_KERNEL);
	if (!skb)
		goto errout_skb;
	netlink_skb_set_owner_r(skb, sk);

	len = cb->dump(skb, cb);

	if (len > 0) {
		mutex_unlock(nlk->cb_mutex);

		if (sk_filter(sk, skb))
			kfree_skb(skb);
		else
			__netlink_sendskb(sk, skb);
		return 0;
	}

	nlh = nlmsg_put_answer(skb, cb, NLMSG_DONE, sizeof(len), NLM_F_MULTI);
	if (!nlh)
		goto errout_skb;

	nl_dump_check_consistent(cb, nlh);

	memcpy(nlmsg_data(nlh), &len, sizeof(len));

	if (sk_filter(sk, skb))
		kfree_skb(skb);
	else
		__netlink_sendskb(sk, skb);

	if (cb->done)
		cb->done(cb);

	nlk->cb_running = false;
	mutex_unlock(nlk->cb_mutex);
	module_put(cb->module);
	consume_skb(cb->skb);
	return 0;

errout_skb:
	mutex_unlock(nlk->cb_mutex);
	kfree_skb(skb);
	return err;
}
int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
			 const struct nlmsghdr *nlh,
			 struct netlink_dump_control *control)
{
	struct netlink_callback *cb;
	struct sock *sk;
	struct netlink_sock *nlk;
	int ret;

	/* Memory mapped dump requests need to be copied to avoid looping
	 * on the pending state in netlink_mmap_sendmsg() while the CB hold
	 * a reference to the skb.
	 */
	if (netlink_skb_is_mmaped(skb)) {
		skb = skb_copy(skb, GFP_KERNEL);
		if (skb == NULL)
			return -ENOBUFS;
	} else
		atomic_inc(&skb->users);

	sk = netlink_lookup(sock_net(ssk), ssk->sk_protocol, NETLINK_CB(skb).portid);
	if (sk == NULL) {
		ret = -ECONNREFUSED;
		goto error_free;
	}

	nlk = nlk_sk(sk);
	mutex_lock(nlk->cb_mutex);
	/* A dump is in progress... */
	if (nlk->cb_running) {
		ret = -EBUSY;
		goto error_unlock;
	}
	/* add reference of module which cb->dump belongs to */
	if (!try_module_get(control->module)) {
		ret = -EPROTONOSUPPORT;
		goto error_unlock;
	}

	cb = &nlk->cb;
	memset(cb, 0, sizeof(*cb));
	cb->dump = control->dump;
	cb->done = control->done;
	cb->nlh = nlh;
	cb->data = control->data;
	cb->module = control->module;
	cb->min_dump_alloc = control->min_dump_alloc;
	cb->skb = skb;

	nlk->cb_running = true;

	mutex_unlock(nlk->cb_mutex);

	ret = netlink_dump(sk);
	sock_put(sk);

	if (ret)
		return ret;

	/* We successfully started a dump, by returning -EINTR we
	 * signal not to send ACK even if it was requested.
	 */
	return -EINTR;

error_unlock:
	sock_put(sk);
	mutex_unlock(nlk->cb_mutex);
error_free:
	kfree_skb(skb);
	return ret;
}
EXPORT_SYMBOL(__netlink_dump_start);
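/* Illustrative sketch (not compiled): the usual way into the dump
 * machinery above is the netlink_dump_start() wrapper, called from a
 * request handler; the wrapper fills in control->module. Note that the
 * -EINTR propagated here means "dump started, do not ACK".
 * my_dump()/my_done() are hypothetical.
 */
#if 0
static int my_getlink(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	if (nlh->nlmsg_flags & NLM_F_DUMP) {
		struct netlink_dump_control c = {
			.dump = my_dump,	/* fills one skb per call */
			.done = my_done,	/* optional cleanup */
		};
		/* skb->sk is the kernel socket that received the request */
		return netlink_dump_start(skb->sk, skb, nlh, &c);
	}
	return -EOPNOTSUPP;
}
#endif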
void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err)
{
	struct sk_buff *skb;
	struct nlmsghdr *rep;
	struct nlmsgerr *errmsg;
	size_t payload = sizeof(*errmsg);
	struct netlink_sock *nlk = nlk_sk(NETLINK_CB(in_skb).sk);

	/* Error messages get the original request appended, unless the user
	 * requests to cap the error message.
	 */
	if (!(nlk->flags & NETLINK_F_CAP_ACK) && err)
		payload += nlmsg_len(nlh);

	skb = netlink_alloc_skb(in_skb->sk, nlmsg_total_size(payload),
				NETLINK_CB(in_skb).portid, GFP_KERNEL);
	if (!skb) {
		struct sock *sk;

		sk = netlink_lookup(sock_net(in_skb->sk),
				    in_skb->sk->sk_protocol,
				    NETLINK_CB(in_skb).portid);
		if (sk) {
			sk->sk_err = ENOBUFS;
			sk->sk_error_report(sk);
			sock_put(sk);
		}
		return;
	}

	rep = __nlmsg_put(skb, NETLINK_CB(in_skb).portid, nlh->nlmsg_seq,
			  NLMSG_ERROR, payload, 0);
	errmsg = nlmsg_data(rep);
	errmsg->error = err;
	memcpy(&errmsg->msg, nlh, payload > sizeof(*errmsg) ? nlh->nlmsg_len : sizeof(*nlh));
	netlink_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).portid, MSG_DONTWAIT);
}
EXPORT_SYMBOL(netlink_ack);
int netlink_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *,
						   struct nlmsghdr *))
{
	struct nlmsghdr *nlh;
	int err;

	while (skb->len >= nlmsg_total_size(0)) {
		int msglen;

		nlh = nlmsg_hdr(skb);
		err = 0;

		if (nlh->nlmsg_len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len)
			return 0;

		/* Only requests are handled by the kernel */
		if (!(nlh->nlmsg_flags & NLM_F_REQUEST))
			goto ack;

		/* Skip control messages */
		if (nlh->nlmsg_type < NLMSG_MIN_TYPE)
			goto ack;

		err = cb(skb, nlh);
		if (err == -EINTR)
			goto skip;

ack:
		if (nlh->nlmsg_flags & NLM_F_ACK || err)
			netlink_ack(skb, nlh, err);

skip:
		msglen = NLMSG_ALIGN(nlh->nlmsg_len);
		if (msglen > skb->len)
			msglen = skb->len;
		skb_pull(skb, msglen);
	}

	return 0;
}
EXPORT_SYMBOL(netlink_rcv_skb);
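/* Illustrative sketch (not compiled): a kernel socket's cfg.input
 * handler usually just feeds every queued skb to netlink_rcv_skb(),
 * which walks the batched headers above and sends ACKs on its own.
 * my_doit() is hypothetical.
 */
#if 0
static int my_doit(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	/* dispatch on nlh->nlmsg_type; return 0 or -errno */
	return 0;
}

static void my_input(struct sk_buff *skb)
{
	netlink_rcv_skb(skb, &my_doit);
}
#endif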
/**
 * nlmsg_notify - send a notification netlink message
 * @sk: netlink socket to use
 * @skb: notification message
 * @portid: destination netlink portid for reports or 0
 * @group: destination multicast group or 0
 * @report: 1 to report back, 0 to disable
 * @flags: allocation flags
 */
int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 portid,
		 unsigned int group, int report, gfp_t flags)
{
	int err = 0;

	if (group) {
		int exclude_portid = 0;

		if (report) {
			atomic_inc(&skb->users);
			exclude_portid = portid;
		}

		/* errors reported via destination sk->sk_err, but propagate
		 * delivery errors if NETLINK_BROADCAST_ERROR flag is set */
		err = nlmsg_multicast(sk, skb, exclude_portid, group, flags);
	}

	if (report) {
		int err2;

		err2 = nlmsg_unicast(sk, skb, portid);
		if (!err || err == -ESRCH)
			err = err2;
	}

	return err;
}
EXPORT_SYMBOL(nlmsg_notify);
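/* Illustrative sketch (not compiled): a typical notifier sends one skb
 * both to an explicit requester (portid, report != 0) and to a
 * multicast group, mirroring what rtnetlink does. The names are
 * hypothetical.
 */
#if 0
static int my_notify(struct sock *sk, struct sk_buff *skb,
		     u32 requester_portid, unsigned int group)
{
	/* unicasts to the requester and multicasts to the group;
	 * -ESRCH (no listeners) is typically not treated as fatal */
	return nlmsg_notify(sk, skb, requester_portid, group, 1, GFP_KERNEL);
}
#endif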
#ifdef CONFIG_PROC_FS
struct nl_seq_iter {
	struct seq_net_private p;
	struct rhashtable_iter hti;
	int link;
};

static int netlink_walk_start(struct nl_seq_iter *iter)
{
	int err;

	err = rhashtable_walk_init(&nl_table[iter->link].hash, &iter->hti);
	if (err) {
		iter->link = MAX_LINKS;
		return err;
	}

	err = rhashtable_walk_start(&iter->hti);
	return err == -EAGAIN ? 0 : err;
}

static void netlink_walk_stop(struct nl_seq_iter *iter)
{
	rhashtable_walk_stop(&iter->hti);
	rhashtable_walk_exit(&iter->hti);
}

static void *__netlink_seq_next(struct seq_file *seq)
{
	struct nl_seq_iter *iter = seq->private;
	struct netlink_sock *nlk;

	do {
		for (;;) {
			int err;

			nlk = rhashtable_walk_next(&iter->hti);

			if (IS_ERR(nlk)) {
				if (PTR_ERR(nlk) == -EAGAIN)
					continue;

				return nlk;
			}

			if (nlk)
				break;

			netlink_walk_stop(iter);
			if (++iter->link >= MAX_LINKS)
				return NULL;

			err = netlink_walk_start(iter);
			if (err)
				return ERR_PTR(err);
		}
	} while (sock_net(&nlk->sk) != seq_file_net(seq));

	return nlk;
}

static void *netlink_seq_start(struct seq_file *seq, loff_t *posp)
{
	struct nl_seq_iter *iter = seq->private;
	void *obj = SEQ_START_TOKEN;
	loff_t pos;
	int err;

	iter->link = 0;

	err = netlink_walk_start(iter);
	if (err)
		return ERR_PTR(err);

	for (pos = *posp; pos && obj && !IS_ERR(obj); pos--)
		obj = __netlink_seq_next(seq);

	return obj;
}

static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return __netlink_seq_next(seq);
}

static void netlink_seq_stop(struct seq_file *seq, void *v)
{
	struct nl_seq_iter *iter = seq->private;

	if (iter->link >= MAX_LINKS)
		return;

	netlink_walk_stop(iter);
}

static int netlink_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "sk       Eth Pid    Groups   "
			 "Rmem     Wmem     Dump     Locks     Drops     Inode\n");
	} else {
		struct sock *s = v;
		struct netlink_sock *nlk = nlk_sk(s);

		seq_printf(seq, "%pK %-3d %-6u %08x %-8d %-8d %d %-8d %-8d %-8lu\n",
			   s,
			   s->sk_protocol,
			   nlk->portid,
			   nlk->groups ? (u32)nlk->groups[0] : 0,
			   sk_rmem_alloc_get(s),
			   sk_wmem_alloc_get(s),
			   nlk->cb_running,
			   atomic_read(&s->sk_refcnt),
			   atomic_read(&s->sk_drops),
			   sock_i_ino(s)
			);

	}
	return 0;
}

static const struct seq_operations netlink_seq_ops = {
	.start  = netlink_seq_start,
	.next   = netlink_seq_next,
	.stop   = netlink_seq_stop,
	.show   = netlink_seq_show,
};

static int netlink_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &netlink_seq_ops,
			    sizeof(struct nl_seq_iter));
}

static const struct file_operations netlink_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= netlink_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_net,
};

#endif
int netlink_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&netlink_chain, nb);
}
EXPORT_SYMBOL(netlink_register_notifier);

int netlink_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&netlink_chain, nb);
}
EXPORT_SYMBOL(netlink_unregister_notifier);
static const struct proto_ops netlink_ops = {
	.family =	PF_NETLINK,
	.owner =	THIS_MODULE,
	.release =	netlink_release,
	.bind =		netlink_bind,
	.connect =	netlink_connect,
	.socketpair =	sock_no_socketpair,
	.accept =	sock_no_accept,
	.getname =	netlink_getname,
	.poll =		netlink_poll,
	.ioctl =	sock_no_ioctl,
	.listen =	sock_no_listen,
	.shutdown =	sock_no_shutdown,
	.setsockopt =	netlink_setsockopt,
	.getsockopt =	netlink_getsockopt,
	.sendmsg =	netlink_sendmsg,
	.recvmsg =	netlink_recvmsg,
	.mmap =		netlink_mmap,
	.sendpage =	sock_no_sendpage,
};

static const struct net_proto_family netlink_family_ops = {
	.family = PF_NETLINK,
	.create = netlink_create,
	.owner	= THIS_MODULE,	/* for consistency 8) */
};
static int __net_init netlink_net_init(struct net *net)
{
#ifdef CONFIG_PROC_FS
	if (!proc_create("netlink", 0, net->proc_net, &netlink_seq_fops))
		return -ENOMEM;
#endif
	return 0;
}

static void __net_exit netlink_net_exit(struct net *net)
{
#ifdef CONFIG_PROC_FS
	remove_proc_entry("netlink", net->proc_net);
#endif
}

static void __init netlink_add_usersock_entry(void)
{
	struct listeners *listeners;
	int groups = 32;

	listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL);
	if (!listeners)
		panic("netlink_add_usersock_entry: Cannot allocate listeners\n");

	netlink_table_grab();

	nl_table[NETLINK_USERSOCK].groups = groups;
	rcu_assign_pointer(nl_table[NETLINK_USERSOCK].listeners, listeners);
	nl_table[NETLINK_USERSOCK].module = THIS_MODULE;
	nl_table[NETLINK_USERSOCK].registered = 1;
	nl_table[NETLINK_USERSOCK].flags = NL_CFG_F_NONROOT_SEND;

	netlink_table_ungrab();
}

static struct pernet_operations __net_initdata netlink_net_ops = {
	.init = netlink_net_init,
	.exit = netlink_net_exit,
};
static inline u32 netlink_hash(const void *data, u32 len, u32 seed)
{
	const struct netlink_sock *nlk = data;
	struct netlink_compare_arg arg;

	netlink_compare_arg_init(&arg, sock_net(&nlk->sk), nlk->portid);
	return jhash2((u32 *)&arg, netlink_compare_arg_len / sizeof(u32), seed);
}

static const struct rhashtable_params netlink_rhashtable_params = {
	.head_offset = offsetof(struct netlink_sock, node),
	.key_len = netlink_compare_arg_len,
	.obj_hashfn = netlink_hash,
	.obj_cmpfn = netlink_compare,
	.automatic_shrinking = true,
};
static int __init netlink_proto_init(void)
{
	int i;
	int err = proto_register(&netlink_proto, 0);

	if (err != 0)
		goto out;

	BUILD_BUG_ON(sizeof(struct netlink_skb_parms) > FIELD_SIZEOF(struct sk_buff, cb));

	nl_table = kcalloc(MAX_LINKS, sizeof(*nl_table), GFP_KERNEL);
	if (!nl_table)
		goto panic;

	for (i = 0; i < MAX_LINKS; i++) {
		if (rhashtable_init(&nl_table[i].hash,
				    &netlink_rhashtable_params) < 0) {
			while (--i > 0)
				rhashtable_destroy(&nl_table[i].hash);
			kfree(nl_table);
			goto panic;
		}
	}

	INIT_LIST_HEAD(&netlink_tap_all);

	netlink_add_usersock_entry();

	sock_register(&netlink_family_ops);
	register_pernet_subsys(&netlink_net_ops);
	/* The netlink device handler may be needed early. */
	rtnetlink_init();
out:
	return err;
panic:
	panic("netlink_init: Cannot allocate nl_table\n");
}

core_initcall(netlink_proto_init);
);