/*
 * NETLINK      Kernel-user communication protocol.
 *
 *              Authors:        Alan Cox <alan@lxorguk.ukuu.org.uk>
 *                              Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *                              Patrick McHardy <kaber@trash.net>
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 * Tue Jun 26 14:36:48 MEST 2001 Herbert "herp" Rosmanith
 *                               added netlink_proto_exit
 * Tue Jan 22 18:32:44 BRST 2002 Arnaldo C. de Melo <acme@conectiva.com.br>
 *                               use nlk_sk, as sk->protinfo is on a diet 8)
 * Fri Jul 22 19:51:12 MEST 2005 Harald Welte <laforge@gnumonks.org>
 *                               - inc module use count of module that owns
 *                                 the kernel socket in case userspace opens
 *                                 socket of same protocol
 *                               - remove all module support, since netlink is
 *                                 mandatory if CONFIG_NET=y these days
 */
#include <linux/module.h>

#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/fcntl.h>
#include <linux/termios.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/security.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/random.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/audit.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <linux/if_arp.h>
#include <linux/rhashtable.h>
#include <asm/cacheflush.h>
#include <linux/hash.h>

#include <net/net_namespace.h>
#include <net/netlink.h>

#include "af_netlink.h"
struct listeners {
        struct rcu_head         rcu;
        unsigned long           masks[0];
};

/* state bits */
#define NETLINK_CONGESTED       0x0

/* flags */
#define NETLINK_KERNEL_SOCKET   0x1
#define NETLINK_RECV_PKTINFO    0x2
#define NETLINK_BROADCAST_SEND_ERROR    0x4
#define NETLINK_RECV_NO_ENOBUFS 0x8
static inline int netlink_is_kernel(struct sock *sk)
{
        return nlk_sk(sk)->flags & NETLINK_KERNEL_SOCKET;
}

struct netlink_table *nl_table;
EXPORT_SYMBOL_GPL(nl_table);

static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);

static int netlink_dump(struct sock *sk);
static void netlink_skb_destructor(struct sk_buff *skb);
/* nl_table locking explained:
 * Lookup and traversal are protected with nl_sk_hash_lock or nl_table_lock
 * combined with an RCU read-side lock. Insertion and removal are protected
 * with nl_sk_hash_lock while using RCU list modification primitives and may
 * run in parallel to nl_table_lock protected lookups. Destruction of the
 * Netlink socket may only occur *after* nl_table_lock has been acquired
 * either during or after the socket has been removed from the list.
 */
DEFINE_RWLOCK(nl_table_lock);
EXPORT_SYMBOL_GPL(nl_table_lock);
static atomic_t nl_table_users = ATOMIC_INIT(0);

#define nl_deref_protected(X) rcu_dereference_protected(X, lockdep_is_held(&nl_table_lock))
/* Protects netlink socket hash table mutations */
DEFINE_MUTEX(nl_sk_hash_lock);
EXPORT_SYMBOL_GPL(nl_sk_hash_lock);

#ifdef CONFIG_PROVE_LOCKING
static int lockdep_nl_sk_hash_is_held(void *parent)
{
        return lockdep_is_held(&nl_sk_hash_lock) || lockdep_is_held(&nl_table_lock);
}
#endif

static ATOMIC_NOTIFIER_HEAD(netlink_chain);

static DEFINE_SPINLOCK(netlink_tap_lock);
static struct list_head netlink_tap_all __read_mostly;
static inline u32 netlink_group_mask(u32 group)
{
        return group ? 1 << (group - 1) : 0;
}
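/*
 * e.g. group 3 maps to the bit mask 0x4, and group 0 ("no group")
 * maps to an empty mask (illustrative sketch, not part of the
 * original file):
 *
 *      netlink_group_mask(3) == 0x4
 *      netlink_group_mask(0) == 0x0
 */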
int netlink_add_tap(struct netlink_tap *nt)
{
        if (unlikely(nt->dev->type != ARPHRD_NETLINK))
                return -EINVAL;

        spin_lock(&netlink_tap_lock);
        list_add_rcu(&nt->list, &netlink_tap_all);
        spin_unlock(&netlink_tap_lock);

        __module_get(nt->module);

        return 0;
}
EXPORT_SYMBOL_GPL(netlink_add_tap);
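/*
 * A tap driver such as nlmon registers itself roughly like this
 * (a minimal sketch; "dev" is assumed to be an ARPHRD_NETLINK
 * net_device owned by the caller):
 *
 *      static struct netlink_tap nl_tap;
 *
 *      nl_tap.dev    = dev;
 *      nl_tap.module = THIS_MODULE;
 *      netlink_add_tap(&nl_tap);
 */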
static int __netlink_remove_tap(struct netlink_tap *nt)
{
        bool found = false;
        struct netlink_tap *tmp;

        spin_lock(&netlink_tap_lock);

        list_for_each_entry(tmp, &netlink_tap_all, list) {
                if (nt == tmp) {
                        list_del_rcu(&nt->list);
                        found = true;
                        goto out;
                }
        }

        pr_warn("__netlink_remove_tap: %p not found\n", nt);
out:
        spin_unlock(&netlink_tap_lock);

        if (found && nt->module)
                module_put(nt->module);

        return found ? 0 : -ENODEV;
}

int netlink_remove_tap(struct netlink_tap *nt)
{
        int ret;

        ret = __netlink_remove_tap(nt);
        synchronize_net();

        return ret;
}
EXPORT_SYMBOL_GPL(netlink_remove_tap);
static bool netlink_filter_tap(const struct sk_buff *skb)
{
        struct sock *sk = skb->sk;

        /* We take the more conservative approach and
         * whitelist socket protocols that may pass.
         */
        switch (sk->sk_protocol) {
        case NETLINK_USERSOCK:
        case NETLINK_SOCK_DIAG:
        case NETLINK_FIB_LOOKUP:
        case NETLINK_NETFILTER:
        case NETLINK_GENERIC:
                return true;
        }

        return false;
}
static int __netlink_deliver_tap_skb(struct sk_buff *skb,
                                     struct net_device *dev)
{
        struct sk_buff *nskb;
        struct sock *sk = skb->sk;
        int ret = -ENOMEM;

        dev_hold(dev);
        nskb = skb_clone(skb, GFP_ATOMIC);
        if (nskb) {
                nskb->dev = dev;
                nskb->protocol = htons((u16) sk->sk_protocol);
                nskb->pkt_type = netlink_is_kernel(sk) ?
                                 PACKET_KERNEL : PACKET_USER;
                skb_reset_network_header(nskb);
                ret = dev_queue_xmit(nskb);
                if (unlikely(ret > 0))
                        ret = net_xmit_errno(ret);
        }

        dev_put(dev);
        return ret;
}
static void __netlink_deliver_tap(struct sk_buff *skb)
{
        int ret;
        struct netlink_tap *tmp;

        if (!netlink_filter_tap(skb))
                return;

        list_for_each_entry_rcu(tmp, &netlink_tap_all, list) {
                ret = __netlink_deliver_tap_skb(skb, tmp->dev);
                if (unlikely(ret))
                        break;
        }
}

static void netlink_deliver_tap(struct sk_buff *skb)
{
        rcu_read_lock();

        if (unlikely(!list_empty(&netlink_tap_all)))
                __netlink_deliver_tap(skb);

        rcu_read_unlock();
}

static void netlink_deliver_tap_kernel(struct sock *dst, struct sock *src,
                                       struct sk_buff *skb)
{
        if (!(netlink_is_kernel(dst) && netlink_is_kernel(src)))
                netlink_deliver_tap(skb);
}
static void netlink_overrun(struct sock *sk)
{
        struct netlink_sock *nlk = nlk_sk(sk);

        if (!(nlk->flags & NETLINK_RECV_NO_ENOBUFS)) {
                if (!test_and_set_bit(NETLINK_CONGESTED, &nlk_sk(sk)->state)) {
                        sk->sk_err = ENOBUFS;
                        sk->sk_error_report(sk);
                }
        }
        atomic_inc(&sk->sk_drops);
}

static void netlink_rcv_wake(struct sock *sk)
{
        struct netlink_sock *nlk = nlk_sk(sk);

        if (skb_queue_empty(&sk->sk_receive_queue))
                clear_bit(NETLINK_CONGESTED, &nlk->state);
        if (!test_bit(NETLINK_CONGESTED, &nlk->state))
                wake_up_interruptible(&nlk->wait);
}
#ifdef CONFIG_NETLINK_MMAP
static bool netlink_skb_is_mmaped(const struct sk_buff *skb)
{
        return NETLINK_CB(skb).flags & NETLINK_SKB_MMAPED;
}

static bool netlink_rx_is_mmaped(struct sock *sk)
{
        return nlk_sk(sk)->rx_ring.pg_vec != NULL;
}

static bool netlink_tx_is_mmaped(struct sock *sk)
{
        return nlk_sk(sk)->tx_ring.pg_vec != NULL;
}

static __pure struct page *pgvec_to_page(const void *addr)
{
        if (is_vmalloc_addr(addr))
                return vmalloc_to_page(addr);
        else
                return virt_to_page(addr);
}
static void free_pg_vec(void **pg_vec, unsigned int order, unsigned int len)
{
        unsigned int i;

        for (i = 0; i < len; i++) {
                if (pg_vec[i] != NULL) {
                        if (is_vmalloc_addr(pg_vec[i]))
                                vfree(pg_vec[i]);
                        else
                                free_pages((unsigned long)pg_vec[i], order);
                }
        }
        kfree(pg_vec);
}

static void *alloc_one_pg_vec_page(unsigned long order)
{
        void *buffer;
        gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP | __GFP_ZERO |
                          __GFP_NOWARN | __GFP_NORETRY;

        buffer = (void *)__get_free_pages(gfp_flags, order);
        if (buffer != NULL)
                return buffer;

        buffer = vzalloc((1 << order) * PAGE_SIZE);
        if (buffer != NULL)
                return buffer;

        gfp_flags &= ~__GFP_NORETRY;
        return (void *)__get_free_pages(gfp_flags, order);
}
static void **alloc_pg_vec(struct netlink_sock *nlk,
                           struct nl_mmap_req *req, unsigned int order)
{
        unsigned int block_nr = req->nm_block_nr;
        unsigned int i;
        void **pg_vec;

        pg_vec = kcalloc(block_nr, sizeof(void *), GFP_KERNEL);
        if (pg_vec == NULL)
                return NULL;

        for (i = 0; i < block_nr; i++) {
                pg_vec[i] = alloc_one_pg_vec_page(order);
                if (pg_vec[i] == NULL)
                        goto err1;
        }

        return pg_vec;
err1:
        free_pg_vec(pg_vec, order, block_nr);
        return NULL;
}
static int netlink_set_ring(struct sock *sk, struct nl_mmap_req *req,
                            bool closing, bool tx_ring)
{
        struct netlink_sock *nlk = nlk_sk(sk);
        struct netlink_ring *ring;
        struct sk_buff_head *queue;
        void **pg_vec = NULL;
        unsigned int order = 0;
        int err;

        ring  = tx_ring ? &nlk->tx_ring : &nlk->rx_ring;
        queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;

        if (!closing) {
                if (atomic_read(&nlk->mapped))
                        return -EBUSY;
                if (atomic_read(&ring->pending))
                        return -EBUSY;
        }

        if (req->nm_block_nr) {
                if (ring->pg_vec != NULL)
                        return -EBUSY;

                if ((int)req->nm_block_size <= 0)
                        return -EINVAL;
                if (!PAGE_ALIGNED(req->nm_block_size))
                        return -EINVAL;
                if (req->nm_frame_size < NL_MMAP_HDRLEN)
                        return -EINVAL;
                if (!IS_ALIGNED(req->nm_frame_size, NL_MMAP_MSG_ALIGNMENT))
                        return -EINVAL;

                ring->frames_per_block = req->nm_block_size /
                                         req->nm_frame_size;
                if (ring->frames_per_block == 0)
                        return -EINVAL;
                if (ring->frames_per_block * req->nm_block_nr !=
                    req->nm_frame_nr)
                        return -EINVAL;

                order = get_order(req->nm_block_size);
                pg_vec = alloc_pg_vec(nlk, req, order);
                if (pg_vec == NULL)
                        return -ENOMEM;
        } else {
                if (req->nm_frame_nr)
                        return -EINVAL;
        }

        err = -EBUSY;
        mutex_lock(&nlk->pg_vec_lock);
        if (closing || atomic_read(&nlk->mapped) == 0) {
                err = 0;
                spin_lock_bh(&queue->lock);

                ring->frame_max         = req->nm_frame_nr - 1;
                ring->head              = 0;
                ring->frame_size        = req->nm_frame_size;
                ring->pg_vec_pages      = req->nm_block_size / PAGE_SIZE;

                swap(ring->pg_vec_len, req->nm_block_nr);
                swap(ring->pg_vec_order, order);
                swap(ring->pg_vec, pg_vec);

                __skb_queue_purge(queue);
                spin_unlock_bh(&queue->lock);

                WARN_ON(atomic_read(&nlk->mapped));
        }
        mutex_unlock(&nlk->pg_vec_lock);

        if (pg_vec)
                free_pg_vec(pg_vec, order, req->nm_block_nr);
        return err;
}
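/*
 * Userspace configures and maps the rings roughly as follows (a
 * minimal sketch, assuming a kernel with CONFIG_NETLINK_MMAP and
 * illustrative sizes; note that nm_frame_nr must equal
 * frames-per-block times nm_block_nr, as checked above):
 *
 *      struct nl_mmap_req req = {
 *              .nm_block_size  = 4096,
 *              .nm_block_nr    = 64,
 *              .nm_frame_size  = 2048,
 *              .nm_frame_nr    = 2 * 64,
 *      };
 *      setsockopt(fd, SOL_NETLINK, NETLINK_RX_RING, &req, sizeof(req));
 *      setsockopt(fd, SOL_NETLINK, NETLINK_TX_RING, &req, sizeof(req));
 *      ring = mmap(NULL, 2 * 64 * 4096, PROT_READ | PROT_WRITE,
 *                  MAP_SHARED, fd, 0);
 */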
static void netlink_mm_open(struct vm_area_struct *vma)
{
        struct file *file = vma->vm_file;
        struct socket *sock = file->private_data;
        struct sock *sk = sock->sk;

        if (sk)
                atomic_inc(&nlk_sk(sk)->mapped);
}

static void netlink_mm_close(struct vm_area_struct *vma)
{
        struct file *file = vma->vm_file;
        struct socket *sock = file->private_data;
        struct sock *sk = sock->sk;

        if (sk)
                atomic_dec(&nlk_sk(sk)->mapped);
}

static const struct vm_operations_struct netlink_mmap_ops = {
        .open   = netlink_mm_open,
        .close  = netlink_mm_close,
};
static int netlink_mmap(struct file *file, struct socket *sock,
                        struct vm_area_struct *vma)
{
        struct sock *sk = sock->sk;
        struct netlink_sock *nlk = nlk_sk(sk);
        struct netlink_ring *ring;
        unsigned long start, size, expected;
        unsigned int i;
        int err = -EINVAL;

        if (vma->vm_pgoff)
                return -EINVAL;

        mutex_lock(&nlk->pg_vec_lock);

        expected = 0;
        for (ring = &nlk->rx_ring; ring <= &nlk->tx_ring; ring++) {
                if (ring->pg_vec == NULL)
                        continue;
                expected += ring->pg_vec_len * ring->pg_vec_pages * PAGE_SIZE;
        }

        if (expected == 0)
                goto out;

        size = vma->vm_end - vma->vm_start;
        if (size != expected)
                goto out;

        start = vma->vm_start;
        for (ring = &nlk->rx_ring; ring <= &nlk->tx_ring; ring++) {
                if (ring->pg_vec == NULL)
                        continue;

                for (i = 0; i < ring->pg_vec_len; i++) {
                        struct page *page;
                        void *kaddr = ring->pg_vec[i];
                        unsigned int pg_num;

                        for (pg_num = 0; pg_num < ring->pg_vec_pages; pg_num++) {
                                page = pgvec_to_page(kaddr);
                                err = vm_insert_page(vma, start, page);
                                if (err < 0)
                                        goto out;
                                start += PAGE_SIZE;
                                kaddr += PAGE_SIZE;
                        }
                }
        }

        atomic_inc(&nlk->mapped);
        vma->vm_ops = &netlink_mmap_ops;
        err = 0;
out:
        mutex_unlock(&nlk->pg_vec_lock);
        return err;
}
static void netlink_frame_flush_dcache(const struct nl_mmap_hdr *hdr)
{
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
        struct page *p_start, *p_end;

        /* First page is flushed through netlink_{get,set}_status */
        p_start = pgvec_to_page(hdr + PAGE_SIZE);
        p_end   = pgvec_to_page((void *)hdr + NL_MMAP_HDRLEN + hdr->nm_len - 1);
        while (p_start <= p_end) {
                flush_dcache_page(p_start);
                p_start++;
        }
#endif
}

static enum nl_mmap_status netlink_get_status(const struct nl_mmap_hdr *hdr)
{
        smp_rmb();
        flush_dcache_page(pgvec_to_page(hdr));
        return hdr->nm_status;
}

static void netlink_set_status(struct nl_mmap_hdr *hdr,
                               enum nl_mmap_status status)
{
        smp_mb();
        hdr->nm_status = status;
        flush_dcache_page(pgvec_to_page(hdr));
}
static struct nl_mmap_hdr *
__netlink_lookup_frame(const struct netlink_ring *ring, unsigned int pos)
{
        unsigned int pg_vec_pos, frame_off;

        pg_vec_pos = pos / ring->frames_per_block;
        frame_off  = pos % ring->frames_per_block;

        return ring->pg_vec[pg_vec_pos] + (frame_off * ring->frame_size);
}

static struct nl_mmap_hdr *
netlink_lookup_frame(const struct netlink_ring *ring, unsigned int pos,
                     enum nl_mmap_status status)
{
        struct nl_mmap_hdr *hdr;

        hdr = __netlink_lookup_frame(ring, pos);
        if (netlink_get_status(hdr) != status)
                return NULL;

        return hdr;
}

static struct nl_mmap_hdr *
netlink_current_frame(const struct netlink_ring *ring,
                      enum nl_mmap_status status)
{
        return netlink_lookup_frame(ring, ring->head, status);
}

static struct nl_mmap_hdr *
netlink_previous_frame(const struct netlink_ring *ring,
                       enum nl_mmap_status status)
{
        unsigned int prev;

        prev = ring->head ? ring->head - 1 : ring->frame_max;
        return netlink_lookup_frame(ring, prev, status);
}
static void netlink_increment_head(struct netlink_ring *ring)
{
        ring->head = ring->head != ring->frame_max ? ring->head + 1 : 0;
}

static void netlink_forward_ring(struct netlink_ring *ring)
{
        unsigned int head = ring->head, pos = head;
        const struct nl_mmap_hdr *hdr;

        do {
                hdr = __netlink_lookup_frame(ring, pos);
                if (hdr->nm_status == NL_MMAP_STATUS_UNUSED)
                        break;
                if (hdr->nm_status != NL_MMAP_STATUS_SKIP)
                        break;
                netlink_increment_head(ring);
        } while (ring->head != head);
}
static bool netlink_dump_space(struct netlink_sock *nlk)
{
        struct netlink_ring *ring = &nlk->rx_ring;
        struct nl_mmap_hdr *hdr;
        unsigned int n;

        hdr = netlink_current_frame(ring, NL_MMAP_STATUS_UNUSED);
        if (hdr == NULL)
                return false;

        n = ring->head + ring->frame_max / 2;
        if (n > ring->frame_max)
                n -= ring->frame_max;

        hdr = __netlink_lookup_frame(ring, n);

        return hdr->nm_status == NL_MMAP_STATUS_UNUSED;
}
static unsigned int netlink_poll(struct file *file, struct socket *sock,
                                 poll_table *wait)
{
        struct sock *sk = sock->sk;
        struct netlink_sock *nlk = nlk_sk(sk);
        unsigned int mask;
        int err;

        if (nlk->rx_ring.pg_vec != NULL) {
                /* Memory mapped sockets don't call recvmsg(), so flow control
                 * for dumps is performed here. A dump is allowed to continue
                 * if at least half the ring is unused.
                 */
                while (nlk->cb_running && netlink_dump_space(nlk)) {
                        err = netlink_dump(sk);
                        if (err < 0) {
                                sk->sk_err = -err;
                                sk->sk_error_report(sk);
                                break;
                        }
                }
                netlink_rcv_wake(sk);
        }

        mask = datagram_poll(file, sock, wait);

        spin_lock_bh(&sk->sk_receive_queue.lock);
        if (nlk->rx_ring.pg_vec) {
                netlink_forward_ring(&nlk->rx_ring);
                if (!netlink_previous_frame(&nlk->rx_ring, NL_MMAP_STATUS_UNUSED))
                        mask |= POLLIN | POLLRDNORM;
        }
        spin_unlock_bh(&sk->sk_receive_queue.lock);

        spin_lock_bh(&sk->sk_write_queue.lock);
        if (nlk->tx_ring.pg_vec) {
                if (netlink_current_frame(&nlk->tx_ring, NL_MMAP_STATUS_UNUSED))
                        mask |= POLLOUT | POLLWRNORM;
        }
        spin_unlock_bh(&sk->sk_write_queue.lock);

        return mask;
}
static struct nl_mmap_hdr *netlink_mmap_hdr(struct sk_buff *skb)
{
        return (struct nl_mmap_hdr *)(skb->head - NL_MMAP_HDRLEN);
}

static void netlink_ring_setup_skb(struct sk_buff *skb, struct sock *sk,
                                   struct netlink_ring *ring,
                                   struct nl_mmap_hdr *hdr)
{
        unsigned int size;
        void *data;

        size = ring->frame_size - NL_MMAP_HDRLEN;
        data = (void *)hdr + NL_MMAP_HDRLEN;

        skb->head       = data;
        skb->data       = data;
        skb_reset_tail_pointer(skb);
        skb->end        = skb->tail + size;
        skb->len        = 0;

        skb->destructor = netlink_skb_destructor;
        NETLINK_CB(skb).flags |= NETLINK_SKB_MMAPED;
        NETLINK_CB(skb).sk = sk;
}
static int netlink_mmap_sendmsg(struct sock *sk, struct msghdr *msg,
                                u32 dst_portid, u32 dst_group,
                                struct sock_iocb *siocb)
{
        struct netlink_sock *nlk = nlk_sk(sk);
        struct netlink_ring *ring;
        struct nl_mmap_hdr *hdr;
        struct sk_buff *skb;
        unsigned int maxlen;
        bool excl = true;
        int err = 0, len = 0;

        /* Netlink messages are validated by the receiver before processing.
         * In order to avoid userspace changing the contents of the message
         * after validation, the socket and the ring may only be used by a
         * single process, otherwise we fall back to copying.
         */
        if (atomic_long_read(&sk->sk_socket->file->f_count) > 1 ||
            atomic_read(&nlk->mapped) > 1)
                excl = false;

        mutex_lock(&nlk->pg_vec_lock);

        ring   = &nlk->tx_ring;
        maxlen = ring->frame_size - NL_MMAP_HDRLEN;

        do {
                hdr = netlink_current_frame(ring, NL_MMAP_STATUS_VALID);
                if (hdr == NULL) {
                        if (!(msg->msg_flags & MSG_DONTWAIT) &&
                            atomic_read(&nlk->tx_ring.pending))
                                schedule();
                        continue;
                }
                if (hdr->nm_len > maxlen) {
                        err = -EINVAL;
                        goto out;
                }

                netlink_frame_flush_dcache(hdr);

                if (likely(dst_portid == 0 && dst_group == 0 && excl)) {
                        skb = alloc_skb_head(GFP_KERNEL);
                        if (skb == NULL) {
                                err = -ENOBUFS;
                                goto out;
                        }
                        netlink_ring_setup_skb(skb, sk, ring, hdr);
                        NETLINK_CB(skb).flags |= NETLINK_SKB_TX;
                        __skb_put(skb, hdr->nm_len);
                        netlink_set_status(hdr, NL_MMAP_STATUS_RESERVED);
                        atomic_inc(&ring->pending);
                } else {
                        skb = alloc_skb(hdr->nm_len, GFP_KERNEL);
                        if (skb == NULL) {
                                err = -ENOBUFS;
                                goto out;
                        }
                        __skb_put(skb, hdr->nm_len);
                        memcpy(skb->data, (void *)hdr + NL_MMAP_HDRLEN, hdr->nm_len);
                        netlink_set_status(hdr, NL_MMAP_STATUS_UNUSED);
                }

                netlink_increment_head(ring);

                NETLINK_CB(skb).portid    = nlk->portid;
                NETLINK_CB(skb).dst_group = dst_group;
                NETLINK_CB(skb).creds     = siocb->scm->creds;

                err = security_netlink_send(sk, skb);
                if (err) {
                        kfree_skb(skb);
                        goto out;
                }

                if (unlikely(dst_group)) {
                        atomic_inc(&skb->users);
                        netlink_broadcast(sk, skb, dst_portid, dst_group,
                                          GFP_KERNEL);
                }
                err = netlink_unicast(sk, skb, dst_portid,
                                      msg->msg_flags & MSG_DONTWAIT);
                if (err < 0)
                        goto out;
                len += err;

        } while (hdr != NULL ||
                 (!(msg->msg_flags & MSG_DONTWAIT) &&
                  atomic_read(&nlk->tx_ring.pending)));

        if (len > 0)
                err = len;
out:
        mutex_unlock(&nlk->pg_vec_lock);
        return err;
}
static void netlink_queue_mmaped_skb(struct sock *sk, struct sk_buff *skb)
{
        struct nl_mmap_hdr *hdr;

        hdr = netlink_mmap_hdr(skb);
        hdr->nm_len     = skb->len;
        hdr->nm_group   = NETLINK_CB(skb).dst_group;
        hdr->nm_pid     = NETLINK_CB(skb).creds.pid;
        hdr->nm_uid     = from_kuid(sk_user_ns(sk), NETLINK_CB(skb).creds.uid);
        hdr->nm_gid     = from_kgid(sk_user_ns(sk), NETLINK_CB(skb).creds.gid);
        netlink_frame_flush_dcache(hdr);
        netlink_set_status(hdr, NL_MMAP_STATUS_VALID);

        NETLINK_CB(skb).flags |= NETLINK_SKB_DELIVERED;
        kfree_skb(skb);
}
*sk
, struct sk_buff
*skb
)
825 struct netlink_sock
*nlk
= nlk_sk(sk
);
826 struct netlink_ring
*ring
= &nlk
->rx_ring
;
827 struct nl_mmap_hdr
*hdr
;
829 spin_lock_bh(&sk
->sk_receive_queue
.lock
);
830 hdr
= netlink_current_frame(ring
, NL_MMAP_STATUS_UNUSED
);
832 spin_unlock_bh(&sk
->sk_receive_queue
.lock
);
837 netlink_increment_head(ring
);
838 __skb_queue_tail(&sk
->sk_receive_queue
, skb
);
839 spin_unlock_bh(&sk
->sk_receive_queue
.lock
);
841 hdr
->nm_len
= skb
->len
;
842 hdr
->nm_group
= NETLINK_CB(skb
).dst_group
;
843 hdr
->nm_pid
= NETLINK_CB(skb
).creds
.pid
;
844 hdr
->nm_uid
= from_kuid(sk_user_ns(sk
), NETLINK_CB(skb
).creds
.uid
);
845 hdr
->nm_gid
= from_kgid(sk_user_ns(sk
), NETLINK_CB(skb
).creds
.gid
);
846 netlink_set_status(hdr
, NL_MMAP_STATUS_COPY
);
#else /* CONFIG_NETLINK_MMAP */
#define netlink_skb_is_mmaped(skb)      false
#define netlink_rx_is_mmaped(sk)        false
#define netlink_tx_is_mmaped(sk)        false
#define netlink_mmap                    sock_no_mmap
#define netlink_poll                    datagram_poll
#define netlink_mmap_sendmsg(sk, msg, dst_portid, dst_group, siocb)    0
#endif /* CONFIG_NETLINK_MMAP */
static void netlink_skb_destructor(struct sk_buff *skb)
{
#ifdef CONFIG_NETLINK_MMAP
        struct nl_mmap_hdr *hdr;
        struct netlink_ring *ring;
        struct sock *sk;

        /* If a packet from the kernel to userspace was freed because of an
         * error without being delivered to userspace, the kernel must reset
         * the status. In the direction userspace to kernel, the status is
         * always reset here after the packet was processed and freed.
         */
        if (netlink_skb_is_mmaped(skb)) {
                hdr = netlink_mmap_hdr(skb);
                sk = NETLINK_CB(skb).sk;

                if (NETLINK_CB(skb).flags & NETLINK_SKB_TX) {
                        netlink_set_status(hdr, NL_MMAP_STATUS_UNUSED);
                        ring = &nlk_sk(sk)->tx_ring;
                } else {
                        if (!(NETLINK_CB(skb).flags & NETLINK_SKB_DELIVERED)) {
                                hdr->nm_len = 0;
                                netlink_set_status(hdr, NL_MMAP_STATUS_VALID);
                        }
                        ring = &nlk_sk(sk)->rx_ring;
                }

                WARN_ON(atomic_read(&ring->pending) == 0);
                atomic_dec(&ring->pending);
                sock_put(sk);

                skb->head = NULL;
        }
#endif
        if (is_vmalloc_addr(skb->head)) {
                if (!skb->cloned ||
                    !atomic_dec_return(&(skb_shinfo(skb)->dataref)))
                        vfree(skb->head);

                skb->head = NULL;
        }
        if (skb->sk != NULL)
                sock_rfree(skb);
}
static void netlink_skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
{
        WARN_ON(skb->sk != NULL);
        skb->sk = sk;
        skb->destructor = netlink_skb_destructor;
        atomic_add(skb->truesize, &sk->sk_rmem_alloc);
        sk_mem_charge(sk, skb->truesize);
}
static void netlink_sock_destruct(struct sock *sk)
{
        struct netlink_sock *nlk = nlk_sk(sk);

        if (nlk->cb_running) {
                if (nlk->cb.done)
                        nlk->cb.done(&nlk->cb);

                module_put(nlk->cb.module);
                kfree_skb(nlk->cb.skb);
        }

        skb_queue_purge(&sk->sk_receive_queue);
#ifdef CONFIG_NETLINK_MMAP
        {
                struct nl_mmap_req req;

                memset(&req, 0, sizeof(req));
                if (nlk->rx_ring.pg_vec)
                        netlink_set_ring(sk, &req, true, false);
                memset(&req, 0, sizeof(req));
                if (nlk->tx_ring.pg_vec)
                        netlink_set_ring(sk, &req, true, true);
        }
#endif /* CONFIG_NETLINK_MMAP */

        if (!sock_flag(sk, SOCK_DEAD)) {
                printk(KERN_ERR "Freeing alive netlink socket %p\n", sk);
                return;
        }

        WARN_ON(atomic_read(&sk->sk_rmem_alloc));
        WARN_ON(atomic_read(&sk->sk_wmem_alloc));
        WARN_ON(nlk_sk(sk)->groups);
}
/* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it is _very_ bad on
 * SMP. Look, when several writers sleep and reader wakes them up, all but one
 * immediately hit write lock and grab all the cpus. Exclusive sleep solves
 * this, _but_ remember, it adds useless work on UP machines.
 */

void netlink_table_grab(void)
        __acquires(nl_table_lock)
{
        might_sleep();

        write_lock_irq(&nl_table_lock);

        if (atomic_read(&nl_table_users)) {
                DECLARE_WAITQUEUE(wait, current);

                add_wait_queue_exclusive(&nl_table_wait, &wait);
                for (;;) {
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        if (atomic_read(&nl_table_users) == 0)
                                break;
                        write_unlock_irq(&nl_table_lock);
                        schedule();
                        write_lock_irq(&nl_table_lock);
                }

                __set_current_state(TASK_RUNNING);
                remove_wait_queue(&nl_table_wait, &wait);
        }
}

void netlink_table_ungrab(void)
        __releases(nl_table_lock)
{
        write_unlock_irq(&nl_table_lock);
        wake_up(&nl_table_wait);
}

static inline void
netlink_lock_table(void)
{
        /* read_lock() synchronizes us to netlink_table_grab */

        read_lock(&nl_table_lock);
        atomic_inc(&nl_table_users);
        read_unlock(&nl_table_lock);
}

static inline void
netlink_unlock_table(void)
{
        if (atomic_dec_and_test(&nl_table_users))
                wake_up(&nl_table_wait);
}
struct netlink_compare_arg
{
        struct net *net;
        u32 portid;
};

static bool netlink_compare(void *ptr, void *arg)
{
        struct netlink_compare_arg *x = arg;
        struct sock *sk = ptr;

        return nlk_sk(sk)->portid == x->portid &&
               net_eq(sock_net(sk), x->net);
}
static struct sock *__netlink_lookup(struct netlink_table *table, u32 portid,
                                     struct net *net)
{
        struct netlink_compare_arg arg = {
                .net = net,
                .portid = portid,
        };
        u32 hash;

        hash = rhashtable_hashfn(&table->hash, &portid, sizeof(portid));

        return rhashtable_lookup_compare(&table->hash, hash,
                                         &netlink_compare, &arg);
}

static struct sock *netlink_lookup(struct net *net, int protocol, u32 portid)
{
        struct netlink_table *table = &nl_table[protocol];
        struct sock *sk;

        read_lock(&nl_table_lock);
        rcu_read_lock();
        sk = __netlink_lookup(table, portid, net);
        if (sk)
                sock_hold(sk);
        rcu_read_unlock();
        read_unlock(&nl_table_lock);

        return sk;
}

static const struct proto_ops netlink_ops;
static void
netlink_update_listeners(struct sock *sk)
{
        struct netlink_table *tbl = &nl_table[sk->sk_protocol];
        unsigned long mask;
        unsigned int i;
        struct listeners *listeners;

        listeners = nl_deref_protected(tbl->listeners);
        if (!listeners)
                return;

        for (i = 0; i < NLGRPLONGS(tbl->groups); i++) {
                mask = 0;
                sk_for_each_bound(sk, &tbl->mc_list) {
                        if (i < NLGRPLONGS(nlk_sk(sk)->ngroups))
                                mask |= nlk_sk(sk)->groups[i];
                }
                listeners->masks[i] = mask;
        }
        /* this function is only called with the netlink table "grabbed", which
         * makes sure updates are visible before bind or setsockopt return. */
}
static int netlink_insert(struct sock *sk, struct net *net, u32 portid)
{
        struct netlink_table *table = &nl_table[sk->sk_protocol];
        int err = -EADDRINUSE;

        mutex_lock(&nl_sk_hash_lock);
        if (__netlink_lookup(table, portid, net))
                goto err;

        err = -EBUSY;
        if (nlk_sk(sk)->portid)
                goto err;

        err = -ENOMEM;
        if (BITS_PER_LONG > 32 && unlikely(table->hash.nelems >= UINT_MAX))
                goto err;

        nlk_sk(sk)->portid = portid;
        sock_hold(sk);
        rhashtable_insert(&table->hash, &nlk_sk(sk)->node);
        err = 0;
err:
        mutex_unlock(&nl_sk_hash_lock);
        return err;
}

static void netlink_remove(struct sock *sk)
{
        struct netlink_table *table;

        mutex_lock(&nl_sk_hash_lock);
        table = &nl_table[sk->sk_protocol];
        if (rhashtable_remove(&table->hash, &nlk_sk(sk)->node)) {
                WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
                __sock_put(sk);
        }
        mutex_unlock(&nl_sk_hash_lock);

        netlink_table_grab();
        if (nlk_sk(sk)->subscriptions)
                __sk_del_bind_node(sk);
        netlink_table_ungrab();
}
static struct proto netlink_proto = {
        .name     = "NETLINK",
        .owner    = THIS_MODULE,
        .obj_size = sizeof(struct netlink_sock),
};

static int __netlink_create(struct net *net, struct socket *sock,
                            struct mutex *cb_mutex, int protocol)
{
        struct sock *sk;
        struct netlink_sock *nlk;

        sock->ops = &netlink_ops;

        sk = sk_alloc(net, PF_NETLINK, GFP_KERNEL, &netlink_proto);
        if (!sk)
                return -ENOMEM;

        sock_init_data(sock, sk);

        nlk = nlk_sk(sk);
        if (cb_mutex) {
                nlk->cb_mutex = cb_mutex;
        } else {
                nlk->cb_mutex = &nlk->cb_def_mutex;
                mutex_init(nlk->cb_mutex);
        }
        init_waitqueue_head(&nlk->wait);
#ifdef CONFIG_NETLINK_MMAP
        mutex_init(&nlk->pg_vec_lock);
#endif

        sk->sk_destruct = netlink_sock_destruct;
        sk->sk_protocol = protocol;
        return 0;
}
static int netlink_create(struct net *net, struct socket *sock, int protocol,
                          int kern)
{
        struct module *module = NULL;
        struct mutex *cb_mutex;
        struct netlink_sock *nlk;
        int (*bind)(int group);
        void (*unbind)(int group);
        int err = 0;

        sock->state = SS_UNCONNECTED;

        if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
                return -ESOCKTNOSUPPORT;

        if (protocol < 0 || protocol >= MAX_LINKS)
                return -EPROTONOSUPPORT;

        netlink_lock_table();
#ifdef CONFIG_MODULES
        if (!nl_table[protocol].registered) {
                netlink_unlock_table();
                request_module("net-pf-%d-proto-%d", PF_NETLINK, protocol);
                netlink_lock_table();
        }
#endif
        if (nl_table[protocol].registered &&
            try_module_get(nl_table[protocol].module))
                module = nl_table[protocol].module;
        else
                err = -EPROTONOSUPPORT;
        cb_mutex = nl_table[protocol].cb_mutex;
        bind = nl_table[protocol].bind;
        unbind = nl_table[protocol].unbind;
        netlink_unlock_table();

        if (err < 0)
                goto out;

        err = __netlink_create(net, sock, cb_mutex, protocol);
        if (err < 0)
                goto out_module;

        local_bh_disable();
        sock_prot_inuse_add(net, &netlink_proto, 1);
        local_bh_enable();

        nlk = nlk_sk(sock->sk);
        nlk->module = module;
        nlk->netlink_bind = bind;
        nlk->netlink_unbind = unbind;
out:
        return err;

out_module:
        module_put(module);
        goto out;
}
static int netlink_release(struct socket *sock)
{
        struct sock *sk = sock->sk;
        struct netlink_sock *nlk;

        if (!sk)
                return 0;

        netlink_remove(sk);
        sock_orphan(sk);
        nlk = nlk_sk(sk);

        /*
         * OK. Socket is unlinked, any packets that arrive now
         * will be purged.
         */

        sock->sk = NULL;
        wake_up_interruptible_all(&nlk->wait);

        skb_queue_purge(&sk->sk_write_queue);

        if (nlk->portid) {
                struct netlink_notify n = {
                        .net = sock_net(sk),
                        .protocol = sk->sk_protocol,
                        .portid = nlk->portid,
                };
                atomic_notifier_call_chain(&netlink_chain,
                                NETLINK_URELEASE, &n);
        }

        module_put(nlk->module);

        netlink_table_grab();
        if (netlink_is_kernel(sk)) {
                BUG_ON(nl_table[sk->sk_protocol].registered == 0);
                if (--nl_table[sk->sk_protocol].registered == 0) {
                        struct listeners *old;

                        old = nl_deref_protected(nl_table[sk->sk_protocol].listeners);
                        RCU_INIT_POINTER(nl_table[sk->sk_protocol].listeners, NULL);
                        kfree_rcu(old, rcu);
                        nl_table[sk->sk_protocol].module = NULL;
                        nl_table[sk->sk_protocol].bind = NULL;
                        nl_table[sk->sk_protocol].unbind = NULL;
                        nl_table[sk->sk_protocol].flags = 0;
                        nl_table[sk->sk_protocol].registered = 0;
                }
        } else if (nlk->subscriptions) {
                netlink_update_listeners(sk);
        }
        netlink_table_ungrab();

        kfree(nlk->groups);
        nlk->groups = NULL;

        local_bh_disable();
        sock_prot_inuse_add(sock_net(sk), &netlink_proto, -1);
        local_bh_enable();
        sock_put(sk);
        return 0;
}
static int netlink_autobind(struct socket *sock)
{
        struct sock *sk = sock->sk;
        struct net *net = sock_net(sk);
        struct netlink_table *table = &nl_table[sk->sk_protocol];
        s32 portid = task_tgid_vnr(current);
        int err;
        static s32 rover = -4097;

retry:
        cond_resched();
        netlink_table_grab();
        if (__netlink_lookup(table, portid, net)) {
                /* Bind collision, search negative portid values. */
                portid = rover--;
                if (rover > -4097)
                        rover = -4097;
                netlink_table_ungrab();
                goto retry;
        }
        netlink_table_ungrab();

        err = netlink_insert(sk, net, portid);
        if (err == -EADDRINUSE)
                goto retry;

        /* If 2 threads race to autobind, that is fine. */
        if (err == -EBUSY)
                err = 0;

        return err;
}
/**
 * __netlink_ns_capable - General netlink message capability test
 * @nsp: NETLINK_CB of the socket buffer holding a netlink command from userspace
 * @user_ns: The user namespace of the capability to use
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket we received the message
 * from had the capability @cap when the netlink socket was created
 * and the sender of the message has the capability @cap in the user
 * namespace @user_ns.
 */
bool __netlink_ns_capable(const struct netlink_skb_parms *nsp,
                          struct user_namespace *user_ns, int cap)
{
        return ((nsp->flags & NETLINK_SKB_DST) ||
                file_ns_capable(nsp->sk->sk_socket->file, user_ns, cap)) &&
                ns_capable(user_ns, cap);
}
EXPORT_SYMBOL(__netlink_ns_capable);

/**
 * netlink_ns_capable - General netlink message capability test
 * @skb: socket buffer holding a netlink command from userspace
 * @user_ns: The user namespace of the capability to use
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket we received the message
 * from had the capability @cap when the netlink socket was created
 * and the sender of the message has the capability @cap in the user
 * namespace @user_ns.
 */
bool netlink_ns_capable(const struct sk_buff *skb,
                        struct user_namespace *user_ns, int cap)
{
        return __netlink_ns_capable(&NETLINK_CB(skb), user_ns, cap);
}
EXPORT_SYMBOL(netlink_ns_capable);

/**
 * netlink_capable - Netlink global message capability test
 * @skb: socket buffer holding a netlink command from userspace
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket we received the message
 * from had the capability @cap when the netlink socket was created
 * and the sender of the message has the capability @cap in all user
 * namespaces.
 */
bool netlink_capable(const struct sk_buff *skb, int cap)
{
        return netlink_ns_capable(skb, &init_user_ns, cap);
}
EXPORT_SYMBOL(netlink_capable);

/**
 * netlink_net_capable - Netlink network namespace message capability test
 * @skb: socket buffer holding a netlink command from userspace
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket we received the message
 * from had the capability @cap when the netlink socket was created
 * and the sender of the message has the capability @cap over the
 * network namespace of the socket we received the message from.
 */
bool netlink_net_capable(const struct sk_buff *skb, int cap)
{
        return netlink_ns_capable(skb, sock_net(skb->sk)->user_ns, cap);
}
EXPORT_SYMBOL(netlink_net_capable);
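/*
 * A typical guard in a message handler looks like this (illustrative
 * sketch, not part of the original file):
 *
 *      if (!netlink_net_capable(skb, CAP_NET_ADMIN))
 *              return -EPERM;
 */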
static inline int netlink_allowed(const struct socket *sock, unsigned int flag)
{
        return (nl_table[sock->sk->sk_protocol].flags & flag) ||
                ns_capable(sock_net(sock->sk)->user_ns, CAP_NET_ADMIN);
}

static void
netlink_update_subscriptions(struct sock *sk, unsigned int subscriptions)
{
        struct netlink_sock *nlk = nlk_sk(sk);

        if (nlk->subscriptions && !subscriptions)
                __sk_del_bind_node(sk);
        else if (!nlk->subscriptions && subscriptions)
                sk_add_bind_node(sk, &nl_table[sk->sk_protocol].mc_list);
        nlk->subscriptions = subscriptions;
}
static int netlink_realloc_groups(struct sock *sk)
{
        struct netlink_sock *nlk = nlk_sk(sk);
        unsigned int groups;
        unsigned long *new_groups;
        int err = 0;

        netlink_table_grab();

        groups = nl_table[sk->sk_protocol].groups;
        if (!nl_table[sk->sk_protocol].registered) {
                err = -ENOENT;
                goto out_unlock;
        }

        if (nlk->ngroups >= groups)
                goto out_unlock;

        new_groups = krealloc(nlk->groups, NLGRPSZ(groups), GFP_ATOMIC);
        if (new_groups == NULL) {
                err = -ENOMEM;
                goto out_unlock;
        }
        memset((char *)new_groups + NLGRPSZ(nlk->ngroups), 0,
               NLGRPSZ(groups) - NLGRPSZ(nlk->ngroups));

        nlk->groups = new_groups;
        nlk->ngroups = groups;
out_unlock:
        netlink_table_ungrab();
        return err;
}

static void netlink_unbind(int group, long unsigned int groups,
                           struct netlink_sock *nlk)
{
        int undo;

        if (!nlk->netlink_unbind)
                return;

        for (undo = 0; undo < group; undo++)
                if (test_bit(undo, &groups))
                        nlk->netlink_unbind(undo);
}
static int netlink_bind(struct socket *sock, struct sockaddr *addr,
                        int addr_len)
{
        struct sock *sk = sock->sk;
        struct net *net = sock_net(sk);
        struct netlink_sock *nlk = nlk_sk(sk);
        struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
        int err;
        long unsigned int groups = nladdr->nl_groups;

        if (addr_len < sizeof(struct sockaddr_nl))
                return -EINVAL;

        if (nladdr->nl_family != AF_NETLINK)
                return -EINVAL;

        /* Only superuser is allowed to listen multicasts */
        if (groups) {
                if (!netlink_allowed(sock, NL_CFG_F_NONROOT_RECV))
                        return -EPERM;
                err = netlink_realloc_groups(sk);
                if (err)
                        return err;
        }

        if (nlk->portid)
                if (nladdr->nl_pid != nlk->portid)
                        return -EINVAL;

        if (nlk->netlink_bind && groups) {
                int group;

                for (group = 0; group < nlk->ngroups; group++) {
                        if (!test_bit(group, &groups))
                                continue;
                        err = nlk->netlink_bind(group);
                        if (!err)
                                continue;
                        netlink_unbind(group, groups, nlk);
                        return err;
                }
        }

        if (!nlk->portid) {
                err = nladdr->nl_pid ?
                        netlink_insert(sk, net, nladdr->nl_pid) :
                        netlink_autobind(sock);
                if (err) {
                        netlink_unbind(nlk->ngroups, groups, nlk);
                        return err;
                }
        }

        if (!groups && (nlk->groups == NULL || !(u32)nlk->groups[0]))
                return 0;

        netlink_table_grab();
        netlink_update_subscriptions(sk, nlk->subscriptions +
                                         hweight32(groups) -
                                         hweight32(nlk->groups[0]));
        nlk->groups[0] = (nlk->groups[0] & ~0xffffffffUL) | groups;
        netlink_update_listeners(sk);
        netlink_table_ungrab();

        return 0;
}
static int netlink_connect(struct socket *sock, struct sockaddr *addr,
                           int alen, int flags)
{
        int err = 0;
        struct sock *sk = sock->sk;
        struct netlink_sock *nlk = nlk_sk(sk);
        struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;

        if (alen < sizeof(addr->sa_family))
                return -EINVAL;

        if (addr->sa_family == AF_UNSPEC) {
                sk->sk_state    = NETLINK_UNCONNECTED;
                nlk->dst_portid = 0;
                nlk->dst_group  = 0;
                return 0;
        }
        if (addr->sa_family != AF_NETLINK)
                return -EINVAL;

        if ((nladdr->nl_groups || nladdr->nl_pid) &&
            !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND))
                return -EPERM;

        if (!nlk->portid)
                err = netlink_autobind(sock);

        if (err == 0) {
                sk->sk_state    = NETLINK_CONNECTED;
                nlk->dst_portid = nladdr->nl_pid;
                nlk->dst_group  = ffs(nladdr->nl_groups);
        }

        return err;
}
static int netlink_getname(struct socket *sock, struct sockaddr *addr,
                           int *addr_len, int peer)
{
        struct sock *sk = sock->sk;
        struct netlink_sock *nlk = nlk_sk(sk);
        DECLARE_SOCKADDR(struct sockaddr_nl *, nladdr, addr);

        nladdr->nl_family = AF_NETLINK;
        nladdr->nl_pad = 0;
        *addr_len = sizeof(*nladdr);

        if (peer) {
                nladdr->nl_pid = nlk->dst_portid;
                nladdr->nl_groups = netlink_group_mask(nlk->dst_group);
        } else {
                nladdr->nl_pid = nlk->portid;
                nladdr->nl_groups = nlk->groups ? nlk->groups[0] : 0;
        }
        return 0;
}
*netlink_getsockbyportid(struct sock
*ssk
, u32 portid
)
1573 struct netlink_sock
*nlk
;
1575 sock
= netlink_lookup(sock_net(ssk
), ssk
->sk_protocol
, portid
);
1577 return ERR_PTR(-ECONNREFUSED
);
1579 /* Don't bother queuing skb if kernel socket has no input function */
1581 if (sock
->sk_state
== NETLINK_CONNECTED
&&
1582 nlk
->dst_portid
!= nlk_sk(ssk
)->portid
) {
1584 return ERR_PTR(-ECONNREFUSED
);
1589 struct sock
*netlink_getsockbyfilp(struct file
*filp
)
1591 struct inode
*inode
= file_inode(filp
);
1594 if (!S_ISSOCK(inode
->i_mode
))
1595 return ERR_PTR(-ENOTSOCK
);
1597 sock
= SOCKET_I(inode
)->sk
;
1598 if (sock
->sk_family
!= AF_NETLINK
)
1599 return ERR_PTR(-EINVAL
);
static struct sk_buff *netlink_alloc_large_skb(unsigned int size,
                                               int broadcast)
{
        struct sk_buff *skb;
        void *data;

        if (size <= NLMSG_GOODSIZE || broadcast)
                return alloc_skb(size, GFP_KERNEL);

        size = SKB_DATA_ALIGN(size) +
               SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

        data = vmalloc(size);
        if (data == NULL)
                return NULL;

        skb = build_skb(data, size);
        if (skb == NULL)
                vfree(data);
        else
                skb->destructor = netlink_skb_destructor;

        return skb;
}
/*
 * Attach a skb to a netlink socket.
 * The caller must hold a reference to the destination socket. On error, the
 * reference is dropped. The skb is not sent to the destination, just all
 * error checks are performed and memory in the queue is reserved.
 *
 * return values:
 * < 0: error. skb freed, reference to sock dropped.
 * 0: continue
 * 1: repeat lookup - reference dropped while waiting for socket memory.
 */
int netlink_attachskb(struct sock *sk, struct sk_buff *skb,
                      long *timeo, struct sock *ssk)
{
        struct netlink_sock *nlk;

        nlk = nlk_sk(sk);

        if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
             test_bit(NETLINK_CONGESTED, &nlk->state)) &&
            !netlink_skb_is_mmaped(skb)) {
                DECLARE_WAITQUEUE(wait, current);
                if (!*timeo) {
                        if (!ssk || netlink_is_kernel(ssk))
                                netlink_overrun(sk);
                        sock_put(sk);
                        kfree_skb(skb);
                        return -EAGAIN;
                }

                __set_current_state(TASK_INTERRUPTIBLE);
                add_wait_queue(&nlk->wait, &wait);

                if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
                     test_bit(NETLINK_CONGESTED, &nlk->state)) &&
                    !sock_flag(sk, SOCK_DEAD))
                        *timeo = schedule_timeout(*timeo);

                __set_current_state(TASK_RUNNING);
                remove_wait_queue(&nlk->wait, &wait);
                sock_put(sk);

                if (signal_pending(current)) {
                        kfree_skb(skb);
                        return sock_intr_errno(*timeo);
                }
                return 1;
        }
        netlink_skb_set_owner_r(skb, sk);
        return 0;
}
static int __netlink_sendskb(struct sock *sk, struct sk_buff *skb)
{
        int len = skb->len;

        netlink_deliver_tap(skb);

#ifdef CONFIG_NETLINK_MMAP
        if (netlink_skb_is_mmaped(skb))
                netlink_queue_mmaped_skb(sk, skb);
        else if (netlink_rx_is_mmaped(sk))
                netlink_ring_set_copied(sk, skb);
        else
#endif /* CONFIG_NETLINK_MMAP */
                skb_queue_tail(&sk->sk_receive_queue, skb);
        sk->sk_data_ready(sk);
        return len;
}

int netlink_sendskb(struct sock *sk, struct sk_buff *skb)
{
        int len = __netlink_sendskb(sk, skb);

        sock_put(sk);
        return len;
}

void netlink_detachskb(struct sock *sk, struct sk_buff *skb)
{
        kfree_skb(skb);
        sock_put(sk);
}
static struct sk_buff *netlink_trim(struct sk_buff *skb, gfp_t allocation)
{
        int delta;

        WARN_ON(skb->sk != NULL);
        if (netlink_skb_is_mmaped(skb))
                return skb;

        delta = skb->end - skb->tail;
        if (is_vmalloc_addr(skb->head) || delta * 2 < skb->truesize)
                return skb;

        if (skb_shared(skb)) {
                struct sk_buff *nskb = skb_clone(skb, allocation);
                if (!nskb)
                        return skb;
                consume_skb(skb);
                skb = nskb;
        }

        if (!pskb_expand_head(skb, 0, -delta, allocation))
                skb->truesize -= delta;

        return skb;
}
static int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb,
                                  struct sock *ssk)
{
        int ret;
        struct netlink_sock *nlk = nlk_sk(sk);

        ret = -ECONNREFUSED;
        if (nlk->netlink_rcv != NULL) {
                ret = skb->len;
                netlink_skb_set_owner_r(skb, sk);
                NETLINK_CB(skb).sk = ssk;
                netlink_deliver_tap_kernel(sk, ssk, skb);
                nlk->netlink_rcv(skb);
                consume_skb(skb);
        } else {
                kfree_skb(skb);
        }
        sock_put(sk);
        return ret;
}

int netlink_unicast(struct sock *ssk, struct sk_buff *skb,
                    u32 portid, int nonblock)
{
        struct sock *sk;
        int err;
        long timeo;

        skb = netlink_trim(skb, gfp_any());

        timeo = sock_sndtimeo(ssk, nonblock);
retry:
        sk = netlink_getsockbyportid(ssk, portid);
        if (IS_ERR(sk)) {
                kfree_skb(skb);
                return PTR_ERR(sk);
        }
        if (netlink_is_kernel(sk))
                return netlink_unicast_kernel(sk, skb, ssk);

        if (sk_filter(sk, skb)) {
                err = skb->len;
                kfree_skb(skb);
                sock_put(sk);
                return err;
        }

        err = netlink_attachskb(sk, skb, &timeo, ssk);
        if (err == 1)
                goto retry;
        if (err)
                return err;

        return netlink_sendskb(sk, skb);
}
EXPORT_SYMBOL(netlink_unicast);
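/*
 * e.g. a kernel socket replying to the sender of a request (a minimal
 * sketch; "nl_sk" and "req_skb" are assumed to be the kernel socket
 * and the request being answered):
 *
 *      err = netlink_unicast(nl_sk, skb, NETLINK_CB(req_skb).portid,
 *                            MSG_DONTWAIT);
 */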
struct sk_buff *netlink_alloc_skb(struct sock *ssk, unsigned int size,
                                  u32 dst_portid, gfp_t gfp_mask)
{
#ifdef CONFIG_NETLINK_MMAP
        struct sock *sk = NULL;
        struct sk_buff *skb;
        struct netlink_ring *ring;
        struct nl_mmap_hdr *hdr;
        unsigned int maxlen;

        sk = netlink_getsockbyportid(ssk, dst_portid);
        if (IS_ERR(sk))
                goto out;

        ring = &nlk_sk(sk)->rx_ring;
        /* fast-path without atomic ops for common case: non-mmaped receiver */
        if (ring->pg_vec == NULL)
                goto out_put;

        if (ring->frame_size - NL_MMAP_HDRLEN < size)
                goto out_put;

        skb = alloc_skb_head(gfp_mask);
        if (skb == NULL)
                goto err1;

        spin_lock_bh(&sk->sk_receive_queue.lock);
        /* check again under lock */
        if (ring->pg_vec == NULL)
                goto out_free;

        /* check again under lock */
        maxlen = ring->frame_size - NL_MMAP_HDRLEN;
        if (maxlen < size)
                goto out_free;

        netlink_forward_ring(ring);
        hdr = netlink_current_frame(ring, NL_MMAP_STATUS_UNUSED);
        if (hdr == NULL)
                goto err2;

        netlink_ring_setup_skb(skb, sk, ring, hdr);
        netlink_set_status(hdr, NL_MMAP_STATUS_RESERVED);
        atomic_inc(&ring->pending);
        netlink_increment_head(ring);

        spin_unlock_bh(&sk->sk_receive_queue.lock);
        return skb;

err2:
        kfree_skb(skb);
        spin_unlock_bh(&sk->sk_receive_queue.lock);
        netlink_overrun(sk);
err1:
        sock_put(sk);
        return NULL;

out_free:
        kfree_skb(skb);
        spin_unlock_bh(&sk->sk_receive_queue.lock);
out_put:
        sock_put(sk);
out:
#endif
        return alloc_skb(size, gfp_mask);
}
EXPORT_SYMBOL_GPL(netlink_alloc_skb);
int netlink_has_listeners(struct sock *sk, unsigned int group)
{
        int res = 0;
        struct listeners *listeners;

        BUG_ON(!netlink_is_kernel(sk));

        rcu_read_lock();
        listeners = rcu_dereference(nl_table[sk->sk_protocol].listeners);

        if (listeners && group - 1 < nl_table[sk->sk_protocol].groups)
                res = test_bit(group - 1, listeners->masks);

        rcu_read_unlock();

        return res;
}
EXPORT_SYMBOL_GPL(netlink_has_listeners);
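/*
 * Callers typically use this to skip building a notification nobody
 * would receive (illustrative sketch, not part of the original file):
 *
 *      if (!netlink_has_listeners(nl_sk, RTNLGRP_LINK))
 *              return;
 */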
static int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb)
{
        struct netlink_sock *nlk = nlk_sk(sk);

        if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
            !test_bit(NETLINK_CONGESTED, &nlk->state)) {
                netlink_skb_set_owner_r(skb, sk);
                __netlink_sendskb(sk, skb);
                return atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1);
        }
        return -1;
}

struct netlink_broadcast_data {
        struct sock *exclude_sk;
        struct net *net;
        u32 portid;
        u32 group;
        int failure;
        int delivery_failure;
        int congested;
        int delivered;
        gfp_t allocation;
        struct sk_buff *skb, *skb2;
        int (*tx_filter)(struct sock *dsk, struct sk_buff *skb, void *data);
        void *tx_data;
};
static void do_one_broadcast(struct sock *sk,
                             struct netlink_broadcast_data *p)
{
        struct netlink_sock *nlk = nlk_sk(sk);
        int val;

        if (p->exclude_sk == sk)
                return;

        if (nlk->portid == p->portid || p->group - 1 >= nlk->ngroups ||
            !test_bit(p->group - 1, nlk->groups))
                return;

        if (!net_eq(sock_net(sk), p->net))
                return;

        if (p->failure) {
                netlink_overrun(sk);
                return;
        }

        sock_hold(sk);
        if (p->skb2 == NULL) {
                if (skb_shared(p->skb)) {
                        p->skb2 = skb_clone(p->skb, p->allocation);
                } else {
                        p->skb2 = skb_get(p->skb);
                        /*
                         * skb ownership may have been set when
                         * delivered to a previous socket.
                         */
                        skb_orphan(p->skb2);
                }
        }
        if (p->skb2 == NULL) {
                netlink_overrun(sk);
                /* Clone failed. Notify ALL listeners. */
                p->failure = 1;
                if (nlk->flags & NETLINK_BROADCAST_SEND_ERROR)
                        p->delivery_failure = 1;
        } else if (p->tx_filter && p->tx_filter(sk, p->skb2, p->tx_data)) {
                kfree_skb(p->skb2);
                p->skb2 = NULL;
        } else if (sk_filter(sk, p->skb2)) {
                kfree_skb(p->skb2);
                p->skb2 = NULL;
        } else if ((val = netlink_broadcast_deliver(sk, p->skb2)) < 0) {
                netlink_overrun(sk);
                if (nlk->flags & NETLINK_BROADCAST_SEND_ERROR)
                        p->delivery_failure = 1;
        } else {
                p->congested |= val;
                p->delivered = 1;
                p->skb2 = NULL;
        }
        sock_put(sk);
}
int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, u32 portid,
        u32 group, gfp_t allocation,
        int (*filter)(struct sock *dsk, struct sk_buff *skb, void *data),
        void *filter_data)
{
        struct net *net = sock_net(ssk);
        struct netlink_broadcast_data info;
        struct sock *sk;

        skb = netlink_trim(skb, allocation);

        info.exclude_sk = ssk;
        info.net = net;
        info.portid = portid;
        info.group = group;
        info.failure = 0;
        info.delivery_failure = 0;
        info.congested = 0;
        info.delivered = 0;
        info.allocation = allocation;
        info.skb = skb;
        info.skb2 = NULL;
        info.tx_filter = filter;
        info.tx_data = filter_data;

        /* While we sleep in clone, do not allow to change socket list */

        netlink_lock_table();

        sk_for_each_bound(sk, &nl_table[ssk->sk_protocol].mc_list)
                do_one_broadcast(sk, &info);

        consume_skb(skb);

        netlink_unlock_table();

        if (info.delivery_failure) {
                kfree_skb(info.skb2);
                return -ENOBUFS;
        }
        consume_skb(info.skb2);

        if (info.delivered) {
                if (info.congested && (allocation & __GFP_WAIT))
                        yield();
                return 0;
        }
        return -ESRCH;
}
EXPORT_SYMBOL(netlink_broadcast_filtered);

int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 portid,
                      u32 group, gfp_t allocation)
{
        return netlink_broadcast_filtered(ssk, skb, portid, group, allocation,
                                          NULL, NULL);
}
EXPORT_SYMBOL(netlink_broadcast);
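/*
 * e.g. notifying all members of multicast group 1 while skipping no
 * portid (illustrative sketch; "nl_sk" is assumed to be a kernel
 * netlink socket):
 *
 *      netlink_broadcast(nl_sk, skb, 0, 1, GFP_KERNEL);
 */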
struct netlink_set_err_data {
        struct sock *exclude_sk;
        u32 portid;
        u32 group;
        int code;
};

static int do_one_set_err(struct sock *sk, struct netlink_set_err_data *p)
{
        struct netlink_sock *nlk = nlk_sk(sk);
        int ret = 0;

        if (sk == p->exclude_sk)
                goto out;

        if (!net_eq(sock_net(sk), sock_net(p->exclude_sk)))
                goto out;

        if (nlk->portid == p->portid || p->group - 1 >= nlk->ngroups ||
            !test_bit(p->group - 1, nlk->groups))
                goto out;

        if (p->code == ENOBUFS && nlk->flags & NETLINK_RECV_NO_ENOBUFS) {
                ret = 1;
                goto out;
        }

        sk->sk_err = p->code;
        sk->sk_error_report(sk);
out:
        return ret;
}

/**
 * netlink_set_err - report error to broadcast listeners
 * @ssk: the kernel netlink socket, as returned by netlink_kernel_create()
 * @portid: the PORTID of a process that we want to skip (if any)
 * @group: the broadcast group that will notice the error
 * @code: error code, must be negative (as usual in kernelspace)
 *
 * This function returns the number of broadcast listeners that have set the
 * NETLINK_RECV_NO_ENOBUFS socket option.
 */
int netlink_set_err(struct sock *ssk, u32 portid, u32 group, int code)
{
        struct netlink_set_err_data info;
        struct sock *sk;
        int ret = 0;

        info.exclude_sk = ssk;
        info.portid = portid;
        info.group = group;
        /* sk->sk_err wants a positive error value */
        info.code = -code;

        read_lock(&nl_table_lock);

        sk_for_each_bound(sk, &nl_table[ssk->sk_protocol].mc_list)
                ret += do_one_set_err(sk, &info);

        read_unlock(&nl_table_lock);
        return ret;
}
EXPORT_SYMBOL(netlink_set_err);
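/*
 * e.g. telling the listeners of group 1 that events were lost
 * (illustrative sketch, not part of the original file):
 *
 *      netlink_set_err(nl_sk, 0, 1, -ENOBUFS);
 */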
/* must be called with netlink table grabbed */
static void netlink_update_socket_mc(struct netlink_sock *nlk,
                                     unsigned int group,
                                     int is_new)
{
        int old, new = !!is_new, subscriptions;

        old = test_bit(group - 1, nlk->groups);
        subscriptions = nlk->subscriptions - old + new;
        if (new)
                __set_bit(group - 1, nlk->groups);
        else
                __clear_bit(group - 1, nlk->groups);
        netlink_update_subscriptions(&nlk->sk, subscriptions);
        netlink_update_listeners(&nlk->sk);
}
static int netlink_setsockopt(struct socket *sock, int level, int optname,
                              char __user *optval, unsigned int optlen)
{
        struct sock *sk = sock->sk;
        struct netlink_sock *nlk = nlk_sk(sk);
        unsigned int val = 0;
        int err;

        if (level != SOL_NETLINK)
                return -ENOPROTOOPT;

        if (optname != NETLINK_RX_RING && optname != NETLINK_TX_RING &&
            optlen >= sizeof(int) &&
            get_user(val, (unsigned int __user *)optval))
                return -EFAULT;

        switch (optname) {
        case NETLINK_PKTINFO:
                if (val)
                        nlk->flags |= NETLINK_RECV_PKTINFO;
                else
                        nlk->flags &= ~NETLINK_RECV_PKTINFO;
                err = 0;
                break;
        case NETLINK_ADD_MEMBERSHIP:
        case NETLINK_DROP_MEMBERSHIP: {
                if (!netlink_allowed(sock, NL_CFG_F_NONROOT_RECV))
                        return -EPERM;
                err = netlink_realloc_groups(sk);
                if (err)
                        return err;
                if (!val || val - 1 >= nlk->ngroups)
                        return -EINVAL;
                if (optname == NETLINK_ADD_MEMBERSHIP && nlk->netlink_bind) {
                        err = nlk->netlink_bind(val);
                        if (err)
                                return err;
                }
                netlink_table_grab();
                netlink_update_socket_mc(nlk, val,
                                         optname == NETLINK_ADD_MEMBERSHIP);
                netlink_table_ungrab();
                if (optname == NETLINK_DROP_MEMBERSHIP && nlk->netlink_unbind)
                        nlk->netlink_unbind(val);

                err = 0;
                break;
        }
        case NETLINK_BROADCAST_ERROR:
                if (val)
                        nlk->flags |= NETLINK_BROADCAST_SEND_ERROR;
                else
                        nlk->flags &= ~NETLINK_BROADCAST_SEND_ERROR;
                err = 0;
                break;
        case NETLINK_NO_ENOBUFS:
                if (val) {
                        nlk->flags |= NETLINK_RECV_NO_ENOBUFS;
                        clear_bit(NETLINK_CONGESTED, &nlk->state);
                        wake_up_interruptible(&nlk->wait);
                } else {
                        nlk->flags &= ~NETLINK_RECV_NO_ENOBUFS;
                }
                err = 0;
                break;
#ifdef CONFIG_NETLINK_MMAP
        case NETLINK_RX_RING:
        case NETLINK_TX_RING: {
                struct nl_mmap_req req;

                /* Rings might consume more memory than queue limits, require
                 * CAP_NET_ADMIN.
                 */
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                if (optlen < sizeof(req))
                        return -EINVAL;
                if (copy_from_user(&req, optval, sizeof(req)))
                        return -EFAULT;
                err = netlink_set_ring(sk, &req, false,
                                       optname == NETLINK_TX_RING);
                break;
        }
#endif /* CONFIG_NETLINK_MMAP */
        default:
                err = -ENOPROTOOPT;
        }
        return err;
}
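/*
 * From userspace the boolean options are plain integers, e.g. opting
 * out of ENOBUFS pressure signalling (illustrative sketch, not part
 * of the original file):
 *
 *      int on = 1;
 *      setsockopt(fd, SOL_NETLINK, NETLINK_NO_ENOBUFS, &on, sizeof(on));
 */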
static int netlink_getsockopt(struct socket *sock, int level, int optname,
                              char __user *optval, int __user *optlen)
{
        struct sock *sk = sock->sk;
        struct netlink_sock *nlk = nlk_sk(sk);
        int len, val, err;

        if (level != SOL_NETLINK)
                return -ENOPROTOOPT;

        if (get_user(len, optlen))
                return -EFAULT;
        if (len < 0)
                return -EINVAL;

        switch (optname) {
        case NETLINK_PKTINFO:
                if (len < sizeof(int))
                        return -EINVAL;
                len = sizeof(int);
                val = nlk->flags & NETLINK_RECV_PKTINFO ? 1 : 0;
                if (put_user(len, optlen) ||
                    put_user(val, optval))
                        return -EFAULT;
                err = 0;
                break;
        case NETLINK_BROADCAST_ERROR:
                if (len < sizeof(int))
                        return -EINVAL;
                len = sizeof(int);
                val = nlk->flags & NETLINK_BROADCAST_SEND_ERROR ? 1 : 0;
                if (put_user(len, optlen) ||
                    put_user(val, optval))
                        return -EFAULT;
                err = 0;
                break;
        case NETLINK_NO_ENOBUFS:
                if (len < sizeof(int))
                        return -EINVAL;
                len = sizeof(int);
                val = nlk->flags & NETLINK_RECV_NO_ENOBUFS ? 1 : 0;
                if (put_user(len, optlen) ||
                    put_user(val, optval))
                        return -EFAULT;
                err = 0;
                break;
        default:
                err = -ENOPROTOOPT;
        }
        return err;
}
*msg
, struct sk_buff
*skb
)
2255 struct nl_pktinfo info
;
2257 info
.group
= NETLINK_CB(skb
).dst_group
;
2258 put_cmsg(msg
, SOL_NETLINK
, NETLINK_PKTINFO
, sizeof(info
), &info
);
static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
                           struct msghdr *msg, size_t len)
{
        struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
        struct sock *sk = sock->sk;
        struct netlink_sock *nlk = nlk_sk(sk);
        DECLARE_SOCKADDR(struct sockaddr_nl *, addr, msg->msg_name);
        u32 dst_portid;
        u32 dst_group;
        struct sk_buff *skb;
        int err;
        struct scm_cookie scm;
        u32 netlink_skb_flags = 0;

        if (msg->msg_flags&MSG_OOB)
                return -EOPNOTSUPP;

        if (NULL == siocb->scm)
                siocb->scm = &scm;

        err = scm_send(sock, msg, siocb->scm, true);
        if (err < 0)
                return err;

        if (msg->msg_namelen) {
                err = -EINVAL;
                if (addr->nl_family != AF_NETLINK)
                        goto out;
                dst_portid = addr->nl_pid;
                dst_group = ffs(addr->nl_groups);
                err = -EPERM;
                if ((dst_group || dst_portid) &&
                    !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND))
                        goto out;
                netlink_skb_flags |= NETLINK_SKB_DST;
        } else {
                dst_portid = nlk->dst_portid;
                dst_group = nlk->dst_group;
        }

        if (!nlk->portid) {
                err = netlink_autobind(sock);
                if (err)
                        goto out;
        }

        if (netlink_tx_is_mmaped(sk) &&
            msg->msg_iov->iov_base == NULL) {
                err = netlink_mmap_sendmsg(sk, msg, dst_portid, dst_group,
                                           siocb);
                goto out;
        }

        err = -EMSGSIZE;
        if (len > sk->sk_sndbuf - 32)
                goto out;
        err = -ENOBUFS;
        skb = netlink_alloc_large_skb(len, dst_group);
        if (skb == NULL)
                goto out;

        NETLINK_CB(skb).portid    = nlk->portid;
        NETLINK_CB(skb).dst_group = dst_group;
        NETLINK_CB(skb).creds     = siocb->scm->creds;
        NETLINK_CB(skb).flags     = netlink_skb_flags;

        err = -EFAULT;
        if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
                kfree_skb(skb);
                goto out;
        }

        err = security_netlink_send(sk, skb);
        if (err) {
                kfree_skb(skb);
                goto out;
        }

        if (dst_group) {
                atomic_inc(&skb->users);
                netlink_broadcast(sk, skb, dst_portid, dst_group, GFP_KERNEL);
        }
        err = netlink_unicast(sk, skb, dst_portid, msg->msg_flags&MSG_DONTWAIT);

out:
        scm_destroy(siocb->scm);
        return err;
}
static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
                           struct msghdr *msg, size_t len,
                           int flags)
{
        struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
        struct scm_cookie scm;
        struct sock *sk = sock->sk;
        struct netlink_sock *nlk = nlk_sk(sk);
        int noblock = flags&MSG_DONTWAIT;
        size_t copied;
        struct sk_buff *skb, *data_skb;
        int err, ret;

        if (flags&MSG_OOB)
                return -EOPNOTSUPP;

        copied = 0;

        skb = skb_recv_datagram(sk, flags, noblock, &err);
        if (skb == NULL)
                goto out;

        data_skb = skb;

#ifdef CONFIG_COMPAT_NETLINK_MESSAGES
        if (unlikely(skb_shinfo(skb)->frag_list)) {
                /*
                 * If this skb has a frag_list, then here that means that we
                 * will have to use the frag_list skb's data for compat tasks
                 * and the regular skb's data for normal (non-compat) tasks.
                 *
                 * If we need to send the compat skb, assign it to the
                 * 'data_skb' variable so that it will be used below for data
                 * copying. We keep 'skb' for everything else, including
                 * freeing both later.
                 */
                if (flags & MSG_CMSG_COMPAT)
                        data_skb = skb_shinfo(skb)->frag_list;
        }
#endif

        /* Record the max length of recvmsg() calls for future allocations */
        nlk->max_recvmsg_len = max(nlk->max_recvmsg_len, len);
        nlk->max_recvmsg_len = min_t(size_t, nlk->max_recvmsg_len,
                                     SKB_WITH_OVERHEAD(32768));

        copied = data_skb->len;
        if (len < copied) {
                msg->msg_flags |= MSG_TRUNC;
                copied = len;
        }

        skb_reset_transport_header(data_skb);
        err = skb_copy_datagram_msg(data_skb, 0, msg, copied);

        if (msg->msg_name) {
                DECLARE_SOCKADDR(struct sockaddr_nl *, addr, msg->msg_name);
                addr->nl_family = AF_NETLINK;
                addr->nl_pad    = 0;
                addr->nl_pid    = NETLINK_CB(skb).portid;
                addr->nl_groups = netlink_group_mask(NETLINK_CB(skb).dst_group);
                msg->msg_namelen = sizeof(*addr);
        }

        if (nlk->flags & NETLINK_RECV_PKTINFO)
                netlink_cmsg_recv_pktinfo(msg, skb);

        if (NULL == siocb->scm) {
                memset(&scm, 0, sizeof(scm));
                siocb->scm = &scm;
        }
        siocb->scm->creds = *NETLINK_CREDS(skb);
        if (flags & MSG_TRUNC)
                copied = data_skb->len;

        skb_free_datagram(sk, skb);

        if (nlk->cb_running &&
            atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) {
                ret = netlink_dump(sk);
                if (ret) {
                        sk->sk_err = -ret;
                        sk->sk_error_report(sk);
                }
        }

        scm_recv(sock, msg, siocb->scm, flags);
out:
        netlink_rcv_wake(sk);
        return err ? : copied;
}
static void netlink_data_ready(struct sock *sk)
{
        BUG();
}
/*
 *      We export these functions to other modules. They provide a
 *      complete set of kernel non-blocking support for message
 *      queueing.
 */

struct sock *
__netlink_kernel_create(struct net *net, int unit, struct module *module,
                        struct netlink_kernel_cfg *cfg)
{
        struct socket *sock;
        struct sock *sk;
        struct netlink_sock *nlk;
        struct listeners *listeners = NULL;
        struct mutex *cb_mutex = cfg ? cfg->cb_mutex : NULL;
        unsigned int groups;

        BUG_ON(!nl_table);

        if (unit < 0 || unit >= MAX_LINKS)
                return NULL;

        if (sock_create_lite(PF_NETLINK, SOCK_DGRAM, unit, &sock))
                return NULL;

        /*
         * We have to just have a reference on the net from sk, but don't
         * get_net it. Besides, we cannot get and then put the net here.
         * So we create one inside init_net and the move it to net.
         */

        if (__netlink_create(&init_net, sock, cb_mutex, unit) < 0)
                goto out_sock_release_nosk;

        sk = sock->sk;
        sk_change_net(sk, net);

        if (!cfg || cfg->groups < 32)
                groups = 32;
        else
                groups = cfg->groups;

        listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL);
        if (!listeners)
                goto out_sock_release;

        sk->sk_data_ready = netlink_data_ready;
        if (cfg && cfg->input)
                nlk_sk(sk)->netlink_rcv = cfg->input;

        if (netlink_insert(sk, net, 0))
                goto out_sock_release;

        nlk = nlk_sk(sk);
        nlk->flags |= NETLINK_KERNEL_SOCKET;

        netlink_table_grab();
        if (!nl_table[unit].registered) {
                nl_table[unit].groups = groups;
                rcu_assign_pointer(nl_table[unit].listeners, listeners);
                nl_table[unit].cb_mutex = cb_mutex;
                nl_table[unit].module = module;
                if (cfg) {
                        nl_table[unit].bind = cfg->bind;
                        nl_table[unit].unbind = cfg->unbind;
                        nl_table[unit].flags = cfg->flags;
                        if (cfg->compare)
                                nl_table[unit].compare = cfg->compare;
                }
                nl_table[unit].registered = 1;
        } else {
                kfree(listeners);
                nl_table[unit].registered++;
        }
        netlink_table_ungrab();
        return sk;

out_sock_release:
        kfree(listeners);
        netlink_kernel_release(sk);
        return NULL;

out_sock_release_nosk:
        sock_release(sock);
        return NULL;
}
EXPORT_SYMBOL(__netlink_kernel_create);
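/*
 * Most callers go through the netlink_kernel_create() wrapper, e.g.
 * (a minimal sketch; NETLINK_EXAMPLE and example_rcv are hypothetical
 * names, not part of this file):
 *
 *      struct netlink_kernel_cfg cfg = {
 *              .input  = example_rcv,
 *              .groups = 32,
 *      };
 *      struct sock *nl_sk =
 *              netlink_kernel_create(&init_net, NETLINK_EXAMPLE, &cfg);
 */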
void
netlink_kernel_release(struct sock *sk)
{
        sk_release_kernel(sk);
}
EXPORT_SYMBOL(netlink_kernel_release);
int __netlink_change_ngroups(struct sock *sk, unsigned int groups)
{
        struct listeners *new, *old;
        struct netlink_table *tbl = &nl_table[sk->sk_protocol];

        if (groups < 32)
                groups = 32;

        if (NLGRPSZ(tbl->groups) < NLGRPSZ(groups)) {
                new = kzalloc(sizeof(*new) + NLGRPSZ(groups), GFP_ATOMIC);
                if (!new)
                        return -ENOMEM;
                old = nl_deref_protected(tbl->listeners);
                memcpy(new->masks, old->masks, NLGRPSZ(tbl->groups));
                rcu_assign_pointer(tbl->listeners, new);

                kfree_rcu(old, rcu);
        }
        tbl->groups = groups;

        return 0;
}
2566 * netlink_change_ngroups - change number of multicast groups
2568 * This changes the number of multicast groups that are available
2569 * on a certain netlink family. Note that it is not possible to
2570 * change the number of groups to below 32. Also note that it does
2571 * not implicitly call netlink_clear_multicast_users() when the
2572 * number of groups is reduced.
2574 * @sk: The kernel netlink socket, as returned by netlink_kernel_create().
2575 * @groups: The new number of groups.
int netlink_change_ngroups(struct sock *sk, unsigned int groups)
{
	int err;

	netlink_table_grab();
	err = __netlink_change_ngroups(sk, groups);
	netlink_table_ungrab();

	return err;
}
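/*
 * Example (sketch, not part of the original file): generic netlink grows
 * the group space of its kernel socket this way when a family registers
 * additional multicast groups; "genl_sock" and the group count are
 * illustrative values only:
 *
 *	err = netlink_change_ngroups(genl_sock, 64);
 *	if (err)
 *		return err;
 */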
void __netlink_clear_multicast_users(struct sock *ksk, unsigned int group)
{
	struct sock *sk;
	struct netlink_table *tbl = &nl_table[ksk->sk_protocol];

	sk_for_each_bound(sk, &tbl->mc_list)
		netlink_update_socket_mc(nlk_sk(sk), group, 0);
}
struct nlmsghdr *
__nlmsg_put(struct sk_buff *skb, u32 portid, u32 seq, int type, int len, int flags)
{
	struct nlmsghdr *nlh;
	int size = nlmsg_msg_size(len);

	nlh = (struct nlmsghdr *)skb_put(skb, NLMSG_ALIGN(size));
	nlh->nlmsg_type = type;
	nlh->nlmsg_len = size;
	nlh->nlmsg_flags = flags;
	nlh->nlmsg_pid = portid;
	nlh->nlmsg_seq = seq;
	if (!__builtin_constant_p(size) || NLMSG_ALIGN(size) - size != 0)
		memset(nlmsg_data(nlh) + len, 0, NLMSG_ALIGN(size) - size);
	return nlh;
}
EXPORT_SYMBOL(__nlmsg_put);
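/*
 * Example (sketch, not part of the original file): callers usually reserve
 * the header through the nlmsg_put() wrapper and then append attributes.
 * The message type, attribute id, and payload struct are hypothetical:
 *
 *	struct nlmsghdr *nlh;
 *
 *	nlh = nlmsg_put(skb, portid, seq, MY_MSG_TYPE,
 *			sizeof(struct my_msg_hdr), 0);
 *	if (!nlh)
 *		return -EMSGSIZE;
 *	if (nla_put_u32(skb, MY_ATTR_FOO, foo_val))
 *		return -EMSGSIZE;
 *	nlmsg_end(skb, nlh);
 */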
/*
 * It looks a bit ugly.
 * It would be better to create a kernel thread.
 */
static int netlink_dump(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	struct netlink_callback *cb;
	struct sk_buff *skb = NULL;
	struct nlmsghdr *nlh;
	int len, err = -ENOBUFS;
	int alloc_size;

	mutex_lock(nlk->cb_mutex);
	if (!nlk->cb_running) {
		err = -EINVAL;
		goto errout_skb;
	}

	cb = &nlk->cb;
	alloc_size = max_t(int, cb->min_dump_alloc, NLMSG_GOODSIZE);

	if (!netlink_rx_is_mmaped(sk) &&
	    atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
		goto errout_skb;

	/* NLMSG_GOODSIZE is small to avoid high order allocations being
	 * required, but it makes sense to _attempt_ a 16K bytes allocation
	 * to reduce number of system calls on dump operations, if user
	 * ever provided a big enough buffer.
	 */
	if (alloc_size < nlk->max_recvmsg_len) {
		skb = netlink_alloc_skb(sk,
					nlk->max_recvmsg_len,
					nlk->portid,
					GFP_KERNEL |
					__GFP_NOWARN |
					__GFP_NORETRY);
		/* available room should be exact amount to avoid MSG_TRUNC */
		if (skb)
			skb_reserve(skb, skb_tailroom(skb) -
					 nlk->max_recvmsg_len);
	}
	if (!skb)
		skb = netlink_alloc_skb(sk, alloc_size, nlk->portid,
					GFP_KERNEL);
	if (!skb)
		goto errout_skb;
	netlink_skb_set_owner_r(skb, sk);

	len = cb->dump(skb, cb);

	if (len > 0) {
		mutex_unlock(nlk->cb_mutex);

		if (sk_filter(sk, skb))
			kfree_skb(skb);
		else
			__netlink_sendskb(sk, skb);
		return 0;
	}

	nlh = nlmsg_put_answer(skb, cb, NLMSG_DONE, sizeof(len), NLM_F_MULTI);
	if (!nlh)
		goto errout_skb;

	nl_dump_check_consistent(cb, nlh);

	memcpy(nlmsg_data(nlh), &len, sizeof(len));

	if (sk_filter(sk, skb))
		kfree_skb(skb);
	else
		__netlink_sendskb(sk, skb);

	if (cb->done)
		cb->done(cb);

	nlk->cb_running = false;
	mutex_unlock(nlk->cb_mutex);
	module_put(cb->module);
	consume_skb(cb->skb);
	return 0;

errout_skb:
	mutex_unlock(nlk->cb_mutex);
	kfree_skb(skb);
	return err;
}
int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
			 const struct nlmsghdr *nlh,
			 struct netlink_dump_control *control)
{
	struct netlink_callback *cb;
	struct sock *sk;
	struct netlink_sock *nlk;
	int ret;

	/* Memory mapped dump requests need to be copied to avoid looping
	 * on the pending state in netlink_mmap_sendmsg() while the CB holds
	 * a reference to the skb.
	 */
	if (netlink_skb_is_mmaped(skb)) {
		skb = skb_copy(skb, GFP_KERNEL);
		if (skb == NULL)
			return -ENOBUFS;
	} else
		atomic_inc(&skb->users);

	sk = netlink_lookup(sock_net(ssk), ssk->sk_protocol, NETLINK_CB(skb).portid);
	if (sk == NULL) {
		ret = -ECONNREFUSED;
		goto error_free;
	}

	nlk = nlk_sk(sk);
	mutex_lock(nlk->cb_mutex);
	/* A dump is in progress... */
	if (nlk->cb_running) {
		ret = -EBUSY;
		goto error_unlock;
	}
	/* add reference of module which cb->dump belongs to */
	if (!try_module_get(control->module)) {
		ret = -EPROTONOSUPPORT;
		goto error_unlock;
	}

	cb = &nlk->cb;
	memset(cb, 0, sizeof(*cb));
	cb->dump = control->dump;
	cb->done = control->done;
	cb->nlh = nlh;
	cb->data = control->data;
	cb->module = control->module;
	cb->min_dump_alloc = control->min_dump_alloc;
	cb->skb = skb;

	nlk->cb_running = true;

	mutex_unlock(nlk->cb_mutex);

	ret = netlink_dump(sk);
	sock_put(sk);

	if (ret)
		return ret;

	/* We successfully started a dump, by returning -EINTR we
	 * signal not to send ACK even if it was requested.
	 */
	return -EINTR;

error_unlock:
	sock_put(sk);
	mutex_unlock(nlk->cb_mutex);
error_free:
	kfree_skb(skb);
	return ret;
}
EXPORT_SYMBOL(__netlink_dump_start);
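/*
 * Example (sketch, not part of the original file): message handlers start
 * dumps through the netlink_dump_start() wrapper, which supplies
 * THIS_MODULE for the module reference taken above.  "my_dump" is a
 * hypothetical dump callback:
 *
 *	if (nlh->nlmsg_flags & NLM_F_DUMP) {
 *		struct netlink_dump_control c = {
 *			.dump = my_dump,
 *		};
 *
 *		return netlink_dump_start(nlsk, skb, nlh, &c);
 *	}
 */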
void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err)
{
	struct sk_buff *skb;
	struct nlmsghdr *rep;
	struct nlmsgerr *errmsg;
	size_t payload = sizeof(*errmsg);

	/* error messages get the original request appended */
	if (err)
		payload += nlmsg_len(nlh);

	skb = netlink_alloc_skb(in_skb->sk, nlmsg_total_size(payload),
				NETLINK_CB(in_skb).portid, GFP_KERNEL);
	if (!skb) {
		struct sock *sk;

		sk = netlink_lookup(sock_net(in_skb->sk),
				    in_skb->sk->sk_protocol,
				    NETLINK_CB(in_skb).portid);
		if (sk) {
			sk->sk_err = ENOBUFS;
			sk->sk_error_report(sk);
			sock_put(sk);
		}
		return;
	}

	rep = __nlmsg_put(skb, NETLINK_CB(in_skb).portid, nlh->nlmsg_seq,
			  NLMSG_ERROR, payload, 0);
	errmsg = nlmsg_data(rep);
	errmsg->error = err;
	memcpy(&errmsg->msg, nlh, err ? nlh->nlmsg_len : sizeof(*nlh));
	netlink_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).portid, MSG_DONTWAIT);
}
EXPORT_SYMBOL(netlink_ack);
int netlink_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *,
						   struct nlmsghdr *))
{
	struct nlmsghdr *nlh;
	int err;

	while (skb->len >= nlmsg_total_size(0)) {
		int msglen;

		nlh = nlmsg_hdr(skb);
		err = 0;

		if (nlh->nlmsg_len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len)
			return 0;

		/* Only requests are handled by the kernel */
		if (!(nlh->nlmsg_flags & NLM_F_REQUEST))
			goto ack;

		/* Skip control messages */
		if (nlh->nlmsg_type < NLMSG_MIN_TYPE)
			goto ack;

		err = cb(skb, nlh);
		if (err == -EINTR)
			goto skip;

ack:
		if (nlh->nlmsg_flags & NLM_F_ACK || err)
			netlink_ack(skb, nlh, err);

skip:
		msglen = NLMSG_ALIGN(nlh->nlmsg_len);
		if (msglen > skb->len)
			msglen = skb->len;
		skb_pull(skb, msglen);
	}

	return 0;
}
EXPORT_SYMBOL(netlink_rcv_skb);
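/*
 * Example (sketch, not part of the original file): a kernel socket's
 * cfg->input callback typically just feeds each skb to netlink_rcv_skb()
 * with a per-message handler.  Both function names are hypothetical:
 *
 *	static int my_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 *	{
 *		switch (nlh->nlmsg_type) {
 *		...
 *		}
 *		return 0;
 *	}
 *
 *	static void my_nl_input(struct sk_buff *skb)
 *	{
 *		netlink_rcv_skb(skb, &my_rcv_msg);
 *	}
 */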
/**
 * nlmsg_notify - send a notification netlink message
 * @sk: netlink socket to use
 * @skb: notification message
 * @portid: destination netlink portid for reports or 0
 * @group: destination multicast group or 0
 * @report: 1 to report back, 0 to disable
 * @flags: allocation flags
 */
int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 portid,
		 unsigned int group, int report, gfp_t flags)
{
	int err = 0;

	if (group) {
		int exclude_portid = 0;

		if (report) {
			atomic_inc(&skb->users);
			exclude_portid = portid;
		}

		/* errors reported via destination sk->sk_err, but propagate
		 * delivery errors if NETLINK_BROADCAST_ERROR flag is set */
		err = nlmsg_multicast(sk, skb, exclude_portid, group, flags);
	}

	if (report) {
		int err2;

		err2 = nlmsg_unicast(sk, skb, portid);
		if (!err || err == -ESRCH)
			err = err2;
	}

	return err;
}
EXPORT_SYMBOL(nlmsg_notify);
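/*
 * Example (sketch, not part of the original file): rtnetlink-style event
 * code calls this after building an event skb; the group constant and the
 * use of NLM_F_ECHO as the report flag are illustrative:
 *
 *	err = nlmsg_notify(nlsk, skb, NETLINK_CB(req_skb).portid,
 *			   MY_GRP_EVENTS,
 *			   nlh->nlmsg_flags & NLM_F_ECHO, GFP_KERNEL);
 */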
#ifdef CONFIG_PROC_FS
struct nl_seq_iter {
	struct seq_net_private p;
	int link;
	int hash_idx;
};
static struct sock *netlink_seq_socket_idx(struct seq_file *seq, loff_t pos)
{
	struct nl_seq_iter *iter = seq->private;
	int i, j;
	struct netlink_sock *nlk;
	struct sock *s;
	loff_t off = 0;

	for (i = 0; i < MAX_LINKS; i++) {
		struct rhashtable *ht = &nl_table[i].hash;
		const struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);

		for (j = 0; j < tbl->size; j++) {
			rht_for_each_entry_rcu(nlk, tbl->buckets[j], node) {
				s = (struct sock *)nlk;

				if (sock_net(s) != seq_file_net(seq))
					continue;
				if (off == pos) {
					iter->link = i;
					iter->hash_idx = j;
					return s;
				}
				++off;
			}
		}
	}
	return NULL;
}
static void *netlink_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(nl_table_lock) __acquires(RCU)
{
	read_lock(&nl_table_lock);
	rcu_read_lock();

	return *pos ? netlink_seq_socket_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}
static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct rhashtable *ht;
	struct netlink_sock *nlk;
	struct nl_seq_iter *iter;
	struct net *net;
	int i, j;

	++*pos;

	if (v == SEQ_START_TOKEN)
		return netlink_seq_socket_idx(seq, 0);

	net = seq_file_net(seq);
	iter = seq->private;
	nlk = v;

	i = iter->link;
	ht = &nl_table[i].hash;
	rht_for_each_entry(nlk, nlk->node.next, ht, node)
		if (net_eq(sock_net((struct sock *)nlk), net))
			return nlk;

	j = iter->hash_idx + 1;

	do {
		const struct bucket_table *tbl;

		/* refresh ht so we walk the right table once i advances
		 * to the next link */
		ht = &nl_table[i].hash;
		tbl = rht_dereference_rcu(ht->tbl, ht);

		for (; j < tbl->size; j++) {
			rht_for_each_entry(nlk, tbl->buckets[j], ht, node) {
				if (net_eq(sock_net((struct sock *)nlk), net)) {
					iter->link = i;
					iter->hash_idx = j;
					return nlk;
				}
			}
		}

		j = 0;
	} while (++i < MAX_LINKS);

	return NULL;
}
static void netlink_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU) __releases(nl_table_lock)
{
	rcu_read_unlock();
	read_unlock(&nl_table_lock);
}
static int netlink_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "sk       Eth Pid    Groups   "
			 "Rmem     Wmem     Dump     Locks     Drops     Inode\n");
	} else {
		struct sock *s = v;
		struct netlink_sock *nlk = nlk_sk(s);

		seq_printf(seq, "%pK %-3d %-6u %08x %-8d %-8d %d %-8d %-8d %-8lu\n",
			   s,
			   s->sk_protocol,
			   nlk->portid,
			   nlk->groups ? (u32)nlk->groups[0] : 0,
			   sk_rmem_alloc_get(s),
			   sk_wmem_alloc_get(s),
			   nlk->cb_running,
			   atomic_read(&s->sk_refcnt),
			   atomic_read(&s->sk_drops),
			   sock_i_ino(s)
			);
	}
	return 0;
}
static const struct seq_operations netlink_seq_ops = {
	.start  = netlink_seq_start,
	.next   = netlink_seq_next,
	.stop   = netlink_seq_stop,
	.show   = netlink_seq_show,
};
static int netlink_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &netlink_seq_ops,
			    sizeof(struct nl_seq_iter));
}
static const struct file_operations netlink_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= netlink_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_net,
};
#endif

int netlink_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&netlink_chain, nb);
}
EXPORT_SYMBOL(netlink_register_notifier);

int netlink_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&netlink_chain, nb);
}
EXPORT_SYMBOL(netlink_unregister_notifier);
static const struct proto_ops netlink_ops = {
	.family =	PF_NETLINK,
	.owner =	THIS_MODULE,
	.release =	netlink_release,
	.bind =		netlink_bind,
	.connect =	netlink_connect,
	.socketpair =	sock_no_socketpair,
	.accept =	sock_no_accept,
	.getname =	netlink_getname,
	.poll =		netlink_poll,
	.ioctl =	sock_no_ioctl,
	.listen =	sock_no_listen,
	.shutdown =	sock_no_shutdown,
	.setsockopt =	netlink_setsockopt,
	.getsockopt =	netlink_getsockopt,
	.sendmsg =	netlink_sendmsg,
	.recvmsg =	netlink_recvmsg,
	.mmap =		netlink_mmap,
	.sendpage =	sock_no_sendpage,
};
static const struct net_proto_family netlink_family_ops = {
	.family = PF_NETLINK,
	.create = netlink_create,
	.owner	= THIS_MODULE,	/* for consistency 8) */
};
static int __net_init netlink_net_init(struct net *net)
{
#ifdef CONFIG_PROC_FS
	if (!proc_create("netlink", 0, net->proc_net, &netlink_seq_fops))
		return -ENOMEM;
#endif
	return 0;
}
static void __net_exit netlink_net_exit(struct net *net)
{
#ifdef CONFIG_PROC_FS
	remove_proc_entry("netlink", net->proc_net);
#endif
}
static void __init netlink_add_usersock_entry(void)
{
	struct listeners *listeners;
	int groups = 32;

	listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL);
	if (!listeners)
		panic("netlink_add_usersock_entry: Cannot allocate listeners\n");

	netlink_table_grab();

	nl_table[NETLINK_USERSOCK].groups = groups;
	rcu_assign_pointer(nl_table[NETLINK_USERSOCK].listeners, listeners);
	nl_table[NETLINK_USERSOCK].module = THIS_MODULE;
	nl_table[NETLINK_USERSOCK].registered = 1;
	nl_table[NETLINK_USERSOCK].flags = NL_CFG_F_NONROOT_SEND;

	netlink_table_ungrab();
}
static struct pernet_operations __net_initdata netlink_net_ops = {
	.init = netlink_net_init,
	.exit = netlink_net_exit,
};
static int __init netlink_proto_init(void)
{
	int i;
	int err = proto_register(&netlink_proto, 0);
	struct rhashtable_params ht_params = {
		.head_offset = offsetof(struct netlink_sock, node),
		.key_offset = offsetof(struct netlink_sock, portid),
		.key_len = sizeof(u32), /* portid */
		.hashfn = arch_fast_hash,
		.max_shift = 16, /* 64K */
		.grow_decision = rht_grow_above_75,
		.shrink_decision = rht_shrink_below_30,
#ifdef CONFIG_PROVE_LOCKING
		.mutex_is_held = lockdep_nl_sk_hash_is_held,
#endif
	};

	if (err != 0)
		goto out;

	BUILD_BUG_ON(sizeof(struct netlink_skb_parms) > FIELD_SIZEOF(struct sk_buff, cb));

	nl_table = kcalloc(MAX_LINKS, sizeof(*nl_table), GFP_KERNEL);
	if (!nl_table)
		goto panic;

	for (i = 0; i < MAX_LINKS; i++) {
		if (rhashtable_init(&nl_table[i].hash, &ht_params) < 0) {
			while (--i > 0)
				rhashtable_destroy(&nl_table[i].hash);
			kfree(nl_table);
			goto panic;
		}
	}

	INIT_LIST_HEAD(&netlink_tap_all);

	netlink_add_usersock_entry();

	sock_register(&netlink_family_ops);
	register_pernet_subsys(&netlink_net_ops);
	/* The netlink device handler may be needed early. */
	rtnetlink_init();
out:
	return err;
panic:
	panic("netlink_init: Cannot allocate nl_table\n");
}

core_initcall(netlink_proto_init);