net/netlink/af_netlink.c
1/*
2 * NETLINK Kernel-user communication protocol.
3 *
 4 * Authors: Alan Cox <alan@lxorguk.ukuu.org.uk>
 5 * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 6 * Patrick McHardy <kaber@trash.net>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
 12 *
13 * Tue Jun 26 14:36:48 MEST 2001 Herbert "herp" Rosmanith
14 * added netlink_proto_exit
15 * Tue Jan 22 18:32:44 BRST 2002 Arnaldo C. de Melo <acme@conectiva.com.br>
16 * use nlk_sk, as sk->protinfo is on a diet 8)
17 * Fri Jul 22 19:51:12 MEST 2005 Harald Welte <laforge@gnumonks.org>
18 * - inc module use count of module that owns
19 * the kernel socket in case userspace opens
20 * socket of same protocol
21 * - remove all module support, since netlink is
22 * mandatory if CONFIG_NET=y these days
23 */
24
25#include <linux/module.h>
26
 27#include <linux/capability.h>
28#include <linux/kernel.h>
29#include <linux/init.h>
30#include <linux/signal.h>
31#include <linux/sched.h>
32#include <linux/errno.h>
33#include <linux/string.h>
34#include <linux/stat.h>
35#include <linux/socket.h>
36#include <linux/un.h>
37#include <linux/fcntl.h>
38#include <linux/termios.h>
39#include <linux/sockios.h>
40#include <linux/net.h>
41#include <linux/fs.h>
42#include <linux/slab.h>
43#include <asm/uaccess.h>
44#include <linux/skbuff.h>
45#include <linux/netdevice.h>
46#include <linux/rtnetlink.h>
47#include <linux/proc_fs.h>
48#include <linux/seq_file.h>
49#include <linux/notifier.h>
50#include <linux/security.h>
51#include <linux/jhash.h>
52#include <linux/jiffies.h>
53#include <linux/random.h>
54#include <linux/bitops.h>
55#include <linux/mm.h>
56#include <linux/types.h>
 57#include <linux/audit.h>
 58#include <linux/mutex.h>
 59#include <linux/vmalloc.h>
 60#include <linux/if_arp.h>
 61#include <linux/rhashtable.h>
 62#include <asm/cacheflush.h>
 63#include <linux/hash.h>
 64
 65#include <net/net_namespace.h>
66#include <net/sock.h>
67#include <net/scm.h>
 68#include <net/netlink.h>
 69
 70#include "af_netlink.h"
 71
72struct listeners {
73 struct rcu_head rcu;
74 unsigned long masks[0];
75};
76
77/* state bits */
78#define NETLINK_CONGESTED 0x0
79
80/* flags */
 81#define NETLINK_KERNEL_SOCKET 0x1
 82#define NETLINK_RECV_PKTINFO 0x2
 83#define NETLINK_BROADCAST_SEND_ERROR 0x4
 84#define NETLINK_RECV_NO_ENOBUFS 0x8
 85
 86static inline int netlink_is_kernel(struct sock *sk)
87{
88 return nlk_sk(sk)->flags & NETLINK_KERNEL_SOCKET;
89}
90
91struct netlink_table *nl_table;
92EXPORT_SYMBOL_GPL(nl_table);
93
94static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);
95
96static int netlink_dump(struct sock *sk);
 97static void netlink_skb_destructor(struct sk_buff *skb);
 98
99/* nl_table locking explained:
100 * Lookup and traversal are protected with nl_sk_hash_lock or nl_table_lock
101 * combined with an RCU read-side lock. Insertion and removal are protected
102 * with nl_sk_hash_lock while using RCU list modification primitives and may
103 * run in parallel to nl_table_lock protected lookups. Destruction of the
104 * Netlink socket may only occur *after* nl_table_lock has been acquired
105 * either during or after the socket has been removed from the list.
106 */
107DEFINE_RWLOCK(nl_table_lock);
108EXPORT_SYMBOL_GPL(nl_table_lock);
109static atomic_t nl_table_users = ATOMIC_INIT(0);
110
111#define nl_deref_protected(X) rcu_dereference_protected(X, lockdep_is_held(&nl_table_lock));
112
113/* Protects netlink socket hash table mutations */
114DEFINE_MUTEX(nl_sk_hash_lock);
 115EXPORT_SYMBOL_GPL(nl_sk_hash_lock);
 116
 117static ATOMIC_NOTIFIER_HEAD(netlink_chain);
 118
119static DEFINE_SPINLOCK(netlink_tap_lock);
120static struct list_head netlink_tap_all __read_mostly;
121
 122static inline u32 netlink_group_mask(u32 group)
123{
124 return group ? 1 << (group - 1) : 0;
125}
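/* For example, group 3 maps to mask 1 << 2 == 0x4, while group 0 (no group)
 * yields an empty mask and therefore never matches a listener bitmap.
 */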
126
127int netlink_add_tap(struct netlink_tap *nt)
128{
129 if (unlikely(nt->dev->type != ARPHRD_NETLINK))
130 return -EINVAL;
131
132 spin_lock(&netlink_tap_lock);
133 list_add_rcu(&nt->list, &netlink_tap_all);
134 spin_unlock(&netlink_tap_lock);
135
 136 __module_get(nt->module);
137
138 return 0;
139}
140EXPORT_SYMBOL_GPL(netlink_add_tap);
141
 142static int __netlink_remove_tap(struct netlink_tap *nt)
143{
144 bool found = false;
145 struct netlink_tap *tmp;
146
147 spin_lock(&netlink_tap_lock);
148
149 list_for_each_entry(tmp, &netlink_tap_all, list) {
150 if (nt == tmp) {
151 list_del_rcu(&nt->list);
152 found = true;
153 goto out;
154 }
155 }
156
157 pr_warn("__netlink_remove_tap: %p not found\n", nt);
158out:
159 spin_unlock(&netlink_tap_lock);
160
161 if (found && nt->module)
162 module_put(nt->module);
163
164 return found ? 0 : -ENODEV;
165}
166
167int netlink_remove_tap(struct netlink_tap *nt)
168{
169 int ret;
170
171 ret = __netlink_remove_tap(nt);
172 synchronize_net();
173
174 return ret;
175}
176EXPORT_SYMBOL_GPL(netlink_remove_tap);
177
178static bool netlink_filter_tap(const struct sk_buff *skb)
179{
180 struct sock *sk = skb->sk;
181
182 /* We take the more conservative approach and
183 * whitelist socket protocols that may pass.
184 */
185 switch (sk->sk_protocol) {
186 case NETLINK_ROUTE:
187 case NETLINK_USERSOCK:
188 case NETLINK_SOCK_DIAG:
189 case NETLINK_NFLOG:
190 case NETLINK_XFRM:
191 case NETLINK_FIB_LOOKUP:
192 case NETLINK_NETFILTER:
193 case NETLINK_GENERIC:
 194 return true;
195 }
196
 197 return false;
198}
199
200static int __netlink_deliver_tap_skb(struct sk_buff *skb,
201 struct net_device *dev)
202{
203 struct sk_buff *nskb;
 204 struct sock *sk = skb->sk;
205 int ret = -ENOMEM;
206
207 dev_hold(dev);
208 nskb = skb_clone(skb, GFP_ATOMIC);
209 if (nskb) {
210 nskb->dev = dev;
 211 nskb->protocol = htons((u16) sk->sk_protocol);
212 nskb->pkt_type = netlink_is_kernel(sk) ?
213 PACKET_KERNEL : PACKET_USER;
 214 skb_reset_network_header(nskb);
215 ret = dev_queue_xmit(nskb);
216 if (unlikely(ret > 0))
217 ret = net_xmit_errno(ret);
218 }
219
220 dev_put(dev);
221 return ret;
222}
223
224static void __netlink_deliver_tap(struct sk_buff *skb)
225{
226 int ret;
227 struct netlink_tap *tmp;
228
229 if (!netlink_filter_tap(skb))
230 return;
231
232 list_for_each_entry_rcu(tmp, &netlink_tap_all, list) {
233 ret = __netlink_deliver_tap_skb(skb, tmp->dev);
234 if (unlikely(ret))
235 break;
236 }
237}
238
239static void netlink_deliver_tap(struct sk_buff *skb)
240{
241 rcu_read_lock();
242
243 if (unlikely(!list_empty(&netlink_tap_all)))
244 __netlink_deliver_tap(skb);
245
246 rcu_read_unlock();
247}
248
249static void netlink_deliver_tap_kernel(struct sock *dst, struct sock *src,
250 struct sk_buff *skb)
251{
252 if (!(netlink_is_kernel(dst) && netlink_is_kernel(src)))
253 netlink_deliver_tap(skb);
254}
255
256static void netlink_overrun(struct sock *sk)
257{
258 struct netlink_sock *nlk = nlk_sk(sk);
259
260 if (!(nlk->flags & NETLINK_RECV_NO_ENOBUFS)) {
261 if (!test_and_set_bit(NETLINK_CONGESTED, &nlk_sk(sk)->state)) {
262 sk->sk_err = ENOBUFS;
263 sk->sk_error_report(sk);
264 }
265 }
266 atomic_inc(&sk->sk_drops);
267}
268
269static void netlink_rcv_wake(struct sock *sk)
270{
271 struct netlink_sock *nlk = nlk_sk(sk);
272
273 if (skb_queue_empty(&sk->sk_receive_queue))
274 clear_bit(NETLINK_CONGESTED, &nlk->state);
275 if (!test_bit(NETLINK_CONGESTED, &nlk->state))
276 wake_up_interruptible(&nlk->wait);
277}
278
 279#ifdef CONFIG_NETLINK_MMAP
280static bool netlink_skb_is_mmaped(const struct sk_buff *skb)
281{
282 return NETLINK_CB(skb).flags & NETLINK_SKB_MMAPED;
283}
284
285static bool netlink_rx_is_mmaped(struct sock *sk)
286{
287 return nlk_sk(sk)->rx_ring.pg_vec != NULL;
288}
289
290static bool netlink_tx_is_mmaped(struct sock *sk)
291{
292 return nlk_sk(sk)->tx_ring.pg_vec != NULL;
293}
294
295static __pure struct page *pgvec_to_page(const void *addr)
296{
297 if (is_vmalloc_addr(addr))
298 return vmalloc_to_page(addr);
299 else
300 return virt_to_page(addr);
301}
302
303static void free_pg_vec(void **pg_vec, unsigned int order, unsigned int len)
304{
305 unsigned int i;
306
307 for (i = 0; i < len; i++) {
308 if (pg_vec[i] != NULL) {
309 if (is_vmalloc_addr(pg_vec[i]))
310 vfree(pg_vec[i]);
311 else
312 free_pages((unsigned long)pg_vec[i], order);
313 }
314 }
315 kfree(pg_vec);
316}
317
318static void *alloc_one_pg_vec_page(unsigned long order)
319{
320 void *buffer;
321 gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP | __GFP_ZERO |
322 __GFP_NOWARN | __GFP_NORETRY;
323
324 buffer = (void *)__get_free_pages(gfp_flags, order);
325 if (buffer != NULL)
326 return buffer;
327
328 buffer = vzalloc((1 << order) * PAGE_SIZE);
329 if (buffer != NULL)
330 return buffer;
331
332 gfp_flags &= ~__GFP_NORETRY;
333 return (void *)__get_free_pages(gfp_flags, order);
334}
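/* Allocation strategy above: try physically contiguous pages first without
 * retrying or warning, fall back to vmalloc, and only then retry the page
 * allocator with __GFP_NORETRY cleared.
 */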
335
336static void **alloc_pg_vec(struct netlink_sock *nlk,
337 struct nl_mmap_req *req, unsigned int order)
338{
339 unsigned int block_nr = req->nm_block_nr;
340 unsigned int i;
 341 void **pg_vec;
342
343 pg_vec = kcalloc(block_nr, sizeof(void *), GFP_KERNEL);
344 if (pg_vec == NULL)
345 return NULL;
346
347 for (i = 0; i < block_nr; i++) {
 348 pg_vec[i] = alloc_one_pg_vec_page(order);
349 if (pg_vec[i] == NULL)
350 goto err1;
351 }
352
353 return pg_vec;
354err1:
355 free_pg_vec(pg_vec, order, block_nr);
356 return NULL;
357}
358
359static int netlink_set_ring(struct sock *sk, struct nl_mmap_req *req,
360 bool closing, bool tx_ring)
361{
362 struct netlink_sock *nlk = nlk_sk(sk);
363 struct netlink_ring *ring;
364 struct sk_buff_head *queue;
365 void **pg_vec = NULL;
366 unsigned int order = 0;
367 int err;
368
369 ring = tx_ring ? &nlk->tx_ring : &nlk->rx_ring;
370 queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
371
372 if (!closing) {
373 if (atomic_read(&nlk->mapped))
374 return -EBUSY;
375 if (atomic_read(&ring->pending))
376 return -EBUSY;
377 }
378
379 if (req->nm_block_nr) {
380 if (ring->pg_vec != NULL)
381 return -EBUSY;
382
383 if ((int)req->nm_block_size <= 0)
384 return -EINVAL;
 385 if (!PAGE_ALIGNED(req->nm_block_size))
386 return -EINVAL;
387 if (req->nm_frame_size < NL_MMAP_HDRLEN)
388 return -EINVAL;
389 if (!IS_ALIGNED(req->nm_frame_size, NL_MMAP_MSG_ALIGNMENT))
390 return -EINVAL;
391
392 ring->frames_per_block = req->nm_block_size /
393 req->nm_frame_size;
394 if (ring->frames_per_block == 0)
395 return -EINVAL;
396 if (ring->frames_per_block * req->nm_block_nr !=
397 req->nm_frame_nr)
398 return -EINVAL;
399
400 order = get_order(req->nm_block_size);
401 pg_vec = alloc_pg_vec(nlk, req, order);
402 if (pg_vec == NULL)
403 return -ENOMEM;
404 } else {
405 if (req->nm_frame_nr)
406 return -EINVAL;
407 }
408
409 err = -EBUSY;
410 mutex_lock(&nlk->pg_vec_lock);
411 if (closing || atomic_read(&nlk->mapped) == 0) {
412 err = 0;
413 spin_lock_bh(&queue->lock);
414
415 ring->frame_max = req->nm_frame_nr - 1;
416 ring->head = 0;
417 ring->frame_size = req->nm_frame_size;
418 ring->pg_vec_pages = req->nm_block_size / PAGE_SIZE;
419
420 swap(ring->pg_vec_len, req->nm_block_nr);
421 swap(ring->pg_vec_order, order);
422 swap(ring->pg_vec, pg_vec);
423
424 __skb_queue_purge(queue);
425 spin_unlock_bh(&queue->lock);
426
427 WARN_ON(atomic_read(&nlk->mapped));
428 }
429 mutex_unlock(&nlk->pg_vec_lock);
430
431 if (pg_vec)
432 free_pg_vec(pg_vec, order, req->nm_block_nr);
433 return err;
434}
435
436static void netlink_mm_open(struct vm_area_struct *vma)
437{
438 struct file *file = vma->vm_file;
439 struct socket *sock = file->private_data;
440 struct sock *sk = sock->sk;
441
442 if (sk)
443 atomic_inc(&nlk_sk(sk)->mapped);
444}
445
446static void netlink_mm_close(struct vm_area_struct *vma)
447{
448 struct file *file = vma->vm_file;
449 struct socket *sock = file->private_data;
450 struct sock *sk = sock->sk;
451
452 if (sk)
453 atomic_dec(&nlk_sk(sk)->mapped);
454}
455
456static const struct vm_operations_struct netlink_mmap_ops = {
457 .open = netlink_mm_open,
458 .close = netlink_mm_close,
459};
460
461static int netlink_mmap(struct file *file, struct socket *sock,
462 struct vm_area_struct *vma)
463{
464 struct sock *sk = sock->sk;
465 struct netlink_sock *nlk = nlk_sk(sk);
466 struct netlink_ring *ring;
467 unsigned long start, size, expected;
468 unsigned int i;
469 int err = -EINVAL;
470
471 if (vma->vm_pgoff)
472 return -EINVAL;
473
474 mutex_lock(&nlk->pg_vec_lock);
475
476 expected = 0;
477 for (ring = &nlk->rx_ring; ring <= &nlk->tx_ring; ring++) {
478 if (ring->pg_vec == NULL)
479 continue;
480 expected += ring->pg_vec_len * ring->pg_vec_pages * PAGE_SIZE;
481 }
482
483 if (expected == 0)
484 goto out;
485
486 size = vma->vm_end - vma->vm_start;
487 if (size != expected)
488 goto out;
489
490 start = vma->vm_start;
491 for (ring = &nlk->rx_ring; ring <= &nlk->tx_ring; ring++) {
492 if (ring->pg_vec == NULL)
493 continue;
494
495 for (i = 0; i < ring->pg_vec_len; i++) {
496 struct page *page;
497 void *kaddr = ring->pg_vec[i];
498 unsigned int pg_num;
499
500 for (pg_num = 0; pg_num < ring->pg_vec_pages; pg_num++) {
501 page = pgvec_to_page(kaddr);
502 err = vm_insert_page(vma, start, page);
503 if (err < 0)
504 goto out;
505 start += PAGE_SIZE;
506 kaddr += PAGE_SIZE;
507 }
508 }
509 }
510
511 atomic_inc(&nlk->mapped);
512 vma->vm_ops = &netlink_mmap_ops;
513 err = 0;
514out:
515 mutex_unlock(&nlk->pg_vec_lock);
 516 return err;
 517}
 518
 519static void netlink_frame_flush_dcache(const struct nl_mmap_hdr *hdr, unsigned int nm_len)
520{
521#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
522 struct page *p_start, *p_end;
523
524 /* First page is flushed through netlink_{get,set}_status */
525 p_start = pgvec_to_page(hdr + PAGE_SIZE);
 526 p_end = pgvec_to_page((void *)hdr + NL_MMAP_HDRLEN + nm_len - 1);
527 while (p_start <= p_end) {
528 flush_dcache_page(p_start);
529 p_start++;
530 }
531#endif
532}
533
534static enum nl_mmap_status netlink_get_status(const struct nl_mmap_hdr *hdr)
535{
536 smp_rmb();
537 flush_dcache_page(pgvec_to_page(hdr));
538 return hdr->nm_status;
539}
540
541static void netlink_set_status(struct nl_mmap_hdr *hdr,
542 enum nl_mmap_status status)
543{
 544 smp_mb();
545 hdr->nm_status = status;
546 flush_dcache_page(pgvec_to_page(hdr));
547}
548
549static struct nl_mmap_hdr *
550__netlink_lookup_frame(const struct netlink_ring *ring, unsigned int pos)
551{
552 unsigned int pg_vec_pos, frame_off;
553
554 pg_vec_pos = pos / ring->frames_per_block;
555 frame_off = pos % ring->frames_per_block;
556
557 return ring->pg_vec[pg_vec_pos] + (frame_off * ring->frame_size);
558}
559
560static struct nl_mmap_hdr *
561netlink_lookup_frame(const struct netlink_ring *ring, unsigned int pos,
562 enum nl_mmap_status status)
563{
564 struct nl_mmap_hdr *hdr;
565
566 hdr = __netlink_lookup_frame(ring, pos);
567 if (netlink_get_status(hdr) != status)
568 return NULL;
569
570 return hdr;
571}
572
573static struct nl_mmap_hdr *
574netlink_current_frame(const struct netlink_ring *ring,
575 enum nl_mmap_status status)
576{
577 return netlink_lookup_frame(ring, ring->head, status);
578}
579
580static struct nl_mmap_hdr *
581netlink_previous_frame(const struct netlink_ring *ring,
582 enum nl_mmap_status status)
583{
584 unsigned int prev;
585
586 prev = ring->head ? ring->head - 1 : ring->frame_max;
587 return netlink_lookup_frame(ring, prev, status);
588}
589
590static void netlink_increment_head(struct netlink_ring *ring)
591{
592 ring->head = ring->head != ring->frame_max ? ring->head + 1 : 0;
593}
594
595static void netlink_forward_ring(struct netlink_ring *ring)
596{
597 unsigned int head = ring->head, pos = head;
598 const struct nl_mmap_hdr *hdr;
599
600 do {
601 hdr = __netlink_lookup_frame(ring, pos);
602 if (hdr->nm_status == NL_MMAP_STATUS_UNUSED)
603 break;
604 if (hdr->nm_status != NL_MMAP_STATUS_SKIP)
605 break;
606 netlink_increment_head(ring);
607 } while (ring->head != head);
608}
609
610static bool netlink_dump_space(struct netlink_sock *nlk)
611{
612 struct netlink_ring *ring = &nlk->rx_ring;
613 struct nl_mmap_hdr *hdr;
614 unsigned int n;
615
616 hdr = netlink_current_frame(ring, NL_MMAP_STATUS_UNUSED);
617 if (hdr == NULL)
618 return false;
619
620 n = ring->head + ring->frame_max / 2;
621 if (n > ring->frame_max)
622 n -= ring->frame_max;
623
624 hdr = __netlink_lookup_frame(ring, n);
625
626 return hdr->nm_status == NL_MMAP_STATUS_UNUSED;
627}
628
629static unsigned int netlink_poll(struct file *file, struct socket *sock,
630 poll_table *wait)
631{
632 struct sock *sk = sock->sk;
633 struct netlink_sock *nlk = nlk_sk(sk);
634 unsigned int mask;
 635 int err;
 636
637 if (nlk->rx_ring.pg_vec != NULL) {
638 /* Memory mapped sockets don't call recvmsg(), so flow control
639 * for dumps is performed here. A dump is allowed to continue
640 * if at least half the ring is unused.
641 */
 642 while (nlk->cb_running && netlink_dump_space(nlk)) {
643 err = netlink_dump(sk);
644 if (err < 0) {
 645 sk->sk_err = -err;
646 sk->sk_error_report(sk);
647 break;
648 }
649 }
650 netlink_rcv_wake(sk);
651 }
 652
653 mask = datagram_poll(file, sock, wait);
654
655 spin_lock_bh(&sk->sk_receive_queue.lock);
656 if (nlk->rx_ring.pg_vec) {
657 netlink_forward_ring(&nlk->rx_ring);
658 if (!netlink_previous_frame(&nlk->rx_ring, NL_MMAP_STATUS_UNUSED))
659 mask |= POLLIN | POLLRDNORM;
660 }
661 spin_unlock_bh(&sk->sk_receive_queue.lock);
662
663 spin_lock_bh(&sk->sk_write_queue.lock);
664 if (nlk->tx_ring.pg_vec) {
665 if (netlink_current_frame(&nlk->tx_ring, NL_MMAP_STATUS_UNUSED))
666 mask |= POLLOUT | POLLWRNORM;
667 }
668 spin_unlock_bh(&sk->sk_write_queue.lock);
669
670 return mask;
671}
672
673static struct nl_mmap_hdr *netlink_mmap_hdr(struct sk_buff *skb)
674{
675 return (struct nl_mmap_hdr *)(skb->head - NL_MMAP_HDRLEN);
676}
677
678static void netlink_ring_setup_skb(struct sk_buff *skb, struct sock *sk,
679 struct netlink_ring *ring,
680 struct nl_mmap_hdr *hdr)
681{
682 unsigned int size;
683 void *data;
684
685 size = ring->frame_size - NL_MMAP_HDRLEN;
686 data = (void *)hdr + NL_MMAP_HDRLEN;
687
688 skb->head = data;
689 skb->data = data;
690 skb_reset_tail_pointer(skb);
691 skb->end = skb->tail + size;
692 skb->len = 0;
693
694 skb->destructor = netlink_skb_destructor;
695 NETLINK_CB(skb).flags |= NETLINK_SKB_MMAPED;
696 NETLINK_CB(skb).sk = sk;
697}
698
699static int netlink_mmap_sendmsg(struct sock *sk, struct msghdr *msg,
700 u32 dst_portid, u32 dst_group,
701 struct sock_iocb *siocb)
702{
703 struct netlink_sock *nlk = nlk_sk(sk);
704 struct netlink_ring *ring;
705 struct nl_mmap_hdr *hdr;
706 struct sk_buff *skb;
707 unsigned int maxlen;
708 int err = 0, len = 0;
709
710 mutex_lock(&nlk->pg_vec_lock);
711
712 ring = &nlk->tx_ring;
713 maxlen = ring->frame_size - NL_MMAP_HDRLEN;
714
715 do {
716 unsigned int nm_len;
717
718 hdr = netlink_current_frame(ring, NL_MMAP_STATUS_VALID);
719 if (hdr == NULL) {
720 if (!(msg->msg_flags & MSG_DONTWAIT) &&
721 atomic_read(&nlk->tx_ring.pending))
722 schedule();
723 continue;
724 }
725
726 nm_len = ACCESS_ONCE(hdr->nm_len);
727 if (nm_len > maxlen) {
728 err = -EINVAL;
729 goto out;
730 }
731
 732 netlink_frame_flush_dcache(hdr, nm_len);
 733
734 skb = alloc_skb(nm_len, GFP_KERNEL);
735 if (skb == NULL) {
736 err = -ENOBUFS;
737 goto out;
5fd96123 738 }
739 __skb_put(skb, nm_len);
740 memcpy(skb->data, (void *)hdr + NL_MMAP_HDRLEN, nm_len);
741 netlink_set_status(hdr, NL_MMAP_STATUS_UNUSED);
742
743 netlink_increment_head(ring);
744
745 NETLINK_CB(skb).portid = nlk->portid;
746 NETLINK_CB(skb).dst_group = dst_group;
747 NETLINK_CB(skb).creds = siocb->scm->creds;
748
749 err = security_netlink_send(sk, skb);
750 if (err) {
751 kfree_skb(skb);
752 goto out;
753 }
754
755 if (unlikely(dst_group)) {
756 atomic_inc(&skb->users);
757 netlink_broadcast(sk, skb, dst_portid, dst_group,
758 GFP_KERNEL);
759 }
760 err = netlink_unicast(sk, skb, dst_portid,
761 msg->msg_flags & MSG_DONTWAIT);
762 if (err < 0)
763 goto out;
764 len += err;
765
766 } while (hdr != NULL ||
767 (!(msg->msg_flags & MSG_DONTWAIT) &&
768 atomic_read(&nlk->tx_ring.pending)));
769
770 if (len > 0)
771 err = len;
772out:
773 mutex_unlock(&nlk->pg_vec_lock);
774 return err;
775}
776
777static void netlink_queue_mmaped_skb(struct sock *sk, struct sk_buff *skb)
778{
779 struct nl_mmap_hdr *hdr;
780
781 hdr = netlink_mmap_hdr(skb);
782 hdr->nm_len = skb->len;
783 hdr->nm_group = NETLINK_CB(skb).dst_group;
784 hdr->nm_pid = NETLINK_CB(skb).creds.pid;
785 hdr->nm_uid = from_kuid(sk_user_ns(sk), NETLINK_CB(skb).creds.uid);
786 hdr->nm_gid = from_kgid(sk_user_ns(sk), NETLINK_CB(skb).creds.gid);
 787 netlink_frame_flush_dcache(hdr, hdr->nm_len);
788 netlink_set_status(hdr, NL_MMAP_STATUS_VALID);
789
790 NETLINK_CB(skb).flags |= NETLINK_SKB_DELIVERED;
791 kfree_skb(skb);
792}
793
794static void netlink_ring_set_copied(struct sock *sk, struct sk_buff *skb)
795{
796 struct netlink_sock *nlk = nlk_sk(sk);
797 struct netlink_ring *ring = &nlk->rx_ring;
798 struct nl_mmap_hdr *hdr;
799
800 spin_lock_bh(&sk->sk_receive_queue.lock);
801 hdr = netlink_current_frame(ring, NL_MMAP_STATUS_UNUSED);
802 if (hdr == NULL) {
803 spin_unlock_bh(&sk->sk_receive_queue.lock);
804 kfree_skb(skb);
 805 netlink_overrun(sk);
806 return;
807 }
808 netlink_increment_head(ring);
809 __skb_queue_tail(&sk->sk_receive_queue, skb);
810 spin_unlock_bh(&sk->sk_receive_queue.lock);
811
812 hdr->nm_len = skb->len;
813 hdr->nm_group = NETLINK_CB(skb).dst_group;
814 hdr->nm_pid = NETLINK_CB(skb).creds.pid;
815 hdr->nm_uid = from_kuid(sk_user_ns(sk), NETLINK_CB(skb).creds.uid);
816 hdr->nm_gid = from_kgid(sk_user_ns(sk), NETLINK_CB(skb).creds.gid);
817 netlink_set_status(hdr, NL_MMAP_STATUS_COPY);
818}
819
 820#else /* CONFIG_NETLINK_MMAP */
 821#define netlink_skb_is_mmaped(skb) false
 822#define netlink_rx_is_mmaped(sk) false
 823#define netlink_tx_is_mmaped(sk) false
 824#define netlink_mmap sock_no_mmap
 825#define netlink_poll datagram_poll
 826#define netlink_mmap_sendmsg(sk, msg, dst_portid, dst_group, siocb) 0
827#endif /* CONFIG_NETLINK_MMAP */
828
829static void netlink_skb_destructor(struct sk_buff *skb)
830{
831#ifdef CONFIG_NETLINK_MMAP
832 struct nl_mmap_hdr *hdr;
833 struct netlink_ring *ring;
834 struct sock *sk;
835
836 /* If a packet from the kernel to userspace was freed because of an
837 * error without being delivered to userspace, the kernel must reset
838 * the status. In the direction userspace to kernel, the status is
839 * always reset here after the packet was processed and freed.
840 */
841 if (netlink_skb_is_mmaped(skb)) {
842 hdr = netlink_mmap_hdr(skb);
843 sk = NETLINK_CB(skb).sk;
844
845 if (NETLINK_CB(skb).flags & NETLINK_SKB_TX) {
846 netlink_set_status(hdr, NL_MMAP_STATUS_UNUSED);
847 ring = &nlk_sk(sk)->tx_ring;
848 } else {
849 if (!(NETLINK_CB(skb).flags & NETLINK_SKB_DELIVERED)) {
850 hdr->nm_len = 0;
851 netlink_set_status(hdr, NL_MMAP_STATUS_VALID);
852 }
853 ring = &nlk_sk(sk)->rx_ring;
 854 }
855
856 WARN_ON(atomic_read(&ring->pending) == 0);
857 atomic_dec(&ring->pending);
858 sock_put(sk);
859
 860 skb->head = NULL;
861 }
862#endif
 863 if (is_vmalloc_addr(skb->head)) {
864 if (!skb->cloned ||
865 !atomic_dec_return(&(skb_shinfo(skb)->dataref)))
866 vfree(skb->head);
867
868 skb->head = NULL;
869 }
870 if (skb->sk != NULL)
871 sock_rfree(skb);
872}
873
874static void netlink_skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
875{
876 WARN_ON(skb->sk != NULL);
877 skb->sk = sk;
878 skb->destructor = netlink_skb_destructor;
879 atomic_add(skb->truesize, &sk->sk_rmem_alloc);
880 sk_mem_charge(sk, skb->truesize);
881}
882
883static void netlink_sock_destruct(struct sock *sk)
884{
885 struct netlink_sock *nlk = nlk_sk(sk);
886
887 if (nlk->cb_running) {
888 if (nlk->cb.done)
889 nlk->cb.done(&nlk->cb);
 890
891 module_put(nlk->cb.module);
892 kfree_skb(nlk->cb.skb);
893 }
894
 895 skb_queue_purge(&sk->sk_receive_queue);
896#ifdef CONFIG_NETLINK_MMAP
897 if (1) {
898 struct nl_mmap_req req;
899
900 memset(&req, 0, sizeof(req));
901 if (nlk->rx_ring.pg_vec)
902 netlink_set_ring(sk, &req, true, false);
903 memset(&req, 0, sizeof(req));
904 if (nlk->tx_ring.pg_vec)
905 netlink_set_ring(sk, &req, true, true);
906 }
907#endif /* CONFIG_NETLINK_MMAP */
908
909 if (!sock_flag(sk, SOCK_DEAD)) {
 910 printk(KERN_ERR "Freeing alive netlink socket %p\n", sk);
911 return;
912 }
913
914 WARN_ON(atomic_read(&sk->sk_rmem_alloc));
915 WARN_ON(atomic_read(&sk->sk_wmem_alloc));
916 WARN_ON(nlk_sk(sk)->groups);
917}
918
919/* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it is _very_ bad on
920 * SMP. Look, when several writers sleep and reader wakes them up, all but one
921 * immediately hit write lock and grab all the cpus. Exclusive sleep solves
922 * this, _but_ remember, it adds useless work on UP machines.
923 */
924
 925void netlink_table_grab(void)
 926 __acquires(nl_table_lock)
 927{
928 might_sleep();
929
 930 write_lock_irq(&nl_table_lock);
931
932 if (atomic_read(&nl_table_users)) {
933 DECLARE_WAITQUEUE(wait, current);
934
935 add_wait_queue_exclusive(&nl_table_wait, &wait);
 936 for (;;) {
937 set_current_state(TASK_UNINTERRUPTIBLE);
938 if (atomic_read(&nl_table_users) == 0)
939 break;
 940 write_unlock_irq(&nl_table_lock);
 941 schedule();
 942 write_lock_irq(&nl_table_lock);
943 }
944
945 __set_current_state(TASK_RUNNING);
946 remove_wait_queue(&nl_table_wait, &wait);
947 }
948}
949
 950void netlink_table_ungrab(void)
 951 __releases(nl_table_lock)
 952{
 953 write_unlock_irq(&nl_table_lock);
954 wake_up(&nl_table_wait);
955}
956
 957static inline void
958netlink_lock_table(void)
959{
960 /* read_lock() synchronizes us to netlink_table_grab */
961
962 read_lock(&nl_table_lock);
963 atomic_inc(&nl_table_users);
964 read_unlock(&nl_table_lock);
965}
966
 967static inline void
968netlink_unlock_table(void)
969{
970 if (atomic_dec_and_test(&nl_table_users))
971 wake_up(&nl_table_wait);
972}
973
 974struct netlink_compare_arg
 975{
976 struct net *net;
977 u32 portid;
978};
 979
 980static bool netlink_compare(void *ptr, void *arg)
 981{
982 struct netlink_compare_arg *x = arg;
983 struct sock *sk = ptr;
 984
985 return nlk_sk(sk)->portid == x->portid &&
986 net_eq(sock_net(sk), x->net);
987}
988
989static struct sock *__netlink_lookup(struct netlink_table *table, u32 portid,
990 struct net *net)
 991{
992 struct netlink_compare_arg arg = {
993 .net = net,
994 .portid = portid,
995 };
 996
 997 return rhashtable_lookup_compare(&table->hash, &portid,
 998 &netlink_compare, &arg);
999}
1000
 1001static struct sock *netlink_lookup(struct net *net, int protocol, u32 portid)
 1002{
1003 struct netlink_table *table = &nl_table[protocol];
1004 struct sock *sk;
 1005
 1006 read_lock(&nl_table_lock);
1007 rcu_read_lock();
1008 sk = __netlink_lookup(table, portid, net);
1009 if (sk)
1010 sock_hold(sk);
1011 rcu_read_unlock();
 1012 read_unlock(&nl_table_lock);
 1013
 1014 return sk;
1015}
1016
 1017static const struct proto_ops netlink_ops;
 1018
1019static void
1020netlink_update_listeners(struct sock *sk)
1021{
1022 struct netlink_table *tbl = &nl_table[sk->sk_protocol];
1023 unsigned long mask;
1024 unsigned int i;
1025 struct listeners *listeners;
1026
1027 listeners = nl_deref_protected(tbl->listeners);
1028 if (!listeners)
1029 return;
 1030
 1031 for (i = 0; i < NLGRPLONGS(tbl->groups); i++) {
 1032 mask = 0;
 1033 sk_for_each_bound(sk, &tbl->mc_list) {
1034 if (i < NLGRPLONGS(nlk_sk(sk)->ngroups))
1035 mask |= nlk_sk(sk)->groups[i];
1036 }
 1037 listeners->masks[i] = mask;
1038 }
1039 /* this function is only called with the netlink table "grabbed", which
1040 * makes sure updates are visible before bind or setsockopt return. */
1041}
1042
 1043static int netlink_insert(struct sock *sk, struct net *net, u32 portid)
 1044{
 1045 struct netlink_table *table = &nl_table[sk->sk_protocol];
 1046 int err = -EADDRINUSE;
 1047
1048 mutex_lock(&nl_sk_hash_lock);
1049 if (__netlink_lookup(table, portid, net))
1050 goto err;
1051
1052 err = -EBUSY;
 1053 if (nlk_sk(sk)->portid)
1054 goto err;
1055
1056 err = -ENOMEM;
1057 if (BITS_PER_LONG > 32 &&
1058 unlikely(atomic_read(&table->hash.nelems) >= UINT_MAX))
1059 goto err;
1060
 1061 nlk_sk(sk)->portid = portid;
 1062 sock_hold(sk);
 1063 rhashtable_insert(&table->hash, &nlk_sk(sk)->node);
 1064 err = 0;
 1065err:
 1066 mutex_unlock(&nl_sk_hash_lock);
1067 return err;
1068}
1069
1070static void netlink_remove(struct sock *sk)
1071{
1072 struct netlink_table *table;
1073
1074 mutex_lock(&nl_sk_hash_lock);
1075 table = &nl_table[sk->sk_protocol];
 1076 if (rhashtable_remove(&table->hash, &nlk_sk(sk)->node)) {
1077 WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
1078 __sock_put(sk);
1079 }
1080 mutex_unlock(&nl_sk_hash_lock);
1081
 1082 netlink_table_grab();
 1083 if (nlk_sk(sk)->subscriptions) {
 1084 __sk_del_bind_node(sk);
1085 netlink_update_listeners(sk);
1086 }
1087 netlink_table_ungrab();
1088}
1089
1090static struct proto netlink_proto = {
1091 .name = "NETLINK",
1092 .owner = THIS_MODULE,
1093 .obj_size = sizeof(struct netlink_sock),
1094};
1095
1096static int __netlink_create(struct net *net, struct socket *sock,
1097 struct mutex *cb_mutex, int protocol)
1098{
1099 struct sock *sk;
1100 struct netlink_sock *nlk;
1101
1102 sock->ops = &netlink_ops;
1103
 1104 sk = sk_alloc(net, PF_NETLINK, GFP_KERNEL, &netlink_proto);
1105 if (!sk)
1106 return -ENOMEM;
1107
1108 sock_init_data(sock, sk);
1109
1110 nlk = nlk_sk(sk);
 1111 if (cb_mutex) {
 1112 nlk->cb_mutex = cb_mutex;
 1113 } else {
1114 nlk->cb_mutex = &nlk->cb_def_mutex;
1115 mutex_init(nlk->cb_mutex);
1116 }
 1117 init_waitqueue_head(&nlk->wait);
1118#ifdef CONFIG_NETLINK_MMAP
1119 mutex_init(&nlk->pg_vec_lock);
1120#endif
1121
1122 sk->sk_destruct = netlink_sock_destruct;
1123 sk->sk_protocol = protocol;
1124 return 0;
1125}
1126
1127static int netlink_create(struct net *net, struct socket *sock, int protocol,
1128 int kern)
1129{
1130 struct module *module = NULL;
 1131 struct mutex *cb_mutex;
 1132 struct netlink_sock *nlk;
1133 int (*bind)(struct net *net, int group);
1134 void (*unbind)(struct net *net, int group);
 1135 int err = 0;
1136
1137 sock->state = SS_UNCONNECTED;
1138
1139 if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
1140 return -ESOCKTNOSUPPORT;
1141
 1142 if (protocol < 0 || protocol >= MAX_LINKS)
1143 return -EPROTONOSUPPORT;
1144
 1145 netlink_lock_table();
 1146#ifdef CONFIG_MODULES
 1147 if (!nl_table[protocol].registered) {
 1148 netlink_unlock_table();
 1149 request_module("net-pf-%d-proto-%d", PF_NETLINK, protocol);
 1150 netlink_lock_table();
 1151 }
1152#endif
1153 if (nl_table[protocol].registered &&
1154 try_module_get(nl_table[protocol].module))
1155 module = nl_table[protocol].module;
1156 else
1157 err = -EPROTONOSUPPORT;
 1158 cb_mutex = nl_table[protocol].cb_mutex;
 1159 bind = nl_table[protocol].bind;
 1160 unbind = nl_table[protocol].unbind;
 1161 netlink_unlock_table();
 1162
1163 if (err < 0)
1164 goto out;
1165
1166 err = __netlink_create(net, sock, cb_mutex, protocol);
1167 if (err < 0)
1168 goto out_module;
1169
 1170 local_bh_disable();
 1171 sock_prot_inuse_add(net, &netlink_proto, 1);
1172 local_bh_enable();
1173
 1174 nlk = nlk_sk(sock->sk);
 1175 nlk->module = module;
 1176 nlk->netlink_bind = bind;
 1177 nlk->netlink_unbind = unbind;
1178out:
1179 return err;
 1180
1181out_module:
1182 module_put(module);
1183 goto out;
1184}
1185
1186static int netlink_release(struct socket *sock)
1187{
1188 struct sock *sk = sock->sk;
1189 struct netlink_sock *nlk;
1190
1191 if (!sk)
1192 return 0;
1193
1194 netlink_remove(sk);
 1195 sock_orphan(sk);
1196 nlk = nlk_sk(sk);
1197
1198 /*
1199 * OK. Socket is unlinked, any packets that arrive now
1200 * will be purged.
1201 */
 1202
1203 sock->sk = NULL;
1204 wake_up_interruptible_all(&nlk->wait);
1205
1206 skb_queue_purge(&sk->sk_write_queue);
1207
 1208 if (nlk->portid) {
 1209 struct netlink_notify n = {
 1210 .net = sock_net(sk),
 1211 .protocol = sk->sk_protocol,
 1212 .portid = nlk->portid,
 1213 };
1214 atomic_notifier_call_chain(&netlink_chain,
1215 NETLINK_URELEASE, &n);
 1216 }
 1217
 1218 module_put(nlk->module);
 1219
 1220 if (netlink_is_kernel(sk)) {
 1221 netlink_table_grab();
1222 BUG_ON(nl_table[sk->sk_protocol].registered == 0);
1223 if (--nl_table[sk->sk_protocol].registered == 0) {
1224 struct listeners *old;
1225
1226 old = nl_deref_protected(nl_table[sk->sk_protocol].listeners);
1227 RCU_INIT_POINTER(nl_table[sk->sk_protocol].listeners, NULL);
1228 kfree_rcu(old, rcu);
 1229 nl_table[sk->sk_protocol].module = NULL;
 1230 nl_table[sk->sk_protocol].bind = NULL;
 1231 nl_table[sk->sk_protocol].unbind = NULL;
 1232 nl_table[sk->sk_protocol].flags = 0;
1233 nl_table[sk->sk_protocol].registered = 0;
1234 }
 1235 netlink_table_ungrab();
 1236 }
 1237
1238 if (nlk->netlink_unbind) {
1239 int i;
1240
1241 for (i = 0; i < nlk->ngroups; i++)
1242 if (test_bit(i, nlk->groups))
 1243 nlk->netlink_unbind(sock_net(sk), i + 1);
 1244 }
1245 kfree(nlk->groups);
1246 nlk->groups = NULL;
1247
 1248 local_bh_disable();
 1249 sock_prot_inuse_add(sock_net(sk), &netlink_proto, -1);
 1250 local_bh_enable();
1251 sock_put(sk);
1252 return 0;
1253}
1254
1255static int netlink_autobind(struct socket *sock)
1256{
1257 struct sock *sk = sock->sk;
 1258 struct net *net = sock_net(sk);
 1259 struct netlink_table *table = &nl_table[sk->sk_protocol];
 1260 s32 portid = task_tgid_vnr(current);
1261 int err;
1262 static s32 rover = -4097;
1263
1264retry:
1265 cond_resched();
 1266 netlink_table_grab();
1267 rcu_read_lock();
1268 if (__netlink_lookup(table, portid, net)) {
1269 /* Bind collision, search negative portid values. */
1270 portid = rover--;
1271 if (rover > -4097)
1272 rover = -4097;
1273 rcu_read_unlock();
 1274 netlink_table_ungrab();
 1275 goto retry;
 1276 }
 1277 rcu_read_unlock();
 1278 netlink_table_ungrab();
 1279
 1280 err = netlink_insert(sk, net, portid);
1281 if (err == -EADDRINUSE)
1282 goto retry;
1283
1284 /* If 2 threads race to autobind, that is fine. */
1285 if (err == -EBUSY)
1286 err = 0;
1287
1288 return err;
1289}
1290
1291/**
1292 * __netlink_ns_capable - General netlink message capability test
1293 * @nsp: NETLINK_CB of the socket buffer holding a netlink command from userspace.
1294 * @user_ns: The user namespace of the capability to use
1295 * @cap: The capability to use
1296 *
 1297 * Test to see if the opener of the socket we received the message
 1298 * from had the capability @cap when the netlink socket was created
 1299 * and the sender of the message has the capability @cap in the user namespace @user_ns.
1300 */
1301bool __netlink_ns_capable(const struct netlink_skb_parms *nsp,
1302 struct user_namespace *user_ns, int cap)
1303{
1304 return ((nsp->flags & NETLINK_SKB_DST) ||
1305 file_ns_capable(nsp->sk->sk_socket->file, user_ns, cap)) &&
1306 ns_capable(user_ns, cap);
1307}
1308EXPORT_SYMBOL(__netlink_ns_capable);
1309
1310/**
1311 * netlink_ns_capable - General netlink message capability test
1312 * @skb: socket buffer holding a netlink command from userspace
1313 * @user_ns: The user namespace of the capability to use
1314 * @cap: The capability to use
1315 *
 1316 * Test to see if the opener of the socket we received the message
 1317 * from had the capability @cap when the netlink socket was created
 1318 * and the sender of the message has the capability @cap in the user namespace @user_ns.
1319 */
1320bool netlink_ns_capable(const struct sk_buff *skb,
1321 struct user_namespace *user_ns, int cap)
1322{
1323 return __netlink_ns_capable(&NETLINK_CB(skb), user_ns, cap);
1324}
1325EXPORT_SYMBOL(netlink_ns_capable);
1326
1327/**
1328 * netlink_capable - Netlink global message capability test
1329 * @skb: socket buffer holding a netlink command from userspace
1330 * @cap: The capability to use
1331 *
 1332 * Test to see if the opener of the socket we received the message
 1333 * from had the capability @cap when the netlink socket was created
 1334 * and the sender of the message has the capability @cap in all user namespaces.
1335 */
1336bool netlink_capable(const struct sk_buff *skb, int cap)
1337{
1338 return netlink_ns_capable(skb, &init_user_ns, cap);
1339}
1340EXPORT_SYMBOL(netlink_capable);
1341
1342/**
1343 * netlink_net_capable - Netlink network namespace message capability test
1344 * @skb: socket buffer holding a netlink command from userspace
1345 * @cap: The capability to use
1346 *
 1347 * Test to see if the opener of the socket we received the message
 1348 * from had the capability @cap when the netlink socket was created
 1349 * and the sender of the message has the capability @cap over the network
 1350 * namespace of the socket we received the message from.
1351 */
1352bool netlink_net_capable(const struct sk_buff *skb, int cap)
1353{
1354 return netlink_ns_capable(skb, sock_net(skb->sk)->user_ns, cap);
1355}
1356EXPORT_SYMBOL(netlink_net_capable);
1357
 1358static inline int netlink_allowed(const struct socket *sock, unsigned int flag)
 1359{
 1360 return (nl_table[sock->sk->sk_protocol].flags & flag) ||
 1361 ns_capable(sock_net(sock->sk)->user_ns, CAP_NET_ADMIN);
 1362}
 1363
1364static void
1365netlink_update_subscriptions(struct sock *sk, unsigned int subscriptions)
1366{
1367 struct netlink_sock *nlk = nlk_sk(sk);
1368
1369 if (nlk->subscriptions && !subscriptions)
1370 __sk_del_bind_node(sk);
1371 else if (!nlk->subscriptions && subscriptions)
1372 sk_add_bind_node(sk, &nl_table[sk->sk_protocol].mc_list);
1373 nlk->subscriptions = subscriptions;
1374}
1375
 1376static int netlink_realloc_groups(struct sock *sk)
1377{
1378 struct netlink_sock *nlk = nlk_sk(sk);
1379 unsigned int groups;
 1380 unsigned long *new_groups;
1381 int err = 0;
1382
1383 netlink_table_grab();
1384
 1385 groups = nl_table[sk->sk_protocol].groups;
 1386 if (!nl_table[sk->sk_protocol].registered) {
 1387 err = -ENOENT;
1388 goto out_unlock;
1389 }
 1390
1391 if (nlk->ngroups >= groups)
1392 goto out_unlock;
 1393
1394 new_groups = krealloc(nlk->groups, NLGRPSZ(groups), GFP_ATOMIC);
1395 if (new_groups == NULL) {
1396 err = -ENOMEM;
1397 goto out_unlock;
1398 }
 1399 memset((char *)new_groups + NLGRPSZ(nlk->ngroups), 0,
1400 NLGRPSZ(groups) - NLGRPSZ(nlk->ngroups));
1401
1402 nlk->groups = new_groups;
 1403 nlk->ngroups = groups;
1404 out_unlock:
1405 netlink_table_ungrab();
1406 return err;
1407}
1408
 1409static void netlink_undo_bind(int group, long unsigned int groups,
 1410 struct sock *sk)
 1411{
 1412 struct netlink_sock *nlk = nlk_sk(sk);
1413 int undo;
1414
1415 if (!nlk->netlink_unbind)
1416 return;
1417
1418 for (undo = 0; undo < group; undo++)
 1419 if (test_bit(undo, &groups))
 1420 nlk->netlink_unbind(sock_net(sk), undo);
1421}
1422
1423static int netlink_bind(struct socket *sock, struct sockaddr *addr,
1424 int addr_len)
1425{
1426 struct sock *sk = sock->sk;
 1427 struct net *net = sock_net(sk);
1428 struct netlink_sock *nlk = nlk_sk(sk);
1429 struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
1430 int err;
 1431 long unsigned int groups = nladdr->nl_groups;
 1432
1433 if (addr_len < sizeof(struct sockaddr_nl))
1434 return -EINVAL;
1435
1436 if (nladdr->nl_family != AF_NETLINK)
1437 return -EINVAL;
1438
1439 /* Only superuser is allowed to listen multicasts */
 1440 if (groups) {
 1441 if (!netlink_allowed(sock, NL_CFG_F_NONROOT_RECV))
 1442 return -EPERM;
1443 err = netlink_realloc_groups(sk);
1444 if (err)
1445 return err;
 1446 }
 1447
 1448 if (nlk->portid)
 1449 if (nladdr->nl_pid != nlk->portid)
 1450 return -EINVAL;
1451
1452 if (nlk->netlink_bind && groups) {
1453 int group;
1454
1455 for (group = 0; group < nlk->ngroups; group++) {
1456 if (!test_bit(group, &groups))
1457 continue;
 1458 err = nlk->netlink_bind(net, group);
1459 if (!err)
1460 continue;
 1461 netlink_undo_bind(group, groups, sk);
1462 return err;
1463 }
1464 }
1465
1466 if (!nlk->portid) {
 1467 err = nladdr->nl_pid ?
 1468 netlink_insert(sk, net, nladdr->nl_pid) :
 1469 netlink_autobind(sock);
 1470 if (err) {
 1471 netlink_undo_bind(nlk->ngroups, groups, sk);
 1472 return err;
 1473 }
1474 }
1475
 1476 if (!groups && (nlk->groups == NULL || !(u32)nlk->groups[0]))
1477 return 0;
1478
1479 netlink_table_grab();
 1480 netlink_update_subscriptions(sk, nlk->subscriptions +
 1481 hweight32(groups) -
 1482 hweight32(nlk->groups[0]));
 1483 nlk->groups[0] = (nlk->groups[0] & ~0xffffffffUL) | groups;
 1484 netlink_update_listeners(sk);
1485 netlink_table_ungrab();
1486
1487 return 0;
1488}
1489
1490static int netlink_connect(struct socket *sock, struct sockaddr *addr,
1491 int alen, int flags)
1492{
1493 int err = 0;
1494 struct sock *sk = sock->sk;
1495 struct netlink_sock *nlk = nlk_sk(sk);
 1496 struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
 1497
1498 if (alen < sizeof(addr->sa_family))
1499 return -EINVAL;
1500
1501 if (addr->sa_family == AF_UNSPEC) {
1502 sk->sk_state = NETLINK_UNCONNECTED;
 1503 nlk->dst_portid = 0;
 1504 nlk->dst_group = 0;
1505 return 0;
1506 }
1507 if (addr->sa_family != AF_NETLINK)
1508 return -EINVAL;
1509
 1510 if ((nladdr->nl_groups || nladdr->nl_pid) &&
 1511 !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND))
1512 return -EPERM;
1513
 1514 if (!nlk->portid)
1515 err = netlink_autobind(sock);
1516
1517 if (err == 0) {
1518 sk->sk_state = NETLINK_CONNECTED;
 1519 nlk->dst_portid = nladdr->nl_pid;
 1520 nlk->dst_group = ffs(nladdr->nl_groups);
1521 }
1522
1523 return err;
1524}
1525
1526static int netlink_getname(struct socket *sock, struct sockaddr *addr,
1527 int *addr_len, int peer)
1528{
1529 struct sock *sk = sock->sk;
1530 struct netlink_sock *nlk = nlk_sk(sk);
 1531 DECLARE_SOCKADDR(struct sockaddr_nl *, nladdr, addr);
 1532
1533 nladdr->nl_family = AF_NETLINK;
1534 nladdr->nl_pad = 0;
1535 *addr_len = sizeof(*nladdr);
1536
1537 if (peer) {
 1538 nladdr->nl_pid = nlk->dst_portid;
 1539 nladdr->nl_groups = netlink_group_mask(nlk->dst_group);
 1540 } else {
 1541 nladdr->nl_pid = nlk->portid;
 1542 nladdr->nl_groups = nlk->groups ? nlk->groups[0] : 0;
1543 }
1544 return 0;
1545}
1546
 1547static struct sock *netlink_getsockbyportid(struct sock *ssk, u32 portid)
 1548{
1549 struct sock *sock;
1550 struct netlink_sock *nlk;
1551
 1552 sock = netlink_lookup(sock_net(ssk), ssk->sk_protocol, portid);
1553 if (!sock)
1554 return ERR_PTR(-ECONNREFUSED);
1555
1556 /* Don't bother queuing skb if kernel socket has no input function */
1557 nlk = nlk_sk(sock);
 1558 if (sock->sk_state == NETLINK_CONNECTED &&
 1559 nlk->dst_portid != nlk_sk(ssk)->portid) {
1560 sock_put(sock);
1561 return ERR_PTR(-ECONNREFUSED);
1562 }
1563 return sock;
1564}
1565
1566struct sock *netlink_getsockbyfilp(struct file *filp)
1567{
 1568 struct inode *inode = file_inode(filp);
1569 struct sock *sock;
1570
1571 if (!S_ISSOCK(inode->i_mode))
1572 return ERR_PTR(-ENOTSOCK);
1573
1574 sock = SOCKET_I(inode)->sk;
1575 if (sock->sk_family != AF_NETLINK)
1576 return ERR_PTR(-EINVAL);
1577
1578 sock_hold(sock);
1579 return sock;
1580}
1581
1582static struct sk_buff *netlink_alloc_large_skb(unsigned int size,
1583 int broadcast)
1584{
1585 struct sk_buff *skb;
1586 void *data;
1587
 1588 if (size <= NLMSG_GOODSIZE || broadcast)
1589 return alloc_skb(size, GFP_KERNEL);
1590
1591 size = SKB_DATA_ALIGN(size) +
1592 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1593
1594 data = vmalloc(size);
1595 if (data == NULL)
 1596 return NULL;
 1597
1598 skb = build_skb(data, size);
1599 if (skb == NULL)
1600 vfree(data);
1601 else {
1602 skb->head_frag = 0;
1603 skb->destructor = netlink_skb_destructor;
1604 }
1605
1606 return skb;
1607}
1608
1609/*
1610 * Attach a skb to a netlink socket.
1611 * The caller must hold a reference to the destination socket. On error, the
 1612 * reference is dropped. The skb is not sent to the destination; all
 1613 * error checks are performed and memory in the queue is reserved.
1614 * Return values:
1615 * < 0: error. skb freed, reference to sock dropped.
1616 * 0: continue
1617 * 1: repeat lookup - reference dropped while waiting for socket memory.
1618 */
 1619int netlink_attachskb(struct sock *sk, struct sk_buff *skb,
 1620 long *timeo, struct sock *ssk)
1621{
1622 struct netlink_sock *nlk;
1623
1624 nlk = nlk_sk(sk);
1625
1626 if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
1627 test_bit(NETLINK_CONGESTED, &nlk->state)) &&
1628 !netlink_skb_is_mmaped(skb)) {
 1629 DECLARE_WAITQUEUE(wait, current);
 1630 if (!*timeo) {
 1631 if (!ssk || netlink_is_kernel(ssk))
1632 netlink_overrun(sk);
1633 sock_put(sk);
1634 kfree_skb(skb);
1635 return -EAGAIN;
1636 }
1637
1638 __set_current_state(TASK_INTERRUPTIBLE);
1639 add_wait_queue(&nlk->wait, &wait);
1640
1641 if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
 1642 test_bit(NETLINK_CONGESTED, &nlk->state)) &&
 1643 !sock_flag(sk, SOCK_DEAD))
 1644 *timeo = schedule_timeout(*timeo);
1645
1646 __set_current_state(TASK_RUNNING);
1647 remove_wait_queue(&nlk->wait, &wait);
1648 sock_put(sk);
1649
1650 if (signal_pending(current)) {
1651 kfree_skb(skb);
 1652 return sock_intr_errno(*timeo);
1653 }
1654 return 1;
1655 }
 1656 netlink_skb_set_owner_r(skb, sk);
1657 return 0;
1658}
1659
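/* Typical caller pattern (an illustrative sketch; netlink_unicast() further
 * down follows it): repeat the socket lookup when 1 is returned, propagate
 * negative errors, and queue the skb with netlink_sendskb() on 0.
 */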
 1660static int __netlink_sendskb(struct sock *sk, struct sk_buff *skb)
 1661{
1662 int len = skb->len;
1663
1664 netlink_deliver_tap(skb);
1665
1666#ifdef CONFIG_NETLINK_MMAP
1667 if (netlink_skb_is_mmaped(skb))
1668 netlink_queue_mmaped_skb(sk, skb);
1669 else if (netlink_rx_is_mmaped(sk))
1670 netlink_ring_set_copied(sk, skb);
1671 else
1672#endif /* CONFIG_NETLINK_MMAP */
1673 skb_queue_tail(&sk->sk_receive_queue, skb);
 1674 sk->sk_data_ready(sk);
1675 return len;
1676}
1677
1678int netlink_sendskb(struct sock *sk, struct sk_buff *skb)
1679{
1680 int len = __netlink_sendskb(sk, skb);
1681
1682 sock_put(sk);
1683 return len;
1684}
1685
1686void netlink_detachskb(struct sock *sk, struct sk_buff *skb)
1687{
1688 kfree_skb(skb);
1689 sock_put(sk);
1690}
1691
 1692static struct sk_buff *netlink_trim(struct sk_buff *skb, gfp_t allocation)
1693{
1694 int delta;
1695
 1696 WARN_ON(skb->sk != NULL);
1697 if (netlink_skb_is_mmaped(skb))
1698 return skb;
 1699
 1700 delta = skb->end - skb->tail;
 1701 if (is_vmalloc_addr(skb->head) || delta * 2 < skb->truesize)
1702 return skb;
1703
1704 if (skb_shared(skb)) {
1705 struct sk_buff *nskb = skb_clone(skb, allocation);
1706 if (!nskb)
1707 return skb;
 1708 consume_skb(skb);
1709 skb = nskb;
1710 }
1711
1712 if (!pskb_expand_head(skb, 0, -delta, allocation))
1713 skb->truesize -= delta;
1714
1715 return skb;
1716}
1717
1718static int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb,
1719 struct sock *ssk)
1720{
1721 int ret;
1722 struct netlink_sock *nlk = nlk_sk(sk);
1723
1724 ret = -ECONNREFUSED;
1725 if (nlk->netlink_rcv != NULL) {
1726 ret = skb->len;
 1727 netlink_skb_set_owner_r(skb, sk);
 1728 NETLINK_CB(skb).sk = ssk;
 1729 netlink_deliver_tap_kernel(sk, ssk, skb);
 1730 nlk->netlink_rcv(skb);
1731 consume_skb(skb);
1732 } else {
1733 kfree_skb(skb);
 1734 }
1735 sock_put(sk);
1736 return ret;
1737}
1738
1739int netlink_unicast(struct sock *ssk, struct sk_buff *skb,
 1740 u32 portid, int nonblock)
1741{
1742 struct sock *sk;
1743 int err;
1744 long timeo;
1745
1746 skb = netlink_trim(skb, gfp_any());
1747
1748 timeo = sock_sndtimeo(ssk, nonblock);
1749retry:
 1750 sk = netlink_getsockbyportid(ssk, portid);
1751 if (IS_ERR(sk)) {
1752 kfree_skb(skb);
1753 return PTR_ERR(sk);
1754 }
 1755 if (netlink_is_kernel(sk))
 1756 return netlink_unicast_kernel(sk, skb, ssk);
 1757
 1758 if (sk_filter(sk, skb)) {
 1759 err = skb->len;
1760 kfree_skb(skb);
1761 sock_put(sk);
1762 return err;
1763 }
1764
 1765 err = netlink_attachskb(sk, skb, &timeo, ssk);
1766 if (err == 1)
1767 goto retry;
1768 if (err)
1769 return err;
1770
 1771 return netlink_sendskb(sk, skb);
 1772}
 1773EXPORT_SYMBOL(netlink_unicast);
 1774
1775struct sk_buff *netlink_alloc_skb(struct sock *ssk, unsigned int size,
1776 u32 dst_portid, gfp_t gfp_mask)
1777{
1778#ifdef CONFIG_NETLINK_MMAP
1779 struct sock *sk = NULL;
1780 struct sk_buff *skb;
1781 struct netlink_ring *ring;
1782 struct nl_mmap_hdr *hdr;
1783 unsigned int maxlen;
1784
1785 sk = netlink_getsockbyportid(ssk, dst_portid);
1786 if (IS_ERR(sk))
1787 goto out;
1788
1789 ring = &nlk_sk(sk)->rx_ring;
1790 /* fast-path without atomic ops for common case: non-mmaped receiver */
1791 if (ring->pg_vec == NULL)
1792 goto out_put;
1793
1794 if (ring->frame_size - NL_MMAP_HDRLEN < size)
1795 goto out_put;
1796
1797 skb = alloc_skb_head(gfp_mask);
1798 if (skb == NULL)
1799 goto err1;
1800
1801 spin_lock_bh(&sk->sk_receive_queue.lock);
1802 /* check again under lock */
1803 if (ring->pg_vec == NULL)
1804 goto out_free;
1805
 1806 /* check again under lock */
1807 maxlen = ring->frame_size - NL_MMAP_HDRLEN;
1808 if (maxlen < size)
1809 goto out_free;
1810
1811 netlink_forward_ring(ring);
1812 hdr = netlink_current_frame(ring, NL_MMAP_STATUS_UNUSED);
1813 if (hdr == NULL)
1814 goto err2;
1815 netlink_ring_setup_skb(skb, sk, ring, hdr);
1816 netlink_set_status(hdr, NL_MMAP_STATUS_RESERVED);
1817 atomic_inc(&ring->pending);
1818 netlink_increment_head(ring);
1819
1820 spin_unlock_bh(&sk->sk_receive_queue.lock);
1821 return skb;
1822
1823err2:
1824 kfree_skb(skb);
1825 spin_unlock_bh(&sk->sk_receive_queue.lock);
 1826 netlink_overrun(sk);
1827err1:
1828 sock_put(sk);
1829 return NULL;
1830
1831out_free:
1832 kfree_skb(skb);
1833 spin_unlock_bh(&sk->sk_receive_queue.lock);
1834out_put:
1835 sock_put(sk);
1836out:
1837#endif
1838 return alloc_skb(size, gfp_mask);
1839}
1840EXPORT_SYMBOL_GPL(netlink_alloc_skb);
1841
1842int netlink_has_listeners(struct sock *sk, unsigned int group)
1843{
1844 int res = 0;
 1845 struct listeners *listeners;
 1846
 1847 BUG_ON(!netlink_is_kernel(sk));
1848
1849 rcu_read_lock();
1850 listeners = rcu_dereference(nl_table[sk->sk_protocol].listeners);
1851
 1852 if (listeners && group - 1 < nl_table[sk->sk_protocol].groups)
 1853 res = test_bit(group - 1, listeners->masks);
1854
1855 rcu_read_unlock();
1856
1857 return res;
1858}
1859EXPORT_SYMBOL_GPL(netlink_has_listeners);
1860
b57ef81f 1861static int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb)
1da177e4
LT
1862{
1863 struct netlink_sock *nlk = nlk_sk(sk);
1864
1865 if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
cd967e05 1866 !test_bit(NETLINK_CONGESTED, &nlk->state)) {
cf0a018a 1867 netlink_skb_set_owner_r(skb, sk);
4a7e7c2a 1868 __netlink_sendskb(sk, skb);
2c645800 1869 return atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1);
1da177e4
LT
1870 }
1871 return -1;
1872}
1873
1874struct netlink_broadcast_data {
1875 struct sock *exclude_sk;
b4b51029 1876 struct net *net;
15e47304 1877 u32 portid;
1da177e4
LT
1878 u32 group;
1879 int failure;
 1880 int delivery_failure;
1881 int congested;
1882 int delivered;
 1883 gfp_t allocation;
 1884 struct sk_buff *skb, *skb2;
1885 int (*tx_filter)(struct sock *dsk, struct sk_buff *skb, void *data);
1886 void *tx_data;
1887};
1888
1889static void do_one_broadcast(struct sock *sk,
1890 struct netlink_broadcast_data *p)
1891{
1892 struct netlink_sock *nlk = nlk_sk(sk);
1893 int val;
1894
1895 if (p->exclude_sk == sk)
 1896 return;
 1897
 1898 if (nlk->portid == p->portid || p->group - 1 >= nlk->ngroups ||
 1899 !test_bit(p->group - 1, nlk->groups))
 1900 return;
 1901
 1902 if (!net_eq(sock_net(sk), p->net))
 1903 return;
 1904
1905 if (p->failure) {
1906 netlink_overrun(sk);
46c9521f 1907 return;
1da177e4
LT
1908 }
1909
1910 sock_hold(sk);
1911 if (p->skb2 == NULL) {
68acc024 1912 if (skb_shared(p->skb)) {
1da177e4
LT
1913 p->skb2 = skb_clone(p->skb, p->allocation);
1914 } else {
68acc024
TC
1915 p->skb2 = skb_get(p->skb);
1916 /*
1917 * skb ownership may have been set when
1918 * delivered to a previous socket.
1919 */
1920 skb_orphan(p->skb2);
1da177e4
LT
1921 }
1922 }
1923 if (p->skb2 == NULL) {
1924 netlink_overrun(sk);
1925 /* Clone failed. Notify ALL listeners. */
1926 p->failure = 1;
be0c22a4
PNA
1927 if (nlk->flags & NETLINK_BROADCAST_SEND_ERROR)
1928 p->delivery_failure = 1;
910a7e90
EB
1929 } else if (p->tx_filter && p->tx_filter(sk, p->skb2, p->tx_data)) {
1930 kfree_skb(p->skb2);
1931 p->skb2 = NULL;
b1153f29
SH
1932 } else if (sk_filter(sk, p->skb2)) {
1933 kfree_skb(p->skb2);
1934 p->skb2 = NULL;
1da177e4
LT
1935 } else if ((val = netlink_broadcast_deliver(sk, p->skb2)) < 0) {
1936 netlink_overrun(sk);
be0c22a4
PNA
1937 if (nlk->flags & NETLINK_BROADCAST_SEND_ERROR)
1938 p->delivery_failure = 1;
1da177e4
LT
1939 } else {
1940 p->congested |= val;
1941 p->delivered = 1;
1942 p->skb2 = NULL;
1943 }
1944 sock_put(sk);
1da177e4
LT
1945}
1946
15e47304 1947int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, u32 portid,
910a7e90
EB
1948 u32 group, gfp_t allocation,
1949 int (*filter)(struct sock *dsk, struct sk_buff *skb, void *data),
1950 void *filter_data)
1da177e4 1951{
3b1e0a65 1952 struct net *net = sock_net(ssk);
1da177e4 1953 struct netlink_broadcast_data info;
1da177e4
LT
1954 struct sock *sk;
1955
1956 skb = netlink_trim(skb, allocation);
1957
1958 info.exclude_sk = ssk;
b4b51029 1959 info.net = net;
15e47304 1960 info.portid = portid;
1da177e4
LT
1961 info.group = group;
1962 info.failure = 0;
ff491a73 1963 info.delivery_failure = 0;
1da177e4
LT
1964 info.congested = 0;
1965 info.delivered = 0;
1966 info.allocation = allocation;
1967 info.skb = skb;
1968 info.skb2 = NULL;
910a7e90
EB
1969 info.tx_filter = filter;
1970 info.tx_data = filter_data;
1da177e4
LT
1971
1972 /* While we sleep in clone, do not allow the socket list to change */
1973
1974 netlink_lock_table();
1975
b67bfe0d 1976 sk_for_each_bound(sk, &nl_table[ssk->sk_protocol].mc_list)
1da177e4
LT
1977 do_one_broadcast(sk, &info);
1978
70d4bf6d 1979 consume_skb(skb);
aa1c6a6f 1980
1da177e4
LT
1981 netlink_unlock_table();
1982
70d4bf6d
NH
1983 if (info.delivery_failure) {
1984 kfree_skb(info.skb2);
ff491a73 1985 return -ENOBUFS;
658cb354
ED
1986 }
1987 consume_skb(info.skb2);
ff491a73 1988
1da177e4
LT
1989 if (info.delivered) {
1990 if (info.congested && (allocation & __GFP_WAIT))
1991 yield();
1992 return 0;
1993 }
1da177e4
LT
1994 return -ESRCH;
1995}
910a7e90
EB
1996EXPORT_SYMBOL(netlink_broadcast_filtered);
1997
15e47304 1998int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 portid,
910a7e90
EB
1999 u32 group, gfp_t allocation)
2000{
15e47304 2001 return netlink_broadcast_filtered(ssk, skb, portid, group, allocation,
910a7e90
EB
2002 NULL, NULL);
2003}
6ac552fd 2004EXPORT_SYMBOL(netlink_broadcast);
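/*
 * Usage sketch (illustrative only, not part of this file): building a small
 * message with the nlmsg_* helpers from <net/netlink.h> and broadcasting it
 * to multicast group 1.  example_nl_sk and EXAMPLE_MSG_TYPE are hypothetical.
 */
#if 0
static int example_broadcast(u32 value)
{
	struct sk_buff *skb;
	struct nlmsghdr *nlh;

	skb = nlmsg_new(sizeof(value), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	nlh = nlmsg_put(skb, 0, 0, EXAMPLE_MSG_TYPE, sizeof(value), 0);
	if (!nlh) {
		nlmsg_free(skb);
		return -EMSGSIZE;
	}
	memcpy(nlmsg_data(nlh), &value, sizeof(value));
	nlmsg_end(skb, nlh);

	/* portid 0: exclude nobody; -ESRCH means no listener was found */
	return netlink_broadcast(example_nl_sk, skb, 0, 1, GFP_KERNEL);
}
#endif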
1da177e4
LT
2005
2006struct netlink_set_err_data {
2007 struct sock *exclude_sk;
15e47304 2008 u32 portid;
1da177e4
LT
2009 u32 group;
2010 int code;
2011};
2012
b57ef81f 2013static int do_one_set_err(struct sock *sk, struct netlink_set_err_data *p)
1da177e4
LT
2014{
2015 struct netlink_sock *nlk = nlk_sk(sk);
1a50307b 2016 int ret = 0;
1da177e4
LT
2017
2018 if (sk == p->exclude_sk)
2019 goto out;
2020
09ad9bc7 2021 if (!net_eq(sock_net(sk), sock_net(p->exclude_sk)))
b4b51029
EB
2022 goto out;
2023
15e47304 2024 if (nlk->portid == p->portid || p->group - 1 >= nlk->ngroups ||
f7fa9b10 2025 !test_bit(p->group - 1, nlk->groups))
1da177e4
LT
2026 goto out;
2027
1a50307b
PNA
2028 if (p->code == ENOBUFS && nlk->flags & NETLINK_RECV_NO_ENOBUFS) {
2029 ret = 1;
2030 goto out;
2031 }
2032
1da177e4
LT
2033 sk->sk_err = p->code;
2034 sk->sk_error_report(sk);
2035out:
1a50307b 2036 return ret;
1da177e4
LT
2037}
2038
4843b93c
PNA
2039/**
2040 * netlink_set_err - report error to broadcast listeners
2041 * @ssk: the kernel netlink socket, as returned by netlink_kernel_create()
15e47304 2042 * @portid: the PORTID of a process that we want to skip (if any)
840e93f2 2043 * @group: the broadcast group that will notice the error
4843b93c 2044 * @code: error code, must be negative (as usual in kernelspace)
1a50307b
PNA
2045 *
2046 * This function returns the number of broadcast listeners that have set the
2047 * NETLINK_RECV_NO_ENOBUFS socket option.
4843b93c 2048 */
15e47304 2049int netlink_set_err(struct sock *ssk, u32 portid, u32 group, int code)
1da177e4
LT
2050{
2051 struct netlink_set_err_data info;
1da177e4 2052 struct sock *sk;
1a50307b 2053 int ret = 0;
1da177e4
LT
2054
2055 info.exclude_sk = ssk;
15e47304 2056 info.portid = portid;
1da177e4 2057 info.group = group;
4843b93c
PNA
2058 /* sk->sk_err wants a positive error value */
2059 info.code = -code;
1da177e4
LT
2060
2061 read_lock(&nl_table_lock);
2062
b67bfe0d 2063 sk_for_each_bound(sk, &nl_table[ssk->sk_protocol].mc_list)
1a50307b 2064 ret += do_one_set_err(sk, &info);
1da177e4
LT
2065
2066 read_unlock(&nl_table_lock);
1a50307b 2067 return ret;
1da177e4 2068}
dd5b6ce6 2069EXPORT_SYMBOL(netlink_set_err);
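/*
 * Usage sketch (illustrative only, not part of this file): telling every
 * member of a group that a notification was lost.  Because the code passed
 * here is -ENOBUFS, listeners that set NETLINK_NO_ENOBUFS are skipped and
 * counted in the return value.  example_nl_sk and EXAMPLE_GRP are
 * hypothetical names.
 */
#if 0
	int skipped = netlink_set_err(example_nl_sk, 0, EXAMPLE_GRP, -ENOBUFS);
#endif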
1da177e4 2070
84659eb5
JB
2071/* must be called with netlink table grabbed */
2072static void netlink_update_socket_mc(struct netlink_sock *nlk,
2073 unsigned int group,
2074 int is_new)
2075{
2076 int old, new = !!is_new, subscriptions;
2077
2078 old = test_bit(group - 1, nlk->groups);
2079 subscriptions = nlk->subscriptions - old + new;
2080 if (new)
2081 __set_bit(group - 1, nlk->groups);
2082 else
2083 __clear_bit(group - 1, nlk->groups);
2084 netlink_update_subscriptions(&nlk->sk, subscriptions);
2085 netlink_update_listeners(&nlk->sk);
2086}
2087
9a4595bc 2088static int netlink_setsockopt(struct socket *sock, int level, int optname,
b7058842 2089 char __user *optval, unsigned int optlen)
9a4595bc
PM
2090{
2091 struct sock *sk = sock->sk;
2092 struct netlink_sock *nlk = nlk_sk(sk);
eb496534
JB
2093 unsigned int val = 0;
2094 int err;
9a4595bc
PM
2095
2096 if (level != SOL_NETLINK)
2097 return -ENOPROTOOPT;
2098
ccdfcc39
PM
2099 if (optname != NETLINK_RX_RING && optname != NETLINK_TX_RING &&
2100 optlen >= sizeof(int) &&
eb496534 2101 get_user(val, (unsigned int __user *)optval))
9a4595bc
PM
2102 return -EFAULT;
2103
2104 switch (optname) {
2105 case NETLINK_PKTINFO:
2106 if (val)
2107 nlk->flags |= NETLINK_RECV_PKTINFO;
2108 else
2109 nlk->flags &= ~NETLINK_RECV_PKTINFO;
2110 err = 0;
2111 break;
2112 case NETLINK_ADD_MEMBERSHIP:
2113 case NETLINK_DROP_MEMBERSHIP: {
5187cd05 2114 if (!netlink_allowed(sock, NL_CFG_F_NONROOT_RECV))
9a4595bc 2115 return -EPERM;
b4ff4f04
JB
2116 err = netlink_realloc_groups(sk);
2117 if (err)
2118 return err;
9a4595bc
PM
2119 if (!val || val - 1 >= nlk->ngroups)
2120 return -EINVAL;
7774d5e0 2121 if (optname == NETLINK_ADD_MEMBERSHIP && nlk->netlink_bind) {
023e2cfa 2122 err = nlk->netlink_bind(sock_net(sk), val);
4f520900
RGB
2123 if (err)
2124 return err;
2125 }
9a4595bc 2126 netlink_table_grab();
84659eb5
JB
2127 netlink_update_socket_mc(nlk, val,
2128 optname == NETLINK_ADD_MEMBERSHIP);
9a4595bc 2129 netlink_table_ungrab();
7774d5e0 2130 if (optname == NETLINK_DROP_MEMBERSHIP && nlk->netlink_unbind)
023e2cfa 2131 nlk->netlink_unbind(sock_net(sk), val);
03292745 2132
9a4595bc
PM
2133 err = 0;
2134 break;
2135 }
be0c22a4
PNA
2136 case NETLINK_BROADCAST_ERROR:
2137 if (val)
2138 nlk->flags |= NETLINK_BROADCAST_SEND_ERROR;
2139 else
2140 nlk->flags &= ~NETLINK_BROADCAST_SEND_ERROR;
2141 err = 0;
2142 break;
38938bfe
PNA
2143 case NETLINK_NO_ENOBUFS:
2144 if (val) {
2145 nlk->flags |= NETLINK_RECV_NO_ENOBUFS;
cd967e05 2146 clear_bit(NETLINK_CONGESTED, &nlk->state);
38938bfe 2147 wake_up_interruptible(&nlk->wait);
658cb354 2148 } else {
38938bfe 2149 nlk->flags &= ~NETLINK_RECV_NO_ENOBUFS;
658cb354 2150 }
38938bfe
PNA
2151 err = 0;
2152 break;
ccdfcc39
PM
2153#ifdef CONFIG_NETLINK_MMAP
2154 case NETLINK_RX_RING:
2155 case NETLINK_TX_RING: {
2156 struct nl_mmap_req req;
2157
2158 /* Rings might consume more memory than the socket queue limits allow,
2159 * so require CAP_NET_ADMIN.
2160 */
2161 if (!capable(CAP_NET_ADMIN))
2162 return -EPERM;
2163 if (optlen < sizeof(req))
2164 return -EINVAL;
2165 if (copy_from_user(&req, optval, sizeof(req)))
2166 return -EFAULT;
2167 err = netlink_set_ring(sk, &req, false,
2168 optname == NETLINK_TX_RING);
2169 break;
2170 }
2171#endif /* CONFIG_NETLINK_MMAP */
9a4595bc
PM
2172 default:
2173 err = -ENOPROTOOPT;
2174 }
2175 return err;
2176}
2177
2178static int netlink_getsockopt(struct socket *sock, int level, int optname,
746fac4d 2179 char __user *optval, int __user *optlen)
9a4595bc
PM
2180{
2181 struct sock *sk = sock->sk;
2182 struct netlink_sock *nlk = nlk_sk(sk);
2183 int len, val, err;
2184
2185 if (level != SOL_NETLINK)
2186 return -ENOPROTOOPT;
2187
2188 if (get_user(len, optlen))
2189 return -EFAULT;
2190 if (len < 0)
2191 return -EINVAL;
2192
2193 switch (optname) {
2194 case NETLINK_PKTINFO:
2195 if (len < sizeof(int))
2196 return -EINVAL;
2197 len = sizeof(int);
2198 val = nlk->flags & NETLINK_RECV_PKTINFO ? 1 : 0;
a27b58fe
HC
2199 if (put_user(len, optlen) ||
2200 put_user(val, optval))
2201 return -EFAULT;
9a4595bc
PM
2202 err = 0;
2203 break;
be0c22a4
PNA
2204 case NETLINK_BROADCAST_ERROR:
2205 if (len < sizeof(int))
2206 return -EINVAL;
2207 len = sizeof(int);
2208 val = nlk->flags & NETLINK_BROADCAST_SEND_ERROR ? 1 : 0;
2209 if (put_user(len, optlen) ||
2210 put_user(val, optval))
2211 return -EFAULT;
2212 err = 0;
2213 break;
38938bfe
PNA
2214 case NETLINK_NO_ENOBUFS:
2215 if (len < sizeof(int))
2216 return -EINVAL;
2217 len = sizeof(int);
2218 val = nlk->flags & NETLINK_RECV_NO_ENOBUFS ? 1 : 0;
2219 if (put_user(len, optlen) ||
2220 put_user(val, optval))
2221 return -EFAULT;
2222 err = 0;
2223 break;
9a4595bc
PM
2224 default:
2225 err = -ENOPROTOOPT;
2226 }
2227 return err;
2228}
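/*
 * Usage sketch (illustrative only, not part of this file): the userspace
 * side of the socket options handled above, joining multicast group 1 and
 * opting out of ENOBUFS errors on a NETLINK_ROUTE socket.  Error handling
 * is omitted for brevity.
 */
#if 0
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
	unsigned int grp = 1, on = 1;

	setsockopt(fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP, &grp, sizeof(grp));
	setsockopt(fd, SOL_NETLINK, NETLINK_NO_ENOBUFS, &on, sizeof(on));
	setsockopt(fd, SOL_NETLINK, NETLINK_PKTINFO, &on, sizeof(on));
#endif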
2229
2230static void netlink_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb)
2231{
2232 struct nl_pktinfo info;
2233
2234 info.group = NETLINK_CB(skb).dst_group;
2235 put_cmsg(msg, SOL_NETLINK, NETLINK_PKTINFO, sizeof(info), &info);
2236}
2237
1da177e4
LT
2238static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
2239 struct msghdr *msg, size_t len)
2240{
2241 struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
2242 struct sock *sk = sock->sk;
2243 struct netlink_sock *nlk = nlk_sk(sk);
342dfc30 2244 DECLARE_SOCKADDR(struct sockaddr_nl *, addr, msg->msg_name);
15e47304 2245 u32 dst_portid;
d629b836 2246 u32 dst_group;
1da177e4
LT
2247 struct sk_buff *skb;
2248 int err;
2249 struct scm_cookie scm;
2d7a85f4 2250 u32 netlink_skb_flags = 0;
1da177e4
LT
2251
2252 if (msg->msg_flags&MSG_OOB)
2253 return -EOPNOTSUPP;
2254
16e57262 2255 if (NULL == siocb->scm)
1da177e4 2256 siocb->scm = &scm;
16e57262 2257
e0e3cea4 2258 err = scm_send(sock, msg, siocb->scm, true);
1da177e4
LT
2259 if (err < 0)
2260 return err;
2261
2262 if (msg->msg_namelen) {
b47030c7 2263 err = -EINVAL;
1da177e4 2264 if (addr->nl_family != AF_NETLINK)
b47030c7 2265 goto out;
15e47304 2266 dst_portid = addr->nl_pid;
d629b836 2267 dst_group = ffs(addr->nl_groups);
b47030c7 2268 err = -EPERM;
15e47304 2269 if ((dst_group || dst_portid) &&
5187cd05 2270 !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND))
b47030c7 2271 goto out;
2d7a85f4 2272 netlink_skb_flags |= NETLINK_SKB_DST;
1da177e4 2273 } else {
15e47304 2274 dst_portid = nlk->dst_portid;
d629b836 2275 dst_group = nlk->dst_group;
1da177e4
LT
2276 }
2277
15e47304 2278 if (!nlk->portid) {
1da177e4
LT
2279 err = netlink_autobind(sock);
2280 if (err)
2281 goto out;
2282 }
2283
5fd96123 2284 if (netlink_tx_is_mmaped(sk) &&
c0371da6 2285 msg->msg_iter.iov->iov_base == NULL) {
5fd96123
PM
2286 err = netlink_mmap_sendmsg(sk, msg, dst_portid, dst_group,
2287 siocb);
2288 goto out;
2289 }
2290
1da177e4
LT
2291 err = -EMSGSIZE;
2292 if (len > sk->sk_sndbuf - 32)
2293 goto out;
2294 err = -ENOBUFS;
3a36515f 2295 skb = netlink_alloc_large_skb(len, dst_group);
6ac552fd 2296 if (skb == NULL)
1da177e4
LT
2297 goto out;
2298
15e47304 2299 NETLINK_CB(skb).portid = nlk->portid;
d629b836 2300 NETLINK_CB(skb).dst_group = dst_group;
dbe9a417 2301 NETLINK_CB(skb).creds = siocb->scm->creds;
2d7a85f4 2302 NETLINK_CB(skb).flags = netlink_skb_flags;
1da177e4 2303
1da177e4 2304 err = -EFAULT;
6ce8e9ce 2305 if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
1da177e4
LT
2306 kfree_skb(skb);
2307 goto out;
2308 }
2309
2310 err = security_netlink_send(sk, skb);
2311 if (err) {
2312 kfree_skb(skb);
2313 goto out;
2314 }
2315
d629b836 2316 if (dst_group) {
1da177e4 2317 atomic_inc(&skb->users);
15e47304 2318 netlink_broadcast(sk, skb, dst_portid, dst_group, GFP_KERNEL);
1da177e4 2319 }
15e47304 2320 err = netlink_unicast(sk, skb, dst_portid, msg->msg_flags&MSG_DONTWAIT);
1da177e4
LT
2321
2322out:
b47030c7 2323 scm_destroy(siocb->scm);
1da177e4
LT
2324 return err;
2325}
2326
2327static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
2328 struct msghdr *msg, size_t len,
2329 int flags)
2330{
2331 struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
2332 struct scm_cookie scm;
2333 struct sock *sk = sock->sk;
2334 struct netlink_sock *nlk = nlk_sk(sk);
2335 int noblock = flags&MSG_DONTWAIT;
2336 size_t copied;
68d6ac6d 2337 struct sk_buff *skb, *data_skb;
b44d211e 2338 int err, ret;
1da177e4
LT
2339
2340 if (flags&MSG_OOB)
2341 return -EOPNOTSUPP;
2342
2343 copied = 0;
2344
6ac552fd
PM
2345 skb = skb_recv_datagram(sk, flags, noblock, &err);
2346 if (skb == NULL)
1da177e4
LT
2347 goto out;
2348
68d6ac6d
JB
2349 data_skb = skb;
2350
1dacc76d
JB
2351#ifdef CONFIG_COMPAT_NETLINK_MESSAGES
2352 if (unlikely(skb_shinfo(skb)->frag_list)) {
1dacc76d 2353 /*
68d6ac6d
JB
2354 * If this skb has a frag_list, it means that we will have to
2355 * use the frag_list skb's data for compat tasks and the regular
2356 * skb's data for normal (non-compat) tasks.
1dacc76d 2357 *
68d6ac6d
JB
2358 * If we need to send the compat skb, assign it to the
2359 * 'data_skb' variable so that it will be used below for data
2360 * copying. We keep 'skb' for everything else, including
2361 * freeing both later.
1dacc76d 2362 */
68d6ac6d
JB
2363 if (flags & MSG_CMSG_COMPAT)
2364 data_skb = skb_shinfo(skb)->frag_list;
1dacc76d
JB
2365 }
2366#endif
2367
9063e21f
ED
2368 /* Record the max length of recvmsg() calls for future allocations */
2369 nlk->max_recvmsg_len = max(nlk->max_recvmsg_len, len);
2370 nlk->max_recvmsg_len = min_t(size_t, nlk->max_recvmsg_len,
2371 16384);
2372
68d6ac6d 2373 copied = data_skb->len;
1da177e4
LT
2374 if (len < copied) {
2375 msg->msg_flags |= MSG_TRUNC;
2376 copied = len;
2377 }
2378
68d6ac6d 2379 skb_reset_transport_header(data_skb);
51f3d02b 2380 err = skb_copy_datagram_msg(data_skb, 0, msg, copied);
1da177e4
LT
2381
2382 if (msg->msg_name) {
342dfc30 2383 DECLARE_SOCKADDR(struct sockaddr_nl *, addr, msg->msg_name);
1da177e4
LT
2384 addr->nl_family = AF_NETLINK;
2385 addr->nl_pad = 0;
15e47304 2386 addr->nl_pid = NETLINK_CB(skb).portid;
d629b836 2387 addr->nl_groups = netlink_group_mask(NETLINK_CB(skb).dst_group);
1da177e4
LT
2388 msg->msg_namelen = sizeof(*addr);
2389 }
2390
cc9a06cd
PM
2391 if (nlk->flags & NETLINK_RECV_PKTINFO)
2392 netlink_cmsg_recv_pktinfo(msg, skb);
2393
1da177e4
LT
2394 if (NULL == siocb->scm) {
2395 memset(&scm, 0, sizeof(scm));
2396 siocb->scm = &scm;
2397 }
2398 siocb->scm->creds = *NETLINK_CREDS(skb);
188ccb55 2399 if (flags & MSG_TRUNC)
68d6ac6d 2400 copied = data_skb->len;
daa3766e 2401
1da177e4
LT
2402 skb_free_datagram(sk, skb);
2403
16b304f3
PS
2404 if (nlk->cb_running &&
2405 atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) {
b44d211e
AV
2406 ret = netlink_dump(sk);
2407 if (ret) {
ac30ef83 2408 sk->sk_err = -ret;
b44d211e
AV
2409 sk->sk_error_report(sk);
2410 }
2411 }
1da177e4
LT
2412
2413 scm_recv(sock, msg, siocb->scm, flags);
1da177e4
LT
2414out:
2415 netlink_rcv_wake(sk);
2416 return err ? : copied;
2417}
2418
676d2369 2419static void netlink_data_ready(struct sock *sk)
1da177e4 2420{
cd40b7d3 2421 BUG();
1da177e4
LT
2422}
2423
2424/*
746fac4d 2425 * We export these functions to other modules. They provide a
1da177e4
LT
2426 * complete set of kernel non-blocking support for message
2427 * queueing.
2428 */
2429
2430struct sock *
9f00d977
PNA
2431__netlink_kernel_create(struct net *net, int unit, struct module *module,
2432 struct netlink_kernel_cfg *cfg)
1da177e4
LT
2433{
2434 struct socket *sock;
2435 struct sock *sk;
77247bbb 2436 struct netlink_sock *nlk;
5c398dc8 2437 struct listeners *listeners = NULL;
a31f2d17
PNA
2438 struct mutex *cb_mutex = cfg ? cfg->cb_mutex : NULL;
2439 unsigned int groups;
1da177e4 2440
fab2caf6 2441 BUG_ON(!nl_table);
1da177e4 2442
6ac552fd 2443 if (unit < 0 || unit >= MAX_LINKS)
1da177e4
LT
2444 return NULL;
2445
2446 if (sock_create_lite(PF_NETLINK, SOCK_DGRAM, unit, &sock))
2447 return NULL;
2448
23fe1866
PE
2449 /*
2450 * We just have to hold a reference on the net from sk, but don't
2451 * get_net it. Besides, we cannot get and then put the net here.
2452 * So we create the socket inside init_net and then move it to net.
2453 */
2454
2455 if (__netlink_create(&init_net, sock, cb_mutex, unit) < 0)
2456 goto out_sock_release_nosk;
2457
2458 sk = sock->sk;
edf02087 2459 sk_change_net(sk, net);
4fdb3bb7 2460
a31f2d17 2461 if (!cfg || cfg->groups < 32)
4277a083 2462 groups = 32;
a31f2d17
PNA
2463 else
2464 groups = cfg->groups;
4277a083 2465
5c398dc8 2466 listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL);
4277a083
PM
2467 if (!listeners)
2468 goto out_sock_release;
2469
1da177e4 2470 sk->sk_data_ready = netlink_data_ready;
a31f2d17
PNA
2471 if (cfg && cfg->input)
2472 nlk_sk(sk)->netlink_rcv = cfg->input;
1da177e4 2473
b4b51029 2474 if (netlink_insert(sk, net, 0))
77247bbb 2475 goto out_sock_release;
4fdb3bb7 2476
77247bbb
PM
2477 nlk = nlk_sk(sk);
2478 nlk->flags |= NETLINK_KERNEL_SOCKET;
4fdb3bb7 2479
4fdb3bb7 2480 netlink_table_grab();
b4b51029
EB
2481 if (!nl_table[unit].registered) {
2482 nl_table[unit].groups = groups;
5c398dc8 2483 rcu_assign_pointer(nl_table[unit].listeners, listeners);
b4b51029
EB
2484 nl_table[unit].cb_mutex = cb_mutex;
2485 nl_table[unit].module = module;
9785e10a
PNA
2486 if (cfg) {
2487 nl_table[unit].bind = cfg->bind;
6251edd9 2488 nl_table[unit].unbind = cfg->unbind;
9785e10a 2489 nl_table[unit].flags = cfg->flags;
da12c90e
G
2490 if (cfg->compare)
2491 nl_table[unit].compare = cfg->compare;
9785e10a 2492 }
b4b51029 2493 nl_table[unit].registered = 1;
f937f1f4
JJ
2494 } else {
2495 kfree(listeners);
869e58f8 2496 nl_table[unit].registered++;
b4b51029 2497 }
4fdb3bb7 2498 netlink_table_ungrab();
77247bbb
PM
2499 return sk;
2500
4fdb3bb7 2501out_sock_release:
4277a083 2502 kfree(listeners);
9dfbec1f 2503 netlink_kernel_release(sk);
23fe1866
PE
2504 return NULL;
2505
2506out_sock_release_nosk:
4fdb3bb7 2507 sock_release(sock);
77247bbb 2508 return NULL;
1da177e4 2509}
9f00d977 2510EXPORT_SYMBOL(__netlink_kernel_create);
b7c6ba6e
DL
2511
2512void
2513netlink_kernel_release(struct sock *sk)
2514{
edf02087 2515 sk_release_kernel(sk);
b7c6ba6e
DL
2516}
2517EXPORT_SYMBOL(netlink_kernel_release);
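/*
 * Usage sketch (illustrative only, not part of this file): a typical caller
 * of netlink_kernel_create(), the inline wrapper that passes THIS_MODULE to
 * __netlink_kernel_create() above.  NETLINK_EXAMPLE, example_nl_sk and the
 * example_* handlers are hypothetical placeholders.
 */
#if 0
static struct sock *example_nl_sk;

static int example_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	return 0;	/* parse nlh and do the real work here */
}

static void example_input(struct sk_buff *skb)
{
	netlink_rcv_skb(skb, &example_rcv_msg);
}

static int __init example_init(void)
{
	struct netlink_kernel_cfg cfg = {
		.groups	= 32,
		.input	= example_input,
	};

	example_nl_sk = netlink_kernel_create(&init_net, NETLINK_EXAMPLE, &cfg);
	return example_nl_sk ? 0 : -ENOMEM;
}

static void __exit example_exit(void)
{
	netlink_kernel_release(example_nl_sk);
}
#endif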
2518
d136f1bd 2519int __netlink_change_ngroups(struct sock *sk, unsigned int groups)
b4ff4f04 2520{
5c398dc8 2521 struct listeners *new, *old;
b4ff4f04 2522 struct netlink_table *tbl = &nl_table[sk->sk_protocol];
b4ff4f04
JB
2523
2524 if (groups < 32)
2525 groups = 32;
2526
b4ff4f04 2527 if (NLGRPSZ(tbl->groups) < NLGRPSZ(groups)) {
5c398dc8
ED
2528 new = kzalloc(sizeof(*new) + NLGRPSZ(groups), GFP_ATOMIC);
2529 if (!new)
d136f1bd 2530 return -ENOMEM;
6d772ac5 2531 old = nl_deref_protected(tbl->listeners);
5c398dc8
ED
2532 memcpy(new->masks, old->masks, NLGRPSZ(tbl->groups));
2533 rcu_assign_pointer(tbl->listeners, new);
2534
37b6b935 2535 kfree_rcu(old, rcu);
b4ff4f04
JB
2536 }
2537 tbl->groups = groups;
2538
d136f1bd
JB
2539 return 0;
2540}
2541
2542/**
2543 * netlink_change_ngroups - change number of multicast groups
2544 *
2545 * This changes the number of multicast groups that are available
2546 * on a certain netlink family. Note that it is not possible to
2547 * change the number of groups to below 32. Also note that it does
2548 * not implicitly call netlink_clear_multicast_users() when the
2549 * number of groups is reduced.
2550 *
2551 * @sk: The kernel netlink socket, as returned by netlink_kernel_create().
2552 * @groups: The new number of groups.
2553 */
2554int netlink_change_ngroups(struct sock *sk, unsigned int groups)
2555{
2556 int err;
2557
2558 netlink_table_grab();
2559 err = __netlink_change_ngroups(sk, groups);
b4ff4f04 2560 netlink_table_ungrab();
d136f1bd 2561
b4ff4f04
JB
2562 return err;
2563}
b4ff4f04 2564
b8273570
JB
2565void __netlink_clear_multicast_users(struct sock *ksk, unsigned int group)
2566{
2567 struct sock *sk;
b8273570
JB
2568 struct netlink_table *tbl = &nl_table[ksk->sk_protocol];
2569
b67bfe0d 2570 sk_for_each_bound(sk, &tbl->mc_list)
b8273570
JB
2571 netlink_update_socket_mc(nlk_sk(sk), group, 0);
2572}
2573
a46621a3 2574struct nlmsghdr *
15e47304 2575__nlmsg_put(struct sk_buff *skb, u32 portid, u32 seq, int type, int len, int flags)
a46621a3
DV
2576{
2577 struct nlmsghdr *nlh;
573ce260 2578 int size = nlmsg_msg_size(len);
a46621a3 2579
23b45672 2580 nlh = (struct nlmsghdr *)skb_put(skb, NLMSG_ALIGN(size));
a46621a3
DV
2581 nlh->nlmsg_type = type;
2582 nlh->nlmsg_len = size;
2583 nlh->nlmsg_flags = flags;
15e47304 2584 nlh->nlmsg_pid = portid;
a46621a3
DV
2585 nlh->nlmsg_seq = seq;
2586 if (!__builtin_constant_p(size) || NLMSG_ALIGN(size) - size != 0)
573ce260 2587 memset(nlmsg_data(nlh) + len, 0, NLMSG_ALIGN(size) - size);
a46621a3
DV
2588 return nlh;
2589}
2590EXPORT_SYMBOL(__nlmsg_put);
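/*
 * Worked example (illustrative) of the size arithmetic above: for a 5-byte
 * payload, nlmsg_msg_size(5) = NLMSG_HDRLEN (16) + 5 = 21, so the header
 * claims nlmsg_len = 21, skb_put() reserves NLMSG_ALIGN(21) = 24 bytes, and
 * the memset zeroes the 3 trailing padding bytes.
 */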
2591
1da177e4
LT
2592/*
2593 * It looks a bit ugly.
2594 * It would be better to create a kernel thread.
2595 */
2596
2597static int netlink_dump(struct sock *sk)
2598{
2599 struct netlink_sock *nlk = nlk_sk(sk);
2600 struct netlink_callback *cb;
c7ac8679 2601 struct sk_buff *skb = NULL;
1da177e4 2602 struct nlmsghdr *nlh;
bf8b79e4 2603 int len, err = -ENOBUFS;
c7ac8679 2604 int alloc_size;
1da177e4 2605
af65bdfc 2606 mutex_lock(nlk->cb_mutex);
16b304f3 2607 if (!nlk->cb_running) {
bf8b79e4
TG
2608 err = -EINVAL;
2609 goto errout_skb;
1da177e4
LT
2610 }
2611
16b304f3 2612 cb = &nlk->cb;
c7ac8679
GR
2613 alloc_size = max_t(int, cb->min_dump_alloc, NLMSG_GOODSIZE);
2614
f9c22888
PM
2615 if (!netlink_rx_is_mmaped(sk) &&
2616 atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
2617 goto errout_skb;
9063e21f
ED
2618
2619 /* NLMSG_GOODSIZE is small to avoid high order allocations being
2620 * required, but it makes sense to _attempt_ a 16K-byte allocation
2621 * to reduce the number of system calls on dump operations, if the
2622 * user ever provided a big enough buffer.
2623 */
2624 if (alloc_size < nlk->max_recvmsg_len) {
2625 skb = netlink_alloc_skb(sk,
2626 nlk->max_recvmsg_len,
2627 nlk->portid,
2628 GFP_KERNEL |
2629 __GFP_NOWARN |
2630 __GFP_NORETRY);
2631 /* available room should be exact amount to avoid MSG_TRUNC */
2632 if (skb)
2633 skb_reserve(skb, skb_tailroom(skb) -
2634 nlk->max_recvmsg_len);
2635 }
2636 if (!skb)
2637 skb = netlink_alloc_skb(sk, alloc_size, nlk->portid,
2638 GFP_KERNEL);
c7ac8679 2639 if (!skb)
c63d6ea3 2640 goto errout_skb;
f9c22888 2641 netlink_skb_set_owner_r(skb, sk);
c7ac8679 2642
1da177e4
LT
2643 len = cb->dump(skb, cb);
2644
2645 if (len > 0) {
af65bdfc 2646 mutex_unlock(nlk->cb_mutex);
b1153f29
SH
2647
2648 if (sk_filter(sk, skb))
2649 kfree_skb(skb);
4a7e7c2a
ED
2650 else
2651 __netlink_sendskb(sk, skb);
1da177e4
LT
2652 return 0;
2653 }
2654
bf8b79e4
TG
2655 nlh = nlmsg_put_answer(skb, cb, NLMSG_DONE, sizeof(len), NLM_F_MULTI);
2656 if (!nlh)
2657 goto errout_skb;
2658
670dc283
JB
2659 nl_dump_check_consistent(cb, nlh);
2660
bf8b79e4
TG
2661 memcpy(nlmsg_data(nlh), &len, sizeof(len));
2662
b1153f29
SH
2663 if (sk_filter(sk, skb))
2664 kfree_skb(skb);
4a7e7c2a
ED
2665 else
2666 __netlink_sendskb(sk, skb);
1da177e4 2667
a8f74b22
TG
2668 if (cb->done)
2669 cb->done(cb);
1da177e4 2670
16b304f3
PS
2671 nlk->cb_running = false;
2672 mutex_unlock(nlk->cb_mutex);
6dc878a8 2673 module_put(cb->module);
16b304f3 2674 consume_skb(cb->skb);
1da177e4 2675 return 0;
1797754e 2676
bf8b79e4 2677errout_skb:
af65bdfc 2678 mutex_unlock(nlk->cb_mutex);
bf8b79e4 2679 kfree_skb(skb);
bf8b79e4 2680 return err;
1da177e4
LT
2681}
2682
6dc878a8
G
2683int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
2684 const struct nlmsghdr *nlh,
2685 struct netlink_dump_control *control)
1da177e4
LT
2686{
2687 struct netlink_callback *cb;
2688 struct sock *sk;
2689 struct netlink_sock *nlk;
b44d211e 2690 int ret;
1da177e4 2691
f9c22888
PM
2692 /* Memory mapped dump requests need to be copied to avoid looping
2693 * on the pending state in netlink_mmap_sendmsg() while the CB holds
2694 * a reference to the skb.
2695 */
2696 if (netlink_skb_is_mmaped(skb)) {
2697 skb = skb_copy(skb, GFP_KERNEL);
16b304f3 2698 if (skb == NULL)
f9c22888 2699 return -ENOBUFS;
f9c22888
PM
2700 } else
2701 atomic_inc(&skb->users);
2702
15e47304 2703 sk = netlink_lookup(sock_net(ssk), ssk->sk_protocol, NETLINK_CB(skb).portid);
1da177e4 2704 if (sk == NULL) {
16b304f3
PS
2705 ret = -ECONNREFUSED;
2706 goto error_free;
1da177e4 2707 }
6dc878a8 2708
16b304f3 2709 nlk = nlk_sk(sk);
af65bdfc 2710 mutex_lock(nlk->cb_mutex);
6dc878a8 2711 /* A dump is in progress... */
16b304f3 2712 if (nlk->cb_running) {
6dc878a8 2713 ret = -EBUSY;
16b304f3 2714 goto error_unlock;
1da177e4 2715 }
6dc878a8 2716 /* add reference of module which cb->dump belongs to */
16b304f3 2717 if (!try_module_get(control->module)) {
6dc878a8 2718 ret = -EPROTONOSUPPORT;
16b304f3 2719 goto error_unlock;
6dc878a8
G
2720 }
2721
16b304f3
PS
2722 cb = &nlk->cb;
2723 memset(cb, 0, sizeof(*cb));
2724 cb->dump = control->dump;
2725 cb->done = control->done;
2726 cb->nlh = nlh;
2727 cb->data = control->data;
2728 cb->module = control->module;
2729 cb->min_dump_alloc = control->min_dump_alloc;
2730 cb->skb = skb;
2731
2732 nlk->cb_running = true;
2733
af65bdfc 2734 mutex_unlock(nlk->cb_mutex);
1da177e4 2735
b44d211e 2736 ret = netlink_dump(sk);
1da177e4 2737 sock_put(sk);
5c58298c 2738
b44d211e
AV
2739 if (ret)
2740 return ret;
2741
5c58298c
DL
2742 /* We successfully started a dump, by returning -EINTR we
2743 * signal not to send ACK even if it was requested.
2744 */
2745 return -EINTR;
16b304f3
PS
2746
2747error_unlock:
2748 sock_put(sk);
2749 mutex_unlock(nlk->cb_mutex);
2750error_free:
2751 kfree_skb(skb);
2752 return ret;
1da177e4 2753}
6dc878a8 2754EXPORT_SYMBOL(__netlink_dump_start);
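/*
 * Usage sketch (illustrative only, not part of this file): a request handler
 * answering an NLM_F_DUMP request through netlink_dump_start(), the inline
 * wrapper that fills in .module before calling __netlink_dump_start() above.
 * example_dump() and example_nl_sk are hypothetical.  The -EINTR return
 * means the dump was started and no ack should be sent, which
 * netlink_rcv_skb() below already understands.
 */
#if 0
static int example_rcv_dump_req(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	if (nlh->nlmsg_flags & NLM_F_DUMP) {
		struct netlink_dump_control c = {
			.dump = example_dump,
		};

		return netlink_dump_start(example_nl_sk, skb, nlh, &c);
	}
	return -EOPNOTSUPP;
}
#endif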
1da177e4
LT
2755
2756void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err)
2757{
2758 struct sk_buff *skb;
2759 struct nlmsghdr *rep;
2760 struct nlmsgerr *errmsg;
339bf98f 2761 size_t payload = sizeof(*errmsg);
1da177e4 2762
339bf98f
TG
2763 /* error messages get the original request appended */
2764 if (err)
2765 payload += nlmsg_len(nlh);
1da177e4 2766
f9c22888
PM
2767 skb = netlink_alloc_skb(in_skb->sk, nlmsg_total_size(payload),
2768 NETLINK_CB(in_skb).portid, GFP_KERNEL);
1da177e4
LT
2769 if (!skb) {
2770 struct sock *sk;
2771
3b1e0a65 2772 sk = netlink_lookup(sock_net(in_skb->sk),
b4b51029 2773 in_skb->sk->sk_protocol,
15e47304 2774 NETLINK_CB(in_skb).portid);
1da177e4
LT
2775 if (sk) {
2776 sk->sk_err = ENOBUFS;
2777 sk->sk_error_report(sk);
2778 sock_put(sk);
2779 }
2780 return;
2781 }
2782
15e47304 2783 rep = __nlmsg_put(skb, NETLINK_CB(in_skb).portid, nlh->nlmsg_seq,
5dba93ae 2784 NLMSG_ERROR, payload, 0);
bf8b79e4 2785 errmsg = nlmsg_data(rep);
1da177e4 2786 errmsg->error = err;
bf8b79e4 2787 memcpy(&errmsg->msg, nlh, err ? nlh->nlmsg_len : sizeof(*nlh));
15e47304 2788 netlink_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).portid, MSG_DONTWAIT);
1da177e4 2789}
6ac552fd 2790EXPORT_SYMBOL(netlink_ack);
1da177e4 2791
cd40b7d3 2792int netlink_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *,
1d00a4eb 2793 struct nlmsghdr *))
82ace47a 2794{
82ace47a
TG
2795 struct nlmsghdr *nlh;
2796 int err;
2797
2798 while (skb->len >= nlmsg_total_size(0)) {
cd40b7d3
DL
2799 int msglen;
2800
b529ccf2 2801 nlh = nlmsg_hdr(skb);
d35b6856 2802 err = 0;
82ace47a 2803
ad8e4b75 2804 if (nlh->nlmsg_len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len)
82ace47a
TG
2805 return 0;
2806
d35b6856
TG
2807 /* Only requests are handled by the kernel */
2808 if (!(nlh->nlmsg_flags & NLM_F_REQUEST))
5c58298c 2809 goto ack;
45e7ae7f
TG
2810
2811 /* Skip control messages */
2812 if (nlh->nlmsg_type < NLMSG_MIN_TYPE)
5c58298c 2813 goto ack;
d35b6856 2814
1d00a4eb 2815 err = cb(skb, nlh);
5c58298c
DL
2816 if (err == -EINTR)
2817 goto skip;
2818
2819ack:
d35b6856 2820 if (nlh->nlmsg_flags & NLM_F_ACK || err)
82ace47a 2821 netlink_ack(skb, nlh, err);
82ace47a 2822
5c58298c 2823skip:
6ac552fd 2824 msglen = NLMSG_ALIGN(nlh->nlmsg_len);
cd40b7d3
DL
2825 if (msglen > skb->len)
2826 msglen = skb->len;
2827 skb_pull(skb, msglen);
82ace47a
TG
2828 }
2829
2830 return 0;
2831}
6ac552fd 2832EXPORT_SYMBOL(netlink_rcv_skb);
82ace47a 2833
d387f6ad
TG
2834/**
2835 * nlmsg_notify - send a notification netlink message
2836 * @sk: netlink socket to use
2837 * @skb: notification message
15e47304 2838 * @portid: destination netlink portid for reports or 0
d387f6ad
TG
2839 * @group: destination multicast group or 0
2840 * @report: 1 to report back, 0 to disable
2841 * @flags: allocation flags
2842 */
15e47304 2843int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 portid,
d387f6ad
TG
2844 unsigned int group, int report, gfp_t flags)
2845{
2846 int err = 0;
2847
2848 if (group) {
15e47304 2849 int exclude_portid = 0;
d387f6ad
TG
2850
2851 if (report) {
2852 atomic_inc(&skb->users);
15e47304 2853 exclude_portid = portid;
d387f6ad
TG
2854 }
2855
1ce85fe4
PNA
2856 /* errors are reported via the destination sk->sk_err, but delivery
2857 * errors are propagated if the NETLINK_BROADCAST_ERROR flag is set */
15e47304 2858 err = nlmsg_multicast(sk, skb, exclude_portid, group, flags);
d387f6ad
TG
2859 }
2860
1ce85fe4
PNA
2861 if (report) {
2862 int err2;
2863
15e47304 2864 err2 = nlmsg_unicast(sk, skb, portid);
1ce85fe4
PNA
2865 if (!err || err == -ESRCH)
2866 err = err2;
2867 }
d387f6ad
TG
2868
2869 return err;
2870}
6ac552fd 2871EXPORT_SYMBOL(nlmsg_notify);
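/*
 * Usage sketch (illustrative only, not part of this file): answering a
 * request with nlmsg_notify(), multicasting to group 1 and additionally
 * unicasting back to the requester when NLM_F_ECHO was set.  example_nl_sk,
 * req_skb and req_nlh are hypothetical names for the kernel socket and the
 * original request.
 */
#if 0
	err = nlmsg_notify(example_nl_sk, skb, NETLINK_CB(req_skb).portid, 1,
			   req_nlh->nlmsg_flags & NLM_F_ECHO, GFP_KERNEL);
#endif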
d387f6ad 2872
1da177e4
LT
2873#ifdef CONFIG_PROC_FS
2874struct nl_seq_iter {
e372c414 2875 struct seq_net_private p;
1da177e4
LT
2876 int link;
2877 int hash_idx;
2878};
2879
2880static struct sock *netlink_seq_socket_idx(struct seq_file *seq, loff_t pos)
2881{
2882 struct nl_seq_iter *iter = seq->private;
2883 int i, j;
e341694e 2884 struct netlink_sock *nlk;
1da177e4 2885 struct sock *s;
1da177e4
LT
2886 loff_t off = 0;
2887
6ac552fd 2888 for (i = 0; i < MAX_LINKS; i++) {
e341694e 2889 struct rhashtable *ht = &nl_table[i].hash;
67a24ac1 2890 const struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
e341694e
TG
2891
2892 for (j = 0; j < tbl->size; j++) {
88d6ed15
TG
2893 struct rhash_head *node;
2894
2895 rht_for_each_entry_rcu(nlk, node, tbl, j, node) {
e341694e 2896 s = (struct sock *)nlk;
1da177e4 2897
1218854a 2898 if (sock_net(s) != seq_file_net(seq))
b4b51029 2899 continue;
1da177e4
LT
2900 if (off == pos) {
2901 iter->link = i;
2902 iter->hash_idx = j;
2903 return s;
2904 }
2905 ++off;
2906 }
2907 }
2908 }
2909 return NULL;
2910}
2911
2912static void *netlink_seq_start(struct seq_file *seq, loff_t *pos)
78fd1d0a 2913 __acquires(nl_table_lock) __acquires(RCU)
1da177e4 2914{
78fd1d0a 2915 read_lock(&nl_table_lock);
e341694e 2916 rcu_read_lock();
1da177e4
LT
2917 return *pos ? netlink_seq_socket_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2918}
2919
2920static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2921{
78fd1d0a 2922 struct rhashtable *ht;
88d6ed15
TG
2923 const struct bucket_table *tbl;
2924 struct rhash_head *node;
e341694e 2925 struct netlink_sock *nlk;
1da177e4 2926 struct nl_seq_iter *iter;
da12c90e 2927 struct net *net;
1da177e4
LT
2928 int i, j;
2929
2930 ++*pos;
2931
2932 if (v == SEQ_START_TOKEN)
2933 return netlink_seq_socket_idx(seq, 0);
746fac4d 2934
da12c90e 2935 net = seq_file_net(seq);
b4b51029 2936 iter = seq->private;
e341694e
TG
2937 nlk = v;
2938
78fd1d0a
TG
2939 i = iter->link;
2940 ht = &nl_table[i].hash;
88d6ed15
TG
2941 tbl = rht_dereference_rcu(ht->tbl, ht);
2942 rht_for_each_entry_rcu_continue(nlk, node, nlk->node.next, tbl, iter->hash_idx, node)
e341694e
TG
2943 if (net_eq(sock_net((struct sock *)nlk), net))
2944 return nlk;
1da177e4 2945
1da177e4
LT
2946 j = iter->hash_idx + 1;
2947
2948 do {
da12c90e 2949
e341694e 2950 for (; j < tbl->size; j++) {
88d6ed15 2951 rht_for_each_entry_rcu(nlk, node, tbl, j, node) {
e341694e
TG
2952 if (net_eq(sock_net((struct sock *)nlk), net)) {
2953 iter->link = i;
2954 iter->hash_idx = j;
2955 return nlk;
2956 }
1da177e4
LT
2957 }
2958 }
2959
2960 j = 0;
2961 } while (++i < MAX_LINKS);
2962
2963 return NULL;
2964}
2965
2966static void netlink_seq_stop(struct seq_file *seq, void *v)
78fd1d0a 2967 __releases(RCU) __releases(nl_table_lock)
1da177e4 2968{
e341694e 2969 rcu_read_unlock();
78fd1d0a 2970 read_unlock(&nl_table_lock);
1da177e4
LT
2971}
2972
2973
2974static int netlink_seq_show(struct seq_file *seq, void *v)
2975{
658cb354 2976 if (v == SEQ_START_TOKEN) {
1da177e4
LT
2977 seq_puts(seq,
2978 "sk Eth Pid Groups "
cf0aa4e0 2979 "Rmem Wmem Dump Locks Drops Inode\n");
658cb354 2980 } else {
1da177e4
LT
2981 struct sock *s = v;
2982 struct netlink_sock *nlk = nlk_sk(s);
2983
16b304f3 2984 seq_printf(seq, "%pK %-3d %-6u %08x %-8d %-8d %d %-8d %-8d %-8lu\n",
1da177e4
LT
2985 s,
2986 s->sk_protocol,
15e47304 2987 nlk->portid,
513c2500 2988 nlk->groups ? (u32)nlk->groups[0] : 0,
31e6d363
ED
2989 sk_rmem_alloc_get(s),
2990 sk_wmem_alloc_get(s),
16b304f3 2991 nlk->cb_running,
38938bfe 2992 atomic_read(&s->sk_refcnt),
cf0aa4e0
MY
2993 atomic_read(&s->sk_drops),
2994 sock_i_ino(s)
1da177e4
LT
2995 );
2996
2997 }
2998 return 0;
2999}
3000
56b3d975 3001static const struct seq_operations netlink_seq_ops = {
1da177e4
LT
3002 .start = netlink_seq_start,
3003 .next = netlink_seq_next,
3004 .stop = netlink_seq_stop,
3005 .show = netlink_seq_show,
3006};
3007
3008
3009static int netlink_seq_open(struct inode *inode, struct file *file)
3010{
e372c414
DL
3011 return seq_open_net(inode, file, &netlink_seq_ops,
3012 sizeof(struct nl_seq_iter));
b4b51029
EB
3013}
3014
da7071d7 3015static const struct file_operations netlink_seq_fops = {
1da177e4
LT
3016 .owner = THIS_MODULE,
3017 .open = netlink_seq_open,
3018 .read = seq_read,
3019 .llseek = seq_lseek,
e372c414 3020 .release = seq_release_net,
1da177e4
LT
3021};
3022
3023#endif
3024
3025int netlink_register_notifier(struct notifier_block *nb)
3026{
e041c683 3027 return atomic_notifier_chain_register(&netlink_chain, nb);
1da177e4 3028}
6ac552fd 3029EXPORT_SYMBOL(netlink_register_notifier);
1da177e4
LT
3030
3031int netlink_unregister_notifier(struct notifier_block *nb)
3032{
e041c683 3033 return atomic_notifier_chain_unregister(&netlink_chain, nb);
1da177e4 3034}
6ac552fd 3035EXPORT_SYMBOL(netlink_unregister_notifier);
746fac4d 3036
90ddc4f0 3037static const struct proto_ops netlink_ops = {
1da177e4
LT
3038 .family = PF_NETLINK,
3039 .owner = THIS_MODULE,
3040 .release = netlink_release,
3041 .bind = netlink_bind,
3042 .connect = netlink_connect,
3043 .socketpair = sock_no_socketpair,
3044 .accept = sock_no_accept,
3045 .getname = netlink_getname,
9652e931 3046 .poll = netlink_poll,
1da177e4
LT
3047 .ioctl = sock_no_ioctl,
3048 .listen = sock_no_listen,
3049 .shutdown = sock_no_shutdown,
9a4595bc
PM
3050 .setsockopt = netlink_setsockopt,
3051 .getsockopt = netlink_getsockopt,
1da177e4
LT
3052 .sendmsg = netlink_sendmsg,
3053 .recvmsg = netlink_recvmsg,
ccdfcc39 3054 .mmap = netlink_mmap,
1da177e4
LT
3055 .sendpage = sock_no_sendpage,
3056};
3057
ec1b4cf7 3058static const struct net_proto_family netlink_family_ops = {
1da177e4
LT
3059 .family = PF_NETLINK,
3060 .create = netlink_create,
3061 .owner = THIS_MODULE, /* for consistency 8) */
3062};
3063
4665079c 3064static int __net_init netlink_net_init(struct net *net)
b4b51029
EB
3065{
3066#ifdef CONFIG_PROC_FS
d4beaa66 3067 if (!proc_create("netlink", 0, net->proc_net, &netlink_seq_fops))
b4b51029
EB
3068 return -ENOMEM;
3069#endif
3070 return 0;
3071}
3072
4665079c 3073static void __net_exit netlink_net_exit(struct net *net)
b4b51029
EB
3074{
3075#ifdef CONFIG_PROC_FS
ece31ffd 3076 remove_proc_entry("netlink", net->proc_net);
b4b51029
EB
3077#endif
3078}
3079
b963ea89
DM
3080static void __init netlink_add_usersock_entry(void)
3081{
5c398dc8 3082 struct listeners *listeners;
b963ea89
DM
3083 int groups = 32;
3084
5c398dc8 3085 listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL);
b963ea89 3086 if (!listeners)
5c398dc8 3087 panic("netlink_add_usersock_entry: Cannot allocate listeners\n");
b963ea89
DM
3088
3089 netlink_table_grab();
3090
3091 nl_table[NETLINK_USERSOCK].groups = groups;
5c398dc8 3092 rcu_assign_pointer(nl_table[NETLINK_USERSOCK].listeners, listeners);
b963ea89
DM
3093 nl_table[NETLINK_USERSOCK].module = THIS_MODULE;
3094 nl_table[NETLINK_USERSOCK].registered = 1;
9785e10a 3095 nl_table[NETLINK_USERSOCK].flags = NL_CFG_F_NONROOT_SEND;
b963ea89
DM
3096
3097 netlink_table_ungrab();
3098}
3099
022cbae6 3100static struct pernet_operations __net_initdata netlink_net_ops = {
b4b51029
EB
3101 .init = netlink_net_init,
3102 .exit = netlink_net_exit,
3103};
3104
1da177e4
LT
3105static int __init netlink_proto_init(void)
3106{
1da177e4 3107 int i;
1da177e4 3108 int err = proto_register(&netlink_proto, 0);
e341694e
TG
3109 struct rhashtable_params ht_params = {
3110 .head_offset = offsetof(struct netlink_sock, node),
3111 .key_offset = offsetof(struct netlink_sock, portid),
3112 .key_len = sizeof(u32), /* portid */
7f19fc5e 3113 .hashfn = jhash,
e341694e
TG
3114 .max_shift = 16, /* 64K */
3115 .grow_decision = rht_grow_above_75,
3116 .shrink_decision = rht_shrink_below_30,
e341694e 3117 };
1da177e4
LT
3118
3119 if (err != 0)
3120 goto out;
3121
fab25745 3122 BUILD_BUG_ON(sizeof(struct netlink_skb_parms) > FIELD_SIZEOF(struct sk_buff, cb));
1da177e4 3123
0da974f4 3124 nl_table = kcalloc(MAX_LINKS, sizeof(*nl_table), GFP_KERNEL);
fab2caf6
AM
3125 if (!nl_table)
3126 goto panic;
1da177e4 3127
1da177e4 3128 for (i = 0; i < MAX_LINKS; i++) {
e341694e
TG
3129 if (rhashtable_init(&nl_table[i].hash, &ht_params) < 0) {
3130 while (--i > 0)
3131 rhashtable_destroy(&nl_table[i].hash);
1da177e4 3132 kfree(nl_table);
fab2caf6 3133 goto panic;
1da177e4 3134 }
1da177e4
LT
3135 }
3136
bcbde0d4
DB
3137 INIT_LIST_HEAD(&netlink_tap_all);
3138
b963ea89
DM
3139 netlink_add_usersock_entry();
3140
1da177e4 3141 sock_register(&netlink_family_ops);
b4b51029 3142 register_pernet_subsys(&netlink_net_ops);
746fac4d 3143 /* The netlink device handler may be needed early. */
1da177e4
LT
3144 rtnetlink_init();
3145out:
3146 return err;
fab2caf6
AM
3147panic:
3148 panic("netlink_init: Cannot allocate nl_table\n");
1da177e4
LT
3149}
3150
1da177e4 3151core_initcall(netlink_proto_init);