netlink: Use random autobind rover
net/netlink/af_netlink.c
/*
 * NETLINK	Kernel-user communication protocol.
 *
 * 		Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>
 * 				Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 * 				Patrick McHardy <kaber@trash.net>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Tue Jun 26 14:36:48 MEST 2001 Herbert "herp" Rosmanith
 *                               added netlink_proto_exit
 * Tue Jan 22 18:32:44 BRST 2002 Arnaldo C. de Melo <acme@conectiva.com.br>
 *                               use nlk_sk, as sk->protinfo is on a diet 8)
 * Fri Jul 22 19:51:12 MEST 2005 Harald Welte <laforge@gnumonks.org>
 *                               - inc module use count of module that owns
 *                                 the kernel socket in case userspace opens
 *                                 socket of same protocol
 *                               - remove all module support, since netlink is
 *                                 mandatory if CONFIG_NET=y these days
 */

#include <linux/module.h>

#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/fcntl.h>
#include <linux/termios.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/security.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/random.h>
#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/audit.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <linux/if_arp.h>
#include <linux/rhashtable.h>
#include <asm/cacheflush.h>
#include <linux/hash.h>
#include <linux/genetlink.h>

#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/scm.h>
#include <net/netlink.h>

#include "af_netlink.h"

struct listeners {
	struct rcu_head		rcu;
	unsigned long		masks[0];
};

/* state bits */
#define NETLINK_S_CONGESTED		0x0

/* flags */
#define NETLINK_F_KERNEL_SOCKET		0x1
#define NETLINK_F_RECV_PKTINFO		0x2
#define NETLINK_F_BROADCAST_SEND_ERROR	0x4
#define NETLINK_F_RECV_NO_ENOBUFS	0x8
#define NETLINK_F_LISTEN_ALL_NSID	0x10

static inline int netlink_is_kernel(struct sock *sk)
{
	return nlk_sk(sk)->flags & NETLINK_F_KERNEL_SOCKET;
}

struct netlink_table *nl_table;
EXPORT_SYMBOL_GPL(nl_table);

static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);

static int netlink_dump(struct sock *sk);
static void netlink_skb_destructor(struct sk_buff *skb);

/* nl_table locking explained:
 * Lookup and traversal are protected with an RCU read-side lock. Insertion
 * and removal are protected with per-bucket locks while using RCU list
 * modification primitives and may run in parallel to RCU-protected lookups.
 * Destruction of the Netlink socket may only occur *after* nl_table_lock has
 * been acquired, either during or after the socket has been removed from
 * the list, and after an RCU grace period.
 */
DEFINE_RWLOCK(nl_table_lock);
EXPORT_SYMBOL_GPL(nl_table_lock);
static atomic_t nl_table_users = ATOMIC_INIT(0);

#define nl_deref_protected(X) rcu_dereference_protected(X, lockdep_is_held(&nl_table_lock));

static ATOMIC_NOTIFIER_HEAD(netlink_chain);

static DEFINE_SPINLOCK(netlink_tap_lock);
static struct list_head netlink_tap_all __read_mostly;

static const struct rhashtable_params netlink_rhashtable_params;

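/* Multicast group numbers are 1-based on the wire: group n corresponds to
 * mask bit (n - 1), and group 0 means "no group" (an empty mask).
 */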
static inline u32 netlink_group_mask(u32 group)
{
	return group ? 1 << (group - 1) : 0;
}

int netlink_add_tap(struct netlink_tap *nt)
{
	if (unlikely(nt->dev->type != ARPHRD_NETLINK))
		return -EINVAL;

	spin_lock(&netlink_tap_lock);
	list_add_rcu(&nt->list, &netlink_tap_all);
	spin_unlock(&netlink_tap_lock);

	__module_get(nt->module);

	return 0;
}
EXPORT_SYMBOL_GPL(netlink_add_tap);

static int __netlink_remove_tap(struct netlink_tap *nt)
{
	bool found = false;
	struct netlink_tap *tmp;

	spin_lock(&netlink_tap_lock);

	list_for_each_entry(tmp, &netlink_tap_all, list) {
		if (nt == tmp) {
			list_del_rcu(&nt->list);
			found = true;
			goto out;
		}
	}

	pr_warn("__netlink_remove_tap: %p not found\n", nt);
out:
	spin_unlock(&netlink_tap_lock);

	if (found && nt->module)
		module_put(nt->module);

	return found ? 0 : -ENODEV;
}

int netlink_remove_tap(struct netlink_tap *nt)
{
	int ret;

	ret = __netlink_remove_tap(nt);
	synchronize_net();

	return ret;
}
EXPORT_SYMBOL_GPL(netlink_remove_tap);

static bool netlink_filter_tap(const struct sk_buff *skb)
{
	struct sock *sk = skb->sk;

	/* We take the more conservative approach and
	 * whitelist socket protocols that may pass.
	 */
	switch (sk->sk_protocol) {
	case NETLINK_ROUTE:
	case NETLINK_USERSOCK:
	case NETLINK_SOCK_DIAG:
	case NETLINK_NFLOG:
	case NETLINK_XFRM:
	case NETLINK_FIB_LOOKUP:
	case NETLINK_NETFILTER:
	case NETLINK_GENERIC:
		return true;
	}

	return false;
}

static int __netlink_deliver_tap_skb(struct sk_buff *skb,
				     struct net_device *dev)
{
	struct sk_buff *nskb;
	struct sock *sk = skb->sk;
	int ret = -ENOMEM;

	dev_hold(dev);
	nskb = skb_clone(skb, GFP_ATOMIC);
	if (nskb) {
		nskb->dev = dev;
		nskb->protocol = htons((u16) sk->sk_protocol);
		nskb->pkt_type = netlink_is_kernel(sk) ?
				 PACKET_KERNEL : PACKET_USER;
		skb_reset_network_header(nskb);
		ret = dev_queue_xmit(nskb);
		if (unlikely(ret > 0))
			ret = net_xmit_errno(ret);
	}

	dev_put(dev);
	return ret;
}

static void __netlink_deliver_tap(struct sk_buff *skb)
{
	int ret;
	struct netlink_tap *tmp;

	if (!netlink_filter_tap(skb))
		return;

	list_for_each_entry_rcu(tmp, &netlink_tap_all, list) {
		ret = __netlink_deliver_tap_skb(skb, tmp->dev);
		if (unlikely(ret))
			break;
	}
}

static void netlink_deliver_tap(struct sk_buff *skb)
{
	rcu_read_lock();

	if (unlikely(!list_empty(&netlink_tap_all)))
		__netlink_deliver_tap(skb);

	rcu_read_unlock();
}

static void netlink_deliver_tap_kernel(struct sock *dst, struct sock *src,
				       struct sk_buff *skb)
{
	if (!(netlink_is_kernel(dst) && netlink_is_kernel(src)))
		netlink_deliver_tap(skb);
}

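/* Receive-side overrun accounting: unless the socket opted out via
 * NETLINK_NO_ENOBUFS, raise ENOBUFS once per congestion episode; the
 * NETLINK_S_CONGESTED bit set here is cleared again in netlink_rcv_wake()
 * once the receive queue has drained.
 */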
static void netlink_overrun(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (!(nlk->flags & NETLINK_F_RECV_NO_ENOBUFS)) {
		if (!test_and_set_bit(NETLINK_S_CONGESTED,
				      &nlk_sk(sk)->state)) {
			sk->sk_err = ENOBUFS;
			sk->sk_error_report(sk);
		}
	}
	atomic_inc(&sk->sk_drops);
}

static void netlink_rcv_wake(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (skb_queue_empty(&sk->sk_receive_queue))
		clear_bit(NETLINK_S_CONGESTED, &nlk->state);
	if (!test_bit(NETLINK_S_CONGESTED, &nlk->state))
		wake_up_interruptible(&nlk->wait);
}

#ifdef CONFIG_NETLINK_MMAP
static bool netlink_skb_is_mmaped(const struct sk_buff *skb)
{
	return NETLINK_CB(skb).flags & NETLINK_SKB_MMAPED;
}

static bool netlink_rx_is_mmaped(struct sock *sk)
{
	return nlk_sk(sk)->rx_ring.pg_vec != NULL;
}

static bool netlink_tx_is_mmaped(struct sock *sk)
{
	return nlk_sk(sk)->tx_ring.pg_vec != NULL;
}

static __pure struct page *pgvec_to_page(const void *addr)
{
	if (is_vmalloc_addr(addr))
		return vmalloc_to_page(addr);
	else
		return virt_to_page(addr);
}

static void free_pg_vec(void **pg_vec, unsigned int order, unsigned int len)
{
	unsigned int i;

	for (i = 0; i < len; i++) {
		if (pg_vec[i] != NULL) {
			if (is_vmalloc_addr(pg_vec[i]))
				vfree(pg_vec[i]);
			else
				free_pages((unsigned long)pg_vec[i], order);
		}
	}
	kfree(pg_vec);
}

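/* Per-block allocation: try physically contiguous pages first (quietly,
 * without retries), fall back to vmalloc, and only then retry the
 * contiguous path with __GFP_NORETRY dropped.
 */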
static void *alloc_one_pg_vec_page(unsigned long order)
{
	void *buffer;
	gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP | __GFP_ZERO |
			  __GFP_NOWARN | __GFP_NORETRY;

	buffer = (void *)__get_free_pages(gfp_flags, order);
	if (buffer != NULL)
		return buffer;

	buffer = vzalloc((1 << order) * PAGE_SIZE);
	if (buffer != NULL)
		return buffer;

	gfp_flags &= ~__GFP_NORETRY;
	return (void *)__get_free_pages(gfp_flags, order);
}

static void **alloc_pg_vec(struct netlink_sock *nlk,
			   struct nl_mmap_req *req, unsigned int order)
{
	unsigned int block_nr = req->nm_block_nr;
	unsigned int i;
	void **pg_vec;

	pg_vec = kcalloc(block_nr, sizeof(void *), GFP_KERNEL);
	if (pg_vec == NULL)
		return NULL;

	for (i = 0; i < block_nr; i++) {
		pg_vec[i] = alloc_one_pg_vec_page(order);
		if (pg_vec[i] == NULL)
			goto err1;
	}

	return pg_vec;
err1:
	free_pg_vec(pg_vec, order, block_nr);
	return NULL;
}

static int netlink_set_ring(struct sock *sk, struct nl_mmap_req *req,
			    bool closing, bool tx_ring)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	struct netlink_ring *ring;
	struct sk_buff_head *queue;
	void **pg_vec = NULL;
	unsigned int order = 0;
	int err;

	ring  = tx_ring ? &nlk->tx_ring : &nlk->rx_ring;
	queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;

	if (!closing) {
		if (atomic_read(&nlk->mapped))
			return -EBUSY;
		if (atomic_read(&ring->pending))
			return -EBUSY;
	}

	if (req->nm_block_nr) {
		if (ring->pg_vec != NULL)
			return -EBUSY;

		if ((int)req->nm_block_size <= 0)
			return -EINVAL;
		if (!PAGE_ALIGNED(req->nm_block_size))
			return -EINVAL;
		if (req->nm_frame_size < NL_MMAP_HDRLEN)
			return -EINVAL;
		if (!IS_ALIGNED(req->nm_frame_size, NL_MMAP_MSG_ALIGNMENT))
			return -EINVAL;

		ring->frames_per_block = req->nm_block_size /
					 req->nm_frame_size;
		if (ring->frames_per_block == 0)
			return -EINVAL;
		if (ring->frames_per_block * req->nm_block_nr !=
		    req->nm_frame_nr)
			return -EINVAL;

		order = get_order(req->nm_block_size);
		pg_vec = alloc_pg_vec(nlk, req, order);
		if (pg_vec == NULL)
			return -ENOMEM;
	} else {
		if (req->nm_frame_nr)
			return -EINVAL;
	}

	err = -EBUSY;
	mutex_lock(&nlk->pg_vec_lock);
	if (closing || atomic_read(&nlk->mapped) == 0) {
		err = 0;
		spin_lock_bh(&queue->lock);

		ring->frame_max		= req->nm_frame_nr - 1;
		ring->head		= 0;
		ring->frame_size	= req->nm_frame_size;
		ring->pg_vec_pages	= req->nm_block_size / PAGE_SIZE;

		swap(ring->pg_vec_len, req->nm_block_nr);
		swap(ring->pg_vec_order, order);
		swap(ring->pg_vec, pg_vec);

		__skb_queue_purge(queue);
		spin_unlock_bh(&queue->lock);

		WARN_ON(atomic_read(&nlk->mapped));
	}
	mutex_unlock(&nlk->pg_vec_lock);

	if (pg_vec)
		free_pg_vec(pg_vec, order, req->nm_block_nr);
	return err;
}

static void netlink_mm_open(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct socket *sock = file->private_data;
	struct sock *sk = sock->sk;

	if (sk)
		atomic_inc(&nlk_sk(sk)->mapped);
}

static void netlink_mm_close(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct socket *sock = file->private_data;
	struct sock *sk = sock->sk;

	if (sk)
		atomic_dec(&nlk_sk(sk)->mapped);
}

static const struct vm_operations_struct netlink_mmap_ops = {
	.open	= netlink_mm_open,
	.close	= netlink_mm_close,
};

static int netlink_mmap(struct file *file, struct socket *sock,
			struct vm_area_struct *vma)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	struct netlink_ring *ring;
	unsigned long start, size, expected;
	unsigned int i;
	int err = -EINVAL;

	if (vma->vm_pgoff)
		return -EINVAL;

	mutex_lock(&nlk->pg_vec_lock);

	expected = 0;
	for (ring = &nlk->rx_ring; ring <= &nlk->tx_ring; ring++) {
		if (ring->pg_vec == NULL)
			continue;
		expected += ring->pg_vec_len * ring->pg_vec_pages * PAGE_SIZE;
	}

	if (expected == 0)
		goto out;

	size = vma->vm_end - vma->vm_start;
	if (size != expected)
		goto out;

	start = vma->vm_start;
	for (ring = &nlk->rx_ring; ring <= &nlk->tx_ring; ring++) {
		if (ring->pg_vec == NULL)
			continue;

		for (i = 0; i < ring->pg_vec_len; i++) {
			struct page *page;
			void *kaddr = ring->pg_vec[i];
			unsigned int pg_num;

			for (pg_num = 0; pg_num < ring->pg_vec_pages; pg_num++) {
				page = pgvec_to_page(kaddr);
				err = vm_insert_page(vma, start, page);
				if (err < 0)
					goto out;
				start += PAGE_SIZE;
				kaddr += PAGE_SIZE;
			}
		}
	}

	atomic_inc(&nlk->mapped);
	vma->vm_ops = &netlink_mmap_ops;
	err = 0;
out:
	mutex_unlock(&nlk->pg_vec_lock);
	return err;
}

static void netlink_frame_flush_dcache(const struct nl_mmap_hdr *hdr, unsigned int nm_len)
{
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
	struct page *p_start, *p_end;

	/* First page is flushed through netlink_{get,set}_status */
	p_start = pgvec_to_page(hdr + PAGE_SIZE);
	p_end   = pgvec_to_page((void *)hdr + NL_MMAP_HDRLEN + nm_len - 1);
	while (p_start <= p_end) {
		flush_dcache_page(p_start);
		p_start++;
	}
#endif
}

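/* The frame status word is the hand-off point between kernel and userspace:
 * netlink_set_status() issues a full memory barrier before publishing the
 * new status so that the frame contents are visible first, pairing with the
 * read barrier in netlink_get_status().
 */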
static enum nl_mmap_status netlink_get_status(const struct nl_mmap_hdr *hdr)
{
	smp_rmb();
	flush_dcache_page(pgvec_to_page(hdr));
	return hdr->nm_status;
}

static void netlink_set_status(struct nl_mmap_hdr *hdr,
			       enum nl_mmap_status status)
{
	smp_mb();
	hdr->nm_status = status;
	flush_dcache_page(pgvec_to_page(hdr));
}

static struct nl_mmap_hdr *
__netlink_lookup_frame(const struct netlink_ring *ring, unsigned int pos)
{
	unsigned int pg_vec_pos, frame_off;

	pg_vec_pos = pos / ring->frames_per_block;
	frame_off  = pos % ring->frames_per_block;

	return ring->pg_vec[pg_vec_pos] + (frame_off * ring->frame_size);
}

static struct nl_mmap_hdr *
netlink_lookup_frame(const struct netlink_ring *ring, unsigned int pos,
		     enum nl_mmap_status status)
{
	struct nl_mmap_hdr *hdr;

	hdr = __netlink_lookup_frame(ring, pos);
	if (netlink_get_status(hdr) != status)
		return NULL;

	return hdr;
}

static struct nl_mmap_hdr *
netlink_current_frame(const struct netlink_ring *ring,
		      enum nl_mmap_status status)
{
	return netlink_lookup_frame(ring, ring->head, status);
}

static struct nl_mmap_hdr *
netlink_previous_frame(const struct netlink_ring *ring,
		       enum nl_mmap_status status)
{
	unsigned int prev;

	prev = ring->head ? ring->head - 1 : ring->frame_max;
	return netlink_lookup_frame(ring, prev, status);
}

static void netlink_increment_head(struct netlink_ring *ring)
{
	ring->head = ring->head != ring->frame_max ? ring->head + 1 : 0;
}

static void netlink_forward_ring(struct netlink_ring *ring)
{
	unsigned int head = ring->head, pos = head;
	const struct nl_mmap_hdr *hdr;

	do {
		hdr = __netlink_lookup_frame(ring, pos);
		if (hdr->nm_status == NL_MMAP_STATUS_UNUSED)
			break;
		if (hdr->nm_status != NL_MMAP_STATUS_SKIP)
			break;
		netlink_increment_head(ring);
	} while (ring->head != head);
}

static bool netlink_dump_space(struct netlink_sock *nlk)
{
	struct netlink_ring *ring = &nlk->rx_ring;
	struct nl_mmap_hdr *hdr;
	unsigned int n;

	hdr = netlink_current_frame(ring, NL_MMAP_STATUS_UNUSED);
	if (hdr == NULL)
		return false;

	n = ring->head + ring->frame_max / 2;
	if (n > ring->frame_max)
		n -= ring->frame_max;

	hdr = __netlink_lookup_frame(ring, n);

	return hdr->nm_status == NL_MMAP_STATUS_UNUSED;
}

static unsigned int netlink_poll(struct file *file, struct socket *sock,
				 poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	unsigned int mask;
	int err;

	if (nlk->rx_ring.pg_vec != NULL) {
		/* Memory mapped sockets don't call recvmsg(), so flow control
		 * for dumps is performed here. A dump is allowed to continue
		 * if at least half the ring is unused.
		 */
		while (nlk->cb_running && netlink_dump_space(nlk)) {
			err = netlink_dump(sk);
			if (err < 0) {
				sk->sk_err = -err;
				sk->sk_error_report(sk);
				break;
			}
		}
		netlink_rcv_wake(sk);
	}

	mask = datagram_poll(file, sock, wait);

	spin_lock_bh(&sk->sk_receive_queue.lock);
	if (nlk->rx_ring.pg_vec) {
		netlink_forward_ring(&nlk->rx_ring);
		if (!netlink_previous_frame(&nlk->rx_ring, NL_MMAP_STATUS_UNUSED))
			mask |= POLLIN | POLLRDNORM;
	}
	spin_unlock_bh(&sk->sk_receive_queue.lock);

	spin_lock_bh(&sk->sk_write_queue.lock);
	if (nlk->tx_ring.pg_vec) {
		if (netlink_current_frame(&nlk->tx_ring, NL_MMAP_STATUS_UNUSED))
			mask |= POLLOUT | POLLWRNORM;
	}
	spin_unlock_bh(&sk->sk_write_queue.lock);

	return mask;
}

static struct nl_mmap_hdr *netlink_mmap_hdr(struct sk_buff *skb)
{
	return (struct nl_mmap_hdr *)(skb->head - NL_MMAP_HDRLEN);
}

static void netlink_ring_setup_skb(struct sk_buff *skb, struct sock *sk,
				   struct netlink_ring *ring,
				   struct nl_mmap_hdr *hdr)
{
	unsigned int size;
	void *data;

	size = ring->frame_size - NL_MMAP_HDRLEN;
	data = (void *)hdr + NL_MMAP_HDRLEN;

	skb->head	= data;
	skb->data	= data;
	skb_reset_tail_pointer(skb);
	skb->end	= skb->tail + size;
	skb->len	= 0;

	skb->destructor	= netlink_skb_destructor;
	NETLINK_CB(skb).flags |= NETLINK_SKB_MMAPED;
	NETLINK_CB(skb).sk = sk;
}

static int netlink_mmap_sendmsg(struct sock *sk, struct msghdr *msg,
				u32 dst_portid, u32 dst_group,
				struct scm_cookie *scm)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	struct netlink_ring *ring;
	struct nl_mmap_hdr *hdr;
	struct sk_buff *skb;
	unsigned int maxlen;
	int err = 0, len = 0;

	mutex_lock(&nlk->pg_vec_lock);

	ring   = &nlk->tx_ring;
	maxlen = ring->frame_size - NL_MMAP_HDRLEN;

	do {
		unsigned int nm_len;

		hdr = netlink_current_frame(ring, NL_MMAP_STATUS_VALID);
		if (hdr == NULL) {
			if (!(msg->msg_flags & MSG_DONTWAIT) &&
			    atomic_read(&nlk->tx_ring.pending))
				schedule();
			continue;
		}

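		/* Snapshot nm_len once and copy the payload out of the
		 * still userspace-writable ring frame before it is used, so
		 * a racing writer cannot change the data after validation.
		 */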
		nm_len = ACCESS_ONCE(hdr->nm_len);
		if (nm_len > maxlen) {
			err = -EINVAL;
			goto out;
		}

		netlink_frame_flush_dcache(hdr, nm_len);

		skb = alloc_skb(nm_len, GFP_KERNEL);
		if (skb == NULL) {
			err = -ENOBUFS;
			goto out;
		}
		__skb_put(skb, nm_len);
		memcpy(skb->data, (void *)hdr + NL_MMAP_HDRLEN, nm_len);
		netlink_set_status(hdr, NL_MMAP_STATUS_UNUSED);

		netlink_increment_head(ring);

		NETLINK_CB(skb).portid	  = nlk->portid;
		NETLINK_CB(skb).dst_group = dst_group;
		NETLINK_CB(skb).creds	  = scm->creds;

		err = security_netlink_send(sk, skb);
		if (err) {
			kfree_skb(skb);
			goto out;
		}

		if (unlikely(dst_group)) {
			atomic_inc(&skb->users);
			netlink_broadcast(sk, skb, dst_portid, dst_group,
					  GFP_KERNEL);
		}
		err = netlink_unicast(sk, skb, dst_portid,
				      msg->msg_flags & MSG_DONTWAIT);
		if (err < 0)
			goto out;
		len += err;

	} while (hdr != NULL ||
		 (!(msg->msg_flags & MSG_DONTWAIT) &&
		  atomic_read(&nlk->tx_ring.pending)));

	if (len > 0)
		err = len;
out:
	mutex_unlock(&nlk->pg_vec_lock);
	return err;
}

static void netlink_queue_mmaped_skb(struct sock *sk, struct sk_buff *skb)
{
	struct nl_mmap_hdr *hdr;

	hdr = netlink_mmap_hdr(skb);
	hdr->nm_len	= skb->len;
	hdr->nm_group	= NETLINK_CB(skb).dst_group;
	hdr->nm_pid	= NETLINK_CB(skb).creds.pid;
	hdr->nm_uid	= from_kuid(sk_user_ns(sk), NETLINK_CB(skb).creds.uid);
	hdr->nm_gid	= from_kgid(sk_user_ns(sk), NETLINK_CB(skb).creds.gid);
	netlink_frame_flush_dcache(hdr, hdr->nm_len);
	netlink_set_status(hdr, NL_MMAP_STATUS_VALID);

	NETLINK_CB(skb).flags |= NETLINK_SKB_DELIVERED;
	kfree_skb(skb);
}

static void netlink_ring_set_copied(struct sock *sk, struct sk_buff *skb)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	struct netlink_ring *ring = &nlk->rx_ring;
	struct nl_mmap_hdr *hdr;

	spin_lock_bh(&sk->sk_receive_queue.lock);
	hdr = netlink_current_frame(ring, NL_MMAP_STATUS_UNUSED);
	if (hdr == NULL) {
		spin_unlock_bh(&sk->sk_receive_queue.lock);
		kfree_skb(skb);
		netlink_overrun(sk);
		return;
	}
	netlink_increment_head(ring);
	__skb_queue_tail(&sk->sk_receive_queue, skb);
	spin_unlock_bh(&sk->sk_receive_queue.lock);

	hdr->nm_len	= skb->len;
	hdr->nm_group	= NETLINK_CB(skb).dst_group;
	hdr->nm_pid	= NETLINK_CB(skb).creds.pid;
	hdr->nm_uid	= from_kuid(sk_user_ns(sk), NETLINK_CB(skb).creds.uid);
	hdr->nm_gid	= from_kgid(sk_user_ns(sk), NETLINK_CB(skb).creds.gid);
	netlink_set_status(hdr, NL_MMAP_STATUS_COPY);
}

#else /* CONFIG_NETLINK_MMAP */
#define netlink_skb_is_mmaped(skb)	false
#define netlink_rx_is_mmaped(sk)	false
#define netlink_tx_is_mmaped(sk)	false
#define netlink_mmap			sock_no_mmap
#define netlink_poll			datagram_poll
#define netlink_mmap_sendmsg(sk, msg, dst_portid, dst_group, scm)	0
#endif /* CONFIG_NETLINK_MMAP */

static void netlink_skb_destructor(struct sk_buff *skb)
{
#ifdef CONFIG_NETLINK_MMAP
	struct nl_mmap_hdr *hdr;
	struct netlink_ring *ring;
	struct sock *sk;

	/* If a packet from the kernel to userspace was freed because of an
	 * error without being delivered to userspace, the kernel must reset
	 * the status. In the direction userspace to kernel, the status is
	 * always reset here after the packet was processed and freed.
	 */
	if (netlink_skb_is_mmaped(skb)) {
		hdr = netlink_mmap_hdr(skb);
		sk = NETLINK_CB(skb).sk;

		if (NETLINK_CB(skb).flags & NETLINK_SKB_TX) {
			netlink_set_status(hdr, NL_MMAP_STATUS_UNUSED);
			ring = &nlk_sk(sk)->tx_ring;
		} else {
			if (!(NETLINK_CB(skb).flags & NETLINK_SKB_DELIVERED)) {
				hdr->nm_len = 0;
				netlink_set_status(hdr, NL_MMAP_STATUS_VALID);
			}
			ring = &nlk_sk(sk)->rx_ring;
		}

		WARN_ON(atomic_read(&ring->pending) == 0);
		atomic_dec(&ring->pending);
		sock_put(sk);

		skb->head = NULL;
	}
#endif
	if (is_vmalloc_addr(skb->head)) {
		if (!skb->cloned ||
		    !atomic_dec_return(&(skb_shinfo(skb)->dataref)))
			vfree(skb->head);

		skb->head = NULL;
	}
	if (skb->sk != NULL)
		sock_rfree(skb);
}

static void netlink_skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
{
	WARN_ON(skb->sk != NULL);
	skb->sk = sk;
	skb->destructor = netlink_skb_destructor;
	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
	sk_mem_charge(sk, skb->truesize);
}

static void netlink_sock_destruct(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (nlk->cb_running) {
		if (nlk->cb.done)
			nlk->cb.done(&nlk->cb);

		module_put(nlk->cb.module);
		kfree_skb(nlk->cb.skb);
	}

	skb_queue_purge(&sk->sk_receive_queue);
#ifdef CONFIG_NETLINK_MMAP
	if (1) {
		struct nl_mmap_req req;

		memset(&req, 0, sizeof(req));
		if (nlk->rx_ring.pg_vec)
			netlink_set_ring(sk, &req, true, false);
		memset(&req, 0, sizeof(req));
		if (nlk->tx_ring.pg_vec)
			netlink_set_ring(sk, &req, true, true);
	}
#endif /* CONFIG_NETLINK_MMAP */

	if (!sock_flag(sk, SOCK_DEAD)) {
		printk(KERN_ERR "Freeing alive netlink socket %p\n", sk);
		return;
	}

	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
	WARN_ON(atomic_read(&sk->sk_wmem_alloc));
	WARN_ON(nlk_sk(sk)->groups);
}

/* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it is _very_ bad on
 * SMP. Look, when several writers sleep and reader wakes them up, all but one
 * immediately hit write lock and grab all the cpus. Exclusive sleep solves
 * this, _but_ remember, it adds useless work on UP machines.
 */

void netlink_table_grab(void)
	__acquires(nl_table_lock)
{
	might_sleep();

	write_lock_irq(&nl_table_lock);

	if (atomic_read(&nl_table_users)) {
		DECLARE_WAITQUEUE(wait, current);

		add_wait_queue_exclusive(&nl_table_wait, &wait);
		for (;;) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (atomic_read(&nl_table_users) == 0)
				break;
			write_unlock_irq(&nl_table_lock);
			schedule();
			write_lock_irq(&nl_table_lock);
		}

		__set_current_state(TASK_RUNNING);
		remove_wait_queue(&nl_table_wait, &wait);
	}
}

void netlink_table_ungrab(void)
	__releases(nl_table_lock)
{
	write_unlock_irq(&nl_table_lock);
	wake_up(&nl_table_wait);
}

static inline void
netlink_lock_table(void)
{
	/* read_lock() synchronizes us to netlink_table_grab */

	read_lock(&nl_table_lock);
	atomic_inc(&nl_table_users);
	read_unlock(&nl_table_lock);
}

static inline void
netlink_unlock_table(void)
{
	if (atomic_dec_and_test(&nl_table_users))
		wake_up(&nl_table_wait);
}

struct netlink_compare_arg
{
	possible_net_t pnet;
	u32 portid;
};

/* Doing sizeof directly may yield 4 extra bytes on 64-bit. */
#define netlink_compare_arg_len \
	(offsetof(struct netlink_compare_arg, portid) + sizeof(u32))

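/* The rhashtable key is the packed (net, portid) pair; cutting the key
 * length off right after ->portid keeps trailing struct padding out of
 * the hash and compare.
 */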
static inline int netlink_compare(struct rhashtable_compare_arg *arg,
				  const void *ptr)
{
	const struct netlink_compare_arg *x = arg->key;
	const struct netlink_sock *nlk = ptr;

	return nlk->portid != x->portid ||
	       !net_eq(sock_net(&nlk->sk), read_pnet(&x->pnet));
}

static void netlink_compare_arg_init(struct netlink_compare_arg *arg,
				     struct net *net, u32 portid)
{
	memset(arg, 0, sizeof(*arg));
	write_pnet(&arg->pnet, net);
	arg->portid = portid;
}

static struct sock *__netlink_lookup(struct netlink_table *table, u32 portid,
				     struct net *net)
{
	struct netlink_compare_arg arg;

	netlink_compare_arg_init(&arg, net, portid);
	return rhashtable_lookup_fast(&table->hash, &arg,
				      netlink_rhashtable_params);
}

static int __netlink_insert(struct netlink_table *table, struct sock *sk)
{
	struct netlink_compare_arg arg;

	netlink_compare_arg_init(&arg, sock_net(sk), nlk_sk(sk)->portid);
	return rhashtable_lookup_insert_key(&table->hash, &arg,
					    &nlk_sk(sk)->node,
					    netlink_rhashtable_params);
}

static struct sock *netlink_lookup(struct net *net, int protocol, u32 portid)
{
	struct netlink_table *table = &nl_table[protocol];
	struct sock *sk;

	rcu_read_lock();
	sk = __netlink_lookup(table, portid, net);
	if (sk)
		sock_hold(sk);
	rcu_read_unlock();

	return sk;
}

static const struct proto_ops netlink_ops;

static void
netlink_update_listeners(struct sock *sk)
{
	struct netlink_table *tbl = &nl_table[sk->sk_protocol];
	unsigned long mask;
	unsigned int i;
	struct listeners *listeners;

	listeners = nl_deref_protected(tbl->listeners);
	if (!listeners)
		return;

	for (i = 0; i < NLGRPLONGS(tbl->groups); i++) {
		mask = 0;
		sk_for_each_bound(sk, &tbl->mc_list) {
			if (i < NLGRPLONGS(nlk_sk(sk)->ngroups))
				mask |= nlk_sk(sk)->groups[i];
		}
		listeners->masks[i] = mask;
	}
	/* this function is only called with the netlink table "grabbed", which
	 * makes sure updates are visible before bind or setsockopt return. */
}

static int netlink_insert(struct sock *sk, u32 portid)
{
	struct netlink_table *table = &nl_table[sk->sk_protocol];
	int err;

	lock_sock(sk);

	err = -EBUSY;
	if (nlk_sk(sk)->portid)
		goto err;

	err = -ENOMEM;
	if (BITS_PER_LONG > 32 &&
	    unlikely(atomic_read(&table->hash.nelems) >= UINT_MAX))
		goto err;

	nlk_sk(sk)->portid = portid;
	sock_hold(sk);

	err = __netlink_insert(table, sk);
	if (err) {
		if (err == -EEXIST)
			err = -EADDRINUSE;
		sock_put(sk);
	}

err:
	release_sock(sk);
	return err;
}

static void netlink_remove(struct sock *sk)
{
	struct netlink_table *table;

	table = &nl_table[sk->sk_protocol];
	if (!rhashtable_remove_fast(&table->hash, &nlk_sk(sk)->node,
				    netlink_rhashtable_params)) {
		WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
		__sock_put(sk);
	}

	netlink_table_grab();
	if (nlk_sk(sk)->subscriptions) {
		__sk_del_bind_node(sk);
		netlink_update_listeners(sk);
	}
	if (sk->sk_protocol == NETLINK_GENERIC)
		atomic_inc(&genl_sk_destructing_cnt);
	netlink_table_ungrab();
}

static struct proto netlink_proto = {
	.name	  = "NETLINK",
	.owner	  = THIS_MODULE,
	.obj_size = sizeof(struct netlink_sock),
};

static int __netlink_create(struct net *net, struct socket *sock,
			    struct mutex *cb_mutex, int protocol,
			    int kern)
{
	struct sock *sk;
	struct netlink_sock *nlk;

	sock->ops = &netlink_ops;

	sk = sk_alloc(net, PF_NETLINK, GFP_KERNEL, &netlink_proto, kern);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);

	nlk = nlk_sk(sk);
	if (cb_mutex) {
		nlk->cb_mutex = cb_mutex;
	} else {
		nlk->cb_mutex = &nlk->cb_def_mutex;
		mutex_init(nlk->cb_mutex);
	}
	init_waitqueue_head(&nlk->wait);
#ifdef CONFIG_NETLINK_MMAP
	mutex_init(&nlk->pg_vec_lock);
#endif

	sk->sk_destruct = netlink_sock_destruct;
	sk->sk_protocol = protocol;
	return 0;
}

static int netlink_create(struct net *net, struct socket *sock, int protocol,
			  int kern)
{
	struct module *module = NULL;
	struct mutex *cb_mutex;
	struct netlink_sock *nlk;
	int (*bind)(struct net *net, int group);
	void (*unbind)(struct net *net, int group);
	int err = 0;

	sock->state = SS_UNCONNECTED;

	if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
		return -ESOCKTNOSUPPORT;

	if (protocol < 0 || protocol >= MAX_LINKS)
		return -EPROTONOSUPPORT;

	netlink_lock_table();
#ifdef CONFIG_MODULES
	if (!nl_table[protocol].registered) {
		netlink_unlock_table();
		request_module("net-pf-%d-proto-%d", PF_NETLINK, protocol);
		netlink_lock_table();
	}
#endif
	if (nl_table[protocol].registered &&
	    try_module_get(nl_table[protocol].module))
		module = nl_table[protocol].module;
	else
		err = -EPROTONOSUPPORT;
	cb_mutex = nl_table[protocol].cb_mutex;
	bind = nl_table[protocol].bind;
	unbind = nl_table[protocol].unbind;
	netlink_unlock_table();

	if (err < 0)
		goto out;

	err = __netlink_create(net, sock, cb_mutex, protocol, kern);
	if (err < 0)
		goto out_module;

	local_bh_disable();
	sock_prot_inuse_add(net, &netlink_proto, 1);
	local_bh_enable();

	nlk = nlk_sk(sock->sk);
	nlk->module = module;
	nlk->netlink_bind = bind;
	nlk->netlink_unbind = unbind;
out:
	return err;

out_module:
	module_put(module);
	goto out;
}

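/* Sockets are unhashed while RCU-protected lookups may still be in flight,
 * so the final sock_put() is deferred until after a grace period (see
 * netlink_release()).
 */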
static void deferred_put_nlk_sk(struct rcu_head *head)
{
	struct netlink_sock *nlk = container_of(head, struct netlink_sock, rcu);

	sock_put(&nlk->sk);
}

static int netlink_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk;

	if (!sk)
		return 0;

	netlink_remove(sk);
	sock_orphan(sk);
	nlk = nlk_sk(sk);

	/*
	 * OK. Socket is unlinked, any packets that arrive now
	 * will be purged.
	 */

	/* must not acquire netlink_table_lock in any way again before unbind
	 * and notifying genetlink is done as otherwise it might deadlock
	 */
	if (nlk->netlink_unbind) {
		int i;

		for (i = 0; i < nlk->ngroups; i++)
			if (test_bit(i, nlk->groups))
				nlk->netlink_unbind(sock_net(sk), i + 1);
	}
	if (sk->sk_protocol == NETLINK_GENERIC &&
	    atomic_dec_return(&genl_sk_destructing_cnt) == 0)
		wake_up(&genl_sk_destructing_waitq);

	sock->sk = NULL;
	wake_up_interruptible_all(&nlk->wait);

	skb_queue_purge(&sk->sk_write_queue);

	if (nlk->portid) {
		struct netlink_notify n = {
			.net = sock_net(sk),
			.protocol = sk->sk_protocol,
			.portid = nlk->portid,
		};
		atomic_notifier_call_chain(&netlink_chain,
					   NETLINK_URELEASE, &n);
	}

	module_put(nlk->module);

	if (netlink_is_kernel(sk)) {
		netlink_table_grab();
		BUG_ON(nl_table[sk->sk_protocol].registered == 0);
		if (--nl_table[sk->sk_protocol].registered == 0) {
			struct listeners *old;

			old = nl_deref_protected(nl_table[sk->sk_protocol].listeners);
			RCU_INIT_POINTER(nl_table[sk->sk_protocol].listeners, NULL);
			kfree_rcu(old, rcu);
			nl_table[sk->sk_protocol].module = NULL;
			nl_table[sk->sk_protocol].bind = NULL;
			nl_table[sk->sk_protocol].unbind = NULL;
			nl_table[sk->sk_protocol].flags = 0;
			nl_table[sk->sk_protocol].registered = 0;
		}
		netlink_table_ungrab();
	}

	kfree(nlk->groups);
	nlk->groups = NULL;

	local_bh_disable();
	sock_prot_inuse_add(sock_net(sk), &netlink_proto, -1);
	local_bh_enable();
	call_rcu(&nlk->rcu, deferred_put_nlk_sk);
	return 0;
}

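/* Autobind: first try the thread group id as the portid. On collision,
 * scan the negative portid space instead, starting from a random point
 * in [S32_MIN, -4097] and walking downward; if the rover overflows past
 * S32_MIN it is reset to -4097 by the guard below, so colliding binds
 * do not all probe the same sequence.
 */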
static int netlink_autobind(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct netlink_table *table = &nl_table[sk->sk_protocol];
	s32 portid = task_tgid_vnr(current);
	int err;
	s32 rover = -4096;
	bool ok;

retry:
	cond_resched();
	rcu_read_lock();
	ok = !__netlink_lookup(table, portid, net);
	rcu_read_unlock();
	if (!ok) {
		/* Bind collision, search negative portid values. */
		if (rover == -4096)
			/* rover will be in range [S32_MIN, -4097] */
			rover = S32_MIN + prandom_u32_max(-4096 - S32_MIN);
		else if (rover >= -4096)
			rover = -4097;
		portid = rover--;
		goto retry;
	}

	err = netlink_insert(sk, portid);
	if (err == -EADDRINUSE)
		goto retry;

	/* If 2 threads race to autobind, that is fine. */
	if (err == -EBUSY)
		err = 0;

	return err;
}

/**
 * __netlink_ns_capable - General netlink message capability test
 * @nsp: NETLINK_CB of the socket buffer holding a netlink command from userspace.
 * @user_ns: The user namespace of the capability to use
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket we received the message
 * from had the capability @cap when the netlink socket was created
 * and the sender of the message has it in the user namespace @user_ns.
 */
bool __netlink_ns_capable(const struct netlink_skb_parms *nsp,
			  struct user_namespace *user_ns, int cap)
{
	return ((nsp->flags & NETLINK_SKB_DST) ||
		file_ns_capable(nsp->sk->sk_socket->file, user_ns, cap)) &&
	       ns_capable(user_ns, cap);
}
EXPORT_SYMBOL(__netlink_ns_capable);

/**
 * netlink_ns_capable - General netlink message capability test
 * @skb: socket buffer holding a netlink command from userspace
 * @user_ns: The user namespace of the capability to use
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket we received the message
 * from had the capability @cap when the netlink socket was created
 * and the sender of the message has it in the user namespace @user_ns.
 */
bool netlink_ns_capable(const struct sk_buff *skb,
			struct user_namespace *user_ns, int cap)
{
	return __netlink_ns_capable(&NETLINK_CB(skb), user_ns, cap);
}
EXPORT_SYMBOL(netlink_ns_capable);

/**
 * netlink_capable - Netlink global message capability test
 * @skb: socket buffer holding a netlink command from userspace
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket we received the message
 * from had the capability @cap when the netlink socket was created
 * and the sender of the message has it in all user namespaces.
 */
bool netlink_capable(const struct sk_buff *skb, int cap)
{
	return netlink_ns_capable(skb, &init_user_ns, cap);
}
EXPORT_SYMBOL(netlink_capable);

/**
 * netlink_net_capable - Netlink network namespace message capability test
 * @skb: socket buffer holding a netlink command from userspace
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket we received the message
 * from had the capability @cap when the netlink socket was created
 * and the sender of the message has it over the network namespace of
 * the socket we received the message from.
 */
bool netlink_net_capable(const struct sk_buff *skb, int cap)
{
	return netlink_ns_capable(skb, sock_net(skb->sk)->user_ns, cap);
}
EXPORT_SYMBOL(netlink_net_capable);

static inline int netlink_allowed(const struct socket *sock, unsigned int flag)
{
	return (nl_table[sock->sk->sk_protocol].flags & flag) ||
	       ns_capable(sock_net(sock->sk)->user_ns, CAP_NET_ADMIN);
}

static void
netlink_update_subscriptions(struct sock *sk, unsigned int subscriptions)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (nlk->subscriptions && !subscriptions)
		__sk_del_bind_node(sk);
	else if (!nlk->subscriptions && subscriptions)
		sk_add_bind_node(sk, &nl_table[sk->sk_protocol].mc_list);
	nlk->subscriptions = subscriptions;
}

static int netlink_realloc_groups(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	unsigned int groups;
	unsigned long *new_groups;
	int err = 0;

	netlink_table_grab();

	groups = nl_table[sk->sk_protocol].groups;
	if (!nl_table[sk->sk_protocol].registered) {
		err = -ENOENT;
		goto out_unlock;
	}

	if (nlk->ngroups >= groups)
		goto out_unlock;

	new_groups = krealloc(nlk->groups, NLGRPSZ(groups), GFP_ATOMIC);
	if (new_groups == NULL) {
		err = -ENOMEM;
		goto out_unlock;
	}
	memset((char *)new_groups + NLGRPSZ(nlk->ngroups), 0,
	       NLGRPSZ(groups) - NLGRPSZ(nlk->ngroups));

	nlk->groups = new_groups;
	nlk->ngroups = groups;
out_unlock:
	netlink_table_ungrab();
	return err;
}

static void netlink_undo_bind(int group, long unsigned int groups,
			      struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	int undo;

	if (!nlk->netlink_unbind)
		return;

	for (undo = 0; undo < group; undo++)
		if (test_bit(undo, &groups))
			nlk->netlink_unbind(sock_net(sk), undo + 1);
}

static int netlink_bind(struct socket *sock, struct sockaddr *addr,
			int addr_len)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct netlink_sock *nlk = nlk_sk(sk);
	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
	int err;
	long unsigned int groups = nladdr->nl_groups;

	if (addr_len < sizeof(struct sockaddr_nl))
		return -EINVAL;

	if (nladdr->nl_family != AF_NETLINK)
		return -EINVAL;

	/* Only superuser is allowed to listen multicasts */
	if (groups) {
		if (!netlink_allowed(sock, NL_CFG_F_NONROOT_RECV))
			return -EPERM;
		err = netlink_realloc_groups(sk);
		if (err)
			return err;
	}

	if (nlk->portid)
		if (nladdr->nl_pid != nlk->portid)
			return -EINVAL;

	if (nlk->netlink_bind && groups) {
		int group;

		for (group = 0; group < nlk->ngroups; group++) {
			if (!test_bit(group, &groups))
				continue;
			err = nlk->netlink_bind(net, group + 1);
			if (!err)
				continue;
			netlink_undo_bind(group, groups, sk);
			return err;
		}
	}

	if (!nlk->portid) {
		err = nladdr->nl_pid ?
			netlink_insert(sk, nladdr->nl_pid) :
			netlink_autobind(sock);
		if (err) {
			netlink_undo_bind(nlk->ngroups, groups, sk);
			return err;
		}
	}

	if (!groups && (nlk->groups == NULL || !(u32)nlk->groups[0]))
		return 0;

	netlink_table_grab();
	netlink_update_subscriptions(sk, nlk->subscriptions +
					 hweight32(groups) -
					 hweight32(nlk->groups[0]));
	nlk->groups[0] = (nlk->groups[0] & ~0xffffffffUL) | groups;
	netlink_update_listeners(sk);
	netlink_table_ungrab();

	return 0;
}

static int netlink_connect(struct socket *sock, struct sockaddr *addr,
			   int alen, int flags)
{
	int err = 0;
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;

	if (alen < sizeof(addr->sa_family))
		return -EINVAL;

	if (addr->sa_family == AF_UNSPEC) {
		sk->sk_state	= NETLINK_UNCONNECTED;
		nlk->dst_portid	= 0;
		nlk->dst_group	= 0;
		return 0;
	}
	if (addr->sa_family != AF_NETLINK)
		return -EINVAL;

	if ((nladdr->nl_groups || nladdr->nl_pid) &&
	    !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND))
		return -EPERM;

	if (!nlk->portid)
		err = netlink_autobind(sock);

	if (err == 0) {
		sk->sk_state	= NETLINK_CONNECTED;
		nlk->dst_portid	= nladdr->nl_pid;
		nlk->dst_group	= ffs(nladdr->nl_groups);
	}

	return err;
}

static int netlink_getname(struct socket *sock, struct sockaddr *addr,
			   int *addr_len, int peer)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_nl *, nladdr, addr);

	nladdr->nl_family = AF_NETLINK;
	nladdr->nl_pad = 0;
	*addr_len = sizeof(*nladdr);

	if (peer) {
		nladdr->nl_pid = nlk->dst_portid;
		nladdr->nl_groups = netlink_group_mask(nlk->dst_group);
	} else {
		nladdr->nl_pid = nlk->portid;
		nladdr->nl_groups = nlk->groups ? nlk->groups[0] : 0;
	}
	return 0;
}

static struct sock *netlink_getsockbyportid(struct sock *ssk, u32 portid)
{
	struct sock *sock;
	struct netlink_sock *nlk;

	sock = netlink_lookup(sock_net(ssk), ssk->sk_protocol, portid);
	if (!sock)
		return ERR_PTR(-ECONNREFUSED);

	/* Don't bother queuing skb if kernel socket has no input function */
	nlk = nlk_sk(sock);
	if (sock->sk_state == NETLINK_CONNECTED &&
	    nlk->dst_portid != nlk_sk(ssk)->portid) {
		sock_put(sock);
		return ERR_PTR(-ECONNREFUSED);
	}
	return sock;
}

struct sock *netlink_getsockbyfilp(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	struct sock *sock;

	if (!S_ISSOCK(inode->i_mode))
		return ERR_PTR(-ENOTSOCK);

	sock = SOCKET_I(inode)->sk;
	if (sock->sk_family != AF_NETLINK)
		return ERR_PTR(-EINVAL);

	sock_hold(sock);
	return sock;
}

static struct sk_buff *netlink_alloc_large_skb(unsigned int size,
					       int broadcast)
{
	struct sk_buff *skb;
	void *data;

	if (size <= NLMSG_GOODSIZE || broadcast)
		return alloc_skb(size, GFP_KERNEL);

	size = SKB_DATA_ALIGN(size) +
	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	data = vmalloc(size);
	if (data == NULL)
		return NULL;

	skb = __build_skb(data, size);
	if (skb == NULL)
		vfree(data);
	else
		skb->destructor = netlink_skb_destructor;

	return skb;
}

/*
 * Attach a skb to a netlink socket.
 * The caller must hold a reference to the destination socket. On error, the
 * reference is dropped. The skb is not sent to the destination, just all
 * error checks are performed and memory in the queue is reserved.
 * Return values:
 * < 0: error. skb freed, reference to sock dropped.
 * 0: continue
 * 1: repeat lookup - reference dropped while waiting for socket memory.
 */
int netlink_attachskb(struct sock *sk, struct sk_buff *skb,
		      long *timeo, struct sock *ssk)
{
	struct netlink_sock *nlk;

	nlk = nlk_sk(sk);

	if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
	     test_bit(NETLINK_S_CONGESTED, &nlk->state)) &&
	    !netlink_skb_is_mmaped(skb)) {
		DECLARE_WAITQUEUE(wait, current);
		if (!*timeo) {
			if (!ssk || netlink_is_kernel(ssk))
				netlink_overrun(sk);
			sock_put(sk);
			kfree_skb(skb);
			return -EAGAIN;
		}

		__set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&nlk->wait, &wait);

		if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
		     test_bit(NETLINK_S_CONGESTED, &nlk->state)) &&
		    !sock_flag(sk, SOCK_DEAD))
			*timeo = schedule_timeout(*timeo);

		__set_current_state(TASK_RUNNING);
		remove_wait_queue(&nlk->wait, &wait);
		sock_put(sk);

		if (signal_pending(current)) {
			kfree_skb(skb);
			return sock_intr_errno(*timeo);
		}
		return 1;
	}
	netlink_skb_set_owner_r(skb, sk);
	return 0;
}

static int __netlink_sendskb(struct sock *sk, struct sk_buff *skb)
{
	int len = skb->len;

	netlink_deliver_tap(skb);

#ifdef CONFIG_NETLINK_MMAP
	if (netlink_skb_is_mmaped(skb))
		netlink_queue_mmaped_skb(sk, skb);
	else if (netlink_rx_is_mmaped(sk))
		netlink_ring_set_copied(sk, skb);
	else
#endif /* CONFIG_NETLINK_MMAP */
		skb_queue_tail(&sk->sk_receive_queue, skb);
	sk->sk_data_ready(sk);
	return len;
}

int netlink_sendskb(struct sock *sk, struct sk_buff *skb)
{
	int len = __netlink_sendskb(sk, skb);

	sock_put(sk);
	return len;
}

void netlink_detachskb(struct sock *sk, struct sk_buff *skb)
{
	kfree_skb(skb);
	sock_put(sk);
}

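/* Shrink an over-sized skb before it is queued: only worthwhile when the
 * unused tail room is at least half of skb->truesize, and never for mmaped
 * frames or vmalloc'ed heads, whose memory is not kmalloc-backed.
 */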
static struct sk_buff *netlink_trim(struct sk_buff *skb, gfp_t allocation)
{
	int delta;

	WARN_ON(skb->sk != NULL);
	if (netlink_skb_is_mmaped(skb))
		return skb;

	delta = skb->end - skb->tail;
	if (is_vmalloc_addr(skb->head) || delta * 2 < skb->truesize)
		return skb;

	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, allocation);
		if (!nskb)
			return skb;
		consume_skb(skb);
		skb = nskb;
	}

	if (!pskb_expand_head(skb, 0, -delta, allocation))
		skb->truesize -= delta;

	return skb;
}

static int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb,
				  struct sock *ssk)
{
	int ret;
	struct netlink_sock *nlk = nlk_sk(sk);

	ret = -ECONNREFUSED;
	if (nlk->netlink_rcv != NULL) {
		ret = skb->len;
		netlink_skb_set_owner_r(skb, sk);
		NETLINK_CB(skb).sk = ssk;
		netlink_deliver_tap_kernel(sk, ssk, skb);
		nlk->netlink_rcv(skb);
		consume_skb(skb);
	} else {
		kfree_skb(skb);
	}
	sock_put(sk);
	return ret;
}

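/* Unicast entry point: kernel sockets are delivered to synchronously via
 * their ->netlink_rcv() handler; user sockets pass the socket filter and
 * then netlink_attachskb(), which may sleep (or ask for the lookup to be
 * redone) while the receiver is over its rcvbuf limit.
 */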
1778int netlink_unicast(struct sock *ssk, struct sk_buff *skb,
15e47304 1779 u32 portid, int nonblock)
1da177e4
LT
1780{
1781 struct sock *sk;
1782 int err;
1783 long timeo;
1784
1785 skb = netlink_trim(skb, gfp_any());
1786
1787 timeo = sock_sndtimeo(ssk, nonblock);
1788retry:
15e47304 1789 sk = netlink_getsockbyportid(ssk, portid);
1da177e4
LT
1790 if (IS_ERR(sk)) {
1791 kfree_skb(skb);
1792 return PTR_ERR(sk);
1793 }
cd40b7d3 1794 if (netlink_is_kernel(sk))
3fbc2905 1795 return netlink_unicast_kernel(sk, skb, ssk);
cd40b7d3 1796
b1153f29 1797 if (sk_filter(sk, skb)) {
84874607 1798 err = skb->len;
b1153f29
SH
1799 kfree_skb(skb);
1800 sock_put(sk);
1801 return err;
1802 }
1803
9457afee 1804 err = netlink_attachskb(sk, skb, &timeo, ssk);
1da177e4
LT
1805 if (err == 1)
1806 goto retry;
1807 if (err)
1808 return err;
1809
7ee015e0 1810 return netlink_sendskb(sk, skb);
1da177e4 1811}
6ac552fd 1812EXPORT_SYMBOL(netlink_unicast);
1da177e4 1813
f9c22888
PM
1814struct sk_buff *netlink_alloc_skb(struct sock *ssk, unsigned int size,
1815 u32 dst_portid, gfp_t gfp_mask)
1816{
1817#ifdef CONFIG_NETLINK_MMAP
1818 struct sock *sk = NULL;
1819 struct sk_buff *skb;
1820 struct netlink_ring *ring;
1821 struct nl_mmap_hdr *hdr;
1822 unsigned int maxlen;
1823
1824 sk = netlink_getsockbyportid(ssk, dst_portid);
1825 if (IS_ERR(sk))
1826 goto out;
1827
1828 ring = &nlk_sk(sk)->rx_ring;
1829 /* fast-path without atomic ops for common case: non-mmaped receiver */
1830 if (ring->pg_vec == NULL)
1831 goto out_put;
1832
aae9f0e2
TG
1833 if (ring->frame_size - NL_MMAP_HDRLEN < size)
1834 goto out_put;
1835
f9c22888
PM
1836 skb = alloc_skb_head(gfp_mask);
1837 if (skb == NULL)
1838 goto err1;
1839
1840 spin_lock_bh(&sk->sk_receive_queue.lock);
1841 /* check again under lock */
1842 if (ring->pg_vec == NULL)
1843 goto out_free;
1844
aae9f0e2 1845 /* check again under lock */
f9c22888
PM
1846 maxlen = ring->frame_size - NL_MMAP_HDRLEN;
1847 if (maxlen < size)
1848 goto out_free;
1849
1850 netlink_forward_ring(ring);
1851 hdr = netlink_current_frame(ring, NL_MMAP_STATUS_UNUSED);
1852 if (hdr == NULL)
1853 goto err2;
1854 netlink_ring_setup_skb(skb, sk, ring, hdr);
1855 netlink_set_status(hdr, NL_MMAP_STATUS_RESERVED);
1856 atomic_inc(&ring->pending);
1857 netlink_increment_head(ring);
1858
1859 spin_unlock_bh(&sk->sk_receive_queue.lock);
1860 return skb;
1861
1862err2:
1863 kfree_skb(skb);
1864 spin_unlock_bh(&sk->sk_receive_queue.lock);
cd1df525 1865 netlink_overrun(sk);
f9c22888
PM
1866err1:
1867 sock_put(sk);
1868 return NULL;
1869
1870out_free:
1871 kfree_skb(skb);
1872 spin_unlock_bh(&sk->sk_receive_queue.lock);
1873out_put:
1874 sock_put(sk);
1875out:
1876#endif
1877 return alloc_skb(size, gfp_mask);
1878}
1879EXPORT_SYMBOL_GPL(netlink_alloc_skb);
1880
4277a083
PM
1881int netlink_has_listeners(struct sock *sk, unsigned int group)
1882{
1883 int res = 0;
5c398dc8 1884 struct listeners *listeners;
4277a083 1885
aed81560 1886 BUG_ON(!netlink_is_kernel(sk));
b4ff4f04
JB
1887
1888 rcu_read_lock();
1889 listeners = rcu_dereference(nl_table[sk->sk_protocol].listeners);
1890
6d772ac5 1891 if (listeners && group - 1 < nl_table[sk->sk_protocol].groups)
5c398dc8 1892 res = test_bit(group - 1, listeners->masks);
b4ff4f04
JB
1893
1894 rcu_read_unlock();
1895
4277a083
PM
1896 return res;
1897}
1898EXPORT_SYMBOL_GPL(netlink_has_listeners);
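/*
 * Example (sketch): netlink_has_listeners() lets an event source skip
 * building a notification nobody would receive.  Names are hypothetical.
 * The answer is only a snapshot taken under RCU; a listener may subscribe
 * immediately afterwards, so treat it as an optimization, not a guarantee.
 */
static void my_maybe_notify(struct sock *my_kernel_sock)
{
	if (!netlink_has_listeners(my_kernel_sock, MY_GRP))
		return;		/* no subscriber, skip the expensive build */

	/* ... allocate, fill and netlink_broadcast() the event here ... */
}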
1899
b57ef81f 1900static int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb)
1da177e4
LT
1901{
1902 struct netlink_sock *nlk = nlk_sk(sk);
1903
1904 if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
cc3a572f 1905 !test_bit(NETLINK_S_CONGESTED, &nlk->state)) {
cf0a018a 1906 netlink_skb_set_owner_r(skb, sk);
4a7e7c2a 1907 __netlink_sendskb(sk, skb);
2c645800 1908 return atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1);
1da177e4
LT
1909 }
1910 return -1;
1911}
1912
1913struct netlink_broadcast_data {
1914 struct sock *exclude_sk;
b4b51029 1915 struct net *net;
15e47304 1916 u32 portid;
1da177e4
LT
1917 u32 group;
1918 int failure;
ff491a73 1919 int delivery_failure;
1da177e4
LT
1920 int congested;
1921 int delivered;
7d877f3b 1922 gfp_t allocation;
1da177e4 1923 struct sk_buff *skb, *skb2;
910a7e90
EB
1924 int (*tx_filter)(struct sock *dsk, struct sk_buff *skb, void *data);
1925 void *tx_data;
1da177e4
LT
1926};
1927
46c9521f
RR
1928static void do_one_broadcast(struct sock *sk,
1929 struct netlink_broadcast_data *p)
1da177e4
LT
1930{
1931 struct netlink_sock *nlk = nlk_sk(sk);
1932 int val;
1933
1934 if (p->exclude_sk == sk)
46c9521f 1935 return;
1da177e4 1936
15e47304 1937 if (nlk->portid == p->portid || p->group - 1 >= nlk->ngroups ||
f7fa9b10 1938 !test_bit(p->group - 1, nlk->groups))
46c9521f 1939 return;
1da177e4 1940
59324cf3
ND
1941 if (!net_eq(sock_net(sk), p->net)) {
1942 if (!(nlk->flags & NETLINK_F_LISTEN_ALL_NSID))
1943 return;
1944
1945 if (!peernet_has_id(sock_net(sk), p->net))
1946 return;
1947
1948 if (!file_ns_capable(sk->sk_socket->file, p->net->user_ns,
1949 CAP_NET_BROADCAST))
1950 return;
1951 }
b4b51029 1952
1da177e4
LT
1953 if (p->failure) {
1954 netlink_overrun(sk);
46c9521f 1955 return;
1da177e4
LT
1956 }
1957
1958 sock_hold(sk);
1959 if (p->skb2 == NULL) {
68acc024 1960 if (skb_shared(p->skb)) {
1da177e4
LT
1961 p->skb2 = skb_clone(p->skb, p->allocation);
1962 } else {
68acc024
TC
1963 p->skb2 = skb_get(p->skb);
1964 /*
1965 * skb ownership may have been set when
1966 * delivered to a previous socket.
1967 */
1968 skb_orphan(p->skb2);
1da177e4
LT
1969 }
1970 }
1971 if (p->skb2 == NULL) {
1972 netlink_overrun(sk);
1973 /* Clone failed. Notify ALL listeners. */
1974 p->failure = 1;
cc3a572f 1975 if (nlk->flags & NETLINK_F_BROADCAST_SEND_ERROR)
be0c22a4 1976 p->delivery_failure = 1;
59324cf3
ND
1977 goto out;
1978 }
1979 if (p->tx_filter && p->tx_filter(sk, p->skb2, p->tx_data)) {
910a7e90
EB
1980 kfree_skb(p->skb2);
1981 p->skb2 = NULL;
59324cf3
ND
1982 goto out;
1983 }
1984 if (sk_filter(sk, p->skb2)) {
b1153f29
SH
1985 kfree_skb(p->skb2);
1986 p->skb2 = NULL;
59324cf3
ND
1987 goto out;
1988 }
1989 NETLINK_CB(p->skb2).nsid = peernet2id(sock_net(sk), p->net);
1990 NETLINK_CB(p->skb2).nsid_is_set = true;
1991 val = netlink_broadcast_deliver(sk, p->skb2);
1992 if (val < 0) {
1da177e4 1993 netlink_overrun(sk);
cc3a572f 1994 if (nlk->flags & NETLINK_F_BROADCAST_SEND_ERROR)
be0c22a4 1995 p->delivery_failure = 1;
1da177e4
LT
1996 } else {
1997 p->congested |= val;
1998 p->delivered = 1;
1999 p->skb2 = NULL;
2000 }
59324cf3 2001out:
1da177e4 2002 sock_put(sk);
1da177e4
LT
2003}
2004
15e47304 2005int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, u32 portid,
910a7e90
EB
2006 u32 group, gfp_t allocation,
2007 int (*filter)(struct sock *dsk, struct sk_buff *skb, void *data),
2008 void *filter_data)
1da177e4 2009{
3b1e0a65 2010 struct net *net = sock_net(ssk);
1da177e4 2011 struct netlink_broadcast_data info;
1da177e4
LT
2012 struct sock *sk;
2013
2014 skb = netlink_trim(skb, allocation);
2015
2016 info.exclude_sk = ssk;
b4b51029 2017 info.net = net;
15e47304 2018 info.portid = portid;
1da177e4
LT
2019 info.group = group;
2020 info.failure = 0;
ff491a73 2021 info.delivery_failure = 0;
1da177e4
LT
2022 info.congested = 0;
2023 info.delivered = 0;
2024 info.allocation = allocation;
2025 info.skb = skb;
2026 info.skb2 = NULL;
910a7e90
EB
2027 info.tx_filter = filter;
2028 info.tx_data = filter_data;
1da177e4
LT
2029
 2030         /* While we sleep in clone, do not allow the socket list to change */
2031
2032 netlink_lock_table();
2033
b67bfe0d 2034 sk_for_each_bound(sk, &nl_table[ssk->sk_protocol].mc_list)
1da177e4
LT
2035 do_one_broadcast(sk, &info);
2036
70d4bf6d 2037 consume_skb(skb);
aa1c6a6f 2038
1da177e4
LT
2039 netlink_unlock_table();
2040
70d4bf6d
NH
2041 if (info.delivery_failure) {
2042 kfree_skb(info.skb2);
ff491a73 2043 return -ENOBUFS;
658cb354
ED
2044 }
2045 consume_skb(info.skb2);
ff491a73 2046
1da177e4
LT
2047 if (info.delivered) {
2048 if (info.congested && (allocation & __GFP_WAIT))
2049 yield();
2050 return 0;
2051 }
1da177e4
LT
2052 return -ESRCH;
2053}
910a7e90
EB
2054EXPORT_SYMBOL(netlink_broadcast_filtered);
2055
15e47304 2056int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 portid,
910a7e90
EB
2057 u32 group, gfp_t allocation)
2058{
15e47304 2059 return netlink_broadcast_filtered(ssk, skb, portid, group, allocation,
910a7e90
EB
2060 NULL, NULL);
2061}
6ac552fd 2062EXPORT_SYMBOL(netlink_broadcast);
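/*
 * Example (sketch, hypothetical names): multicasting one event to group
 * MY_GRP.  netlink_broadcast() consumes the skb and returns 0 when at
 * least one listener received it, -ESRCH when none did, and -ENOBUFS when
 * delivery failed for a listener that set NETLINK_BROADCAST_ERROR.
 */
static int my_broadcast_event(struct sock *my_kernel_sock, u32 event)
{
	struct sk_buff *skb;
	struct nlmsghdr *nlh;

	skb = nlmsg_new(sizeof(u32), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	nlh = nlmsg_put(skb, 0, 0, MY_MSG_TYPE, sizeof(u32), 0);
	if (!nlh) {
		kfree_skb(skb);
		return -EMSGSIZE;
	}
	*(u32 *)nlmsg_data(nlh) = event;

	/* portid 0: do not exclude any listener */
	return netlink_broadcast(my_kernel_sock, skb, 0, MY_GRP, GFP_KERNEL);
}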
1da177e4
LT
2063
2064struct netlink_set_err_data {
2065 struct sock *exclude_sk;
15e47304 2066 u32 portid;
1da177e4
LT
2067 u32 group;
2068 int code;
2069};
2070
b57ef81f 2071static int do_one_set_err(struct sock *sk, struct netlink_set_err_data *p)
1da177e4
LT
2072{
2073 struct netlink_sock *nlk = nlk_sk(sk);
1a50307b 2074 int ret = 0;
1da177e4
LT
2075
2076 if (sk == p->exclude_sk)
2077 goto out;
2078
09ad9bc7 2079 if (!net_eq(sock_net(sk), sock_net(p->exclude_sk)))
b4b51029
EB
2080 goto out;
2081
15e47304 2082 if (nlk->portid == p->portid || p->group - 1 >= nlk->ngroups ||
f7fa9b10 2083 !test_bit(p->group - 1, nlk->groups))
1da177e4
LT
2084 goto out;
2085
cc3a572f 2086 if (p->code == ENOBUFS && nlk->flags & NETLINK_F_RECV_NO_ENOBUFS) {
1a50307b
PNA
2087 ret = 1;
2088 goto out;
2089 }
2090
1da177e4
LT
2091 sk->sk_err = p->code;
2092 sk->sk_error_report(sk);
2093out:
1a50307b 2094 return ret;
1da177e4
LT
2095}
2096
4843b93c
PNA
2097/**
2098 * netlink_set_err - report error to broadcast listeners
2099 * @ssk: the kernel netlink socket, as returned by netlink_kernel_create()
15e47304 2100 * @portid: the PORTID of a process that we want to skip (if any)
840e93f2 2101 * @group: the broadcast group that will notice the error
4843b93c 2102 * @code: error code, must be negative (as usual in kernelspace)
1a50307b
PNA
2103 *
2104 * This function returns the number of broadcast listeners that have set the
cc3a572f 2105 * NETLINK_NO_ENOBUFS socket option.
4843b93c 2106 */
15e47304 2107int netlink_set_err(struct sock *ssk, u32 portid, u32 group, int code)
1da177e4
LT
2108{
2109 struct netlink_set_err_data info;
1da177e4 2110 struct sock *sk;
1a50307b 2111 int ret = 0;
1da177e4
LT
2112
2113 info.exclude_sk = ssk;
15e47304 2114 info.portid = portid;
1da177e4 2115 info.group = group;
4843b93c
PNA
2116 /* sk->sk_err wants a positive error value */
2117 info.code = -code;
1da177e4
LT
2118
2119 read_lock(&nl_table_lock);
2120
b67bfe0d 2121 sk_for_each_bound(sk, &nl_table[ssk->sk_protocol].mc_list)
1a50307b 2122 ret += do_one_set_err(sk, &info);
1da177e4
LT
2123
2124 read_unlock(&nl_table_lock);
1a50307b 2125 return ret;
1da177e4 2126}
dd5b6ce6 2127EXPORT_SYMBOL(netlink_set_err);
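/*
 * Example (sketch): after an overrun, an event source can push ENOBUFS to
 * every listener of a group so userspace knows it lost events.  Note the
 * negative errno convention documented above; my_kernel_sock and MY_GRP
 * are hypothetical.
 */
static void my_report_overrun(struct sock *my_kernel_sock)
{
	/* return value: listeners that suppressed it via NETLINK_NO_ENOBUFS */
	netlink_set_err(my_kernel_sock, 0, MY_GRP, -ENOBUFS);
}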
1da177e4 2128
84659eb5
JB
2129/* must be called with netlink table grabbed */
2130static void netlink_update_socket_mc(struct netlink_sock *nlk,
2131 unsigned int group,
2132 int is_new)
2133{
2134 int old, new = !!is_new, subscriptions;
2135
2136 old = test_bit(group - 1, nlk->groups);
2137 subscriptions = nlk->subscriptions - old + new;
2138 if (new)
2139 __set_bit(group - 1, nlk->groups);
2140 else
2141 __clear_bit(group - 1, nlk->groups);
2142 netlink_update_subscriptions(&nlk->sk, subscriptions);
2143 netlink_update_listeners(&nlk->sk);
2144}
2145
9a4595bc 2146static int netlink_setsockopt(struct socket *sock, int level, int optname,
b7058842 2147 char __user *optval, unsigned int optlen)
9a4595bc
PM
2148{
2149 struct sock *sk = sock->sk;
2150 struct netlink_sock *nlk = nlk_sk(sk);
eb496534
JB
2151 unsigned int val = 0;
2152 int err;
9a4595bc
PM
2153
2154 if (level != SOL_NETLINK)
2155 return -ENOPROTOOPT;
2156
ccdfcc39
PM
2157 if (optname != NETLINK_RX_RING && optname != NETLINK_TX_RING &&
2158 optlen >= sizeof(int) &&
eb496534 2159 get_user(val, (unsigned int __user *)optval))
9a4595bc
PM
2160 return -EFAULT;
2161
2162 switch (optname) {
2163 case NETLINK_PKTINFO:
2164 if (val)
cc3a572f 2165 nlk->flags |= NETLINK_F_RECV_PKTINFO;
9a4595bc 2166 else
cc3a572f 2167 nlk->flags &= ~NETLINK_F_RECV_PKTINFO;
9a4595bc
PM
2168 err = 0;
2169 break;
2170 case NETLINK_ADD_MEMBERSHIP:
2171 case NETLINK_DROP_MEMBERSHIP: {
5187cd05 2172 if (!netlink_allowed(sock, NL_CFG_F_NONROOT_RECV))
9a4595bc 2173 return -EPERM;
b4ff4f04
JB
2174 err = netlink_realloc_groups(sk);
2175 if (err)
2176 return err;
9a4595bc
PM
2177 if (!val || val - 1 >= nlk->ngroups)
2178 return -EINVAL;
7774d5e0 2179 if (optname == NETLINK_ADD_MEMBERSHIP && nlk->netlink_bind) {
023e2cfa 2180 err = nlk->netlink_bind(sock_net(sk), val);
4f520900
RGB
2181 if (err)
2182 return err;
2183 }
9a4595bc 2184 netlink_table_grab();
84659eb5
JB
2185 netlink_update_socket_mc(nlk, val,
2186 optname == NETLINK_ADD_MEMBERSHIP);
9a4595bc 2187 netlink_table_ungrab();
7774d5e0 2188 if (optname == NETLINK_DROP_MEMBERSHIP && nlk->netlink_unbind)
023e2cfa 2189 nlk->netlink_unbind(sock_net(sk), val);
03292745 2190
9a4595bc
PM
2191 err = 0;
2192 break;
2193 }
be0c22a4
PNA
2194 case NETLINK_BROADCAST_ERROR:
2195 if (val)
cc3a572f 2196 nlk->flags |= NETLINK_F_BROADCAST_SEND_ERROR;
be0c22a4 2197 else
cc3a572f 2198 nlk->flags &= ~NETLINK_F_BROADCAST_SEND_ERROR;
be0c22a4
PNA
2199 err = 0;
2200 break;
38938bfe
PNA
2201 case NETLINK_NO_ENOBUFS:
2202 if (val) {
cc3a572f
ND
2203 nlk->flags |= NETLINK_F_RECV_NO_ENOBUFS;
2204 clear_bit(NETLINK_S_CONGESTED, &nlk->state);
38938bfe 2205 wake_up_interruptible(&nlk->wait);
658cb354 2206 } else {
cc3a572f 2207 nlk->flags &= ~NETLINK_F_RECV_NO_ENOBUFS;
658cb354 2208 }
38938bfe
PNA
2209 err = 0;
2210 break;
ccdfcc39
PM
2211#ifdef CONFIG_NETLINK_MMAP
2212 case NETLINK_RX_RING:
2213 case NETLINK_TX_RING: {
2214 struct nl_mmap_req req;
2215
2216 /* Rings might consume more memory than queue limits, require
2217 * CAP_NET_ADMIN.
2218 */
2219 if (!capable(CAP_NET_ADMIN))
2220 return -EPERM;
2221 if (optlen < sizeof(req))
2222 return -EINVAL;
2223 if (copy_from_user(&req, optval, sizeof(req)))
2224 return -EFAULT;
2225 err = netlink_set_ring(sk, &req, false,
2226 optname == NETLINK_TX_RING);
2227 break;
2228 }
2229#endif /* CONFIG_NETLINK_MMAP */
59324cf3
ND
2230 case NETLINK_LISTEN_ALL_NSID:
2231 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_BROADCAST))
2232 return -EPERM;
2233
2234 if (val)
2235 nlk->flags |= NETLINK_F_LISTEN_ALL_NSID;
2236 else
2237 nlk->flags &= ~NETLINK_F_LISTEN_ALL_NSID;
2238 err = 0;
2239 break;
9a4595bc
PM
2240 default:
2241 err = -ENOPROTOOPT;
2242 }
2243 return err;
2244}
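/*
 * Example (sketch, userspace side): joining a multicast group through the
 * NETLINK_ADD_MEMBERSHIP case handled above, which also reaches group
 * numbers beyond the 32 that fit in sockaddr_nl.nl_groups at bind() time.
 * Requires <sys/socket.h> and <linux/netlink.h>.
 */
static int join_group(int fd, unsigned int grp)
{
	/* the SOL_NETLINK level routes this into netlink_setsockopt() */
	return setsockopt(fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP,
			  &grp, sizeof(grp));
}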
2245
2246static int netlink_getsockopt(struct socket *sock, int level, int optname,
746fac4d 2247 char __user *optval, int __user *optlen)
9a4595bc
PM
2248{
2249 struct sock *sk = sock->sk;
2250 struct netlink_sock *nlk = nlk_sk(sk);
2251 int len, val, err;
2252
2253 if (level != SOL_NETLINK)
2254 return -ENOPROTOOPT;
2255
2256 if (get_user(len, optlen))
2257 return -EFAULT;
2258 if (len < 0)
2259 return -EINVAL;
2260
2261 switch (optname) {
2262 case NETLINK_PKTINFO:
2263 if (len < sizeof(int))
2264 return -EINVAL;
2265 len = sizeof(int);
cc3a572f 2266 val = nlk->flags & NETLINK_F_RECV_PKTINFO ? 1 : 0;
a27b58fe
HC
2267 if (put_user(len, optlen) ||
2268 put_user(val, optval))
2269 return -EFAULT;
9a4595bc
PM
2270 err = 0;
2271 break;
be0c22a4
PNA
2272 case NETLINK_BROADCAST_ERROR:
2273 if (len < sizeof(int))
2274 return -EINVAL;
2275 len = sizeof(int);
cc3a572f 2276 val = nlk->flags & NETLINK_F_BROADCAST_SEND_ERROR ? 1 : 0;
be0c22a4
PNA
2277 if (put_user(len, optlen) ||
2278 put_user(val, optval))
2279 return -EFAULT;
2280 err = 0;
2281 break;
38938bfe
PNA
2282 case NETLINK_NO_ENOBUFS:
2283 if (len < sizeof(int))
2284 return -EINVAL;
2285 len = sizeof(int);
cc3a572f 2286 val = nlk->flags & NETLINK_F_RECV_NO_ENOBUFS ? 1 : 0;
38938bfe
PNA
2287 if (put_user(len, optlen) ||
2288 put_user(val, optval))
2289 return -EFAULT;
2290 err = 0;
2291 break;
9a4595bc
PM
2292 default:
2293 err = -ENOPROTOOPT;
2294 }
2295 return err;
2296}
2297
2298static void netlink_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb)
2299{
2300 struct nl_pktinfo info;
2301
2302 info.group = NETLINK_CB(skb).dst_group;
2303 put_cmsg(msg, SOL_NETLINK, NETLINK_PKTINFO, sizeof(info), &info);
2304}
2305
59324cf3
ND
2306static void netlink_cmsg_listen_all_nsid(struct sock *sk, struct msghdr *msg,
2307 struct sk_buff *skb)
2308{
2309 if (!NETLINK_CB(skb).nsid_is_set)
2310 return;
2311
2312 put_cmsg(msg, SOL_NETLINK, NETLINK_LISTEN_ALL_NSID, sizeof(int),
2313 &NETLINK_CB(skb).nsid);
2314}
2315
1b784140 2316static int netlink_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
1da177e4 2317{
1da177e4
LT
2318 struct sock *sk = sock->sk;
2319 struct netlink_sock *nlk = nlk_sk(sk);
342dfc30 2320 DECLARE_SOCKADDR(struct sockaddr_nl *, addr, msg->msg_name);
15e47304 2321 u32 dst_portid;
d629b836 2322 u32 dst_group;
1da177e4
LT
2323 struct sk_buff *skb;
2324 int err;
2325 struct scm_cookie scm;
2d7a85f4 2326 u32 netlink_skb_flags = 0;
1da177e4
LT
2327
2328 if (msg->msg_flags&MSG_OOB)
2329 return -EOPNOTSUPP;
2330
7cc05662 2331 err = scm_send(sock, msg, &scm, true);
1da177e4
LT
2332 if (err < 0)
2333 return err;
2334
2335 if (msg->msg_namelen) {
b47030c7 2336 err = -EINVAL;
1da177e4 2337 if (addr->nl_family != AF_NETLINK)
b47030c7 2338 goto out;
15e47304 2339 dst_portid = addr->nl_pid;
d629b836 2340 dst_group = ffs(addr->nl_groups);
b47030c7 2341 err = -EPERM;
15e47304 2342 if ((dst_group || dst_portid) &&
5187cd05 2343 !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND))
b47030c7 2344 goto out;
2d7a85f4 2345 netlink_skb_flags |= NETLINK_SKB_DST;
1da177e4 2346 } else {
15e47304 2347 dst_portid = nlk->dst_portid;
d629b836 2348 dst_group = nlk->dst_group;
1da177e4
LT
2349 }
2350
15e47304 2351 if (!nlk->portid) {
1da177e4
LT
2352 err = netlink_autobind(sock);
2353 if (err)
2354 goto out;
2355 }
2356
a8866ff6
AV
 2357         /* It's a really convoluted way for userland to ask for mmapped
2358 * sendmsg(), but that's what we've got...
2359 */
5fd96123 2360 if (netlink_tx_is_mmaped(sk) &&
a8866ff6
AV
2361 msg->msg_iter.type == ITER_IOVEC &&
2362 msg->msg_iter.nr_segs == 1 &&
c0371da6 2363 msg->msg_iter.iov->iov_base == NULL) {
5fd96123 2364 err = netlink_mmap_sendmsg(sk, msg, dst_portid, dst_group,
7cc05662 2365 &scm);
5fd96123
PM
2366 goto out;
2367 }
2368
1da177e4
LT
2369 err = -EMSGSIZE;
2370 if (len > sk->sk_sndbuf - 32)
2371 goto out;
2372 err = -ENOBUFS;
3a36515f 2373 skb = netlink_alloc_large_skb(len, dst_group);
6ac552fd 2374 if (skb == NULL)
1da177e4
LT
2375 goto out;
2376
15e47304 2377 NETLINK_CB(skb).portid = nlk->portid;
d629b836 2378 NETLINK_CB(skb).dst_group = dst_group;
7cc05662 2379 NETLINK_CB(skb).creds = scm.creds;
2d7a85f4 2380 NETLINK_CB(skb).flags = netlink_skb_flags;
1da177e4 2381
1da177e4 2382 err = -EFAULT;
6ce8e9ce 2383 if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
1da177e4
LT
2384 kfree_skb(skb);
2385 goto out;
2386 }
2387
2388 err = security_netlink_send(sk, skb);
2389 if (err) {
2390 kfree_skb(skb);
2391 goto out;
2392 }
2393
d629b836 2394 if (dst_group) {
1da177e4 2395 atomic_inc(&skb->users);
15e47304 2396 netlink_broadcast(sk, skb, dst_portid, dst_group, GFP_KERNEL);
1da177e4 2397 }
15e47304 2398 err = netlink_unicast(sk, skb, dst_portid, msg->msg_flags&MSG_DONTWAIT);
1da177e4
LT
2399
2400out:
7cc05662 2401 scm_destroy(&scm);
1da177e4
LT
2402 return err;
2403}
2404
1b784140 2405static int netlink_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
1da177e4
LT
2406 int flags)
2407{
1da177e4
LT
2408 struct scm_cookie scm;
2409 struct sock *sk = sock->sk;
2410 struct netlink_sock *nlk = nlk_sk(sk);
2411 int noblock = flags&MSG_DONTWAIT;
2412 size_t copied;
68d6ac6d 2413 struct sk_buff *skb, *data_skb;
b44d211e 2414 int err, ret;
1da177e4
LT
2415
2416 if (flags&MSG_OOB)
2417 return -EOPNOTSUPP;
2418
2419 copied = 0;
2420
6ac552fd
PM
2421 skb = skb_recv_datagram(sk, flags, noblock, &err);
2422 if (skb == NULL)
1da177e4
LT
2423 goto out;
2424
68d6ac6d
JB
2425 data_skb = skb;
2426
1dacc76d
JB
2427#ifdef CONFIG_COMPAT_NETLINK_MESSAGES
2428 if (unlikely(skb_shinfo(skb)->frag_list)) {
1dacc76d 2429 /*
68d6ac6d
JB
2430 * If this skb has a frag_list, then here that means that we
2431 * will have to use the frag_list skb's data for compat tasks
2432 * and the regular skb's data for normal (non-compat) tasks.
1dacc76d 2433 *
68d6ac6d
JB
2434 * If we need to send the compat skb, assign it to the
2435 * 'data_skb' variable so that it will be used below for data
2436 * copying. We keep 'skb' for everything else, including
2437 * freeing both later.
1dacc76d 2438 */
68d6ac6d
JB
2439 if (flags & MSG_CMSG_COMPAT)
2440 data_skb = skb_shinfo(skb)->frag_list;
1dacc76d
JB
2441 }
2442#endif
2443
9063e21f
ED
2444 /* Record the max length of recvmsg() calls for future allocations */
2445 nlk->max_recvmsg_len = max(nlk->max_recvmsg_len, len);
2446 nlk->max_recvmsg_len = min_t(size_t, nlk->max_recvmsg_len,
2447 16384);
2448
68d6ac6d 2449 copied = data_skb->len;
1da177e4
LT
2450 if (len < copied) {
2451 msg->msg_flags |= MSG_TRUNC;
2452 copied = len;
2453 }
2454
68d6ac6d 2455 skb_reset_transport_header(data_skb);
51f3d02b 2456 err = skb_copy_datagram_msg(data_skb, 0, msg, copied);
1da177e4
LT
2457
2458 if (msg->msg_name) {
342dfc30 2459 DECLARE_SOCKADDR(struct sockaddr_nl *, addr, msg->msg_name);
1da177e4
LT
2460 addr->nl_family = AF_NETLINK;
2461 addr->nl_pad = 0;
15e47304 2462 addr->nl_pid = NETLINK_CB(skb).portid;
d629b836 2463 addr->nl_groups = netlink_group_mask(NETLINK_CB(skb).dst_group);
1da177e4
LT
2464 msg->msg_namelen = sizeof(*addr);
2465 }
2466
cc3a572f 2467 if (nlk->flags & NETLINK_F_RECV_PKTINFO)
cc9a06cd 2468 netlink_cmsg_recv_pktinfo(msg, skb);
59324cf3
ND
2469 if (nlk->flags & NETLINK_F_LISTEN_ALL_NSID)
2470 netlink_cmsg_listen_all_nsid(sk, msg, skb);
cc9a06cd 2471
7cc05662
CH
2472 memset(&scm, 0, sizeof(scm));
2473 scm.creds = *NETLINK_CREDS(skb);
188ccb55 2474 if (flags & MSG_TRUNC)
68d6ac6d 2475 copied = data_skb->len;
daa3766e 2476
1da177e4
LT
2477 skb_free_datagram(sk, skb);
2478
16b304f3
PS
2479 if (nlk->cb_running &&
2480 atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) {
b44d211e
AV
2481 ret = netlink_dump(sk);
2482 if (ret) {
ac30ef83 2483 sk->sk_err = -ret;
b44d211e
AV
2484 sk->sk_error_report(sk);
2485 }
2486 }
1da177e4 2487
7cc05662 2488 scm_recv(sock, msg, &scm, flags);
1da177e4
LT
2489out:
2490 netlink_rcv_wake(sk);
2491 return err ? : copied;
2492}
2493
676d2369 2494static void netlink_data_ready(struct sock *sk)
1da177e4 2495{
cd40b7d3 2496 BUG();
1da177e4
LT
2497}
2498
2499/*
746fac4d 2500 * We export these functions to other modules. They provide a
1da177e4
LT
2501 * complete set of kernel non-blocking support for message
2502 * queueing.
2503 */
2504
2505struct sock *
9f00d977
PNA
2506__netlink_kernel_create(struct net *net, int unit, struct module *module,
2507 struct netlink_kernel_cfg *cfg)
1da177e4
LT
2508{
2509 struct socket *sock;
2510 struct sock *sk;
77247bbb 2511 struct netlink_sock *nlk;
5c398dc8 2512 struct listeners *listeners = NULL;
a31f2d17
PNA
2513 struct mutex *cb_mutex = cfg ? cfg->cb_mutex : NULL;
2514 unsigned int groups;
1da177e4 2515
fab2caf6 2516 BUG_ON(!nl_table);
1da177e4 2517
6ac552fd 2518 if (unit < 0 || unit >= MAX_LINKS)
1da177e4
LT
2519 return NULL;
2520
2521 if (sock_create_lite(PF_NETLINK, SOCK_DGRAM, unit, &sock))
2522 return NULL;
13d3078e
EB
2523
2524 if (__netlink_create(net, sock, cb_mutex, unit, 1) < 0)
23fe1866
PE
2525 goto out_sock_release_nosk;
2526
2527 sk = sock->sk;
4fdb3bb7 2528
a31f2d17 2529 if (!cfg || cfg->groups < 32)
4277a083 2530 groups = 32;
a31f2d17
PNA
2531 else
2532 groups = cfg->groups;
4277a083 2533
5c398dc8 2534 listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL);
4277a083
PM
2535 if (!listeners)
2536 goto out_sock_release;
2537
1da177e4 2538 sk->sk_data_ready = netlink_data_ready;
a31f2d17
PNA
2539 if (cfg && cfg->input)
2540 nlk_sk(sk)->netlink_rcv = cfg->input;
1da177e4 2541
8ea65f4a 2542 if (netlink_insert(sk, 0))
77247bbb 2543 goto out_sock_release;
4fdb3bb7 2544
77247bbb 2545 nlk = nlk_sk(sk);
cc3a572f 2546 nlk->flags |= NETLINK_F_KERNEL_SOCKET;
4fdb3bb7 2547
4fdb3bb7 2548 netlink_table_grab();
b4b51029
EB
2549 if (!nl_table[unit].registered) {
2550 nl_table[unit].groups = groups;
5c398dc8 2551 rcu_assign_pointer(nl_table[unit].listeners, listeners);
b4b51029
EB
2552 nl_table[unit].cb_mutex = cb_mutex;
2553 nl_table[unit].module = module;
9785e10a
PNA
2554 if (cfg) {
2555 nl_table[unit].bind = cfg->bind;
6251edd9 2556 nl_table[unit].unbind = cfg->unbind;
9785e10a 2557 nl_table[unit].flags = cfg->flags;
da12c90e
G
2558 if (cfg->compare)
2559 nl_table[unit].compare = cfg->compare;
9785e10a 2560 }
b4b51029 2561 nl_table[unit].registered = 1;
f937f1f4
JJ
2562 } else {
2563 kfree(listeners);
869e58f8 2564 nl_table[unit].registered++;
b4b51029 2565 }
4fdb3bb7 2566 netlink_table_ungrab();
77247bbb
PM
2567 return sk;
2568
4fdb3bb7 2569out_sock_release:
4277a083 2570 kfree(listeners);
9dfbec1f 2571 netlink_kernel_release(sk);
23fe1866
PE
2572 return NULL;
2573
2574out_sock_release_nosk:
4fdb3bb7 2575 sock_release(sock);
77247bbb 2576 return NULL;
1da177e4 2577}
9f00d977 2578EXPORT_SYMBOL(__netlink_kernel_create);
b7c6ba6e
DL
2579
2580void
2581netlink_kernel_release(struct sock *sk)
2582{
13d3078e
EB
2583 if (sk == NULL || sk->sk_socket == NULL)
2584 return;
2585
2586 sock_release(sk->sk_socket);
b7c6ba6e
DL
2587}
2588EXPORT_SYMBOL(netlink_kernel_release);
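/*
 * Example (sketch): the usual module-side pairing of these two calls,
 * via the netlink_kernel_create() wrapper from <linux/netlink.h>.
 * MY_NETLINK (the protocol/unit number) and the handlers are hypothetical;
 * real users pick a unit from include/uapi/linux/netlink.h.
 */
static struct sock *my_nl_sock;

static void my_nl_input(struct sk_buff *skb)
{
	/* invoked via nlk->netlink_rcv() for each skb sent to the socket */
	pr_info("netlink: got %u bytes from portid %u\n",
		skb->len, NETLINK_CB(skb).portid);
}

static int __init my_nl_init(void)
{
	struct netlink_kernel_cfg cfg = {
		.groups	= 32,		/* number of multicast groups */
		.input	= my_nl_input,	/* receive callback */
	};

	my_nl_sock = netlink_kernel_create(&init_net, MY_NETLINK, &cfg);
	return my_nl_sock ? 0 : -ENOMEM;
}

static void __exit my_nl_exit(void)
{
	netlink_kernel_release(my_nl_sock);
}

module_init(my_nl_init);
module_exit(my_nl_exit);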
2589
d136f1bd 2590int __netlink_change_ngroups(struct sock *sk, unsigned int groups)
b4ff4f04 2591{
5c398dc8 2592 struct listeners *new, *old;
b4ff4f04 2593 struct netlink_table *tbl = &nl_table[sk->sk_protocol];
b4ff4f04
JB
2594
2595 if (groups < 32)
2596 groups = 32;
2597
b4ff4f04 2598 if (NLGRPSZ(tbl->groups) < NLGRPSZ(groups)) {
5c398dc8
ED
2599 new = kzalloc(sizeof(*new) + NLGRPSZ(groups), GFP_ATOMIC);
2600 if (!new)
d136f1bd 2601 return -ENOMEM;
6d772ac5 2602 old = nl_deref_protected(tbl->listeners);
5c398dc8
ED
2603 memcpy(new->masks, old->masks, NLGRPSZ(tbl->groups));
2604 rcu_assign_pointer(tbl->listeners, new);
2605
37b6b935 2606 kfree_rcu(old, rcu);
b4ff4f04
JB
2607 }
2608 tbl->groups = groups;
2609
d136f1bd
JB
2610 return 0;
2611}
2612
2613/**
2614 * netlink_change_ngroups - change number of multicast groups
2615 *
2616 * This changes the number of multicast groups that are available
2617 * on a certain netlink family. Note that it is not possible to
2618 * change the number of groups to below 32. Also note that it does
2619 * not implicitly call netlink_clear_multicast_users() when the
2620 * number of groups is reduced.
2621 *
2622 * @sk: The kernel netlink socket, as returned by netlink_kernel_create().
2623 * @groups: The new number of groups.
2624 */
2625int netlink_change_ngroups(struct sock *sk, unsigned int groups)
2626{
2627 int err;
2628
2629 netlink_table_grab();
2630 err = __netlink_change_ngroups(sk, groups);
b4ff4f04 2631 netlink_table_ungrab();
d136f1bd 2632
b4ff4f04
JB
2633 return err;
2634}
b4ff4f04 2635
b8273570
JB
2636void __netlink_clear_multicast_users(struct sock *ksk, unsigned int group)
2637{
2638 struct sock *sk;
b8273570
JB
2639 struct netlink_table *tbl = &nl_table[ksk->sk_protocol];
2640
b67bfe0d 2641 sk_for_each_bound(sk, &tbl->mc_list)
b8273570
JB
2642 netlink_update_socket_mc(nlk_sk(sk), group, 0);
2643}
2644
a46621a3 2645struct nlmsghdr *
15e47304 2646__nlmsg_put(struct sk_buff *skb, u32 portid, u32 seq, int type, int len, int flags)
a46621a3
DV
2647{
2648 struct nlmsghdr *nlh;
573ce260 2649 int size = nlmsg_msg_size(len);
a46621a3 2650
23b45672 2651 nlh = (struct nlmsghdr *)skb_put(skb, NLMSG_ALIGN(size));
a46621a3
DV
2652 nlh->nlmsg_type = type;
2653 nlh->nlmsg_len = size;
2654 nlh->nlmsg_flags = flags;
15e47304 2655 nlh->nlmsg_pid = portid;
a46621a3
DV
2656 nlh->nlmsg_seq = seq;
2657 if (!__builtin_constant_p(size) || NLMSG_ALIGN(size) - size != 0)
573ce260 2658 memset(nlmsg_data(nlh) + len, 0, NLMSG_ALIGN(size) - size);
a46621a3
DV
2659 return nlh;
2660}
2661EXPORT_SYMBOL(__nlmsg_put);
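/*
 * Example (sketch): __nlmsg_put() trusts the caller to have reserved
 * enough tailroom; most code goes through the checking wrapper
 * nlmsg_put() from <net/netlink.h>, which returns NULL instead of
 * overrunning the skb.  MY_MSG_TYPE is hypothetical.
 */
static int my_fill_one(struct sk_buff *skb, u32 portid, u32 seq, u32 value)
{
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, portid, seq, MY_MSG_TYPE, sizeof(u32),
			NLM_F_MULTI);
	if (!nlh)
		return -EMSGSIZE;	/* skb full, caller flushes and retries */
	*(u32 *)nlmsg_data(nlh) = value;
	return 0;
}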
2662
1da177e4
LT
2663/*
2664 * It looks a bit ugly.
 2665  * It would be better to create a kernel thread.
2666 */
2667
2668static int netlink_dump(struct sock *sk)
2669{
2670 struct netlink_sock *nlk = nlk_sk(sk);
2671 struct netlink_callback *cb;
c7ac8679 2672 struct sk_buff *skb = NULL;
1da177e4 2673 struct nlmsghdr *nlh;
bf8b79e4 2674 int len, err = -ENOBUFS;
c7ac8679 2675 int alloc_size;
1da177e4 2676
af65bdfc 2677 mutex_lock(nlk->cb_mutex);
16b304f3 2678 if (!nlk->cb_running) {
bf8b79e4
TG
2679 err = -EINVAL;
2680 goto errout_skb;
1da177e4
LT
2681 }
2682
16b304f3 2683 cb = &nlk->cb;
c7ac8679
GR
2684 alloc_size = max_t(int, cb->min_dump_alloc, NLMSG_GOODSIZE);
2685
f9c22888
PM
2686 if (!netlink_rx_is_mmaped(sk) &&
2687 atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
2688 goto errout_skb;
9063e21f
ED
2689
 2690         /* NLMSG_GOODSIZE is small to avoid high-order allocations being
 2691          * required, but it makes sense to _attempt_ a 16K allocation
 2692          * to reduce the number of system calls on dump operations, if the
 2693          * user ever provided a big enough buffer.
2694 */
2695 if (alloc_size < nlk->max_recvmsg_len) {
2696 skb = netlink_alloc_skb(sk,
2697 nlk->max_recvmsg_len,
2698 nlk->portid,
2699 GFP_KERNEL |
2700 __GFP_NOWARN |
2701 __GFP_NORETRY);
2702 /* available room should be exact amount to avoid MSG_TRUNC */
2703 if (skb)
2704 skb_reserve(skb, skb_tailroom(skb) -
2705 nlk->max_recvmsg_len);
2706 }
2707 if (!skb)
2708 skb = netlink_alloc_skb(sk, alloc_size, nlk->portid,
2709 GFP_KERNEL);
c7ac8679 2710 if (!skb)
c63d6ea3 2711 goto errout_skb;
f9c22888 2712 netlink_skb_set_owner_r(skb, sk);
c7ac8679 2713
1da177e4
LT
2714 len = cb->dump(skb, cb);
2715
2716 if (len > 0) {
af65bdfc 2717 mutex_unlock(nlk->cb_mutex);
b1153f29
SH
2718
2719 if (sk_filter(sk, skb))
2720 kfree_skb(skb);
4a7e7c2a
ED
2721 else
2722 __netlink_sendskb(sk, skb);
1da177e4
LT
2723 return 0;
2724 }
2725
bf8b79e4
TG
2726 nlh = nlmsg_put_answer(skb, cb, NLMSG_DONE, sizeof(len), NLM_F_MULTI);
2727 if (!nlh)
2728 goto errout_skb;
2729
670dc283
JB
2730 nl_dump_check_consistent(cb, nlh);
2731
bf8b79e4
TG
2732 memcpy(nlmsg_data(nlh), &len, sizeof(len));
2733
b1153f29
SH
2734 if (sk_filter(sk, skb))
2735 kfree_skb(skb);
4a7e7c2a
ED
2736 else
2737 __netlink_sendskb(sk, skb);
1da177e4 2738
a8f74b22
TG
2739 if (cb->done)
2740 cb->done(cb);
1da177e4 2741
16b304f3
PS
2742 nlk->cb_running = false;
2743 mutex_unlock(nlk->cb_mutex);
6dc878a8 2744 module_put(cb->module);
16b304f3 2745 consume_skb(cb->skb);
1da177e4 2746 return 0;
1797754e 2747
bf8b79e4 2748errout_skb:
af65bdfc 2749 mutex_unlock(nlk->cb_mutex);
bf8b79e4 2750 kfree_skb(skb);
bf8b79e4 2751 return err;
1da177e4
LT
2752}
2753
6dc878a8
G
2754int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
2755 const struct nlmsghdr *nlh,
2756 struct netlink_dump_control *control)
1da177e4
LT
2757{
2758 struct netlink_callback *cb;
2759 struct sock *sk;
2760 struct netlink_sock *nlk;
b44d211e 2761 int ret;
1da177e4 2762
f9c22888
PM
 2763         /* Memory-mapped dump requests need to be copied to avoid looping
 2764          * on the pending state in netlink_mmap_sendmsg() while the CB holds
 2765          * a reference to the skb.
2766 */
2767 if (netlink_skb_is_mmaped(skb)) {
2768 skb = skb_copy(skb, GFP_KERNEL);
16b304f3 2769 if (skb == NULL)
f9c22888 2770 return -ENOBUFS;
f9c22888
PM
2771 } else
2772 atomic_inc(&skb->users);
2773
15e47304 2774 sk = netlink_lookup(sock_net(ssk), ssk->sk_protocol, NETLINK_CB(skb).portid);
1da177e4 2775 if (sk == NULL) {
16b304f3
PS
2776 ret = -ECONNREFUSED;
2777 goto error_free;
1da177e4 2778 }
6dc878a8 2779
16b304f3 2780 nlk = nlk_sk(sk);
af65bdfc 2781 mutex_lock(nlk->cb_mutex);
6dc878a8 2782 /* A dump is in progress... */
16b304f3 2783 if (nlk->cb_running) {
6dc878a8 2784 ret = -EBUSY;
16b304f3 2785 goto error_unlock;
1da177e4 2786 }
6dc878a8 2787 /* add reference of module which cb->dump belongs to */
16b304f3 2788 if (!try_module_get(control->module)) {
6dc878a8 2789 ret = -EPROTONOSUPPORT;
16b304f3 2790 goto error_unlock;
6dc878a8
G
2791 }
2792
16b304f3
PS
2793 cb = &nlk->cb;
2794 memset(cb, 0, sizeof(*cb));
2795 cb->dump = control->dump;
2796 cb->done = control->done;
2797 cb->nlh = nlh;
2798 cb->data = control->data;
2799 cb->module = control->module;
2800 cb->min_dump_alloc = control->min_dump_alloc;
2801 cb->skb = skb;
2802
2803 nlk->cb_running = true;
2804
af65bdfc 2805 mutex_unlock(nlk->cb_mutex);
1da177e4 2806
b44d211e 2807 ret = netlink_dump(sk);
1da177e4 2808 sock_put(sk);
5c58298c 2809
b44d211e
AV
2810 if (ret)
2811 return ret;
2812
5c58298c
DL
2813 /* We successfully started a dump, by returning -EINTR we
2814 * signal not to send ACK even if it was requested.
2815 */
2816 return -EINTR;
16b304f3
PS
2817
2818error_unlock:
2819 sock_put(sk);
2820 mutex_unlock(nlk->cb_mutex);
2821error_free:
2822 kfree_skb(skb);
2823 return ret;
1da177e4 2824}
6dc878a8 2825EXPORT_SYMBOL(__netlink_dump_start);
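/*
 * Example (sketch): request handlers normally reach this through the
 * netlink_dump_start() wrapper in <linux/netlink.h>, which fills in the
 * owning module.  my_dump()/my_done() are hypothetical; ->dump is called
 * repeatedly to fill one skb per recvmsg() until it returns 0.  The
 * -EINTR return above deliberately suppresses the automatic ACK.
 */
static int my_handle_get(struct sock *nlsk, struct sk_buff *skb,
			 struct nlmsghdr *nlh)
{
	if (nlh->nlmsg_flags & NLM_F_DUMP) {
		struct netlink_dump_control c = {
			.dump = my_dump,	/* fills one skb per call */
			.done = my_done,	/* optional cleanup hook */
		};

		return netlink_dump_start(nlsk, skb, nlh, &c);
	}
	return -EOPNOTSUPP;
}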
1da177e4
LT
2826
2827void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err)
2828{
2829 struct sk_buff *skb;
2830 struct nlmsghdr *rep;
2831 struct nlmsgerr *errmsg;
339bf98f 2832 size_t payload = sizeof(*errmsg);
1da177e4 2833
339bf98f
TG
 2834         /* error messages get the original request appended */
2835 if (err)
2836 payload += nlmsg_len(nlh);
1da177e4 2837
f9c22888
PM
2838 skb = netlink_alloc_skb(in_skb->sk, nlmsg_total_size(payload),
2839 NETLINK_CB(in_skb).portid, GFP_KERNEL);
1da177e4
LT
2840 if (!skb) {
2841 struct sock *sk;
2842
3b1e0a65 2843 sk = netlink_lookup(sock_net(in_skb->sk),
b4b51029 2844 in_skb->sk->sk_protocol,
15e47304 2845 NETLINK_CB(in_skb).portid);
1da177e4
LT
2846 if (sk) {
2847 sk->sk_err = ENOBUFS;
2848 sk->sk_error_report(sk);
2849 sock_put(sk);
2850 }
2851 return;
2852 }
2853
15e47304 2854 rep = __nlmsg_put(skb, NETLINK_CB(in_skb).portid, nlh->nlmsg_seq,
5dba93ae 2855 NLMSG_ERROR, payload, 0);
bf8b79e4 2856 errmsg = nlmsg_data(rep);
1da177e4 2857 errmsg->error = err;
bf8b79e4 2858 memcpy(&errmsg->msg, nlh, err ? nlh->nlmsg_len : sizeof(*nlh));
15e47304 2859 netlink_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).portid, MSG_DONTWAIT);
1da177e4 2860}
6ac552fd 2861EXPORT_SYMBOL(netlink_ack);
1da177e4 2862
cd40b7d3 2863int netlink_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *,
1d00a4eb 2864 struct nlmsghdr *))
82ace47a 2865{
82ace47a
TG
2866 struct nlmsghdr *nlh;
2867 int err;
2868
2869 while (skb->len >= nlmsg_total_size(0)) {
cd40b7d3
DL
2870 int msglen;
2871
b529ccf2 2872 nlh = nlmsg_hdr(skb);
d35b6856 2873 err = 0;
82ace47a 2874
ad8e4b75 2875 if (nlh->nlmsg_len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len)
82ace47a
TG
2876 return 0;
2877
d35b6856
TG
2878 /* Only requests are handled by the kernel */
2879 if (!(nlh->nlmsg_flags & NLM_F_REQUEST))
5c58298c 2880 goto ack;
45e7ae7f
TG
2881
2882 /* Skip control messages */
2883 if (nlh->nlmsg_type < NLMSG_MIN_TYPE)
5c58298c 2884 goto ack;
d35b6856 2885
1d00a4eb 2886 err = cb(skb, nlh);
5c58298c
DL
2887 if (err == -EINTR)
2888 goto skip;
2889
2890ack:
d35b6856 2891 if (nlh->nlmsg_flags & NLM_F_ACK || err)
82ace47a 2892 netlink_ack(skb, nlh, err);
82ace47a 2893
5c58298c 2894skip:
6ac552fd 2895 msglen = NLMSG_ALIGN(nlh->nlmsg_len);
cd40b7d3
DL
2896 if (msglen > skb->len)
2897 msglen = skb->len;
2898 skb_pull(skb, msglen);
82ace47a
TG
2899 }
2900
2901 return 0;
2902}
6ac552fd 2903EXPORT_SYMBOL(netlink_rcv_skb);
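/*
 * Example (sketch): the canonical pairing of a kernel socket's input
 * callback with netlink_rcv_skb(), which iterates the messages in the
 * skb, skips non-requests and control messages, and ACKs as needed.
 * my_rcv_msg() and MY_MSG_TYPE are hypothetical.
 */
static int my_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	switch (nlh->nlmsg_type) {
	case MY_MSG_TYPE:
		return 0;	/* process nlmsg_data(nlh) here */
	default:
		return -EINVAL;	/* reported to the sender via netlink_ack() */
	}
}

static void my_nl_rcv(struct sk_buff *skb)
{
	netlink_rcv_skb(skb, &my_rcv_msg);
}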
82ace47a 2904
d387f6ad
TG
2905/**
2906 * nlmsg_notify - send a notification netlink message
2907 * @sk: netlink socket to use
2908 * @skb: notification message
15e47304 2909 * @portid: destination netlink portid for reports or 0
d387f6ad
TG
2910 * @group: destination multicast group or 0
2911 * @report: 1 to report back, 0 to disable
2912 * @flags: allocation flags
2913 */
15e47304 2914int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 portid,
d387f6ad
TG
2915 unsigned int group, int report, gfp_t flags)
2916{
2917 int err = 0;
2918
2919 if (group) {
15e47304 2920 int exclude_portid = 0;
d387f6ad
TG
2921
2922 if (report) {
2923 atomic_inc(&skb->users);
15e47304 2924 exclude_portid = portid;
d387f6ad
TG
2925 }
2926
1ce85fe4
PNA
2927 /* errors reported via destination sk->sk_err, but propagate
2928 * delivery errors if NETLINK_BROADCAST_ERROR flag is set */
15e47304 2929 err = nlmsg_multicast(sk, skb, exclude_portid, group, flags);
d387f6ad
TG
2930 }
2931
1ce85fe4
PNA
2932 if (report) {
2933 int err2;
2934
15e47304 2935 err2 = nlmsg_unicast(sk, skb, portid);
1ce85fe4
PNA
2936 if (!err || err == -ESRCH)
2937 err = err2;
2938 }
d387f6ad
TG
2939
2940 return err;
2941}
6ac552fd 2942EXPORT_SYMBOL(nlmsg_notify);
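/*
 * Example (sketch): a notifier that multicasts to MY_GRP and, when the
 * request carried NLM_F_ECHO, also unicasts back to the requester.
 * nlmsg_report() from <net/netlink.h> extracts that flag; the other
 * names are hypothetical.
 */
static int my_notify(struct sock *my_kernel_sock, struct sk_buff *skb,
		     u32 portid, struct nlmsghdr *nlh)
{
	return nlmsg_notify(my_kernel_sock, skb, portid, MY_GRP,
			    nlmsg_report(nlh), GFP_KERNEL);
}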
d387f6ad 2943
1da177e4
LT
2944#ifdef CONFIG_PROC_FS
2945struct nl_seq_iter {
e372c414 2946 struct seq_net_private p;
56d28b1e 2947 struct rhashtable_iter hti;
1da177e4 2948 int link;
1da177e4
LT
2949};
2950
56d28b1e 2951static int netlink_walk_start(struct nl_seq_iter *iter)
1da177e4 2952{
56d28b1e 2953 int err;
1da177e4 2954
56d28b1e
HX
2955 err = rhashtable_walk_init(&nl_table[iter->link].hash, &iter->hti);
2956 if (err) {
2957 iter->link = MAX_LINKS;
2958 return err;
1da177e4 2959 }
56d28b1e
HX
2960
2961 err = rhashtable_walk_start(&iter->hti);
2962 return err == -EAGAIN ? 0 : err;
1da177e4
LT
2963}
2964
56d28b1e 2965static void netlink_walk_stop(struct nl_seq_iter *iter)
1da177e4 2966{
56d28b1e
HX
2967 rhashtable_walk_stop(&iter->hti);
2968 rhashtable_walk_exit(&iter->hti);
1da177e4
LT
2969}
2970
56d28b1e 2971static void *__netlink_seq_next(struct seq_file *seq)
1da177e4 2972{
56d28b1e 2973 struct nl_seq_iter *iter = seq->private;
e341694e 2974 struct netlink_sock *nlk;
1da177e4 2975
56d28b1e
HX
2976 do {
2977 for (;;) {
2978 int err;
1da177e4 2979
56d28b1e 2980 nlk = rhashtable_walk_next(&iter->hti);
746fac4d 2981
56d28b1e
HX
2982 if (IS_ERR(nlk)) {
2983 if (PTR_ERR(nlk) == -EAGAIN)
2984 continue;
e341694e 2985
56d28b1e
HX
2986 return nlk;
2987 }
1da177e4 2988
56d28b1e
HX
2989 if (nlk)
2990 break;
1da177e4 2991
56d28b1e
HX
2992 netlink_walk_stop(iter);
2993 if (++iter->link >= MAX_LINKS)
2994 return NULL;
da12c90e 2995
56d28b1e
HX
2996 err = netlink_walk_start(iter);
2997 if (err)
2998 return ERR_PTR(err);
1da177e4 2999 }
56d28b1e 3000 } while (sock_net(&nlk->sk) != seq_file_net(seq));
1da177e4 3001
56d28b1e
HX
3002 return nlk;
3003}
1da177e4 3004
56d28b1e
HX
3005static void *netlink_seq_start(struct seq_file *seq, loff_t *posp)
3006{
3007 struct nl_seq_iter *iter = seq->private;
3008 void *obj = SEQ_START_TOKEN;
3009 loff_t pos;
3010 int err;
3011
3012 iter->link = 0;
3013
3014 err = netlink_walk_start(iter);
3015 if (err)
3016 return ERR_PTR(err);
3017
3018 for (pos = *posp; pos && obj && !IS_ERR(obj); pos--)
3019 obj = __netlink_seq_next(seq);
3020
3021 return obj;
3022}
3023
3024static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3025{
3026 ++*pos;
3027 return __netlink_seq_next(seq);
1da177e4
LT
3028}
3029
3030static void netlink_seq_stop(struct seq_file *seq, void *v)
3031{
56d28b1e
HX
3032 struct nl_seq_iter *iter = seq->private;
3033
3034 if (iter->link >= MAX_LINKS)
3035 return;
3036
3037 netlink_walk_stop(iter);
1da177e4
LT
3038}
3039
3040
3041static int netlink_seq_show(struct seq_file *seq, void *v)
3042{
658cb354 3043 if (v == SEQ_START_TOKEN) {
1da177e4
LT
3044 seq_puts(seq,
3045 "sk Eth Pid Groups "
cf0aa4e0 3046 "Rmem Wmem Dump Locks Drops Inode\n");
658cb354 3047 } else {
1da177e4
LT
3048 struct sock *s = v;
3049 struct netlink_sock *nlk = nlk_sk(s);
3050
16b304f3 3051 seq_printf(seq, "%pK %-3d %-6u %08x %-8d %-8d %d %-8d %-8d %-8lu\n",
1da177e4
LT
3052 s,
3053 s->sk_protocol,
15e47304 3054 nlk->portid,
513c2500 3055 nlk->groups ? (u32)nlk->groups[0] : 0,
31e6d363
ED
3056 sk_rmem_alloc_get(s),
3057 sk_wmem_alloc_get(s),
16b304f3 3058 nlk->cb_running,
38938bfe 3059 atomic_read(&s->sk_refcnt),
cf0aa4e0
MY
3060 atomic_read(&s->sk_drops),
3061 sock_i_ino(s)
1da177e4
LT
3062 );
3063
3064 }
3065 return 0;
3066}
3067
56b3d975 3068static const struct seq_operations netlink_seq_ops = {
1da177e4
LT
3069 .start = netlink_seq_start,
3070 .next = netlink_seq_next,
3071 .stop = netlink_seq_stop,
3072 .show = netlink_seq_show,
3073};
3074
3075
3076static int netlink_seq_open(struct inode *inode, struct file *file)
3077{
e372c414
DL
3078 return seq_open_net(inode, file, &netlink_seq_ops,
3079 sizeof(struct nl_seq_iter));
b4b51029
EB
3080}
3081
da7071d7 3082static const struct file_operations netlink_seq_fops = {
1da177e4
LT
3083 .owner = THIS_MODULE,
3084 .open = netlink_seq_open,
3085 .read = seq_read,
3086 .llseek = seq_lseek,
e372c414 3087 .release = seq_release_net,
1da177e4
LT
3088};
3089
3090#endif
3091
3092int netlink_register_notifier(struct notifier_block *nb)
3093{
e041c683 3094 return atomic_notifier_chain_register(&netlink_chain, nb);
1da177e4 3095}
6ac552fd 3096EXPORT_SYMBOL(netlink_register_notifier);
1da177e4
LT
3097
3098int netlink_unregister_notifier(struct notifier_block *nb)
3099{
e041c683 3100 return atomic_notifier_chain_unregister(&netlink_chain, nb);
1da177e4 3101}
6ac552fd 3102EXPORT_SYMBOL(netlink_unregister_notifier);
746fac4d 3103
90ddc4f0 3104static const struct proto_ops netlink_ops = {
1da177e4
LT
3105 .family = PF_NETLINK,
3106 .owner = THIS_MODULE,
3107 .release = netlink_release,
3108 .bind = netlink_bind,
3109 .connect = netlink_connect,
3110 .socketpair = sock_no_socketpair,
3111 .accept = sock_no_accept,
3112 .getname = netlink_getname,
9652e931 3113 .poll = netlink_poll,
1da177e4
LT
3114 .ioctl = sock_no_ioctl,
3115 .listen = sock_no_listen,
3116 .shutdown = sock_no_shutdown,
9a4595bc
PM
3117 .setsockopt = netlink_setsockopt,
3118 .getsockopt = netlink_getsockopt,
1da177e4
LT
3119 .sendmsg = netlink_sendmsg,
3120 .recvmsg = netlink_recvmsg,
ccdfcc39 3121 .mmap = netlink_mmap,
1da177e4
LT
3122 .sendpage = sock_no_sendpage,
3123};
3124
ec1b4cf7 3125static const struct net_proto_family netlink_family_ops = {
1da177e4
LT
3126 .family = PF_NETLINK,
3127 .create = netlink_create,
3128 .owner = THIS_MODULE, /* for consistency 8) */
3129};
3130
4665079c 3131static int __net_init netlink_net_init(struct net *net)
b4b51029
EB
3132{
3133#ifdef CONFIG_PROC_FS
d4beaa66 3134 if (!proc_create("netlink", 0, net->proc_net, &netlink_seq_fops))
b4b51029
EB
3135 return -ENOMEM;
3136#endif
3137 return 0;
3138}
3139
4665079c 3140static void __net_exit netlink_net_exit(struct net *net)
b4b51029
EB
3141{
3142#ifdef CONFIG_PROC_FS
ece31ffd 3143 remove_proc_entry("netlink", net->proc_net);
b4b51029
EB
3144#endif
3145}
3146
b963ea89
DM
3147static void __init netlink_add_usersock_entry(void)
3148{
5c398dc8 3149 struct listeners *listeners;
b963ea89
DM
3150 int groups = 32;
3151
5c398dc8 3152 listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL);
b963ea89 3153 if (!listeners)
5c398dc8 3154 panic("netlink_add_usersock_entry: Cannot allocate listeners\n");
b963ea89
DM
3155
3156 netlink_table_grab();
3157
3158 nl_table[NETLINK_USERSOCK].groups = groups;
5c398dc8 3159 rcu_assign_pointer(nl_table[NETLINK_USERSOCK].listeners, listeners);
b963ea89
DM
3160 nl_table[NETLINK_USERSOCK].module = THIS_MODULE;
3161 nl_table[NETLINK_USERSOCK].registered = 1;
9785e10a 3162 nl_table[NETLINK_USERSOCK].flags = NL_CFG_F_NONROOT_SEND;
b963ea89
DM
3163
3164 netlink_table_ungrab();
3165}
3166
022cbae6 3167static struct pernet_operations __net_initdata netlink_net_ops = {
b4b51029
EB
3168 .init = netlink_net_init,
3169 .exit = netlink_net_exit,
3170};
3171
49f7b33e 3172static inline u32 netlink_hash(const void *data, u32 len, u32 seed)
c428ecd1
HX
3173{
3174 const struct netlink_sock *nlk = data;
3175 struct netlink_compare_arg arg;
3176
3177 netlink_compare_arg_init(&arg, sock_net(&nlk->sk), nlk->portid);
11b58ba1 3178 return jhash2((u32 *)&arg, netlink_compare_arg_len / sizeof(u32), seed);
c428ecd1
HX
3179}
3180
3181static const struct rhashtable_params netlink_rhashtable_params = {
3182 .head_offset = offsetof(struct netlink_sock, node),
3183 .key_len = netlink_compare_arg_len,
c428ecd1
HX
3184 .obj_hashfn = netlink_hash,
3185 .obj_cmpfn = netlink_compare,
b5e2c150 3186 .automatic_shrinking = true,
c428ecd1
HX
3187};
3188
1da177e4
LT
3189static int __init netlink_proto_init(void)
3190{
1da177e4 3191 int i;
1da177e4
LT
3192 int err = proto_register(&netlink_proto, 0);
3193
3194 if (err != 0)
3195 goto out;
3196
fab25745 3197 BUILD_BUG_ON(sizeof(struct netlink_skb_parms) > FIELD_SIZEOF(struct sk_buff, cb));
1da177e4 3198
0da974f4 3199 nl_table = kcalloc(MAX_LINKS, sizeof(*nl_table), GFP_KERNEL);
fab2caf6
AM
3200 if (!nl_table)
3201 goto panic;
1da177e4 3202
1da177e4 3203 for (i = 0; i < MAX_LINKS; i++) {
c428ecd1
HX
3204 if (rhashtable_init(&nl_table[i].hash,
3205 &netlink_rhashtable_params) < 0) {
e341694e
TG
3206 while (--i > 0)
3207 rhashtable_destroy(&nl_table[i].hash);
1da177e4 3208 kfree(nl_table);
fab2caf6 3209 goto panic;
1da177e4 3210 }
1da177e4
LT
3211 }
3212
bcbde0d4
DB
3213 INIT_LIST_HEAD(&netlink_tap_all);
3214
b963ea89
DM
3215 netlink_add_usersock_entry();
3216
1da177e4 3217 sock_register(&netlink_family_ops);
b4b51029 3218 register_pernet_subsys(&netlink_net_ops);
746fac4d 3219 /* The netlink device handler may be needed early. */
1da177e4
LT
3220 rtnetlink_init();
3221out:
3222 return err;
fab2caf6
AM
3223panic:
3224 panic("netlink_init: Cannot allocate nl_table\n");
1da177e4
LT
3225}
3226
1da177e4 3227core_initcall(netlink_proto_init);