netlink, mmap: transform mmap skb into full skb on taps
net/netlink/af_netlink.c
/*
 * NETLINK      Kernel-user communication protocol.
 *
 *		Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>
 *				Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *				Patrick McHardy <kaber@trash.net>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Tue Jun 26 14:36:48 MEST 2001 Herbert "herp" Rosmanith
 *                               added netlink_proto_exit
 * Tue Jan 22 18:32:44 BRST 2002 Arnaldo C. de Melo <acme@conectiva.com.br>
 *                               use nlk_sk, as sk->protinfo is on a diet 8)
 * Fri Jul 22 19:51:12 MEST 2005 Harald Welte <laforge@gnumonks.org>
 *				- inc module use count of module that owns
 *				  the kernel socket in case userspace opens
 *				  socket of same protocol
 *				- remove all module support, since netlink is
 *				  mandatory if CONFIG_NET=y these days
 */

#include <linux/module.h>

#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/fcntl.h>
#include <linux/termios.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/security.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/random.h>
#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/audit.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <linux/if_arp.h>
#include <linux/rhashtable.h>
#include <asm/cacheflush.h>
#include <linux/hash.h>
#include <linux/genetlink.h>

#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/scm.h>
#include <net/netlink.h>

#include "af_netlink.h"

struct listeners {
	struct rcu_head		rcu;
	unsigned long		masks[0];
};

/* state bits */
#define NETLINK_S_CONGESTED		0x0

/* flags */
#define NETLINK_F_KERNEL_SOCKET		0x1
#define NETLINK_F_RECV_PKTINFO		0x2
#define NETLINK_F_BROADCAST_SEND_ERROR	0x4
#define NETLINK_F_RECV_NO_ENOBUFS	0x8
#define NETLINK_F_LISTEN_ALL_NSID	0x10
#define NETLINK_F_CAP_ACK		0x20

static inline int netlink_is_kernel(struct sock *sk)
{
	return nlk_sk(sk)->flags & NETLINK_F_KERNEL_SOCKET;
}

struct netlink_table *nl_table __read_mostly;
EXPORT_SYMBOL_GPL(nl_table);

static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);

static int netlink_dump(struct sock *sk);
static void netlink_skb_destructor(struct sk_buff *skb);

/* nl_table locking explained:
 * Lookup and traversal are protected with an RCU read-side lock. Insertion
 * and removal are protected with per bucket lock while using RCU list
 * modification primitives and may run in parallel to RCU protected lookups.
 * Destruction of the Netlink socket may only occur *after* nl_table_lock has
 * been acquired, either during or after the socket has been removed from
 * the list, and after an RCU grace period.
 */
DEFINE_RWLOCK(nl_table_lock);
EXPORT_SYMBOL_GPL(nl_table_lock);
static atomic_t nl_table_users = ATOMIC_INIT(0);

#define nl_deref_protected(X) rcu_dereference_protected(X, lockdep_is_held(&nl_table_lock));

static ATOMIC_NOTIFIER_HEAD(netlink_chain);

static DEFINE_SPINLOCK(netlink_tap_lock);
static struct list_head netlink_tap_all __read_mostly;

static const struct rhashtable_params netlink_rhashtable_params;

static inline u32 netlink_group_mask(u32 group)
{
	return group ? 1 << (group - 1) : 0;
}

static struct sk_buff *netlink_to_full_skb(const struct sk_buff *skb,
					   gfp_t gfp_mask)
{
	unsigned int len = skb_end_offset(skb);
	struct sk_buff *new;

	new = alloc_skb(len, gfp_mask);
	if (new == NULL)
		return NULL;

	NETLINK_CB(new).portid = NETLINK_CB(skb).portid;
	NETLINK_CB(new).dst_group = NETLINK_CB(skb).dst_group;
	NETLINK_CB(new).creds = NETLINK_CB(skb).creds;

	memcpy(skb_put(new, len), skb->data, len);
	return new;
}
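
/* Tapped skbs whose data sits in an mmap ring frame (or a vmalloc'ed head)
 * must not be cloned: a clone would still point into ring memory that
 * userspace can recycle at any time. netlink_to_full_skb() above therefore
 * copies the payload and the relevant control-block fields into a freshly
 * allocated, self-contained skb before it is handed to a tap device.
 */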

int netlink_add_tap(struct netlink_tap *nt)
{
	if (unlikely(nt->dev->type != ARPHRD_NETLINK))
		return -EINVAL;

	spin_lock(&netlink_tap_lock);
	list_add_rcu(&nt->list, &netlink_tap_all);
	spin_unlock(&netlink_tap_lock);

	__module_get(nt->module);

	return 0;
}
EXPORT_SYMBOL_GPL(netlink_add_tap);

static int __netlink_remove_tap(struct netlink_tap *nt)
{
	bool found = false;
	struct netlink_tap *tmp;

	spin_lock(&netlink_tap_lock);

	list_for_each_entry(tmp, &netlink_tap_all, list) {
		if (nt == tmp) {
			list_del_rcu(&nt->list);
			found = true;
			goto out;
		}
	}

	pr_warn("__netlink_remove_tap: %p not found\n", nt);
out:
	spin_unlock(&netlink_tap_lock);

	if (found)
		module_put(nt->module);

	return found ? 0 : -ENODEV;
}

int netlink_remove_tap(struct netlink_tap *nt)
{
	int ret;

	ret = __netlink_remove_tap(nt);
	synchronize_net();

	return ret;
}
EXPORT_SYMBOL_GPL(netlink_remove_tap);

static bool netlink_filter_tap(const struct sk_buff *skb)
{
	struct sock *sk = skb->sk;

	/* We take the more conservative approach and
	 * whitelist socket protocols that may pass.
	 */
	switch (sk->sk_protocol) {
	case NETLINK_ROUTE:
	case NETLINK_USERSOCK:
	case NETLINK_SOCK_DIAG:
	case NETLINK_NFLOG:
	case NETLINK_XFRM:
	case NETLINK_FIB_LOOKUP:
	case NETLINK_NETFILTER:
	case NETLINK_GENERIC:
		return true;
	}

	return false;
}

static int __netlink_deliver_tap_skb(struct sk_buff *skb,
				     struct net_device *dev)
{
	struct sk_buff *nskb;
	struct sock *sk = skb->sk;
	int ret = -ENOMEM;

	dev_hold(dev);

	if (netlink_skb_is_mmaped(skb) || is_vmalloc_addr(skb->head))
		nskb = netlink_to_full_skb(skb, GFP_ATOMIC);
	else
		nskb = skb_clone(skb, GFP_ATOMIC);
	if (nskb) {
		nskb->dev = dev;
		nskb->protocol = htons((u16) sk->sk_protocol);
		nskb->pkt_type = netlink_is_kernel(sk) ?
				 PACKET_KERNEL : PACKET_USER;
		skb_reset_network_header(nskb);
		ret = dev_queue_xmit(nskb);
		if (unlikely(ret > 0))
			ret = net_xmit_errno(ret);
	}

	dev_put(dev);
	return ret;
}

static void __netlink_deliver_tap(struct sk_buff *skb)
{
	int ret;
	struct netlink_tap *tmp;

	if (!netlink_filter_tap(skb))
		return;

	list_for_each_entry_rcu(tmp, &netlink_tap_all, list) {
		ret = __netlink_deliver_tap_skb(skb, tmp->dev);
		if (unlikely(ret))
			break;
	}
}

static void netlink_deliver_tap(struct sk_buff *skb)
{
	rcu_read_lock();

	if (unlikely(!list_empty(&netlink_tap_all)))
		__netlink_deliver_tap(skb);

	rcu_read_unlock();
}

static void netlink_deliver_tap_kernel(struct sock *dst, struct sock *src,
				       struct sk_buff *skb)
{
	if (!(netlink_is_kernel(dst) && netlink_is_kernel(src)))
		netlink_deliver_tap(skb);
}

static void netlink_overrun(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (!(nlk->flags & NETLINK_F_RECV_NO_ENOBUFS)) {
		if (!test_and_set_bit(NETLINK_S_CONGESTED,
				      &nlk_sk(sk)->state)) {
			sk->sk_err = ENOBUFS;
			sk->sk_error_report(sk);
		}
	}
	atomic_inc(&sk->sk_drops);
}

static void netlink_rcv_wake(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (skb_queue_empty(&sk->sk_receive_queue))
		clear_bit(NETLINK_S_CONGESTED, &nlk->state);
	if (!test_bit(NETLINK_S_CONGESTED, &nlk->state))
		wake_up_interruptible(&nlk->wait);
}

#ifdef CONFIG_NETLINK_MMAP
static bool netlink_rx_is_mmaped(struct sock *sk)
{
	return nlk_sk(sk)->rx_ring.pg_vec != NULL;
}

static bool netlink_tx_is_mmaped(struct sock *sk)
{
	return nlk_sk(sk)->tx_ring.pg_vec != NULL;
}

static __pure struct page *pgvec_to_page(const void *addr)
{
	if (is_vmalloc_addr(addr))
		return vmalloc_to_page(addr);
	else
		return virt_to_page(addr);
}

static void free_pg_vec(void **pg_vec, unsigned int order, unsigned int len)
{
	unsigned int i;

	for (i = 0; i < len; i++) {
		if (pg_vec[i] != NULL) {
			if (is_vmalloc_addr(pg_vec[i]))
				vfree(pg_vec[i]);
			else
				free_pages((unsigned long)pg_vec[i], order);
		}
	}
	kfree(pg_vec);
}

static void *alloc_one_pg_vec_page(unsigned long order)
{
	void *buffer;
	gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP | __GFP_ZERO |
			  __GFP_NOWARN | __GFP_NORETRY;

	buffer = (void *)__get_free_pages(gfp_flags, order);
	if (buffer != NULL)
		return buffer;

	buffer = vzalloc((1 << order) * PAGE_SIZE);
	if (buffer != NULL)
		return buffer;

	gfp_flags &= ~__GFP_NORETRY;
	return (void *)__get_free_pages(gfp_flags, order);
}
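
/* alloc_one_pg_vec_page() above tries the cheapest option first: contiguous
 * pages without retries or allocation warnings, then vmalloc'ed memory, and
 * only as a last resort the page allocator again with __GFP_NORETRY cleared.
 * free_pg_vec() mirrors this by checking is_vmalloc_addr() per block.
 */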

static void **alloc_pg_vec(struct netlink_sock *nlk,
			   struct nl_mmap_req *req, unsigned int order)
{
	unsigned int block_nr = req->nm_block_nr;
	unsigned int i;
	void **pg_vec;

	pg_vec = kcalloc(block_nr, sizeof(void *), GFP_KERNEL);
	if (pg_vec == NULL)
		return NULL;

	for (i = 0; i < block_nr; i++) {
		pg_vec[i] = alloc_one_pg_vec_page(order);
		if (pg_vec[i] == NULL)
			goto err1;
	}

	return pg_vec;
err1:
	free_pg_vec(pg_vec, order, block_nr);
	return NULL;
}

static void
__netlink_set_ring(struct sock *sk, struct nl_mmap_req *req, bool tx_ring, void **pg_vec,
		   unsigned int order)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	struct sk_buff_head *queue;
	struct netlink_ring *ring;

	queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
	ring = tx_ring ? &nlk->tx_ring : &nlk->rx_ring;

	spin_lock_bh(&queue->lock);

	ring->frame_max		= req->nm_frame_nr - 1;
	ring->head		= 0;
	ring->frame_size	= req->nm_frame_size;
	ring->pg_vec_pages	= req->nm_block_size / PAGE_SIZE;

	swap(ring->pg_vec_len, req->nm_block_nr);
	swap(ring->pg_vec_order, order);
	swap(ring->pg_vec, pg_vec);

	__skb_queue_purge(queue);
	spin_unlock_bh(&queue->lock);

	WARN_ON(atomic_read(&nlk->mapped));

	if (pg_vec)
		free_pg_vec(pg_vec, order, req->nm_block_nr);
}

static int netlink_set_ring(struct sock *sk, struct nl_mmap_req *req,
			    bool tx_ring)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	struct netlink_ring *ring;
	void **pg_vec = NULL;
	unsigned int order = 0;

	ring = tx_ring ? &nlk->tx_ring : &nlk->rx_ring;

	if (atomic_read(&nlk->mapped))
		return -EBUSY;
	if (atomic_read(&ring->pending))
		return -EBUSY;

	if (req->nm_block_nr) {
		if (ring->pg_vec != NULL)
			return -EBUSY;

		if ((int)req->nm_block_size <= 0)
			return -EINVAL;
		if (!PAGE_ALIGNED(req->nm_block_size))
			return -EINVAL;
		if (req->nm_frame_size < NL_MMAP_HDRLEN)
			return -EINVAL;
		if (!IS_ALIGNED(req->nm_frame_size, NL_MMAP_MSG_ALIGNMENT))
			return -EINVAL;

		ring->frames_per_block = req->nm_block_size /
					 req->nm_frame_size;
		if (ring->frames_per_block == 0)
			return -EINVAL;
		if (ring->frames_per_block * req->nm_block_nr !=
		    req->nm_frame_nr)
			return -EINVAL;

		order = get_order(req->nm_block_size);
		pg_vec = alloc_pg_vec(nlk, req, order);
		if (pg_vec == NULL)
			return -ENOMEM;
	} else {
		if (req->nm_frame_nr)
			return -EINVAL;
	}

	mutex_lock(&nlk->pg_vec_lock);
	if (atomic_read(&nlk->mapped) == 0) {
		__netlink_set_ring(sk, req, tx_ring, pg_vec, order);
		mutex_unlock(&nlk->pg_vec_lock);
		return 0;
	}

	mutex_unlock(&nlk->pg_vec_lock);

	if (pg_vec)
		free_pg_vec(pg_vec, order, req->nm_block_nr);

	return -EBUSY;
}

static void netlink_mm_open(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct socket *sock = file->private_data;
	struct sock *sk = sock->sk;

	if (sk)
		atomic_inc(&nlk_sk(sk)->mapped);
}

static void netlink_mm_close(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct socket *sock = file->private_data;
	struct sock *sk = sock->sk;

	if (sk)
		atomic_dec(&nlk_sk(sk)->mapped);
}

static const struct vm_operations_struct netlink_mmap_ops = {
	.open	= netlink_mm_open,
	.close	= netlink_mm_close,
};

static int netlink_mmap(struct file *file, struct socket *sock,
			struct vm_area_struct *vma)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	struct netlink_ring *ring;
	unsigned long start, size, expected;
	unsigned int i;
	int err = -EINVAL;

	if (vma->vm_pgoff)
		return -EINVAL;

	mutex_lock(&nlk->pg_vec_lock);

	expected = 0;
	for (ring = &nlk->rx_ring; ring <= &nlk->tx_ring; ring++) {
		if (ring->pg_vec == NULL)
			continue;
		expected += ring->pg_vec_len * ring->pg_vec_pages * PAGE_SIZE;
	}

	if (expected == 0)
		goto out;

	size = vma->vm_end - vma->vm_start;
	if (size != expected)
		goto out;

	start = vma->vm_start;
	for (ring = &nlk->rx_ring; ring <= &nlk->tx_ring; ring++) {
		if (ring->pg_vec == NULL)
			continue;

		for (i = 0; i < ring->pg_vec_len; i++) {
			struct page *page;
			void *kaddr = ring->pg_vec[i];
			unsigned int pg_num;

			for (pg_num = 0; pg_num < ring->pg_vec_pages; pg_num++) {
				page = pgvec_to_page(kaddr);
				err = vm_insert_page(vma, start, page);
				if (err < 0)
					goto out;
				start += PAGE_SIZE;
				kaddr += PAGE_SIZE;
			}
		}
	}

	atomic_inc(&nlk->mapped);
	vma->vm_ops = &netlink_mmap_ops;
	err = 0;
out:
	mutex_unlock(&nlk->pg_vec_lock);
	return err;
}

static void netlink_frame_flush_dcache(const struct nl_mmap_hdr *hdr, unsigned int nm_len)
{
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
	struct page *p_start, *p_end;

	/* First page is flushed through netlink_{get,set}_status */
	p_start = pgvec_to_page(hdr + PAGE_SIZE);
	p_end   = pgvec_to_page((void *)hdr + NL_MMAP_HDRLEN + nm_len - 1);
	while (p_start <= p_end) {
		flush_dcache_page(p_start);
		p_start++;
	}
#endif
}

static enum nl_mmap_status netlink_get_status(const struct nl_mmap_hdr *hdr)
{
	smp_rmb();
	flush_dcache_page(pgvec_to_page(hdr));
	return hdr->nm_status;
}

static void netlink_set_status(struct nl_mmap_hdr *hdr,
			       enum nl_mmap_status status)
{
	smp_mb();
	hdr->nm_status = status;
	flush_dcache_page(pgvec_to_page(hdr));
}
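
/* nm_status is the handover point of the ring protocol: the kernel reads it
 * behind an smp_rmb() and writes it after an smp_mb(), so the frame payload
 * is always globally visible before its status changes hands. A minimal
 * userspace RX-ring reader, sketched here for illustration only (ring setup,
 * wrap-around and error handling omitted; pfd wraps the socket fd and
 * process() is a placeholder):
 *
 *	struct nl_mmap_hdr *hdr = ring_base + frame_offset;
 *
 *	while (hdr->nm_status != NL_MMAP_STATUS_VALID)
 *		poll(&pfd, 1, -1);
 *	process((void *)hdr + NL_MMAP_HDRLEN, hdr->nm_len);
 *	hdr->nm_status = NL_MMAP_STATUS_UNUSED;
 */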

static struct nl_mmap_hdr *
__netlink_lookup_frame(const struct netlink_ring *ring, unsigned int pos)
{
	unsigned int pg_vec_pos, frame_off;

	pg_vec_pos = pos / ring->frames_per_block;
	frame_off  = pos % ring->frames_per_block;

	return ring->pg_vec[pg_vec_pos] + (frame_off * ring->frame_size);
}

static struct nl_mmap_hdr *
netlink_lookup_frame(const struct netlink_ring *ring, unsigned int pos,
		     enum nl_mmap_status status)
{
	struct nl_mmap_hdr *hdr;

	hdr = __netlink_lookup_frame(ring, pos);
	if (netlink_get_status(hdr) != status)
		return NULL;

	return hdr;
}

static struct nl_mmap_hdr *
netlink_current_frame(const struct netlink_ring *ring,
		      enum nl_mmap_status status)
{
	return netlink_lookup_frame(ring, ring->head, status);
}

static void netlink_increment_head(struct netlink_ring *ring)
{
	ring->head = ring->head != ring->frame_max ? ring->head + 1 : 0;
}

static void netlink_forward_ring(struct netlink_ring *ring)
{
	unsigned int head = ring->head;
	const struct nl_mmap_hdr *hdr;

	do {
		hdr = __netlink_lookup_frame(ring, ring->head);
		if (hdr->nm_status == NL_MMAP_STATUS_UNUSED)
			break;
		if (hdr->nm_status != NL_MMAP_STATUS_SKIP)
			break;
		netlink_increment_head(ring);
	} while (ring->head != head);
}

static bool netlink_has_valid_frame(struct netlink_ring *ring)
{
	unsigned int head = ring->head, pos = head;
	const struct nl_mmap_hdr *hdr;

	do {
		hdr = __netlink_lookup_frame(ring, pos);
		if (hdr->nm_status == NL_MMAP_STATUS_VALID)
			return true;
		pos = pos != 0 ? pos - 1 : ring->frame_max;
	} while (pos != head);

	return false;
}

static bool netlink_dump_space(struct netlink_sock *nlk)
{
	struct netlink_ring *ring = &nlk->rx_ring;
	struct nl_mmap_hdr *hdr;
	unsigned int n;

	hdr = netlink_current_frame(ring, NL_MMAP_STATUS_UNUSED);
	if (hdr == NULL)
		return false;

	n = ring->head + ring->frame_max / 2;
	if (n > ring->frame_max)
		n -= ring->frame_max;

	hdr = __netlink_lookup_frame(ring, n);

	return hdr->nm_status == NL_MMAP_STATUS_UNUSED;
}
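
/* netlink_dump_space() above implements the "at least half the ring is
 * unused" rule referenced in netlink_poll(): it requires both the current
 * frame and the frame halfway around the ring to be unused, so a dump only
 * keeps producing while userspace keeps consuming.
 */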

static unsigned int netlink_poll(struct file *file, struct socket *sock,
				 poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	unsigned int mask;
	int err;

	if (nlk->rx_ring.pg_vec != NULL) {
		/* Memory mapped sockets don't call recvmsg(), so flow control
		 * for dumps is performed here. A dump is allowed to continue
		 * if at least half the ring is unused.
		 */
		while (nlk->cb_running && netlink_dump_space(nlk)) {
			err = netlink_dump(sk);
			if (err < 0) {
				sk->sk_err = -err;
				sk->sk_error_report(sk);
				break;
			}
		}
		netlink_rcv_wake(sk);
	}

	mask = datagram_poll(file, sock, wait);

	/* We could already have received frames in the normal receive
	 * queue, that will show up as NL_MMAP_STATUS_COPY in the ring,
	 * so if mask contains pollin/etc already, there's no point
	 * walking the ring.
	 */
	if ((mask & (POLLIN | POLLRDNORM)) != (POLLIN | POLLRDNORM)) {
		spin_lock_bh(&sk->sk_receive_queue.lock);
		if (nlk->rx_ring.pg_vec) {
			if (netlink_has_valid_frame(&nlk->rx_ring))
				mask |= POLLIN | POLLRDNORM;
		}
		spin_unlock_bh(&sk->sk_receive_queue.lock);
	}

	spin_lock_bh(&sk->sk_write_queue.lock);
	if (nlk->tx_ring.pg_vec) {
		if (netlink_current_frame(&nlk->tx_ring, NL_MMAP_STATUS_UNUSED))
			mask |= POLLOUT | POLLWRNORM;
	}
	spin_unlock_bh(&sk->sk_write_queue.lock);

	return mask;
}

static struct nl_mmap_hdr *netlink_mmap_hdr(struct sk_buff *skb)
{
	return (struct nl_mmap_hdr *)(skb->head - NL_MMAP_HDRLEN);
}

static void netlink_ring_setup_skb(struct sk_buff *skb, struct sock *sk,
				   struct netlink_ring *ring,
				   struct nl_mmap_hdr *hdr)
{
	unsigned int size;
	void *data;

	size = ring->frame_size - NL_MMAP_HDRLEN;
	data = (void *)hdr + NL_MMAP_HDRLEN;

	skb->head	= data;
	skb->data	= data;
	skb_reset_tail_pointer(skb);
	skb->end	= skb->tail + size;
	skb->len	= 0;

	skb->destructor	= netlink_skb_destructor;
	NETLINK_CB(skb).flags |= NETLINK_SKB_MMAPED;
	NETLINK_CB(skb).sk = sk;
}
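
/* An skb set up this way owns no data of its own: head, data and end all
 * point straight into the mmap'ed ring frame. The NETLINK_SKB_MMAPED flag
 * tells netlink_skb_destructor() to hand the frame back to the ring instead
 * of freeing the memory, and it is also why taps must go through
 * netlink_to_full_skb() rather than skb_clone().
 */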

static int netlink_mmap_sendmsg(struct sock *sk, struct msghdr *msg,
				u32 dst_portid, u32 dst_group,
				struct scm_cookie *scm)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	struct netlink_ring *ring;
	struct nl_mmap_hdr *hdr;
	struct sk_buff *skb;
	unsigned int maxlen;
	int err = 0, len = 0;

	mutex_lock(&nlk->pg_vec_lock);

	ring   = &nlk->tx_ring;
	maxlen = ring->frame_size - NL_MMAP_HDRLEN;

	do {
		unsigned int nm_len;

		hdr = netlink_current_frame(ring, NL_MMAP_STATUS_VALID);
		if (hdr == NULL) {
			if (!(msg->msg_flags & MSG_DONTWAIT) &&
			    atomic_read(&nlk->tx_ring.pending))
				schedule();
			continue;
		}

		nm_len = ACCESS_ONCE(hdr->nm_len);
		if (nm_len > maxlen) {
			err = -EINVAL;
			goto out;
		}

		netlink_frame_flush_dcache(hdr, nm_len);

		skb = alloc_skb(nm_len, GFP_KERNEL);
		if (skb == NULL) {
			err = -ENOBUFS;
			goto out;
		}
		__skb_put(skb, nm_len);
		memcpy(skb->data, (void *)hdr + NL_MMAP_HDRLEN, nm_len);
		netlink_set_status(hdr, NL_MMAP_STATUS_UNUSED);

		netlink_increment_head(ring);

		NETLINK_CB(skb).portid	  = nlk->portid;
		NETLINK_CB(skb).dst_group = dst_group;
		NETLINK_CB(skb).creds	  = scm->creds;

		err = security_netlink_send(sk, skb);
		if (err) {
			kfree_skb(skb);
			goto out;
		}

		if (unlikely(dst_group)) {
			atomic_inc(&skb->users);
			netlink_broadcast(sk, skb, dst_portid, dst_group,
					  GFP_KERNEL);
		}
		err = netlink_unicast(sk, skb, dst_portid,
				      msg->msg_flags & MSG_DONTWAIT);
		if (err < 0)
			goto out;
		len += err;

	} while (hdr != NULL ||
		 (!(msg->msg_flags & MSG_DONTWAIT) &&
		  atomic_read(&nlk->tx_ring.pending)));

	if (len > 0)
		err = len;
out:
	mutex_unlock(&nlk->pg_vec_lock);
	return err;
}

static void netlink_queue_mmaped_skb(struct sock *sk, struct sk_buff *skb)
{
	struct nl_mmap_hdr *hdr;

	hdr = netlink_mmap_hdr(skb);
	hdr->nm_len	= skb->len;
	hdr->nm_group	= NETLINK_CB(skb).dst_group;
	hdr->nm_pid	= NETLINK_CB(skb).creds.pid;
	hdr->nm_uid	= from_kuid(sk_user_ns(sk), NETLINK_CB(skb).creds.uid);
	hdr->nm_gid	= from_kgid(sk_user_ns(sk), NETLINK_CB(skb).creds.gid);
	netlink_frame_flush_dcache(hdr, hdr->nm_len);
	netlink_set_status(hdr, NL_MMAP_STATUS_VALID);

	NETLINK_CB(skb).flags |= NETLINK_SKB_DELIVERED;
	kfree_skb(skb);
}

static void netlink_ring_set_copied(struct sock *sk, struct sk_buff *skb)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	struct netlink_ring *ring = &nlk->rx_ring;
	struct nl_mmap_hdr *hdr;

	spin_lock_bh(&sk->sk_receive_queue.lock);
	hdr = netlink_current_frame(ring, NL_MMAP_STATUS_UNUSED);
	if (hdr == NULL) {
		spin_unlock_bh(&sk->sk_receive_queue.lock);
		kfree_skb(skb);
		netlink_overrun(sk);
		return;
	}
	netlink_increment_head(ring);
	__skb_queue_tail(&sk->sk_receive_queue, skb);
	spin_unlock_bh(&sk->sk_receive_queue.lock);

	hdr->nm_len	= skb->len;
	hdr->nm_group	= NETLINK_CB(skb).dst_group;
	hdr->nm_pid	= NETLINK_CB(skb).creds.pid;
	hdr->nm_uid	= from_kuid(sk_user_ns(sk), NETLINK_CB(skb).creds.uid);
	hdr->nm_gid	= from_kgid(sk_user_ns(sk), NETLINK_CB(skb).creds.gid);
	netlink_set_status(hdr, NL_MMAP_STATUS_COPY);
}

#else /* CONFIG_NETLINK_MMAP */
#define netlink_rx_is_mmaped(sk)	false
#define netlink_tx_is_mmaped(sk)	false
#define netlink_mmap			sock_no_mmap
#define netlink_poll			datagram_poll
#define netlink_mmap_sendmsg(sk, msg, dst_portid, dst_group, scm)	0
#endif /* CONFIG_NETLINK_MMAP */

static void netlink_skb_destructor(struct sk_buff *skb)
{
#ifdef CONFIG_NETLINK_MMAP
	struct nl_mmap_hdr *hdr;
	struct netlink_ring *ring;
	struct sock *sk;

	/* If a packet from the kernel to userspace was freed because of an
	 * error without being delivered to userspace, the kernel must reset
	 * the status. In the direction userspace to kernel, the status is
	 * always reset here after the packet was processed and freed.
	 */
	if (netlink_skb_is_mmaped(skb)) {
		hdr = netlink_mmap_hdr(skb);
		sk = NETLINK_CB(skb).sk;

		if (NETLINK_CB(skb).flags & NETLINK_SKB_TX) {
			netlink_set_status(hdr, NL_MMAP_STATUS_UNUSED);
			ring = &nlk_sk(sk)->tx_ring;
		} else {
			if (!(NETLINK_CB(skb).flags & NETLINK_SKB_DELIVERED)) {
				hdr->nm_len = 0;
				netlink_set_status(hdr, NL_MMAP_STATUS_VALID);
			}
			ring = &nlk_sk(sk)->rx_ring;
		}

		WARN_ON(atomic_read(&ring->pending) == 0);
		atomic_dec(&ring->pending);
		sock_put(sk);

		skb->head = NULL;
	}
#endif
	if (is_vmalloc_addr(skb->head)) {
		if (!skb->cloned ||
		    !atomic_dec_return(&(skb_shinfo(skb)->dataref)))
			vfree(skb->head);

		skb->head = NULL;
	}
	if (skb->sk != NULL)
		sock_rfree(skb);
}

static void netlink_skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
{
	WARN_ON(skb->sk != NULL);
	skb->sk = sk;
	skb->destructor = netlink_skb_destructor;
	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
	sk_mem_charge(sk, skb->truesize);
}

static void netlink_sock_destruct(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (nlk->cb_running) {
		if (nlk->cb.done)
			nlk->cb.done(&nlk->cb);

		module_put(nlk->cb.module);
		kfree_skb(nlk->cb.skb);
	}

	skb_queue_purge(&sk->sk_receive_queue);
#ifdef CONFIG_NETLINK_MMAP
	if (1) {
		struct nl_mmap_req req;

		memset(&req, 0, sizeof(req));
		if (nlk->rx_ring.pg_vec)
			__netlink_set_ring(sk, &req, false, NULL, 0);
		memset(&req, 0, sizeof(req));
		if (nlk->tx_ring.pg_vec)
			__netlink_set_ring(sk, &req, true, NULL, 0);
	}
#endif /* CONFIG_NETLINK_MMAP */

	if (!sock_flag(sk, SOCK_DEAD)) {
		printk(KERN_ERR "Freeing alive netlink socket %p\n", sk);
		return;
	}

	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
	WARN_ON(atomic_read(&sk->sk_wmem_alloc));
	WARN_ON(nlk_sk(sk)->groups);
}

/* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it is _very_ bad on
 * SMP. Look, when several writers sleep and reader wakes them up, all but one
 * immediately hit write lock and grab all the cpus. Exclusive sleep solves
 * this, _but_ remember, it adds useless work on UP machines.
 */

void netlink_table_grab(void)
	__acquires(nl_table_lock)
{
	might_sleep();

	write_lock_irq(&nl_table_lock);

	if (atomic_read(&nl_table_users)) {
		DECLARE_WAITQUEUE(wait, current);

		add_wait_queue_exclusive(&nl_table_wait, &wait);
		for (;;) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (atomic_read(&nl_table_users) == 0)
				break;
			write_unlock_irq(&nl_table_lock);
			schedule();
			write_lock_irq(&nl_table_lock);
		}

		__set_current_state(TASK_RUNNING);
		remove_wait_queue(&nl_table_wait, &wait);
	}
}

void netlink_table_ungrab(void)
	__releases(nl_table_lock)
{
	write_unlock_irq(&nl_table_lock);
	wake_up(&nl_table_wait);
}

static inline void
netlink_lock_table(void)
{
	/* read_lock() synchronizes us to netlink_table_grab */

	read_lock(&nl_table_lock);
	atomic_inc(&nl_table_users);
	read_unlock(&nl_table_lock);
}

static inline void
netlink_unlock_table(void)
{
	if (atomic_dec_and_test(&nl_table_users))
		wake_up(&nl_table_wait);
}

struct netlink_compare_arg
{
	possible_net_t pnet;
	u32 portid;
};

/* Doing sizeof directly may yield 4 extra bytes on 64-bit. */
#define netlink_compare_arg_len \
	(offsetof(struct netlink_compare_arg, portid) + sizeof(u32))

static inline int netlink_compare(struct rhashtable_compare_arg *arg,
				  const void *ptr)
{
	const struct netlink_compare_arg *x = arg->key;
	const struct netlink_sock *nlk = ptr;

	return nlk->portid != x->portid ||
	       !net_eq(sock_net(&nlk->sk), read_pnet(&x->pnet));
}

static void netlink_compare_arg_init(struct netlink_compare_arg *arg,
				     struct net *net, u32 portid)
{
	memset(arg, 0, sizeof(*arg));
	write_pnet(&arg->pnet, net);
	arg->portid = portid;
}

static struct sock *__netlink_lookup(struct netlink_table *table, u32 portid,
				     struct net *net)
{
	struct netlink_compare_arg arg;

	netlink_compare_arg_init(&arg, net, portid);
	return rhashtable_lookup_fast(&table->hash, &arg,
				      netlink_rhashtable_params);
}

static int __netlink_insert(struct netlink_table *table, struct sock *sk)
{
	struct netlink_compare_arg arg;

	netlink_compare_arg_init(&arg, sock_net(sk), nlk_sk(sk)->portid);
	return rhashtable_lookup_insert_key(&table->hash, &arg,
					    &nlk_sk(sk)->node,
					    netlink_rhashtable_params);
}

static struct sock *netlink_lookup(struct net *net, int protocol, u32 portid)
{
	struct netlink_table *table = &nl_table[protocol];
	struct sock *sk;

	rcu_read_lock();
	sk = __netlink_lookup(table, portid, net);
	if (sk)
		sock_hold(sk);
	rcu_read_unlock();

	return sk;
}

static const struct proto_ops netlink_ops;

static void
netlink_update_listeners(struct sock *sk)
{
	struct netlink_table *tbl = &nl_table[sk->sk_protocol];
	unsigned long mask;
	unsigned int i;
	struct listeners *listeners;

	listeners = nl_deref_protected(tbl->listeners);
	if (!listeners)
		return;

	for (i = 0; i < NLGRPLONGS(tbl->groups); i++) {
		mask = 0;
		sk_for_each_bound(sk, &tbl->mc_list) {
			if (i < NLGRPLONGS(nlk_sk(sk)->ngroups))
				mask |= nlk_sk(sk)->groups[i];
		}
		listeners->masks[i] = mask;
	}
	/* this function is only called with the netlink table "grabbed", which
	 * makes sure updates are visible before bind or setsockopt return. */
}

static int netlink_insert(struct sock *sk, u32 portid)
{
	struct netlink_table *table = &nl_table[sk->sk_protocol];
	int err;

	lock_sock(sk);

	err = -EBUSY;
	if (nlk_sk(sk)->portid)
		goto err;

	err = -ENOMEM;
	if (BITS_PER_LONG > 32 &&
	    unlikely(atomic_read(&table->hash.nelems) >= UINT_MAX))
		goto err;

	nlk_sk(sk)->portid = portid;
	sock_hold(sk);

	err = __netlink_insert(table, sk);
	if (err) {
		/* In case the hashtable backend returns with -EBUSY
		 * from here, it must not escape to the caller.
		 */
		if (unlikely(err == -EBUSY))
			err = -EOVERFLOW;
		if (err == -EEXIST)
			err = -EADDRINUSE;
		nlk_sk(sk)->portid = 0;
		sock_put(sk);
	}

err:
	release_sock(sk);
	return err;
}

static void netlink_remove(struct sock *sk)
{
	struct netlink_table *table;

	table = &nl_table[sk->sk_protocol];
	if (!rhashtable_remove_fast(&table->hash, &nlk_sk(sk)->node,
				    netlink_rhashtable_params)) {
		WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
		__sock_put(sk);
	}

	netlink_table_grab();
	if (nlk_sk(sk)->subscriptions) {
		__sk_del_bind_node(sk);
		netlink_update_listeners(sk);
	}
	if (sk->sk_protocol == NETLINK_GENERIC)
		atomic_inc(&genl_sk_destructing_cnt);
	netlink_table_ungrab();
}

static struct proto netlink_proto = {
	.name	  = "NETLINK",
	.owner	  = THIS_MODULE,
	.obj_size = sizeof(struct netlink_sock),
};

static int __netlink_create(struct net *net, struct socket *sock,
			    struct mutex *cb_mutex, int protocol,
			    int kern)
{
	struct sock *sk;
	struct netlink_sock *nlk;

	sock->ops = &netlink_ops;

	sk = sk_alloc(net, PF_NETLINK, GFP_KERNEL, &netlink_proto, kern);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);

	nlk = nlk_sk(sk);
	if (cb_mutex) {
		nlk->cb_mutex = cb_mutex;
	} else {
		nlk->cb_mutex = &nlk->cb_def_mutex;
		mutex_init(nlk->cb_mutex);
	}
	init_waitqueue_head(&nlk->wait);
#ifdef CONFIG_NETLINK_MMAP
	mutex_init(&nlk->pg_vec_lock);
#endif

	sk->sk_destruct = netlink_sock_destruct;
	sk->sk_protocol = protocol;
	return 0;
}

static int netlink_create(struct net *net, struct socket *sock, int protocol,
			  int kern)
{
	struct module *module = NULL;
	struct mutex *cb_mutex;
	struct netlink_sock *nlk;
	int (*bind)(struct net *net, int group);
	void (*unbind)(struct net *net, int group);
	int err = 0;

	sock->state = SS_UNCONNECTED;

	if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
		return -ESOCKTNOSUPPORT;

	if (protocol < 0 || protocol >= MAX_LINKS)
		return -EPROTONOSUPPORT;

	netlink_lock_table();
#ifdef CONFIG_MODULES
	if (!nl_table[protocol].registered) {
		netlink_unlock_table();
		request_module("net-pf-%d-proto-%d", PF_NETLINK, protocol);
		netlink_lock_table();
	}
#endif
	if (nl_table[protocol].registered &&
	    try_module_get(nl_table[protocol].module))
		module = nl_table[protocol].module;
	else
		err = -EPROTONOSUPPORT;
	cb_mutex = nl_table[protocol].cb_mutex;
	bind = nl_table[protocol].bind;
	unbind = nl_table[protocol].unbind;
	netlink_unlock_table();

	if (err < 0)
		goto out;

	err = __netlink_create(net, sock, cb_mutex, protocol, kern);
	if (err < 0)
		goto out_module;

	local_bh_disable();
	sock_prot_inuse_add(net, &netlink_proto, 1);
	local_bh_enable();

	nlk = nlk_sk(sock->sk);
	nlk->module = module;
	nlk->netlink_bind = bind;
	nlk->netlink_unbind = unbind;
out:
	return err;

out_module:
	module_put(module);
	goto out;
}

static void deferred_put_nlk_sk(struct rcu_head *head)
{
	struct netlink_sock *nlk = container_of(head, struct netlink_sock, rcu);

	sock_put(&nlk->sk);
}

static int netlink_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk;

	if (!sk)
		return 0;

	netlink_remove(sk);
	sock_orphan(sk);
	nlk = nlk_sk(sk);

	/*
	 * OK. Socket is unlinked, any packets that arrive now
	 * will be purged.
	 */

	/* must not acquire netlink_table_lock in any way again before unbind
	 * and notifying genetlink is done as otherwise it might deadlock
	 */
	if (nlk->netlink_unbind) {
		int i;

		for (i = 0; i < nlk->ngroups; i++)
			if (test_bit(i, nlk->groups))
				nlk->netlink_unbind(sock_net(sk), i + 1);
	}
	if (sk->sk_protocol == NETLINK_GENERIC &&
	    atomic_dec_return(&genl_sk_destructing_cnt) == 0)
		wake_up(&genl_sk_destructing_waitq);

	sock->sk = NULL;
	wake_up_interruptible_all(&nlk->wait);

	skb_queue_purge(&sk->sk_write_queue);

	if (nlk->portid) {
		struct netlink_notify n = {
						.net = sock_net(sk),
						.protocol = sk->sk_protocol,
						.portid = nlk->portid,
					  };
		atomic_notifier_call_chain(&netlink_chain,
				NETLINK_URELEASE, &n);
	}

	module_put(nlk->module);

	if (netlink_is_kernel(sk)) {
		netlink_table_grab();
		BUG_ON(nl_table[sk->sk_protocol].registered == 0);
		if (--nl_table[sk->sk_protocol].registered == 0) {
			struct listeners *old;

			old = nl_deref_protected(nl_table[sk->sk_protocol].listeners);
			RCU_INIT_POINTER(nl_table[sk->sk_protocol].listeners, NULL);
			kfree_rcu(old, rcu);
			nl_table[sk->sk_protocol].module = NULL;
			nl_table[sk->sk_protocol].bind = NULL;
			nl_table[sk->sk_protocol].unbind = NULL;
			nl_table[sk->sk_protocol].flags = 0;
			nl_table[sk->sk_protocol].registered = 0;
		}
		netlink_table_ungrab();
	}

	kfree(nlk->groups);
	nlk->groups = NULL;

	local_bh_disable();
	sock_prot_inuse_add(sock_net(sk), &netlink_proto, -1);
	local_bh_enable();
	call_rcu(&nlk->rcu, deferred_put_nlk_sk);
	return 0;
}

static int netlink_autobind(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct netlink_table *table = &nl_table[sk->sk_protocol];
	s32 portid = task_tgid_vnr(current);
	int err;
	s32 rover = -4096;
	bool ok;

retry:
	cond_resched();
	rcu_read_lock();
	ok = !__netlink_lookup(table, portid, net);
	rcu_read_unlock();
	if (!ok) {
		/* Bind collision, search negative portid values. */
		if (rover == -4096)
			/* rover will be in range [S32_MIN, -4097] */
			rover = S32_MIN + prandom_u32_max(-4096 - S32_MIN);
		else if (rover >= -4096)
			rover = -4097;
		portid = rover--;
		goto retry;
	}

	err = netlink_insert(sk, portid);
	if (err == -EADDRINUSE)
		goto retry;

	/* If 2 threads race to autobind, that is fine. */
	if (err == -EBUSY)
		err = 0;

	return err;
}
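
/* Autobind first tries the caller's thread group id as the portid, which
 * preserves the traditional "portid == pid" behaviour for simple users. On a
 * collision it jumps to a randomly chosen value in [S32_MIN, -4097], since
 * negative portids are unlikely to be requested explicitly, and then walks
 * downwards from there until an unused one is found.
 */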

/**
 * __netlink_ns_capable - General netlink message capability test
 * @nsp: NETLINK_CB of the socket buffer holding a netlink command from userspace.
 * @user_ns: The user namespace of the capability to use
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket we received the message
 * from had the capability @cap when the netlink socket was created
 * and that the sender of the message has it in the user namespace
 * @user_ns.
 */
bool __netlink_ns_capable(const struct netlink_skb_parms *nsp,
			  struct user_namespace *user_ns, int cap)
{
	return ((nsp->flags & NETLINK_SKB_DST) ||
		file_ns_capable(nsp->sk->sk_socket->file, user_ns, cap)) &&
		ns_capable(user_ns, cap);
}
EXPORT_SYMBOL(__netlink_ns_capable);

/**
 * netlink_ns_capable - General netlink message capability test
 * @skb: socket buffer holding a netlink command from userspace
 * @user_ns: The user namespace of the capability to use
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket we received the message
 * from had the capability @cap when the netlink socket was created
 * and that the sender of the message has it in the user namespace
 * @user_ns.
 */
bool netlink_ns_capable(const struct sk_buff *skb,
			struct user_namespace *user_ns, int cap)
{
	return __netlink_ns_capable(&NETLINK_CB(skb), user_ns, cap);
}
EXPORT_SYMBOL(netlink_ns_capable);

/**
 * netlink_capable - Netlink global message capability test
 * @skb: socket buffer holding a netlink command from userspace
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket we received the message
 * from had the capability @cap when the netlink socket was created
 * and that the sender of the message has it in all user namespaces.
 */
bool netlink_capable(const struct sk_buff *skb, int cap)
{
	return netlink_ns_capable(skb, &init_user_ns, cap);
}
EXPORT_SYMBOL(netlink_capable);

/**
 * netlink_net_capable - Netlink network namespace message capability test
 * @skb: socket buffer holding a netlink command from userspace
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket we received the message
 * from had the capability @cap when the netlink socket was created
 * and that the sender of the message has it over the network namespace
 * of the socket we received the message from.
 */
bool netlink_net_capable(const struct sk_buff *skb, int cap)
{
	return netlink_ns_capable(skb, sock_net(skb->sk)->user_ns, cap);
}
EXPORT_SYMBOL(netlink_net_capable);

static inline int netlink_allowed(const struct socket *sock, unsigned int flag)
{
	return (nl_table[sock->sk->sk_protocol].flags & flag) ||
		ns_capable(sock_net(sock->sk)->user_ns, CAP_NET_ADMIN);
}

static void
netlink_update_subscriptions(struct sock *sk, unsigned int subscriptions)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (nlk->subscriptions && !subscriptions)
		__sk_del_bind_node(sk);
	else if (!nlk->subscriptions && subscriptions)
		sk_add_bind_node(sk, &nl_table[sk->sk_protocol].mc_list);
	nlk->subscriptions = subscriptions;
}

static int netlink_realloc_groups(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	unsigned int groups;
	unsigned long *new_groups;
	int err = 0;

	netlink_table_grab();

	groups = nl_table[sk->sk_protocol].groups;
	if (!nl_table[sk->sk_protocol].registered) {
		err = -ENOENT;
		goto out_unlock;
	}

	if (nlk->ngroups >= groups)
		goto out_unlock;

	new_groups = krealloc(nlk->groups, NLGRPSZ(groups), GFP_ATOMIC);
	if (new_groups == NULL) {
		err = -ENOMEM;
		goto out_unlock;
	}
	memset((char *)new_groups + NLGRPSZ(nlk->ngroups), 0,
	       NLGRPSZ(groups) - NLGRPSZ(nlk->ngroups));

	nlk->groups = new_groups;
	nlk->ngroups = groups;
out_unlock:
	netlink_table_ungrab();
	return err;
}

static void netlink_undo_bind(int group, long unsigned int groups,
			      struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	int undo;

	if (!nlk->netlink_unbind)
		return;

	for (undo = 0; undo < group; undo++)
		if (test_bit(undo, &groups))
			nlk->netlink_unbind(sock_net(sk), undo + 1);
}

static int netlink_bind(struct socket *sock, struct sockaddr *addr,
			int addr_len)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct netlink_sock *nlk = nlk_sk(sk);
	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
	int err;
	long unsigned int groups = nladdr->nl_groups;

	if (addr_len < sizeof(struct sockaddr_nl))
		return -EINVAL;

	if (nladdr->nl_family != AF_NETLINK)
		return -EINVAL;

	/* Only superuser is allowed to listen multicasts */
	if (groups) {
		if (!netlink_allowed(sock, NL_CFG_F_NONROOT_RECV))
			return -EPERM;
		err = netlink_realloc_groups(sk);
		if (err)
			return err;
	}

	if (nlk->portid)
		if (nladdr->nl_pid != nlk->portid)
			return -EINVAL;

	if (nlk->netlink_bind && groups) {
		int group;

		for (group = 0; group < nlk->ngroups; group++) {
			if (!test_bit(group, &groups))
				continue;
			err = nlk->netlink_bind(net, group + 1);
			if (!err)
				continue;
			netlink_undo_bind(group, groups, sk);
			return err;
		}
	}

	if (!nlk->portid) {
		err = nladdr->nl_pid ?
			netlink_insert(sk, nladdr->nl_pid) :
			netlink_autobind(sock);
		if (err) {
			netlink_undo_bind(nlk->ngroups, groups, sk);
			return err;
		}
	}

	if (!groups && (nlk->groups == NULL || !(u32)nlk->groups[0]))
		return 0;

	netlink_table_grab();
	netlink_update_subscriptions(sk, nlk->subscriptions +
					 hweight32(groups) -
					 hweight32(nlk->groups[0]));
	nlk->groups[0] = (nlk->groups[0] & ~0xffffffffUL) | groups;
	netlink_update_listeners(sk);
	netlink_table_ungrab();

	return 0;
}

static int netlink_connect(struct socket *sock, struct sockaddr *addr,
			   int alen, int flags)
{
	int err = 0;
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;

	if (alen < sizeof(addr->sa_family))
		return -EINVAL;

	if (addr->sa_family == AF_UNSPEC) {
		sk->sk_state	= NETLINK_UNCONNECTED;
		nlk->dst_portid	= 0;
		nlk->dst_group  = 0;
		return 0;
	}
	if (addr->sa_family != AF_NETLINK)
		return -EINVAL;

	if ((nladdr->nl_groups || nladdr->nl_pid) &&
	    !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND))
		return -EPERM;

	if (!nlk->portid)
		err = netlink_autobind(sock);

	if (err == 0) {
		sk->sk_state	= NETLINK_CONNECTED;
		nlk->dst_portid = nladdr->nl_pid;
		nlk->dst_group  = ffs(nladdr->nl_groups);
	}

	return err;
}

static int netlink_getname(struct socket *sock, struct sockaddr *addr,
			   int *addr_len, int peer)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_nl *, nladdr, addr);

	nladdr->nl_family = AF_NETLINK;
	nladdr->nl_pad = 0;
	*addr_len = sizeof(*nladdr);

	if (peer) {
		nladdr->nl_pid = nlk->dst_portid;
		nladdr->nl_groups = netlink_group_mask(nlk->dst_group);
	} else {
		nladdr->nl_pid = nlk->portid;
		nladdr->nl_groups = nlk->groups ? nlk->groups[0] : 0;
	}
	return 0;
}

static struct sock *netlink_getsockbyportid(struct sock *ssk, u32 portid)
{
	struct sock *sock;
	struct netlink_sock *nlk;

	sock = netlink_lookup(sock_net(ssk), ssk->sk_protocol, portid);
	if (!sock)
		return ERR_PTR(-ECONNREFUSED);

	/* Don't bother queuing skb if kernel socket has no input function */
	nlk = nlk_sk(sock);
	if (sock->sk_state == NETLINK_CONNECTED &&
	    nlk->dst_portid != nlk_sk(ssk)->portid) {
		sock_put(sock);
		return ERR_PTR(-ECONNREFUSED);
	}
	return sock;
}

struct sock *netlink_getsockbyfilp(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	struct sock *sock;

	if (!S_ISSOCK(inode->i_mode))
		return ERR_PTR(-ENOTSOCK);

	sock = SOCKET_I(inode)->sk;
	if (sock->sk_family != AF_NETLINK)
		return ERR_PTR(-EINVAL);

	sock_hold(sock);
	return sock;
}

static struct sk_buff *netlink_alloc_large_skb(unsigned int size,
					       int broadcast)
{
	struct sk_buff *skb;
	void *data;

	if (size <= NLMSG_GOODSIZE || broadcast)
		return alloc_skb(size, GFP_KERNEL);

	size = SKB_DATA_ALIGN(size) +
	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	data = vmalloc(size);
	if (data == NULL)
		return NULL;

	skb = __build_skb(data, size);
	if (skb == NULL)
		vfree(data);
	else
		skb->destructor = netlink_skb_destructor;

	return skb;
}
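
/* Messages above NLMSG_GOODSIZE get a vmalloc'ed data area here, which
 * netlink_skb_destructor() later vfree()s. Broadcasts always take the plain
 * alloc_skb() path, since the broadcast code clones the skb once per
 * listener.
 */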

/*
 * Attach a skb to a netlink socket.
 * The caller must hold a reference to the destination socket. On error, the
 * reference is dropped. The skb is not sent to the destination; only the
 * error checks are performed and memory in the queue is reserved.
 * Return values:
 * < 0: error. skb freed, reference to sock dropped.
 * 0: continue
 * 1: repeat lookup - reference dropped while waiting for socket memory.
 */
int netlink_attachskb(struct sock *sk, struct sk_buff *skb,
		      long *timeo, struct sock *ssk)
{
	struct netlink_sock *nlk;

	nlk = nlk_sk(sk);

	if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
	     test_bit(NETLINK_S_CONGESTED, &nlk->state)) &&
	    !netlink_skb_is_mmaped(skb)) {
		DECLARE_WAITQUEUE(wait, current);
		if (!*timeo) {
			if (!ssk || netlink_is_kernel(ssk))
				netlink_overrun(sk);
			sock_put(sk);
			kfree_skb(skb);
			return -EAGAIN;
		}

		__set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&nlk->wait, &wait);

		if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
		     test_bit(NETLINK_S_CONGESTED, &nlk->state)) &&
		    !sock_flag(sk, SOCK_DEAD))
			*timeo = schedule_timeout(*timeo);

		__set_current_state(TASK_RUNNING);
		remove_wait_queue(&nlk->wait, &wait);
		sock_put(sk);

		if (signal_pending(current)) {
			kfree_skb(skb);
			return sock_intr_errno(*timeo);
		}
		return 1;
	}
	netlink_skb_set_owner_r(skb, sk);
	return 0;
}

static int __netlink_sendskb(struct sock *sk, struct sk_buff *skb)
{
	int len = skb->len;

	netlink_deliver_tap(skb);

#ifdef CONFIG_NETLINK_MMAP
	if (netlink_skb_is_mmaped(skb))
		netlink_queue_mmaped_skb(sk, skb);
	else if (netlink_rx_is_mmaped(sk))
		netlink_ring_set_copied(sk, skb);
	else
#endif /* CONFIG_NETLINK_MMAP */
		skb_queue_tail(&sk->sk_receive_queue, skb);
	sk->sk_data_ready(sk);
	return len;
}

int netlink_sendskb(struct sock *sk, struct sk_buff *skb)
{
	int len = __netlink_sendskb(sk, skb);

	sock_put(sk);
	return len;
}

void netlink_detachskb(struct sock *sk, struct sk_buff *skb)
{
	kfree_skb(skb);
	sock_put(sk);
}

static struct sk_buff *netlink_trim(struct sk_buff *skb, gfp_t allocation)
{
	int delta;

	WARN_ON(skb->sk != NULL);
	if (netlink_skb_is_mmaped(skb))
		return skb;

	delta = skb->end - skb->tail;
	if (is_vmalloc_addr(skb->head) || delta * 2 < skb->truesize)
		return skb;

	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, allocation);
		if (!nskb)
			return skb;
		consume_skb(skb);
		skb = nskb;
	}

	if (!pskb_expand_head(skb, 0, -delta, allocation))
		skb->truesize -= delta;

	return skb;
}
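
/* netlink_trim() above reclaims the slack between skb->tail and skb->end
 * before a message is queued: netlink messages are frequently allocated
 * generously and underfilled, so shrinking the head with a negative tailroom
 * delta keeps receive-buffer accounting close to the real payload size.
 */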

static int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb,
				  struct sock *ssk)
{
	int ret;
	struct netlink_sock *nlk = nlk_sk(sk);

	ret = -ECONNREFUSED;
	if (nlk->netlink_rcv != NULL) {
		ret = skb->len;
		netlink_skb_set_owner_r(skb, sk);
		NETLINK_CB(skb).sk = ssk;
		netlink_deliver_tap_kernel(sk, ssk, skb);
		nlk->netlink_rcv(skb);
		consume_skb(skb);
	} else {
		kfree_skb(skb);
	}
	sock_put(sk);
	return ret;
}

int netlink_unicast(struct sock *ssk, struct sk_buff *skb,
		    u32 portid, int nonblock)
{
	struct sock *sk;
	int err;
	long timeo;

	skb = netlink_trim(skb, gfp_any());

	timeo = sock_sndtimeo(ssk, nonblock);
retry:
	sk = netlink_getsockbyportid(ssk, portid);
	if (IS_ERR(sk)) {
		kfree_skb(skb);
		return PTR_ERR(sk);
	}
	if (netlink_is_kernel(sk))
		return netlink_unicast_kernel(sk, skb, ssk);

	if (sk_filter(sk, skb)) {
		err = skb->len;
		kfree_skb(skb);
		sock_put(sk);
		return err;
	}

	err = netlink_attachskb(sk, skb, &timeo, ssk);
	if (err == 1)
		goto retry;
	if (err)
		return err;

	return netlink_sendskb(sk, skb);
}
EXPORT_SYMBOL(netlink_unicast);

struct sk_buff *__netlink_alloc_skb(struct sock *ssk, unsigned int size,
				    unsigned int ldiff, u32 dst_portid,
				    gfp_t gfp_mask)
{
#ifdef CONFIG_NETLINK_MMAP
	unsigned int maxlen, linear_size;
	struct sock *sk = NULL;
	struct sk_buff *skb;
	struct netlink_ring *ring;
	struct nl_mmap_hdr *hdr;

	sk = netlink_getsockbyportid(ssk, dst_portid);
	if (IS_ERR(sk))
		goto out;

	ring = &nlk_sk(sk)->rx_ring;
	/* fast-path without atomic ops for common case: non-mmaped receiver */
	if (ring->pg_vec == NULL)
		goto out_put;

	/* We need to account the full linear size needed as a ring
	 * slot cannot have non-linear parts.
	 */
	linear_size = size + ldiff;
	if (ring->frame_size - NL_MMAP_HDRLEN < linear_size)
		goto out_put;

	skb = alloc_skb_head(gfp_mask);
	if (skb == NULL)
		goto err1;

	spin_lock_bh(&sk->sk_receive_queue.lock);
	/* check again under lock */
	if (ring->pg_vec == NULL)
		goto out_free;

	/* check again under lock */
	maxlen = ring->frame_size - NL_MMAP_HDRLEN;
	if (maxlen < linear_size)
		goto out_free;

	netlink_forward_ring(ring);
	hdr = netlink_current_frame(ring, NL_MMAP_STATUS_UNUSED);
	if (hdr == NULL)
		goto err2;

	netlink_ring_setup_skb(skb, sk, ring, hdr);
	netlink_set_status(hdr, NL_MMAP_STATUS_RESERVED);
	atomic_inc(&ring->pending);
	netlink_increment_head(ring);

	spin_unlock_bh(&sk->sk_receive_queue.lock);
	return skb;

err2:
	kfree_skb(skb);
	spin_unlock_bh(&sk->sk_receive_queue.lock);
	netlink_overrun(sk);
err1:
	sock_put(sk);
	return NULL;

out_free:
	kfree_skb(skb);
	spin_unlock_bh(&sk->sk_receive_queue.lock);
out_put:
	sock_put(sk);
out:
#endif
	return alloc_skb(size, gfp_mask);
}
EXPORT_SYMBOL_GPL(__netlink_alloc_skb);

int netlink_has_listeners(struct sock *sk, unsigned int group)
{
	int res = 0;
	struct listeners *listeners;

	BUG_ON(!netlink_is_kernel(sk));

	rcu_read_lock();
	listeners = rcu_dereference(nl_table[sk->sk_protocol].listeners);

	if (listeners && group - 1 < nl_table[sk->sk_protocol].groups)
		res = test_bit(group - 1, listeners->masks);

	rcu_read_unlock();

	return res;
}
EXPORT_SYMBOL_GPL(netlink_has_listeners);
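
/* The usual caller pattern around netlink_has_listeners(): skip sending a
 * notification when nobody is subscribed to the group.  The socket "nlsk"
 * and group number 1 are illustrative assumptions.
 */
static void __maybe_unused example_notify_if_listening(struct sock *nlsk,
							struct sk_buff *skb)
{
	if (!netlink_has_listeners(nlsk, 1)) {
		kfree_skb(skb);
		return;
	}
	netlink_broadcast(nlsk, skb, 0, 1, GFP_KERNEL);
}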

static int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
	    !test_bit(NETLINK_S_CONGESTED, &nlk->state)) {
		netlink_skb_set_owner_r(skb, sk);
		__netlink_sendskb(sk, skb);
		return atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1);
	}
	return -1;
}

struct netlink_broadcast_data {
	struct sock *exclude_sk;
	struct net *net;
	u32 portid;
	u32 group;
	int failure;
	int delivery_failure;
	int congested;
	int delivered;
	gfp_t allocation;
	struct sk_buff *skb, *skb2;
	int (*tx_filter)(struct sock *dsk, struct sk_buff *skb, void *data);
	void *tx_data;
};

static void do_one_broadcast(struct sock *sk,
			     struct netlink_broadcast_data *p)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	int val;

	if (p->exclude_sk == sk)
		return;

	if (nlk->portid == p->portid || p->group - 1 >= nlk->ngroups ||
	    !test_bit(p->group - 1, nlk->groups))
		return;

	if (!net_eq(sock_net(sk), p->net)) {
		if (!(nlk->flags & NETLINK_F_LISTEN_ALL_NSID))
			return;

		if (!peernet_has_id(sock_net(sk), p->net))
			return;

		if (!file_ns_capable(sk->sk_socket->file, p->net->user_ns,
				     CAP_NET_BROADCAST))
			return;
	}

	if (p->failure) {
		netlink_overrun(sk);
		return;
	}

	sock_hold(sk);
	if (p->skb2 == NULL) {
		if (skb_shared(p->skb)) {
			p->skb2 = skb_clone(p->skb, p->allocation);
		} else {
			p->skb2 = skb_get(p->skb);
			/*
			 * skb ownership may have been set when
			 * delivered to a previous socket.
			 */
			skb_orphan(p->skb2);
		}
	}
	if (p->skb2 == NULL) {
		netlink_overrun(sk);
		/* Clone failed. Notify ALL listeners. */
		p->failure = 1;
		if (nlk->flags & NETLINK_F_BROADCAST_SEND_ERROR)
			p->delivery_failure = 1;
		goto out;
	}
	if (p->tx_filter && p->tx_filter(sk, p->skb2, p->tx_data)) {
		kfree_skb(p->skb2);
		p->skb2 = NULL;
		goto out;
	}
	if (sk_filter(sk, p->skb2)) {
		kfree_skb(p->skb2);
		p->skb2 = NULL;
		goto out;
	}
	NETLINK_CB(p->skb2).nsid = peernet2id(sock_net(sk), p->net);
	NETLINK_CB(p->skb2).nsid_is_set = true;
	val = netlink_broadcast_deliver(sk, p->skb2);
	if (val < 0) {
		netlink_overrun(sk);
		if (nlk->flags & NETLINK_F_BROADCAST_SEND_ERROR)
			p->delivery_failure = 1;
	} else {
		p->congested |= val;
		p->delivered = 1;
		p->skb2 = NULL;
	}
out:
	sock_put(sk);
}

int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, u32 portid,
	u32 group, gfp_t allocation,
	int (*filter)(struct sock *dsk, struct sk_buff *skb, void *data),
	void *filter_data)
{
	struct net *net = sock_net(ssk);
	struct netlink_broadcast_data info;
	struct sock *sk;

	skb = netlink_trim(skb, allocation);

	info.exclude_sk = ssk;
	info.net = net;
	info.portid = portid;
	info.group = group;
	info.failure = 0;
	info.delivery_failure = 0;
	info.congested = 0;
	info.delivered = 0;
	info.allocation = allocation;
	info.skb = skb;
	info.skb2 = NULL;
	info.tx_filter = filter;
	info.tx_data = filter_data;

	/* While we sleep in clone, do not allow the socket list to change */

	netlink_lock_table();

	sk_for_each_bound(sk, &nl_table[ssk->sk_protocol].mc_list)
		do_one_broadcast(sk, &info);

	consume_skb(skb);

	netlink_unlock_table();

	if (info.delivery_failure) {
		kfree_skb(info.skb2);
		return -ENOBUFS;
	}
	consume_skb(info.skb2);

	if (info.delivered) {
		if (info.congested && (allocation & __GFP_WAIT))
			yield();
		return 0;
	}
	return -ESRCH;
}
EXPORT_SYMBOL(netlink_broadcast_filtered);

int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 portid,
		      u32 group, gfp_t allocation)
{
	return netlink_broadcast_filtered(ssk, skb, portid, group, allocation,
					  NULL, NULL);
}
EXPORT_SYMBOL(netlink_broadcast);
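
/* Sketch of a tx_filter callback for netlink_broadcast_filtered(): a
 * nonzero return suppresses delivery to that one destination socket (see
 * do_one_broadcast() above).  The portid threshold is an invented example
 * predicate, not anything this file defines.
 */
static int __maybe_unused example_bcast_filter(struct sock *dsk,
					       struct sk_buff *skb, void *data)
{
	u32 *min_portid = data;

	/* Drop the message for sockets below an arbitrary portid. */
	return nlk_sk(dsk)->portid < *min_portid;
}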

struct netlink_set_err_data {
	struct sock *exclude_sk;
	u32 portid;
	u32 group;
	int code;
};

static int do_one_set_err(struct sock *sk, struct netlink_set_err_data *p)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	int ret = 0;

	if (sk == p->exclude_sk)
		goto out;

	if (!net_eq(sock_net(sk), sock_net(p->exclude_sk)))
		goto out;

	if (nlk->portid == p->portid || p->group - 1 >= nlk->ngroups ||
	    !test_bit(p->group - 1, nlk->groups))
		goto out;

	if (p->code == ENOBUFS && nlk->flags & NETLINK_F_RECV_NO_ENOBUFS) {
		ret = 1;
		goto out;
	}

	sk->sk_err = p->code;
	sk->sk_error_report(sk);
out:
	return ret;
}

/**
 * netlink_set_err - report error to broadcast listeners
 * @ssk: the kernel netlink socket, as returned by netlink_kernel_create()
 * @portid: the PORTID of a process that we want to skip (if any)
 * @group: the broadcast group that will notice the error
 * @code: error code, must be negative (as usual in kernelspace)
 *
 * This function returns the number of broadcast listeners that have set the
 * NETLINK_NO_ENOBUFS socket option.
 */
int netlink_set_err(struct sock *ssk, u32 portid, u32 group, int code)
{
	struct netlink_set_err_data info;
	struct sock *sk;
	int ret = 0;

	info.exclude_sk = ssk;
	info.portid = portid;
	info.group = group;
	/* sk->sk_err wants a positive error value */
	info.code = -code;

	read_lock(&nl_table_lock);

	sk_for_each_bound(sk, &nl_table[ssk->sk_protocol].mc_list)
		ret += do_one_set_err(sk, &info);

	read_unlock(&nl_table_lock);
	return ret;
}
EXPORT_SYMBOL(netlink_set_err);

/* must be called with netlink table grabbed */
static void netlink_update_socket_mc(struct netlink_sock *nlk,
				     unsigned int group,
				     int is_new)
{
	int old, new = !!is_new, subscriptions;

	old = test_bit(group - 1, nlk->groups);
	subscriptions = nlk->subscriptions - old + new;
	if (new)
		__set_bit(group - 1, nlk->groups);
	else
		__clear_bit(group - 1, nlk->groups);
	netlink_update_subscriptions(&nlk->sk, subscriptions);
	netlink_update_listeners(&nlk->sk);
}

static int netlink_setsockopt(struct socket *sock, int level, int optname,
			      char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	unsigned int val = 0;
	int err;

	if (level != SOL_NETLINK)
		return -ENOPROTOOPT;

	if (optname != NETLINK_RX_RING && optname != NETLINK_TX_RING &&
	    optlen >= sizeof(int) &&
	    get_user(val, (unsigned int __user *)optval))
		return -EFAULT;

	switch (optname) {
	case NETLINK_PKTINFO:
		if (val)
			nlk->flags |= NETLINK_F_RECV_PKTINFO;
		else
			nlk->flags &= ~NETLINK_F_RECV_PKTINFO;
		err = 0;
		break;
	case NETLINK_ADD_MEMBERSHIP:
	case NETLINK_DROP_MEMBERSHIP: {
		if (!netlink_allowed(sock, NL_CFG_F_NONROOT_RECV))
			return -EPERM;
		err = netlink_realloc_groups(sk);
		if (err)
			return err;
		if (!val || val - 1 >= nlk->ngroups)
			return -EINVAL;
		if (optname == NETLINK_ADD_MEMBERSHIP && nlk->netlink_bind) {
			err = nlk->netlink_bind(sock_net(sk), val);
			if (err)
				return err;
		}
		netlink_table_grab();
		netlink_update_socket_mc(nlk, val,
					 optname == NETLINK_ADD_MEMBERSHIP);
		netlink_table_ungrab();
		if (optname == NETLINK_DROP_MEMBERSHIP && nlk->netlink_unbind)
			nlk->netlink_unbind(sock_net(sk), val);

		err = 0;
		break;
	}
	case NETLINK_BROADCAST_ERROR:
		if (val)
			nlk->flags |= NETLINK_F_BROADCAST_SEND_ERROR;
		else
			nlk->flags &= ~NETLINK_F_BROADCAST_SEND_ERROR;
		err = 0;
		break;
	case NETLINK_NO_ENOBUFS:
		if (val) {
			nlk->flags |= NETLINK_F_RECV_NO_ENOBUFS;
			clear_bit(NETLINK_S_CONGESTED, &nlk->state);
			wake_up_interruptible(&nlk->wait);
		} else {
			nlk->flags &= ~NETLINK_F_RECV_NO_ENOBUFS;
		}
		err = 0;
		break;
#ifdef CONFIG_NETLINK_MMAP
	case NETLINK_RX_RING:
	case NETLINK_TX_RING: {
		struct nl_mmap_req req;

		/* Rings might consume more memory than queue limits, require
		 * CAP_NET_ADMIN.
		 */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (optlen < sizeof(req))
			return -EINVAL;
		if (copy_from_user(&req, optval, sizeof(req)))
			return -EFAULT;
		err = netlink_set_ring(sk, &req,
				       optname == NETLINK_TX_RING);
		break;
	}
#endif /* CONFIG_NETLINK_MMAP */
	case NETLINK_LISTEN_ALL_NSID:
		if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_BROADCAST))
			return -EPERM;

		if (val)
			nlk->flags |= NETLINK_F_LISTEN_ALL_NSID;
		else
			nlk->flags &= ~NETLINK_F_LISTEN_ALL_NSID;
		err = 0;
		break;
	case NETLINK_CAP_ACK:
		if (val)
			nlk->flags |= NETLINK_F_CAP_ACK;
		else
			nlk->flags &= ~NETLINK_F_CAP_ACK;
		err = 0;
		break;
	default:
		err = -ENOPROTOOPT;
	}
	return err;
}
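
/* Userspace-side sketch of the membership options handled above, assuming
 * an already created and bound AF_NETLINK socket "fd"; illustrative only,
 * hence guarded out of the kernel build.
 */
#if 0
#include <sys/socket.h>
#include <linux/netlink.h>

static int example_join_group(int fd, unsigned int group)
{
	int on = 1;

	if (setsockopt(fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP,
		       &group, sizeof(group)) < 0)
		return -1;
	/* Tolerate missed broadcasts instead of receiving ENOBUFS. */
	return setsockopt(fd, SOL_NETLINK, NETLINK_NO_ENOBUFS,
			  &on, sizeof(on));
}
#endif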

static int netlink_getsockopt(struct socket *sock, int level, int optname,
			      char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	int len, val, err;

	if (level != SOL_NETLINK)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case NETLINK_PKTINFO:
		if (len < sizeof(int))
			return -EINVAL;
		len = sizeof(int);
		val = nlk->flags & NETLINK_F_RECV_PKTINFO ? 1 : 0;
		if (put_user(len, optlen) ||
		    put_user(val, optval))
			return -EFAULT;
		err = 0;
		break;
	case NETLINK_BROADCAST_ERROR:
		if (len < sizeof(int))
			return -EINVAL;
		len = sizeof(int);
		val = nlk->flags & NETLINK_F_BROADCAST_SEND_ERROR ? 1 : 0;
		if (put_user(len, optlen) ||
		    put_user(val, optval))
			return -EFAULT;
		err = 0;
		break;
	case NETLINK_NO_ENOBUFS:
		if (len < sizeof(int))
			return -EINVAL;
		len = sizeof(int);
		val = nlk->flags & NETLINK_F_RECV_NO_ENOBUFS ? 1 : 0;
		if (put_user(len, optlen) ||
		    put_user(val, optval))
			return -EFAULT;
		err = 0;
		break;
	case NETLINK_LIST_MEMBERSHIPS: {
		int pos, idx, shift;

		err = 0;
		netlink_table_grab();
		for (pos = 0; pos * 8 < nlk->ngroups; pos += sizeof(u32)) {
			if (len - pos < sizeof(u32))
				break;

			idx = pos / sizeof(unsigned long);
			shift = (pos % sizeof(unsigned long)) * 8;
			if (put_user((u32)(nlk->groups[idx] >> shift),
				     (u32 __user *)(optval + pos))) {
				err = -EFAULT;
				break;
			}
		}
		if (put_user(ALIGN(nlk->ngroups / 8, sizeof(u32)), optlen))
			err = -EFAULT;
		netlink_table_ungrab();
		break;
	}
	case NETLINK_CAP_ACK:
		if (len < sizeof(int))
			return -EINVAL;
		len = sizeof(int);
		val = nlk->flags & NETLINK_F_CAP_ACK ? 1 : 0;
		if (put_user(len, optlen) ||
		    put_user(val, optval))
			return -EFAULT;
		err = 0;
		break;
	default:
		err = -ENOPROTOOPT;
	}
	return err;
}
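
/* Userspace-side sketch for NETLINK_LIST_MEMBERSHIPS above: the kernel
 * copies out the group bitmap as u32 words and reports the size it wanted
 * to write back through optlen (illustrative only).
 */
#if 0
static int example_list_memberships(int fd)
{
	unsigned int groups[64] = { 0 };	/* 32 groups per word */
	socklen_t len = sizeof(groups);

	if (getsockopt(fd, SOL_NETLINK, NETLINK_LIST_MEMBERSHIPS,
		       groups, &len) < 0)
		return -1;
	/* "len" now holds the byte count the kernel wanted to write. */
	return 0;
}
#endif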

static void netlink_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb)
{
	struct nl_pktinfo info;

	info.group = NETLINK_CB(skb).dst_group;
	put_cmsg(msg, SOL_NETLINK, NETLINK_PKTINFO, sizeof(info), &info);
}

static void netlink_cmsg_listen_all_nsid(struct sock *sk, struct msghdr *msg,
					 struct sk_buff *skb)
{
	if (!NETLINK_CB(skb).nsid_is_set)
		return;

	put_cmsg(msg, SOL_NETLINK, NETLINK_LISTEN_ALL_NSID, sizeof(int),
		 &NETLINK_CB(skb).nsid);
}

static int netlink_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_nl *, addr, msg->msg_name);
	u32 dst_portid;
	u32 dst_group;
	struct sk_buff *skb;
	int err;
	struct scm_cookie scm;
	u32 netlink_skb_flags = 0;

	if (msg->msg_flags&MSG_OOB)
		return -EOPNOTSUPP;

	err = scm_send(sock, msg, &scm, true);
	if (err < 0)
		return err;

	if (msg->msg_namelen) {
		err = -EINVAL;
		if (addr->nl_family != AF_NETLINK)
			goto out;
		dst_portid = addr->nl_pid;
		dst_group = ffs(addr->nl_groups);
		err = -EPERM;
		if ((dst_group || dst_portid) &&
		    !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND))
			goto out;
		netlink_skb_flags |= NETLINK_SKB_DST;
	} else {
		dst_portid = nlk->dst_portid;
		dst_group = nlk->dst_group;
	}

	if (!nlk->portid) {
		err = netlink_autobind(sock);
		if (err)
			goto out;
	}

	/* It's a really convoluted way for userland to ask for mmaped
	 * sendmsg(), but that's what we've got...
	 */
	if (netlink_tx_is_mmaped(sk) &&
	    iter_is_iovec(&msg->msg_iter) &&
	    msg->msg_iter.nr_segs == 1 &&
	    msg->msg_iter.iov->iov_base == NULL) {
		err = netlink_mmap_sendmsg(sk, msg, dst_portid, dst_group,
					   &scm);
		goto out;
	}

	err = -EMSGSIZE;
	if (len > sk->sk_sndbuf - 32)
		goto out;
	err = -ENOBUFS;
	skb = netlink_alloc_large_skb(len, dst_group);
	if (skb == NULL)
		goto out;

	NETLINK_CB(skb).portid = nlk->portid;
	NETLINK_CB(skb).dst_group = dst_group;
	NETLINK_CB(skb).creds = scm.creds;
	NETLINK_CB(skb).flags = netlink_skb_flags;

	err = -EFAULT;
	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
		kfree_skb(skb);
		goto out;
	}

	err = security_netlink_send(sk, skb);
	if (err) {
		kfree_skb(skb);
		goto out;
	}

	if (dst_group) {
		atomic_inc(&skb->users);
		netlink_broadcast(sk, skb, dst_portid, dst_group, GFP_KERNEL);
	}
	err = netlink_unicast(sk, skb, dst_portid, msg->msg_flags&MSG_DONTWAIT);

out:
	scm_destroy(&scm);
	return err;
}

static int netlink_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
			   int flags)
{
	struct scm_cookie scm;
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	int noblock = flags&MSG_DONTWAIT;
	size_t copied;
	struct sk_buff *skb, *data_skb;
	int err, ret;

	if (flags&MSG_OOB)
		return -EOPNOTSUPP;

	copied = 0;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (skb == NULL)
		goto out;

	data_skb = skb;

#ifdef CONFIG_COMPAT_NETLINK_MESSAGES
	if (unlikely(skb_shinfo(skb)->frag_list)) {
		/*
		 * If this skb has a frag_list, it means we will have to
		 * use the frag_list skb's data for compat tasks
		 * and the regular skb's data for normal (non-compat) tasks.
		 *
		 * If we need to send the compat skb, assign it to the
		 * 'data_skb' variable so that it will be used below for data
		 * copying. We keep 'skb' for everything else, including
		 * freeing both later.
		 */
		if (flags & MSG_CMSG_COMPAT)
			data_skb = skb_shinfo(skb)->frag_list;
	}
#endif

	/* Record the max length of recvmsg() calls for future allocations */
	nlk->max_recvmsg_len = max(nlk->max_recvmsg_len, len);
	nlk->max_recvmsg_len = min_t(size_t, nlk->max_recvmsg_len,
				     16384);

	copied = data_skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(data_skb);
	err = skb_copy_datagram_msg(data_skb, 0, msg, copied);

	if (msg->msg_name) {
		DECLARE_SOCKADDR(struct sockaddr_nl *, addr, msg->msg_name);
		addr->nl_family = AF_NETLINK;
		addr->nl_pad = 0;
		addr->nl_pid = NETLINK_CB(skb).portid;
		addr->nl_groups = netlink_group_mask(NETLINK_CB(skb).dst_group);
		msg->msg_namelen = sizeof(*addr);
	}

	if (nlk->flags & NETLINK_F_RECV_PKTINFO)
		netlink_cmsg_recv_pktinfo(msg, skb);
	if (nlk->flags & NETLINK_F_LISTEN_ALL_NSID)
		netlink_cmsg_listen_all_nsid(sk, msg, skb);

	memset(&scm, 0, sizeof(scm));
	scm.creds = *NETLINK_CREDS(skb);
	if (flags & MSG_TRUNC)
		copied = data_skb->len;

	skb_free_datagram(sk, skb);

	if (nlk->cb_running &&
	    atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) {
		ret = netlink_dump(sk);
		if (ret) {
			sk->sk_err = -ret;
			sk->sk_error_report(sk);
		}
	}

	scm_recv(sock, msg, &scm, flags);
out:
	netlink_rcv_wake(sk);
	return err ? : copied;
}
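
/* Userspace-side sketch of draining one netlink datagram as delivered by
 * netlink_recvmsg() above, using the classic NLMSG_OK()/NLMSG_NEXT()
 * iteration over the packed messages (illustrative only).
 */
#if 0
static void example_recv_loop(int fd)
{
	char buf[16384];
	struct iovec iov = { buf, sizeof(buf) };
	struct sockaddr_nl sa;
	struct msghdr msg = { &sa, sizeof(sa), &iov, 1, NULL, 0, 0 };
	struct nlmsghdr *nh;
	ssize_t len;

	len = recvmsg(fd, &msg, 0);
	for (nh = (struct nlmsghdr *)buf; len > 0 && NLMSG_OK(nh, len);
	     nh = NLMSG_NEXT(nh, len)) {
		if (nh->nlmsg_type == NLMSG_DONE)
			return;	/* end of a multipart dump */
		if (nh->nlmsg_type == NLMSG_ERROR)
			return;	/* payload is a struct nlmsgerr */
		/* process nh's payload here */
	}
}
#endif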

static void netlink_data_ready(struct sock *sk)
{
	BUG();
}

/*
 * We export these functions to other modules. They provide a
 * complete set of kernel non-blocking support for message
 * queueing.
 */

struct sock *
__netlink_kernel_create(struct net *net, int unit, struct module *module,
			struct netlink_kernel_cfg *cfg)
{
	struct socket *sock;
	struct sock *sk;
	struct netlink_sock *nlk;
	struct listeners *listeners = NULL;
	struct mutex *cb_mutex = cfg ? cfg->cb_mutex : NULL;
	unsigned int groups;

	BUG_ON(!nl_table);

	if (unit < 0 || unit >= MAX_LINKS)
		return NULL;

	if (sock_create_lite(PF_NETLINK, SOCK_DGRAM, unit, &sock))
		return NULL;

	if (__netlink_create(net, sock, cb_mutex, unit, 1) < 0)
		goto out_sock_release_nosk;

	sk = sock->sk;

	if (!cfg || cfg->groups < 32)
		groups = 32;
	else
		groups = cfg->groups;

	listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL);
	if (!listeners)
		goto out_sock_release;

	sk->sk_data_ready = netlink_data_ready;
	if (cfg && cfg->input)
		nlk_sk(sk)->netlink_rcv = cfg->input;

	if (netlink_insert(sk, 0))
		goto out_sock_release;

	nlk = nlk_sk(sk);
	nlk->flags |= NETLINK_F_KERNEL_SOCKET;

	netlink_table_grab();
	if (!nl_table[unit].registered) {
		nl_table[unit].groups = groups;
		rcu_assign_pointer(nl_table[unit].listeners, listeners);
		nl_table[unit].cb_mutex = cb_mutex;
		nl_table[unit].module = module;
		if (cfg) {
			nl_table[unit].bind = cfg->bind;
			nl_table[unit].unbind = cfg->unbind;
			nl_table[unit].flags = cfg->flags;
			if (cfg->compare)
				nl_table[unit].compare = cfg->compare;
		}
		nl_table[unit].registered = 1;
	} else {
		kfree(listeners);
		nl_table[unit].registered++;
	}
	netlink_table_ungrab();
	return sk;

out_sock_release:
	kfree(listeners);
	netlink_kernel_release(sk);
	return NULL;

out_sock_release_nosk:
	sock_release(sock);
	return NULL;
}
EXPORT_SYMBOL(__netlink_kernel_create);

void
netlink_kernel_release(struct sock *sk)
{
	if (sk == NULL || sk->sk_socket == NULL)
		return;

	sock_release(sk->sk_socket);
}
EXPORT_SYMBOL(netlink_kernel_release);
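
/* A minimal sketch of the create/release pairing above, as a module would
 * use it via the netlink_kernel_create() wrapper.  The protocol unit
 * (NETLINK_USERSOCK stands in here) and all "example_*" names are
 * illustrative assumptions; real users pick their own protocol unit.
 */
static struct sock *example_nlsk;

static void example_kernel_input(struct sk_buff *skb)
{
	/* called once per sendmsg() from userspace; typically hands the
	 * skb to netlink_rcv_skb(), see further below */
}

static int __maybe_unused example_netlink_init(void)
{
	struct netlink_kernel_cfg cfg = {
		.groups	= 32,
		.input	= example_kernel_input,
	};

	example_nlsk = netlink_kernel_create(&init_net, NETLINK_USERSOCK, &cfg);
	return example_nlsk ? 0 : -ENOMEM;
}

static void __maybe_unused example_netlink_exit(void)
{
	netlink_kernel_release(example_nlsk);
}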

int __netlink_change_ngroups(struct sock *sk, unsigned int groups)
{
	struct listeners *new, *old;
	struct netlink_table *tbl = &nl_table[sk->sk_protocol];

	if (groups < 32)
		groups = 32;

	if (NLGRPSZ(tbl->groups) < NLGRPSZ(groups)) {
		new = kzalloc(sizeof(*new) + NLGRPSZ(groups), GFP_ATOMIC);
		if (!new)
			return -ENOMEM;
		old = nl_deref_protected(tbl->listeners);
		memcpy(new->masks, old->masks, NLGRPSZ(tbl->groups));
		rcu_assign_pointer(tbl->listeners, new);

		kfree_rcu(old, rcu);
	}
	tbl->groups = groups;

	return 0;
}

/**
 * netlink_change_ngroups - change number of multicast groups
 *
 * This changes the number of multicast groups that are available
 * on a certain netlink family. Note that it is not possible to
 * change the number of groups to below 32. Also note that it does
 * not implicitly call netlink_clear_multicast_users() when the
 * number of groups is reduced.
 *
 * @sk: The kernel netlink socket, as returned by netlink_kernel_create().
 * @groups: The new number of groups.
 */
int netlink_change_ngroups(struct sock *sk, unsigned int groups)
{
	int err;

	netlink_table_grab();
	err = __netlink_change_ngroups(sk, groups);
	netlink_table_ungrab();

	return err;
}

void __netlink_clear_multicast_users(struct sock *ksk, unsigned int group)
{
	struct sock *sk;
	struct netlink_table *tbl = &nl_table[ksk->sk_protocol];

	sk_for_each_bound(sk, &tbl->mc_list)
		netlink_update_socket_mc(nlk_sk(sk), group, 0);
}

struct nlmsghdr *
__nlmsg_put(struct sk_buff *skb, u32 portid, u32 seq, int type, int len, int flags)
{
	struct nlmsghdr *nlh;
	int size = nlmsg_msg_size(len);

	nlh = (struct nlmsghdr *)skb_put(skb, NLMSG_ALIGN(size));
	nlh->nlmsg_type = type;
	nlh->nlmsg_len = size;
	nlh->nlmsg_flags = flags;
	nlh->nlmsg_pid = portid;
	nlh->nlmsg_seq = seq;
	if (!__builtin_constant_p(size) || NLMSG_ALIGN(size) - size != 0)
		memset(nlmsg_data(nlh) + len, 0, NLMSG_ALIGN(size) - size);
	return nlh;
}
EXPORT_SYMBOL(__nlmsg_put);
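
/* Sketch of how a message is usually composed on top of __nlmsg_put():
 * the nlmsg_new()/nlmsg_put()/nlmsg_end() helpers from <net/netlink.h>.
 * The message type and u32 payload are illustrative.
 */
static struct sk_buff *__maybe_unused example_build_msg(u32 portid, u32 seq)
{
	struct sk_buff *skb;
	struct nlmsghdr *nlh;
	u32 payload = 42;

	skb = nlmsg_new(sizeof(payload), GFP_KERNEL);
	if (!skb)
		return NULL;

	nlh = nlmsg_put(skb, portid, seq, NLMSG_MIN_TYPE, sizeof(payload), 0);
	if (!nlh) {
		nlmsg_free(skb);
		return NULL;
	}
	memcpy(nlmsg_data(nlh), &payload, sizeof(payload));
	nlmsg_end(skb, nlh);
	return skb;
}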

/*
 * It looks a bit ugly.
 * It would be better to create a kernel thread.
 */

static int netlink_dump(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	struct netlink_callback *cb;
	struct sk_buff *skb = NULL;
	struct nlmsghdr *nlh;
	int len, err = -ENOBUFS;
	int alloc_size;

	mutex_lock(nlk->cb_mutex);
	if (!nlk->cb_running) {
		err = -EINVAL;
		goto errout_skb;
	}

	cb = &nlk->cb;
	alloc_size = max_t(int, cb->min_dump_alloc, NLMSG_GOODSIZE);

	if (!netlink_rx_is_mmaped(sk) &&
	    atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
		goto errout_skb;

	/* NLMSG_GOODSIZE is small to avoid high order allocations being
	 * required, but it makes sense to _attempt_ a 16K bytes allocation
	 * to reduce number of system calls on dump operations, if user
	 * ever provided a big enough buffer.
	 */
	if (alloc_size < nlk->max_recvmsg_len) {
		skb = netlink_alloc_skb(sk,
					nlk->max_recvmsg_len,
					nlk->portid,
					GFP_KERNEL |
					__GFP_NOWARN |
					__GFP_NORETRY);
		/* available room should be exact amount to avoid MSG_TRUNC */
		if (skb)
			skb_reserve(skb, skb_tailroom(skb) -
					 nlk->max_recvmsg_len);
	}
	if (!skb)
		skb = netlink_alloc_skb(sk, alloc_size, nlk->portid,
					GFP_KERNEL);
	if (!skb)
		goto errout_skb;
	netlink_skb_set_owner_r(skb, sk);

	len = cb->dump(skb, cb);

	if (len > 0) {
		mutex_unlock(nlk->cb_mutex);

		if (sk_filter(sk, skb))
			kfree_skb(skb);
		else
			__netlink_sendskb(sk, skb);
		return 0;
	}

	nlh = nlmsg_put_answer(skb, cb, NLMSG_DONE, sizeof(len), NLM_F_MULTI);
	if (!nlh)
		goto errout_skb;

	nl_dump_check_consistent(cb, nlh);

	memcpy(nlmsg_data(nlh), &len, sizeof(len));

	if (sk_filter(sk, skb))
		kfree_skb(skb);
	else
		__netlink_sendskb(sk, skb);

	if (cb->done)
		cb->done(cb);

	nlk->cb_running = false;
	mutex_unlock(nlk->cb_mutex);
	module_put(cb->module);
	consume_skb(cb->skb);
	return 0;

errout_skb:
	mutex_unlock(nlk->cb_mutex);
	kfree_skb(skb);
	return err;
}

int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
			 const struct nlmsghdr *nlh,
			 struct netlink_dump_control *control)
{
	struct netlink_callback *cb;
	struct sock *sk;
	struct netlink_sock *nlk;
	int ret;

	/* Memory mapped dump requests need to be copied to avoid looping
	 * on the pending state in netlink_mmap_sendmsg() while the CB holds
	 * a reference to the skb.
	 */
	if (netlink_skb_is_mmaped(skb)) {
		skb = skb_copy(skb, GFP_KERNEL);
		if (skb == NULL)
			return -ENOBUFS;
	} else
		atomic_inc(&skb->users);

	sk = netlink_lookup(sock_net(ssk), ssk->sk_protocol, NETLINK_CB(skb).portid);
	if (sk == NULL) {
		ret = -ECONNREFUSED;
		goto error_free;
	}

	nlk = nlk_sk(sk);
	mutex_lock(nlk->cb_mutex);
	/* A dump is in progress... */
	if (nlk->cb_running) {
		ret = -EBUSY;
		goto error_unlock;
	}
	/* add reference of module which cb->dump belongs to */
	if (!try_module_get(control->module)) {
		ret = -EPROTONOSUPPORT;
		goto error_unlock;
	}

	cb = &nlk->cb;
	memset(cb, 0, sizeof(*cb));
	cb->dump = control->dump;
	cb->done = control->done;
	cb->nlh = nlh;
	cb->data = control->data;
	cb->module = control->module;
	cb->min_dump_alloc = control->min_dump_alloc;
	cb->skb = skb;

	nlk->cb_running = true;

	mutex_unlock(nlk->cb_mutex);

	ret = netlink_dump(sk);
	sock_put(sk);

	if (ret)
		return ret;

	/* We successfully started a dump, by returning -EINTR we
	 * signal not to send ACK even if it was requested.
	 */
	return -EINTR;

error_unlock:
	sock_put(sk);
	mutex_unlock(nlk->cb_mutex);
error_free:
	kfree_skb(skb);
	return ret;
}
EXPORT_SYMBOL(__netlink_dump_start);
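
/* Sketch of the contract netlink_dump() expects from a cb->dump handler:
 * emit as many complete messages as fit, return a positive byte count
 * while more data remains, and 0 once the dump is finished.  The 16-item
 * iteration and message type are illustrative; such a handler would be
 * wired up from a request handler via netlink_dump_start().
 */
static int __maybe_unused example_dump(struct sk_buff *skb,
				       struct netlink_callback *cb)
{
	struct nlmsghdr *nlh;
	int idx;

	for (idx = cb->args[0]; idx < 16; idx++) {
		nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
				cb->nlh->nlmsg_seq, NLMSG_MIN_TYPE,
				sizeof(u32), NLM_F_MULTI);
		if (!nlh)
			break;	/* skb full; resume from idx on the next call */
		*(u32 *)nlmsg_data(nlh) = idx;
		nlmsg_end(skb, nlh);
	}
	cb->args[0] = idx;
	return idx < 16 ? skb->len : 0;
}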

void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err)
{
	struct sk_buff *skb;
	struct nlmsghdr *rep;
	struct nlmsgerr *errmsg;
	size_t payload = sizeof(*errmsg);
	struct netlink_sock *nlk = nlk_sk(NETLINK_CB(in_skb).sk);

	/* Error messages get the original request appended, unless the user
	 * requests to cap the error message.
	 */
	if (!(nlk->flags & NETLINK_F_CAP_ACK) && err)
		payload += nlmsg_len(nlh);

	skb = netlink_alloc_skb(in_skb->sk, nlmsg_total_size(payload),
				NETLINK_CB(in_skb).portid, GFP_KERNEL);
	if (!skb) {
		struct sock *sk;

		sk = netlink_lookup(sock_net(in_skb->sk),
				    in_skb->sk->sk_protocol,
				    NETLINK_CB(in_skb).portid);
		if (sk) {
			sk->sk_err = ENOBUFS;
			sk->sk_error_report(sk);
			sock_put(sk);
		}
		return;
	}

	rep = __nlmsg_put(skb, NETLINK_CB(in_skb).portid, nlh->nlmsg_seq,
			  NLMSG_ERROR, payload, 0);
	errmsg = nlmsg_data(rep);
	errmsg->error = err;
	memcpy(&errmsg->msg, nlh, payload > sizeof(*errmsg) ? nlh->nlmsg_len : sizeof(*nlh));
	netlink_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).portid, MSG_DONTWAIT);
}
EXPORT_SYMBOL(netlink_ack);

int netlink_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *,
						   struct nlmsghdr *))
{
	struct nlmsghdr *nlh;
	int err;

	while (skb->len >= nlmsg_total_size(0)) {
		int msglen;

		nlh = nlmsg_hdr(skb);
		err = 0;

		if (nlh->nlmsg_len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len)
			return 0;

		/* Only requests are handled by the kernel */
		if (!(nlh->nlmsg_flags & NLM_F_REQUEST))
			goto ack;

		/* Skip control messages */
		if (nlh->nlmsg_type < NLMSG_MIN_TYPE)
			goto ack;

		err = cb(skb, nlh);
		if (err == -EINTR)
			goto skip;

ack:
		if (nlh->nlmsg_flags & NLM_F_ACK || err)
			netlink_ack(skb, nlh, err);

skip:
		msglen = NLMSG_ALIGN(nlh->nlmsg_len);
		if (msglen > skb->len)
			msglen = skb->len;
		skb_pull(skb, msglen);
	}

	return 0;
}
EXPORT_SYMBOL(netlink_rcv_skb);
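
/* Sketch of the usual pairing: a kernel socket's input callback hands each
 * datagram to netlink_rcv_skb(), which validates, demuxes and ACKs the
 * individual messages.  The handler body and type check are illustrative.
 */
static int example_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	switch (nlh->nlmsg_type) {
	case NLMSG_MIN_TYPE:
		/* validate and process nlmsg_data(nlh) here */
		return 0;
	default:
		return -EINVAL;
	}
}

static void __maybe_unused example_sock_input(struct sk_buff *skb)
{
	netlink_rcv_skb(skb, &example_rcv_msg);
}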

/**
 * nlmsg_notify - send a notification netlink message
 * @sk: netlink socket to use
 * @skb: notification message
 * @portid: destination netlink portid for reports or 0
 * @group: destination multicast group or 0
 * @report: 1 to report back, 0 to disable
 * @flags: allocation flags
 */
int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 portid,
		 unsigned int group, int report, gfp_t flags)
{
	int err = 0;

	if (group) {
		int exclude_portid = 0;

		if (report) {
			atomic_inc(&skb->users);
			exclude_portid = portid;
		}

		/* errors reported via destination sk->sk_err, but propagate
		 * delivery errors if NETLINK_BROADCAST_ERROR flag is set */
		err = nlmsg_multicast(sk, skb, exclude_portid, group, flags);
	}

	if (report) {
		int err2;

		err2 = nlmsg_unicast(sk, skb, portid);
		if (!err || err == -ESRCH)
			err = err2;
	}

	return err;
}
EXPORT_SYMBOL(nlmsg_notify);

#ifdef CONFIG_PROC_FS
struct nl_seq_iter {
	struct seq_net_private p;
	struct rhashtable_iter hti;
	int link;
};

static int netlink_walk_start(struct nl_seq_iter *iter)
{
	int err;

	err = rhashtable_walk_init(&nl_table[iter->link].hash, &iter->hti);
	if (err) {
		iter->link = MAX_LINKS;
		return err;
	}

	err = rhashtable_walk_start(&iter->hti);
	return err == -EAGAIN ? 0 : err;
}

static void netlink_walk_stop(struct nl_seq_iter *iter)
{
	rhashtable_walk_stop(&iter->hti);
	rhashtable_walk_exit(&iter->hti);
}

static void *__netlink_seq_next(struct seq_file *seq)
{
	struct nl_seq_iter *iter = seq->private;
	struct netlink_sock *nlk;

	do {
		for (;;) {
			int err;

			nlk = rhashtable_walk_next(&iter->hti);

			if (IS_ERR(nlk)) {
				if (PTR_ERR(nlk) == -EAGAIN)
					continue;

				return nlk;
			}

			if (nlk)
				break;

			netlink_walk_stop(iter);
			if (++iter->link >= MAX_LINKS)
				return NULL;

			err = netlink_walk_start(iter);
			if (err)
				return ERR_PTR(err);
		}
	} while (sock_net(&nlk->sk) != seq_file_net(seq));

	return nlk;
}

static void *netlink_seq_start(struct seq_file *seq, loff_t *posp)
{
	struct nl_seq_iter *iter = seq->private;
	void *obj = SEQ_START_TOKEN;
	loff_t pos;
	int err;

	iter->link = 0;

	err = netlink_walk_start(iter);
	if (err)
		return ERR_PTR(err);

	for (pos = *posp; pos && obj && !IS_ERR(obj); pos--)
		obj = __netlink_seq_next(seq);

	return obj;
}

static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return __netlink_seq_next(seq);
}

static void netlink_seq_stop(struct seq_file *seq, void *v)
{
	struct nl_seq_iter *iter = seq->private;

	if (iter->link >= MAX_LINKS)
		return;

	netlink_walk_stop(iter);
}


static int netlink_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "sk       Eth Pid    Groups   "
			 "Rmem     Wmem     Dump     Locks     Drops     Inode\n");
	} else {
		struct sock *s = v;
		struct netlink_sock *nlk = nlk_sk(s);

		seq_printf(seq, "%pK %-3d %-6u %08x %-8d %-8d %d %-8d %-8d %-8lu\n",
			   s,
			   s->sk_protocol,
			   nlk->portid,
			   nlk->groups ? (u32)nlk->groups[0] : 0,
			   sk_rmem_alloc_get(s),
			   sk_wmem_alloc_get(s),
			   nlk->cb_running,
			   atomic_read(&s->sk_refcnt),
			   atomic_read(&s->sk_drops),
			   sock_i_ino(s)
			);

	}
	return 0;
}

static const struct seq_operations netlink_seq_ops = {
	.start	= netlink_seq_start,
	.next	= netlink_seq_next,
	.stop	= netlink_seq_stop,
	.show	= netlink_seq_show,
};


static int netlink_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &netlink_seq_ops,
			    sizeof(struct nl_seq_iter));
}

static const struct file_operations netlink_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= netlink_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_net,
};

#endif

int netlink_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&netlink_chain, nb);
}
EXPORT_SYMBOL(netlink_register_notifier);

int netlink_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&netlink_chain, nb);
}
EXPORT_SYMBOL(netlink_unregister_notifier);

static const struct proto_ops netlink_ops = {
	.family =	PF_NETLINK,
	.owner =	THIS_MODULE,
	.release =	netlink_release,
	.bind =		netlink_bind,
	.connect =	netlink_connect,
	.socketpair =	sock_no_socketpair,
	.accept =	sock_no_accept,
	.getname =	netlink_getname,
	.poll =		netlink_poll,
	.ioctl =	sock_no_ioctl,
	.listen =	sock_no_listen,
	.shutdown =	sock_no_shutdown,
	.setsockopt =	netlink_setsockopt,
	.getsockopt =	netlink_getsockopt,
	.sendmsg =	netlink_sendmsg,
	.recvmsg =	netlink_recvmsg,
	.mmap =		netlink_mmap,
	.sendpage =	sock_no_sendpage,
};

static const struct net_proto_family netlink_family_ops = {
	.family = PF_NETLINK,
	.create = netlink_create,
	.owner	= THIS_MODULE,	/* for consistency 8) */
};

static int __net_init netlink_net_init(struct net *net)
{
#ifdef CONFIG_PROC_FS
	if (!proc_create("netlink", 0, net->proc_net, &netlink_seq_fops))
		return -ENOMEM;
#endif
	return 0;
}

static void __net_exit netlink_net_exit(struct net *net)
{
#ifdef CONFIG_PROC_FS
	remove_proc_entry("netlink", net->proc_net);
#endif
}

static void __init netlink_add_usersock_entry(void)
{
	struct listeners *listeners;
	int groups = 32;

	listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL);
	if (!listeners)
		panic("netlink_add_usersock_entry: Cannot allocate listeners\n");

	netlink_table_grab();

	nl_table[NETLINK_USERSOCK].groups = groups;
	rcu_assign_pointer(nl_table[NETLINK_USERSOCK].listeners, listeners);
	nl_table[NETLINK_USERSOCK].module = THIS_MODULE;
	nl_table[NETLINK_USERSOCK].registered = 1;
	nl_table[NETLINK_USERSOCK].flags = NL_CFG_F_NONROOT_SEND;

	netlink_table_ungrab();
}

static struct pernet_operations __net_initdata netlink_net_ops = {
	.init = netlink_net_init,
	.exit = netlink_net_exit,
};

static inline u32 netlink_hash(const void *data, u32 len, u32 seed)
{
	const struct netlink_sock *nlk = data;
	struct netlink_compare_arg arg;

	netlink_compare_arg_init(&arg, sock_net(&nlk->sk), nlk->portid);
	return jhash2((u32 *)&arg, netlink_compare_arg_len / sizeof(u32), seed);
}

static const struct rhashtable_params netlink_rhashtable_params = {
	.head_offset = offsetof(struct netlink_sock, node),
	.key_len = netlink_compare_arg_len,
	.obj_hashfn = netlink_hash,
	.obj_cmpfn = netlink_compare,
	.automatic_shrinking = true,
};

static int __init netlink_proto_init(void)
{
	int i;
	int err = proto_register(&netlink_proto, 0);

	if (err != 0)
		goto out;

	BUILD_BUG_ON(sizeof(struct netlink_skb_parms) > FIELD_SIZEOF(struct sk_buff, cb));

	nl_table = kcalloc(MAX_LINKS, sizeof(*nl_table), GFP_KERNEL);
	if (!nl_table)
		goto panic;

	for (i = 0; i < MAX_LINKS; i++) {
		if (rhashtable_init(&nl_table[i].hash,
				    &netlink_rhashtable_params) < 0) {
			while (--i > 0)
				rhashtable_destroy(&nl_table[i].hash);
			kfree(nl_table);
			goto panic;
		}
	}

	INIT_LIST_HEAD(&netlink_tap_all);

	netlink_add_usersock_entry();

	sock_register(&netlink_family_ops);
	register_pernet_subsys(&netlink_net_ops);
	/* The netlink device handler may be needed early. */
	rtnetlink_init();
out:
	return err;
panic:
	panic("netlink_init: Cannot allocate nl_table\n");
}

core_initcall(netlink_proto_init);