[NETLINK]: Support dynamic number of multicast groups per netlink family
[deliverable/linux.git] net/netlink/af_netlink.c
1/*
2 * NETLINK Kernel-user communication protocol.
3 *
4 * Authors: Alan Cox <alan@redhat.com>
5 * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 *
12 * Tue Jun 26 14:36:48 MEST 2001 Herbert "herp" Rosmanith
13 * added netlink_proto_exit
14 * Tue Jan 22 18:32:44 BRST 2002 Arnaldo C. de Melo <acme@conectiva.com.br>
15 * use nlk_sk, as sk->protinfo is on a diet 8)
16 * Fri Jul 22 19:51:12 MEST 2005 Harald Welte <laforge@gnumonks.org>
17 * - inc module use count of module that owns
18 * the kernel socket in case userspace opens
19 * socket of same protocol
20 * - remove all module support, since netlink is
21 * mandatory if CONFIG_NET=y these days
22 */
23
24#include <linux/config.h>
25#include <linux/module.h>
26
27#include <linux/kernel.h>
28#include <linux/init.h>
29#include <linux/signal.h>
30#include <linux/sched.h>
31#include <linux/errno.h>
32#include <linux/string.h>
33#include <linux/stat.h>
34#include <linux/socket.h>
35#include <linux/un.h>
36#include <linux/fcntl.h>
37#include <linux/termios.h>
38#include <linux/sockios.h>
39#include <linux/net.h>
40#include <linux/fs.h>
41#include <linux/slab.h>
42#include <asm/uaccess.h>
43#include <linux/skbuff.h>
44#include <linux/netdevice.h>
45#include <linux/rtnetlink.h>
46#include <linux/proc_fs.h>
47#include <linux/seq_file.h>
48#include <linux/smp_lock.h>
49#include <linux/notifier.h>
50#include <linux/security.h>
51#include <linux/jhash.h>
52#include <linux/jiffies.h>
53#include <linux/random.h>
54#include <linux/bitops.h>
55#include <linux/mm.h>
56#include <linux/types.h>
57#include <linux/audit.h>
58
59#include <net/sock.h>
60#include <net/scm.h>
61
62#define Nprintk(a...)
f7fa9b10 63#define NLGRPSZ(x) (ALIGN(x, sizeof(unsigned long) * 8) / 8)
64
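/*
 * NLGRPSZ(x) rounds the group count x up to a whole number of unsigned longs
 * and yields the size in bytes of the per-socket membership bitmap.  For
 * example, with 64-bit longs NLGRPSZ(32) = ALIGN(32, 64) / 8 = 8 bytes, while
 * with 32-bit longs it is ALIGN(32, 32) / 8 = 4 bytes.
 */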
65struct netlink_sock {
66 /* struct sock has to be the first member of netlink_sock */
67 struct sock sk;
68 u32 pid;
1da177e4 69 u32 dst_pid;
d629b836 70 u32 dst_group;
71 u32 flags;
72 u32 subscriptions;
73 u32 ngroups;
74 unsigned long *groups;
75 unsigned long state;
76 wait_queue_head_t wait;
77 struct netlink_callback *cb;
78 spinlock_t cb_lock;
79 void (*data_ready)(struct sock *sk, int bytes);
77247bbb 80 struct module *module;
81};
82
83#define NETLINK_KERNEL_SOCKET 0x1
84
85static inline struct netlink_sock *nlk_sk(struct sock *sk)
86{
87 return (struct netlink_sock *)sk;
88}
89
90struct nl_pid_hash {
91 struct hlist_head *table;
92 unsigned long rehash_time;
93
94 unsigned int mask;
95 unsigned int shift;
96
97 unsigned int entries;
98 unsigned int max_shift;
99
100 u32 rnd;
101};
102
103struct netlink_table {
104 struct nl_pid_hash hash;
105 struct hlist_head mc_list;
106 unsigned int nl_nonroot;
f7fa9b10 107 unsigned int groups;
77247bbb 108 struct module *module;
ab33a171 109 int registered;
110};
111
112static struct netlink_table *nl_table;
113
114static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);
115
116static int netlink_dump(struct sock *sk);
117static void netlink_destroy_callback(struct netlink_callback *cb);
118
119static DEFINE_RWLOCK(nl_table_lock);
120static atomic_t nl_table_users = ATOMIC_INIT(0);
121
122static struct notifier_block *netlink_chain;
123
124static u32 netlink_group_mask(u32 group)
125{
126 return group ? 1 << (group - 1) : 0;
127}
128
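/*
 * Examples: netlink_group_mask(1) == 0x00000001, netlink_group_mask(5) ==
 * 0x00000010 and netlink_group_mask(0) == 0 ("no group").  A destination
 * group is kept internally as a 1-based number (dst_group) and only converted
 * back to the legacy nl_groups bitmask at the userspace boundary.
 */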
129static struct hlist_head *nl_pid_hashfn(struct nl_pid_hash *hash, u32 pid)
130{
131 return &hash->table[jhash_1word(pid, hash->rnd) & hash->mask];
132}
133
134static void netlink_sock_destruct(struct sock *sk)
135{
136 skb_queue_purge(&sk->sk_receive_queue);
137
138 if (!sock_flag(sk, SOCK_DEAD)) {
139 printk("Freeing alive netlink socket %p\n", sk);
140 return;
141 }
142 BUG_TRAP(!atomic_read(&sk->sk_rmem_alloc));
143 BUG_TRAP(!atomic_read(&sk->sk_wmem_alloc));
144 BUG_TRAP(!nlk_sk(sk)->cb);
f7fa9b10 145 BUG_TRAP(!nlk_sk(sk)->groups);
146}
147
148/* This lock without WQ_FLAG_EXCLUSIVE is good on UP, but it is _very_ bad on SMP.
149 * When several writers sleep and the reader wakes them up, all but one
150 * immediately hit the write lock and grab all the cpus. An exclusive sleep solves
151 * this, _but_ remember, it adds useless work on UP machines.
152 */
153
154static void netlink_table_grab(void)
155{
156 write_lock_bh(&nl_table_lock);
157
158 if (atomic_read(&nl_table_users)) {
159 DECLARE_WAITQUEUE(wait, current);
160
161 add_wait_queue_exclusive(&nl_table_wait, &wait);
162 for(;;) {
163 set_current_state(TASK_UNINTERRUPTIBLE);
164 if (atomic_read(&nl_table_users) == 0)
165 break;
166 write_unlock_bh(&nl_table_lock);
167 schedule();
168 write_lock_bh(&nl_table_lock);
169 }
170
171 __set_current_state(TASK_RUNNING);
172 remove_wait_queue(&nl_table_wait, &wait);
173 }
174}
175
176static __inline__ void netlink_table_ungrab(void)
177{
178 write_unlock_bh(&nl_table_lock);
179 wake_up(&nl_table_wait);
180}
181
182static __inline__ void
183netlink_lock_table(void)
184{
185 /* read_lock() synchronizes us to netlink_table_grab */
186
187 read_lock(&nl_table_lock);
188 atomic_inc(&nl_table_users);
189 read_unlock(&nl_table_lock);
190}
191
192static __inline__ void
193netlink_unlock_table(void)
194{
195 if (atomic_dec_and_test(&nl_table_users))
196 wake_up(&nl_table_wait);
197}
198
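/*
 * A minimal sketch of how the two flavours above are paired (the real users
 * are netlink_insert()/netlink_remove() on the grab side and
 * netlink_broadcast() on the reference-counted side); everything named
 * example_* is made up:
 */
#if 0	/* illustrative only, not built */
static void example_table_update(struct sock *sk)
{
	netlink_table_grab();		/* excludes all readers and writers */
	sk_add_bind_node(sk, &nl_table[sk->sk_protocol].mc_list);
	netlink_table_ungrab();
}

static void example_table_walk(int protocol)
{
	struct sock *sk;
	struct hlist_node *node;

	netlink_lock_table();		/* holders may even sleep under this */
	sk_for_each_bound(sk, node, &nl_table[protocol].mc_list)
		;			/* deliver something to sk */
	netlink_unlock_table();
}
#endif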
199static __inline__ struct sock *netlink_lookup(int protocol, u32 pid)
200{
201 struct nl_pid_hash *hash = &nl_table[protocol].hash;
202 struct hlist_head *head;
203 struct sock *sk;
204 struct hlist_node *node;
205
206 read_lock(&nl_table_lock);
207 head = nl_pid_hashfn(hash, pid);
208 sk_for_each(sk, node, head) {
209 if (nlk_sk(sk)->pid == pid) {
210 sock_hold(sk);
211 goto found;
212 }
213 }
214 sk = NULL;
215found:
216 read_unlock(&nl_table_lock);
217 return sk;
218}
219
220static inline struct hlist_head *nl_pid_hash_alloc(size_t size)
221{
222 if (size <= PAGE_SIZE)
223 return kmalloc(size, GFP_ATOMIC);
224 else
225 return (struct hlist_head *)
226 __get_free_pages(GFP_ATOMIC, get_order(size));
227}
228
229static inline void nl_pid_hash_free(struct hlist_head *table, size_t size)
230{
231 if (size <= PAGE_SIZE)
232 kfree(table);
233 else
234 free_pages((unsigned long)table, get_order(size));
235}
236
237static int nl_pid_hash_rehash(struct nl_pid_hash *hash, int grow)
238{
239 unsigned int omask, mask, shift;
240 size_t osize, size;
241 struct hlist_head *otable, *table;
242 int i;
243
244 omask = mask = hash->mask;
245 osize = size = (mask + 1) * sizeof(*table);
246 shift = hash->shift;
247
248 if (grow) {
249 if (++shift > hash->max_shift)
250 return 0;
251 mask = mask * 2 + 1;
252 size *= 2;
253 }
254
255 table = nl_pid_hash_alloc(size);
256 if (!table)
257 return 0;
258
259 memset(table, 0, size);
260 otable = hash->table;
261 hash->table = table;
262 hash->mask = mask;
263 hash->shift = shift;
264 get_random_bytes(&hash->rnd, sizeof(hash->rnd));
265
266 for (i = 0; i <= omask; i++) {
267 struct sock *sk;
268 struct hlist_node *node, *tmp;
269
270 sk_for_each_safe(sk, node, tmp, &otable[i])
271 __sk_add_node(sk, nl_pid_hashfn(hash, nlk_sk(sk)->pid));
272 }
273
274 nl_pid_hash_free(otable, osize);
275 hash->rehash_time = jiffies + 10 * 60 * HZ;
276 return 1;
277}
278
279static inline int nl_pid_hash_dilute(struct nl_pid_hash *hash, int len)
280{
281 int avg = hash->entries >> hash->shift;
282
283 if (unlikely(avg > 1) && nl_pid_hash_rehash(hash, 1))
284 return 1;
285
286 if (unlikely(len > avg) && time_after(jiffies, hash->rehash_time)) {
287 nl_pid_hash_rehash(hash, 0);
288 return 1;
289 }
290
291 return 0;
292}
293
294static struct proto_ops netlink_ops;
295
296static int netlink_insert(struct sock *sk, u32 pid)
297{
298 struct nl_pid_hash *hash = &nl_table[sk->sk_protocol].hash;
299 struct hlist_head *head;
300 int err = -EADDRINUSE;
301 struct sock *osk;
302 struct hlist_node *node;
303 int len;
304
305 netlink_table_grab();
306 head = nl_pid_hashfn(hash, pid);
307 len = 0;
308 sk_for_each(osk, node, head) {
309 if (nlk_sk(osk)->pid == pid)
310 break;
311 len++;
312 }
313 if (node)
314 goto err;
315
316 err = -EBUSY;
317 if (nlk_sk(sk)->pid)
318 goto err;
319
320 err = -ENOMEM;
321 if (BITS_PER_LONG > 32 && unlikely(hash->entries >= UINT_MAX))
322 goto err;
323
324 if (len && nl_pid_hash_dilute(hash, len))
325 head = nl_pid_hashfn(hash, pid);
326 hash->entries++;
327 nlk_sk(sk)->pid = pid;
328 sk_add_node(sk, head);
329 err = 0;
330
331err:
332 netlink_table_ungrab();
333 return err;
334}
335
336static void netlink_remove(struct sock *sk)
337{
338 netlink_table_grab();
339 if (sk_del_node_init(sk))
340 nl_table[sk->sk_protocol].hash.entries--;
f7fa9b10 341 if (nlk_sk(sk)->subscriptions)
342 __sk_del_bind_node(sk);
343 netlink_table_ungrab();
344}
345
346static struct proto netlink_proto = {
347 .name = "NETLINK",
348 .owner = THIS_MODULE,
349 .obj_size = sizeof(struct netlink_sock),
350};
351
ab33a171 352static int __netlink_create(struct socket *sock, int protocol)
353{
354 struct sock *sk;
355 struct netlink_sock *nlk;
356
357 sock->ops = &netlink_ops;
358
359 sk = sk_alloc(PF_NETLINK, GFP_KERNEL, &netlink_proto, 1);
360 if (!sk)
361 return -ENOMEM;
362
363 sock_init_data(sock, sk);
364
365 nlk = nlk_sk(sk);
366 spin_lock_init(&nlk->cb_lock);
367 init_waitqueue_head(&nlk->wait);
368
369 sk->sk_destruct = netlink_sock_destruct;
370 sk->sk_protocol = protocol;
371 return 0;
372}
373
374static int netlink_create(struct socket *sock, int protocol)
375{
376 struct module *module = NULL;
377 struct netlink_sock *nlk;
378 unsigned int groups;
ab33a171 379 int err = 0;
380
381 sock->state = SS_UNCONNECTED;
382
383 if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
384 return -ESOCKTNOSUPPORT;
385
386 if (protocol<0 || protocol >= MAX_LINKS)
387 return -EPROTONOSUPPORT;
388
77247bbb 389 netlink_lock_table();
4fdb3bb7 390#ifdef CONFIG_KMOD
ab33a171 391 if (!nl_table[protocol].registered) {
77247bbb 392 netlink_unlock_table();
4fdb3bb7 393 request_module("net-pf-%d-proto-%d", PF_NETLINK, protocol);
77247bbb 394 netlink_lock_table();
4fdb3bb7 395 }
396#endif
397 if (nl_table[protocol].registered &&
398 try_module_get(nl_table[protocol].module))
399 module = nl_table[protocol].module;
400 else
401 err = -EPROTONOSUPPORT;
f7fa9b10 402 groups = nl_table[protocol].groups;
77247bbb 403 netlink_unlock_table();
4fdb3bb7 404
405 if (err || (err = __netlink_create(sock, protocol)) < 0)
406 goto out_module;
407
408 nlk = nlk_sk(sock->sk);
1da177e4 409
410 nlk->groups = kmalloc(NLGRPSZ(groups), GFP_KERNEL);
411 if (nlk->groups == NULL) {
412 err = -ENOMEM;
ab33a171 413 goto out_module;
414 }
415 memset(nlk->groups, 0, NLGRPSZ(groups));
416 nlk->ngroups = groups;
1da177e4 417
f7fa9b10 418 nlk->module = module;
419out:
420 return err;
1da177e4 421
422out_module:
423 module_put(module);
424 goto out;
425}
426
427static int netlink_release(struct socket *sock)
428{
429 struct sock *sk = sock->sk;
430 struct netlink_sock *nlk;
431
432 if (!sk)
433 return 0;
434
435 netlink_remove(sk);
436 nlk = nlk_sk(sk);
437
438 spin_lock(&nlk->cb_lock);
439 if (nlk->cb) {
440 nlk->cb->done(nlk->cb);
441 netlink_destroy_callback(nlk->cb);
442 nlk->cb = NULL;
443 }
444 spin_unlock(&nlk->cb_lock);
445
446 /* OK. Socket is unlinked, and, therefore,
447 no new packets will arrive */
448
449 sock_orphan(sk);
450 sock->sk = NULL;
451 wake_up_interruptible_all(&nlk->wait);
452
453 skb_queue_purge(&sk->sk_write_queue);
454
f7fa9b10 455 if (nlk->pid && !nlk->subscriptions) {
456 struct netlink_notify n = {
457 .protocol = sk->sk_protocol,
458 .pid = nlk->pid,
459 };
460 notifier_call_chain(&netlink_chain, NETLINK_URELEASE, &n);
461 }
4fdb3bb7 462
463 if (nlk->module)
464 module_put(nlk->module);
4fdb3bb7 465
77247bbb 466 if (nlk->flags & NETLINK_KERNEL_SOCKET) {
4fdb3bb7 467 netlink_table_grab();
77247bbb 468 nl_table[sk->sk_protocol].module = NULL;
ab33a171 469 nl_table[sk->sk_protocol].registered = 0;
470 netlink_table_ungrab();
471 }
77247bbb 472
473 kfree(nlk->groups);
474 nlk->groups = NULL;
475
476 sock_put(sk);
477 return 0;
478}
479
480static int netlink_autobind(struct socket *sock)
481{
482 struct sock *sk = sock->sk;
483 struct nl_pid_hash *hash = &nl_table[sk->sk_protocol].hash;
484 struct hlist_head *head;
485 struct sock *osk;
486 struct hlist_node *node;
487 s32 pid = current->pid;
488 int err;
489 static s32 rover = -4097;
490
491retry:
492 cond_resched();
493 netlink_table_grab();
494 head = nl_pid_hashfn(hash, pid);
495 sk_for_each(osk, node, head) {
496 if (nlk_sk(osk)->pid == pid) {
497 /* Bind collision, search negative pid values. */
498 pid = rover--;
499 if (rover > -4097)
500 rover = -4097;
501 netlink_table_ungrab();
502 goto retry;
503 }
504 }
505 netlink_table_ungrab();
506
507 err = netlink_insert(sk, pid);
508 if (err == -EADDRINUSE)
509 goto retry;
510
511 /* If 2 threads race to autobind, that is fine. */
512 if (err == -EBUSY)
513 err = 0;
514
515 return err;
516}
517
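/*
 * Note on pid selection above: a kernel socket binds pid 0, a process that
 * binds without an explicit nl_pid gets its own process id, and collisions
 * (for instance several netlink sockets in one process) fall back to negative
 * pids handed out from the static rover, starting at -4097.
 */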
518static inline int netlink_capable(struct socket *sock, unsigned int flag)
519{
520 return (nl_table[sock->sk->sk_protocol].nl_nonroot & flag) ||
521 capable(CAP_NET_ADMIN);
522}
523
524static void
525netlink_update_subscriptions(struct sock *sk, unsigned int subscriptions)
526{
527 struct netlink_sock *nlk = nlk_sk(sk);
528
529 if (nlk->subscriptions && !subscriptions)
530 __sk_del_bind_node(sk);
531 else if (!nlk->subscriptions && subscriptions)
532 sk_add_bind_node(sk, &nl_table[sk->sk_protocol].mc_list);
533 nlk->subscriptions = subscriptions;
534}
535
536static int netlink_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
537{
538 struct sock *sk = sock->sk;
539 struct netlink_sock *nlk = nlk_sk(sk);
540 struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
541 int err;
542
543 if (nladdr->nl_family != AF_NETLINK)
544 return -EINVAL;
545
546 /* Only the superuser is allowed to listen to multicasts */
547 if (nladdr->nl_groups && !netlink_capable(sock, NL_NONROOT_RECV))
548 return -EPERM;
549
550 if (nlk->pid) {
551 if (nladdr->nl_pid != nlk->pid)
552 return -EINVAL;
553 } else {
554 err = nladdr->nl_pid ?
555 netlink_insert(sk, nladdr->nl_pid) :
556 netlink_autobind(sock);
557 if (err)
558 return err;
559 }
560
f7fa9b10 561 if (!nladdr->nl_groups && !(u32)nlk->groups[0])
562 return 0;
563
564 netlink_table_grab();
565 netlink_update_subscriptions(sk, nlk->subscriptions +
566 hweight32(nladdr->nl_groups) -
567 hweight32(nlk->groups[0]));
568 nlk->groups[0] = (nlk->groups[0] & ~0xffffffffUL) | nladdr->nl_groups;
569 netlink_table_ungrab();
570
571 return 0;
572}
573
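/*
 * The userspace side of the bind path above, as a sketch: nl_groups is a
 * 32-bit bitmask, so this interface can only subscribe a socket to the first
 * 32 multicast groups of a family (hence the nlk->groups[0] word above).
 */
#if 0	/* userspace illustration, not part of this file */
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

static int subscribe_link_notifications(void)
{
	struct sockaddr_nl snl = {
		.nl_family = AF_NETLINK,
		.nl_pid    = 0,			/* let the kernel autobind */
		.nl_groups = RTMGRP_LINK,	/* bitmask, not a group number */
	};
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

	if (fd < 0 || bind(fd, (struct sockaddr *)&snl, sizeof(snl)) < 0)
		return -1;
	return fd;
}
#endif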
574static int netlink_connect(struct socket *sock, struct sockaddr *addr,
575 int alen, int flags)
576{
577 int err = 0;
578 struct sock *sk = sock->sk;
579 struct netlink_sock *nlk = nlk_sk(sk);
580 struct sockaddr_nl *nladdr=(struct sockaddr_nl*)addr;
581
582 if (addr->sa_family == AF_UNSPEC) {
583 sk->sk_state = NETLINK_UNCONNECTED;
584 nlk->dst_pid = 0;
d629b836 585 nlk->dst_group = 0;
586 return 0;
587 }
588 if (addr->sa_family != AF_NETLINK)
589 return -EINVAL;
590
591 /* Only superuser is allowed to send multicasts */
592 if (nladdr->nl_groups && !netlink_capable(sock, NL_NONROOT_SEND))
593 return -EPERM;
594
595 if (!nlk->pid)
596 err = netlink_autobind(sock);
597
598 if (err == 0) {
599 sk->sk_state = NETLINK_CONNECTED;
600 nlk->dst_pid = nladdr->nl_pid;
d629b836 601 nlk->dst_group = ffs(nladdr->nl_groups);
602 }
603
604 return err;
605}
606
607static int netlink_getname(struct socket *sock, struct sockaddr *addr, int *addr_len, int peer)
608{
609 struct sock *sk = sock->sk;
610 struct netlink_sock *nlk = nlk_sk(sk);
611 struct sockaddr_nl *nladdr=(struct sockaddr_nl *)addr;
612
613 nladdr->nl_family = AF_NETLINK;
614 nladdr->nl_pad = 0;
615 *addr_len = sizeof(*nladdr);
616
617 if (peer) {
618 nladdr->nl_pid = nlk->dst_pid;
d629b836 619 nladdr->nl_groups = netlink_group_mask(nlk->dst_group);
620 } else {
621 nladdr->nl_pid = nlk->pid;
f7fa9b10 622 nladdr->nl_groups = nlk->groups[0];
623 }
624 return 0;
625}
626
627static void netlink_overrun(struct sock *sk)
628{
629 if (!test_and_set_bit(0, &nlk_sk(sk)->state)) {
630 sk->sk_err = ENOBUFS;
631 sk->sk_error_report(sk);
632 }
633}
634
635static struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid)
636{
637 int protocol = ssk->sk_protocol;
638 struct sock *sock;
639 struct netlink_sock *nlk;
640
641 sock = netlink_lookup(protocol, pid);
642 if (!sock)
643 return ERR_PTR(-ECONNREFUSED);
644
645 /* Don't bother queuing skb if kernel socket has no input function */
646 nlk = nlk_sk(sock);
647 if ((nlk->pid == 0 && !nlk->data_ready) ||
648 (sock->sk_state == NETLINK_CONNECTED &&
649 nlk->dst_pid != nlk_sk(ssk)->pid)) {
650 sock_put(sock);
651 return ERR_PTR(-ECONNREFUSED);
652 }
653 return sock;
654}
655
656struct sock *netlink_getsockbyfilp(struct file *filp)
657{
658 struct inode *inode = filp->f_dentry->d_inode;
659 struct sock *sock;
660
661 if (!S_ISSOCK(inode->i_mode))
662 return ERR_PTR(-ENOTSOCK);
663
664 sock = SOCKET_I(inode)->sk;
665 if (sock->sk_family != AF_NETLINK)
666 return ERR_PTR(-EINVAL);
667
668 sock_hold(sock);
669 return sock;
670}
671
672/*
673 * Attach a skb to a netlink socket.
674 * The caller must hold a reference to the destination socket. On error, the
675 * reference is dropped. The skb is not sent to the destination; only the
676 * error checks are performed and memory in the queue is reserved.
677 * Return values:
678 * < 0: error. skb freed, reference to sock dropped.
679 * 0: continue
680 * 1: repeat lookup - reference dropped while waiting for socket memory.
681 */
682int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock, long timeo)
683{
684 struct netlink_sock *nlk;
685
686 nlk = nlk_sk(sk);
687
688 if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
689 test_bit(0, &nlk->state)) {
690 DECLARE_WAITQUEUE(wait, current);
691 if (!timeo) {
692 if (!nlk->pid)
693 netlink_overrun(sk);
694 sock_put(sk);
695 kfree_skb(skb);
696 return -EAGAIN;
697 }
698
699 __set_current_state(TASK_INTERRUPTIBLE);
700 add_wait_queue(&nlk->wait, &wait);
701
702 if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
703 test_bit(0, &nlk->state)) &&
704 !sock_flag(sk, SOCK_DEAD))
705 timeo = schedule_timeout(timeo);
706
707 __set_current_state(TASK_RUNNING);
708 remove_wait_queue(&nlk->wait, &wait);
709 sock_put(sk);
710
711 if (signal_pending(current)) {
712 kfree_skb(skb);
713 return sock_intr_errno(timeo);
714 }
715 return 1;
716 }
717 skb_set_owner_r(skb, sk);
718 return 0;
719}
720
721int netlink_sendskb(struct sock *sk, struct sk_buff *skb, int protocol)
722{
723 struct netlink_sock *nlk;
724 int len = skb->len;
725
726 nlk = nlk_sk(sk);
727
728 skb_queue_tail(&sk->sk_receive_queue, skb);
729 sk->sk_data_ready(sk, len);
730 sock_put(sk);
731 return len;
732}
733
734void netlink_detachskb(struct sock *sk, struct sk_buff *skb)
735{
736 kfree_skb(skb);
737 sock_put(sk);
738}
739
740static inline struct sk_buff *netlink_trim(struct sk_buff *skb,
741 unsigned int __nocast allocation)
742{
743 int delta;
744
745 skb_orphan(skb);
746
747 delta = skb->end - skb->tail;
748 if (delta * 2 < skb->truesize)
749 return skb;
750
751 if (skb_shared(skb)) {
752 struct sk_buff *nskb = skb_clone(skb, allocation);
753 if (!nskb)
754 return skb;
755 kfree_skb(skb);
756 skb = nskb;
757 }
758
759 if (!pskb_expand_head(skb, 0, -delta, allocation))
760 skb->truesize -= delta;
761
762 return skb;
763}
764
765int netlink_unicast(struct sock *ssk, struct sk_buff *skb, u32 pid, int nonblock)
766{
767 struct sock *sk;
768 int err;
769 long timeo;
770
771 skb = netlink_trim(skb, gfp_any());
772
773 timeo = sock_sndtimeo(ssk, nonblock);
774retry:
775 sk = netlink_getsockbypid(ssk, pid);
776 if (IS_ERR(sk)) {
777 kfree_skb(skb);
778 return PTR_ERR(sk);
779 }
780 err = netlink_attachskb(sk, skb, nonblock, timeo);
781 if (err == 1)
782 goto retry;
783 if (err)
784 return err;
785
786 return netlink_sendskb(sk, skb, ssk->sk_protocol);
787}
788
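/*
 * A sketch of the typical kernel-side caller: build an skb holding one
 * netlink message and unicast it back to the pid recorded in the request.
 * Only netlink_unicast() and the NLMSG_* helpers are real; the example_*
 * names are made up.
 */
#if 0	/* illustrative only */
static void example_reply(struct sock *nl_sk, struct sk_buff *in_skb,
			  const void *data, int len)
{
	struct sk_buff *skb = alloc_skb(NLMSG_SPACE(len), GFP_KERNEL);
	struct nlmsghdr *nlh;

	if (!skb)
		return;
	nlh = NLMSG_PUT(skb, 0, 0, NLMSG_DONE, len);
	memcpy(NLMSG_DATA(nlh), data, len);
	netlink_unicast(nl_sk, skb, NETLINK_CB(in_skb).pid, MSG_DONTWAIT);
	return;

nlmsg_failure:				/* label required by NLMSG_PUT() */
	kfree_skb(skb);
}
#endif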
789static __inline__ int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb)
790{
791 struct netlink_sock *nlk = nlk_sk(sk);
792
793 if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
794 !test_bit(0, &nlk->state)) {
795 skb_set_owner_r(skb, sk);
796 skb_queue_tail(&sk->sk_receive_queue, skb);
797 sk->sk_data_ready(sk, skb->len);
798 return atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf;
799 }
800 return -1;
801}
802
803struct netlink_broadcast_data {
804 struct sock *exclude_sk;
805 u32 pid;
806 u32 group;
807 int failure;
808 int congested;
809 int delivered;
37da647d 810 unsigned int allocation;
811 struct sk_buff *skb, *skb2;
812};
813
814static inline int do_one_broadcast(struct sock *sk,
815 struct netlink_broadcast_data *p)
816{
817 struct netlink_sock *nlk = nlk_sk(sk);
818 int val;
819
820 if (p->exclude_sk == sk)
821 goto out;
822
823 if (nlk->pid == p->pid || p->group - 1 >= nlk->ngroups ||
824 !test_bit(p->group - 1, nlk->groups))
825 goto out;
826
827 if (p->failure) {
828 netlink_overrun(sk);
829 goto out;
830 }
831
832 sock_hold(sk);
833 if (p->skb2 == NULL) {
68acc024 834 if (skb_shared(p->skb)) {
835 p->skb2 = skb_clone(p->skb, p->allocation);
836 } else {
837 p->skb2 = skb_get(p->skb);
838 /*
839 * skb ownership may have been set when
840 * delivered to a previous socket.
841 */
842 skb_orphan(p->skb2);
843 }
844 }
845 if (p->skb2 == NULL) {
846 netlink_overrun(sk);
847 /* Clone failed. Notify ALL listeners. */
848 p->failure = 1;
849 } else if ((val = netlink_broadcast_deliver(sk, p->skb2)) < 0) {
850 netlink_overrun(sk);
851 } else {
852 p->congested |= val;
853 p->delivered = 1;
854 p->skb2 = NULL;
855 }
856 sock_put(sk);
857
858out:
859 return 0;
860}
861
862int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 pid,
863 u32 group, int allocation)
864{
865 struct netlink_broadcast_data info;
866 struct hlist_node *node;
867 struct sock *sk;
868
869 skb = netlink_trim(skb, allocation);
870
871 info.exclude_sk = ssk;
872 info.pid = pid;
873 info.group = group;
874 info.failure = 0;
875 info.congested = 0;
876 info.delivered = 0;
877 info.allocation = allocation;
878 info.skb = skb;
879 info.skb2 = NULL;
880
881 /* While we sleep in clone, do not allow the socket list to change */
882
883 netlink_lock_table();
884
885 sk_for_each_bound(sk, node, &nl_table[ssk->sk_protocol].mc_list)
886 do_one_broadcast(sk, &info);
887
888 kfree_skb(skb);
889
890 netlink_unlock_table();
891
892 if (info.skb2)
893 kfree_skb(info.skb2);
894
895 if (info.delivered) {
896 if (info.congested && (allocation & __GFP_WAIT))
897 yield();
898 return 0;
899 }
900 if (info.failure)
901 return -ENOBUFS;
902 return -ESRCH;
903}
904
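/*
 * A sketch of the usual broadcast caller: a kernel socket pushes one message
 * to every listener of a multicast group.  Note that the group argument is a
 * 1-based group number here, not the nl_groups bitmask userspace binds with.
 * The example_* name is made up.
 */
#if 0	/* illustrative only */
static void example_notify(struct sock *nl_sk, u32 group)
{
	struct sk_buff *skb = alloc_skb(NLMSG_SPACE(0), GFP_KERNEL);

	if (!skb)
		return;
	NLMSG_PUT(skb, 0, 0, NLMSG_DONE, 0);
	netlink_broadcast(nl_sk, skb, 0, group, GFP_KERNEL);
	return;

nlmsg_failure:
	kfree_skb(skb);
}
#endif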
905struct netlink_set_err_data {
906 struct sock *exclude_sk;
907 u32 pid;
908 u32 group;
909 int code;
910};
911
912static inline int do_one_set_err(struct sock *sk,
913 struct netlink_set_err_data *p)
914{
915 struct netlink_sock *nlk = nlk_sk(sk);
916
917 if (sk == p->exclude_sk)
918 goto out;
919
920 if (nlk->pid == p->pid || p->group - 1 >= nlk->ngroups ||
921 !test_bit(p->group - 1, nlk->groups))
922 goto out;
923
924 sk->sk_err = p->code;
925 sk->sk_error_report(sk);
926out:
927 return 0;
928}
929
930void netlink_set_err(struct sock *ssk, u32 pid, u32 group, int code)
931{
932 struct netlink_set_err_data info;
933 struct hlist_node *node;
934 struct sock *sk;
935
936 info.exclude_sk = ssk;
937 info.pid = pid;
938 info.group = group;
939 info.code = code;
940
941 read_lock(&nl_table_lock);
942
943 sk_for_each_bound(sk, node, &nl_table[ssk->sk_protocol].mc_list)
944 do_one_set_err(sk, &info);
945
946 read_unlock(&nl_table_lock);
947}
948
949static inline void netlink_rcv_wake(struct sock *sk)
950{
951 struct netlink_sock *nlk = nlk_sk(sk);
952
b03efcfb 953 if (skb_queue_empty(&sk->sk_receive_queue))
954 clear_bit(0, &nlk->state);
955 if (!test_bit(0, &nlk->state))
956 wake_up_interruptible(&nlk->wait);
957}
958
959static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
960 struct msghdr *msg, size_t len)
961{
962 struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
963 struct sock *sk = sock->sk;
964 struct netlink_sock *nlk = nlk_sk(sk);
965 struct sockaddr_nl *addr=msg->msg_name;
966 u32 dst_pid;
d629b836 967 u32 dst_group;
968 struct sk_buff *skb;
969 int err;
970 struct scm_cookie scm;
971
972 if (msg->msg_flags&MSG_OOB)
973 return -EOPNOTSUPP;
974
975 if (NULL == siocb->scm)
976 siocb->scm = &scm;
977 err = scm_send(sock, msg, siocb->scm);
978 if (err < 0)
979 return err;
980
981 if (msg->msg_namelen) {
982 if (addr->nl_family != AF_NETLINK)
983 return -EINVAL;
984 dst_pid = addr->nl_pid;
985 dst_group = ffs(addr->nl_groups);
986 if (dst_group && !netlink_capable(sock, NL_NONROOT_SEND))
987 return -EPERM;
988 } else {
989 dst_pid = nlk->dst_pid;
d629b836 990 dst_group = nlk->dst_group;
991 }
992
993 if (!nlk->pid) {
994 err = netlink_autobind(sock);
995 if (err)
996 goto out;
997 }
998
999 err = -EMSGSIZE;
1000 if (len > sk->sk_sndbuf - 32)
1001 goto out;
1002 err = -ENOBUFS;
1003 skb = alloc_skb(len, GFP_KERNEL);
1004 if (skb==NULL)
1005 goto out;
1006
1007 NETLINK_CB(skb).pid = nlk->pid;
1da177e4 1008 NETLINK_CB(skb).dst_pid = dst_pid;
d629b836 1009 NETLINK_CB(skb).dst_group = dst_group;
c94c257c 1010 NETLINK_CB(skb).loginuid = audit_get_loginuid(current->audit_context);
1011 memcpy(NETLINK_CREDS(skb), &siocb->scm->creds, sizeof(struct ucred));
1012
1013 /* What can I do? Netlink is asynchronous, so we will have to save
1014 the current capabilities and check them when this message is
1015 delivered to the corresponding kernel module.
1016 --ANK (980802)
1017 */
1018
1019 err = -EFAULT;
1020 if (memcpy_fromiovec(skb_put(skb,len), msg->msg_iov, len)) {
1021 kfree_skb(skb);
1022 goto out;
1023 }
1024
1025 err = security_netlink_send(sk, skb);
1026 if (err) {
1027 kfree_skb(skb);
1028 goto out;
1029 }
1030
d629b836 1031 if (dst_group) {
1da177e4 1032 atomic_inc(&skb->users);
d629b836 1033 netlink_broadcast(sk, skb, dst_pid, dst_group, GFP_KERNEL);
1034 }
1035 err = netlink_unicast(sk, skb, dst_pid, msg->msg_flags&MSG_DONTWAIT);
1036
1037out:
1038 return err;
1039}
1040
1041static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
1042 struct msghdr *msg, size_t len,
1043 int flags)
1044{
1045 struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
1046 struct scm_cookie scm;
1047 struct sock *sk = sock->sk;
1048 struct netlink_sock *nlk = nlk_sk(sk);
1049 int noblock = flags&MSG_DONTWAIT;
1050 size_t copied;
1051 struct sk_buff *skb;
1052 int err;
1053
1054 if (flags&MSG_OOB)
1055 return -EOPNOTSUPP;
1056
1057 copied = 0;
1058
1059 skb = skb_recv_datagram(sk,flags,noblock,&err);
1060 if (skb==NULL)
1061 goto out;
1062
1063 msg->msg_namelen = 0;
1064
1065 copied = skb->len;
1066 if (len < copied) {
1067 msg->msg_flags |= MSG_TRUNC;
1068 copied = len;
1069 }
1070
1071 skb->h.raw = skb->data;
1072 err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
1073
1074 if (msg->msg_name) {
1075 struct sockaddr_nl *addr = (struct sockaddr_nl*)msg->msg_name;
1076 addr->nl_family = AF_NETLINK;
1077 addr->nl_pad = 0;
1078 addr->nl_pid = NETLINK_CB(skb).pid;
d629b836 1079 addr->nl_groups = netlink_group_mask(NETLINK_CB(skb).dst_group);
1080 msg->msg_namelen = sizeof(*addr);
1081 }
1082
1083 if (NULL == siocb->scm) {
1084 memset(&scm, 0, sizeof(scm));
1085 siocb->scm = &scm;
1086 }
1087 siocb->scm->creds = *NETLINK_CREDS(skb);
1088 skb_free_datagram(sk, skb);
1089
1090 if (nlk->cb && atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2)
1091 netlink_dump(sk);
1092
1093 scm_recv(sock, msg, siocb->scm, flags);
1094
1095out:
1096 netlink_rcv_wake(sk);
1097 return err ? : copied;
1098}
1099
1100static void netlink_data_ready(struct sock *sk, int len)
1101{
1102 struct netlink_sock *nlk = nlk_sk(sk);
1103
1104 if (nlk->data_ready)
1105 nlk->data_ready(sk, len);
1106 netlink_rcv_wake(sk);
1107}
1108
1109/*
1110 * We export these functions to other modules. They provide a
1111 * complete set of kernel non-blocking support for message
1112 * queueing.
1113 */
1114
1115struct sock *
4fdb3bb7 1116netlink_kernel_create(int unit, void (*input)(struct sock *sk, int len), struct module *module)
1117{
1118 struct socket *sock;
1119 struct sock *sk;
77247bbb 1120 struct netlink_sock *nlk;
1121
1122 if (!nl_table)
1123 return NULL;
1124
1125 if (unit<0 || unit>=MAX_LINKS)
1126 return NULL;
1127
1128 if (sock_create_lite(PF_NETLINK, SOCK_DGRAM, unit, &sock))
1129 return NULL;
1130
ab33a171 1131 if (__netlink_create(sock, unit) < 0)
77247bbb 1132 goto out_sock_release;
4fdb3bb7 1133
1134 sk = sock->sk;
1135 sk->sk_data_ready = netlink_data_ready;
1136 if (input)
1137 nlk_sk(sk)->data_ready = input;
1138
1139 if (netlink_insert(sk, 0))
1140 goto out_sock_release;
4fdb3bb7 1141
1142 nlk = nlk_sk(sk);
1143 nlk->flags |= NETLINK_KERNEL_SOCKET;
4fdb3bb7 1144
4fdb3bb7 1145 netlink_table_grab();
f7fa9b10 1146 nl_table[unit].groups = 32;
77247bbb 1147 nl_table[unit].module = module;
ab33a171 1148 nl_table[unit].registered = 1;
4fdb3bb7 1149 netlink_table_ungrab();
1150
1151 return sk;
1152
1153out_sock_release:
1154 sock_release(sock);
77247bbb 1155 return NULL;
1156}
1157
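/*
 * A sketch of how a subsystem uses the factory above with this three-argument
 * signature (unit, input callback, owning module).  The callback runs in the
 * sender's context and simply drains sk_receive_queue; everything named
 * example_* is made up.
 */
#if 0	/* illustrative only */
static void example_input(struct sock *sk, int len)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		struct nlmsghdr *nlh = (struct nlmsghdr *)skb->data;

		/* validate nlh->nlmsg_len, dispatch on nlh->nlmsg_type ... */
		kfree_skb(skb);
	}
}

static struct sock *example_sk;

static int __init example_init(void)
{
	example_sk = netlink_kernel_create(NETLINK_USERSOCK, example_input,
					   THIS_MODULE);
	return example_sk ? 0 : -ENOMEM;
}
#endif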
1158void netlink_set_nonroot(int protocol, unsigned int flags)
1159{
1160 if ((unsigned int)protocol < MAX_LINKS)
1161 nl_table[protocol].nl_nonroot = flags;
1162}
1163
1164static void netlink_destroy_callback(struct netlink_callback *cb)
1165{
1166 if (cb->skb)
1167 kfree_skb(cb->skb);
1168 kfree(cb);
1169}
1170
1171/*
1172 * It looks a bit ugly.
1173 * It would be better to create a kernel thread.
1174 */
1175
1176static int netlink_dump(struct sock *sk)
1177{
1178 struct netlink_sock *nlk = nlk_sk(sk);
1179 struct netlink_callback *cb;
1180 struct sk_buff *skb;
1181 struct nlmsghdr *nlh;
1182 int len;
1183
1184 skb = sock_rmalloc(sk, NLMSG_GOODSIZE, 0, GFP_KERNEL);
1185 if (!skb)
1186 return -ENOBUFS;
1187
1188 spin_lock(&nlk->cb_lock);
1189
1190 cb = nlk->cb;
1191 if (cb == NULL) {
1192 spin_unlock(&nlk->cb_lock);
1193 kfree_skb(skb);
1194 return -EINVAL;
1195 }
1196
1197 len = cb->dump(skb, cb);
1198
1199 if (len > 0) {
1200 spin_unlock(&nlk->cb_lock);
1201 skb_queue_tail(&sk->sk_receive_queue, skb);
1202 sk->sk_data_ready(sk, len);
1203 return 0;
1204 }
1205
1797754e 1206 nlh = NLMSG_NEW_ANSWER(skb, cb, NLMSG_DONE, sizeof(len), NLM_F_MULTI);
1207 memcpy(NLMSG_DATA(nlh), &len, sizeof(len));
1208 skb_queue_tail(&sk->sk_receive_queue, skb);
1209 sk->sk_data_ready(sk, skb->len);
1210
1211 cb->done(cb);
1212 nlk->cb = NULL;
1213 spin_unlock(&nlk->cb_lock);
1214
1215 netlink_destroy_callback(cb);
1da177e4 1216 return 0;
1217
1218nlmsg_failure:
1219 return -ENOBUFS;
1220}
1221
1222int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
1223 struct nlmsghdr *nlh,
1224 int (*dump)(struct sk_buff *skb, struct netlink_callback*),
1225 int (*done)(struct netlink_callback*))
1226{
1227 struct netlink_callback *cb;
1228 struct sock *sk;
1229 struct netlink_sock *nlk;
1230
1231 cb = kmalloc(sizeof(*cb), GFP_KERNEL);
1232 if (cb == NULL)
1233 return -ENOBUFS;
1234
1235 memset(cb, 0, sizeof(*cb));
1236 cb->dump = dump;
1237 cb->done = done;
1238 cb->nlh = nlh;
1239 atomic_inc(&skb->users);
1240 cb->skb = skb;
1241
1242 sk = netlink_lookup(ssk->sk_protocol, NETLINK_CB(skb).pid);
1243 if (sk == NULL) {
1244 netlink_destroy_callback(cb);
1245 return -ECONNREFUSED;
1246 }
1247 nlk = nlk_sk(sk);
1248 /* A dump is in progress... */
1249 spin_lock(&nlk->cb_lock);
1250 if (nlk->cb) {
1251 spin_unlock(&nlk->cb_lock);
1252 netlink_destroy_callback(cb);
1253 sock_put(sk);
1254 return -EBUSY;
1255 }
1256 nlk->cb = cb;
1257 spin_unlock(&nlk->cb_lock);
1258
1259 netlink_dump(sk);
1260 sock_put(sk);
1261 return 0;
1262}
1263
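/*
 * A sketch of the calling convention for netlink_dump_start(): a message
 * handler that sees NLM_F_DUMP hands the request off and returns, and the
 * dump callback is re-invoked from netlink_recvmsg() as the reader drains
 * its socket.  The example_* names are made up.
 */
#if 0	/* illustrative only */
static int example_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	/* Append as many records as fit, using cb->args[] as the cursor.
	 * Return the number of bytes used, or 0 when the dump is complete. */
	return 0;
}

static int example_done(struct netlink_callback *cb)
{
	return 0;
}

static void example_rcv_msg(struct sock *ssk, struct sk_buff *skb,
			    struct nlmsghdr *nlh)
{
	if (nlh->nlmsg_flags & NLM_F_DUMP)
		netlink_dump_start(ssk, skb, nlh, example_dump, example_done);
}
#endif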
1264void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err)
1265{
1266 struct sk_buff *skb;
1267 struct nlmsghdr *rep;
1268 struct nlmsgerr *errmsg;
1269 int size;
1270
1271 if (err == 0)
1272 size = NLMSG_SPACE(sizeof(struct nlmsgerr));
1273 else
1274 size = NLMSG_SPACE(4 + NLMSG_ALIGN(nlh->nlmsg_len));
1275
1276 skb = alloc_skb(size, GFP_KERNEL);
1277 if (!skb) {
1278 struct sock *sk;
1279
1280 sk = netlink_lookup(in_skb->sk->sk_protocol,
1281 NETLINK_CB(in_skb).pid);
1282 if (sk) {
1283 sk->sk_err = ENOBUFS;
1284 sk->sk_error_report(sk);
1285 sock_put(sk);
1286 }
1287 return;
1288 }
1289
1290 rep = __nlmsg_put(skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq,
1797754e 1291 NLMSG_ERROR, sizeof(struct nlmsgerr), 0);
1292 errmsg = NLMSG_DATA(rep);
1293 errmsg->error = err;
1294 memcpy(&errmsg->msg, nlh, err ? nlh->nlmsg_len : sizeof(struct nlmsghdr));
1295 netlink_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).pid, MSG_DONTWAIT);
1296}
1297
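/*
 * A sketch of the ack convention: a handler calls netlink_ack() with err == 0
 * to confirm a request that carried NLM_F_ACK, or with a negative errno to
 * report failure (in which case the offending header is echoed back).
 */
#if 0	/* illustrative only */
static void example_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err)
{
	if (err || (nlh->nlmsg_flags & NLM_F_ACK))
		netlink_ack(in_skb, nlh, err);
}
#endif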
1298
1299#ifdef CONFIG_PROC_FS
1300struct nl_seq_iter {
1301 int link;
1302 int hash_idx;
1303};
1304
1305static struct sock *netlink_seq_socket_idx(struct seq_file *seq, loff_t pos)
1306{
1307 struct nl_seq_iter *iter = seq->private;
1308 int i, j;
1309 struct sock *s;
1310 struct hlist_node *node;
1311 loff_t off = 0;
1312
1313 for (i=0; i<MAX_LINKS; i++) {
1314 struct nl_pid_hash *hash = &nl_table[i].hash;
1315
1316 for (j = 0; j <= hash->mask; j++) {
1317 sk_for_each(s, node, &hash->table[j]) {
1318 if (off == pos) {
1319 iter->link = i;
1320 iter->hash_idx = j;
1321 return s;
1322 }
1323 ++off;
1324 }
1325 }
1326 }
1327 return NULL;
1328}
1329
1330static void *netlink_seq_start(struct seq_file *seq, loff_t *pos)
1331{
1332 read_lock(&nl_table_lock);
1333 return *pos ? netlink_seq_socket_idx(seq, *pos - 1) : SEQ_START_TOKEN;
1334}
1335
1336static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1337{
1338 struct sock *s;
1339 struct nl_seq_iter *iter;
1340 int i, j;
1341
1342 ++*pos;
1343
1344 if (v == SEQ_START_TOKEN)
1345 return netlink_seq_socket_idx(seq, 0);
1346
1347 s = sk_next(v);
1348 if (s)
1349 return s;
1350
1351 iter = seq->private;
1352 i = iter->link;
1353 j = iter->hash_idx + 1;
1354
1355 do {
1356 struct nl_pid_hash *hash = &nl_table[i].hash;
1357
1358 for (; j <= hash->mask; j++) {
1359 s = sk_head(&hash->table[j]);
1360 if (s) {
1361 iter->link = i;
1362 iter->hash_idx = j;
1363 return s;
1364 }
1365 }
1366
1367 j = 0;
1368 } while (++i < MAX_LINKS);
1369
1370 return NULL;
1371}
1372
1373static void netlink_seq_stop(struct seq_file *seq, void *v)
1374{
1375 read_unlock(&nl_table_lock);
1376}
1377
1378
1379static int netlink_seq_show(struct seq_file *seq, void *v)
1380{
1381 if (v == SEQ_START_TOKEN)
1382 seq_puts(seq,
1383 "sk Eth Pid Groups "
1384 "Rmem Wmem Dump Locks\n");
1385 else {
1386 struct sock *s = v;
1387 struct netlink_sock *nlk = nlk_sk(s);
1388
1389 seq_printf(seq, "%p %-3d %-6d %08x %-8d %-8d %p %d\n",
1390 s,
1391 s->sk_protocol,
1392 nlk->pid,
1393 nlk->flags & NETLINK_KERNEL_SOCKET ?
1394 0 : (unsigned int)nlk->groups[0],
1395 atomic_read(&s->sk_rmem_alloc),
1396 atomic_read(&s->sk_wmem_alloc),
1397 nlk->cb,
1398 atomic_read(&s->sk_refcnt)
1399 );
1400
1401 }
1402 return 0;
1403}
1404
1405static struct seq_operations netlink_seq_ops = {
1406 .start = netlink_seq_start,
1407 .next = netlink_seq_next,
1408 .stop = netlink_seq_stop,
1409 .show = netlink_seq_show,
1410};
1411
1412
1413static int netlink_seq_open(struct inode *inode, struct file *file)
1414{
1415 struct seq_file *seq;
1416 struct nl_seq_iter *iter;
1417 int err;
1418
1419 iter = kmalloc(sizeof(*iter), GFP_KERNEL);
1420 if (!iter)
1421 return -ENOMEM;
1422
1423 err = seq_open(file, &netlink_seq_ops);
1424 if (err) {
1425 kfree(iter);
1426 return err;
1427 }
1428
1429 memset(iter, 0, sizeof(*iter));
1430 seq = file->private_data;
1431 seq->private = iter;
1432 return 0;
1433}
1434
1435static struct file_operations netlink_seq_fops = {
1436 .owner = THIS_MODULE,
1437 .open = netlink_seq_open,
1438 .read = seq_read,
1439 .llseek = seq_lseek,
1440 .release = seq_release_private,
1441};
1442
1443#endif
1444
1445int netlink_register_notifier(struct notifier_block *nb)
1446{
1447 return notifier_chain_register(&netlink_chain, nb);
1448}
1449
1450int netlink_unregister_notifier(struct notifier_block *nb)
1451{
1452 return notifier_chain_unregister(&netlink_chain, nb);
1453}
1454
1455static struct proto_ops netlink_ops = {
1456 .family = PF_NETLINK,
1457 .owner = THIS_MODULE,
1458 .release = netlink_release,
1459 .bind = netlink_bind,
1460 .connect = netlink_connect,
1461 .socketpair = sock_no_socketpair,
1462 .accept = sock_no_accept,
1463 .getname = netlink_getname,
1464 .poll = datagram_poll,
1465 .ioctl = sock_no_ioctl,
1466 .listen = sock_no_listen,
1467 .shutdown = sock_no_shutdown,
1468 .setsockopt = sock_no_setsockopt,
1469 .getsockopt = sock_no_getsockopt,
1470 .sendmsg = netlink_sendmsg,
1471 .recvmsg = netlink_recvmsg,
1472 .mmap = sock_no_mmap,
1473 .sendpage = sock_no_sendpage,
1474};
1475
1476static struct net_proto_family netlink_family_ops = {
1477 .family = PF_NETLINK,
1478 .create = netlink_create,
1479 .owner = THIS_MODULE, /* for consistency 8) */
1480};
1481
1482extern void netlink_skb_parms_too_large(void);
1483
1484static int __init netlink_proto_init(void)
1485{
1486 struct sk_buff *dummy_skb;
1487 int i;
1488 unsigned long max;
1489 unsigned int order;
1490 int err = proto_register(&netlink_proto, 0);
1491
1492 if (err != 0)
1493 goto out;
1494
1495 if (sizeof(struct netlink_skb_parms) > sizeof(dummy_skb->cb))
1496 netlink_skb_parms_too_large();
1497
1498 nl_table = kmalloc(sizeof(*nl_table) * MAX_LINKS, GFP_KERNEL);
1499 if (!nl_table) {
1500enomem:
1501 printk(KERN_CRIT "netlink_init: Cannot allocate nl_table\n");
1502 return -ENOMEM;
1503 }
1504
1505 memset(nl_table, 0, sizeof(*nl_table) * MAX_LINKS);
1506
1507 if (num_physpages >= (128 * 1024))
1508 max = num_physpages >> (21 - PAGE_SHIFT);
1509 else
1510 max = num_physpages >> (23 - PAGE_SHIFT);
1511
1512 order = get_bitmask_order(max) - 1 + PAGE_SHIFT;
1513 max = (1UL << order) / sizeof(struct hlist_head);
1514 order = get_bitmask_order(max > UINT_MAX ? UINT_MAX : max) - 1;
1515
1516 for (i = 0; i < MAX_LINKS; i++) {
1517 struct nl_pid_hash *hash = &nl_table[i].hash;
1518
1519 hash->table = nl_pid_hash_alloc(1 * sizeof(*hash->table));
1520 if (!hash->table) {
1521 while (i-- > 0)
1522 nl_pid_hash_free(nl_table[i].hash.table,
1523 1 * sizeof(*hash->table));
1524 kfree(nl_table);
1525 goto enomem;
1526 }
1527 memset(hash->table, 0, 1 * sizeof(*hash->table));
1528 hash->max_shift = order;
1529 hash->shift = 0;
1530 hash->mask = 0;
1531 hash->rehash_time = jiffies;
1532 }
1533
1534 sock_register(&netlink_family_ops);
1535#ifdef CONFIG_PROC_FS
1536 proc_net_fops_create("netlink", 0, &netlink_seq_fops);
1537#endif
1538 /* The netlink device handler may be needed early. */
1539 rtnetlink_init();
1540out:
1541 return err;
1542}
1543
1da177e4 1544core_initcall(netlink_proto_init);
1545
1546EXPORT_SYMBOL(netlink_ack);
1547EXPORT_SYMBOL(netlink_broadcast);
1548EXPORT_SYMBOL(netlink_dump_start);
1549EXPORT_SYMBOL(netlink_kernel_create);
1550EXPORT_SYMBOL(netlink_register_notifier);
1551EXPORT_SYMBOL(netlink_set_err);
1552EXPORT_SYMBOL(netlink_set_nonroot);
1553EXPORT_SYMBOL(netlink_unicast);
1554EXPORT_SYMBOL(netlink_unregister_notifier);
1555