/*
 * This is a module which is used for queueing IPv4 packets and
 * communicating with userspace via netlink.
 *
 * (C) 2000-2002 James Morris <jmorris@intercode.com.au>
 * (C) 2003-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
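
/*
 * Overview: this module registers itself as the PF_INET queue handler
 * and forwards queued packets to a single userspace peer over a
 * NETLINK_FIREWALL socket as IPQM_PACKET messages.  The peer replies
 * with IPQM_VERDICT messages, optionally carrying a replacement
 * payload, which are reinjected into the stack via nf_reinject().
 */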
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/ip.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4/ip_queue.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <linux/netlink.h>
#include <linux/spinlock.h>
#include <linux/sysctl.h>
#include <linux/proc_fs.h>
#include <linux/security.h>
#include <linux/mutex.h>
#include <net/sock.h>
#include <net/route.h>

#define IPQ_QMAX_DEFAULT 1024
#define IPQ_PROC_FS_NAME "ip_queue"
#define NET_IPQ_QMAX 2088
#define NET_IPQ_QMAX_NAME "ip_queue_maxlen"

struct ipq_queue_entry {
        struct list_head list;
        struct nf_info *info;
        struct sk_buff *skb;
};

typedef int (*ipq_cmpfn)(struct ipq_queue_entry *, unsigned long);

static unsigned char copy_mode __read_mostly = IPQ_COPY_NONE;
static unsigned int queue_maxlen __read_mostly = IPQ_QMAX_DEFAULT;
static DEFINE_RWLOCK(queue_lock);
static int peer_pid __read_mostly;
static unsigned int copy_range __read_mostly;
static unsigned int queue_total;
static unsigned int queue_dropped = 0;
static unsigned int queue_user_dropped = 0;
static struct sock *ipqnl __read_mostly;
static LIST_HEAD(queue_list);
static DEFINE_MUTEX(ipqnl_mutex);
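
/*
 * Locking: queue_lock protects queue_list, the counters and the copy
 * mode/range settings; ipqnl_mutex serialises processing of messages
 * arriving on the netlink socket.
 */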

static void
ipq_issue_verdict(struct ipq_queue_entry *entry, int verdict)
{
        /* The TCP input path (and probably other bits) assumes it is
         * called from softirq context rather than from a syscall, as
         * ipq_issue_verdict is; otherwise it can deadlock with locks
         * taken from the timer softirq, for example.  Emulate softirq
         * context here with local_bh_disable(). */

        local_bh_disable();
        nf_reinject(entry->skb, entry->info, verdict);
        local_bh_enable();

        kfree(entry);
}

static inline void
__ipq_enqueue_entry(struct ipq_queue_entry *entry)
{
        list_add(&entry->list, &queue_list);
        queue_total++;
}

/*
 * Find and return a queued entry matched by cmpfn, or return the last
 * entry if cmpfn is NULL.
 */
static inline struct ipq_queue_entry *
__ipq_find_entry(ipq_cmpfn cmpfn, unsigned long data)
{
        struct list_head *p;

        list_for_each_prev(p, &queue_list) {
                struct ipq_queue_entry *entry = (struct ipq_queue_entry *)p;

                if (!cmpfn || cmpfn(entry, data))
                        return entry;
        }
        return NULL;
}

static inline void
__ipq_dequeue_entry(struct ipq_queue_entry *entry)
{
        list_del(&entry->list);
        queue_total--;
}

static inline struct ipq_queue_entry *
__ipq_find_dequeue_entry(ipq_cmpfn cmpfn, unsigned long data)
{
        struct ipq_queue_entry *entry;

        entry = __ipq_find_entry(cmpfn, data);
        if (entry == NULL)
                return NULL;

        __ipq_dequeue_entry(entry);
        return entry;
}


static inline void
__ipq_flush(int verdict)
{
        struct ipq_queue_entry *entry;

        while ((entry = __ipq_find_dequeue_entry(NULL, 0)))
                ipq_issue_verdict(entry, verdict);
}

static inline int
__ipq_set_mode(unsigned char mode, unsigned int range)
{
        int status = 0;

        switch (mode) {
        case IPQ_COPY_NONE:
        case IPQ_COPY_META:
                copy_mode = mode;
                copy_range = 0;
                break;

        case IPQ_COPY_PACKET:
                copy_mode = mode;
                copy_range = range;
                if (copy_range > 0xFFFF)
                        copy_range = 0xFFFF;
                break;

        default:
                status = -EINVAL;

        }
        return status;
}

static inline void
__ipq_reset(void)
{
        peer_pid = 0;
        net_disable_timestamp();
        __ipq_set_mode(IPQ_COPY_NONE, 0);
        __ipq_flush(NF_DROP);
}

static struct ipq_queue_entry *
ipq_find_dequeue_entry(ipq_cmpfn cmpfn, unsigned long data)
{
        struct ipq_queue_entry *entry;

        write_lock_bh(&queue_lock);
        entry = __ipq_find_dequeue_entry(cmpfn, data);
        write_unlock_bh(&queue_lock);
        return entry;
}

static void
ipq_flush(int verdict)
{
        write_lock_bh(&queue_lock);
        __ipq_flush(verdict);
        write_unlock_bh(&queue_lock);
}

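/*
 * Build an IPQM_PACKET message for the peer: an nlmsghdr followed by
 * struct ipq_packet_msg and, in IPQ_COPY_PACKET mode, up to copy_range
 * bytes of packet data.  The entry's kernel address doubles as the
 * packet_id that userspace must echo back in its verdict.
 */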
static struct sk_buff *
ipq_build_packet_message(struct ipq_queue_entry *entry, int *errp)
{
        sk_buff_data_t old_tail;
        size_t size = 0;
        size_t data_len = 0;
        struct sk_buff *skb;
        struct ipq_packet_msg *pmsg;
        struct nlmsghdr *nlh;
        struct timeval tv;

        read_lock_bh(&queue_lock);

        switch (copy_mode) {
        case IPQ_COPY_META:
        case IPQ_COPY_NONE:
                size = NLMSG_SPACE(sizeof(*pmsg));
                data_len = 0;
                break;

        case IPQ_COPY_PACKET:
                if ((entry->skb->ip_summed == CHECKSUM_PARTIAL ||
                     entry->skb->ip_summed == CHECKSUM_COMPLETE) &&
                    (*errp = skb_checksum_help(entry->skb))) {
                        read_unlock_bh(&queue_lock);
                        return NULL;
                }
                if (copy_range == 0 || copy_range > entry->skb->len)
                        data_len = entry->skb->len;
                else
                        data_len = copy_range;

                size = NLMSG_SPACE(sizeof(*pmsg) + data_len);
                break;

        default:
                *errp = -EINVAL;
                read_unlock_bh(&queue_lock);
                return NULL;
        }

        read_unlock_bh(&queue_lock);

        skb = alloc_skb(size, GFP_ATOMIC);
        if (!skb)
                goto nlmsg_failure;

        old_tail = skb->tail;
        nlh = NLMSG_PUT(skb, 0, 0, IPQM_PACKET, size - sizeof(*nlh));
        pmsg = NLMSG_DATA(nlh);
        memset(pmsg, 0, sizeof(*pmsg));

        pmsg->packet_id = (unsigned long)entry;
        pmsg->data_len = data_len;
        tv = ktime_to_timeval(entry->skb->tstamp);
        pmsg->timestamp_sec = tv.tv_sec;
        pmsg->timestamp_usec = tv.tv_usec;
        pmsg->mark = entry->skb->mark;
        pmsg->hook = entry->info->hook;
        pmsg->hw_protocol = entry->skb->protocol;

        if (entry->info->indev)
                strcpy(pmsg->indev_name, entry->info->indev->name);
        else
                pmsg->indev_name[0] = '\0';

        if (entry->info->outdev)
                strcpy(pmsg->outdev_name, entry->info->outdev->name);
        else
                pmsg->outdev_name[0] = '\0';

        if (entry->info->indev && entry->skb->dev) {
                pmsg->hw_type = entry->skb->dev->type;
                if (entry->skb->dev->hard_header_parse)
                        pmsg->hw_addrlen =
                                entry->skb->dev->hard_header_parse(entry->skb,
                                                                   pmsg->hw_addr);
        }

        if (data_len)
                if (skb_copy_bits(entry->skb, 0, pmsg->payload, data_len))
                        BUG();

        nlh->nlmsg_len = skb->tail - old_tail;
        return skb;

nlmsg_failure:
        if (skb)
                kfree_skb(skb);
        *errp = -EINVAL;
        printk(KERN_ERR "ip_queue: error creating packet message\n");
        return NULL;
}

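/*
 * Queue handler callback (nfqh.outfn): wrap the packet in a queue
 * entry, unicast an IPQM_PACKET message to the peer and keep the entry
 * on queue_list until a verdict arrives or the queue is flushed.
 */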
static int
ipq_enqueue_packet(struct sk_buff *skb, struct nf_info *info,
                   unsigned int queuenum, void *data)
{
        int status = -EINVAL;
        struct sk_buff *nskb;
        struct ipq_queue_entry *entry;

        if (copy_mode == IPQ_COPY_NONE)
                return -EAGAIN;

        entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
        if (entry == NULL) {
                printk(KERN_ERR "ip_queue: OOM in ipq_enqueue_packet()\n");
                return -ENOMEM;
        }

        entry->info = info;
        entry->skb = skb;

        nskb = ipq_build_packet_message(entry, &status);
        if (nskb == NULL)
                goto err_out_free;

        write_lock_bh(&queue_lock);

        if (!peer_pid)
                goto err_out_free_nskb;

        if (queue_total >= queue_maxlen) {
                queue_dropped++;
                status = -ENOSPC;
                if (net_ratelimit())
                        printk(KERN_WARNING "ip_queue: full at %d entries, "
                               "dropping packet(s). Dropped: %d\n", queue_total,
                               queue_dropped);
                goto err_out_free_nskb;
        }

        /* netlink_unicast will either free the nskb or attach it to a socket */
        status = netlink_unicast(ipqnl, nskb, peer_pid, MSG_DONTWAIT);
        if (status < 0) {
                queue_user_dropped++;
                goto err_out_unlock;
        }

        __ipq_enqueue_entry(entry);

        write_unlock_bh(&queue_lock);
        return status;

err_out_free_nskb:
        kfree_skb(nskb);

err_out_unlock:
        write_unlock_bh(&queue_lock);

err_out_free:
        kfree(entry);
        return status;
}

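/*
 * Replace the queued packet's payload with the one supplied in the
 * verdict message, trimming or expanding the skb as needed.
 */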
static int
ipq_mangle_ipv4(ipq_verdict_msg_t *v, struct ipq_queue_entry *e)
{
        int diff;
        struct iphdr *user_iph = (struct iphdr *)v->payload;

        if (v->data_len < sizeof(*user_iph))
                return 0;
        diff = v->data_len - e->skb->len;
        if (diff < 0) {
                if (pskb_trim(e->skb, v->data_len))
                        return -ENOMEM;
        } else if (diff > 0) {
                if (v->data_len > 0xFFFF)
                        return -EINVAL;
                if (diff > skb_tailroom(e->skb)) {
                        struct sk_buff *newskb;

                        newskb = skb_copy_expand(e->skb,
                                                 skb_headroom(e->skb),
                                                 diff,
                                                 GFP_ATOMIC);
                        if (newskb == NULL) {
                                printk(KERN_WARNING "ip_queue: OOM "
                                       "in mangle, dropping packet\n");
                                return -ENOMEM;
                        }
                        if (e->skb->sk)
                                skb_set_owner_w(newskb, e->skb->sk);
                        kfree_skb(e->skb);
                        e->skb = newskb;
                }
                skb_put(e->skb, diff);
        }
        if (!skb_make_writable(&e->skb, v->data_len))
                return -ENOMEM;
        skb_copy_to_linear_data(e->skb, v->payload, v->data_len);
        e->skb->ip_summed = CHECKSUM_NONE;

        return 0;
}

static inline int
id_cmp(struct ipq_queue_entry *e, unsigned long id)
{
        return (id == (unsigned long)e);
}

static int
ipq_set_verdict(struct ipq_verdict_msg *vmsg, unsigned int len)
{
        struct ipq_queue_entry *entry;

        if (vmsg->value > NF_MAX_VERDICT)
                return -EINVAL;

        entry = ipq_find_dequeue_entry(id_cmp, vmsg->id);
        if (entry == NULL)
                return -ENOENT;
        else {
                int verdict = vmsg->value;

                if (vmsg->data_len && vmsg->data_len == len)
                        if (ipq_mangle_ipv4(vmsg, entry) < 0)
                                verdict = NF_DROP;

                ipq_issue_verdict(entry, verdict);
                return 0;
        }
}

static int
ipq_set_mode(unsigned char mode, unsigned int range)
{
        int status;

        write_lock_bh(&queue_lock);
        status = __ipq_set_mode(mode, range);
        write_unlock_bh(&queue_lock);
        return status;
}

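/* Dispatch an IPQM_MODE or IPQM_VERDICT request from the peer. */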
static int
ipq_receive_peer(struct ipq_peer_msg *pmsg,
                 unsigned char type, unsigned int len)
{
        int status = 0;

        if (len < sizeof(*pmsg))
                return -EINVAL;

        switch (type) {
        case IPQM_MODE:
                status = ipq_set_mode(pmsg->msg.mode.value,
                                      pmsg->msg.mode.range);
                break;

        case IPQM_VERDICT:
                if (pmsg->msg.verdict.value > NF_MAX_VERDICT)
                        status = -EINVAL;
                else
                        status = ipq_set_verdict(&pmsg->msg.verdict,
                                                 len - sizeof(*pmsg));
                break;
        default:
                status = -EINVAL;
        }
        return status;
}

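/*
 * Match a queued entry against an interface index, including bridge
 * ports when bridge netfilter is enabled.
 */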
static int
dev_cmp(struct ipq_queue_entry *entry, unsigned long ifindex)
{
        if (entry->info->indev)
                if (entry->info->indev->ifindex == ifindex)
                        return 1;
        if (entry->info->outdev)
                if (entry->info->outdev->ifindex == ifindex)
                        return 1;
#ifdef CONFIG_BRIDGE_NETFILTER
        if (entry->skb->nf_bridge) {
                if (entry->skb->nf_bridge->physindev &&
                    entry->skb->nf_bridge->physindev->ifindex == ifindex)
                        return 1;
                if (entry->skb->nf_bridge->physoutdev &&
                    entry->skb->nf_bridge->physoutdev->ifindex == ifindex)
                        return 1;
        }
#endif
        return 0;
}

static void
ipq_dev_drop(int ifindex)
{
        struct ipq_queue_entry *entry;

        while ((entry = ipq_find_dequeue_entry(dev_cmp, ifindex)) != NULL)
                ipq_issue_verdict(entry, NF_DROP);
}

#define RCV_SKB_FAIL(err) do { netlink_ack(skb, nlh, (err)); return; } while (0)

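/*
 * Handle one netlink message from userspace: validate the header,
 * require CAP_NET_ADMIN, bind the first sender as the peer and hand the
 * payload to ipq_receive_peer().
 */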
static inline void
ipq_rcv_skb(struct sk_buff *skb)
{
        int status, type, pid, flags, nlmsglen, skblen;
        struct nlmsghdr *nlh;

        skblen = skb->len;
        if (skblen < sizeof(*nlh))
                return;

        nlh = nlmsg_hdr(skb);
        nlmsglen = nlh->nlmsg_len;
        if (nlmsglen < sizeof(*nlh) || skblen < nlmsglen)
                return;

        pid = nlh->nlmsg_pid;
        flags = nlh->nlmsg_flags;

        if (pid <= 0 || !(flags & NLM_F_REQUEST) || flags & NLM_F_MULTI)
                RCV_SKB_FAIL(-EINVAL);

        if (flags & MSG_TRUNC)
                RCV_SKB_FAIL(-ECOMM);

        type = nlh->nlmsg_type;
        if (type < NLMSG_NOOP || type >= IPQM_MAX)
                RCV_SKB_FAIL(-EINVAL);

        if (type <= IPQM_BASE)
                return;

        if (security_netlink_recv(skb, CAP_NET_ADMIN))
                RCV_SKB_FAIL(-EPERM);

        write_lock_bh(&queue_lock);

        if (peer_pid) {
                if (peer_pid != pid) {
                        write_unlock_bh(&queue_lock);
                        RCV_SKB_FAIL(-EBUSY);
                }
        } else {
                net_enable_timestamp();
                peer_pid = pid;
        }

        write_unlock_bh(&queue_lock);

        status = ipq_receive_peer(NLMSG_DATA(nlh), type,
                                  nlmsglen - NLMSG_LENGTH(0));
        if (status < 0)
                RCV_SKB_FAIL(status);

        if (flags & NLM_F_ACK)
                netlink_ack(skb, nlh, 0);
        return;
}

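/* Netlink data-ready callback: drain the socket's receive queue. */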
static void
ipq_rcv_sk(struct sock *sk, int len)
{
        struct sk_buff *skb;
        unsigned int qlen;

        mutex_lock(&ipqnl_mutex);

        for (qlen = skb_queue_len(&sk->sk_receive_queue); qlen; qlen--) {
                skb = skb_dequeue(&sk->sk_receive_queue);
                ipq_rcv_skb(skb);
                kfree_skb(skb);
        }

        mutex_unlock(&ipqnl_mutex);
}

static int
ipq_rcv_dev_event(struct notifier_block *this,
                  unsigned long event, void *ptr)
{
        struct net_device *dev = ptr;

        /* Drop any packets associated with the downed device */
        if (event == NETDEV_DOWN)
                ipq_dev_drop(dev->ifindex);
        return NOTIFY_DONE;
}

static struct notifier_block ipq_dev_notifier = {
        .notifier_call = ipq_rcv_dev_event,
};

static int
ipq_rcv_nl_event(struct notifier_block *this,
                 unsigned long event, void *ptr)
{
        struct netlink_notify *n = ptr;

        if (event == NETLINK_URELEASE &&
            n->protocol == NETLINK_FIREWALL && n->pid) {
                write_lock_bh(&queue_lock);
                if (n->pid == peer_pid)
                        __ipq_reset();
                write_unlock_bh(&queue_lock);
        }
        return NOTIFY_DONE;
}

static struct notifier_block ipq_nl_notifier = {
        .notifier_call = ipq_rcv_nl_event,
};

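/* sysctl net.ipv4.ip_queue_maxlen: cap on the number of queued packets. */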
static struct ctl_table_header *ipq_sysctl_header;

static ctl_table ipq_table[] = {
        {
                .ctl_name = NET_IPQ_QMAX,
                .procname = NET_IPQ_QMAX_NAME,
                .data = &queue_maxlen,
                .maxlen = sizeof(queue_maxlen),
                .mode = 0644,
                .proc_handler = proc_dointvec
        },
        { .ctl_name = 0 }
};

static ctl_table ipq_dir_table[] = {
        {
                .ctl_name = NET_IPV4,
                .procname = "ipv4",
                .mode = 0555,
                .child = ipq_table
        },
        { .ctl_name = 0 }
};

static ctl_table ipq_root_table[] = {
        {
                .ctl_name = CTL_NET,
                .procname = "net",
                .mode = 0555,
                .child = ipq_dir_table
        },
        { .ctl_name = 0 }
};

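/* /proc/net/ip_queue: peer pid, copy settings and queue statistics. */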
#ifdef CONFIG_PROC_FS
static int
ipq_get_info(char *buffer, char **start, off_t offset, int length)
{
        int len;

        read_lock_bh(&queue_lock);

        len = sprintf(buffer,
                      "Peer PID          : %d\n"
                      "Copy mode         : %hu\n"
                      "Copy range        : %u\n"
                      "Queue length      : %u\n"
                      "Queue max. length : %u\n"
                      "Queue dropped     : %u\n"
                      "Netlink dropped   : %u\n",
                      peer_pid,
                      copy_mode,
                      copy_range,
                      queue_total,
                      queue_maxlen,
                      queue_dropped,
                      queue_user_dropped);

        read_unlock_bh(&queue_lock);

        *start = buffer + offset;
        len -= offset;
        if (len > length)
                len = length;
        else if (len < 0)
                len = 0;
        return len;
}
#endif /* CONFIG_PROC_FS */

static struct nf_queue_handler nfqh = {
        .name = "ip_queue",
        .outfn = &ipq_enqueue_packet,
};

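/*
 * Module init: create the netlink socket, /proc entry, sysctl table and
 * netdevice notifier, then register as the PF_INET queue handler;
 * failures unwind in reverse order via the cleanup labels.
 */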
static int __init ip_queue_init(void)
{
        int status = -ENOMEM;
        struct proc_dir_entry *proc;

        netlink_register_notifier(&ipq_nl_notifier);
        ipqnl = netlink_kernel_create(NETLINK_FIREWALL, 0, ipq_rcv_sk,
                                      NULL, THIS_MODULE);
        if (ipqnl == NULL) {
                printk(KERN_ERR "ip_queue: failed to create netlink socket\n");
                goto cleanup_netlink_notifier;
        }

        proc = proc_net_create(IPQ_PROC_FS_NAME, 0, ipq_get_info);
        if (proc)
                proc->owner = THIS_MODULE;
        else {
                printk(KERN_ERR "ip_queue: failed to create proc entry\n");
                goto cleanup_ipqnl;
        }

        register_netdevice_notifier(&ipq_dev_notifier);
        ipq_sysctl_header = register_sysctl_table(ipq_root_table);

        status = nf_register_queue_handler(PF_INET, &nfqh);
        if (status < 0) {
                printk(KERN_ERR "ip_queue: failed to register queue handler\n");
                goto cleanup_sysctl;
        }
        return status;

cleanup_sysctl:
        unregister_sysctl_table(ipq_sysctl_header);
        unregister_netdevice_notifier(&ipq_dev_notifier);
        proc_net_remove(IPQ_PROC_FS_NAME);

cleanup_ipqnl:
        sock_release(ipqnl->sk_socket);
        mutex_lock(&ipqnl_mutex);
        mutex_unlock(&ipqnl_mutex);

cleanup_netlink_notifier:
        netlink_unregister_notifier(&ipq_nl_notifier);
        return status;
}

static void __exit ip_queue_fini(void)
{
        nf_unregister_queue_handlers(&nfqh);
        synchronize_net();
        ipq_flush(NF_DROP);

        unregister_sysctl_table(ipq_sysctl_header);
        unregister_netdevice_notifier(&ipq_dev_notifier);
        proc_net_remove(IPQ_PROC_FS_NAME);

        sock_release(ipqnl->sk_socket);
        mutex_lock(&ipqnl_mutex);
        mutex_unlock(&ipqnl_mutex);

        netlink_unregister_notifier(&ipq_nl_notifier);
}

MODULE_DESCRIPTION("IPv4 packet queue handler");
MODULE_AUTHOR("James Morris <jmorris@intercode.com.au>");
MODULE_LICENSE("GPL");

module_init(ip_queue_init);
module_exit(ip_queue_fini);