net/ipv4/netfilter/ip_queue.c
/*
 * This is a module which is used for queueing IPv4 packets and
 * communicating with userspace via netlink.
 *
 * (C) 2000-2002 James Morris <jmorris@intercode.com.au>
 * (C) 2003-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/ip.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4/ip_queue.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <linux/netlink.h>
#include <linux/spinlock.h>
#include <linux/sysctl.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/security.h>
#include <linux/mutex.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/route.h>
#include <net/netfilter/nf_queue.h>
#include <net/ip.h>

#define IPQ_QMAX_DEFAULT 1024
#define IPQ_PROC_FS_NAME "ip_queue"
#define NET_IPQ_QMAX 2088
#define NET_IPQ_QMAX_NAME "ip_queue_maxlen"

typedef int (*ipq_cmpfn)(struct nf_queue_entry *, unsigned long);

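/*
 * State shared with the userspace peer.  queue_lock protects the entry
 * list, the counters, the copy mode/range and peer_pid; ipqnl_mutex
 * serialises processing of messages arriving on the netlink socket.
 */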
static unsigned char copy_mode __read_mostly = IPQ_COPY_NONE;
static unsigned int queue_maxlen __read_mostly = IPQ_QMAX_DEFAULT;
static DEFINE_RWLOCK(queue_lock);
static int peer_pid __read_mostly;
static unsigned int copy_range __read_mostly;
static unsigned int queue_total;
static unsigned int queue_dropped = 0;
static unsigned int queue_user_dropped = 0;
static struct sock *ipqnl __read_mostly;
static LIST_HEAD(queue_list);
static DEFINE_MUTEX(ipqnl_mutex);

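/* Add an entry to the tail of the queue; caller must hold queue_lock. */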
static inline void
__ipq_enqueue_entry(struct nf_queue_entry *entry)
{
        list_add_tail(&entry->list, &queue_list);
        queue_total++;
}

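/*
 * Switch to the copy mode requested by the peer.  For IPQ_COPY_PACKET the
 * copy range is clamped to 0xFFFF.  Caller must hold queue_lock.
 */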
static inline int
__ipq_set_mode(unsigned char mode, unsigned int range)
{
        int status = 0;

        switch (mode) {
        case IPQ_COPY_NONE:
        case IPQ_COPY_META:
                copy_mode = mode;
                copy_range = 0;
                break;

        case IPQ_COPY_PACKET:
                copy_mode = mode;
                copy_range = range;
                if (copy_range > 0xFFFF)
                        copy_range = 0xFFFF;
                break;

        default:
                status = -EINVAL;

        }
        return status;
}

static void __ipq_flush(ipq_cmpfn cmpfn, unsigned long data);

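/*
 * Forget the current peer, stop timestamping, fall back to IPQ_COPY_NONE
 * and drop everything still queued.  Caller must hold queue_lock.
 */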
static inline void
__ipq_reset(void)
{
        peer_pid = 0;
        net_disable_timestamp();
        __ipq_set_mode(IPQ_COPY_NONE, 0);
        __ipq_flush(NULL, 0);
}

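/*
 * Find the queued entry whose address matches the packet id handed out to
 * userspace, unlink it and return it, or NULL if it is no longer queued.
 */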
static struct nf_queue_entry *
ipq_find_dequeue_entry(unsigned long id)
{
        struct nf_queue_entry *entry = NULL, *i;

        write_lock_bh(&queue_lock);

        list_for_each_entry(i, &queue_list, list) {
                if ((unsigned long)i == id) {
                        entry = i;
                        break;
                }
        }

        if (entry) {
                list_del(&entry->list);
                queue_total--;
        }

        write_unlock_bh(&queue_lock);
        return entry;
}

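/*
 * Reinject every queued entry matching cmpfn (or every entry when cmpfn is
 * NULL) with an NF_DROP verdict.  Caller must hold queue_lock.
 */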
static void
__ipq_flush(ipq_cmpfn cmpfn, unsigned long data)
{
        struct nf_queue_entry *entry, *next;

        list_for_each_entry_safe(entry, next, &queue_list, list) {
                if (!cmpfn || cmpfn(entry, data)) {
                        list_del(&entry->list);
                        queue_total--;
                        nf_reinject(entry, NF_DROP);
                }
        }
}

static void
ipq_flush(ipq_cmpfn cmpfn, unsigned long data)
{
        write_lock_bh(&queue_lock);
        __ipq_flush(cmpfn, data);
        write_unlock_bh(&queue_lock);
}

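/*
 * Build an IPQM_PACKET netlink message describing @entry according to the
 * current copy mode.  Returns a freshly allocated skb, or NULL with *errp
 * set on failure.
 */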
static struct sk_buff *
ipq_build_packet_message(struct nf_queue_entry *entry, int *errp)
{
        sk_buff_data_t old_tail;
        size_t size = 0;
        size_t data_len = 0;
        struct sk_buff *skb;
        struct ipq_packet_msg *pmsg;
        struct nlmsghdr *nlh;
        struct timeval tv;

        read_lock_bh(&queue_lock);

        switch (copy_mode) {
        case IPQ_COPY_META:
        case IPQ_COPY_NONE:
                size = NLMSG_SPACE(sizeof(*pmsg));
                data_len = 0;
                break;

        case IPQ_COPY_PACKET:
                if ((entry->skb->ip_summed == CHECKSUM_PARTIAL ||
                     entry->skb->ip_summed == CHECKSUM_COMPLETE) &&
                    (*errp = skb_checksum_help(entry->skb))) {
                        read_unlock_bh(&queue_lock);
                        return NULL;
                }
                if (copy_range == 0 || copy_range > entry->skb->len)
                        data_len = entry->skb->len;
                else
                        data_len = copy_range;

                size = NLMSG_SPACE(sizeof(*pmsg) + data_len);
                break;

        default:
                *errp = -EINVAL;
                read_unlock_bh(&queue_lock);
                return NULL;
        }

        read_unlock_bh(&queue_lock);

        skb = alloc_skb(size, GFP_ATOMIC);
        if (!skb)
                goto nlmsg_failure;

        old_tail = skb->tail;
        nlh = NLMSG_PUT(skb, 0, 0, IPQM_PACKET, size - sizeof(*nlh));
        pmsg = NLMSG_DATA(nlh);
        memset(pmsg, 0, sizeof(*pmsg));

        pmsg->packet_id = (unsigned long)entry;
        pmsg->data_len = data_len;
        tv = ktime_to_timeval(entry->skb->tstamp);
        pmsg->timestamp_sec = tv.tv_sec;
        pmsg->timestamp_usec = tv.tv_usec;
        pmsg->mark = entry->skb->mark;
        pmsg->hook = entry->hook;
        pmsg->hw_protocol = entry->skb->protocol;

        if (entry->indev)
                strcpy(pmsg->indev_name, entry->indev->name);
        else
                pmsg->indev_name[0] = '\0';

        if (entry->outdev)
                strcpy(pmsg->outdev_name, entry->outdev->name);
        else
                pmsg->outdev_name[0] = '\0';

        if (entry->indev && entry->skb->dev) {
                pmsg->hw_type = entry->skb->dev->type;
                pmsg->hw_addrlen = dev_parse_header(entry->skb,
                                                    pmsg->hw_addr);
        }

        if (data_len)
                if (skb_copy_bits(entry->skb, 0, pmsg->payload, data_len))
                        BUG();

        nlh->nlmsg_len = skb->tail - old_tail;
        return skb;

nlmsg_failure:
        if (skb)
                kfree_skb(skb);
        *errp = -EINVAL;
        printk(KERN_ERR "ip_queue: error creating packet message\n");
        return NULL;
}

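/*
 * nf_queue handler: send the packet metadata (and, depending on the copy
 * mode, its payload) to the peer and keep the entry queued until a verdict
 * arrives.  The message is discarded and an error returned when no peer is
 * registered or the queue is already full.
 */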
static int
ipq_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
{
        int status = -EINVAL;
        struct sk_buff *nskb;

        if (copy_mode == IPQ_COPY_NONE)
                return -EAGAIN;

        nskb = ipq_build_packet_message(entry, &status);
        if (nskb == NULL)
                return status;

        write_lock_bh(&queue_lock);

        if (!peer_pid)
                goto err_out_free_nskb;

        if (queue_total >= queue_maxlen) {
                queue_dropped++;
                status = -ENOSPC;
                if (net_ratelimit())
                        printk(KERN_WARNING "ip_queue: full at %d entries, "
                               "dropping packet(s). Dropped: %d\n", queue_total,
                               queue_dropped);
                goto err_out_free_nskb;
        }

        /* netlink_unicast will either free the nskb or attach it to a socket */
        status = netlink_unicast(ipqnl, nskb, peer_pid, MSG_DONTWAIT);
        if (status < 0) {
                queue_user_dropped++;
                goto err_out_unlock;
        }

        __ipq_enqueue_entry(entry);

        write_unlock_bh(&queue_lock);
        return status;

err_out_free_nskb:
        kfree_skb(nskb);

err_out_unlock:
        write_unlock_bh(&queue_lock);
        return status;
}

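/*
 * Replace the payload of the queued packet with the data supplied by the
 * peer in its verdict message, growing or trimming the skb as needed.
 */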
static int
ipq_mangle_ipv4(ipq_verdict_msg_t *v, struct nf_queue_entry *e)
{
        int diff;
        struct iphdr *user_iph = (struct iphdr *)v->payload;
        struct sk_buff *nskb;

        if (v->data_len < sizeof(*user_iph))
                return 0;
        diff = v->data_len - e->skb->len;
        if (diff < 0) {
                if (pskb_trim(e->skb, v->data_len))
                        return -ENOMEM;
        } else if (diff > 0) {
                if (v->data_len > 0xFFFF)
                        return -EINVAL;
                if (diff > skb_tailroom(e->skb)) {
                        nskb = skb_copy_expand(e->skb, 0,
                                               diff - skb_tailroom(e->skb),
                                               GFP_ATOMIC);
                        if (!nskb) {
                                printk(KERN_WARNING "ip_queue: error "
                                       "in mangle, dropping packet\n");
                                return -ENOMEM;
                        }
                        kfree_skb(e->skb);
                        e->skb = nskb;
                }
                skb_put(e->skb, diff);
        }
        if (!skb_make_writable(e->skb, v->data_len))
                return -ENOMEM;
        skb_copy_to_linear_data(e->skb, v->payload, v->data_len);
        e->skb->ip_summed = CHECKSUM_NONE;

        return 0;
}

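/*
 * Apply a verdict received from the peer: optionally mangle the payload,
 * then hand the packet back to netfilter.
 */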
static int
ipq_set_verdict(struct ipq_verdict_msg *vmsg, unsigned int len)
{
        struct nf_queue_entry *entry;

        if (vmsg->value > NF_MAX_VERDICT)
                return -EINVAL;

        entry = ipq_find_dequeue_entry(vmsg->id);
        if (entry == NULL)
                return -ENOENT;
        else {
                int verdict = vmsg->value;

                if (vmsg->data_len && vmsg->data_len == len)
                        if (ipq_mangle_ipv4(vmsg, entry) < 0)
                                verdict = NF_DROP;

                nf_reinject(entry, verdict);
                return 0;
        }
}

static int
ipq_set_mode(unsigned char mode, unsigned int range)
{
        int status;

        write_lock_bh(&queue_lock);
        status = __ipq_set_mode(mode, range);
        write_unlock_bh(&queue_lock);
        return status;
}

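/* Dispatch an IPQM_MODE or IPQM_VERDICT request received from the peer. */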
static int
ipq_receive_peer(struct ipq_peer_msg *pmsg,
                 unsigned char type, unsigned int len)
{
        int status = 0;

        if (len < sizeof(*pmsg))
                return -EINVAL;

        switch (type) {
        case IPQM_MODE:
                status = ipq_set_mode(pmsg->msg.mode.value,
                                      pmsg->msg.mode.range);
                break;

        case IPQM_VERDICT:
                if (pmsg->msg.verdict.value > NF_MAX_VERDICT)
                        status = -EINVAL;
                else
                        status = ipq_set_verdict(&pmsg->msg.verdict,
                                                 len - sizeof(*pmsg));
                break;
        default:
                status = -EINVAL;
        }
        return status;
}

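/*
 * ipq_cmpfn helper: match entries whose input or output device (including
 * bridge physical devices) has the given ifindex.
 */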
static int
dev_cmp(struct nf_queue_entry *entry, unsigned long ifindex)
{
        if (entry->indev)
                if (entry->indev->ifindex == ifindex)
                        return 1;
        if (entry->outdev)
                if (entry->outdev->ifindex == ifindex)
                        return 1;
#ifdef CONFIG_BRIDGE_NETFILTER
        if (entry->skb->nf_bridge) {
                if (entry->skb->nf_bridge->physindev &&
                    entry->skb->nf_bridge->physindev->ifindex == ifindex)
                        return 1;
                if (entry->skb->nf_bridge->physoutdev &&
                    entry->skb->nf_bridge->physoutdev->ifindex == ifindex)
                        return 1;
        }
#endif
        return 0;
}

static void
ipq_dev_drop(int ifindex)
{
        ipq_flush(dev_cmp, ifindex);
}

#define RCV_SKB_FAIL(err) do { netlink_ack(skb, nlh, (err)); return; } while (0)

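/*
 * Validate and process one netlink message from userspace.  The first
 * process that talks to us becomes the peer; messages from any other pid
 * are rejected with -EBUSY until that peer goes away.
 */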
static inline void
__ipq_rcv_skb(struct sk_buff *skb)
{
        int status, type, pid, flags, nlmsglen, skblen;
        struct nlmsghdr *nlh;

        skblen = skb->len;
        if (skblen < sizeof(*nlh))
                return;

        nlh = nlmsg_hdr(skb);
        nlmsglen = nlh->nlmsg_len;
        if (nlmsglen < sizeof(*nlh) || skblen < nlmsglen)
                return;

        pid = nlh->nlmsg_pid;
        flags = nlh->nlmsg_flags;

        if (pid <= 0 || !(flags & NLM_F_REQUEST) || flags & NLM_F_MULTI)
                RCV_SKB_FAIL(-EINVAL);

        if (flags & MSG_TRUNC)
                RCV_SKB_FAIL(-ECOMM);

        type = nlh->nlmsg_type;
        if (type < NLMSG_NOOP || type >= IPQM_MAX)
                RCV_SKB_FAIL(-EINVAL);

        if (type <= IPQM_BASE)
                return;

        if (security_netlink_recv(skb, CAP_NET_ADMIN))
                RCV_SKB_FAIL(-EPERM);

        write_lock_bh(&queue_lock);

        if (peer_pid) {
                if (peer_pid != pid) {
                        write_unlock_bh(&queue_lock);
                        RCV_SKB_FAIL(-EBUSY);
                }
        } else {
                net_enable_timestamp();
                peer_pid = pid;
        }

        write_unlock_bh(&queue_lock);

        status = ipq_receive_peer(NLMSG_DATA(nlh), type,
                                  nlmsglen - NLMSG_LENGTH(0));
        if (status < 0)
                RCV_SKB_FAIL(status);

        if (flags & NLM_F_ACK)
                netlink_ack(skb, nlh, 0);
        return;
}

static void
ipq_rcv_skb(struct sk_buff *skb)
{
        mutex_lock(&ipqnl_mutex);
        __ipq_rcv_skb(skb);
        mutex_unlock(&ipqnl_mutex);
}

static int
ipq_rcv_dev_event(struct notifier_block *this,
                  unsigned long event, void *ptr)
{
        struct net_device *dev = ptr;

        if (dev->nd_net != &init_net)
                return NOTIFY_DONE;

        /* Drop any packets associated with the downed device */
        if (event == NETDEV_DOWN)
                ipq_dev_drop(dev->ifindex);
        return NOTIFY_DONE;
}

static struct notifier_block ipq_dev_notifier = {
        .notifier_call = ipq_rcv_dev_event,
};

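/*
 * Reset the queue when the peer closes its NETLINK_FIREWALL socket so that
 * a new peer can attach.
 */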
static int
ipq_rcv_nl_event(struct notifier_block *this,
                 unsigned long event, void *ptr)
{
        struct netlink_notify *n = ptr;

        if (event == NETLINK_URELEASE &&
            n->protocol == NETLINK_FIREWALL && n->pid) {
                write_lock_bh(&queue_lock);
                if ((n->net == &init_net) && (n->pid == peer_pid))
                        __ipq_reset();
                write_unlock_bh(&queue_lock);
        }
        return NOTIFY_DONE;
}

static struct notifier_block ipq_nl_notifier = {
        .notifier_call = ipq_rcv_nl_event,
};

#ifdef CONFIG_SYSCTL
static struct ctl_table_header *ipq_sysctl_header;

static ctl_table ipq_table[] = {
        {
                .ctl_name = NET_IPQ_QMAX,
                .procname = NET_IPQ_QMAX_NAME,
                .data = &queue_maxlen,
                .maxlen = sizeof(queue_maxlen),
                .mode = 0644,
                .proc_handler = proc_dointvec
        },
        { .ctl_name = 0 }
};
#endif

#ifdef CONFIG_PROC_FS
static int ip_queue_show(struct seq_file *m, void *v)
{
        read_lock_bh(&queue_lock);

        seq_printf(m,
                   "Peer PID : %d\n"
                   "Copy mode : %hu\n"
                   "Copy range : %u\n"
                   "Queue length : %u\n"
                   "Queue max. length : %u\n"
                   "Queue dropped : %u\n"
                   "Netlink dropped : %u\n",
                   peer_pid,
                   copy_mode,
                   copy_range,
                   queue_total,
                   queue_maxlen,
                   queue_dropped,
                   queue_user_dropped);

        read_unlock_bh(&queue_lock);
        return 0;
}

static int ip_queue_open(struct inode *inode, struct file *file)
{
        return single_open(file, ip_queue_show, NULL);
}

static const struct file_operations ip_queue_proc_fops = {
        .open = ip_queue_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
        .owner = THIS_MODULE,
};
#endif

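/* Queue handler registered for PF_INET in ip_queue_init(). */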
static const struct nf_queue_handler nfqh = {
        .name = "ip_queue",
        .outfn = &ipq_enqueue_packet,
};

static int __init ip_queue_init(void)
{
        int status = -ENOMEM;
        struct proc_dir_entry *proc __maybe_unused;

        netlink_register_notifier(&ipq_nl_notifier);
        ipqnl = netlink_kernel_create(&init_net, NETLINK_FIREWALL, 0,
                                      ipq_rcv_skb, NULL, THIS_MODULE);
        if (ipqnl == NULL) {
                printk(KERN_ERR "ip_queue: failed to create netlink socket\n");
                goto cleanup_netlink_notifier;
        }

#ifdef CONFIG_PROC_FS
        proc = proc_create(IPQ_PROC_FS_NAME, 0, init_net.proc_net,
                           &ip_queue_proc_fops);
        if (!proc) {
                printk(KERN_ERR "ip_queue: failed to create proc entry\n");
                goto cleanup_ipqnl;
        }
#endif
        register_netdevice_notifier(&ipq_dev_notifier);
#ifdef CONFIG_SYSCTL
        ipq_sysctl_header = register_sysctl_paths(net_ipv4_ctl_path, ipq_table);
#endif
        status = nf_register_queue_handler(PF_INET, &nfqh);
        if (status < 0) {
                printk(KERN_ERR "ip_queue: failed to register queue handler\n");
                goto cleanup_sysctl;
        }
        return status;

cleanup_sysctl:
#ifdef CONFIG_SYSCTL
        unregister_sysctl_table(ipq_sysctl_header);
#endif
        unregister_netdevice_notifier(&ipq_dev_notifier);
        proc_net_remove(&init_net, IPQ_PROC_FS_NAME);
cleanup_ipqnl: __maybe_unused
        netlink_kernel_release(ipqnl);
        mutex_lock(&ipqnl_mutex);
        mutex_unlock(&ipqnl_mutex);

cleanup_netlink_notifier:
        netlink_unregister_notifier(&ipq_nl_notifier);
        return status;
}

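/*
 * Tear down in roughly the reverse order of ip_queue_init(), dropping
 * anything still waiting for a verdict.
 */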
static void __exit ip_queue_fini(void)
{
        nf_unregister_queue_handlers(&nfqh);
        synchronize_net();
        ipq_flush(NULL, 0);

#ifdef CONFIG_SYSCTL
        unregister_sysctl_table(ipq_sysctl_header);
#endif
        unregister_netdevice_notifier(&ipq_dev_notifier);
        proc_net_remove(&init_net, IPQ_PROC_FS_NAME);

        netlink_kernel_release(ipqnl);
        mutex_lock(&ipqnl_mutex);
        mutex_unlock(&ipqnl_mutex);

        netlink_unregister_notifier(&ipq_nl_notifier);
}

MODULE_DESCRIPTION("IPv4 packet queue handler");
MODULE_AUTHOR("James Morris <jmorris@intercode.com.au>");
MODULE_LICENSE("GPL");

module_init(ip_queue_init);
module_exit(ip_queue_fini);