[NETFILTER]: Fix possible overflow in netfilter's do_replace()
net/ipv6/netfilter/ip6_tables.c
1 /*
2 * Packet matching code.
3 *
4 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
5 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * 19 Jan 2002 Harald Welte <laforge@gnumonks.org>
12 * - increase module usage count as soon as we have rules inside
13 * a table
14 * 06 Jun 2002 Andras Kis-Szabo <kisza@sch.bme.hu>
15 * - new extension header parser code
16 * 15 Oct 2005 Harald Welte <laforge@netfilter.org>
17 * - Unification of {ip,ip6}_tables into x_tables
18 * - Removed tcp and udp code, since it's not ipv6 specific
19 */
20
21 #include <linux/capability.h>
22 #include <linux/config.h>
23 #include <linux/in.h>
24 #include <linux/skbuff.h>
25 #include <linux/kmod.h>
26 #include <linux/vmalloc.h>
27 #include <linux/netdevice.h>
28 #include <linux/module.h>
29 #include <linux/icmpv6.h>
30 #include <net/ipv6.h>
31 #include <asm/uaccess.h>
32 #include <asm/semaphore.h>
33 #include <linux/proc_fs.h>
34 #include <linux/cpumask.h>
35
36 #include <linux/netfilter_ipv6/ip6_tables.h>
37 #include <linux/netfilter/x_tables.h>
38
39 MODULE_LICENSE("GPL");
40 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
41 MODULE_DESCRIPTION("IPv6 packet filter");
42
43 #define IPV6_HDR_LEN (sizeof(struct ipv6hdr))
44 #define IPV6_OPTHDR_LEN (sizeof(struct ipv6_opt_hdr))
45
46 /*#define DEBUG_IP_FIREWALL*/
47 /*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
48 /*#define DEBUG_IP_FIREWALL_USER*/
49
50 #ifdef DEBUG_IP_FIREWALL
51 #define dprintf(format, args...) printk(format , ## args)
52 #else
53 #define dprintf(format, args...)
54 #endif
55
56 #ifdef DEBUG_IP_FIREWALL_USER
57 #define duprintf(format, args...) printk(format , ## args)
58 #else
59 #define duprintf(format, args...)
60 #endif
61
62 #ifdef CONFIG_NETFILTER_DEBUG
63 #define IP_NF_ASSERT(x) \
64 do { \
65 if (!(x)) \
66 printk("IP_NF_ASSERT: %s:%s:%u\n", \
67 __FUNCTION__, __FILE__, __LINE__); \
68 } while(0)
69 #else
70 #define IP_NF_ASSERT(x)
71 #endif
72
73
74 #include <linux/netfilter_ipv4/listhelp.h>
75
76 #if 0
77 /* All the better to debug you with... */
78 #define static
79 #define inline
80 #endif
81
82 /*
83 We keep a set of rules for each CPU, so we can avoid write-locking
84 them in the softirq when updating the counters and therefore
85 only need to read-lock in the softirq; doing a write_lock_bh() in user
86 context stops packets coming through and allows user context to read
87 the counters or update the rules.
88
89 Hence the start of any table is given by get_table() below. */
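/*
   Illustrative sketch of the pattern described above; the helper names here
   are hypothetical and assume the rwlock_t 'lock' member of struct xt_table
   used throughout this file.  The packet path only read-locks, so every CPU
   can walk its private rule copy concurrently; user context write-locks,
   briefly stopping traffic while counters are read or rules are replaced. */
#if 0
static unsigned int example_packet_path(struct xt_table *table)
{
	unsigned int verdict = NF_ACCEPT;

	read_lock_bh(&table->lock);
	/* walk this CPU's copy of the rules, bump its counters */
	read_unlock_bh(&table->lock);
	return verdict;
}

static void example_user_context(struct xt_table *table)
{
	write_lock_bh(&table->lock);
	/* take a consistent counter snapshot or install new rules */
	write_unlock_bh(&table->lock);
}
#endif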
90
91 #if 0
92 #define down(x) do { printk("DOWN:%u:" #x "\n", __LINE__); down(x); } while(0)
93 #define down_interruptible(x) ({ int __r; printk("DOWNi:%u:" #x "\n", __LINE__); __r = down_interruptible(x); if (__r != 0) printk("ABORT-DOWNi:%u\n", __LINE__); __r; })
94 #define up(x) do { printk("UP:%u:" #x "\n", __LINE__); up(x); } while(0)
95 #endif
96
97 int
98 ip6_masked_addrcmp(const struct in6_addr *addr1, const struct in6_addr *mask,
99 const struct in6_addr *addr2)
100 {
101 int i;
102 for( i = 0; i < 16; i++){
103 if((addr1->s6_addr[i] & mask->s6_addr[i]) !=
104 (addr2->s6_addr[i] & mask->s6_addr[i]))
105 return 1;
106 }
107 return 0;
108 }
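/* Usage note (the caller shown is hypothetical): the return convention is
   inverted relative to a boolean "matches" test -- 0 means the addresses are
   equal under the mask, 1 means they differ -- so a caller testing whether
   saddr lies inside a rule's source prefix checks for 0: */
#if 0
	if (ip6_masked_addrcmp(&ipv6h->saddr, &rule->smsk, &rule->src) == 0) {
		/* source address matches the rule */
	}
#endif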
109
110 /* Check for an extension */
111 int
112 ip6t_ext_hdr(u8 nexthdr)
113 {
114 return ( (nexthdr == IPPROTO_HOPOPTS) ||
115 (nexthdr == IPPROTO_ROUTING) ||
116 (nexthdr == IPPROTO_FRAGMENT) ||
117 (nexthdr == IPPROTO_ESP) ||
118 (nexthdr == IPPROTO_AH) ||
119 (nexthdr == IPPROTO_NONE) ||
120 (nexthdr == IPPROTO_DSTOPTS) );
121 }
122
123 /* Returns whether matches rule or not. */
124 static inline int
125 ip6_packet_match(const struct sk_buff *skb,
126 const char *indev,
127 const char *outdev,
128 const struct ip6t_ip6 *ip6info,
129 unsigned int *protoff,
130 int *fragoff)
131 {
132 size_t i;
133 unsigned long ret;
134 const struct ipv6hdr *ipv6 = skb->nh.ipv6h;
135
136 #define FWINV(bool,invflg) ((bool) ^ !!(ip6info->invflags & invflg))
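/* FWINV() XORs a test result with the presence of the corresponding
   IP6T_INV_* flag.  Worked example: if the source addresses differ,
   ip6_masked_addrcmp() yields 1; without IP6T_INV_SRCIP that 1 survives the
   XOR, the address test fails and ip6_packet_match() returns 0 below, while
   with the "!" inversion set it becomes 1 ^ 1 == 0, which is exactly what
   the inverted rule asked for. */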
137
138 if (FWINV(ip6_masked_addrcmp(&ipv6->saddr, &ip6info->smsk,
139 &ip6info->src), IP6T_INV_SRCIP)
140 || FWINV(ip6_masked_addrcmp(&ipv6->daddr, &ip6info->dmsk,
141 &ip6info->dst), IP6T_INV_DSTIP)) {
142 dprintf("Source or dest mismatch.\n");
143 /*
144 dprintf("SRC: %u. Mask: %u. Target: %u.%s\n", ip->saddr,
145 ipinfo->smsk.s_addr, ipinfo->src.s_addr,
146 ipinfo->invflags & IP6T_INV_SRCIP ? " (INV)" : "");
147 dprintf("DST: %u. Mask: %u. Target: %u.%s\n", ip->daddr,
148 ipinfo->dmsk.s_addr, ipinfo->dst.s_addr,
149 ipinfo->invflags & IP6T_INV_DSTIP ? " (INV)" : "");*/
150 return 0;
151 }
152
153 /* Look for ifname matches; this should unroll nicely. */
154 for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
155 ret |= (((const unsigned long *)indev)[i]
156 ^ ((const unsigned long *)ip6info->iniface)[i])
157 & ((const unsigned long *)ip6info->iniface_mask)[i];
158 }
159
160 if (FWINV(ret != 0, IP6T_INV_VIA_IN)) {
161 dprintf("VIA in mismatch (%s vs %s).%s\n",
162 indev, ip6info->iniface,
163 ip6info->invflags&IP6T_INV_VIA_IN ?" (INV)":"");
164 return 0;
165 }
166
167 for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
168 ret |= (((const unsigned long *)outdev)[i]
169 ^ ((const unsigned long *)ip6info->outiface)[i])
170 & ((const unsigned long *)ip6info->outiface_mask)[i];
171 }
172
173 if (FWINV(ret != 0, IP6T_INV_VIA_OUT)) {
174 dprintf("VIA out mismatch (%s vs %s).%s\n",
175 outdev, ip6info->outiface,
176 ip6info->invflags&IP6T_INV_VIA_OUT ?" (INV)":"");
177 return 0;
178 }
179
180 /* ... might want to do something with class and flowlabel here ... */
181
182 /* look for the desired protocol header */
183 if((ip6info->flags & IP6T_F_PROTO)) {
184 int protohdr;
185 unsigned short _frag_off;
186
187 protohdr = ipv6_find_hdr(skb, protoff, -1, &_frag_off);
188 if (protohdr < 0)
189 return 0;
190
191 *fragoff = _frag_off;
192
193 dprintf("Packet protocol %hi ?= %s%hi.\n",
194 protohdr,
195 ip6info->invflags & IP6T_INV_PROTO ? "!":"",
196 ip6info->proto);
197
198 if (ip6info->proto == protohdr) {
199 if(ip6info->invflags & IP6T_INV_PROTO) {
200 return 0;
201 }
202 return 1;
203 }
204
205 /* We need a match for '-p all', too! */
206 if ((ip6info->proto != 0) &&
207 !(ip6info->invflags & IP6T_INV_PROTO))
208 return 0;
209 }
210 return 1;
211 }
212
213 /* should be ip6 safe */
214 static inline int
215 ip6_checkentry(const struct ip6t_ip6 *ipv6)
216 {
217 if (ipv6->flags & ~IP6T_F_MASK) {
218 duprintf("Unknown flag bits set: %08X\n",
219 ipv6->flags & ~IP6T_F_MASK);
220 return 0;
221 }
222 if (ipv6->invflags & ~IP6T_INV_MASK) {
223 duprintf("Unknown invflag bits set: %08X\n",
224 ipv6->invflags & ~IP6T_INV_MASK);
225 return 0;
226 }
227 return 1;
228 }
229
230 static unsigned int
231 ip6t_error(struct sk_buff **pskb,
232 const struct net_device *in,
233 const struct net_device *out,
234 unsigned int hooknum,
235 const void *targinfo,
236 void *userinfo)
237 {
238 if (net_ratelimit())
239 printk("ip6_tables: error: `%s'\n", (char *)targinfo);
240
241 return NF_DROP;
242 }
243
244 static inline
245 int do_match(struct ip6t_entry_match *m,
246 const struct sk_buff *skb,
247 const struct net_device *in,
248 const struct net_device *out,
249 int offset,
250 unsigned int protoff,
251 int *hotdrop)
252 {
253 /* Stop iteration if it doesn't match */
254 if (!m->u.kernel.match->match(skb, in, out, m->data,
255 offset, protoff, hotdrop))
256 return 1;
257 else
258 return 0;
259 }
260
261 static inline struct ip6t_entry *
262 get_entry(void *base, unsigned int offset)
263 {
264 return (struct ip6t_entry *)(base + offset);
265 }
266
267 /* Returns one of the generic firewall policies, like NF_ACCEPT. */
268 unsigned int
269 ip6t_do_table(struct sk_buff **pskb,
270 unsigned int hook,
271 const struct net_device *in,
272 const struct net_device *out,
273 struct xt_table *table,
274 void *userdata)
275 {
276 static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
277 int offset = 0;
278 unsigned int protoff = 0;
279 int hotdrop = 0;
280 /* Initializing verdict to NF_DROP keeps gcc happy. */
281 unsigned int verdict = NF_DROP;
282 const char *indev, *outdev;
283 void *table_base;
284 struct ip6t_entry *e, *back;
285 struct xt_table_info *private;
286
287 /* Initialization */
288 indev = in ? in->name : nulldevname;
289 outdev = out ? out->name : nulldevname;
290 /* We handle fragments by dealing with the first fragment as
291 * if it was a normal packet. All other fragments are treated
292 * normally, except that they will NEVER match rules that ask
293 * things we don't know (i.e. tcp syn flag or ports). If the
294 * rule is also a fragment-specific rule, non-fragments won't
295 * match it. */
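/* Concrete example: "-p tcp --dport 22" can never match a non-first
   fragment, because the port numbers simply are not present there;
   conversely a rule matching on the fragment header itself will not match
   unfragmented packets. */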
296
297 read_lock_bh(&table->lock);
298 private = table->private;
299 IP_NF_ASSERT(table->valid_hooks & (1 << hook));
300 table_base = (void *)private->entries[smp_processor_id()];
301 e = get_entry(table_base, private->hook_entry[hook]);
302
303 #ifdef CONFIG_NETFILTER_DEBUG
304 /* Check that no one else is using our table */
305 if (((struct ip6t_entry *)table_base)->comefrom != 0xdead57ac
306 && ((struct ip6t_entry *)table_base)->comefrom != 0xeeeeeeec) {
307 printk("ASSERT: CPU #%u, %s comefrom(%p) = %X\n",
308 smp_processor_id(),
309 table->name,
310 &((struct ip6t_entry *)table_base)->comefrom,
311 ((struct ip6t_entry *)table_base)->comefrom);
312 }
313 ((struct ip6t_entry *)table_base)->comefrom = 0x57acc001;
314 #endif
315
316 /* For return from builtin chain */
317 back = get_entry(table_base, private->underflow[hook]);
318
319 do {
320 IP_NF_ASSERT(e);
321 IP_NF_ASSERT(back);
322 if (ip6_packet_match(*pskb, indev, outdev, &e->ipv6,
323 &protoff, &offset)) {
324 struct ip6t_entry_target *t;
325
326 if (IP6T_MATCH_ITERATE(e, do_match,
327 *pskb, in, out,
328 offset, protoff, &hotdrop) != 0)
329 goto no_match;
330
331 ADD_COUNTER(e->counters,
332 ntohs((*pskb)->nh.ipv6h->payload_len)
333 + IPV6_HDR_LEN,
334 1);
335
336 t = ip6t_get_target(e);
337 IP_NF_ASSERT(t->u.kernel.target);
338 /* Standard target? */
339 if (!t->u.kernel.target->target) {
340 int v;
341
342 v = ((struct ip6t_standard_target *)t)->verdict;
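/* Note on the encoding: userspace stores standard verdicts as
   "-verdict - 1", so e.g. NF_ACCEPT (1) arrives here as -2 and is
   recovered below as (unsigned)(-(-2)) - 1; non-negative values are
   byte offsets of the rule to jump to within this table. */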
343 if (v < 0) {
344 /* Pop from stack? */
345 if (v != IP6T_RETURN) {
346 verdict = (unsigned)(-v) - 1;
347 break;
348 }
349 e = back;
350 back = get_entry(table_base,
351 back->comefrom);
352 continue;
353 }
354 if (table_base + v != (void *)e + e->next_offset
355 && !(e->ipv6.flags & IP6T_F_GOTO)) {
356 /* Save old back ptr in next entry */
357 struct ip6t_entry *next
358 = (void *)e + e->next_offset;
359 next->comefrom
360 = (void *)back - table_base;
361 /* set back pointer to next entry */
362 back = next;
363 }
364
365 e = get_entry(table_base, v);
366 } else {
367 /* Targets which reenter must return
368 abs. verdicts */
369 #ifdef CONFIG_NETFILTER_DEBUG
370 ((struct ip6t_entry *)table_base)->comefrom
371 = 0xeeeeeeec;
372 #endif
373 verdict = t->u.kernel.target->target(pskb,
374 in, out,
375 hook,
376 t->data,
377 userdata);
378
379 #ifdef CONFIG_NETFILTER_DEBUG
380 if (((struct ip6t_entry *)table_base)->comefrom
381 != 0xeeeeeeec
382 && verdict == IP6T_CONTINUE) {
383 printk("Target %s reentered!\n",
384 t->u.kernel.target->name);
385 verdict = NF_DROP;
386 }
387 ((struct ip6t_entry *)table_base)->comefrom
388 = 0x57acc001;
389 #endif
390 if (verdict == IP6T_CONTINUE)
391 e = (void *)e + e->next_offset;
392 else
393 /* Verdict */
394 break;
395 }
396 } else {
397
398 no_match:
399 e = (void *)e + e->next_offset;
400 }
401 } while (!hotdrop);
402
403 #ifdef CONFIG_NETFILTER_DEBUG
404 ((struct ip6t_entry *)table_base)->comefrom = 0xdead57ac;
405 #endif
406 read_unlock_bh(&table->lock);
407
408 #ifdef DEBUG_ALLOW_ALL
409 return NF_ACCEPT;
410 #else
411 if (hotdrop)
412 return NF_DROP;
413 else return verdict;
414 #endif
415 }
416
417 /* All zeroes == unconditional rule. */
418 static inline int
419 unconditional(const struct ip6t_ip6 *ipv6)
420 {
421 unsigned int i;
422
423 for (i = 0; i < sizeof(*ipv6); i++)
424 if (((char *)ipv6)[i])
425 break;
426
427 return (i == sizeof(*ipv6));
428 }
429
430 /* Figures out from what hook each rule can be called: returns 0 if
431 there are loops. Puts hook bitmask in comefrom. */
432 static int
433 mark_source_chains(struct xt_table_info *newinfo,
434 unsigned int valid_hooks, void *entry0)
435 {
436 unsigned int hook;
437
438 /* No recursion; use packet counter to save back ptrs (reset
439 to 0 as we leave), and comefrom to save source hook bitmask */
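/* Summary of the walk below: every entry reachable from a hook gets
   (1 << hook) recorded in comefrom, plus a marker bit
   (1 << NF_IP6_NUMHOOKS) while the walk is in progress.  Reaching an
   entry that still carries the marker means a rule jumps back into a
   chain being traversed, i.e. a loop, so 0 is returned and
   translate_table() fails with -ELOOP.  e->counters.pcnt is borrowed
   as a saved back pointer so unconditional RETURNs can be unwound
   without recursion; it is reset to 0 again on the way out. */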
440 for (hook = 0; hook < NF_IP6_NUMHOOKS; hook++) {
441 unsigned int pos = newinfo->hook_entry[hook];
442 struct ip6t_entry *e
443 = (struct ip6t_entry *)(entry0 + pos);
444
445 if (!(valid_hooks & (1 << hook)))
446 continue;
447
448 /* Set initial back pointer. */
449 e->counters.pcnt = pos;
450
451 for (;;) {
452 struct ip6t_standard_target *t
453 = (void *)ip6t_get_target(e);
454
455 if (e->comefrom & (1 << NF_IP6_NUMHOOKS)) {
456 printk("iptables: loop hook %u pos %u %08X.\n",
457 hook, pos, e->comefrom);
458 return 0;
459 }
460 e->comefrom
461 |= ((1 << hook) | (1 << NF_IP6_NUMHOOKS));
462
463 /* Unconditional return/END. */
464 if (e->target_offset == sizeof(struct ip6t_entry)
465 && (strcmp(t->target.u.user.name,
466 IP6T_STANDARD_TARGET) == 0)
467 && t->verdict < 0
468 && unconditional(&e->ipv6)) {
469 unsigned int oldpos, size;
470
471 /* Return: backtrack through the last
472 big jump. */
473 do {
474 e->comefrom ^= (1<<NF_IP6_NUMHOOKS);
475 #ifdef DEBUG_IP_FIREWALL_USER
476 if (e->comefrom
477 & (1 << NF_IP6_NUMHOOKS)) {
478 duprintf("Back unset "
479 "on hook %u "
480 "rule %u\n",
481 hook, pos);
482 }
483 #endif
484 oldpos = pos;
485 pos = e->counters.pcnt;
486 e->counters.pcnt = 0;
487
488 /* We're at the start. */
489 if (pos == oldpos)
490 goto next;
491
492 e = (struct ip6t_entry *)
493 (entry0 + pos);
494 } while (oldpos == pos + e->next_offset);
495
496 /* Move along one */
497 size = e->next_offset;
498 e = (struct ip6t_entry *)
499 (entry0 + pos + size);
500 e->counters.pcnt = pos;
501 pos += size;
502 } else {
503 int newpos = t->verdict;
504
505 if (strcmp(t->target.u.user.name,
506 IP6T_STANDARD_TARGET) == 0
507 && newpos >= 0) {
508 /* This a jump; chase it. */
509 duprintf("Jump rule %u -> %u\n",
510 pos, newpos);
511 } else {
512 /* ... this is a fallthru */
513 newpos = pos + e->next_offset;
514 }
515 e = (struct ip6t_entry *)
516 (entry0 + newpos);
517 e->counters.pcnt = pos;
518 pos = newpos;
519 }
520 }
521 next:
522 duprintf("Finished chain %u\n", hook);
523 }
524 return 1;
525 }
526
527 static inline int
528 cleanup_match(struct ip6t_entry_match *m, unsigned int *i)
529 {
530 if (i && (*i)-- == 0)
531 return 1;
532
533 if (m->u.kernel.match->destroy)
534 m->u.kernel.match->destroy(m->data,
535 m->u.match_size - sizeof(*m));
536 module_put(m->u.kernel.match->me);
537 return 0;
538 }
539
540 static inline int
541 standard_check(const struct ip6t_entry_target *t,
542 unsigned int max_offset)
543 {
544 struct ip6t_standard_target *targ = (void *)t;
545
546 /* Check standard info. */
547 if (t->u.target_size
548 != IP6T_ALIGN(sizeof(struct ip6t_standard_target))) {
549 duprintf("standard_check: target size %u != %u\n",
550 t->u.target_size,
551 IP6T_ALIGN(sizeof(struct ip6t_standard_target)));
552 return 0;
553 }
554
555 if (targ->verdict >= 0
556 && targ->verdict > max_offset - sizeof(struct ip6t_entry)) {
557 duprintf("ip6t_standard_check: bad verdict (%i)\n",
558 targ->verdict);
559 return 0;
560 }
561
562 if (targ->verdict < -NF_MAX_VERDICT - 1) {
563 duprintf("ip6t_standard_check: bad negative verdict (%i)\n",
564 targ->verdict);
565 return 0;
566 }
567 return 1;
568 }
569
570 static inline int
571 check_match(struct ip6t_entry_match *m,
572 const char *name,
573 const struct ip6t_ip6 *ipv6,
574 unsigned int hookmask,
575 unsigned int *i)
576 {
577 struct ip6t_match *match;
578
579 match = try_then_request_module(xt_find_match(AF_INET6, m->u.user.name,
580 m->u.user.revision),
581 "ip6t_%s", m->u.user.name);
582 if (IS_ERR(match) || !match) {
583 duprintf("check_match: `%s' not found\n", m->u.user.name);
584 return match ? PTR_ERR(match) : -ENOENT;
585 }
586 m->u.kernel.match = match;
587
588 if (m->u.kernel.match->checkentry
589 && !m->u.kernel.match->checkentry(name, ipv6, m->data,
590 m->u.match_size - sizeof(*m),
591 hookmask)) {
592 module_put(m->u.kernel.match->me);
593 duprintf("ip_tables: check failed for `%s'.\n",
594 m->u.kernel.match->name);
595 return -EINVAL;
596 }
597
598 (*i)++;
599 return 0;
600 }
601
602 static struct ip6t_target ip6t_standard_target;
603
604 static inline int
605 check_entry(struct ip6t_entry *e, const char *name, unsigned int size,
606 unsigned int *i)
607 {
608 struct ip6t_entry_target *t;
609 struct ip6t_target *target;
610 int ret;
611 unsigned int j;
612
613 if (!ip6_checkentry(&e->ipv6)) {
614 duprintf("ip_tables: ip check failed %p %s.\n", e, name);
615 return -EINVAL;
616 }
617
618 j = 0;
619 ret = IP6T_MATCH_ITERATE(e, check_match, name, &e->ipv6, e->comefrom, &j);
620 if (ret != 0)
621 goto cleanup_matches;
622
623 t = ip6t_get_target(e);
624 target = try_then_request_module(xt_find_target(AF_INET6,
625 t->u.user.name,
626 t->u.user.revision),
627 "ip6t_%s", t->u.user.name);
628 if (IS_ERR(target) || !target) {
629 duprintf("check_entry: `%s' not found\n", t->u.user.name);
630 ret = target ? PTR_ERR(target) : -ENOENT;
631 goto cleanup_matches;
632 }
633 t->u.kernel.target = target;
634
635 if (t->u.kernel.target == &ip6t_standard_target) {
636 if (!standard_check(t, size)) {
637 ret = -EINVAL;
638 goto cleanup_matches;
639 }
640 } else if (t->u.kernel.target->checkentry
641 && !t->u.kernel.target->checkentry(name, e, t->data,
642 t->u.target_size
643 - sizeof(*t),
644 e->comefrom)) {
645 module_put(t->u.kernel.target->me);
646 duprintf("ip_tables: check failed for `%s'.\n",
647 t->u.kernel.target->name);
648 ret = -EINVAL;
649 goto cleanup_matches;
650 }
651
652 (*i)++;
653 return 0;
654
655 cleanup_matches:
656 IP6T_MATCH_ITERATE(e, cleanup_match, &j);
657 return ret;
658 }
659
660 static inline int
661 check_entry_size_and_hooks(struct ip6t_entry *e,
662 struct xt_table_info *newinfo,
663 unsigned char *base,
664 unsigned char *limit,
665 const unsigned int *hook_entries,
666 const unsigned int *underflows,
667 unsigned int *i)
668 {
669 unsigned int h;
670
671 if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0
672 || (unsigned char *)e + sizeof(struct ip6t_entry) >= limit) {
673 duprintf("Bad offset %p\n", e);
674 return -EINVAL;
675 }
676
677 if (e->next_offset
678 < sizeof(struct ip6t_entry) + sizeof(struct ip6t_entry_target)) {
679 duprintf("checking: element %p size %u\n",
680 e, e->next_offset);
681 return -EINVAL;
682 }
683
684 /* Check hooks & underflows */
685 for (h = 0; h < NF_IP6_NUMHOOKS; h++) {
686 if ((unsigned char *)e - base == hook_entries[h])
687 newinfo->hook_entry[h] = hook_entries[h];
688 if ((unsigned char *)e - base == underflows[h])
689 newinfo->underflow[h] = underflows[h];
690 }
691
692 /* FIXME: underflows must be unconditional, standard verdicts
693 < 0 (not IP6T_RETURN). --RR */
694
695 /* Clear counters and comefrom */
696 e->counters = ((struct xt_counters) { 0, 0 });
697 e->comefrom = 0;
698
699 (*i)++;
700 return 0;
701 }
702
703 static inline int
704 cleanup_entry(struct ip6t_entry *e, unsigned int *i)
705 {
706 struct ip6t_entry_target *t;
707
708 if (i && (*i)-- == 0)
709 return 1;
710
711 /* Cleanup all matches */
712 IP6T_MATCH_ITERATE(e, cleanup_match, NULL);
713 t = ip6t_get_target(e);
714 if (t->u.kernel.target->destroy)
715 t->u.kernel.target->destroy(t->data,
716 t->u.target_size - sizeof(*t));
717 module_put(t->u.kernel.target->me);
718 return 0;
719 }
720
721 /* Checks and translates the user-supplied table segment (held in
722 newinfo) */
723 static int
724 translate_table(const char *name,
725 unsigned int valid_hooks,
726 struct xt_table_info *newinfo,
727 void *entry0,
728 unsigned int size,
729 unsigned int number,
730 const unsigned int *hook_entries,
731 const unsigned int *underflows)
732 {
733 unsigned int i;
734 int ret;
735
736 newinfo->size = size;
737 newinfo->number = number;
738
739 /* Init all hooks to impossible value. */
740 for (i = 0; i < NF_IP6_NUMHOOKS; i++) {
741 newinfo->hook_entry[i] = 0xFFFFFFFF;
742 newinfo->underflow[i] = 0xFFFFFFFF;
743 }
744
745 duprintf("translate_table: size %u\n", newinfo->size);
746 i = 0;
747 /* Walk through entries, checking offsets. */
748 ret = IP6T_ENTRY_ITERATE(entry0, newinfo->size,
749 check_entry_size_and_hooks,
750 newinfo,
751 entry0,
752 entry0 + size,
753 hook_entries, underflows, &i);
754 if (ret != 0)
755 return ret;
756
757 if (i != number) {
758 duprintf("translate_table: %u not %u entries\n",
759 i, number);
760 return -EINVAL;
761 }
762
763 /* Check hooks all assigned */
764 for (i = 0; i < NF_IP6_NUMHOOKS; i++) {
765 /* Only hooks which are valid */
766 if (!(valid_hooks & (1 << i)))
767 continue;
768 if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
769 duprintf("Invalid hook entry %u %u\n",
770 i, hook_entries[i]);
771 return -EINVAL;
772 }
773 if (newinfo->underflow[i] == 0xFFFFFFFF) {
774 duprintf("Invalid underflow %u %u\n",
775 i, underflows[i]);
776 return -EINVAL;
777 }
778 }
779
780 if (!mark_source_chains(newinfo, valid_hooks, entry0))
781 return -ELOOP;
782
783 /* Finally, each sanity check must pass */
784 i = 0;
785 ret = IP6T_ENTRY_ITERATE(entry0, newinfo->size,
786 check_entry, name, size, &i);
787
788 if (ret != 0) {
789 IP6T_ENTRY_ITERATE(entry0, newinfo->size,
790 cleanup_entry, &i);
791 return ret;
792 }
793
794 /* And one copy for every other CPU */
795 for_each_cpu(i) {
796 if (newinfo->entries[i] && newinfo->entries[i] != entry0)
797 memcpy(newinfo->entries[i], entry0, newinfo->size);
798 }
799
800 return ret;
801 }
802
803 /* Gets counters. */
804 static inline int
805 add_entry_to_counter(const struct ip6t_entry *e,
806 struct xt_counters total[],
807 unsigned int *i)
808 {
809 ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
810
811 (*i)++;
812 return 0;
813 }
814
815 static inline int
816 set_entry_to_counter(const struct ip6t_entry *e,
817 struct ip6t_counters total[],
818 unsigned int *i)
819 {
820 SET_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
821
822 (*i)++;
823 return 0;
824 }
825
826 static void
827 get_counters(const struct xt_table_info *t,
828 struct xt_counters counters[])
829 {
830 unsigned int cpu;
831 unsigned int i;
832 unsigned int curcpu;
833
834 /* Instead of clearing (by a previous call to memset())
835 * the counters and using adds, we set the counters
836 * with the data used by the 'current' CPU.
837 * We don't care about preemption here.
838 */
839 curcpu = raw_smp_processor_id();
840
841 i = 0;
842 IP6T_ENTRY_ITERATE(t->entries[curcpu],
843 t->size,
844 set_entry_to_counter,
845 counters,
846 &i);
847
848 for_each_cpu(cpu) {
849 if (cpu == curcpu)
850 continue;
851 i = 0;
852 IP6T_ENTRY_ITERATE(t->entries[cpu],
853 t->size,
854 add_entry_to_counter,
855 counters,
856 &i);
857 }
858 }
859
860 static int
861 copy_entries_to_user(unsigned int total_size,
862 struct xt_table *table,
863 void __user *userptr)
864 {
865 unsigned int off, num, countersize;
866 struct ip6t_entry *e;
867 struct xt_counters *counters;
868 struct xt_table_info *private = table->private;
869 int ret = 0;
870 void *loc_cpu_entry;
871
872 /* We need atomic snapshot of counters: rest doesn't change
873 (other than comefrom, which userspace doesn't care
874 about). */
875 countersize = sizeof(struct xt_counters) * private->number;
876 counters = vmalloc(countersize);
877
878 if (counters == NULL)
879 return -ENOMEM;
880
881 /* First, sum counters... */
882 write_lock_bh(&table->lock);
883 get_counters(private, counters);
884 write_unlock_bh(&table->lock);
885
886 /* choose the copy that is on our node/cpu */
887 loc_cpu_entry = private->entries[raw_smp_processor_id()];
888 if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
889 ret = -EFAULT;
890 goto free_counters;
891 }
892
893 /* FIXME: use iterator macros --RR */
894 /* ... then go back and fix counters and names */
895 for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
896 unsigned int i;
897 struct ip6t_entry_match *m;
898 struct ip6t_entry_target *t;
899
900 e = (struct ip6t_entry *)(loc_cpu_entry + off);
901 if (copy_to_user(userptr + off
902 + offsetof(struct ip6t_entry, counters),
903 &counters[num],
904 sizeof(counters[num])) != 0) {
905 ret = -EFAULT;
906 goto free_counters;
907 }
908
909 for (i = sizeof(struct ip6t_entry);
910 i < e->target_offset;
911 i += m->u.match_size) {
912 m = (void *)e + i;
913
914 if (copy_to_user(userptr + off + i
915 + offsetof(struct ip6t_entry_match,
916 u.user.name),
917 m->u.kernel.match->name,
918 strlen(m->u.kernel.match->name)+1)
919 != 0) {
920 ret = -EFAULT;
921 goto free_counters;
922 }
923 }
924
925 t = ip6t_get_target(e);
926 if (copy_to_user(userptr + off + e->target_offset
927 + offsetof(struct ip6t_entry_target,
928 u.user.name),
929 t->u.kernel.target->name,
930 strlen(t->u.kernel.target->name)+1) != 0) {
931 ret = -EFAULT;
932 goto free_counters;
933 }
934 }
935
936 free_counters:
937 vfree(counters);
938 return ret;
939 }
940
941 static int
942 get_entries(const struct ip6t_get_entries *entries,
943 struct ip6t_get_entries __user *uptr)
944 {
945 int ret;
946 struct xt_table *t;
947
948 t = xt_find_table_lock(AF_INET6, entries->name);
949 if (t && !IS_ERR(t)) {
950 struct xt_table_info *private = t->private;
951 duprintf("t->private->number = %u\n", private->number);
952 if (entries->size == private->size)
953 ret = copy_entries_to_user(private->size,
954 t, uptr->entrytable);
955 else {
956 duprintf("get_entries: I've got %u not %u!\n",
957 private->size, entries->size);
958 ret = -EINVAL;
959 }
960 module_put(t->me);
961 xt_table_unlock(t);
962 } else
963 ret = t ? PTR_ERR(t) : -ENOENT;
964
965 return ret;
966 }
967
968 static int
969 do_replace(void __user *user, unsigned int len)
970 {
971 int ret;
972 struct ip6t_replace tmp;
973 struct xt_table *t;
974 struct xt_table_info *newinfo, *oldinfo;
975 struct xt_counters *counters;
976 void *loc_cpu_entry, *loc_cpu_old_entry;
977
978 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
979 return -EFAULT;
980
981 /* overflow check */
982 if (tmp.size >= (INT_MAX - sizeof(struct xt_table_info)) / NR_CPUS -
983 SMP_CACHE_BYTES)
984 return -ENOMEM;
985 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
986 return -ENOMEM;
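/* This pair of checks is what the patch in the subject line adds:
   xt_alloc_table_info() and the counters vmalloc() below derive their
   allocation sizes from tmp.size and tmp.num_counters -- roughly NR_CPUS
   cache-aligned copies of the rule blob plus the xt_table_info header,
   and num_counters * sizeof(struct xt_counters); the exact rounding is an
   x_tables implementation detail.  Without the bounds a huge user-supplied
   value could wrap those computations past INT_MAX before any allocation
   happens, so both cases are rejected with -ENOMEM up front. */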
987
988 newinfo = xt_alloc_table_info(tmp.size);
989 if (!newinfo)
990 return -ENOMEM;
991
992 /* choose the copy that is on our node/cpu */
993 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
994 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
995 tmp.size) != 0) {
996 ret = -EFAULT;
997 goto free_newinfo;
998 }
999
1000 counters = vmalloc(tmp.num_counters * sizeof(struct xt_counters));
1001 if (!counters) {
1002 ret = -ENOMEM;
1003 goto free_newinfo;
1004 }
1005
1006 ret = translate_table(tmp.name, tmp.valid_hooks,
1007 newinfo, loc_cpu_entry, tmp.size, tmp.num_entries,
1008 tmp.hook_entry, tmp.underflow);
1009 if (ret != 0)
1010 goto free_newinfo_counters;
1011
1012 duprintf("ip_tables: Translated table\n");
1013
1014 t = try_then_request_module(xt_find_table_lock(AF_INET6, tmp.name),
1015 "ip6table_%s", tmp.name);
1016 if (!t || IS_ERR(t)) {
1017 ret = t ? PTR_ERR(t) : -ENOENT;
1018 goto free_newinfo_counters_untrans;
1019 }
1020
1021 /* You lied! */
1022 if (tmp.valid_hooks != t->valid_hooks) {
1023 duprintf("Valid hook crap: %08X vs %08X\n",
1024 tmp.valid_hooks, t->valid_hooks);
1025 ret = -EINVAL;
1026 goto put_module;
1027 }
1028
1029 oldinfo = xt_replace_table(t, tmp.num_counters, newinfo, &ret);
1030 if (!oldinfo)
1031 goto put_module;
1032
1033 /* Update module usage count based on number of rules */
1034 duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
1035 oldinfo->number, oldinfo->initial_entries, newinfo->number);
1036 if ((oldinfo->number > oldinfo->initial_entries) ||
1037 (newinfo->number <= oldinfo->initial_entries))
1038 module_put(t->me);
1039 if ((oldinfo->number > oldinfo->initial_entries) &&
1040 (newinfo->number <= oldinfo->initial_entries))
1041 module_put(t->me);
1042
1043 /* Get the old counters. */
1044 get_counters(oldinfo, counters);
1045 /* Decrease module usage counts and free resource */
1046 loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
1047 IP6T_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry,NULL);
1048 xt_free_table_info(oldinfo);
1049 if (copy_to_user(tmp.counters, counters,
1050 sizeof(struct xt_counters) * tmp.num_counters) != 0)
1051 ret = -EFAULT;
1052 vfree(counters);
1053 xt_table_unlock(t);
1054 return ret;
1055
1056 put_module:
1057 module_put(t->me);
1058 xt_table_unlock(t);
1059 free_newinfo_counters_untrans:
1060 IP6T_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry,NULL);
1061 free_newinfo_counters:
1062 vfree(counters);
1063 free_newinfo:
1064 xt_free_table_info(newinfo);
1065 return ret;
1066 }
1067
1068 /* We're lazy, and add to the first CPU; overflow works its fey magic
1069 * and everything is OK. */
1070 static inline int
1071 add_counter_to_entry(struct ip6t_entry *e,
1072 const struct xt_counters addme[],
1073 unsigned int *i)
1074 {
1075 #if 0
1076 duprintf("add_counter: Entry %u %lu/%lu + %lu/%lu\n",
1077 *i,
1078 (long unsigned int)e->counters.pcnt,
1079 (long unsigned int)e->counters.bcnt,
1080 (long unsigned int)addme[*i].pcnt,
1081 (long unsigned int)addme[*i].bcnt);
1082 #endif
1083
1084 ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
1085
1086 (*i)++;
1087 return 0;
1088 }
1089
1090 static int
1091 do_add_counters(void __user *user, unsigned int len)
1092 {
1093 unsigned int i;
1094 struct xt_counters_info tmp, *paddc;
1095 struct xt_table_info *private;
1096 struct xt_table *t;
1097 int ret = 0;
1098 void *loc_cpu_entry;
1099
1100 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1101 return -EFAULT;
1102
1103 if (len != sizeof(tmp) + tmp.num_counters*sizeof(struct xt_counters))
1104 return -EINVAL;
1105
1106 paddc = vmalloc(len);
1107 if (!paddc)
1108 return -ENOMEM;
1109
1110 if (copy_from_user(paddc, user, len) != 0) {
1111 ret = -EFAULT;
1112 goto free;
1113 }
1114
1115 t = xt_find_table_lock(AF_INET6, tmp.name);
1116 if (!t || IS_ERR(t)) {
1117 ret = t ? PTR_ERR(t) : -ENOENT;
1118 goto free;
1119 }
1120
1121 write_lock_bh(&t->lock);
1122 private = t->private;
1123 if (private->number != paddc->num_counters) {
1124 ret = -EINVAL;
1125 goto unlock_up_free;
1126 }
1127
1128 i = 0;
1129 /* Choose the copy that is on our node */
1130 loc_cpu_entry = private->entries[smp_processor_id()];
1131 IP6T_ENTRY_ITERATE(loc_cpu_entry,
1132 private->size,
1133 add_counter_to_entry,
1134 paddc->counters,
1135 &i);
1136 unlock_up_free:
1137 write_unlock_bh(&t->lock);
1138 xt_table_unlock(t);
1139 module_put(t->me);
1140 free:
1141 vfree(paddc);
1142
1143 return ret;
1144 }
1145
1146 static int
1147 do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
1148 {
1149 int ret;
1150
1151 if (!capable(CAP_NET_ADMIN))
1152 return -EPERM;
1153
1154 switch (cmd) {
1155 case IP6T_SO_SET_REPLACE:
1156 ret = do_replace(user, len);
1157 break;
1158
1159 case IP6T_SO_SET_ADD_COUNTERS:
1160 ret = do_add_counters(user, len);
1161 break;
1162
1163 default:
1164 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
1165 ret = -EINVAL;
1166 }
1167
1168 return ret;
1169 }
1170
1171 static int
1172 do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1173 {
1174 int ret;
1175
1176 if (!capable(CAP_NET_ADMIN))
1177 return -EPERM;
1178
1179 switch (cmd) {
1180 case IP6T_SO_GET_INFO: {
1181 char name[IP6T_TABLE_MAXNAMELEN];
1182 struct xt_table *t;
1183
1184 if (*len != sizeof(struct ip6t_getinfo)) {
1185 duprintf("length %u != %u\n", *len,
1186 sizeof(struct ip6t_getinfo));
1187 ret = -EINVAL;
1188 break;
1189 }
1190
1191 if (copy_from_user(name, user, sizeof(name)) != 0) {
1192 ret = -EFAULT;
1193 break;
1194 }
1195 name[IP6T_TABLE_MAXNAMELEN-1] = '\0';
1196
1197 t = try_then_request_module(xt_find_table_lock(AF_INET6, name),
1198 "ip6table_%s", name);
1199 if (t && !IS_ERR(t)) {
1200 struct ip6t_getinfo info;
1201 struct xt_table_info *private = t->private;
1202
1203 info.valid_hooks = t->valid_hooks;
1204 memcpy(info.hook_entry, private->hook_entry,
1205 sizeof(info.hook_entry));
1206 memcpy(info.underflow, private->underflow,
1207 sizeof(info.underflow));
1208 info.num_entries = private->number;
1209 info.size = private->size;
1210 memcpy(info.name, name, sizeof(info.name));
1211
1212 if (copy_to_user(user, &info, *len) != 0)
1213 ret = -EFAULT;
1214 else
1215 ret = 0;
1216 xt_table_unlock(t);
1217 module_put(t->me);
1218 } else
1219 ret = t ? PTR_ERR(t) : -ENOENT;
1220 }
1221 break;
1222
1223 case IP6T_SO_GET_ENTRIES: {
1224 struct ip6t_get_entries get;
1225
1226 if (*len < sizeof(get)) {
1227 duprintf("get_entries: %u < %u\n", *len, sizeof(get));
1228 ret = -EINVAL;
1229 } else if (copy_from_user(&get, user, sizeof(get)) != 0) {
1230 ret = -EFAULT;
1231 } else if (*len != sizeof(struct ip6t_get_entries) + get.size) {
1232 duprintf("get_entries: %u != %u\n", *len,
1233 sizeof(struct ip6t_get_entries) + get.size);
1234 ret = -EINVAL;
1235 } else
1236 ret = get_entries(&get, user);
1237 break;
1238 }
1239
1240 case IP6T_SO_GET_REVISION_MATCH:
1241 case IP6T_SO_GET_REVISION_TARGET: {
1242 struct ip6t_get_revision rev;
1243 int target;
1244
1245 if (*len != sizeof(rev)) {
1246 ret = -EINVAL;
1247 break;
1248 }
1249 if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
1250 ret = -EFAULT;
1251 break;
1252 }
1253
1254 if (cmd == IP6T_SO_GET_REVISION_TARGET)
1255 target = 1;
1256 else
1257 target = 0;
1258
1259 try_then_request_module(xt_find_revision(AF_INET6, rev.name,
1260 rev.revision,
1261 target, &ret),
1262 "ip6t_%s", rev.name);
1263 break;
1264 }
1265
1266 default:
1267 duprintf("do_ip6t_get_ctl: unknown request %i\n", cmd);
1268 ret = -EINVAL;
1269 }
1270
1271 return ret;
1272 }
1273
1274 int ip6t_register_table(struct xt_table *table,
1275 const struct ip6t_replace *repl)
1276 {
1277 int ret;
1278 struct xt_table_info *newinfo;
1279 static struct xt_table_info bootstrap
1280 = { 0, 0, 0, { 0 }, { 0 }, { } };
1281 void *loc_cpu_entry;
1282
1283 newinfo = xt_alloc_table_info(repl->size);
1284 if (!newinfo)
1285 return -ENOMEM;
1286
1287 /* choose the copy on our node/cpu */
1288 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1289 memcpy(loc_cpu_entry, repl->entries, repl->size);
1290
1291 ret = translate_table(table->name, table->valid_hooks,
1292 newinfo, loc_cpu_entry, repl->size,
1293 repl->num_entries,
1294 repl->hook_entry,
1295 repl->underflow);
1296 if (ret != 0) {
1297 xt_free_table_info(newinfo);
1298 return ret;
1299 }
1300
1301 if (xt_register_table(table, &bootstrap, newinfo) != 0) {
1302 xt_free_table_info(newinfo);
1303 return ret;
1304 }
1305
1306 return 0;
1307 }
1308
1309 void ip6t_unregister_table(struct xt_table *table)
1310 {
1311 struct xt_table_info *private;
1312 void *loc_cpu_entry;
1313
1314 private = xt_unregister_table(table);
1315
1316 /* Decrease module usage counts and free resources */
1317 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1318 IP6T_ENTRY_ITERATE(loc_cpu_entry, private->size, cleanup_entry, NULL);
1319 xt_free_table_info(private);
1320 }
1321
1322 /* Returns 1 if the type and code are matched by the range, 0 otherwise */
1323 static inline int
1324 icmp6_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
1325 u_int8_t type, u_int8_t code,
1326 int invert)
1327 {
1328 return (type == test_type && code >= min_code && code <= max_code)
1329 ^ invert;
1330 }
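/* Worked example: with test_type == 128 (ICMPv6 echo request) and a code
   range of 0..0, a packet carrying type 128, code 0 matches; setting
   'invert' (the "!" form of the rule) flips that result. */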
1331
1332 static int
1333 icmp6_match(const struct sk_buff *skb,
1334 const struct net_device *in,
1335 const struct net_device *out,
1336 const void *matchinfo,
1337 int offset,
1338 unsigned int protoff,
1339 int *hotdrop)
1340 {
1341 struct icmp6hdr _icmp, *ic;
1342 const struct ip6t_icmp *icmpinfo = matchinfo;
1343
1344 /* Must not be a fragment. */
1345 if (offset)
1346 return 0;
1347
1348 ic = skb_header_pointer(skb, protoff, sizeof(_icmp), &_icmp);
1349 if (ic == NULL) {
1350 /* We've been asked to examine this packet, and we
1351 can't. Hence, no choice but to drop. */
1352 duprintf("Dropping evil ICMP tinygram.\n");
1353 *hotdrop = 1;
1354 return 0;
1355 }
1356
1357 return icmp6_type_code_match(icmpinfo->type,
1358 icmpinfo->code[0],
1359 icmpinfo->code[1],
1360 ic->icmp6_type, ic->icmp6_code,
1361 !!(icmpinfo->invflags&IP6T_ICMP_INV));
1362 }
1363
1364 /* Called when user tries to insert an entry of this type. */
1365 static int
1366 icmp6_checkentry(const char *tablename,
1367 const void *entry,
1368 void *matchinfo,
1369 unsigned int matchsize,
1370 unsigned int hook_mask)
1371 {
1372 const struct ip6t_ip6 *ipv6 = entry;
1373 const struct ip6t_icmp *icmpinfo = matchinfo;
1374
1375 /* Must specify proto == ICMP, and no unknown invflags */
1376 return ipv6->proto == IPPROTO_ICMPV6
1377 && !(ipv6->invflags & IP6T_INV_PROTO)
1378 && matchsize == IP6T_ALIGN(sizeof(struct ip6t_icmp))
1379 && !(icmpinfo->invflags & ~IP6T_ICMP_INV);
1380 }
1381
1382 /* The built-in targets: standard (NULL) and error. */
1383 static struct ip6t_target ip6t_standard_target = {
1384 .name = IP6T_STANDARD_TARGET,
1385 };
1386
1387 static struct ip6t_target ip6t_error_target = {
1388 .name = IP6T_ERROR_TARGET,
1389 .target = ip6t_error,
1390 };
1391
1392 static struct nf_sockopt_ops ip6t_sockopts = {
1393 .pf = PF_INET6,
1394 .set_optmin = IP6T_BASE_CTL,
1395 .set_optmax = IP6T_SO_SET_MAX+1,
1396 .set = do_ip6t_set_ctl,
1397 .get_optmin = IP6T_BASE_CTL,
1398 .get_optmax = IP6T_SO_GET_MAX+1,
1399 .get = do_ip6t_get_ctl,
1400 };
1401
1402 static struct ip6t_match icmp6_matchstruct = {
1403 .name = "icmp6",
1404 .match = &icmp6_match,
1405 .checkentry = &icmp6_checkentry,
1406 };
1407
1408 static int __init init(void)
1409 {
1410 int ret;
1411
1412 xt_proto_init(AF_INET6);
1413
1414 /* No one else will be downing the sem now, so we won't sleep */
1415 xt_register_target(AF_INET6, &ip6t_standard_target);
1416 xt_register_target(AF_INET6, &ip6t_error_target);
1417 xt_register_match(AF_INET6, &icmp6_matchstruct);
1418
1419 /* Register setsockopt */
1420 ret = nf_register_sockopt(&ip6t_sockopts);
1421 if (ret < 0) {
1422 duprintf("Unable to register sockopts.\n");
1423 xt_proto_fini(AF_INET6);
1424 return ret;
1425 }
1426
1427 printk("ip6_tables: (C) 2000-2006 Netfilter Core Team\n");
1428 return 0;
1429 }
1430
1431 static void __exit fini(void)
1432 {
1433 nf_unregister_sockopt(&ip6t_sockopts);
1434 xt_unregister_match(AF_INET6, &icmp6_matchstruct);
1435 xt_unregister_target(AF_INET6, &ip6t_error_target);
1436 xt_unregister_target(AF_INET6, &ip6t_standard_target);
1437 xt_proto_fini(AF_INET6);
1438 }
1439
1440 /*
1441 * find the offset to specified header or the protocol number of last header
1442 * if target < 0. "last header" is transport protocol header, ESP, or
1443 * "No next header".
1444 *
1445 * If target header is found, its offset is set in *offset and return protocol
1446 * number. Otherwise, return -1.
1447 *
1448 * Note that non-1st fragment is special case that "the protocol number
1449 * of last header" is "next header" field in Fragment header. In this case,
1450 * *offset is meaningless and fragment offset is stored in *fragoff if fragoff
1451 * isn't NULL.
1452 *
1453 */
1454 int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
1455 int target, unsigned short *fragoff)
1456 {
1457 unsigned int start = (u8*)(skb->nh.ipv6h + 1) - skb->data;
1458 u8 nexthdr = skb->nh.ipv6h->nexthdr;
1459 unsigned int len = skb->len - start;
1460
1461 if (fragoff)
1462 *fragoff = 0;
1463
1464 while (nexthdr != target) {
1465 struct ipv6_opt_hdr _hdr, *hp;
1466 unsigned int hdrlen;
1467
1468 if ((!ipv6_ext_hdr(nexthdr)) || nexthdr == NEXTHDR_NONE) {
1469 if (target < 0)
1470 break;
1471 return -1;
1472 }
1473
1474 hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr);
1475 if (hp == NULL)
1476 return -1;
1477 if (nexthdr == NEXTHDR_FRAGMENT) {
1478 unsigned short _frag_off, *fp;
1479 fp = skb_header_pointer(skb,
1480 start+offsetof(struct frag_hdr,
1481 frag_off),
1482 sizeof(_frag_off),
1483 &_frag_off);
1484 if (fp == NULL)
1485 return -1;
1486
1487 _frag_off = ntohs(*fp) & ~0x7;
1488 if (_frag_off) {
1489 if (target < 0 &&
1490 ((!ipv6_ext_hdr(hp->nexthdr)) ||
1491 nexthdr == NEXTHDR_NONE)) {
1492 if (fragoff)
1493 *fragoff = _frag_off;
1494 return hp->nexthdr;
1495 }
1496 return -1;
1497 }
1498 hdrlen = 8;
1499 } else if (nexthdr == NEXTHDR_AUTH)
1500 hdrlen = (hp->hdrlen + 2) << 2;
1501 else
1502 hdrlen = ipv6_optlen(hp);
1503
1504 nexthdr = hp->nexthdr;
1505 len -= hdrlen;
1506 start += hdrlen;
1507 }
1508
1509 *offset = start;
1510 return nexthdr;
1511 }
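/* Usage sketch (skb and the locals are hypothetical); this mirrors how
   ip6_packet_match() above consumes the function: target -1 asks for
   "whatever the last header is", and a non-zero *fragoff flags a non-first
   fragment whose offset, not *offset, is the meaningful output. */
#if 0
	unsigned int thoff = 0;
	unsigned short frag_off = 0;
	int proto = ipv6_find_hdr(skb, &thoff, -1, &frag_off);

	if (proto == IPPROTO_TCP && frag_off == 0) {
		/* thoff is the offset of a real TCP header in this packet */
	}
#endif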
1512
1513 EXPORT_SYMBOL(ip6t_register_table);
1514 EXPORT_SYMBOL(ip6t_unregister_table);
1515 EXPORT_SYMBOL(ip6t_do_table);
1516 EXPORT_SYMBOL(ip6t_ext_hdr);
1517 EXPORT_SYMBOL(ipv6_find_hdr);
1518 EXPORT_SYMBOL(ip6_masked_addrcmp);
1519
1520 module_init(init);
1521 module_exit(fini);