netfilter: xtables: move extension arguments into compound structure (1/6)
[deliverable/linux.git] / net / ipv6 / netfilter / ip6_tables.c
1 /*
2 * Packet matching code.
3 *
4 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
5 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12 #include <linux/capability.h>
13 #include <linux/in.h>
14 #include <linux/skbuff.h>
15 #include <linux/kmod.h>
16 #include <linux/vmalloc.h>
17 #include <linux/netdevice.h>
18 #include <linux/module.h>
19 #include <linux/poison.h>
20 #include <linux/icmpv6.h>
21 #include <net/ipv6.h>
22 #include <net/compat.h>
23 #include <asm/uaccess.h>
24 #include <linux/mutex.h>
25 #include <linux/proc_fs.h>
26 #include <linux/err.h>
27 #include <linux/cpumask.h>
28
29 #include <linux/netfilter_ipv6/ip6_tables.h>
30 #include <linux/netfilter/x_tables.h>
31 #include <net/netfilter/nf_log.h>
32
33 MODULE_LICENSE("GPL");
34 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
35 MODULE_DESCRIPTION("IPv6 packet filter");
36
37 /*#define DEBUG_IP_FIREWALL*/
38 /*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
39 /*#define DEBUG_IP_FIREWALL_USER*/
40
41 #ifdef DEBUG_IP_FIREWALL
42 #define dprintf(format, args...) printk(format , ## args)
43 #else
44 #define dprintf(format, args...)
45 #endif
46
47 #ifdef DEBUG_IP_FIREWALL_USER
48 #define duprintf(format, args...) printk(format , ## args)
49 #else
50 #define duprintf(format, args...)
51 #endif
52
53 #ifdef CONFIG_NETFILTER_DEBUG
54 #define IP_NF_ASSERT(x) \
55 do { \
56 if (!(x)) \
57 printk("IP_NF_ASSERT: %s:%s:%u\n", \
58 __func__, __FILE__, __LINE__); \
59 } while(0)
60 #else
61 #define IP_NF_ASSERT(x)
62 #endif
63
64 #if 0
65 /* All the better to debug you with... */
66 #define static
67 #define inline
68 #endif
69
/*
   We keep a set of rules for each CPU, so we can avoid write-locking
   them in the softirq when updating the counters and therefore
   only need to read-lock in the softirq; doing a write_lock_bh() in user
   context stops packets coming through and allows user context to read
   the counters or update the rules.

   Hence the start of any table is given by get_entry() below.  */
79 /* Check for an extension */
80 int
81 ip6t_ext_hdr(u8 nexthdr)
82 {
83 return ( (nexthdr == IPPROTO_HOPOPTS) ||
84 (nexthdr == IPPROTO_ROUTING) ||
85 (nexthdr == IPPROTO_FRAGMENT) ||
86 (nexthdr == IPPROTO_ESP) ||
87 (nexthdr == IPPROTO_AH) ||
88 (nexthdr == IPPROTO_NONE) ||
89 (nexthdr == IPPROTO_DSTOPTS) );
90 }
91
92 /* Returns whether matches rule or not. */
93 /* Performance critical - called for every packet */
94 static inline bool
95 ip6_packet_match(const struct sk_buff *skb,
96 const char *indev,
97 const char *outdev,
98 const struct ip6t_ip6 *ip6info,
99 unsigned int *protoff,
100 int *fragoff, bool *hotdrop)
101 {
102 size_t i;
103 unsigned long ret;
104 const struct ipv6hdr *ipv6 = ipv6_hdr(skb);
105
106 #define FWINV(bool, invflg) ((bool) ^ !!(ip6info->invflags & (invflg)))
107
108 if (FWINV(ipv6_masked_addr_cmp(&ipv6->saddr, &ip6info->smsk,
109 &ip6info->src), IP6T_INV_SRCIP)
110 || FWINV(ipv6_masked_addr_cmp(&ipv6->daddr, &ip6info->dmsk,
111 &ip6info->dst), IP6T_INV_DSTIP)) {
112 dprintf("Source or dest mismatch.\n");
113 /*
114 dprintf("SRC: %u. Mask: %u. Target: %u.%s\n", ip->saddr,
115 ipinfo->smsk.s_addr, ipinfo->src.s_addr,
116 ipinfo->invflags & IP6T_INV_SRCIP ? " (INV)" : "");
117 dprintf("DST: %u. Mask: %u. Target: %u.%s\n", ip->daddr,
118 ipinfo->dmsk.s_addr, ipinfo->dst.s_addr,
119 ipinfo->invflags & IP6T_INV_DSTIP ? " (INV)" : "");*/
120 return false;
121 }
122
123 /* Look for ifname matches; this should unroll nicely. */
124 for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
125 ret |= (((const unsigned long *)indev)[i]
126 ^ ((const unsigned long *)ip6info->iniface)[i])
127 & ((const unsigned long *)ip6info->iniface_mask)[i];
128 }
129
130 if (FWINV(ret != 0, IP6T_INV_VIA_IN)) {
131 dprintf("VIA in mismatch (%s vs %s).%s\n",
132 indev, ip6info->iniface,
133 ip6info->invflags&IP6T_INV_VIA_IN ?" (INV)":"");
134 return false;
135 }
136
137 for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
138 ret |= (((const unsigned long *)outdev)[i]
139 ^ ((const unsigned long *)ip6info->outiface)[i])
140 & ((const unsigned long *)ip6info->outiface_mask)[i];
141 }
142
143 if (FWINV(ret != 0, IP6T_INV_VIA_OUT)) {
144 dprintf("VIA out mismatch (%s vs %s).%s\n",
145 outdev, ip6info->outiface,
146 ip6info->invflags&IP6T_INV_VIA_OUT ?" (INV)":"");
147 return false;
148 }
149
150 /* ... might want to do something with class and flowlabel here ... */
151
152 /* look for the desired protocol header */
153 if((ip6info->flags & IP6T_F_PROTO)) {
154 int protohdr;
155 unsigned short _frag_off;
156
157 protohdr = ipv6_find_hdr(skb, protoff, -1, &_frag_off);
158 if (protohdr < 0) {
159 if (_frag_off == 0)
160 *hotdrop = true;
161 return false;
162 }
163 *fragoff = _frag_off;
164
165 dprintf("Packet protocol %hi ?= %s%hi.\n",
166 protohdr,
167 ip6info->invflags & IP6T_INV_PROTO ? "!":"",
168 ip6info->proto);
169
170 if (ip6info->proto == protohdr) {
171 if(ip6info->invflags & IP6T_INV_PROTO) {
172 return false;
173 }
174 return true;
175 }
176
177 /* We need match for the '-p all', too! */
178 if ((ip6info->proto != 0) &&
179 !(ip6info->invflags & IP6T_INV_PROTO))
180 return false;
181 }
182 return true;
183 }
184
185 /* should be ip6 safe */
186 static bool
187 ip6_checkentry(const struct ip6t_ip6 *ipv6)
188 {
189 if (ipv6->flags & ~IP6T_F_MASK) {
190 duprintf("Unknown flag bits set: %08X\n",
191 ipv6->flags & ~IP6T_F_MASK);
192 return false;
193 }
194 if (ipv6->invflags & ~IP6T_INV_MASK) {
195 duprintf("Unknown invflag bits set: %08X\n",
196 ipv6->invflags & ~IP6T_INV_MASK);
197 return false;
198 }
199 return true;
200 }
201
202 static unsigned int
203 ip6t_error(struct sk_buff *skb,
204 const struct net_device *in,
205 const struct net_device *out,
206 unsigned int hooknum,
207 const struct xt_target *target,
208 const void *targinfo)
209 {
210 if (net_ratelimit())
211 printk("ip6_tables: error: `%s'\n", (char *)targinfo);
212
213 return NF_DROP;
214 }
215
216 /* Performance critical - called for every packet */
217 static inline bool
218 do_match(struct ip6t_entry_match *m, const struct sk_buff *skb,
219 struct xt_match_param *par)
220 {
221 par->match = m->u.kernel.match;
222 par->matchinfo = m->data;
223
224 /* Stop iteration if it doesn't match */
225 if (!m->u.kernel.match->match(skb, par))
226 return true;
227 else
228 return false;
229 }
230
231 static inline struct ip6t_entry *
232 get_entry(void *base, unsigned int offset)
233 {
234 return (struct ip6t_entry *)(base + offset);
235 }
236
237 /* All zeroes == unconditional rule. */
238 /* Mildly perf critical (only if packet tracing is on) */
239 static inline int
240 unconditional(const struct ip6t_ip6 *ipv6)
241 {
242 unsigned int i;
243
244 for (i = 0; i < sizeof(*ipv6); i++)
245 if (((char *)ipv6)[i])
246 break;
247
248 return (i == sizeof(*ipv6));
249 }
250
251 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
252 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
253 /* This cries for unification! */
254 static const char *const hooknames[] = {
255 [NF_INET_PRE_ROUTING] = "PREROUTING",
256 [NF_INET_LOCAL_IN] = "INPUT",
257 [NF_INET_FORWARD] = "FORWARD",
258 [NF_INET_LOCAL_OUT] = "OUTPUT",
259 [NF_INET_POST_ROUTING] = "POSTROUTING",
260 };
261
262 enum nf_ip_trace_comments {
263 NF_IP6_TRACE_COMMENT_RULE,
264 NF_IP6_TRACE_COMMENT_RETURN,
265 NF_IP6_TRACE_COMMENT_POLICY,
266 };
267
268 static const char *const comments[] = {
269 [NF_IP6_TRACE_COMMENT_RULE] = "rule",
270 [NF_IP6_TRACE_COMMENT_RETURN] = "return",
271 [NF_IP6_TRACE_COMMENT_POLICY] = "policy",
272 };
273
274 static struct nf_loginfo trace_loginfo = {
275 .type = NF_LOG_TYPE_LOG,
276 .u = {
277 .log = {
278 .level = 4,
279 .logflags = NF_LOG_MASK,
280 },
281 },
282 };
283
284 /* Mildly perf critical (only if packet tracing is on) */
285 static inline int
286 get_chainname_rulenum(struct ip6t_entry *s, struct ip6t_entry *e,
287 char *hookname, char **chainname,
288 char **comment, unsigned int *rulenum)
289 {
290 struct ip6t_standard_target *t = (void *)ip6t_get_target(s);
291
292 if (strcmp(t->target.u.kernel.target->name, IP6T_ERROR_TARGET) == 0) {
293 /* Head of user chain: ERROR target with chainname */
294 *chainname = t->target.data;
295 (*rulenum) = 0;
296 } else if (s == e) {
297 (*rulenum)++;
298
299 if (s->target_offset == sizeof(struct ip6t_entry)
300 && strcmp(t->target.u.kernel.target->name,
301 IP6T_STANDARD_TARGET) == 0
302 && t->verdict < 0
303 && unconditional(&s->ipv6)) {
304 /* Tail of chains: STANDARD target (return/policy) */
305 *comment = *chainname == hookname
306 ? (char *)comments[NF_IP6_TRACE_COMMENT_POLICY]
307 : (char *)comments[NF_IP6_TRACE_COMMENT_RETURN];
308 }
309 return 1;
310 } else
311 (*rulenum)++;
312
313 return 0;
314 }
315
316 static void trace_packet(struct sk_buff *skb,
317 unsigned int hook,
318 const struct net_device *in,
319 const struct net_device *out,
320 const char *tablename,
321 struct xt_table_info *private,
322 struct ip6t_entry *e)
323 {
324 void *table_base;
325 const struct ip6t_entry *root;
326 char *hookname, *chainname, *comment;
327 unsigned int rulenum = 0;
328
329 table_base = (void *)private->entries[smp_processor_id()];
330 root = get_entry(table_base, private->hook_entry[hook]);
331
332 hookname = chainname = (char *)hooknames[hook];
333 comment = (char *)comments[NF_IP6_TRACE_COMMENT_RULE];
334
335 IP6T_ENTRY_ITERATE(root,
336 private->size - private->hook_entry[hook],
337 get_chainname_rulenum,
338 e, hookname, &chainname, &comment, &rulenum);
339
340 nf_log_packet(AF_INET6, hook, skb, in, out, &trace_loginfo,
341 "TRACE: %s:%s:%s:%u ",
342 tablename, chainname, comment, rulenum);
343 }
344 #endif
345
346 /* Returns one of the generic firewall policies, like NF_ACCEPT. */
347 unsigned int
348 ip6t_do_table(struct sk_buff *skb,
349 unsigned int hook,
350 const struct net_device *in,
351 const struct net_device *out,
352 struct xt_table *table)
353 {
354 static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
355 bool hotdrop = false;
356 /* Initializing verdict to NF_DROP keeps gcc happy. */
357 unsigned int verdict = NF_DROP;
358 const char *indev, *outdev;
359 void *table_base;
360 struct ip6t_entry *e, *back;
361 struct xt_table_info *private;
362 struct xt_match_param mtpar;
363
364 /* Initialization */
365 indev = in ? in->name : nulldevname;
366 outdev = out ? out->name : nulldevname;
367 /* We handle fragments by dealing with the first fragment as
368 * if it was a normal packet. All other fragments are treated
369 * normally, except that they will NEVER match rules that ask
370 * things we don't know, ie. tcp syn flag or ports). If the
371 * rule is also a fragment-specific rule, non-fragments won't
372 * match it. */
373 mtpar.hotdrop = &hotdrop;
374 mtpar.in = in;
375 mtpar.out = out;
376
377 read_lock_bh(&table->lock);
378 IP_NF_ASSERT(table->valid_hooks & (1 << hook));
379 private = table->private;
380 table_base = (void *)private->entries[smp_processor_id()];
381 e = get_entry(table_base, private->hook_entry[hook]);
382
383 /* For return from builtin chain */
384 back = get_entry(table_base, private->underflow[hook]);
385
386 do {
387 IP_NF_ASSERT(e);
388 IP_NF_ASSERT(back);
389 if (ip6_packet_match(skb, indev, outdev, &e->ipv6,
390 &mtpar.thoff, &mtpar.fragoff, &hotdrop)) {
391 struct ip6t_entry_target *t;
392
393 if (IP6T_MATCH_ITERATE(e, do_match, skb, &mtpar) != 0)
394 goto no_match;
395
396 ADD_COUNTER(e->counters,
397 ntohs(ipv6_hdr(skb)->payload_len) +
398 sizeof(struct ipv6hdr), 1);
399
400 t = ip6t_get_target(e);
401 IP_NF_ASSERT(t->u.kernel.target);
402
403 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
404 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
405 /* The packet is traced: log it */
406 if (unlikely(skb->nf_trace))
407 trace_packet(skb, hook, in, out,
408 table->name, private, e);
409 #endif
410 /* Standard target? */
411 if (!t->u.kernel.target->target) {
412 int v;
413
414 v = ((struct ip6t_standard_target *)t)->verdict;
415 if (v < 0) {
416 /* Pop from stack? */
417 if (v != IP6T_RETURN) {
418 verdict = (unsigned)(-v) - 1;
419 break;
420 }
421 e = back;
422 back = get_entry(table_base,
423 back->comefrom);
424 continue;
425 }
426 if (table_base + v != (void *)e + e->next_offset
427 && !(e->ipv6.flags & IP6T_F_GOTO)) {
428 /* Save old back ptr in next entry */
429 struct ip6t_entry *next
430 = (void *)e + e->next_offset;
431 next->comefrom
432 = (void *)back - table_base;
433 /* set back pointer to next entry */
434 back = next;
435 }
436
437 e = get_entry(table_base, v);
438 } else {
439 /* Targets which reenter must return
440 abs. verdicts */
441 #ifdef CONFIG_NETFILTER_DEBUG
442 ((struct ip6t_entry *)table_base)->comefrom
443 = 0xeeeeeeec;
444 #endif
445 verdict = t->u.kernel.target->target(skb,
446 in, out,
447 hook,
448 t->u.kernel.target,
449 t->data);
450
451 #ifdef CONFIG_NETFILTER_DEBUG
452 if (((struct ip6t_entry *)table_base)->comefrom
453 != 0xeeeeeeec
454 && verdict == IP6T_CONTINUE) {
455 printk("Target %s reentered!\n",
456 t->u.kernel.target->name);
457 verdict = NF_DROP;
458 }
459 ((struct ip6t_entry *)table_base)->comefrom
460 = 0x57acc001;
461 #endif
462 if (verdict == IP6T_CONTINUE)
463 e = (void *)e + e->next_offset;
464 else
465 /* Verdict */
466 break;
467 }
468 } else {
469
470 no_match:
471 e = (void *)e + e->next_offset;
472 }
473 } while (!hotdrop);
474
475 #ifdef CONFIG_NETFILTER_DEBUG
476 ((struct ip6t_entry *)table_base)->comefrom = NETFILTER_LINK_POISON;
477 #endif
478 read_unlock_bh(&table->lock);
479
480 #ifdef DEBUG_ALLOW_ALL
481 return NF_ACCEPT;
482 #else
483 if (hotdrop)
484 return NF_DROP;
485 else return verdict;
486 #endif
487 }
488
489 /* Figures out from what hook each rule can be called: returns 0 if
490 there are loops. Puts hook bitmask in comefrom. */
491 static int
492 mark_source_chains(struct xt_table_info *newinfo,
493 unsigned int valid_hooks, void *entry0)
494 {
495 unsigned int hook;
496
497 /* No recursion; use packet counter to save back ptrs (reset
498 to 0 as we leave), and comefrom to save source hook bitmask */
499 for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
500 unsigned int pos = newinfo->hook_entry[hook];
501 struct ip6t_entry *e = (struct ip6t_entry *)(entry0 + pos);
502
503 if (!(valid_hooks & (1 << hook)))
504 continue;
505
506 /* Set initial back pointer. */
507 e->counters.pcnt = pos;
508
509 for (;;) {
510 struct ip6t_standard_target *t
511 = (void *)ip6t_get_target(e);
512 int visited = e->comefrom & (1 << hook);
513
514 if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
515 printk("iptables: loop hook %u pos %u %08X.\n",
516 hook, pos, e->comefrom);
517 return 0;
518 }
519 e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));
520
521 /* Unconditional return/END. */
522 if ((e->target_offset == sizeof(struct ip6t_entry)
523 && (strcmp(t->target.u.user.name,
524 IP6T_STANDARD_TARGET) == 0)
525 && t->verdict < 0
526 && unconditional(&e->ipv6)) || visited) {
527 unsigned int oldpos, size;
528
529 if (t->verdict < -NF_MAX_VERDICT - 1) {
530 duprintf("mark_source_chains: bad "
531 "negative verdict (%i)\n",
532 t->verdict);
533 return 0;
534 }
535
536 /* Return: backtrack through the last
537 big jump. */
538 do {
539 e->comefrom ^= (1<<NF_INET_NUMHOOKS);
540 #ifdef DEBUG_IP_FIREWALL_USER
541 if (e->comefrom
542 & (1 << NF_INET_NUMHOOKS)) {
543 duprintf("Back unset "
544 "on hook %u "
545 "rule %u\n",
546 hook, pos);
547 }
548 #endif
549 oldpos = pos;
550 pos = e->counters.pcnt;
551 e->counters.pcnt = 0;
552
553 /* We're at the start. */
554 if (pos == oldpos)
555 goto next;
556
557 e = (struct ip6t_entry *)
558 (entry0 + pos);
559 } while (oldpos == pos + e->next_offset);
560
561 /* Move along one */
562 size = e->next_offset;
563 e = (struct ip6t_entry *)
564 (entry0 + pos + size);
565 e->counters.pcnt = pos;
566 pos += size;
567 } else {
568 int newpos = t->verdict;
569
570 if (strcmp(t->target.u.user.name,
571 IP6T_STANDARD_TARGET) == 0
572 && newpos >= 0) {
573 if (newpos > newinfo->size -
574 sizeof(struct ip6t_entry)) {
575 duprintf("mark_source_chains: "
576 "bad verdict (%i)\n",
577 newpos);
578 return 0;
579 }
580 /* This a jump; chase it. */
581 duprintf("Jump rule %u -> %u\n",
582 pos, newpos);
583 } else {
584 /* ... this is a fallthru */
585 newpos = pos + e->next_offset;
586 }
587 e = (struct ip6t_entry *)
588 (entry0 + newpos);
589 e->counters.pcnt = pos;
590 pos = newpos;
591 }
592 }
593 next:
594 duprintf("Finished chain %u\n", hook);
595 }
596 return 1;
597 }
598
599 static int
600 cleanup_match(struct ip6t_entry_match *m, unsigned int *i)
601 {
602 if (i && (*i)-- == 0)
603 return 1;
604
605 if (m->u.kernel.match->destroy)
606 m->u.kernel.match->destroy(m->u.kernel.match, m->data);
607 module_put(m->u.kernel.match->me);
608 return 0;
609 }
610
611 static int
612 check_entry(struct ip6t_entry *e, const char *name)
613 {
614 struct ip6t_entry_target *t;
615
616 if (!ip6_checkentry(&e->ipv6)) {
617 duprintf("ip_tables: ip check failed %p %s.\n", e, name);
618 return -EINVAL;
619 }
620
621 if (e->target_offset + sizeof(struct ip6t_entry_target) >
622 e->next_offset)
623 return -EINVAL;
624
625 t = ip6t_get_target(e);
626 if (e->target_offset + t->u.target_size > e->next_offset)
627 return -EINVAL;
628
629 return 0;
630 }
631
632 static int check_match(struct ip6t_entry_match *m, const char *name,
633 const struct ip6t_ip6 *ipv6,
634 unsigned int hookmask, unsigned int *i)
635 {
636 struct xt_match *match;
637 int ret;
638
639 match = m->u.kernel.match;
640 ret = xt_check_match(match, AF_INET6, m->u.match_size - sizeof(*m),
641 name, hookmask, ipv6->proto,
642 ipv6->invflags & IP6T_INV_PROTO, ipv6, m->data);
643 if (ret < 0) {
644 duprintf("ip_tables: check failed for `%s'.\n",
645 m->u.kernel.match->name);
646 return ret;
647 }
648 ++*i;
649 return 0;
650 }
651
652 static int
653 find_check_match(struct ip6t_entry_match *m,
654 const char *name,
655 const struct ip6t_ip6 *ipv6,
656 unsigned int hookmask,
657 unsigned int *i)
658 {
659 struct xt_match *match;
660 int ret;
661
662 match = try_then_request_module(xt_find_match(AF_INET6, m->u.user.name,
663 m->u.user.revision),
664 "ip6t_%s", m->u.user.name);
665 if (IS_ERR(match) || !match) {
666 duprintf("find_check_match: `%s' not found\n", m->u.user.name);
667 return match ? PTR_ERR(match) : -ENOENT;
668 }
669 m->u.kernel.match = match;
670
671 ret = check_match(m, name, ipv6, hookmask, i);
672 if (ret)
673 goto err;
674
675 return 0;
676 err:
677 module_put(m->u.kernel.match->me);
678 return ret;
679 }
680
681 static int check_target(struct ip6t_entry *e, const char *name)
682 {
683 struct ip6t_entry_target *t;
684 struct xt_target *target;
685 int ret;
686
687 t = ip6t_get_target(e);
688 target = t->u.kernel.target;
689 ret = xt_check_target(target, AF_INET6, t->u.target_size - sizeof(*t),
690 name, e->comefrom, e->ipv6.proto,
691 e->ipv6.invflags & IP6T_INV_PROTO, e, t->data);
692 if (ret < 0) {
693 duprintf("ip_tables: check failed for `%s'.\n",
694 t->u.kernel.target->name);
695 return ret;
696 }
697 return 0;
698 }
699
700 static int
701 find_check_entry(struct ip6t_entry *e, const char *name, unsigned int size,
702 unsigned int *i)
703 {
704 struct ip6t_entry_target *t;
705 struct xt_target *target;
706 int ret;
707 unsigned int j;
708
709 ret = check_entry(e, name);
710 if (ret)
711 return ret;
712
713 j = 0;
714 ret = IP6T_MATCH_ITERATE(e, find_check_match, name, &e->ipv6,
715 e->comefrom, &j);
716 if (ret != 0)
717 goto cleanup_matches;
718
719 t = ip6t_get_target(e);
720 target = try_then_request_module(xt_find_target(AF_INET6,
721 t->u.user.name,
722 t->u.user.revision),
723 "ip6t_%s", t->u.user.name);
724 if (IS_ERR(target) || !target) {
725 duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
726 ret = target ? PTR_ERR(target) : -ENOENT;
727 goto cleanup_matches;
728 }
729 t->u.kernel.target = target;
730
731 ret = check_target(e, name);
732 if (ret)
733 goto err;
734
735 (*i)++;
736 return 0;
737 err:
738 module_put(t->u.kernel.target->me);
739 cleanup_matches:
740 IP6T_MATCH_ITERATE(e, cleanup_match, &j);
741 return ret;
742 }
743
744 static int
745 check_entry_size_and_hooks(struct ip6t_entry *e,
746 struct xt_table_info *newinfo,
747 unsigned char *base,
748 unsigned char *limit,
749 const unsigned int *hook_entries,
750 const unsigned int *underflows,
751 unsigned int *i)
752 {
753 unsigned int h;
754
755 if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0
756 || (unsigned char *)e + sizeof(struct ip6t_entry) >= limit) {
757 duprintf("Bad offset %p\n", e);
758 return -EINVAL;
759 }
760
761 if (e->next_offset
762 < sizeof(struct ip6t_entry) + sizeof(struct ip6t_entry_target)) {
763 duprintf("checking: element %p size %u\n",
764 e, e->next_offset);
765 return -EINVAL;
766 }
767
768 /* Check hooks & underflows */
769 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
770 if ((unsigned char *)e - base == hook_entries[h])
771 newinfo->hook_entry[h] = hook_entries[h];
772 if ((unsigned char *)e - base == underflows[h])
773 newinfo->underflow[h] = underflows[h];
774 }
775
776 /* FIXME: underflows must be unconditional, standard verdicts
777 < 0 (not IP6T_RETURN). --RR */
778
779 /* Clear counters and comefrom */
780 e->counters = ((struct xt_counters) { 0, 0 });
781 e->comefrom = 0;
782
783 (*i)++;
784 return 0;
785 }
786
787 static int
788 cleanup_entry(struct ip6t_entry *e, unsigned int *i)
789 {
790 struct ip6t_entry_target *t;
791
792 if (i && (*i)-- == 0)
793 return 1;
794
795 /* Cleanup all matches */
796 IP6T_MATCH_ITERATE(e, cleanup_match, NULL);
797 t = ip6t_get_target(e);
798 if (t->u.kernel.target->destroy)
799 t->u.kernel.target->destroy(t->u.kernel.target, t->data);
800 module_put(t->u.kernel.target->me);
801 return 0;
802 }
803
804 /* Checks and translates the user-supplied table segment (held in
805 newinfo) */
806 static int
807 translate_table(const char *name,
808 unsigned int valid_hooks,
809 struct xt_table_info *newinfo,
810 void *entry0,
811 unsigned int size,
812 unsigned int number,
813 const unsigned int *hook_entries,
814 const unsigned int *underflows)
815 {
816 unsigned int i;
817 int ret;
818
819 newinfo->size = size;
820 newinfo->number = number;
821
822 /* Init all hooks to impossible value. */
823 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
824 newinfo->hook_entry[i] = 0xFFFFFFFF;
825 newinfo->underflow[i] = 0xFFFFFFFF;
826 }
827
828 duprintf("translate_table: size %u\n", newinfo->size);
829 i = 0;
830 /* Walk through entries, checking offsets. */
831 ret = IP6T_ENTRY_ITERATE(entry0, newinfo->size,
832 check_entry_size_and_hooks,
833 newinfo,
834 entry0,
835 entry0 + size,
836 hook_entries, underflows, &i);
837 if (ret != 0)
838 return ret;
839
840 if (i != number) {
841 duprintf("translate_table: %u not %u entries\n",
842 i, number);
843 return -EINVAL;
844 }
845
846 /* Check hooks all assigned */
847 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
848 /* Only hooks which are valid */
849 if (!(valid_hooks & (1 << i)))
850 continue;
851 if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
852 duprintf("Invalid hook entry %u %u\n",
853 i, hook_entries[i]);
854 return -EINVAL;
855 }
856 if (newinfo->underflow[i] == 0xFFFFFFFF) {
857 duprintf("Invalid underflow %u %u\n",
858 i, underflows[i]);
859 return -EINVAL;
860 }
861 }
862
863 if (!mark_source_chains(newinfo, valid_hooks, entry0))
864 return -ELOOP;
865
866 /* Finally, each sanity check must pass */
867 i = 0;
868 ret = IP6T_ENTRY_ITERATE(entry0, newinfo->size,
869 find_check_entry, name, size, &i);
870
871 if (ret != 0) {
872 IP6T_ENTRY_ITERATE(entry0, newinfo->size,
873 cleanup_entry, &i);
874 return ret;
875 }
876
877 /* And one copy for every other CPU */
878 for_each_possible_cpu(i) {
879 if (newinfo->entries[i] && newinfo->entries[i] != entry0)
880 memcpy(newinfo->entries[i], entry0, newinfo->size);
881 }
882
883 return ret;
884 }
885
886 /* Gets counters. */
887 static inline int
888 add_entry_to_counter(const struct ip6t_entry *e,
889 struct xt_counters total[],
890 unsigned int *i)
891 {
892 ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
893
894 (*i)++;
895 return 0;
896 }
897
898 static inline int
899 set_entry_to_counter(const struct ip6t_entry *e,
900 struct ip6t_counters total[],
901 unsigned int *i)
902 {
903 SET_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
904
905 (*i)++;
906 return 0;
907 }
908
909 static void
910 get_counters(const struct xt_table_info *t,
911 struct xt_counters counters[])
912 {
913 unsigned int cpu;
914 unsigned int i;
915 unsigned int curcpu;
916
917 /* Instead of clearing (by a previous call to memset())
918 * the counters and using adds, we set the counters
919 * with data used by 'current' CPU
920 * We dont care about preemption here.
921 */
922 curcpu = raw_smp_processor_id();
923
924 i = 0;
925 IP6T_ENTRY_ITERATE(t->entries[curcpu],
926 t->size,
927 set_entry_to_counter,
928 counters,
929 &i);
930
931 for_each_possible_cpu(cpu) {
932 if (cpu == curcpu)
933 continue;
934 i = 0;
935 IP6T_ENTRY_ITERATE(t->entries[cpu],
936 t->size,
937 add_entry_to_counter,
938 counters,
939 &i);
940 }
941 }
942
943 static struct xt_counters *alloc_counters(struct xt_table *table)
944 {
945 unsigned int countersize;
946 struct xt_counters *counters;
947 const struct xt_table_info *private = table->private;
948
949 /* We need atomic snapshot of counters: rest doesn't change
950 (other than comefrom, which userspace doesn't care
951 about). */
952 countersize = sizeof(struct xt_counters) * private->number;
953 counters = vmalloc_node(countersize, numa_node_id());
954
955 if (counters == NULL)
956 return ERR_PTR(-ENOMEM);
957
958 /* First, sum counters... */
959 write_lock_bh(&table->lock);
960 get_counters(private, counters);
961 write_unlock_bh(&table->lock);
962
963 return counters;
964 }
965
966 static int
967 copy_entries_to_user(unsigned int total_size,
968 struct xt_table *table,
969 void __user *userptr)
970 {
971 unsigned int off, num;
972 struct ip6t_entry *e;
973 struct xt_counters *counters;
974 const struct xt_table_info *private = table->private;
975 int ret = 0;
976 const void *loc_cpu_entry;
977
978 counters = alloc_counters(table);
979 if (IS_ERR(counters))
980 return PTR_ERR(counters);
981
982 /* choose the copy that is on our node/cpu, ...
983 * This choice is lazy (because current thread is
984 * allowed to migrate to another cpu)
985 */
986 loc_cpu_entry = private->entries[raw_smp_processor_id()];
987 if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
988 ret = -EFAULT;
989 goto free_counters;
990 }
991
992 /* FIXME: use iterator macros --RR */
993 /* ... then go back and fix counters and names */
994 for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
995 unsigned int i;
996 const struct ip6t_entry_match *m;
997 const struct ip6t_entry_target *t;
998
999 e = (struct ip6t_entry *)(loc_cpu_entry + off);
1000 if (copy_to_user(userptr + off
1001 + offsetof(struct ip6t_entry, counters),
1002 &counters[num],
1003 sizeof(counters[num])) != 0) {
1004 ret = -EFAULT;
1005 goto free_counters;
1006 }
1007
1008 for (i = sizeof(struct ip6t_entry);
1009 i < e->target_offset;
1010 i += m->u.match_size) {
1011 m = (void *)e + i;
1012
1013 if (copy_to_user(userptr + off + i
1014 + offsetof(struct ip6t_entry_match,
1015 u.user.name),
1016 m->u.kernel.match->name,
1017 strlen(m->u.kernel.match->name)+1)
1018 != 0) {
1019 ret = -EFAULT;
1020 goto free_counters;
1021 }
1022 }
1023
1024 t = ip6t_get_target(e);
1025 if (copy_to_user(userptr + off + e->target_offset
1026 + offsetof(struct ip6t_entry_target,
1027 u.user.name),
1028 t->u.kernel.target->name,
1029 strlen(t->u.kernel.target->name)+1) != 0) {
1030 ret = -EFAULT;
1031 goto free_counters;
1032 }
1033 }
1034
1035 free_counters:
1036 vfree(counters);
1037 return ret;
1038 }
1039
1040 #ifdef CONFIG_COMPAT
1041 static void compat_standard_from_user(void *dst, void *src)
1042 {
1043 int v = *(compat_int_t *)src;
1044
1045 if (v > 0)
1046 v += xt_compat_calc_jump(AF_INET6, v);
1047 memcpy(dst, &v, sizeof(v));
1048 }
1049
1050 static int compat_standard_to_user(void __user *dst, void *src)
1051 {
1052 compat_int_t cv = *(int *)src;
1053
1054 if (cv > 0)
1055 cv -= xt_compat_calc_jump(AF_INET6, cv);
1056 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
1057 }
1058
1059 static inline int
1060 compat_calc_match(struct ip6t_entry_match *m, int *size)
1061 {
1062 *size += xt_compat_match_offset(m->u.kernel.match);
1063 return 0;
1064 }
1065
1066 static int compat_calc_entry(struct ip6t_entry *e,
1067 const struct xt_table_info *info,
1068 void *base, struct xt_table_info *newinfo)
1069 {
1070 struct ip6t_entry_target *t;
1071 unsigned int entry_offset;
1072 int off, i, ret;
1073
1074 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1075 entry_offset = (void *)e - base;
1076 IP6T_MATCH_ITERATE(e, compat_calc_match, &off);
1077 t = ip6t_get_target(e);
1078 off += xt_compat_target_offset(t->u.kernel.target);
1079 newinfo->size -= off;
1080 ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
1081 if (ret)
1082 return ret;
1083
1084 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1085 if (info->hook_entry[i] &&
1086 (e < (struct ip6t_entry *)(base + info->hook_entry[i])))
1087 newinfo->hook_entry[i] -= off;
1088 if (info->underflow[i] &&
1089 (e < (struct ip6t_entry *)(base + info->underflow[i])))
1090 newinfo->underflow[i] -= off;
1091 }
1092 return 0;
1093 }
1094
1095 static int compat_table_info(const struct xt_table_info *info,
1096 struct xt_table_info *newinfo)
1097 {
1098 void *loc_cpu_entry;
1099
1100 if (!newinfo || !info)
1101 return -EINVAL;
1102
1103 /* we dont care about newinfo->entries[] */
1104 memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
1105 newinfo->initial_entries = 0;
1106 loc_cpu_entry = info->entries[raw_smp_processor_id()];
1107 return IP6T_ENTRY_ITERATE(loc_cpu_entry, info->size,
1108 compat_calc_entry, info, loc_cpu_entry,
1109 newinfo);
1110 }
1111 #endif
1112
1113 static int get_info(struct net *net, void __user *user, int *len, int compat)
1114 {
1115 char name[IP6T_TABLE_MAXNAMELEN];
1116 struct xt_table *t;
1117 int ret;
1118
1119 if (*len != sizeof(struct ip6t_getinfo)) {
1120 duprintf("length %u != %zu\n", *len,
1121 sizeof(struct ip6t_getinfo));
1122 return -EINVAL;
1123 }
1124
1125 if (copy_from_user(name, user, sizeof(name)) != 0)
1126 return -EFAULT;
1127
1128 name[IP6T_TABLE_MAXNAMELEN-1] = '\0';
1129 #ifdef CONFIG_COMPAT
1130 if (compat)
1131 xt_compat_lock(AF_INET6);
1132 #endif
1133 t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
1134 "ip6table_%s", name);
1135 if (t && !IS_ERR(t)) {
1136 struct ip6t_getinfo info;
1137 const struct xt_table_info *private = t->private;
1138
1139 #ifdef CONFIG_COMPAT
1140 if (compat) {
1141 struct xt_table_info tmp;
1142 ret = compat_table_info(private, &tmp);
1143 xt_compat_flush_offsets(AF_INET6);
1144 private = &tmp;
1145 }
1146 #endif
1147 info.valid_hooks = t->valid_hooks;
1148 memcpy(info.hook_entry, private->hook_entry,
1149 sizeof(info.hook_entry));
1150 memcpy(info.underflow, private->underflow,
1151 sizeof(info.underflow));
1152 info.num_entries = private->number;
1153 info.size = private->size;
1154 strcpy(info.name, name);
1155
1156 if (copy_to_user(user, &info, *len) != 0)
1157 ret = -EFAULT;
1158 else
1159 ret = 0;
1160
1161 xt_table_unlock(t);
1162 module_put(t->me);
1163 } else
1164 ret = t ? PTR_ERR(t) : -ENOENT;
1165 #ifdef CONFIG_COMPAT
1166 if (compat)
1167 xt_compat_unlock(AF_INET6);
1168 #endif
1169 return ret;
1170 }
1171
/*
 * IP6T_SO_GET_ENTRIES (native ABI): dump the whole rule blob of a table.
 * The caller must pass exactly the size it learned from IP6T_SO_GET_INFO;
 * -EAGAIN signals that the table was replaced in between.
 */
static int
get_entries(struct net *net, struct ip6t_get_entries __user *uptr, int *len)
{
	int ret;
	struct ip6t_get_entries get;
	struct xt_table *t;

	if (*len < sizeof(get)) {
		duprintf("get_entries: %u < %zu\n", *len, sizeof(get));
		return -EINVAL;
	}
	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;
	/* Length must match header + claimed blob size exactly. */
	if (*len != sizeof(struct ip6t_get_entries) + get.size) {
		duprintf("get_entries: %u != %zu\n",
			 *len, sizeof(get) + get.size);
		return -EINVAL;
	}

	t = xt_find_table_lock(net, AF_INET6, get.name);
	if (t && !IS_ERR(t)) {
		struct xt_table_info *private = t->private;
		duprintf("t->private->number = %u\n", private->number);
		if (get.size == private->size)
			ret = copy_entries_to_user(private->size,
						   t, uptr->entrytable);
		else {
			/* Table changed size since GET_INFO; let caller retry. */
			duprintf("get_entries: I've got %u not %u!\n",
				 private->size, get.size);
			ret = -EAGAIN;
		}
		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;

	return ret;
}
1210
/*
 * Common tail of do_replace()/compat_do_replace(): swap @newinfo into the
 * named table, copy the old rules' counters out to @counters_ptr, and free
 * the old blob.  On success ownership of @newinfo passes to the xtables
 * core; on failure the caller still owns (and must free) @newinfo.
 */
static int
__do_replace(struct net *net, const char *name, unsigned int valid_hooks,
	     struct xt_table_info *newinfo, unsigned int num_counters,
	     void __user *counters_ptr)
{
	int ret;
	struct xt_table *t;
	struct xt_table_info *oldinfo;
	struct xt_counters *counters;
	const void *loc_cpu_old_entry;

	ret = 0;
	/* Allocate the counter snapshot up front so the swap cannot fail
	 * halfway through for lack of memory. */
	counters = vmalloc_node(num_counters * sizeof(struct xt_counters),
				numa_node_id());
	if (!counters) {
		ret = -ENOMEM;
		goto out;
	}

	t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
				    "ip6table_%s", name);
	if (!t || IS_ERR(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free_newinfo_counters_untrans;
	}

	/* You lied! */
	if (valid_hooks != t->valid_hooks) {
		duprintf("Valid hook crap: %08X vs %08X\n",
			 valid_hooks, t->valid_hooks);
		ret = -EINVAL;
		goto put_module;
	}

	oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
	if (!oldinfo)
		goto put_module;

	/* Update module usage count based on number of rules */
	duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
		oldinfo->number, oldinfo->initial_entries, newinfo->number);
	/* NOTE(review): the two ifs below drop the table module reference
	 * once or twice depending on whether old/new rule counts exceed the
	 * built-in (initial) count, so the module stays pinned exactly while
	 * user-added rules exist — mirrors ip_tables.c; confirm there. */
	if ((oldinfo->number > oldinfo->initial_entries) ||
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);
	if ((oldinfo->number > oldinfo->initial_entries) &&
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);

	/* Get the old counters. */
	get_counters(oldinfo, counters);
	/* Decrease module usage counts and free resource */
	loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
	IP6T_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry,
			   NULL);
	xt_free_table_info(oldinfo);
	/* A failed copy here only loses the counter report; the new table
	 * is already live. */
	if (copy_to_user(counters_ptr, counters,
			 sizeof(struct xt_counters) * num_counters) != 0)
		ret = -EFAULT;
	vfree(counters);
	xt_table_unlock(t);
	return ret;

 put_module:
	module_put(t->me);
	xt_table_unlock(t);
 free_newinfo_counters_untrans:
	vfree(counters);
 out:
	return ret;
}
1281
/*
 * IP6T_SO_SET_REPLACE (native ABI): copy the replacement header + rule
 * blob from userspace, translate/verify it, then install it via
 * __do_replace().
 */
static int
do_replace(struct net *net, void __user *user, unsigned int len)
{
	int ret;
	struct ip6t_replace tmp;
	struct xt_table_info *newinfo;
	void *loc_cpu_entry;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* overflow check */
	/* Guards the num_counters * sizeof(xt_counters) vmalloc in
	 * __do_replace() against multiplication overflow. */
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
		return -ENOMEM;

	newinfo = xt_alloc_table_info(tmp.size);
	if (!newinfo)
		return -ENOMEM;

	/* choose the copy that is on our node/cpu */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
			   tmp.size) != 0) {
		ret = -EFAULT;
		goto free_newinfo;
	}

	ret = translate_table(tmp.name, tmp.valid_hooks,
			      newinfo, loc_cpu_entry, tmp.size, tmp.num_entries,
			      tmp.hook_entry, tmp.underflow);
	if (ret != 0)
		goto free_newinfo;

	duprintf("ip_tables: Translated table\n");

	ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
			   tmp.num_counters, tmp.counters);
	if (ret)
		goto free_newinfo_untrans;
	return 0;

 free_newinfo_untrans:
	/* translate_table() succeeded, so match/target refs must be dropped. */
	IP6T_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
 free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
}
1329
1330 /* We're lazy, and add to the first CPU; overflow works its fey magic
1331 * and everything is OK. */
1332 static inline int
1333 add_counter_to_entry(struct ip6t_entry *e,
1334 const struct xt_counters addme[],
1335 unsigned int *i)
1336 {
1337 #if 0
1338 duprintf("add_counter: Entry %u %lu/%lu + %lu/%lu\n",
1339 *i,
1340 (long unsigned int)e->counters.pcnt,
1341 (long unsigned int)e->counters.bcnt,
1342 (long unsigned int)addme[*i].pcnt,
1343 (long unsigned int)addme[*i].bcnt);
1344 #endif
1345
1346 ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
1347
1348 (*i)++;
1349 return 0;
1350 }
1351
/*
 * IP6T_SO_SET_ADD_COUNTERS: add a userspace-supplied vector of byte/packet
 * counters onto the live table's rules.  @compat selects the 32-bit layout
 * of the leading xt_counters_info header; the counter records themselves
 * are read with the native struct xt_counters layout in both cases.
 */
static int
do_add_counters(struct net *net, void __user *user, unsigned int len,
		int compat)
{
	unsigned int i;
	struct xt_counters_info tmp;
	struct xt_counters *paddc;
	unsigned int num_counters;
	char *name;
	int size;
	void *ptmp;
	struct xt_table *t;
	const struct xt_table_info *private;
	int ret = 0;
	const void *loc_cpu_entry;
#ifdef CONFIG_COMPAT
	struct compat_xt_counters_info compat_tmp;

	/* Pick the right header size/layout for this caller's ABI. */
	if (compat) {
		ptmp = &compat_tmp;
		size = sizeof(struct compat_xt_counters_info);
	} else
#endif
	{
		ptmp = &tmp;
		size = sizeof(struct xt_counters_info);
	}

	if (copy_from_user(ptmp, user, size) != 0)
		return -EFAULT;

#ifdef CONFIG_COMPAT
	if (compat) {
		num_counters = compat_tmp.num_counters;
		name = compat_tmp.name;
	} else
#endif
	{
		num_counters = tmp.num_counters;
		name = tmp.name;
	}

	/* Total length must match header + counter array exactly. */
	if (len != size + num_counters * sizeof(struct xt_counters))
		return -EINVAL;

	paddc = vmalloc_node(len - size, numa_node_id());
	if (!paddc)
		return -ENOMEM;

	if (copy_from_user(paddc, user + size, len - size) != 0) {
		ret = -EFAULT;
		goto free;
	}

	t = xt_find_table_lock(net, AF_INET6, name);
	if (!t || IS_ERR(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free;
	}

	/* NOTE(review): t->lock appears to serialize against the packet
	 * path's readers — confirm the read side in ip6t_do_table(). */
	write_lock_bh(&t->lock);
	private = t->private;
	if (private->number != num_counters) {
		ret = -EINVAL;
		goto unlock_up_free;
	}

	i = 0;
	/* Choose the copy that is on our node */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	IP6T_ENTRY_ITERATE(loc_cpu_entry,
			   private->size,
			   add_counter_to_entry,
			   paddc,
			   &i);
 unlock_up_free:
	write_unlock_bh(&t->lock);
	xt_table_unlock(t);
	module_put(t->me);
 free:
	vfree(paddc);

	return ret;
}
1436
#ifdef CONFIG_COMPAT
/* 32-bit userland view of struct ip6t_replace: same fields, but the
 * counters pointer shrinks to a compat_uptr_t and the trailing entries
 * use the compat entry layout. */
struct compat_ip6t_replace {
	char name[IP6T_TABLE_MAXNAMELEN];
	u32 valid_hooks;
	u32 num_entries;
	u32 size;
	u32 hook_entry[NF_INET_NUMHOOKS];
	u32 underflow[NF_INET_NUMHOOKS];
	u32 num_counters;
	compat_uptr_t counters;	/* struct ip6t_counters * */
	struct compat_ip6t_entry entries[0];
};
1449
/*
 * Convert one native rule to the compat layout at *dstptr in userspace,
 * attaching the collected counters for index *i.  *size shrinks by the
 * native/compat size delta; target_offset/next_offset are rebased by the
 * delta accumulated so far (origsize - *size).
 */
static int
compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr,
			  unsigned int *size, struct xt_counters *counters,
			  unsigned int *i)
{
	struct ip6t_entry_target *t;
	struct compat_ip6t_entry __user *ce;
	u_int16_t target_offset, next_offset;
	compat_uint_t origsize;
	int ret;

	ret = -EFAULT;
	origsize = *size;
	ce = (struct compat_ip6t_entry __user *)*dstptr;
	/* NOTE(review): this writes a full native-sized header; the excess
	 * tail bytes are overwritten by the match data copied just below —
	 * confirm against the compat entry layout. */
	if (copy_to_user(ce, e, sizeof(struct ip6t_entry)))
		goto out;

	if (copy_to_user(&ce->counters, &counters[*i], sizeof(counters[*i])))
		goto out;

	*dstptr += sizeof(struct compat_ip6t_entry);
	*size -= sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);

	ret = IP6T_MATCH_ITERATE(e, xt_compat_match_to_user, dstptr, size);
	target_offset = e->target_offset - (origsize - *size);
	if (ret)
		goto out;
	t = ip6t_get_target(e);
	ret = xt_compat_target_to_user(t, dstptr, size);
	if (ret)
		goto out;
	ret = -EFAULT;
	next_offset = e->next_offset - (origsize - *size);
	if (put_user(target_offset, &ce->target_offset))
		goto out;
	if (put_user(next_offset, &ce->next_offset))
		goto out;

	(*i)++;
	return 0;
out:
	return ret;
}
1493
/*
 * Resolve one compat match by name/revision (auto-loading "ip6t_<name>"
 * if needed), record it in m->u.kernel.match, and add its native/compat
 * size delta to *size.  Increments *i so the caller can release exactly
 * the matches resolved so far on a later failure.
 */
static int
compat_find_calc_match(struct ip6t_entry_match *m,
		       const char *name,
		       const struct ip6t_ip6 *ipv6,
		       unsigned int hookmask,
		       int *size, unsigned int *i)
{
	struct xt_match *match;

	match = try_then_request_module(xt_find_match(AF_INET6, m->u.user.name,
						      m->u.user.revision),
					"ip6t_%s", m->u.user.name);
	if (IS_ERR(match) || !match) {
		duprintf("compat_check_calc_match: `%s' not found\n",
			 m->u.user.name);
		return match ? PTR_ERR(match) : -ENOENT;
	}
	/* xt_find_match took a module ref; compat_release_match drops it. */
	m->u.kernel.match = match;
	*size += xt_compat_match_offset(match);

	(*i)++;
	return 0;
}
1517
1518 static int
1519 compat_release_match(struct ip6t_entry_match *m, unsigned int *i)
1520 {
1521 if (i && (*i)-- == 0)
1522 return 1;
1523
1524 module_put(m->u.kernel.match->me);
1525 return 0;
1526 }
1527
1528 static int
1529 compat_release_entry(struct compat_ip6t_entry *e, unsigned int *i)
1530 {
1531 struct ip6t_entry_target *t;
1532
1533 if (i && (*i)-- == 0)
1534 return 1;
1535
1536 /* Cleanup all matches */
1537 COMPAT_IP6T_MATCH_ITERATE(e, compat_release_match, NULL);
1538 t = compat_ip6t_get_target(e);
1539 module_put(t->u.kernel.target->me);
1540 return 0;
1541 }
1542
/*
 * Pass 1 of compat translation: validate one compat entry's alignment and
 * bounds, resolve its matches and target (taking module refs), record the
 * compat→native size delta for this entry offset, and note any hook entry
 * or underflow that lands exactly on this entry.
 */
static int
check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
				  struct xt_table_info *newinfo,
				  unsigned int *size,
				  unsigned char *base,
				  unsigned char *limit,
				  unsigned int *hook_entries,
				  unsigned int *underflows,
				  unsigned int *i,
				  const char *name)
{
	struct ip6t_entry_target *t;
	struct xt_target *target;
	unsigned int entry_offset;
	unsigned int j;
	int ret, off, h;

	duprintf("check_compat_entry_size_and_hooks %p\n", e);
	if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0
	    || (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit) {
		duprintf("Bad offset %p, limit = %p\n", e, limit);
		return -EINVAL;
	}

	/* Entry must be big enough for at least its own header + a target. */
	if (e->next_offset < sizeof(struct compat_ip6t_entry) +
			     sizeof(struct compat_xt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	/* For purposes of check_entry casting the compat entry is fine */
	ret = check_entry((struct ip6t_entry *)e, name);
	if (ret)
		return ret;

	/* off accumulates how much bigger the native form of this entry is. */
	off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
	entry_offset = (void *)e - (void *)base;
	j = 0;
	ret = COMPAT_IP6T_MATCH_ITERATE(e, compat_find_calc_match, name,
					&e->ipv6, e->comefrom, &off, &j);
	if (ret != 0)
		goto release_matches;

	t = compat_ip6t_get_target(e);
	target = try_then_request_module(xt_find_target(AF_INET6,
							t->u.user.name,
							t->u.user.revision),
					 "ip6t_%s", t->u.user.name);
	if (IS_ERR(target) || !target) {
		duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
			 t->u.user.name);
		ret = target ? PTR_ERR(target) : -ENOENT;
		goto release_matches;
	}
	t->u.kernel.target = target;

	off += xt_compat_target_offset(target);
	*size += off;
	/* Remember the delta so pass 2 can rebase jump offsets. */
	ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
	if (ret)
		goto out;

	/* Check hooks & underflows */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h])
			newinfo->underflow[h] = underflows[h];
	}

	/* Clear counters and comefrom */
	memset(&e->counters, 0, sizeof(e->counters));
	e->comefrom = 0;

	(*i)++;
	return 0;

out:
	module_put(t->u.kernel.target->me);
release_matches:
	/* NOTE(review): @e is a compat entry but this uses the native
	 * IP6T_MATCH_ITERATE, whose start offset is sizeof(struct
	 * ip6t_entry) — the offsets differ; compare with
	 * COMPAT_IP6T_MATCH_ITERATE and confirm this is intended. */
	IP6T_MATCH_ITERATE(e, compat_release_match, &j);
	return ret;
}
1627
/*
 * Pass 2 of compat translation: copy one validated compat entry into the
 * native-layout blob at *dstptr.  target_offset/next_offset and any
 * hook/underflow positions that lie past this entry are shifted by the
 * size delta accumulated so far (origsize - *size; *size grows here).
 */
static int
compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
			    unsigned int *size, const char *name,
			    struct xt_table_info *newinfo, unsigned char *base)
{
	struct ip6t_entry_target *t;
	struct xt_target *target;
	struct ip6t_entry *de;
	unsigned int origsize;
	int ret, h;

	ret = 0;
	origsize = *size;
	de = (struct ip6t_entry *)*dstptr;
	memcpy(de, e, sizeof(struct ip6t_entry));
	memcpy(&de->counters, &e->counters, sizeof(e->counters));

	*dstptr += sizeof(struct ip6t_entry);
	*size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);

	ret = COMPAT_IP6T_MATCH_ITERATE(e, xt_compat_match_from_user,
					dstptr, size);
	if (ret)
		return ret;
	de->target_offset = e->target_offset - (origsize - *size);
	t = compat_ip6t_get_target(e);
	/* Target was already resolved in pass 1. */
	target = t->u.kernel.target;
	xt_compat_target_from_user(t, dstptr, size);

	de->next_offset = e->next_offset - (origsize - *size);
	/* Rebase hook entry/underflow offsets that point beyond this entry. */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if ((unsigned char *)de - base < newinfo->hook_entry[h])
			newinfo->hook_entry[h] -= origsize - *size;
		if ((unsigned char *)de - base < newinfo->underflow[h])
			newinfo->underflow[h] -= origsize - *size;
	}
	return ret;
}
1666
/*
 * Final ->checkentry pass over one translated (native-layout) entry.
 * On failure, releases the matches checked so far for this entry; the
 * caller unwinds earlier entries itself.
 */
static int compat_check_entry(struct ip6t_entry *e, const char *name,
			      unsigned int *i)
{
	unsigned int j;
	int ret;

	j = 0;
	ret = IP6T_MATCH_ITERATE(e, check_match, name, &e->ipv6,
				 e->comefrom, &j);
	if (ret)
		goto cleanup_matches;

	ret = check_target(e, name);
	if (ret)
		goto cleanup_matches;

	(*i)++;
	return 0;

 cleanup_matches:
	/* Undo check_match for the first j matches only. */
	IP6T_MATCH_ITERATE(e, cleanup_match, &j);
	return ret;
}
1690
/*
 * Translate a 32-bit (compat) rule blob into native layout.
 * Pass 1 (under xt_compat_lock) validates each compat entry, resolves its
 * matches/targets and records per-entry size deltas; pass 2 copies the
 * entries into a freshly allocated native-layout table, then the usual
 * loop-detection and checkentry passes run on the native copy.  On success
 * *pinfo/*pentry0 are swapped for the new table and the old info is freed.
 */
static int
translate_compat_table(const char *name,
		       unsigned int valid_hooks,
		       struct xt_table_info **pinfo,
		       void **pentry0,
		       unsigned int total_size,
		       unsigned int number,
		       unsigned int *hook_entries,
		       unsigned int *underflows)
{
	unsigned int i, j;
	struct xt_table_info *newinfo, *info;
	void *pos, *entry0, *entry1;
	unsigned int size;
	int ret;

	info = *pinfo;
	entry0 = *pentry0;
	size = total_size;
	info->number = number;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		info->hook_entry[i] = 0xFFFFFFFF;
		info->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_compat_table: size %u\n", info->size);
	j = 0;	/* counts entries whose modules were successfully pinned */
	xt_compat_lock(AF_INET6);
	/* Walk through entries, checking offsets. */
	ret = COMPAT_IP6T_ENTRY_ITERATE(entry0, total_size,
					check_compat_entry_size_and_hooks,
					info, &size, entry0,
					entry0 + total_size,
					hook_entries, underflows, &j, name);
	if (ret != 0)
		goto out_unlock;

	ret = -EINVAL;
	if (j != number) {
		duprintf("translate_compat_table: %u not %u entries\n",
			 j, number);
		goto out_unlock;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(valid_hooks & (1 << i)))
			continue;
		if (info->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, hook_entries[i]);
			goto out_unlock;
		}
		if (info->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, underflows[i]);
			goto out_unlock;
		}
	}

	ret = -ENOMEM;
	/* size now includes the native/compat deltas from pass 1. */
	newinfo = xt_alloc_table_info(size);
	if (!newinfo)
		goto out_unlock;

	newinfo->number = number;
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = info->hook_entry[i];
		newinfo->underflow[i] = info->underflow[i];
	}
	entry1 = newinfo->entries[raw_smp_processor_id()];
	pos = entry1;
	size = total_size;
	ret = COMPAT_IP6T_ENTRY_ITERATE(entry0, total_size,
					compat_copy_entry_from_user,
					&pos, &size, name, newinfo, entry1);
	xt_compat_flush_offsets(AF_INET6);
	xt_compat_unlock(AF_INET6);
	if (ret)
		goto free_newinfo;

	ret = -ELOOP;
	if (!mark_source_chains(newinfo, valid_hooks, entry1))
		goto free_newinfo;

	i = 0;
	ret = IP6T_ENTRY_ITERATE(entry1, newinfo->size, compat_check_entry,
				 name, &i);
	if (ret) {
		/* Partial failure after i entries passed checkentry:
		 * release the compat refs of the remaining j-i entries and
		 * clean up the i checked native entries.
		 * NOTE(review): intricate split-ownership unwind — verify
		 * against the matching code in ip_tables.c. */
		j -= i;
		COMPAT_IP6T_ENTRY_ITERATE_CONTINUE(entry0, newinfo->size, i,
						   compat_release_entry, &j);
		IP6T_ENTRY_ITERATE(entry1, newinfo->size, cleanup_entry, &i);
		xt_free_table_info(newinfo);
		return ret;
	}

	/* And one copy for every other CPU */
	for_each_possible_cpu(i)
		if (newinfo->entries[i] && newinfo->entries[i] != entry1)
			memcpy(newinfo->entries[i], entry1, newinfo->size);

	*pinfo = newinfo;
	*pentry0 = entry1;
	xt_free_table_info(info);
	return 0;

free_newinfo:
	xt_free_table_info(newinfo);
out:
	/* Drop the module refs taken for the first j compat entries. */
	COMPAT_IP6T_ENTRY_ITERATE(entry0, total_size, compat_release_entry, &j);
	return ret;
out_unlock:
	xt_compat_flush_offsets(AF_INET6);
	xt_compat_unlock(AF_INET6);
	goto out;
}
1811
/*
 * IP6T_SO_SET_REPLACE, 32-bit ABI: like do_replace() but the header and
 * rule blob arrive in compat layout and are converted by
 * translate_compat_table() before installation.
 */
static int
compat_do_replace(struct net *net, void __user *user, unsigned int len)
{
	int ret;
	struct compat_ip6t_replace tmp;
	struct xt_table_info *newinfo;
	void *loc_cpu_entry;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* overflow check */
	/* xt_alloc_table_info allocates one blob per possible CPU. */
	if (tmp.size >= INT_MAX / num_possible_cpus())
		return -ENOMEM;
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
		return -ENOMEM;

	newinfo = xt_alloc_table_info(tmp.size);
	if (!newinfo)
		return -ENOMEM;

	/* choose the copy that is on our node/cpu */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
			   tmp.size) != 0) {
		ret = -EFAULT;
		goto free_newinfo;
	}

	/* May replace newinfo/loc_cpu_entry with a larger native-layout blob. */
	ret = translate_compat_table(tmp.name, tmp.valid_hooks,
				     &newinfo, &loc_cpu_entry, tmp.size,
				     tmp.num_entries, tmp.hook_entry,
				     tmp.underflow);
	if (ret != 0)
		goto free_newinfo;

	duprintf("compat_do_replace: Translated table\n");

	ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
			   tmp.num_counters, compat_ptr(tmp.counters));
	if (ret)
		goto free_newinfo_untrans;
	return 0;

 free_newinfo_untrans:
	IP6T_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
 free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
}
1862
1863 static int
1864 compat_do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user,
1865 unsigned int len)
1866 {
1867 int ret;
1868
1869 if (!capable(CAP_NET_ADMIN))
1870 return -EPERM;
1871
1872 switch (cmd) {
1873 case IP6T_SO_SET_REPLACE:
1874 ret = compat_do_replace(sock_net(sk), user, len);
1875 break;
1876
1877 case IP6T_SO_SET_ADD_COUNTERS:
1878 ret = do_add_counters(sock_net(sk), user, len, 1);
1879 break;
1880
1881 default:
1882 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
1883 ret = -EINVAL;
1884 }
1885
1886 return ret;
1887 }
1888
/* 32-bit userland view of struct ip6t_get_entries. */
struct compat_ip6t_get_entries {
	char name[IP6T_TABLE_MAXNAMELEN];
	compat_uint_t size;
	struct compat_ip6t_entry entrytable[0];
};
1894
/*
 * Dump the whole table to a 32-bit caller: snapshot the counters, then
 * convert each rule to compat layout via compat_copy_entry_to_user().
 */
static int
compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
			    void __user *userptr)
{
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;
	void __user *pos;
	unsigned int size;
	int ret = 0;
	const void *loc_cpu_entry;
	unsigned int i = 0;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	/* choose the copy that is on our node/cpu, ...
	 * This choice is lazy (because current thread is
	 * allowed to migrate to another cpu)
	 */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	pos = userptr;
	size = total_size;
	ret = IP6T_ENTRY_ITERATE(loc_cpu_entry, total_size,
				 compat_copy_entry_to_user,
				 &pos, &size, counters, &i);

	vfree(counters);
	return ret;
}
1925
/*
 * IP6T_SO_GET_ENTRIES, 32-bit ABI: validate the requested size against the
 * compat-recomputed table size, then dump the rules in compat layout.
 * Runs entirely under the compat lock so offset bookkeeping stays private.
 */
static int
compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr,
		   int *len)
{
	int ret;
	struct compat_ip6t_get_entries get;
	struct xt_table *t;

	if (*len < sizeof(get)) {
		duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
		return -EINVAL;
	}

	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;

	if (*len != sizeof(struct compat_ip6t_get_entries) + get.size) {
		duprintf("compat_get_entries: %u != %zu\n",
			 *len, sizeof(get) + get.size);
		return -EINVAL;
	}

	xt_compat_lock(AF_INET6);
	t = xt_find_table_lock(net, AF_INET6, get.name);
	if (t && !IS_ERR(t)) {
		const struct xt_table_info *private = t->private;
		struct xt_table_info info;
		duprintf("t->private->number = %u\n", private->number);
		ret = compat_table_info(private, &info);
		if (!ret && get.size == info.size) {
			ret = compat_copy_entries_to_user(private->size,
							  t, uptr->entrytable);
		} else if (!ret) {
			/* Size changed since GET_INFO; caller retries. */
			duprintf("compat_get_entries: I've got %u not %u!\n",
				 private->size, get.size);
			ret = -EAGAIN;
		}
		xt_compat_flush_offsets(AF_INET6);
		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;

	xt_compat_unlock(AF_INET6);
	return ret;
}
1972
1973 static int do_ip6t_get_ctl(struct sock *, int, void __user *, int *);
1974
1975 static int
1976 compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1977 {
1978 int ret;
1979
1980 if (!capable(CAP_NET_ADMIN))
1981 return -EPERM;
1982
1983 switch (cmd) {
1984 case IP6T_SO_GET_INFO:
1985 ret = get_info(sock_net(sk), user, len, 1);
1986 break;
1987 case IP6T_SO_GET_ENTRIES:
1988 ret = compat_get_entries(sock_net(sk), user, len);
1989 break;
1990 default:
1991 ret = do_ip6t_get_ctl(sk, cmd, user, len);
1992 }
1993 return ret;
1994 }
1995 #endif
1996
1997 static int
1998 do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
1999 {
2000 int ret;
2001
2002 if (!capable(CAP_NET_ADMIN))
2003 return -EPERM;
2004
2005 switch (cmd) {
2006 case IP6T_SO_SET_REPLACE:
2007 ret = do_replace(sock_net(sk), user, len);
2008 break;
2009
2010 case IP6T_SO_SET_ADD_COUNTERS:
2011 ret = do_add_counters(sock_net(sk), user, len, 0);
2012 break;
2013
2014 default:
2015 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
2016 ret = -EINVAL;
2017 }
2018
2019 return ret;
2020 }
2021
/*
 * getsockopt() dispatcher for native callers: table info/entry dumps plus
 * match/target revision queries (CAP_NET_ADMIN required).
 */
static int
do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IP6T_SO_GET_INFO:
		ret = get_info(sock_net(sk), user, len, 0);
		break;

	case IP6T_SO_GET_ENTRIES:
		ret = get_entries(sock_net(sk), user, len);
		break;

	case IP6T_SO_GET_REVISION_MATCH:
	case IP6T_SO_GET_REVISION_TARGET: {
		struct ip6t_get_revision rev;
		int target;	/* 1 = query a target, 0 = query a match */

		if (*len != sizeof(rev)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
			ret = -EFAULT;
			break;
		}

		if (cmd == IP6T_SO_GET_REVISION_TARGET)
			target = 1;
		else
			target = 0;

		/* xt_find_revision() fills ret; the module is auto-loaded
		 * on a first miss and the lookup retried. */
		try_then_request_module(xt_find_revision(AF_INET6, rev.name,
							 rev.revision,
							 target, &ret),
					"ip6t_%s", rev.name);
		break;
	}

	default:
		duprintf("do_ip6t_get_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}
2072
/*
 * Register a new ip6_tables table in @net from the built-in replacement
 * blob @repl.  Returns the registered xt_table, or an ERR_PTR on failure
 * (in which case all allocated resources have been released).
 */
struct xt_table *ip6t_register_table(struct net *net, struct xt_table *table,
				     const struct ip6t_replace *repl)
{
	int ret;
	struct xt_table_info *newinfo;
	/* Empty "previous" table so xt_register_table has something to swap out. */
	struct xt_table_info bootstrap
		= { 0, 0, 0, { 0 }, { 0 }, { } };
	void *loc_cpu_entry;
	struct xt_table *new_table;

	newinfo = xt_alloc_table_info(repl->size);
	if (!newinfo) {
		ret = -ENOMEM;
		goto out;
	}

	/* choose the copy on our node/cpu, but dont care about preemption */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	memcpy(loc_cpu_entry, repl->entries, repl->size);

	ret = translate_table(table->name, table->valid_hooks,
			      newinfo, loc_cpu_entry, repl->size,
			      repl->num_entries,
			      repl->hook_entry,
			      repl->underflow);
	if (ret != 0)
		goto out_free;

	new_table = xt_register_table(net, table, &bootstrap, newinfo);
	if (IS_ERR(new_table)) {
		ret = PTR_ERR(new_table);
		goto out_free;
	}
	return new_table;

out_free:
	xt_free_table_info(newinfo);
out:
	return ERR_PTR(ret);
}
2113
2114 void ip6t_unregister_table(struct xt_table *table)
2115 {
2116 struct xt_table_info *private;
2117 void *loc_cpu_entry;
2118 struct module *table_owner = table->me;
2119
2120 private = xt_unregister_table(table);
2121
2122 /* Decrease module usage counts and free resources */
2123 loc_cpu_entry = private->entries[raw_smp_processor_id()];
2124 IP6T_ENTRY_ITERATE(loc_cpu_entry, private->size, cleanup_entry, NULL);
2125 if (private->number > private->initial_entries)
2126 module_put(table_owner);
2127 xt_free_table_info(private);
2128 }
2129
/* Returns true when @type equals the configured type AND @code lies in the
 * inclusive [min_code, max_code] range, XORed with @invert. */
static inline bool
icmp6_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
		      u_int8_t type, u_int8_t code,
		      bool invert)
{
	bool hit = type == test_type &&
		   code >= min_code &&
		   code <= max_code;

	return hit != invert;
}
2139
/* Packet-path match function of the built-in "icmp6" match. */
static bool
icmp6_match(const struct sk_buff *skb, const struct xt_match_param *par)
{
	const struct icmp6hdr *ic;
	struct icmp6hdr _icmph;
	const struct ip6t_icmp *icmpinfo = par->matchinfo;

	/* Must not be a fragment. */
	if (par->fragoff != 0)
		return false;

	ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
	if (ic == NULL) {
		/* We've been asked to examine this packet, and we
		 * can't. Hence, no choice but to drop.
		 */
		duprintf("Dropping evil ICMP tinygram.\n");
		*par->hotdrop = true;
		return false;
	}

	/* code[0]..code[1] is the accepted code range for the given type. */
	return icmp6_type_code_match(icmpinfo->type,
				     icmpinfo->code[0],
				     icmpinfo->code[1],
				     ic->icmp6_type, ic->icmp6_code,
				     !!(icmpinfo->invflags&IP6T_ICMP_INV));
}
2167
2168 /* Called when user tries to insert an entry of this type. */
2169 static bool
2170 icmp6_checkentry(const char *tablename,
2171 const void *entry,
2172 const struct xt_match *match,
2173 void *matchinfo,
2174 unsigned int hook_mask)
2175 {
2176 const struct ip6t_icmp *icmpinfo = matchinfo;
2177
2178 /* Must specify no unknown invflags */
2179 return !(icmpinfo->invflags & ~IP6T_ICMP_INV);
2180 }
2181
/* The built-in targets: standard (NULL) and error. */
/* Standard verdict target: targetsize is the verdict int itself; compat
 * hooks translate the 32-/64-bit verdict representation. */
static struct xt_target ip6t_standard_target __read_mostly = {
	.name = IP6T_STANDARD_TARGET,
	.targetsize = sizeof(int),
	.family = AF_INET6,
#ifdef CONFIG_COMPAT
	.compatsize = sizeof(compat_int_t),
	.compat_from_user = compat_standard_from_user,
	.compat_to_user = compat_standard_to_user,
#endif
};
2193
/* Built-in ERROR target; payload is the error name string. */
static struct xt_target ip6t_error_target __read_mostly = {
	.name = IP6T_ERROR_TARGET,
	.target = ip6t_error,
	.targetsize = IP6T_FUNCTION_MAXNAMELEN,
	.family = AF_INET6,
};
2200
/* set/getsockopt glue; compat handlers are wired in only with CONFIG_COMPAT. */
static struct nf_sockopt_ops ip6t_sockopts = {
	.pf = PF_INET6,
	.set_optmin = IP6T_BASE_CTL,
	.set_optmax = IP6T_SO_SET_MAX+1,
	.set = do_ip6t_set_ctl,
#ifdef CONFIG_COMPAT
	.compat_set = compat_do_ip6t_set_ctl,
#endif
	.get_optmin = IP6T_BASE_CTL,
	.get_optmax = IP6T_SO_GET_MAX+1,
	.get = do_ip6t_get_ctl,
#ifdef CONFIG_COMPAT
	.compat_get = compat_do_ip6t_get_ctl,
#endif
	.owner = THIS_MODULE,
};
2217
/* Built-in "icmp6" match (type/code range with optional inversion). */
static struct xt_match icmp6_matchstruct __read_mostly = {
	.name = "icmp6",
	.match = icmp6_match,
	.matchsize = sizeof(struct ip6t_icmp),
	.checkentry = icmp6_checkentry,
	.proto = IPPROTO_ICMPV6,
	.family = AF_INET6,
};
2226
/* Per-netns setup: register the AF_INET6 xtables proc entries/state. */
static int __net_init ip6_tables_net_init(struct net *net)
{
	return xt_proto_init(net, AF_INET6);
}
2231
/* Per-netns teardown: undo ip6_tables_net_init(). */
static void __net_exit ip6_tables_net_exit(struct net *net)
{
	xt_proto_fini(net, AF_INET6);
}
2236
/* Hooks the per-netns init/exit above into the pernet machinery. */
static struct pernet_operations ip6_tables_net_ops = {
	.init = ip6_tables_net_init,
	.exit = ip6_tables_net_exit,
};
2241
/*
 * Module init: register pernet state, the built-in targets/match and the
 * sockopt interface.  The err5..err1 labels unwind in exact reverse
 * registration order.
 */
static int __init ip6_tables_init(void)
{
	int ret;

	ret = register_pernet_subsys(&ip6_tables_net_ops);
	if (ret < 0)
		goto err1;

	/* Noone else will be downing sem now, so we won't sleep */
	ret = xt_register_target(&ip6t_standard_target);
	if (ret < 0)
		goto err2;
	ret = xt_register_target(&ip6t_error_target);
	if (ret < 0)
		goto err3;
	ret = xt_register_match(&icmp6_matchstruct);
	if (ret < 0)
		goto err4;

	/* Register setsockopt */
	ret = nf_register_sockopt(&ip6t_sockopts);
	if (ret < 0)
		goto err5;

	printk(KERN_INFO "ip6_tables: (C) 2000-2006 Netfilter Core Team\n");
	return 0;

err5:
	xt_unregister_match(&icmp6_matchstruct);
err4:
	xt_unregister_target(&ip6t_error_target);
err3:
	xt_unregister_target(&ip6t_standard_target);
err2:
	unregister_pernet_subsys(&ip6_tables_net_ops);
err1:
	return ret;
}
2280
/* Module exit: unregister everything in reverse order of ip6_tables_init(). */
static void __exit ip6_tables_fini(void)
{
	nf_unregister_sockopt(&ip6t_sockopts);

	xt_unregister_match(&icmp6_matchstruct);
	xt_unregister_target(&ip6t_error_target);
	xt_unregister_target(&ip6t_standard_target);

	unregister_pernet_subsys(&ip6_tables_net_ops);
}
2291
2292 /*
2293 * find the offset to specified header or the protocol number of last header
2294 * if target < 0. "last header" is transport protocol header, ESP, or
2295 * "No next header".
2296 *
2297 * If target header is found, its offset is set in *offset and return protocol
2298 * number. Otherwise, return -1.
2299 *
2300 * If the first fragment doesn't contain the final protocol header or
2301 * NEXTHDR_NONE it is considered invalid.
2302 *
2303 * Note that non-1st fragment is special case that "the protocol number
2304 * of last header" is "next header" field in Fragment header. In this case,
2305 * *offset is meaningless and fragment offset is stored in *fragoff if fragoff
2306 * isn't NULL.
2307 *
2308 */
2309 int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
2310 int target, unsigned short *fragoff)
2311 {
2312 unsigned int start = skb_network_offset(skb) + sizeof(struct ipv6hdr);
2313 u8 nexthdr = ipv6_hdr(skb)->nexthdr;
2314 unsigned int len = skb->len - start;
2315
2316 if (fragoff)
2317 *fragoff = 0;
2318
2319 while (nexthdr != target) {
2320 struct ipv6_opt_hdr _hdr, *hp;
2321 unsigned int hdrlen;
2322
2323 if ((!ipv6_ext_hdr(nexthdr)) || nexthdr == NEXTHDR_NONE) {
2324 if (target < 0)
2325 break;
2326 return -ENOENT;
2327 }
2328
2329 hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr);
2330 if (hp == NULL)
2331 return -EBADMSG;
2332 if (nexthdr == NEXTHDR_FRAGMENT) {
2333 unsigned short _frag_off;
2334 __be16 *fp;
2335 fp = skb_header_pointer(skb,
2336 start+offsetof(struct frag_hdr,
2337 frag_off),
2338 sizeof(_frag_off),
2339 &_frag_off);
2340 if (fp == NULL)
2341 return -EBADMSG;
2342
2343 _frag_off = ntohs(*fp) & ~0x7;
2344 if (_frag_off) {
2345 if (target < 0 &&
2346 ((!ipv6_ext_hdr(hp->nexthdr)) ||
2347 hp->nexthdr == NEXTHDR_NONE)) {
2348 if (fragoff)
2349 *fragoff = _frag_off;
2350 return hp->nexthdr;
2351 }
2352 return -ENOENT;
2353 }
2354 hdrlen = 8;
2355 } else if (nexthdr == NEXTHDR_AUTH)
2356 hdrlen = (hp->hdrlen + 2) << 2;
2357 else
2358 hdrlen = ipv6_optlen(hp);
2359
2360 nexthdr = hp->nexthdr;
2361 len -= hdrlen;
2362 start += hdrlen;
2363 }
2364
2365 *offset = start;
2366 return nexthdr;
2367 }
2368
/* Exported entry points for the per-table modules (ip6table_filter etc.)
 * and other IPv6 netfilter users. */
EXPORT_SYMBOL(ip6t_register_table);
EXPORT_SYMBOL(ip6t_unregister_table);
EXPORT_SYMBOL(ip6t_do_table);
EXPORT_SYMBOL(ip6t_ext_hdr);
EXPORT_SYMBOL(ipv6_find_hdr);

module_init(ip6_tables_init);
module_exit(ip6_tables_fini);
This page took 0.134151 seconds and 5 git commands to generate.