Merge branch 'hwmon-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jdelv...
[deliverable/linux.git] / net / ipv6 / netfilter / ip6_tables.c
1 /*
2 * Packet matching code.
3 *
4 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
5 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12 #include <linux/capability.h>
13 #include <linux/in.h>
14 #include <linux/skbuff.h>
15 #include <linux/kmod.h>
16 #include <linux/vmalloc.h>
17 #include <linux/netdevice.h>
18 #include <linux/module.h>
19 #include <linux/poison.h>
20 #include <linux/icmpv6.h>
21 #include <net/ipv6.h>
22 #include <net/compat.h>
23 #include <asm/uaccess.h>
24 #include <linux/mutex.h>
25 #include <linux/proc_fs.h>
26 #include <linux/err.h>
27 #include <linux/cpumask.h>
28
29 #include <linux/netfilter_ipv6/ip6_tables.h>
30 #include <linux/netfilter/x_tables.h>
31 #include <net/netfilter/nf_log.h>
32
33 MODULE_LICENSE("GPL");
34 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
35 MODULE_DESCRIPTION("IPv6 packet filter");
36
37 /*#define DEBUG_IP_FIREWALL*/
38 /*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
39 /*#define DEBUG_IP_FIREWALL_USER*/
40
41 #ifdef DEBUG_IP_FIREWALL
42 #define dprintf(format, args...) printk(format , ## args)
43 #else
44 #define dprintf(format, args...)
45 #endif
46
47 #ifdef DEBUG_IP_FIREWALL_USER
48 #define duprintf(format, args...) printk(format , ## args)
49 #else
50 #define duprintf(format, args...)
51 #endif
52
53 #ifdef CONFIG_NETFILTER_DEBUG
54 #define IP_NF_ASSERT(x) \
55 do { \
56 if (!(x)) \
57 printk("IP_NF_ASSERT: %s:%s:%u\n", \
58 __func__, __FILE__, __LINE__); \
59 } while(0)
60 #else
61 #define IP_NF_ASSERT(x)
62 #endif
63
64 #if 0
65 /* All the better to debug you with... */
66 #define static
67 #define inline
68 #endif
69
70 /*
71 We keep a set of rules for each CPU, so we can avoid write-locking
72 them in the softirq when updating the counters and therefore
73 only need to read-lock in the softirq; doing a write_lock_bh() in user
74 context stops packets coming through and allows user context to read
75 the counters or update the rules.
76
77 Hence the start of any table is given by get_table() below. */
78
79 /* Check for an extension */
80 int
81 ip6t_ext_hdr(u8 nexthdr)
82 {
83 return ( (nexthdr == IPPROTO_HOPOPTS) ||
84 (nexthdr == IPPROTO_ROUTING) ||
85 (nexthdr == IPPROTO_FRAGMENT) ||
86 (nexthdr == IPPROTO_ESP) ||
87 (nexthdr == IPPROTO_AH) ||
88 (nexthdr == IPPROTO_NONE) ||
89 (nexthdr == IPPROTO_DSTOPTS) );
90 }
91
/* Returns whether matches rule or not. */
/* Performance critical - called for every packet.
 *
 * @skb:     packet under inspection
 * @indev:   aligned input interface name ("" if none)
 * @outdev:  aligned output interface name ("" if none)
 * @ip6info: the rule's IPv6 address/interface/protocol spec
 * @protoff: out: offset of the transport header found
 * @fragoff: out: fragment offset (0 for the first fragment)
 * @hotdrop: out: set to true to force an immediate NF_DROP
 */
static inline bool
ip6_packet_match(const struct sk_buff *skb,
		 const char *indev,
		 const char *outdev,
		 const struct ip6t_ip6 *ip6info,
		 unsigned int *protoff,
		 int *fragoff, bool *hotdrop)
{
	unsigned long ret;
	const struct ipv6hdr *ipv6 = ipv6_hdr(skb);

/* XOR the raw comparison result with the rule's inversion flag. */
#define FWINV(bool, invflg) ((bool) ^ !!(ip6info->invflags & (invflg)))

	/* Source/destination address, under the rule's masks. */
	if (FWINV(ipv6_masked_addr_cmp(&ipv6->saddr, &ip6info->smsk,
				       &ip6info->src), IP6T_INV_SRCIP)
	    || FWINV(ipv6_masked_addr_cmp(&ipv6->daddr, &ip6info->dmsk,
					  &ip6info->dst), IP6T_INV_DSTIP)) {
		dprintf("Source or dest mismatch.\n");
/*
		dprintf("SRC: %u. Mask: %u. Target: %u.%s\n", ip->saddr,
			ipinfo->smsk.s_addr, ipinfo->src.s_addr,
			ipinfo->invflags & IP6T_INV_SRCIP ? " (INV)" : "");
		dprintf("DST: %u. Mask: %u. Target: %u.%s\n", ip->daddr,
			ipinfo->dmsk.s_addr, ipinfo->dst.s_addr,
			ipinfo->invflags & IP6T_INV_DSTIP ? " (INV)" : "");*/
		return false;
	}

	/* Input interface: nonzero ret means "did not match prefix". */
	ret = ifname_compare_aligned(indev, ip6info->iniface, ip6info->iniface_mask);

	if (FWINV(ret != 0, IP6T_INV_VIA_IN)) {
		dprintf("VIA in mismatch (%s vs %s).%s\n",
			indev, ip6info->iniface,
			ip6info->invflags&IP6T_INV_VIA_IN ?" (INV)":"");
		return false;
	}

	/* Output interface. */
	ret = ifname_compare_aligned(outdev, ip6info->outiface, ip6info->outiface_mask);

	if (FWINV(ret != 0, IP6T_INV_VIA_OUT)) {
		dprintf("VIA out mismatch (%s vs %s).%s\n",
			outdev, ip6info->outiface,
			ip6info->invflags&IP6T_INV_VIA_OUT ?" (INV)":"");
		return false;
	}

/* ... might want to do something with class and flowlabel here ... */

	/* look for the desired protocol header */
	if((ip6info->flags & IP6T_F_PROTO)) {
		int protohdr;
		unsigned short _frag_off;

		protohdr = ipv6_find_hdr(skb, protoff, -1, &_frag_off);
		if (protohdr < 0) {
			/* Header walk failed on a non-fragment: the packet
			 * is malformed, so ask the caller to drop it. */
			if (_frag_off == 0)
				*hotdrop = true;
			return false;
		}
		*fragoff = _frag_off;

		dprintf("Packet protocol %hi ?= %s%hi.\n",
			protohdr,
			ip6info->invflags & IP6T_INV_PROTO ? "!":"",
			ip6info->proto);

		if (ip6info->proto == protohdr) {
			if(ip6info->invflags & IP6T_INV_PROTO) {
				return false;
			}
			return true;
		}

		/* We need match for the '-p all', too! */
		if ((ip6info->proto != 0) &&
			!(ip6info->invflags & IP6T_INV_PROTO))
			return false;
	}
	return true;
}
174
175 /* should be ip6 safe */
176 static bool
177 ip6_checkentry(const struct ip6t_ip6 *ipv6)
178 {
179 if (ipv6->flags & ~IP6T_F_MASK) {
180 duprintf("Unknown flag bits set: %08X\n",
181 ipv6->flags & ~IP6T_F_MASK);
182 return false;
183 }
184 if (ipv6->invflags & ~IP6T_INV_MASK) {
185 duprintf("Unknown invflag bits set: %08X\n",
186 ipv6->invflags & ~IP6T_INV_MASK);
187 return false;
188 }
189 return true;
190 }
191
192 static unsigned int
193 ip6t_error(struct sk_buff *skb, const struct xt_target_param *par)
194 {
195 if (net_ratelimit())
196 printk("ip6_tables: error: `%s'\n",
197 (const char *)par->targinfo);
198
199 return NF_DROP;
200 }
201
202 /* Performance critical - called for every packet */
203 static inline bool
204 do_match(struct ip6t_entry_match *m, const struct sk_buff *skb,
205 struct xt_match_param *par)
206 {
207 par->match = m->u.kernel.match;
208 par->matchinfo = m->data;
209
210 /* Stop iteration if it doesn't match */
211 if (!m->u.kernel.match->match(skb, par))
212 return true;
213 else
214 return false;
215 }
216
/* Return the entry that lives @offset bytes past @base. */
static inline struct ip6t_entry *
get_entry(void *base, unsigned int offset)
{
	char *p = base;

	return (struct ip6t_entry *)(p + offset);
}
222
223 /* All zeroes == unconditional rule. */
224 /* Mildly perf critical (only if packet tracing is on) */
225 static inline int
226 unconditional(const struct ip6t_ip6 *ipv6)
227 {
228 unsigned int i;
229
230 for (i = 0; i < sizeof(*ipv6); i++)
231 if (((char *)ipv6)[i])
232 break;
233
234 return (i == sizeof(*ipv6));
235 }
236
237 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
238 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
/* This cries for unification! */
/* Hook number -> user-visible builtin chain name, for TRACE output. */
static const char *const hooknames[] = {
	[NF_INET_PRE_ROUTING]		= "PREROUTING",
	[NF_INET_LOCAL_IN]		= "INPUT",
	[NF_INET_FORWARD]		= "FORWARD",
	[NF_INET_LOCAL_OUT]		= "OUTPUT",
	[NF_INET_POST_ROUTING]		= "POSTROUTING",
};

/* How a traced packet left a chain: ordinary rule, chain return,
 * or the chain's policy. */
enum nf_ip_trace_comments {
	NF_IP6_TRACE_COMMENT_RULE,
	NF_IP6_TRACE_COMMENT_RETURN,
	NF_IP6_TRACE_COMMENT_POLICY,
};

static const char *const comments[] = {
	[NF_IP6_TRACE_COMMENT_RULE]	= "rule",
	[NF_IP6_TRACE_COMMENT_RETURN]	= "return",
	[NF_IP6_TRACE_COMMENT_POLICY]	= "policy",
};

/* Log parameters for TRACE lines: level 4, all log flags enabled. */
static struct nf_loginfo trace_loginfo = {
	.type = NF_LOG_TYPE_LOG,
	.u = {
		.log = {
			.level = 4,
			.logflags = NF_LOG_MASK,
		},
	},
};
269
/* Mildly perf critical (only if packet tracing is on) */
/* IP6T_ENTRY_ITERATE callback: walk rules from the hook entry toward
 * the matched rule @e, tracking which user chain we are in (@chainname)
 * and the rule's ordinal within it (@rulenum).  Returns 1 (stop) once
 * @s == @e, 0 to continue. */
static inline int
get_chainname_rulenum(struct ip6t_entry *s, struct ip6t_entry *e,
		      const char *hookname, const char **chainname,
		      const char **comment, unsigned int *rulenum)
{
	struct ip6t_standard_target *t = (void *)ip6t_get_target(s);

	if (strcmp(t->target.u.kernel.target->name, IP6T_ERROR_TARGET) == 0) {
		/* Head of user chain: ERROR target with chainname */
		*chainname = t->target.data;
		(*rulenum) = 0;
	} else if (s == e) {
		(*rulenum)++;

		if (s->target_offset == sizeof(struct ip6t_entry)
		    && strcmp(t->target.u.kernel.target->name,
			      IP6T_STANDARD_TARGET) == 0
		    && t->verdict < 0
		    && unconditional(&s->ipv6)) {
			/* Tail of chains: STANDARD target (return/policy) */
			/* Builtin chains end in a policy; user chains in a
			 * return. */
			*comment = *chainname == hookname
				? comments[NF_IP6_TRACE_COMMENT_POLICY]
				: comments[NF_IP6_TRACE_COMMENT_RETURN];
		}
		return 1;
	} else
		(*rulenum)++;

	return 0;
}
301
/* Emit one "TRACE: table:chain:comment:rulenum" log line for a packet
 * that matched entry @e while the TRACE target is active. */
static void trace_packet(struct sk_buff *skb,
			 unsigned int hook,
			 const struct net_device *in,
			 const struct net_device *out,
			 const char *tablename,
			 struct xt_table_info *private,
			 struct ip6t_entry *e)
{
	void *table_base;
	const struct ip6t_entry *root;
	const char *hookname, *chainname, *comment;
	unsigned int rulenum = 0;

	/* Walk this CPU's copy of the ruleset from the hook entry point. */
	table_base = private->entries[smp_processor_id()];
	root = get_entry(table_base, private->hook_entry[hook]);

	hookname = chainname = hooknames[hook];
	comment = comments[NF_IP6_TRACE_COMMENT_RULE];

	IP6T_ENTRY_ITERATE(root,
			   private->size - private->hook_entry[hook],
			   get_chainname_rulenum,
			   e, hookname, &chainname, &comment, &rulenum);

	nf_log_packet(AF_INET6, hook, skb, in, out, &trace_loginfo,
		      "TRACE: %s:%s:%s:%u ",
		      tablename, chainname, comment, rulenum);
}
330 #endif
331
332 static inline __pure struct ip6t_entry *
333 ip6t_next_entry(const struct ip6t_entry *entry)
334 {
335 return (void *)entry + entry->next_offset;
336 }
337
/* Returns one of the generic firewall policies, like NF_ACCEPT.
 *
 * Main per-packet rule traversal.  Walks this CPU's copy of @table
 * starting at the entry point for @hook, evaluating matches and
 * targets until an absolute verdict is reached or hotdrop is set.
 * The entry's counters.comefrom field doubles as the return-address
 * stack for user-defined chains. */
unsigned int
ip6t_do_table(struct sk_buff *skb,
	      unsigned int hook,
	      const struct net_device *in,
	      const struct net_device *out,
	      struct xt_table *table)
{
/* Scratch field at the head of this CPU's table copy; only used by the
 * reentrancy debug checks below. */
#define tb_comefrom ((struct ip6t_entry *)table_base)->comefrom

	static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
	bool hotdrop = false;
	/* Initializing verdict to NF_DROP keeps gcc happy. */
	unsigned int verdict = NF_DROP;
	const char *indev, *outdev;
	void *table_base;
	struct ip6t_entry *e, *back;
	struct xt_table_info *private;
	struct xt_match_param mtpar;
	struct xt_target_param tgpar;

	/* Initialization */
	indev = in ? in->name : nulldevname;
	outdev = out ? out->name : nulldevname;
	/* We handle fragments by dealing with the first fragment as
	 * if it was a normal packet.  All other fragments are treated
	 * normally, except that they will NEVER match rules that ask
	 * things we don't know, ie. tcp syn flag or ports).  If the
	 * rule is also a fragment-specific rule, non-fragments won't
	 * match it. */
	mtpar.hotdrop = &hotdrop;
	mtpar.in      = tgpar.in  = in;
	mtpar.out     = tgpar.out = out;
	mtpar.family  = tgpar.family = NFPROTO_IPV6;
	mtpar.hooknum = tgpar.hooknum = hook;

	IP_NF_ASSERT(table->valid_hooks & (1 << hook));

	/* Readers only; writers (rule replacement) take the write side. */
	xt_info_rdlock_bh();
	private = table->private;
	table_base = private->entries[smp_processor_id()];

	e = get_entry(table_base, private->hook_entry[hook]);

	/* For return from builtin chain */
	back = get_entry(table_base, private->underflow[hook]);

	do {
		struct ip6t_entry_target *t;

		IP_NF_ASSERT(e);
		IP_NF_ASSERT(back);
		/* Rule mismatch: move on to the next rule in sequence. */
		if (!ip6_packet_match(skb, indev, outdev, &e->ipv6,
		    &mtpar.thoff, &mtpar.fragoff, &hotdrop) ||
		    IP6T_MATCH_ITERATE(e, do_match, skb, &mtpar) != 0) {
			e = ip6t_next_entry(e);
			continue;
		}

		/* Matched: account bytes (IPv6 header + payload) + 1 pkt. */
		ADD_COUNTER(e->counters,
			    ntohs(ipv6_hdr(skb)->payload_len) +
			    sizeof(struct ipv6hdr), 1);

		t = ip6t_get_target(e);
		IP_NF_ASSERT(t->u.kernel.target);

#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
		/* The packet is traced: log it */
		if (unlikely(skb->nf_trace))
			trace_packet(skb, hook, in, out,
				     table->name, private, e);
#endif
		/* Standard target? */
		if (!t->u.kernel.target->target) {
			int v;

			v = ((struct ip6t_standard_target *)t)->verdict;
			if (v < 0) {
				/* Pop from stack? */
				if (v != IP6T_RETURN) {
					/* Absolute verdict: -v-1 is NF_*. */
					verdict = (unsigned)(-v) - 1;
					break;
				}
				e = back;
				back = get_entry(table_base, back->comefrom);
				continue;
			}
			if (table_base + v != ip6t_next_entry(e)
			    && !(e->ipv6.flags & IP6T_F_GOTO)) {
				/* Save old back ptr in next entry */
				struct ip6t_entry *next = ip6t_next_entry(e);
				next->comefrom = (void *)back - table_base;
				/* set back pointer to next entry */
				back = next;
			}

			/* Jump (or goto) to offset v. */
			e = get_entry(table_base, v);
			continue;
		}

		/* Targets which reenter must return
		   abs. verdicts */
		tgpar.target   = t->u.kernel.target;
		tgpar.targinfo = t->data;

#ifdef CONFIG_NETFILTER_DEBUG
		tb_comefrom = 0xeeeeeeec;
#endif
		verdict = t->u.kernel.target->target(skb, &tgpar);

#ifdef CONFIG_NETFILTER_DEBUG
		if (tb_comefrom != 0xeeeeeeec && verdict == IP6T_CONTINUE) {
			printk("Target %s reentered!\n",
			       t->u.kernel.target->name);
			verdict = NF_DROP;
		}
		tb_comefrom = 0x57acc001;
#endif
		if (verdict == IP6T_CONTINUE)
			e = ip6t_next_entry(e);
		else
			/* Verdict */
			break;
	} while (!hotdrop);

#ifdef CONFIG_NETFILTER_DEBUG
	tb_comefrom = NETFILTER_LINK_POISON;
#endif
	xt_info_rdunlock_bh();

#ifdef DEBUG_ALLOW_ALL
	return NF_ACCEPT;
#else
	if (hotdrop)
		return NF_DROP;
	else return verdict;
#endif

#undef tb_comefrom
}
479
/* Figures out from what hook each rule can be called: returns 0 if
   there are loops.  Puts hook bitmask in comefrom.
   Iterative depth-first walk over the ruleset graph, one pass per hook.
   counters.pcnt temporarily stores the DFS back-pointer (restored to 0
   on the way out); bit NF_INET_NUMHOOKS of comefrom marks "on the
   current DFS path" so a revisit means a chain loop. */
static int
mark_source_chains(struct xt_table_info *newinfo,
		   unsigned int valid_hooks, void *entry0)
{
	unsigned int hook;

	/* No recursion; use packet counter to save back ptrs (reset
	   to 0 as we leave), and comefrom to save source hook bitmask */
	for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
		unsigned int pos = newinfo->hook_entry[hook];
		struct ip6t_entry *e = (struct ip6t_entry *)(entry0 + pos);

		if (!(valid_hooks & (1 << hook)))
			continue;

		/* Set initial back pointer. */
		e->counters.pcnt = pos;

		for (;;) {
			struct ip6t_standard_target *t
				= (void *)ip6t_get_target(e);
			int visited = e->comefrom & (1 << hook);

			/* Already on the current DFS path: loop. */
			if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
				printk("iptables: loop hook %u pos %u %08X.\n",
				       hook, pos, e->comefrom);
				return 0;
			}
			e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));

			/* Unconditional return/END. */
			if ((e->target_offset == sizeof(struct ip6t_entry)
			    && (strcmp(t->target.u.user.name,
				       IP6T_STANDARD_TARGET) == 0)
			    && t->verdict < 0
			    && unconditional(&e->ipv6)) || visited) {
				unsigned int oldpos, size;

				/* Verdicts below -NF_MAX_VERDICT-1 do not
				 * encode any valid NF_* value: reject. */
				if ((strcmp(t->target.u.user.name,
					    IP6T_STANDARD_TARGET) == 0) &&
				    t->verdict < -NF_MAX_VERDICT - 1) {
					duprintf("mark_source_chains: bad "
						"negative verdict (%i)\n",
								t->verdict);
					return 0;
				}

				/* Return: backtrack through the last
				   big jump. */
				do {
					e->comefrom ^= (1<<NF_INET_NUMHOOKS);
#ifdef DEBUG_IP_FIREWALL_USER
					if (e->comefrom
					    & (1 << NF_INET_NUMHOOKS)) {
						duprintf("Back unset "
							 "on hook %u "
							 "rule %u\n",
							 hook, pos);
					}
#endif
					oldpos = pos;
					pos = e->counters.pcnt;
					e->counters.pcnt = 0;

					/* We're at the start. */
					if (pos == oldpos)
						goto next;

					e = (struct ip6t_entry *)
						(entry0 + pos);
				} while (oldpos == pos + e->next_offset);

				/* Move along one */
				size = e->next_offset;
				e = (struct ip6t_entry *)
					(entry0 + pos + size);
				e->counters.pcnt = pos;
				pos += size;
			} else {
				int newpos = t->verdict;

				if (strcmp(t->target.u.user.name,
					   IP6T_STANDARD_TARGET) == 0
				    && newpos >= 0) {
					/* Jump target must stay inside the
					 * blob with room for an entry. */
					if (newpos > newinfo->size -
						sizeof(struct ip6t_entry)) {
						duprintf("mark_source_chains: "
							"bad verdict (%i)\n",
							newpos);
						return 0;
					}
					/* This a jump; chase it. */
					duprintf("Jump rule %u -> %u\n",
						 pos, newpos);
				} else {
					/* ... this is a fallthru */
					newpos = pos + e->next_offset;
				}
				e = (struct ip6t_entry *)
					(entry0 + newpos);
				e->counters.pcnt = pos;
				pos = newpos;
			}
		}
	next:
		duprintf("Finished chain %u\n", hook);
	}
	return 1;
}
591
592 static int
593 cleanup_match(struct ip6t_entry_match *m, unsigned int *i)
594 {
595 struct xt_mtdtor_param par;
596
597 if (i && (*i)-- == 0)
598 return 1;
599
600 par.match = m->u.kernel.match;
601 par.matchinfo = m->data;
602 par.family = NFPROTO_IPV6;
603 if (par.match->destroy != NULL)
604 par.match->destroy(&par);
605 module_put(par.match->me);
606 return 0;
607 }
608
609 static int
610 check_entry(struct ip6t_entry *e, const char *name)
611 {
612 struct ip6t_entry_target *t;
613
614 if (!ip6_checkentry(&e->ipv6)) {
615 duprintf("ip_tables: ip check failed %p %s.\n", e, name);
616 return -EINVAL;
617 }
618
619 if (e->target_offset + sizeof(struct ip6t_entry_target) >
620 e->next_offset)
621 return -EINVAL;
622
623 t = ip6t_get_target(e);
624 if (e->target_offset + t->u.target_size > e->next_offset)
625 return -EINVAL;
626
627 return 0;
628 }
629
630 static int check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par,
631 unsigned int *i)
632 {
633 const struct ip6t_ip6 *ipv6 = par->entryinfo;
634 int ret;
635
636 par->match = m->u.kernel.match;
637 par->matchinfo = m->data;
638
639 ret = xt_check_match(par, m->u.match_size - sizeof(*m),
640 ipv6->proto, ipv6->invflags & IP6T_INV_PROTO);
641 if (ret < 0) {
642 duprintf("ip_tables: check failed for `%s'.\n",
643 par.match->name);
644 return ret;
645 }
646 ++*i;
647 return 0;
648 }
649
/* Resolve a match extension by name/revision (autoloading the
 * "ip6t_<name>" module if needed), then run its checkentry hook.
 * Holds a module reference on success; drops it on check failure. */
static int
find_check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par,
		 unsigned int *i)
{
	struct xt_match *match;
	int ret;

	match = try_then_request_module(xt_find_match(AF_INET6, m->u.user.name,
						      m->u.user.revision),
					"ip6t_%s", m->u.user.name);
	if (IS_ERR(match) || !match) {
		duprintf("find_check_match: `%s' not found\n", m->u.user.name);
		return match ? PTR_ERR(match) : -ENOENT;
	}
	m->u.kernel.match = match;

	ret = check_match(m, par, i);
	if (ret)
		goto err;

	return 0;
err:
	/* checkentry rejected the match: release the module ref. */
	module_put(m->u.kernel.match->me);
	return ret;
}
675
676 static int check_target(struct ip6t_entry *e, const char *name)
677 {
678 struct ip6t_entry_target *t = ip6t_get_target(e);
679 struct xt_tgchk_param par = {
680 .table = name,
681 .entryinfo = e,
682 .target = t->u.kernel.target,
683 .targinfo = t->data,
684 .hook_mask = e->comefrom,
685 .family = NFPROTO_IPV6,
686 };
687 int ret;
688
689 t = ip6t_get_target(e);
690 ret = xt_check_target(&par, t->u.target_size - sizeof(*t),
691 e->ipv6.proto, e->ipv6.invflags & IP6T_INV_PROTO);
692 if (ret < 0) {
693 duprintf("ip_tables: check failed for `%s'.\n",
694 t->u.kernel.target->name);
695 return ret;
696 }
697 return 0;
698 }
699
/* Fully validate one entry: structural checks, then resolve and check
 * every match, then resolve and check the target.  On any failure,
 * releases whatever module references were already taken (j counts
 * the matches successfully set up so far). */
static int
find_check_entry(struct ip6t_entry *e, const char *name, unsigned int size,
		 unsigned int *i)
{
	struct ip6t_entry_target *t;
	struct xt_target *target;
	int ret;
	unsigned int j;
	struct xt_mtchk_param mtpar;

	ret = check_entry(e, name);
	if (ret)
		return ret;

	j = 0;
	mtpar.table     = name;
	mtpar.entryinfo = &e->ipv6;
	mtpar.hook_mask = e->comefrom;
	mtpar.family    = NFPROTO_IPV6;
	ret = IP6T_MATCH_ITERATE(e, find_check_match, &mtpar, &j);
	if (ret != 0)
		goto cleanup_matches;

	/* Resolve the target, autoloading its module if necessary. */
	t = ip6t_get_target(e);
	target = try_then_request_module(xt_find_target(AF_INET6,
							t->u.user.name,
							t->u.user.revision),
					 "ip6t_%s", t->u.user.name);
	if (IS_ERR(target) || !target) {
		duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
		ret = target ? PTR_ERR(target) : -ENOENT;
		goto cleanup_matches;
	}
	t->u.kernel.target = target;

	ret = check_target(e, name);
	if (ret)
		goto err;

	(*i)++;
	return 0;
 err:
	module_put(t->u.kernel.target->me);
 cleanup_matches:
	/* Undo only the j matches that were successfully checked. */
	IP6T_MATCH_ITERATE(e, cleanup_match, &j);
	return ret;
}
747
/* First-pass walk over the user-supplied blob: verify each entry's
 * alignment and minimal size, record where hook entry points and
 * underflows land, and zero the kernel-owned scratch fields. */
static int
check_entry_size_and_hooks(struct ip6t_entry *e,
			   struct xt_table_info *newinfo,
			   unsigned char *base,
			   unsigned char *limit,
			   const unsigned int *hook_entries,
			   const unsigned int *underflows,
			   unsigned int *i)
{
	unsigned int h;

	/* Entry must be properly aligned and leave room before limit. */
	if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0
	    || (unsigned char *)e + sizeof(struct ip6t_entry) >= limit) {
		duprintf("Bad offset %p\n", e);
		return -EINVAL;
	}

	/* Must at least hold the entry header plus a target record. */
	if (e->next_offset
	    < sizeof(struct ip6t_entry) + sizeof(struct ip6t_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	/* Check hooks & underflows */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h])
			newinfo->underflow[h] = underflows[h];
	}

	/* FIXME: underflows must be unconditional, standard verdicts
	   < 0 (not IP6T_RETURN). --RR */

	/* Clear counters and comefrom */
	e->counters = ((struct xt_counters) { 0, 0 });
	e->comefrom = 0;

	(*i)++;
	return 0;
}
790
/* Tear down one entry: destroy all its matches, then its target,
 * releasing every module reference.  With non-NULL countdown @i,
 * stops (returns 1) after *i entries have been cleaned. */
static int
cleanup_entry(struct ip6t_entry *e, unsigned int *i)
{
	struct xt_tgdtor_param par;
	struct ip6t_entry_target *t;

	if (i && (*i)-- == 0)
		return 1;

	/* Cleanup all matches */
	IP6T_MATCH_ITERATE(e, cleanup_match, NULL);
	t = ip6t_get_target(e);

	par.target   = t->u.kernel.target;
	par.targinfo = t->data;
	par.family   = NFPROTO_IPV6;
	if (par.target->destroy != NULL)
		par.target->destroy(&par);
	module_put(par.target->me);
	return 0;
}
812
/* Checks and translates the user-supplied table segment (held in
   newinfo).
   Pipeline: structural checks + hook discovery, loop detection via
   mark_source_chains(), per-entry extension checks, then replicate
   the validated blob to every other CPU's copy.
   Returns 0 on success or a negative errno. */
static int
translate_table(const char *name,
		unsigned int valid_hooks,
		struct xt_table_info *newinfo,
		void *entry0,
		unsigned int size,
		unsigned int number,
		const unsigned int *hook_entries,
		const unsigned int *underflows)
{
	unsigned int i;
	int ret;

	newinfo->size = size;
	newinfo->number = number;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = 0xFFFFFFFF;
		newinfo->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_table: size %u\n", newinfo->size);
	i = 0;
	/* Walk through entries, checking offsets. */
	ret = IP6T_ENTRY_ITERATE(entry0, newinfo->size,
				 check_entry_size_and_hooks,
				 newinfo,
				 entry0,
				 entry0 + size,
				 hook_entries, underflows, &i);
	if (ret != 0)
		return ret;

	/* Userspace's declared entry count must match what we walked. */
	if (i != number) {
		duprintf("translate_table: %u not %u entries\n",
			 i, number);
		return -EINVAL;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(valid_hooks & (1 << i)))
			continue;
		if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, hook_entries[i]);
			return -EINVAL;
		}
		if (newinfo->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, underflows[i]);
			return -EINVAL;
		}
	}

	if (!mark_source_chains(newinfo, valid_hooks, entry0))
		return -ELOOP;

	/* Finally, each sanity check must pass */
	i = 0;
	ret = IP6T_ENTRY_ITERATE(entry0, newinfo->size,
				 find_check_entry, name, size, &i);

	if (ret != 0) {
		/* Undo the i entries that passed before the failure. */
		IP6T_ENTRY_ITERATE(entry0, newinfo->size,
				   cleanup_entry, &i);
		return ret;
	}

	/* And one copy for every other CPU */
	for_each_possible_cpu(i) {
		if (newinfo->entries[i] && newinfo->entries[i] != entry0)
			memcpy(newinfo->entries[i], entry0, newinfo->size);
	}

	return ret;
}
894
895 /* Gets counters. */
896 static inline int
897 add_entry_to_counter(const struct ip6t_entry *e,
898 struct xt_counters total[],
899 unsigned int *i)
900 {
901 ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
902
903 (*i)++;
904 return 0;
905 }
906
907 static inline int
908 set_entry_to_counter(const struct ip6t_entry *e,
909 struct ip6t_counters total[],
910 unsigned int *i)
911 {
912 SET_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
913
914 (*i)++;
915 return 0;
916 }
917
/* Snapshot the per-entry counters of table @t into @counters by
 * summing every CPU's private copy.  The local CPU seeds the array
 * (SET); every other CPU's copy is added under its write lock. */
static void
get_counters(const struct xt_table_info *t,
	     struct xt_counters counters[])
{
	unsigned int cpu;
	unsigned int i;
	unsigned int curcpu;

	/* Instead of clearing (by a previous call to memset())
	 * the counters and using adds, we set the counters
	 * with data used by 'current' CPU
	 *
	 * Bottom half has to be disabled to prevent deadlock
	 * if new softirq were to run and call ipt_do_table
	 */
	local_bh_disable();
	curcpu = smp_processor_id();

	i = 0;
	IP6T_ENTRY_ITERATE(t->entries[curcpu],
			   t->size,
			   set_entry_to_counter,
			   counters,
			   &i);

	for_each_possible_cpu(cpu) {
		if (cpu == curcpu)
			continue;
		i = 0;
		/* Exclude concurrent updates on that CPU's copy. */
		xt_info_wrlock(cpu);
		IP6T_ENTRY_ITERATE(t->entries[cpu],
				   t->size,
				   add_entry_to_counter,
				   counters,
				   &i);
		xt_info_wrunlock(cpu);
	}
	local_bh_enable();
}
957
958 static struct xt_counters *alloc_counters(struct xt_table *table)
959 {
960 unsigned int countersize;
961 struct xt_counters *counters;
962 struct xt_table_info *private = table->private;
963
964 /* We need atomic snapshot of counters: rest doesn't change
965 (other than comefrom, which userspace doesn't care
966 about). */
967 countersize = sizeof(struct xt_counters) * private->number;
968 counters = vmalloc_node(countersize, numa_node_id());
969
970 if (counters == NULL)
971 return ERR_PTR(-ENOMEM);
972
973 get_counters(private, counters);
974
975 return counters;
976 }
977
/* Copy the ruleset back to userspace at @userptr: first the raw blob
 * from this CPU's copy, then patch in (a) the summed counters and
 * (b) user-visible match/target names, since the kernel copy stores
 * kernel pointers in those unions.  Returns 0 or -EFAULT. */
static int
copy_entries_to_user(unsigned int total_size,
		     struct xt_table *table,
		     void __user *userptr)
{
	unsigned int off, num;
	struct ip6t_entry *e;
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;
	int ret = 0;
	const void *loc_cpu_entry;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	/* choose the copy that is on our node/cpu, ...
	 * This choice is lazy (because current thread is
	 * allowed to migrate to another cpu)
	 */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
		ret = -EFAULT;
		goto free_counters;
	}

	/* FIXME: use iterator macros --RR */
	/* ... then go back and fix counters and names */
	for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
		unsigned int i;
		const struct ip6t_entry_match *m;
		const struct ip6t_entry_target *t;

		e = (struct ip6t_entry *)(loc_cpu_entry + off);
		/* Overwrite the copied-out counters with the snapshot. */
		if (copy_to_user(userptr + off
				 + offsetof(struct ip6t_entry, counters),
				 &counters[num],
				 sizeof(counters[num])) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}

		/* Rewrite each match's kernel pointer as its name. */
		for (i = sizeof(struct ip6t_entry);
		     i < e->target_offset;
		     i += m->u.match_size) {
			m = (void *)e + i;

			if (copy_to_user(userptr + off + i
					 + offsetof(struct ip6t_entry_match,
						    u.user.name),
					 m->u.kernel.match->name,
					 strlen(m->u.kernel.match->name)+1)
			    != 0) {
				ret = -EFAULT;
				goto free_counters;
			}
		}

		/* Same for the target. */
		t = ip6t_get_target(e);
		if (copy_to_user(userptr + off + e->target_offset
				 + offsetof(struct ip6t_entry_target,
					    u.user.name),
				 t->u.kernel.target->name,
				 strlen(t->u.kernel.target->name)+1) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}
	}

 free_counters:
	vfree(counters);
	return ret;
}
1051
1052 #ifdef CONFIG_COMPAT
1053 static void compat_standard_from_user(void *dst, void *src)
1054 {
1055 int v = *(compat_int_t *)src;
1056
1057 if (v > 0)
1058 v += xt_compat_calc_jump(AF_INET6, v);
1059 memcpy(dst, &v, sizeof(v));
1060 }
1061
1062 static int compat_standard_to_user(void __user *dst, void *src)
1063 {
1064 compat_int_t cv = *(int *)src;
1065
1066 if (cv > 0)
1067 cv -= xt_compat_calc_jump(AF_INET6, cv);
1068 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
1069 }
1070
1071 static inline int
1072 compat_calc_match(struct ip6t_entry_match *m, int *size)
1073 {
1074 *size += xt_compat_match_offset(m->u.kernel.match);
1075 return 0;
1076 }
1077
/* For one entry, compute how much smaller it is under the compat
 * layout (entry header + each match + target), record that delta for
 * later offset translation, and pull down newinfo's size and any
 * hook entry/underflow marks that lie past this entry. */
static int compat_calc_entry(struct ip6t_entry *e,
			     const struct xt_table_info *info,
			     void *base, struct xt_table_info *newinfo)
{
	struct ip6t_entry_target *t;
	unsigned int entry_offset;
	int off, i, ret;

	off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
	entry_offset = (void *)e - base;
	IP6T_MATCH_ITERATE(e, compat_calc_match, &off);
	t = ip6t_get_target(e);
	off += xt_compat_target_offset(t->u.kernel.target);
	newinfo->size -= off;
	ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
	if (ret)
		return ret;

	/* Shift marks that point past this entry by the delta. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		if (info->hook_entry[i] &&
		    (e < (struct ip6t_entry *)(base + info->hook_entry[i])))
			newinfo->hook_entry[i] -= off;
		if (info->underflow[i] &&
		    (e < (struct ip6t_entry *)(base + info->underflow[i])))
			newinfo->underflow[i] -= off;
	}
	return 0;
}
1106
/* Build a compat view of @info in @newinfo: same metadata, but with
 * size and hook offsets recomputed for the 32-bit entry layout. */
static int compat_table_info(const struct xt_table_info *info,
			     struct xt_table_info *newinfo)
{
	void *loc_cpu_entry;

	if (!newinfo || !info)
		return -EINVAL;

	/* we dont care about newinfo->entries[] */
	memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
	newinfo->initial_entries = 0;
	loc_cpu_entry = info->entries[raw_smp_processor_id()];
	return IP6T_ENTRY_ITERATE(loc_cpu_entry, info->size,
				  compat_calc_entry, info, loc_cpu_entry,
				  newinfo);
}
1123 #endif
1124
1125 static int get_info(struct net *net, void __user *user, int *len, int compat)
1126 {
1127 char name[IP6T_TABLE_MAXNAMELEN];
1128 struct xt_table *t;
1129 int ret;
1130
1131 if (*len != sizeof(struct ip6t_getinfo)) {
1132 duprintf("length %u != %zu\n", *len,
1133 sizeof(struct ip6t_getinfo));
1134 return -EINVAL;
1135 }
1136
1137 if (copy_from_user(name, user, sizeof(name)) != 0)
1138 return -EFAULT;
1139
1140 name[IP6T_TABLE_MAXNAMELEN-1] = '\0';
1141 #ifdef CONFIG_COMPAT
1142 if (compat)
1143 xt_compat_lock(AF_INET6);
1144 #endif
1145 t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
1146 "ip6table_%s", name);
1147 if (t && !IS_ERR(t)) {
1148 struct ip6t_getinfo info;
1149 const struct xt_table_info *private = t->private;
1150
1151 #ifdef CONFIG_COMPAT
1152 if (compat) {
1153 struct xt_table_info tmp;
1154 ret = compat_table_info(private, &tmp);
1155 xt_compat_flush_offsets(AF_INET6);
1156 private = &tmp;
1157 }
1158 #endif
1159 info.valid_hooks = t->valid_hooks;
1160 memcpy(info.hook_entry, private->hook_entry,
1161 sizeof(info.hook_entry));
1162 memcpy(info.underflow, private->underflow,
1163 sizeof(info.underflow));
1164 info.num_entries = private->number;
1165 info.size = private->size;
1166 strcpy(info.name, name);
1167
1168 if (copy_to_user(user, &info, *len) != 0)
1169 ret = -EFAULT;
1170 else
1171 ret = 0;
1172
1173 xt_table_unlock(t);
1174 module_put(t->me);
1175 } else
1176 ret = t ? PTR_ERR(t) : -ENOENT;
1177 #ifdef CONFIG_COMPAT
1178 if (compat)
1179 xt_compat_unlock(AF_INET6);
1180 #endif
1181 return ret;
1182 }
1183
1184 static int
1185 get_entries(struct net *net, struct ip6t_get_entries __user *uptr, int *len)
1186 {
1187 int ret;
1188 struct ip6t_get_entries get;
1189 struct xt_table *t;
1190
1191 if (*len < sizeof(get)) {
1192 duprintf("get_entries: %u < %zu\n", *len, sizeof(get));
1193 return -EINVAL;
1194 }
1195 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1196 return -EFAULT;
1197 if (*len != sizeof(struct ip6t_get_entries) + get.size) {
1198 duprintf("get_entries: %u != %zu\n",
1199 *len, sizeof(get) + get.size);
1200 return -EINVAL;
1201 }
1202
1203 t = xt_find_table_lock(net, AF_INET6, get.name);
1204 if (t && !IS_ERR(t)) {
1205 struct xt_table_info *private = t->private;
1206 duprintf("t->private->number = %u\n", private->number);
1207 if (get.size == private->size)
1208 ret = copy_entries_to_user(private->size,
1209 t, uptr->entrytable);
1210 else {
1211 duprintf("get_entries: I've got %u not %u!\n",
1212 private->size, get.size);
1213 ret = -EAGAIN;
1214 }
1215 module_put(t->me);
1216 xt_table_unlock(t);
1217 } else
1218 ret = t ? PTR_ERR(t) : -ENOENT;
1219
1220 return ret;
1221 }
1222
/* Common worker for do_replace()/compat_do_replace(): atomically swap
 * the named table's ruleset for the already-translated @newinfo,
 * return the old rules' counters to userspace via @counters_ptr, and
 * release the old ruleset.  On success the caller must NOT free
 * @newinfo; on failure the caller still owns it. */
static int
__do_replace(struct net *net, const char *name, unsigned int valid_hooks,
	     struct xt_table_info *newinfo, unsigned int num_counters,
	     void __user *counters_ptr)
{
	int ret;
	struct xt_table *t;
	struct xt_table_info *oldinfo;
	struct xt_counters *counters;
	const void *loc_cpu_old_entry;

	ret = 0;
	counters = vmalloc_node(num_counters * sizeof(struct xt_counters),
				numa_node_id());
	if (!counters) {
		ret = -ENOMEM;
		goto out;
	}

	t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
				    "ip6table_%s", name);
	if (!t || IS_ERR(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free_newinfo_counters_untrans;
	}

	/* You lied! */
	if (valid_hooks != t->valid_hooks) {
		duprintf("Valid hook crap: %08X vs %08X\n",
			 valid_hooks, t->valid_hooks);
		ret = -EINVAL;
		goto put_module;
	}

	oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
	if (!oldinfo)
		goto put_module;

	/* Update module usage count based on number of rules */
	duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
		oldinfo->number, oldinfo->initial_entries, newinfo->number);
	/* Drop one reference if either table (old or new) is non-empty
	 * beyond the built-in rules; drop a second one when the old
	 * table held extra rules and the new one does not — net effect
	 * is one module reference per "table has user rules". */
	if ((oldinfo->number > oldinfo->initial_entries) ||
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);
	if ((oldinfo->number > oldinfo->initial_entries) &&
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);

	/* Get the old counters, and synchronize with replace */
	get_counters(oldinfo, counters);

	/* Decrease module usage counts and free resource */
	loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
	IP6T_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry,
			   NULL);
	xt_free_table_info(oldinfo);
	if (copy_to_user(counters_ptr, counters,
			 sizeof(struct xt_counters) * num_counters) != 0)
		ret = -EFAULT;
	vfree(counters);
	xt_table_unlock(t);
	return ret;

 put_module:
	module_put(t->me);
	xt_table_unlock(t);
 free_newinfo_counters_untrans:
	vfree(counters);
 out:
	return ret;
}
1294
1295 static int
1296 do_replace(struct net *net, void __user *user, unsigned int len)
1297 {
1298 int ret;
1299 struct ip6t_replace tmp;
1300 struct xt_table_info *newinfo;
1301 void *loc_cpu_entry;
1302
1303 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1304 return -EFAULT;
1305
1306 /* overflow check */
1307 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1308 return -ENOMEM;
1309
1310 newinfo = xt_alloc_table_info(tmp.size);
1311 if (!newinfo)
1312 return -ENOMEM;
1313
1314 /* choose the copy that is on our node/cpu */
1315 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1316 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1317 tmp.size) != 0) {
1318 ret = -EFAULT;
1319 goto free_newinfo;
1320 }
1321
1322 ret = translate_table(tmp.name, tmp.valid_hooks,
1323 newinfo, loc_cpu_entry, tmp.size, tmp.num_entries,
1324 tmp.hook_entry, tmp.underflow);
1325 if (ret != 0)
1326 goto free_newinfo;
1327
1328 duprintf("ip_tables: Translated table\n");
1329
1330 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1331 tmp.num_counters, tmp.counters);
1332 if (ret)
1333 goto free_newinfo_untrans;
1334 return 0;
1335
1336 free_newinfo_untrans:
1337 IP6T_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
1338 free_newinfo:
1339 xt_free_table_info(newinfo);
1340 return ret;
1341 }
1342
1343 /* We're lazy, and add to the first CPU; overflow works its fey magic
1344 * and everything is OK. */
1345 static int
1346 add_counter_to_entry(struct ip6t_entry *e,
1347 const struct xt_counters addme[],
1348 unsigned int *i)
1349 {
1350 ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
1351
1352 (*i)++;
1353 return 0;
1354 }
1355
1356 static int
1357 do_add_counters(struct net *net, void __user *user, unsigned int len,
1358 int compat)
1359 {
1360 unsigned int i, curcpu;
1361 struct xt_counters_info tmp;
1362 struct xt_counters *paddc;
1363 unsigned int num_counters;
1364 char *name;
1365 int size;
1366 void *ptmp;
1367 struct xt_table *t;
1368 const struct xt_table_info *private;
1369 int ret = 0;
1370 const void *loc_cpu_entry;
1371 #ifdef CONFIG_COMPAT
1372 struct compat_xt_counters_info compat_tmp;
1373
1374 if (compat) {
1375 ptmp = &compat_tmp;
1376 size = sizeof(struct compat_xt_counters_info);
1377 } else
1378 #endif
1379 {
1380 ptmp = &tmp;
1381 size = sizeof(struct xt_counters_info);
1382 }
1383
1384 if (copy_from_user(ptmp, user, size) != 0)
1385 return -EFAULT;
1386
1387 #ifdef CONFIG_COMPAT
1388 if (compat) {
1389 num_counters = compat_tmp.num_counters;
1390 name = compat_tmp.name;
1391 } else
1392 #endif
1393 {
1394 num_counters = tmp.num_counters;
1395 name = tmp.name;
1396 }
1397
1398 if (len != size + num_counters * sizeof(struct xt_counters))
1399 return -EINVAL;
1400
1401 paddc = vmalloc_node(len - size, numa_node_id());
1402 if (!paddc)
1403 return -ENOMEM;
1404
1405 if (copy_from_user(paddc, user + size, len - size) != 0) {
1406 ret = -EFAULT;
1407 goto free;
1408 }
1409
1410 t = xt_find_table_lock(net, AF_INET6, name);
1411 if (!t || IS_ERR(t)) {
1412 ret = t ? PTR_ERR(t) : -ENOENT;
1413 goto free;
1414 }
1415
1416
1417 local_bh_disable();
1418 private = t->private;
1419 if (private->number != num_counters) {
1420 ret = -EINVAL;
1421 goto unlock_up_free;
1422 }
1423
1424 i = 0;
1425 /* Choose the copy that is on our node */
1426 curcpu = smp_processor_id();
1427 xt_info_wrlock(curcpu);
1428 loc_cpu_entry = private->entries[curcpu];
1429 IP6T_ENTRY_ITERATE(loc_cpu_entry,
1430 private->size,
1431 add_counter_to_entry,
1432 paddc,
1433 &i);
1434 xt_info_wrunlock(curcpu);
1435
1436 unlock_up_free:
1437 local_bh_enable();
1438 xt_table_unlock(t);
1439 module_put(t->me);
1440 free:
1441 vfree(paddc);
1442
1443 return ret;
1444 }
1445
1446 #ifdef CONFIG_COMPAT
/* 32-bit userland layout of struct ip6t_replace: pointers shrink to
 * compat_uptr_t and the trailing entries use the compat entry layout. */
struct compat_ip6t_replace {
	char			name[IP6T_TABLE_MAXNAMELEN];
	u32			valid_hooks;
	u32			num_entries;
	u32			size;
	u32			hook_entry[NF_INET_NUMHOOKS];
	u32			underflow[NF_INET_NUMHOOKS];
	u32			num_counters;
	compat_uptr_t		counters;	/* struct ip6t_counters * */
	struct compat_ip6t_entry entries[0];
};
1458
/* Copy one native entry out to userspace in the compat layout,
 * advancing *dstptr and shrinking *size by the native/compat delta.
 * counters[*i] supplies this entry's counters; *i is bumped on
 * success so the iterator stays in step with the counter array. */
static int
compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr,
			  unsigned int *size, struct xt_counters *counters,
			  unsigned int *i)
{
	struct ip6t_entry_target *t;
	struct compat_ip6t_entry __user *ce;
	u_int16_t target_offset, next_offset;
	compat_uint_t origsize;
	int ret;

	ret = -EFAULT;
	origsize = *size;
	ce = (struct compat_ip6t_entry __user *)*dstptr;
	if (copy_to_user(ce, e, sizeof(struct ip6t_entry)))
		goto out;

	if (copy_to_user(&ce->counters, &counters[*i], sizeof(counters[*i])))
		goto out;

	*dstptr += sizeof(struct compat_ip6t_entry);
	*size -= sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);

	ret = IP6T_MATCH_ITERATE(e, xt_compat_match_to_user, dstptr, size);
	/* (origsize - *size) is how much the image has shrunk so far;
	 * offsets inside the compat entry shrink by the same amount */
	target_offset = e->target_offset - (origsize - *size);
	if (ret)
		goto out;
	t = ip6t_get_target(e);
	ret = xt_compat_target_to_user(t, dstptr, size);
	if (ret)
		goto out;
	ret = -EFAULT;
	next_offset = e->next_offset - (origsize - *size);
	if (put_user(target_offset, &ce->target_offset))
		goto out;
	if (put_user(next_offset, &ce->next_offset))
		goto out;

	(*i)++;
	return 0;
out:
	return ret;
}
1502
/* Look up (and take a module reference on) the match named in a compat
 * entry, record it in m->u.kernel.match, and add its native/compat
 * size delta to *size.  *i counts matches bound so far so the caller
 * can release exactly that many on a later failure. */
static int
compat_find_calc_match(struct ip6t_entry_match *m,
		       const char *name,
		       const struct ip6t_ip6 *ipv6,
		       unsigned int hookmask,
		       int *size, unsigned int *i)
{
	struct xt_match *match;

	match = try_then_request_module(xt_find_match(AF_INET6, m->u.user.name,
						      m->u.user.revision),
					"ip6t_%s", m->u.user.name);
	if (IS_ERR(match) || !match) {
		duprintf("compat_check_calc_match: `%s' not found\n",
			 m->u.user.name);
		return match ? PTR_ERR(match) : -ENOENT;
	}
	m->u.kernel.match = match;
	*size += xt_compat_match_offset(match);

	(*i)++;
	return 0;
}
1526
1527 static int
1528 compat_release_match(struct ip6t_entry_match *m, unsigned int *i)
1529 {
1530 if (i && (*i)-- == 0)
1531 return 1;
1532
1533 module_put(m->u.kernel.match->me);
1534 return 0;
1535 }
1536
/* Release the module references taken for one compat entry (all of its
 * matches plus its target).  A non-NULL *i limits the unwind to the
 * first *i entries of a partially validated ruleset. */
static int
compat_release_entry(struct compat_ip6t_entry *e, unsigned int *i)
{
	struct ip6t_entry_target *t;

	if (i && (*i)-- == 0)
		return 1;

	/* Cleanup all matches */
	COMPAT_IP6T_MATCH_ITERATE(e, compat_release_match, NULL);
	t = compat_ip6t_get_target(e);
	module_put(t->u.kernel.target->me);
	return 0;
}
1551
/* First pass over a compat ruleset image: validate one entry's bounds
 * and alignment, bind its match/target modules, record the compat→
 * native offset delta with xt_compat_add_offset(), and note hook
 * entry/underflow positions in @newinfo.  On error all references
 * taken so far for this entry are dropped. */
static int
check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
				  struct xt_table_info *newinfo,
				  unsigned int *size,
				  unsigned char *base,
				  unsigned char *limit,
				  unsigned int *hook_entries,
				  unsigned int *underflows,
				  unsigned int *i,
				  const char *name)
{
	struct ip6t_entry_target *t;
	struct xt_target *target;
	unsigned int entry_offset;
	unsigned int j;
	int ret, off, h;

	duprintf("check_compat_entry_size_and_hooks %p\n", e);
	if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0
	    || (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit) {
		duprintf("Bad offset %p, limit = %p\n", e, limit);
		return -EINVAL;
	}

	/* entry must at least hold its own header plus a target header */
	if (e->next_offset < sizeof(struct compat_ip6t_entry) +
			     sizeof(struct compat_xt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	/* For purposes of check_entry casting the compat entry is fine */
	ret = check_entry((struct ip6t_entry *)e, name);
	if (ret)
		return ret;

	off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
	entry_offset = (void *)e - (void *)base;
	j = 0;
	ret = COMPAT_IP6T_MATCH_ITERATE(e, compat_find_calc_match, name,
					&e->ipv6, e->comefrom, &off, &j);
	if (ret != 0)
		goto release_matches;

	t = compat_ip6t_get_target(e);
	target = try_then_request_module(xt_find_target(AF_INET6,
							t->u.user.name,
							t->u.user.revision),
					 "ip6t_%s", t->u.user.name);
	if (IS_ERR(target) || !target) {
		duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
			 t->u.user.name);
		ret = target ? PTR_ERR(target) : -ENOENT;
		goto release_matches;
	}
	t->u.kernel.target = target;

	off += xt_compat_target_offset(target);
	*size += off;
	ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
	if (ret)
		goto out;

	/* Check hooks & underflows */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h])
			newinfo->underflow[h] = underflows[h];
	}

	/* Clear counters and comefrom */
	memset(&e->counters, 0, sizeof(e->counters));
	e->comefrom = 0;

	(*i)++;
	return 0;

out:
	module_put(t->u.kernel.target->me);
release_matches:
	IP6T_MATCH_ITERATE(e, compat_release_match, &j);
	return ret;
}
1636
/* Second pass: expand one already-validated compat entry into its
 * native layout at *dstptr, growing *size by the native/compat delta
 * and shifting any hook entry/underflow offsets that lie beyond this
 * entry.  Match/target modules were bound in the first pass. */
static int
compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
			    unsigned int *size, const char *name,
			    struct xt_table_info *newinfo, unsigned char *base)
{
	struct ip6t_entry_target *t;
	struct xt_target *target;
	struct ip6t_entry *de;
	unsigned int origsize;
	int ret, h;

	ret = 0;
	origsize = *size;
	de = (struct ip6t_entry *)*dstptr;
	memcpy(de, e, sizeof(struct ip6t_entry));
	memcpy(&de->counters, &e->counters, sizeof(e->counters));

	*dstptr += sizeof(struct ip6t_entry);
	*size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);

	ret = COMPAT_IP6T_MATCH_ITERATE(e, xt_compat_match_from_user,
					dstptr, size);
	if (ret)
		return ret;
	/* native offsets grow by how much the image has grown so far
	 * (*size > origsize here, so origsize - *size is negative) */
	de->target_offset = e->target_offset - (origsize - *size);
	t = compat_ip6t_get_target(e);
	target = t->u.kernel.target;
	xt_compat_target_from_user(t, dstptr, size);

	de->next_offset = e->next_offset - (origsize - *size);
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if ((unsigned char *)de - base < newinfo->hook_entry[h])
			newinfo->hook_entry[h] -= origsize - *size;
		if ((unsigned char *)de - base < newinfo->underflow[h])
			newinfo->underflow[h] -= origsize - *size;
	}
	return ret;
}
1675
/* Final pass: run the normal match/target checkentry hooks on an entry
 * that has been converted to the native layout.  On failure, only the
 * j matches already checked are cleaned up here; the caller unwinds
 * the rest. */
static int compat_check_entry(struct ip6t_entry *e, const char *name,
			      unsigned int *i)
{
	unsigned int j;
	int ret;
	struct xt_mtchk_param mtpar;

	j = 0;
	mtpar.table = name;
	mtpar.entryinfo = &e->ipv6;
	mtpar.hook_mask = e->comefrom;
	mtpar.family = NFPROTO_IPV6;
	ret = IP6T_MATCH_ITERATE(e, check_match, &mtpar, &j);
	if (ret)
		goto cleanup_matches;

	ret = check_target(e, name);
	if (ret)
		goto cleanup_matches;

	(*i)++;
	return 0;

 cleanup_matches:
	IP6T_MATCH_ITERATE(e, cleanup_match, &j);
	return ret;
}
1703
/* Convert a compat (32-bit ABI) ruleset image into a native one.
 * Three passes: (1) validate each compat entry and bind its modules
 * while accumulating size deltas, (2) expand entries into a freshly
 * allocated native xt_table_info, (3) run the normal checkentry hooks
 * on the native image.  On success *pinfo/*pentry0 are replaced with
 * the native table and the compat-era info is freed; on failure every
 * module reference taken is dropped. */
static int
translate_compat_table(const char *name,
		       unsigned int valid_hooks,
		       struct xt_table_info **pinfo,
		       void **pentry0,
		       unsigned int total_size,
		       unsigned int number,
		       unsigned int *hook_entries,
		       unsigned int *underflows)
{
	unsigned int i, j;
	struct xt_table_info *newinfo, *info;
	void *pos, *entry0, *entry1;
	unsigned int size;
	int ret;

	info = *pinfo;
	entry0 = *pentry0;
	size = total_size;
	info->number = number;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		info->hook_entry[i] = 0xFFFFFFFF;
		info->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_compat_table: size %u\n", info->size);
	j = 0;
	xt_compat_lock(AF_INET6);
	/* Walk through entries, checking offsets. */
	ret = COMPAT_IP6T_ENTRY_ITERATE(entry0, total_size,
					check_compat_entry_size_and_hooks,
					info, &size, entry0,
					entry0 + total_size,
					hook_entries, underflows, &j, name);
	if (ret != 0)
		goto out_unlock;

	ret = -EINVAL;
	if (j != number) {
		duprintf("translate_compat_table: %u not %u entries\n",
			 j, number);
		goto out_unlock;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(valid_hooks & (1 << i)))
			continue;
		if (info->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, hook_entries[i]);
			goto out_unlock;
		}
		if (info->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, underflows[i]);
			goto out_unlock;
		}
	}

	ret = -ENOMEM;
	/* size was grown by pass 1 to the native image size */
	newinfo = xt_alloc_table_info(size);
	if (!newinfo)
		goto out_unlock;

	newinfo->number = number;
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = info->hook_entry[i];
		newinfo->underflow[i] = info->underflow[i];
	}
	entry1 = newinfo->entries[raw_smp_processor_id()];
	pos = entry1;
	size = total_size;
	ret = COMPAT_IP6T_ENTRY_ITERATE(entry0, total_size,
					compat_copy_entry_from_user,
					&pos, &size, name, newinfo, entry1);
	xt_compat_flush_offsets(AF_INET6);
	xt_compat_unlock(AF_INET6);
	if (ret)
		goto free_newinfo;

	ret = -ELOOP;
	if (!mark_source_chains(newinfo, valid_hooks, entry1))
		goto free_newinfo;

	i = 0;
	ret = IP6T_ENTRY_ITERATE(entry1, newinfo->size, compat_check_entry,
				 name, &i);
	if (ret) {
		/* unwind: entries past i still hold pass-1 references,
		 * entries up to i were fully checked and need cleanup */
		j -= i;
		COMPAT_IP6T_ENTRY_ITERATE_CONTINUE(entry0, newinfo->size, i,
						   compat_release_entry, &j);
		IP6T_ENTRY_ITERATE(entry1, newinfo->size, cleanup_entry, &i);
		xt_free_table_info(newinfo);
		return ret;
	}

	/* And one copy for every other CPU */
	for_each_possible_cpu(i)
		if (newinfo->entries[i] && newinfo->entries[i] != entry1)
			memcpy(newinfo->entries[i], entry1, newinfo->size);

	*pinfo = newinfo;
	*pentry0 = entry1;
	xt_free_table_info(info);
	return 0;

free_newinfo:
	xt_free_table_info(newinfo);
out:
	COMPAT_IP6T_ENTRY_ITERATE(entry0, total_size, compat_release_entry, &j);
	return ret;
out_unlock:
	xt_compat_flush_offsets(AF_INET6);
	xt_compat_unlock(AF_INET6);
	goto out;
}
1824
1825 static int
1826 compat_do_replace(struct net *net, void __user *user, unsigned int len)
1827 {
1828 int ret;
1829 struct compat_ip6t_replace tmp;
1830 struct xt_table_info *newinfo;
1831 void *loc_cpu_entry;
1832
1833 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1834 return -EFAULT;
1835
1836 /* overflow check */
1837 if (tmp.size >= INT_MAX / num_possible_cpus())
1838 return -ENOMEM;
1839 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1840 return -ENOMEM;
1841
1842 newinfo = xt_alloc_table_info(tmp.size);
1843 if (!newinfo)
1844 return -ENOMEM;
1845
1846 /* choose the copy that is on our node/cpu */
1847 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1848 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1849 tmp.size) != 0) {
1850 ret = -EFAULT;
1851 goto free_newinfo;
1852 }
1853
1854 ret = translate_compat_table(tmp.name, tmp.valid_hooks,
1855 &newinfo, &loc_cpu_entry, tmp.size,
1856 tmp.num_entries, tmp.hook_entry,
1857 tmp.underflow);
1858 if (ret != 0)
1859 goto free_newinfo;
1860
1861 duprintf("compat_do_replace: Translated table\n");
1862
1863 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1864 tmp.num_counters, compat_ptr(tmp.counters));
1865 if (ret)
1866 goto free_newinfo_untrans;
1867 return 0;
1868
1869 free_newinfo_untrans:
1870 IP6T_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
1871 free_newinfo:
1872 xt_free_table_info(newinfo);
1873 return ret;
1874 }
1875
1876 static int
1877 compat_do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user,
1878 unsigned int len)
1879 {
1880 int ret;
1881
1882 if (!capable(CAP_NET_ADMIN))
1883 return -EPERM;
1884
1885 switch (cmd) {
1886 case IP6T_SO_SET_REPLACE:
1887 ret = compat_do_replace(sock_net(sk), user, len);
1888 break;
1889
1890 case IP6T_SO_SET_ADD_COUNTERS:
1891 ret = do_add_counters(sock_net(sk), user, len, 1);
1892 break;
1893
1894 default:
1895 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
1896 ret = -EINVAL;
1897 }
1898
1899 return ret;
1900 }
1901
/* 32-bit userland layout of struct ip6t_get_entries. */
struct compat_ip6t_get_entries {
	char name[IP6T_TABLE_MAXNAMELEN];
	compat_uint_t size;
	struct compat_ip6t_entry entrytable[0];
};
1907
/* Copy a table's whole ruleset (plus freshly-collected counters) out
 * to a 32-bit userspace buffer, converting each entry on the way. */
static int
compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
			    void __user *userptr)
{
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;
	void __user *pos;
	unsigned int size;
	int ret = 0;
	const void *loc_cpu_entry;
	unsigned int i = 0;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	/* choose the copy that is on our node/cpu, ...
	 * This choice is lazy (because current thread is
	 * allowed to migrate to another cpu)
	 */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	pos = userptr;
	size = total_size;
	ret = IP6T_ENTRY_ITERATE(loc_cpu_entry, total_size,
				 compat_copy_entry_to_user,
				 &pos, &size, counters, &i);

	vfree(counters);
	return ret;
}
1938
1939 static int
1940 compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr,
1941 int *len)
1942 {
1943 int ret;
1944 struct compat_ip6t_get_entries get;
1945 struct xt_table *t;
1946
1947 if (*len < sizeof(get)) {
1948 duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
1949 return -EINVAL;
1950 }
1951
1952 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1953 return -EFAULT;
1954
1955 if (*len != sizeof(struct compat_ip6t_get_entries) + get.size) {
1956 duprintf("compat_get_entries: %u != %zu\n",
1957 *len, sizeof(get) + get.size);
1958 return -EINVAL;
1959 }
1960
1961 xt_compat_lock(AF_INET6);
1962 t = xt_find_table_lock(net, AF_INET6, get.name);
1963 if (t && !IS_ERR(t)) {
1964 const struct xt_table_info *private = t->private;
1965 struct xt_table_info info;
1966 duprintf("t->private->number = %u\n", private->number);
1967 ret = compat_table_info(private, &info);
1968 if (!ret && get.size == info.size) {
1969 ret = compat_copy_entries_to_user(private->size,
1970 t, uptr->entrytable);
1971 } else if (!ret) {
1972 duprintf("compat_get_entries: I've got %u not %u!\n",
1973 private->size, get.size);
1974 ret = -EAGAIN;
1975 }
1976 xt_compat_flush_offsets(AF_INET6);
1977 module_put(t->me);
1978 xt_table_unlock(t);
1979 } else
1980 ret = t ? PTR_ERR(t) : -ENOENT;
1981
1982 xt_compat_unlock(AF_INET6);
1983 return ret;
1984 }
1985
1986 static int do_ip6t_get_ctl(struct sock *, int, void __user *, int *);
1987
1988 static int
1989 compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1990 {
1991 int ret;
1992
1993 if (!capable(CAP_NET_ADMIN))
1994 return -EPERM;
1995
1996 switch (cmd) {
1997 case IP6T_SO_GET_INFO:
1998 ret = get_info(sock_net(sk), user, len, 1);
1999 break;
2000 case IP6T_SO_GET_ENTRIES:
2001 ret = compat_get_entries(sock_net(sk), user, len);
2002 break;
2003 default:
2004 ret = do_ip6t_get_ctl(sk, cmd, user, len);
2005 }
2006 return ret;
2007 }
2008 #endif
2009
2010 static int
2011 do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
2012 {
2013 int ret;
2014
2015 if (!capable(CAP_NET_ADMIN))
2016 return -EPERM;
2017
2018 switch (cmd) {
2019 case IP6T_SO_SET_REPLACE:
2020 ret = do_replace(sock_net(sk), user, len);
2021 break;
2022
2023 case IP6T_SO_SET_ADD_COUNTERS:
2024 ret = do_add_counters(sock_net(sk), user, len, 0);
2025 break;
2026
2027 default:
2028 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
2029 ret = -EINVAL;
2030 }
2031
2032 return ret;
2033 }
2034
/* Native getsockopt entry point: table info/entries dumps and
 * match/target revision queries.  CAP_NET_ADMIN required. */
static int
do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IP6T_SO_GET_INFO:
		ret = get_info(sock_net(sk), user, len, 0);
		break;

	case IP6T_SO_GET_ENTRIES:
		ret = get_entries(sock_net(sk), user, len);
		break;

	case IP6T_SO_GET_REVISION_MATCH:
	case IP6T_SO_GET_REVISION_TARGET: {
		struct ip6t_get_revision rev;
		int target;

		if (*len != sizeof(rev)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
			ret = -EFAULT;
			break;
		}

		if (cmd == IP6T_SO_GET_REVISION_TARGET)
			target = 1;
		else
			target = 0;

		/* xt_find_revision() stores the answer in ret; the module
		 * autoload retries the lookup once if it failed */
		try_then_request_module(xt_find_revision(AF_INET6, rev.name,
							 rev.revision,
							 target, &ret),
					"ip6t_%s", rev.name);
		break;
	}

	default:
		duprintf("do_ip6t_get_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}
2085
/* Register @table in @net, seeding it with the built-in ruleset from
 * @repl.  Returns the live xt_table on success or ERR_PTR on failure;
 * the caller tears down with ip6t_unregister_table(). */
struct xt_table *ip6t_register_table(struct net *net, struct xt_table *table,
				     const struct ip6t_replace *repl)
{
	int ret;
	struct xt_table_info *newinfo;
	/* empty placeholder; xt_register_table() swaps in newinfo */
	struct xt_table_info bootstrap
		= { 0, 0, 0, { 0 }, { 0 }, { } };
	void *loc_cpu_entry;
	struct xt_table *new_table;

	newinfo = xt_alloc_table_info(repl->size);
	if (!newinfo) {
		ret = -ENOMEM;
		goto out;
	}

	/* choose the copy on our node/cpu, but dont care about preemption */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	memcpy(loc_cpu_entry, repl->entries, repl->size);

	ret = translate_table(table->name, table->valid_hooks,
			      newinfo, loc_cpu_entry, repl->size,
			      repl->num_entries,
			      repl->hook_entry,
			      repl->underflow);
	if (ret != 0)
		goto out_free;

	new_table = xt_register_table(net, table, &bootstrap, newinfo);
	if (IS_ERR(new_table)) {
		ret = PTR_ERR(new_table);
		goto out_free;
	}
	return new_table;

out_free:
	xt_free_table_info(newinfo);
out:
	return ERR_PTR(ret);
}
2126
/* Unregister @table and free its ruleset, dropping the match/target
 * module references and the extra table-module reference taken while
 * user rules were loaded. */
void ip6t_unregister_table(struct xt_table *table)
{
	struct xt_table_info *private;
	void *loc_cpu_entry;
	struct module *table_owner = table->me;

	private = xt_unregister_table(table);

	/* Decrease module usage counts and free resources */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	IP6T_ENTRY_ITERATE(loc_cpu_entry, private->size, cleanup_entry, NULL);
	if (private->number > private->initial_entries)
		module_put(table_owner);
	xt_free_table_info(private);
}
2142
2143 /* Returns 1 if the type and code is matched by the range, 0 otherwise */
/* True when @type equals the configured type and @code lies in the
 * inclusive [min_code, max_code] range; @invert flips the result. */
static inline bool
icmp6_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
		      u_int8_t type, u_int8_t code,
		      bool invert)
{
	bool hit = type == test_type &&
		   code >= min_code &&
		   code <= max_code;

	return hit ^ invert;
}
2152
/* "icmp6" match: compare the packet's ICMPv6 type/code against the
 * rule.  Non-first fragments never match; a packet too short to hold
 * an ICMPv6 header is hot-dropped. */
static bool
icmp6_match(const struct sk_buff *skb, const struct xt_match_param *par)
{
	const struct icmp6hdr *ic;
	struct icmp6hdr _icmph;
	const struct ip6t_icmp *icmpinfo = par->matchinfo;

	/* Must not be a fragment. */
	if (par->fragoff != 0)
		return false;

	ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
	if (ic == NULL) {
		/* We've been asked to examine this packet, and we
		 * can't.  Hence, no choice but to drop.
		 */
		duprintf("Dropping evil ICMP tinygram.\n");
		*par->hotdrop = true;
		return false;
	}

	return icmp6_type_code_match(icmpinfo->type,
				     icmpinfo->code[0],
				     icmpinfo->code[1],
				     ic->icmp6_type, ic->icmp6_code,
				     !!(icmpinfo->invflags&IP6T_ICMP_INV));
}
2180
2181 /* Called when user tries to insert an entry of this type. */
2182 static bool icmp6_checkentry(const struct xt_mtchk_param *par)
2183 {
2184 const struct ip6t_icmp *icmpinfo = par->matchinfo;
2185
2186 /* Must specify no unknown invflags */
2187 return !(icmpinfo->invflags & ~IP6T_ICMP_INV);
2188 }
2189
2190 /* The built-in targets: standard (NULL) and error. */
/* Built-in "standard" target: its data is just a verdict int, with
 * compat translation for the 32-bit ABI. */
static struct xt_target ip6t_standard_target __read_mostly = {
	.name		= IP6T_STANDARD_TARGET,
	.targetsize	= sizeof(int),
	.family		= NFPROTO_IPV6,
#ifdef CONFIG_COMPAT
	.compatsize	= sizeof(compat_int_t),
	.compat_from_user = compat_standard_from_user,
	.compat_to_user	= compat_standard_to_user,
#endif
};
2201
/* Built-in "error" target: marks chain heads/errors in the ruleset. */
static struct xt_target ip6t_error_target __read_mostly = {
	.name		= IP6T_ERROR_TARGET,
	.target		= ip6t_error,
	.targetsize	= IP6T_FUNCTION_MAXNAMELEN,
	.family		= NFPROTO_IPV6,
};
2208
/* get/setsockopt registration: routes IP6T_SO_* socket options (and
 * their compat variants) to the handlers above. */
static struct nf_sockopt_ops ip6t_sockopts = {
	.pf		= PF_INET6,
	.set_optmin	= IP6T_BASE_CTL,
	.set_optmax	= IP6T_SO_SET_MAX+1,
	.set		= do_ip6t_set_ctl,
#ifdef CONFIG_COMPAT
	.compat_set	= compat_do_ip6t_set_ctl,
#endif
	.get_optmin	= IP6T_BASE_CTL,
	.get_optmax	= IP6T_SO_GET_MAX+1,
	.get		= do_ip6t_get_ctl,
#ifdef CONFIG_COMPAT
	.compat_get	= compat_do_ip6t_get_ctl,
#endif
	.owner		= THIS_MODULE,
};
2225
/* Built-in ICMPv6 type/code match, restricted to IPPROTO_ICMPV6. */
static struct xt_match icmp6_matchstruct __read_mostly = {
	.name		= "icmp6",
	.match		= icmp6_match,
	.matchsize	= sizeof(struct ip6t_icmp),
	.checkentry	= icmp6_checkentry,
	.proto		= IPPROTO_ICMPV6,
	.family		= NFPROTO_IPV6,
};
2234
/* Per-netns init: set up the IPv6 x_tables state for this namespace. */
static int __net_init ip6_tables_net_init(struct net *net)
{
	return xt_proto_init(net, NFPROTO_IPV6);
}
2239
/* Per-netns teardown: release the IPv6 x_tables state. */
static void __net_exit ip6_tables_net_exit(struct net *net)
{
	xt_proto_fini(net, NFPROTO_IPV6);
}
2244
/* Network-namespace lifecycle hooks for this module. */
static struct pernet_operations ip6_tables_net_ops = {
	.init = ip6_tables_net_init,
	.exit = ip6_tables_net_exit,
};
2249
/* Module init: register pernet state, the built-in targets/match and
 * the sockopt interface, unwinding in reverse order on any failure. */
static int __init ip6_tables_init(void)
{
	int ret;

	ret = register_pernet_subsys(&ip6_tables_net_ops);
	if (ret < 0)
		goto err1;

	/* Noone else will be downing sem now, so we won't sleep */
	ret = xt_register_target(&ip6t_standard_target);
	if (ret < 0)
		goto err2;
	ret = xt_register_target(&ip6t_error_target);
	if (ret < 0)
		goto err3;
	ret = xt_register_match(&icmp6_matchstruct);
	if (ret < 0)
		goto err4;

	/* Register setsockopt */
	ret = nf_register_sockopt(&ip6t_sockopts);
	if (ret < 0)
		goto err5;

	printk(KERN_INFO "ip6_tables: (C) 2000-2006 Netfilter Core Team\n");
	return 0;

err5:
	xt_unregister_match(&icmp6_matchstruct);
err4:
	xt_unregister_target(&ip6t_error_target);
err3:
	xt_unregister_target(&ip6t_standard_target);
err2:
	unregister_pernet_subsys(&ip6_tables_net_ops);
err1:
	return ret;
}
2288
/* Module exit: mirror of ip6_tables_init(), in reverse order. */
static void __exit ip6_tables_fini(void)
{
	nf_unregister_sockopt(&ip6t_sockopts);

	xt_unregister_match(&icmp6_matchstruct);
	xt_unregister_target(&ip6t_error_target);
	xt_unregister_target(&ip6t_standard_target);

	unregister_pernet_subsys(&ip6_tables_net_ops);
}
2299
2300 /*
2301 * find the offset to specified header or the protocol number of last header
2302 * if target < 0. "last header" is transport protocol header, ESP, or
2303 * "No next header".
2304 *
2305 * If target header is found, its offset is set in *offset and return protocol
2306 * number. Otherwise, return -1.
2307 *
2308 * If the first fragment doesn't contain the final protocol header or
2309 * NEXTHDR_NONE it is considered invalid.
2310 *
2311 * Note that non-1st fragment is special case that "the protocol number
2312 * of last header" is "next header" field in Fragment header. In this case,
2313 * *offset is meaningless and fragment offset is stored in *fragoff if fragoff
2314 * isn't NULL.
2315 *
2316 */
/* See the block comment above for the full contract: walk the IPv6
 * extension-header chain looking for @target (or, if target < 0, the
 * last header), returning the protocol number and setting *offset. */
int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
		  int target, unsigned short *fragoff)
{
	unsigned int start = skb_network_offset(skb) + sizeof(struct ipv6hdr);
	u8 nexthdr = ipv6_hdr(skb)->nexthdr;
	unsigned int len = skb->len - start;

	if (fragoff)
		*fragoff = 0;

	while (nexthdr != target) {
		struct ipv6_opt_hdr _hdr, *hp;
		unsigned int hdrlen;

		/* Reached a non-extension header: success only when the
		 * caller asked for "whatever comes last" (target < 0). */
		if ((!ipv6_ext_hdr(nexthdr)) || nexthdr == NEXTHDR_NONE) {
			if (target < 0)
				break;
			return -ENOENT;
		}

		hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr);
		if (hp == NULL)
			return -EBADMSG;
		if (nexthdr == NEXTHDR_FRAGMENT) {
			unsigned short _frag_off;
			__be16 *fp;
			fp = skb_header_pointer(skb,
						start+offsetof(struct frag_hdr,
							       frag_off),
						sizeof(_frag_off),
						&_frag_off);
			if (fp == NULL)
				return -EBADMSG;

			/* mask off the MF/reserved bits to get the offset */
			_frag_off = ntohs(*fp) & ~0x7;
			if (_frag_off) {
				/* non-first fragment: only report its next
				 * header when the caller wants the last
				 * header and it is a final protocol */
				if (target < 0 &&
				    ((!ipv6_ext_hdr(hp->nexthdr)) ||
				     hp->nexthdr == NEXTHDR_NONE)) {
					if (fragoff)
						*fragoff = _frag_off;
					return hp->nexthdr;
				}
				return -ENOENT;
			}
			hdrlen = 8;
		} else if (nexthdr == NEXTHDR_AUTH)
			/* AH length field counts 32-bit words */
			hdrlen = (hp->hdrlen + 2) << 2;
		else
			hdrlen = ipv6_optlen(hp);

		nexthdr = hp->nexthdr;
		len -= hdrlen;
		start += hdrlen;
	}

	*offset = start;
	return nexthdr;
}
2376
2377 EXPORT_SYMBOL(ip6t_register_table);
2378 EXPORT_SYMBOL(ip6t_unregister_table);
2379 EXPORT_SYMBOL(ip6t_do_table);
2380 EXPORT_SYMBOL(ip6t_ext_hdr);
2381 EXPORT_SYMBOL(ipv6_find_hdr);
2382
2383 module_init(ip6_tables_init);
2384 module_exit(ip6_tables_fini);
This page took 0.085587 seconds and 5 git commands to generate.