netfilter: xtables: dissolve do_match function
[deliverable/linux.git] / net / ipv6 / netfilter / ip6_tables.c
1 /*
2 * Packet matching code.
3 *
4 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
5 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12 #include <linux/capability.h>
13 #include <linux/in.h>
14 #include <linux/skbuff.h>
15 #include <linux/kmod.h>
16 #include <linux/vmalloc.h>
17 #include <linux/netdevice.h>
18 #include <linux/module.h>
19 #include <linux/poison.h>
20 #include <linux/icmpv6.h>
21 #include <net/ipv6.h>
22 #include <net/compat.h>
23 #include <asm/uaccess.h>
24 #include <linux/mutex.h>
25 #include <linux/proc_fs.h>
26 #include <linux/err.h>
27 #include <linux/cpumask.h>
28
29 #include <linux/netfilter_ipv6/ip6_tables.h>
30 #include <linux/netfilter/x_tables.h>
31 #include <net/netfilter/nf_log.h>
32 #include "../../netfilter/xt_repldata.h"
33
34 MODULE_LICENSE("GPL");
35 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
36 MODULE_DESCRIPTION("IPv6 packet filter");
37
38 /*#define DEBUG_IP_FIREWALL*/
39 /*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
40 /*#define DEBUG_IP_FIREWALL_USER*/
41
#ifdef DEBUG_IP_FIREWALL
/* Packet-path debug output; compiles to nothing unless enabled above. */
#define dprintf(format, args...) pr_info(format , ## args)
#else
#define dprintf(format, args...)
#endif

#ifdef DEBUG_IP_FIREWALL_USER
/* Userspace/configuration-path debug output; normally compiled out. */
#define duprintf(format, args...) pr_info(format , ## args)
#else
#define duprintf(format, args...)
#endif

#ifdef CONFIG_NETFILTER_DEBUG
/* Soft assertion: logs the location instead of oopsing the kernel. */
#define IP_NF_ASSERT(x)						\
do {								\
	if (!(x))						\
		printk("IP_NF_ASSERT: %s:%s:%u\n",		\
		       __func__, __FILE__, __LINE__);		\
} while(0)
#else
#define IP_NF_ASSERT(x)
#endif

#if 0
/* All the better to debug you with... */
#define static
#define inline
#endif
70
/* Allocate a blank bootstrap table for a new netns.
 * xt_alloc_initial_table() is a token-pasting macro (xt_repldata.h);
 * "ip6t"/"IP6T" select the ip6t_* structure and constant names. */
void *ip6t_alloc_initial_table(const struct xt_table *info)
{
	return xt_alloc_initial_table(ip6t, IP6T);
}
EXPORT_SYMBOL_GPL(ip6t_alloc_initial_table);
76
77 /*
78 We keep a set of rules for each CPU, so we can avoid write-locking
79 them in the softirq when updating the counters and therefore
80 only need to read-lock in the softirq; doing a write_lock_bh() in user
81 context stops packets coming through and allows user context to read
82 the counters or update the rules.
83
84 Hence the start of any table is given by get_table() below. */
85
86 /* Check for an extension */
87 int
88 ip6t_ext_hdr(u8 nexthdr)
89 {
90 return ( (nexthdr == IPPROTO_HOPOPTS) ||
91 (nexthdr == IPPROTO_ROUTING) ||
92 (nexthdr == IPPROTO_FRAGMENT) ||
93 (nexthdr == IPPROTO_ESP) ||
94 (nexthdr == IPPROTO_AH) ||
95 (nexthdr == IPPROTO_NONE) ||
96 (nexthdr == IPPROTO_DSTOPTS) );
97 }
98
/* Returns whether matches rule or not. */
/* Performance critical - called for every packet */
static inline bool
ip6_packet_match(const struct sk_buff *skb,
		 const char *indev,
		 const char *outdev,
		 const struct ip6t_ip6 *ip6info,
		 unsigned int *protoff,
		 int *fragoff, bool *hotdrop)
{
	unsigned long ret;
	const struct ipv6hdr *ipv6 = ipv6_hdr(skb);

/* XOR the comparison result with the rule's '!' (invert) flag. */
#define FWINV(bool, invflg) ((bool) ^ !!(ip6info->invflags & (invflg)))

	/* Source/destination addresses, each masked before comparing. */
	if (FWINV(ipv6_masked_addr_cmp(&ipv6->saddr, &ip6info->smsk,
				       &ip6info->src), IP6T_INV_SRCIP) ||
	    FWINV(ipv6_masked_addr_cmp(&ipv6->daddr, &ip6info->dmsk,
				       &ip6info->dst), IP6T_INV_DSTIP)) {
		dprintf("Source or dest mismatch.\n");
/*
		dprintf("SRC: %u. Mask: %u. Target: %u.%s\n", ip->saddr,
			ipinfo->smsk.s_addr, ipinfo->src.s_addr,
			ipinfo->invflags & IP6T_INV_SRCIP ? " (INV)" : "");
		dprintf("DST: %u. Mask: %u. Target: %u.%s\n", ip->daddr,
			ipinfo->dmsk.s_addr, ipinfo->dst.s_addr,
			ipinfo->invflags & IP6T_INV_DSTIP ? " (INV)" : "");*/
		return false;
	}

	/* Input interface name; the mask implements trailing '+' wildcards. */
	ret = ifname_compare_aligned(indev, ip6info->iniface, ip6info->iniface_mask);

	if (FWINV(ret != 0, IP6T_INV_VIA_IN)) {
		dprintf("VIA in mismatch (%s vs %s).%s\n",
			indev, ip6info->iniface,
			ip6info->invflags&IP6T_INV_VIA_IN ?" (INV)":"");
		return false;
	}

	/* Output interface name, same wildcard scheme. */
	ret = ifname_compare_aligned(outdev, ip6info->outiface, ip6info->outiface_mask);

	if (FWINV(ret != 0, IP6T_INV_VIA_OUT)) {
		dprintf("VIA out mismatch (%s vs %s).%s\n",
			outdev, ip6info->outiface,
			ip6info->invflags&IP6T_INV_VIA_OUT ?" (INV)":"");
		return false;
	}

	/* ... might want to do something with class and flowlabel here ... */

	/* look for the desired protocol header */
	if((ip6info->flags & IP6T_F_PROTO)) {
		int protohdr;
		unsigned short _frag_off;

		protohdr = ipv6_find_hdr(skb, protoff, -1, &_frag_off);
		if (protohdr < 0) {
			/* Broken header chain in a non-fragment: ask the
			 * caller (via *hotdrop) to drop the packet. */
			if (_frag_off == 0)
				*hotdrop = true;
			return false;
		}
		*fragoff = _frag_off;

		dprintf("Packet protocol %hi ?= %s%hi.\n",
			protohdr,
			ip6info->invflags & IP6T_INV_PROTO ? "!":"",
			ip6info->proto);

		if (ip6info->proto == protohdr) {
			if(ip6info->invflags & IP6T_INV_PROTO) {
				return false;
			}
			return true;
		}

		/* We need match for the '-p all', too! */
		if ((ip6info->proto != 0) &&
		    !(ip6info->invflags & IP6T_INV_PROTO))
			return false;
	}
	return true;
}
181
182 /* should be ip6 safe */
183 static bool
184 ip6_checkentry(const struct ip6t_ip6 *ipv6)
185 {
186 if (ipv6->flags & ~IP6T_F_MASK) {
187 duprintf("Unknown flag bits set: %08X\n",
188 ipv6->flags & ~IP6T_F_MASK);
189 return false;
190 }
191 if (ipv6->invflags & ~IP6T_INV_MASK) {
192 duprintf("Unknown invflag bits set: %08X\n",
193 ipv6->invflags & ~IP6T_INV_MASK);
194 return false;
195 }
196 return true;
197 }
198
/* Target of the built-in ERROR rule: should never see a packet; if it
 * does, log the error-target name (ratelimited) and drop the packet. */
static unsigned int
ip6t_error(struct sk_buff *skb, const struct xt_target_param *par)
{
	if (net_ratelimit())
		pr_info("error: `%s'\n", (const char *)par->targinfo);

	return NF_DROP;
}
207
/* Return the rule located 'offset' bytes from the table base. */
static inline struct ip6t_entry *
get_entry(const void *base, unsigned int offset)
{
	return (struct ip6t_entry *)(base + offset);
}
213
/* All zeroes == unconditional rule. */
/* Mildly perf critical (only if packet tracing is on) */
static inline bool unconditional(const struct ip6t_ip6 *ipv6)
{
	/* uncond is static, hence zero-initialized. */
	static const struct ip6t_ip6 uncond;

	return memcmp(ipv6, &uncond, sizeof(uncond)) == 0;
}
222
/* const-preserving wrapper around ip6t_get_target(), which takes a
 * non-const entry; the cast only drops const on the way in. */
static inline const struct ip6t_entry_target *
ip6t_get_target_c(const struct ip6t_entry *e)
{
	return ip6t_get_target((struct ip6t_entry *)e);
}
228
#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
/* This cries for unification! */
/* Hook number -> builtin chain name used in TRACE log lines. */
static const char *const hooknames[] = {
	[NF_INET_PRE_ROUTING]		= "PREROUTING",
	[NF_INET_LOCAL_IN]		= "INPUT",
	[NF_INET_FORWARD]		= "FORWARD",
	[NF_INET_LOCAL_OUT]		= "OUTPUT",
	[NF_INET_POST_ROUTING]		= "POSTROUTING",
};

/* What kind of rule a traced packet stopped at; indexes comments[]. */
enum nf_ip_trace_comments {
	NF_IP6_TRACE_COMMENT_RULE,
	NF_IP6_TRACE_COMMENT_RETURN,
	NF_IP6_TRACE_COMMENT_POLICY,
};

static const char *const comments[] = {
	[NF_IP6_TRACE_COMMENT_RULE]	= "rule",
	[NF_IP6_TRACE_COMMENT_RETURN]	= "return",
	[NF_IP6_TRACE_COMMENT_POLICY]	= "policy",
};

/* Log parameters for TRACE output (syslog level 4, all log flags). */
static struct nf_loginfo trace_loginfo = {
	.type = NF_LOG_TYPE_LOG,
	.u = {
		.log = {
			.level = 4,
			.logflags = NF_LOG_MASK,
		},
	},
};
261
/* Mildly perf critical (only if packet tracing is on) */
/* Iteration helper for trace_packet(): called for each entry 's' while
 * walking towards the matched entry 'e'.  Tracks the current chain name
 * and rule number; returns 1 (stop walking) once s == e. */
static inline int
get_chainname_rulenum(const struct ip6t_entry *s, const struct ip6t_entry *e,
		      const char *hookname, const char **chainname,
		      const char **comment, unsigned int *rulenum)
{
	const struct ip6t_standard_target *t = (void *)ip6t_get_target_c(s);

	if (strcmp(t->target.u.kernel.target->name, IP6T_ERROR_TARGET) == 0) {
		/* Head of user chain: ERROR target with chainname */
		*chainname = t->target.data;
		(*rulenum) = 0;
	} else if (s == e) {
		(*rulenum)++;

		if (s->target_offset == sizeof(struct ip6t_entry) &&
		    strcmp(t->target.u.kernel.target->name,
			   IP6T_STANDARD_TARGET) == 0 &&
		    t->verdict < 0 &&
		    unconditional(&s->ipv6)) {
			/* Tail of chains: STANDARD target (return/policy) */
			/* In a builtin chain the tail is the policy;
			 * in a user chain it is an implicit return. */
			*comment = *chainname == hookname
				? comments[NF_IP6_TRACE_COMMENT_POLICY]
				: comments[NF_IP6_TRACE_COMMENT_RETURN];
		}
		return 1;
	} else
		(*rulenum)++;

	return 0;
}
293
/* Emit one "TRACE: table:chain:comment:rulenum" log line for a packet
 * that matched entry 'e' while skb->nf_trace was set. */
static void trace_packet(const struct sk_buff *skb,
			 unsigned int hook,
			 const struct net_device *in,
			 const struct net_device *out,
			 const char *tablename,
			 const struct xt_table_info *private,
			 const struct ip6t_entry *e)
{
	const void *table_base;
	const struct ip6t_entry *root;
	const char *hookname, *chainname, *comment;
	const struct ip6t_entry *iter;
	unsigned int rulenum = 0;

	/* Walk this CPU's copy, from the hook's entry point onward. */
	table_base = private->entries[smp_processor_id()];
	root = get_entry(table_base, private->hook_entry[hook]);

	hookname = chainname = hooknames[hook];
	comment = comments[NF_IP6_TRACE_COMMENT_RULE];

	xt_entry_foreach(iter, root, private->size - private->hook_entry[hook])
		if (get_chainname_rulenum(iter, e, hookname,
		    &chainname, &comment, &rulenum) != 0)
			break;

	nf_log_packet(AF_INET6, hook, skb, in, out, &trace_loginfo,
		      "TRACE: %s:%s:%s:%u ",
		      tablename, chainname, comment, rulenum);
}
323 #endif
324
/* Advance to the next rule (entries are laid out back-to-back,
 * next_offset bytes apart). */
static inline __pure struct ip6t_entry *
ip6t_next_entry(const struct ip6t_entry *entry)
{
	return (void *)entry + entry->next_offset;
}
330
/* Returns one of the generic firewall policies, like NF_ACCEPT. */
/* Main packet-evaluation loop: walk this CPU's copy of 'table' starting
 * at the entry point for 'hook', running matches and targets until a
 * final verdict is reached.  Jumps to user chains are recorded on the
 * per-CPU jumpstack so RETURN can resume after the jump. */
unsigned int
ip6t_do_table(struct sk_buff *skb,
	      unsigned int hook,
	      const struct net_device *in,
	      const struct net_device *out,
	      struct xt_table *table)
{
	static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
	bool hotdrop = false;
	/* Initializing verdict to NF_DROP keeps gcc happy. */
	unsigned int verdict = NF_DROP;
	const char *indev, *outdev;
	const void *table_base;
	struct ip6t_entry *e, **jumpstack;
	unsigned int *stackptr, origptr, cpu;
	const struct xt_table_info *private;
	struct xt_match_param mtpar;
	struct xt_target_param tgpar;

	/* Initialization */
	indev = in ? in->name : nulldevname;
	outdev = out ? out->name : nulldevname;
	/* We handle fragments by dealing with the first fragment as
	 * if it was a normal packet.  All other fragments are treated
	 * normally, except that they will NEVER match rules that ask
	 * things we don't know, ie. tcp syn flag or ports).  If the
	 * rule is also a fragment-specific rule, non-fragments won't
	 * match it. */
	mtpar.hotdrop = &hotdrop;
	mtpar.in      = tgpar.in  = in;
	mtpar.out     = tgpar.out = out;
	mtpar.family  = tgpar.family = NFPROTO_IPV6;
	mtpar.hooknum = tgpar.hooknum = hook;

	IP_NF_ASSERT(table->valid_hooks & (1 << hook));

	xt_info_rdlock_bh();
	private = table->private;
	cpu        = smp_processor_id();
	table_base = private->entries[cpu];
	jumpstack  = (struct ip6t_entry **)private->jumpstack[cpu];
	stackptr   = &private->stackptr[cpu];
	origptr    = *stackptr;

	e = get_entry(table_base, private->hook_entry[hook]);

	do {
		const struct ip6t_entry_target *t;
		const struct xt_entry_match *ematch;

		IP_NF_ASSERT(e);
		if (!ip6_packet_match(skb, indev, outdev, &e->ipv6,
		    &mtpar.thoff, &mtpar.fragoff, &hotdrop)) {
 no_match:
			e = ip6t_next_entry(e);
			continue;
		}

		/* Run every match extension attached to this rule;
		 * all must succeed for the rule to fire. */
		xt_ematch_foreach(ematch, e) {
			mtpar.match     = ematch->u.kernel.match;
			mtpar.matchinfo = ematch->data;
			if (!mtpar.match->match(skb, &mtpar))
				goto no_match;
		}

		ADD_COUNTER(e->counters,
			    ntohs(ipv6_hdr(skb)->payload_len) +
			    sizeof(struct ipv6hdr), 1);

		t = ip6t_get_target_c(e);
		IP_NF_ASSERT(t->u.kernel.target);

#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
		/* The packet is traced: log it */
		if (unlikely(skb->nf_trace))
			trace_packet(skb, hook, in, out,
				     table->name, private, e);
#endif
		/* Standard target? */
		if (!t->u.kernel.target->target) {
			int v;

			v = ((struct ip6t_standard_target *)t)->verdict;
			if (v < 0) {
				/* Pop from stack? */
				if (v != IP6T_RETURN) {
					/* Absolute verdicts are encoded
					 * as -verdict - 1. */
					verdict = (unsigned)(-v) - 1;
					break;
				}
				/* RETURN: pop the jumpstack, or fall back
				 * to the hook's underflow (policy) rule. */
				if (*stackptr == 0)
					e = get_entry(table_base,
					    private->underflow[hook]);
				else
					e = ip6t_next_entry(jumpstack[--*stackptr]);
				continue;
			}
			if (table_base + v != ip6t_next_entry(e) &&
			    !(e->ipv6.flags & IP6T_F_GOTO)) {
				/* Real jump (not goto/fallthrough):
				 * push the return address. */
				if (*stackptr >= private->stacksize) {
					verdict = NF_DROP;
					break;
				}
				jumpstack[(*stackptr)++] = e;
			}

			e = get_entry(table_base, v);
			continue;
		}

		/* Extension target: let it decide the verdict. */
		tgpar.target   = t->u.kernel.target;
		tgpar.targinfo = t->data;

		verdict = t->u.kernel.target->target(skb, &tgpar);
		if (verdict == IP6T_CONTINUE)
			e = ip6t_next_entry(e);
		else
			/* Verdict */
			break;
	} while (!hotdrop);

	xt_info_rdunlock_bh();
	*stackptr = origptr;

#ifdef DEBUG_ALLOW_ALL
	return NF_ACCEPT;
#else
	if (hotdrop)
		return NF_DROP;
	else return verdict;
#endif
}
464
/* Figures out from what hook each rule can be called: returns 0 if
   there are loops.  Puts hook bitmask in comefrom. */
static int
mark_source_chains(const struct xt_table_info *newinfo,
		   unsigned int valid_hooks, void *entry0)
{
	unsigned int hook;

	/* No recursion; use packet counter to save back ptrs (reset
	   to 0 as we leave), and comefrom to save source hook bitmask */
	for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
		unsigned int pos = newinfo->hook_entry[hook];
		struct ip6t_entry *e = (struct ip6t_entry *)(entry0 + pos);

		if (!(valid_hooks & (1 << hook)))
			continue;

		/* Set initial back pointer. */
		e->counters.pcnt = pos;

		for (;;) {
			const struct ip6t_standard_target *t
				= (void *)ip6t_get_target_c(e);
			int visited = e->comefrom & (1 << hook);

			/* Bit NF_INET_NUMHOOKS means "on the current
			 * path"; meeting it again is a cycle. */
			if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
				printk("iptables: loop hook %u pos %u %08X.\n",
				       hook, pos, e->comefrom);
				return 0;
			}
			e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));

			/* Unconditional return/END. */
			if ((e->target_offset == sizeof(struct ip6t_entry) &&
			     (strcmp(t->target.u.user.name,
				     IP6T_STANDARD_TARGET) == 0) &&
			     t->verdict < 0 &&
			     unconditional(&e->ipv6)) || visited) {
				unsigned int oldpos, size;

				if ((strcmp(t->target.u.user.name,
					    IP6T_STANDARD_TARGET) == 0) &&
				    t->verdict < -NF_MAX_VERDICT - 1) {
					duprintf("mark_source_chains: bad "
						"negative verdict (%i)\n",
								t->verdict);
					return 0;
				}

				/* Return: backtrack through the last
				   big jump. */
				do {
					e->comefrom ^= (1<<NF_INET_NUMHOOKS);
#ifdef DEBUG_IP_FIREWALL_USER
					if (e->comefrom
					    & (1 << NF_INET_NUMHOOKS)) {
						duprintf("Back unset "
							 "on hook %u "
							 "rule %u\n",
							 hook, pos);
					}
#endif
					/* Follow the back pointer stored
					 * in pcnt, clearing it as we go. */
					oldpos = pos;
					pos = e->counters.pcnt;
					e->counters.pcnt = 0;

					/* We're at the start. */
					if (pos == oldpos)
						goto next;

					e = (struct ip6t_entry *)
						(entry0 + pos);
				} while (oldpos == pos + e->next_offset);

				/* Move along one */
				size = e->next_offset;
				e = (struct ip6t_entry *)
					(entry0 + pos + size);
				e->counters.pcnt = pos;
				pos += size;
			} else {
				int newpos = t->verdict;

				if (strcmp(t->target.u.user.name,
					   IP6T_STANDARD_TARGET) == 0 &&
				    newpos >= 0) {
					if (newpos > newinfo->size -
						sizeof(struct ip6t_entry)) {
						duprintf("mark_source_chains: "
							 "bad verdict (%i)\n",
							 newpos);
						return 0;
					}
					/* This a jump; chase it. */
					duprintf("Jump rule %u -> %u\n",
						 pos, newpos);
				} else {
					/* ... this is a fallthru */
					newpos = pos + e->next_offset;
				}
				e = (struct ip6t_entry *)
					(entry0 + newpos);
				e->counters.pcnt = pos;
				pos = newpos;
			}
		}
	next:
		duprintf("Finished chain %u\n", hook);
	}
	return 1;
}
576
/* Undo find_check_match(): run the extension's destructor (if any) and
 * drop the module reference taken at lookup time. */
static void cleanup_match(struct ip6t_entry_match *m, struct net *net)
{
	struct xt_mtdtor_param par;

	par.net       = net;
	par.match     = m->u.kernel.match;
	par.matchinfo = m->data;
	par.family    = NFPROTO_IPV6;
	if (par.match->destroy != NULL)
		par.match->destroy(&par);
	module_put(par.match->me);
}
589
/* Structural sanity checks on one userspace-supplied rule, before any
 * extension lookup: valid ip6 part, and the target record must fit
 * between target_offset and next_offset.  Returns 0 or -EINVAL. */
static int
check_entry(const struct ip6t_entry *e, const char *name)
{
	const struct ip6t_entry_target *t;

	if (!ip6_checkentry(&e->ipv6)) {
		duprintf("ip_tables: ip check failed %p %s.\n", e, name);
		return -EINVAL;
	}

	/* Room for at least the target header after the matches? */
	if (e->target_offset + sizeof(struct ip6t_entry_target) >
	    e->next_offset)
		return -EINVAL;

	/* Declared target size must not overrun the entry. */
	t = ip6t_get_target_c(e);
	if (e->target_offset + t->u.target_size > e->next_offset)
		return -EINVAL;

	return 0;
}
610
611 static int check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par)
612 {
613 const struct ip6t_ip6 *ipv6 = par->entryinfo;
614 int ret;
615
616 par->match = m->u.kernel.match;
617 par->matchinfo = m->data;
618
619 ret = xt_check_match(par, m->u.match_size - sizeof(*m),
620 ipv6->proto, ipv6->invflags & IP6T_INV_PROTO);
621 if (ret < 0) {
622 duprintf("ip_tables: check failed for `%s'.\n",
623 par.match->name);
624 return ret;
625 }
626 return 0;
627 }
628
/* Resolve a match extension by name/revision (taking a module ref),
 * then validate it with check_match().  On failure the module ref is
 * released; on success the caller owns it (cleanup_match releases). */
static int
find_check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par)
{
	struct xt_match *match;
	int ret;

	match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name,
				      m->u.user.revision);
	if (IS_ERR(match)) {
		duprintf("find_check_match: `%s' not found\n", m->u.user.name);
		return PTR_ERR(match);
	}
	m->u.kernel.match = match;

	ret = check_match(m, par);
	if (ret)
		goto err;

	return 0;
err:
	module_put(m->u.kernel.match->me);
	return ret;
}
652
653 static int check_target(struct ip6t_entry *e, struct net *net, const char *name)
654 {
655 struct ip6t_entry_target *t = ip6t_get_target(e);
656 struct xt_tgchk_param par = {
657 .net = net,
658 .table = name,
659 .entryinfo = e,
660 .target = t->u.kernel.target,
661 .targinfo = t->data,
662 .hook_mask = e->comefrom,
663 .family = NFPROTO_IPV6,
664 };
665 int ret;
666
667 t = ip6t_get_target(e);
668 ret = xt_check_target(&par, t->u.target_size - sizeof(*t),
669 e->ipv6.proto, e->ipv6.invflags & IP6T_INV_PROTO);
670 if (ret < 0) {
671 duprintf("ip_tables: check failed for `%s'.\n",
672 t->u.kernel.target->name);
673 return ret;
674 }
675 return 0;
676 }
677
/* Fully resolve and validate one rule: every match extension, then the
 * target.  On any failure, everything acquired so far is torn down
 * ('j' counts how many matches were successfully set up). */
static int
find_check_entry(struct ip6t_entry *e, struct net *net, const char *name,
		 unsigned int size)
{
	struct ip6t_entry_target *t;
	struct xt_target *target;
	int ret;
	unsigned int j;
	struct xt_mtchk_param mtpar;
	struct xt_entry_match *ematch;

	ret = check_entry(e, name);
	if (ret)
		return ret;

	j = 0;
	mtpar.net       = net;
	mtpar.table     = name;
	mtpar.entryinfo = &e->ipv6;
	mtpar.hook_mask = e->comefrom;
	mtpar.family    = NFPROTO_IPV6;
	xt_ematch_foreach(ematch, e) {
		ret = find_check_match(ematch, &mtpar);
		if (ret != 0)
			goto cleanup_matches;
		++j;
	}

	/* Resolve the target (takes a module reference). */
	t = ip6t_get_target(e);
	target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name,
					t->u.user.revision);
	if (IS_ERR(target)) {
		duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
		ret = PTR_ERR(target);
		goto cleanup_matches;
	}
	t->u.kernel.target = target;

	ret = check_target(e, net, name);
	if (ret)
		goto err;
	return 0;
 err:
	module_put(t->u.kernel.target->me);
 cleanup_matches:
	/* Tear down only the first j matches that were set up. */
	xt_ematch_foreach(ematch, e) {
		if (j-- == 0)
			break;
		cleanup_match(ematch, net);
	}
	return ret;
}
730
/* An underflow (builtin chain policy) entry must be an unconditional
 * STANDARD rule with an absolute ACCEPT or DROP verdict. */
static bool check_underflow(const struct ip6t_entry *e)
{
	const struct ip6t_entry_target *t;
	unsigned int verdict;

	if (!unconditional(&e->ipv6))
		return false;
	t = ip6t_get_target_c(e);
	if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
		return false;
	/* Absolute verdicts are stored as -verdict - 1; decode first. */
	verdict = ((struct ip6t_standard_target *)t)->verdict;
	verdict = -verdict - 1;
	return verdict == NF_DROP || verdict == NF_ACCEPT;
}
745
/* Validate one entry's alignment/size against the blob bounds, record
 * hook entry points and underflows it coincides with, and reset its
 * counters/comefrom for the upcoming mark_source_chains() pass. */
static int
check_entry_size_and_hooks(struct ip6t_entry *e,
			   struct xt_table_info *newinfo,
			   const unsigned char *base,
			   const unsigned char *limit,
			   const unsigned int *hook_entries,
			   const unsigned int *underflows,
			   unsigned int valid_hooks)
{
	unsigned int h;

	if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0 ||
	    (unsigned char *)e + sizeof(struct ip6t_entry) >= limit) {
		duprintf("Bad offset %p\n", e);
		return -EINVAL;
	}

	/* Entry must be big enough for its own header plus a target. */
	if (e->next_offset
	    < sizeof(struct ip6t_entry) + sizeof(struct ip6t_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	/* Check hooks & underflows */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if (!(valid_hooks & (1 << h)))
			continue;
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h]) {
			if (!check_underflow(e)) {
				pr_err("Underflows must be unconditional and "
				       "use the STANDARD target with "
				       "ACCEPT/DROP\n");
				return -EINVAL;
			}
			newinfo->underflow[h] = underflows[h];
		}
	}

	/* Clear counters and comefrom */
	e->counters = ((struct xt_counters) { 0, 0 });
	e->comefrom = 0;
	return 0;
}
792
/* Full teardown of one rule: destroy all matches, then the target,
 * dropping the module references taken during find_check_entry(). */
static void cleanup_entry(struct ip6t_entry *e, struct net *net)
{
	struct xt_tgdtor_param par;
	struct ip6t_entry_target *t;
	struct xt_entry_match *ematch;

	/* Cleanup all matches */
	xt_ematch_foreach(ematch, e)
		cleanup_match(ematch, net);
	t = ip6t_get_target(e);

	par.net      = net;
	par.target   = t->u.kernel.target;
	par.targinfo = t->data;
	par.family   = NFPROTO_IPV6;
	if (par.target->destroy != NULL)
		par.target->destroy(&par);
	module_put(par.target->me);
}
812
/* Checks and translates the user-supplied table segment (held in
   newinfo) */
/* Pipeline: size/hook bookkeeping per entry -> entry count check ->
 * all hooks assigned -> loop detection (mark_source_chains) ->
 * extension lookup/validation -> replicate the blob to every CPU.
 * Returns 0, -EINVAL, -ELOOP, or an errno from find_check_entry(). */
static int
translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
		const struct ip6t_replace *repl)
{
	struct ip6t_entry *iter;
	unsigned int i;
	int ret = 0;

	newinfo->size = repl->size;
	newinfo->number = repl->num_entries;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = 0xFFFFFFFF;
		newinfo->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_table: size %u\n", newinfo->size);
	i = 0;
	/* Walk through entries, checking offsets. */
	xt_entry_foreach(iter, entry0, newinfo->size) {
		ret = check_entry_size_and_hooks(iter, newinfo, entry0,
						 entry0 + repl->size,
						 repl->hook_entry,
						 repl->underflow,
						 repl->valid_hooks);
		if (ret != 0)
			return ret;
		++i;
		/* Each user chain head (ERROR target) may be jumped to,
		 * so it grows the required jumpstack depth. */
		if (strcmp(ip6t_get_target(iter)->u.user.name,
		    XT_ERROR_TARGET) == 0)
			++newinfo->stacksize;
	}

	if (i != repl->num_entries) {
		duprintf("translate_table: %u not %u entries\n",
			 i, repl->num_entries);
		return -EINVAL;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(repl->valid_hooks & (1 << i)))
			continue;
		if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, repl->hook_entry[i]);
			return -EINVAL;
		}
		if (newinfo->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, repl->underflow[i]);
			return -EINVAL;
		}
	}

	if (!mark_source_chains(newinfo, repl->valid_hooks, entry0))
		return -ELOOP;

	/* Finally, each sanity check must pass */
	i = 0;
	xt_entry_foreach(iter, entry0, newinfo->size) {
		ret = find_check_entry(iter, net, repl->name, repl->size);
		if (ret != 0)
			break;
		++i;
	}

	if (ret != 0) {
		/* Unwind only the i entries that passed. */
		xt_entry_foreach(iter, entry0, newinfo->size) {
			if (i-- == 0)
				break;
			cleanup_entry(iter, net);
		}
		return ret;
	}

	/* And one copy for every other CPU */
	for_each_possible_cpu(i) {
		if (newinfo->entries[i] && newinfo->entries[i] != entry0)
			memcpy(newinfo->entries[i], entry0, newinfo->size);
	}

	return ret;
}
901
/* Sum per-CPU packet/byte counters for every rule into counters[].
 * The current CPU's values seed the array (SET_COUNTER); every other
 * CPU's copy is then added under its xt_info write lock. */
static void
get_counters(const struct xt_table_info *t,
	     struct xt_counters counters[])
{
	struct ip6t_entry *iter;
	unsigned int cpu;
	unsigned int i;
	unsigned int curcpu;

	/* Instead of clearing (by a previous call to memset())
	 * the counters and using adds, we set the counters
	 * with data used by 'current' CPU
	 *
	 * Bottom half has to be disabled to prevent deadlock
	 * if new softirq were to run and call ipt_do_table
	 */
	local_bh_disable();
	curcpu = smp_processor_id();

	i = 0;
	xt_entry_foreach(iter, t->entries[curcpu], t->size) {
		SET_COUNTER(counters[i], iter->counters.bcnt,
			    iter->counters.pcnt);
		++i;
	}

	for_each_possible_cpu(cpu) {
		if (cpu == curcpu)
			continue;
		i = 0;
		xt_info_wrlock(cpu);
		xt_entry_foreach(iter, t->entries[cpu], t->size) {
			ADD_COUNTER(counters[i], iter->counters.bcnt,
				    iter->counters.pcnt);
			++i;
		}
		xt_info_wrunlock(cpu);
	}
	local_bh_enable();
}
942
/* Allocate a counter array for every rule in 'table' and fill it with
 * an atomic snapshot.  Returns the array (caller vfree()s) or
 * ERR_PTR(-ENOMEM). */
static struct xt_counters *alloc_counters(const struct xt_table *table)
{
	unsigned int countersize;
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;

	/* We need atomic snapshot of counters: rest doesn't change
	   (other than comefrom, which userspace doesn't care
	   about). */
	countersize = sizeof(struct xt_counters) * private->number;
	counters = vmalloc_node(countersize, numa_node_id());

	if (counters == NULL)
		return ERR_PTR(-ENOMEM);

	get_counters(private, counters);

	return counters;
}
962
/* Copy the whole ruleset blob to userspace, then patch in the summed
 * counters and replace kernel match/target names with their canonical
 * user-visible names.  Returns 0 or -EFAULT. */
static int
copy_entries_to_user(unsigned int total_size,
		     const struct xt_table *table,
		     void __user *userptr)
{
	unsigned int off, num;
	const struct ip6t_entry *e;
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;
	int ret = 0;
	const void *loc_cpu_entry;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	/* choose the copy that is on our node/cpu, ...
	 * This choice is lazy (because current thread is
	 * allowed to migrate to another cpu)
	 */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
		ret = -EFAULT;
		goto free_counters;
	}

	/* FIXME: use iterator macros --RR */
	/* ... then go back and fix counters and names */
	for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
		unsigned int i;
		const struct ip6t_entry_match *m;
		const struct ip6t_entry_target *t;

		e = (struct ip6t_entry *)(loc_cpu_entry + off);
		/* Overwrite this entry's counters with the snapshot. */
		if (copy_to_user(userptr + off
				 + offsetof(struct ip6t_entry, counters),
				 &counters[num],
				 sizeof(counters[num])) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}

		/* Rewrite each match's name field for userspace. */
		for (i = sizeof(struct ip6t_entry);
		     i < e->target_offset;
		     i += m->u.match_size) {
			m = (void *)e + i;

			if (copy_to_user(userptr + off + i
					 + offsetof(struct ip6t_entry_match,
						    u.user.name),
					 m->u.kernel.match->name,
					 strlen(m->u.kernel.match->name)+1)
			    != 0) {
				ret = -EFAULT;
				goto free_counters;
			}
		}

		/* Likewise for the target's name. */
		t = ip6t_get_target_c(e);
		if (copy_to_user(userptr + off + e->target_offset
				 + offsetof(struct ip6t_entry_target,
					    u.user.name),
				 t->u.kernel.target->name,
				 strlen(t->u.kernel.target->name)+1) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}
	}

 free_counters:
	vfree(counters);
	return ret;
}
1036
1037 #ifdef CONFIG_COMPAT
/* Translate a 32-bit (compat) standard verdict to native: positive
 * verdicts are jump offsets and need the compat->native delta added. */
static void compat_standard_from_user(void *dst, const void *src)
{
	int v = *(compat_int_t *)src;

	if (v > 0)
		v += xt_compat_calc_jump(AF_INET6, v);
	memcpy(dst, &v, sizeof(v));
}
1046
/* Inverse of compat_standard_from_user(): shrink a native jump offset
 * back to the compat layout before copying it to userspace. */
static int compat_standard_to_user(void __user *dst, const void *src)
{
	compat_int_t cv = *(int *)src;

	if (cv > 0)
		cv -= xt_compat_calc_jump(AF_INET6, cv);
	return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
}
1055
/* Compute how much smaller one entry is in the compat (32-bit) layout,
 * record the per-entry offset delta, and shrink newinfo's size and any
 * hook entry/underflow offsets that lie beyond this entry. */
static int compat_calc_entry(const struct ip6t_entry *e,
			     const struct xt_table_info *info,
			     const void *base, struct xt_table_info *newinfo)
{
	const struct xt_entry_match *ematch;
	const struct ip6t_entry_target *t;
	unsigned int entry_offset;
	int off, i, ret;

	off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
	entry_offset = (void *)e - base;
	xt_ematch_foreach(ematch, e)
		off += xt_compat_match_offset(ematch->u.kernel.match);
	t = ip6t_get_target_c(e);
	off += xt_compat_target_offset(t->u.kernel.target);
	newinfo->size -= off;
	ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
	if (ret)
		return ret;

	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		if (info->hook_entry[i] &&
		    (e < (struct ip6t_entry *)(base + info->hook_entry[i])))
			newinfo->hook_entry[i] -= off;
		if (info->underflow[i] &&
		    (e < (struct ip6t_entry *)(base + info->underflow[i])))
			newinfo->underflow[i] -= off;
	}
	return 0;
}
1086
/* Build a table_info describing the compat (32-bit) view of 'info':
 * same metadata, with sizes/offsets shrunk entry by entry. */
static int compat_table_info(const struct xt_table_info *info,
			     struct xt_table_info *newinfo)
{
	struct ip6t_entry *iter;
	void *loc_cpu_entry;
	int ret;

	if (!newinfo || !info)
		return -EINVAL;

	/* we dont care about newinfo->entries[] */
	memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
	newinfo->initial_entries = 0;
	loc_cpu_entry = info->entries[raw_smp_processor_id()];
	xt_entry_foreach(iter, loc_cpu_entry, info->size) {
		ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
		if (ret != 0)
			return ret;
	}
	return 0;
}
1108 #endif
1109
/* IP6T_SO_GET_INFO handler: look up the named table (loading its module
 * if needed) and copy its hook entries, underflows, entry count and
 * size to userspace.  With compat != 0, report compat-layout sizes. */
static int get_info(struct net *net, void __user *user,
		    const int *len, int compat)
{
	char name[IP6T_TABLE_MAXNAMELEN];
	struct xt_table *t;
	int ret;

	if (*len != sizeof(struct ip6t_getinfo)) {
		duprintf("length %u != %zu\n", *len,
			 sizeof(struct ip6t_getinfo));
		return -EINVAL;
	}

	if (copy_from_user(name, user, sizeof(name)) != 0)
		return -EFAULT;

	/* Userspace string: force NUL termination. */
	name[IP6T_TABLE_MAXNAMELEN-1] = '\0';
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_lock(AF_INET6);
#endif
	t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
				    "ip6table_%s", name);
	if (t && !IS_ERR(t)) {
		struct ip6t_getinfo info;
		const struct xt_table_info *private = t->private;
#ifdef CONFIG_COMPAT
		struct xt_table_info tmp;

		if (compat) {
			/* Report sizes as seen through the compat layout. */
			ret = compat_table_info(private, &tmp);
			xt_compat_flush_offsets(AF_INET6);
			private = &tmp;
		}
#endif
		info.valid_hooks = t->valid_hooks;
		memcpy(info.hook_entry, private->hook_entry,
		       sizeof(info.hook_entry));
		memcpy(info.underflow, private->underflow,
		       sizeof(info.underflow));
		info.num_entries = private->number;
		info.size = private->size;
		strcpy(info.name, name);

		if (copy_to_user(user, &info, *len) != 0)
			ret = -EFAULT;
		else
			ret = 0;

		xt_table_unlock(t);
		module_put(t->me);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_unlock(AF_INET6);
#endif
	return ret;
}
1169
1170 static int
1171 get_entries(struct net *net, struct ip6t_get_entries __user *uptr,
1172 const int *len)
1173 {
1174 int ret;
1175 struct ip6t_get_entries get;
1176 struct xt_table *t;
1177
1178 if (*len < sizeof(get)) {
1179 duprintf("get_entries: %u < %zu\n", *len, sizeof(get));
1180 return -EINVAL;
1181 }
1182 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1183 return -EFAULT;
1184 if (*len != sizeof(struct ip6t_get_entries) + get.size) {
1185 duprintf("get_entries: %u != %zu\n",
1186 *len, sizeof(get) + get.size);
1187 return -EINVAL;
1188 }
1189
1190 t = xt_find_table_lock(net, AF_INET6, get.name);
1191 if (t && !IS_ERR(t)) {
1192 struct xt_table_info *private = t->private;
1193 duprintf("t->private->number = %u\n", private->number);
1194 if (get.size == private->size)
1195 ret = copy_entries_to_user(private->size,
1196 t, uptr->entrytable);
1197 else {
1198 duprintf("get_entries: I've got %u not %u!\n",
1199 private->size, get.size);
1200 ret = -EAGAIN;
1201 }
1202 module_put(t->me);
1203 xt_table_unlock(t);
1204 } else
1205 ret = t ? PTR_ERR(t) : -ENOENT;
1206
1207 return ret;
1208 }
1209
/*
 * Common worker for native and compat SO_SET_REPLACE: atomically swap in
 * the already-translated replacement table, return the old rules'
 * counters to userspace and release the old table.
 *
 * @net:          namespace the table lives in
 * @name:         table name ("filter", "mangle", ...)
 * @valid_hooks:  hook mask the caller claims; must equal the table's
 * @newinfo:      translated replacement; ownership passes to the table
 *                on success
 * @num_counters: number of struct xt_counters userspace expects back
 * @counters_ptr: userspace buffer receiving the old counters
 *
 * Returns 0 or a negative errno.  On failure the caller still owns
 * @newinfo and must clean it up.
 */
static int
__do_replace(struct net *net, const char *name, unsigned int valid_hooks,
	     struct xt_table_info *newinfo, unsigned int num_counters,
	     void __user *counters_ptr)
{
	int ret;
	struct xt_table *t;
	struct xt_table_info *oldinfo;
	struct xt_counters *counters;
	const void *loc_cpu_old_entry;
	struct ip6t_entry *iter;

	ret = 0;
	counters = vmalloc_node(num_counters * sizeof(struct xt_counters),
				numa_node_id());
	if (!counters) {
		ret = -ENOMEM;
		goto out;
	}

	t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
				    "ip6table_%s", name);
	if (!t || IS_ERR(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free_newinfo_counters_untrans;
	}

	/* You lied! */
	if (valid_hooks != t->valid_hooks) {
		duprintf("Valid hook crap: %08X vs %08X\n",
			 valid_hooks, t->valid_hooks);
		ret = -EINVAL;
		goto put_module;
	}

	oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
	if (!oldinfo)
		goto put_module;

	/* Update module usage count based on number of rules.
	 * The table module holds one reference per non-initial ruleset;
	 * the two conditionals below drop it once when the old ruleset
	 * had extra rules, and once more when the new one does not. */
	duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
		 oldinfo->number, oldinfo->initial_entries, newinfo->number);
	if ((oldinfo->number > oldinfo->initial_entries) ||
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);
	if ((oldinfo->number > oldinfo->initial_entries) &&
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);

	/* Get the old counters, and synchronize with replace */
	get_counters(oldinfo, counters);

	/* Decrease module usage counts and free resource */
	loc_cpu_entry = oldinfo->entries[raw_smp_processor_id()];
	xt_entry_foreach(iter, loc_cpu_old_entry, oldinfo->size)
		cleanup_entry(iter, net);

	xt_free_table_info(oldinfo);
	/* A failed copy here still leaves the new table in place; only
	 * the counter report is lost (ret = -EFAULT). */
	if (copy_to_user(counters_ptr, counters,
			 sizeof(struct xt_counters) * num_counters) != 0)
		ret = -EFAULT;
	vfree(counters);
	xt_table_unlock(t);
	return ret;

 put_module:
	module_put(t->me);
	xt_table_unlock(t);
 free_newinfo_counters_untrans:
	vfree(counters);
 out:
	return ret;
}
1283
1284 static int
1285 do_replace(struct net *net, const void __user *user, unsigned int len)
1286 {
1287 int ret;
1288 struct ip6t_replace tmp;
1289 struct xt_table_info *newinfo;
1290 void *loc_cpu_entry;
1291 struct ip6t_entry *iter;
1292
1293 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1294 return -EFAULT;
1295
1296 /* overflow check */
1297 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1298 return -ENOMEM;
1299
1300 newinfo = xt_alloc_table_info(tmp.size);
1301 if (!newinfo)
1302 return -ENOMEM;
1303
1304 /* choose the copy that is on our node/cpu */
1305 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1306 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1307 tmp.size) != 0) {
1308 ret = -EFAULT;
1309 goto free_newinfo;
1310 }
1311
1312 ret = translate_table(net, newinfo, loc_cpu_entry, &tmp);
1313 if (ret != 0)
1314 goto free_newinfo;
1315
1316 duprintf("ip_tables: Translated table\n");
1317
1318 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1319 tmp.num_counters, tmp.counters);
1320 if (ret)
1321 goto free_newinfo_untrans;
1322 return 0;
1323
1324 free_newinfo_untrans:
1325 xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1326 cleanup_entry(iter, net);
1327 free_newinfo:
1328 xt_free_table_info(newinfo);
1329 return ret;
1330 }
1331
1332 static int
1333 do_add_counters(struct net *net, const void __user *user, unsigned int len,
1334 int compat)
1335 {
1336 unsigned int i, curcpu;
1337 struct xt_counters_info tmp;
1338 struct xt_counters *paddc;
1339 unsigned int num_counters;
1340 char *name;
1341 int size;
1342 void *ptmp;
1343 struct xt_table *t;
1344 const struct xt_table_info *private;
1345 int ret = 0;
1346 const void *loc_cpu_entry;
1347 struct ip6t_entry *iter;
1348 #ifdef CONFIG_COMPAT
1349 struct compat_xt_counters_info compat_tmp;
1350
1351 if (compat) {
1352 ptmp = &compat_tmp;
1353 size = sizeof(struct compat_xt_counters_info);
1354 } else
1355 #endif
1356 {
1357 ptmp = &tmp;
1358 size = sizeof(struct xt_counters_info);
1359 }
1360
1361 if (copy_from_user(ptmp, user, size) != 0)
1362 return -EFAULT;
1363
1364 #ifdef CONFIG_COMPAT
1365 if (compat) {
1366 num_counters = compat_tmp.num_counters;
1367 name = compat_tmp.name;
1368 } else
1369 #endif
1370 {
1371 num_counters = tmp.num_counters;
1372 name = tmp.name;
1373 }
1374
1375 if (len != size + num_counters * sizeof(struct xt_counters))
1376 return -EINVAL;
1377
1378 paddc = vmalloc_node(len - size, numa_node_id());
1379 if (!paddc)
1380 return -ENOMEM;
1381
1382 if (copy_from_user(paddc, user + size, len - size) != 0) {
1383 ret = -EFAULT;
1384 goto free;
1385 }
1386
1387 t = xt_find_table_lock(net, AF_INET6, name);
1388 if (!t || IS_ERR(t)) {
1389 ret = t ? PTR_ERR(t) : -ENOENT;
1390 goto free;
1391 }
1392
1393
1394 local_bh_disable();
1395 private = t->private;
1396 if (private->number != num_counters) {
1397 ret = -EINVAL;
1398 goto unlock_up_free;
1399 }
1400
1401 i = 0;
1402 /* Choose the copy that is on our node */
1403 curcpu = smp_processor_id();
1404 xt_info_wrlock(curcpu);
1405 loc_cpu_entry = private->entries[curcpu];
1406 xt_entry_foreach(iter, loc_cpu_entry, private->size) {
1407 ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt);
1408 ++i;
1409 }
1410 xt_info_wrunlock(curcpu);
1411
1412 unlock_up_free:
1413 local_bh_enable();
1414 xt_table_unlock(t);
1415 module_put(t->me);
1416 free:
1417 vfree(paddc);
1418
1419 return ret;
1420 }
1421
1422 #ifdef CONFIG_COMPAT
/* 32-bit userland layout of struct ip6t_replace as passed to
 * IP6T_SO_SET_REPLACE by compat tasks; only pointer-sized members differ
 * from the native structure. */
struct compat_ip6t_replace {
	char name[IP6T_TABLE_MAXNAMELEN];	/* which table to replace */
	u32 valid_hooks;			/* hook bitmask of the table */
	u32 num_entries;			/* rules in entries[] */
	u32 size;				/* byte size of entries[] */
	u32 hook_entry[NF_INET_NUMHOOKS];	/* per-hook start offsets */
	u32 underflow[NF_INET_NUMHOOKS];	/* per-hook policy offsets */
	u32 num_counters;			/* old counters wanted back */
	compat_uptr_t counters;	/* struct ip6t_counters * */
	struct compat_ip6t_entry entries[0];	/* the rule blob itself */
};
1434
/*
 * Dump one native rule to userspace in compat (32-bit) layout.
 *
 * Copies the entry header plus counters[i], converts each match and the
 * target via the x_tables compat helpers, then patches target_offset and
 * next_offset to account for the native-vs-compat size difference
 * accumulated in (origsize - *size).
 *
 * @dstptr and @size are advanced/shrunk as data is emitted.
 * Returns 0 or a negative errno.
 */
static int
compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr,
			  unsigned int *size, struct xt_counters *counters,
			  unsigned int i)
{
	struct ip6t_entry_target *t;
	struct compat_ip6t_entry __user *ce;
	u_int16_t target_offset, next_offset;
	compat_uint_t origsize;
	const struct xt_entry_match *ematch;
	int ret = 0;

	origsize = *size;
	ce = (struct compat_ip6t_entry __user *)*dstptr;
	/* header copy first; offsets inside it are fixed up at the end */
	if (copy_to_user(ce, e, sizeof(struct ip6t_entry)) != 0 ||
	    copy_to_user(&ce->counters, &counters[i],
			 sizeof(counters[i])) != 0)
		return -EFAULT;

	*dstptr += sizeof(struct compat_ip6t_entry);
	*size -= sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);

	xt_ematch_foreach(ematch, e) {
		ret = xt_compat_match_to_user(ematch, dstptr, size);
		if (ret != 0)
			return ret;
	}
	/* (origsize - *size) is the total shrinkage emitted so far */
	target_offset = e->target_offset - (origsize - *size);
	t = ip6t_get_target(e);
	ret = xt_compat_target_to_user(t, dstptr, size);
	if (ret)
		return ret;
	next_offset = e->next_offset - (origsize - *size);
	if (put_user(target_offset, &ce->target_offset) != 0 ||
	    put_user(next_offset, &ce->next_offset) != 0)
		return -EFAULT;
	return 0;
}
1473
1474 static int
1475 compat_find_calc_match(struct ip6t_entry_match *m,
1476 const char *name,
1477 const struct ip6t_ip6 *ipv6,
1478 unsigned int hookmask,
1479 int *size)
1480 {
1481 struct xt_match *match;
1482
1483 match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name,
1484 m->u.user.revision);
1485 if (IS_ERR(match)) {
1486 duprintf("compat_check_calc_match: `%s' not found\n",
1487 m->u.user.name);
1488 return PTR_ERR(match);
1489 }
1490 m->u.kernel.match = match;
1491 *size += xt_compat_match_offset(match);
1492 return 0;
1493 }
1494
1495 static void compat_release_entry(struct compat_ip6t_entry *e)
1496 {
1497 struct ip6t_entry_target *t;
1498 struct xt_entry_match *ematch;
1499
1500 /* Cleanup all matches */
1501 xt_ematch_foreach(ematch, e)
1502 module_put(ematch->u.kernel.match->me);
1503 t = compat_ip6t_get_target(e);
1504 module_put(t->u.kernel.target->me);
1505 }
1506
/*
 * First pass over one compat rule: bounds/alignment checks, resolve and
 * pin its matches and target, register the native-vs-compat size delta
 * with the x_tables compat layer, and record hook entry/underflow
 * positions in @newinfo.
 *
 * On success the match/target module references stay held (released
 * later by compat_release_entry() or cleanup_entry()).  On failure all
 * references taken so far are dropped.  Returns 0 or a negative errno.
 */
static int
check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
				  struct xt_table_info *newinfo,
				  unsigned int *size,
				  const unsigned char *base,
				  const unsigned char *limit,
				  const unsigned int *hook_entries,
				  const unsigned int *underflows,
				  const char *name)
{
	struct xt_entry_match *ematch;
	struct ip6t_entry_target *t;
	struct xt_target *target;
	unsigned int entry_offset;
	unsigned int j;	/* matches successfully pinned so far */
	int ret, off, h;

	duprintf("check_compat_entry_size_and_hooks %p\n", e);
	/* entry must be aligned and fully inside [base, limit) */
	if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0 ||
	    (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit) {
		duprintf("Bad offset %p, limit = %p\n", e, limit);
		return -EINVAL;
	}

	/* minimal room for the entry header plus a target header */
	if (e->next_offset < sizeof(struct compat_ip6t_entry) +
			     sizeof(struct compat_xt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	/* For purposes of check_entry casting the compat entry is fine */
	ret = check_entry((struct ip6t_entry *)e, name);
	if (ret)
		return ret;

	/* off accumulates how much bigger the native entry will be */
	off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
	entry_offset = (void *)e - (void *)base;
	j = 0;
	xt_ematch_foreach(ematch, e) {
		ret = compat_find_calc_match(ematch, name,
					     &e->ipv6, e->comefrom, &off);
		if (ret != 0)
			goto release_matches;
		++j;
	}

	t = compat_ip6t_get_target(e);
	target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name,
					t->u.user.revision);
	if (IS_ERR(target)) {
		duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
			 t->u.user.name);
		ret = PTR_ERR(target);
		goto release_matches;
	}
	t->u.kernel.target = target;

	off += xt_compat_target_offset(target);
	*size += off;
	/* remember this entry's delta for the second (copy) pass */
	ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
	if (ret)
		goto out;

	/* Check hooks & underflows */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h])
			newinfo->underflow[h] = underflows[h];
	}

	/* Clear counters and comefrom */
	memset(&e->counters, 0, sizeof(e->counters));
	e->comefrom = 0;
	return 0;

out:
	module_put(t->u.kernel.target->me);
release_matches:
	/* drop only the j matches that were successfully pinned */
	xt_ematch_foreach(ematch, e) {
		if (j-- == 0)
			break;
		module_put(ematch->u.kernel.match->me);
	}
	return ret;
}
1594
/*
 * Second pass over one compat rule: expand it into native layout at
 * *dstptr using the matches/target pinned by
 * check_compat_entry_size_and_hooks(), fix up the per-entry offsets and
 * shift any hook entry/underflow offsets that lie beyond this entry.
 *
 * @dstptr is advanced and @size grown by the native-vs-compat delta.
 * Returns 0 or a negative errno from match conversion.
 */
static int
compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
			    unsigned int *size, const char *name,
			    struct xt_table_info *newinfo, unsigned char *base)
{
	struct ip6t_entry_target *t;
	struct xt_target *target;
	struct ip6t_entry *de;
	unsigned int origsize;
	int ret, h;
	struct xt_entry_match *ematch;

	ret = 0;
	origsize = *size;
	de = (struct ip6t_entry *)*dstptr;
	/* header first; offsets inside it are patched below */
	memcpy(de, e, sizeof(struct ip6t_entry));
	memcpy(&de->counters, &e->counters, sizeof(e->counters));

	*dstptr += sizeof(struct ip6t_entry);
	*size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);

	xt_ematch_foreach(ematch, e) {
		ret = xt_compat_match_from_user(ematch, dstptr, size);
		if (ret != 0)
			return ret;
	}
	/* (origsize - *size) is negative growth accumulated so far */
	de->target_offset = e->target_offset - (origsize - *size);
	t = compat_ip6t_get_target(e);
	target = t->u.kernel.target;
	xt_compat_target_from_user(t, dstptr, size);

	de->next_offset = e->next_offset - (origsize - *size);
	/* hooks located after this entry move by the growth amount */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if ((unsigned char *)de - base < newinfo->hook_entry[h])
			newinfo->hook_entry[h] -= origsize - *size;
		if ((unsigned char *)de - base < newinfo->underflow[h])
			newinfo->underflow[h] -= origsize - *size;
	}
	return ret;
}
1635
1636 static int compat_check_entry(struct ip6t_entry *e, struct net *net,
1637 const char *name)
1638 {
1639 unsigned int j;
1640 int ret = 0;
1641 struct xt_mtchk_param mtpar;
1642 struct xt_entry_match *ematch;
1643
1644 j = 0;
1645 mtpar.net = net;
1646 mtpar.table = name;
1647 mtpar.entryinfo = &e->ipv6;
1648 mtpar.hook_mask = e->comefrom;
1649 mtpar.family = NFPROTO_IPV6;
1650 xt_ematch_foreach(ematch, e) {
1651 ret = check_match(ematch, &mtpar);
1652 if (ret != 0)
1653 goto cleanup_matches;
1654 ++j;
1655 }
1656
1657 ret = check_target(e, net, name);
1658 if (ret)
1659 goto cleanup_matches;
1660 return 0;
1661
1662 cleanup_matches:
1663 xt_ematch_foreach(ematch, e) {
1664 if (j-- == 0)
1665 break;
1666 cleanup_match(ematch, net);
1667 }
1668 return ret;
1669 }
1670
/*
 * Convert a complete 32-bit compat ruleset into native layout and verify
 * it, replacing *pinfo / *pentry0 with the freshly allocated native
 * table on success (the compat originals are freed).
 *
 * Three passes: (1) size/hook checks that also pin matches/targets and
 * record per-entry size deltas, (2) expansion into the native blob,
 * (3) mark_source_chains() loop detection plus ->checkentry validation.
 * The error paths must distinguish entries that only hold module
 * references (compat_release_entry) from those whose ->check already ran
 * (cleanup_entry) — see the i/j/skip bookkeeping below.
 *
 * Returns 0 or a negative errno (-ELOOP for rule-chain cycles).
 */
static int
translate_compat_table(struct net *net,
		       const char *name,
		       unsigned int valid_hooks,
		       struct xt_table_info **pinfo,
		       void **pentry0,
		       unsigned int total_size,
		       unsigned int number,
		       unsigned int *hook_entries,
		       unsigned int *underflows)
{
	unsigned int i, j;
	struct xt_table_info *newinfo, *info;
	void *pos, *entry0, *entry1;
	struct compat_ip6t_entry *iter0;
	struct ip6t_entry *iter1;
	unsigned int size;
	int ret = 0;

	info = *pinfo;
	entry0 = *pentry0;
	size = total_size;
	info->number = number;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		info->hook_entry[i] = 0xFFFFFFFF;
		info->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_compat_table: size %u\n", info->size);
	j = 0;	/* entries whose modules are pinned */
	xt_compat_lock(AF_INET6);
	/* Walk through entries, checking offsets. */
	xt_entry_foreach(iter0, entry0, total_size) {
		ret = check_compat_entry_size_and_hooks(iter0, info, &size,
							entry0,
							entry0 + total_size,
							hook_entries,
							underflows,
							name);
		if (ret != 0)
			goto out_unlock;
		++j;
	}

	ret = -EINVAL;
	if (j != number) {
		duprintf("translate_compat_table: %u not %u entries\n",
			 j, number);
		goto out_unlock;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(valid_hooks & (1 << i)))
			continue;
		if (info->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, hook_entries[i]);
			goto out_unlock;
		}
		if (info->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, underflows[i]);
			goto out_unlock;
		}
	}

	ret = -ENOMEM;
	/* size now includes all native-vs-compat growth */
	newinfo = xt_alloc_table_info(size);
	if (!newinfo)
		goto out_unlock;

	newinfo->number = number;
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = info->hook_entry[i];
		newinfo->underflow[i] = info->underflow[i];
	}
	/* pass two: expand compat entries into this cpu's native copy */
	entry1 = newinfo->entries[raw_smp_processor_id()];
	pos = entry1;
	size = total_size;
	xt_entry_foreach(iter0, entry0, total_size) {
		ret = compat_copy_entry_from_user(iter0, &pos, &size,
						  name, newinfo, entry1);
		if (ret != 0)
			break;
	}
	xt_compat_flush_offsets(AF_INET6);
	xt_compat_unlock(AF_INET6);
	if (ret)
		goto free_newinfo;

	ret = -ELOOP;
	if (!mark_source_chains(newinfo, valid_hooks, entry1))
		goto free_newinfo;

	/* pass three: run the checkentry hooks on the native entries */
	i = 0;
	xt_entry_foreach(iter1, entry1, newinfo->size) {
		ret = compat_check_entry(iter1, net, name);
		if (ret != 0)
			break;
		++i;
	}
	if (ret) {
		/*
		 * The first i matches need cleanup_entry (calls ->destroy)
		 * because they had called ->check already. The other j-i
		 * entries need only release.
		 */
		int skip = i;
		j -= i;
		xt_entry_foreach(iter0, entry0, newinfo->size) {
			if (skip-- > 0)
				continue;
			if (j-- == 0)
				break;
			compat_release_entry(iter0);
		}
		xt_entry_foreach(iter1, entry1, newinfo->size) {
			if (i-- == 0)
				break;
			cleanup_entry(iter1, net);
		}
		xt_free_table_info(newinfo);
		return ret;
	}

	/* And one copy for every other CPU */
	for_each_possible_cpu(i)
		if (newinfo->entries[i] && newinfo->entries[i] != entry1)
			memcpy(newinfo->entries[i], entry1, newinfo->size);

	*pinfo = newinfo;
	*pentry0 = entry1;
	xt_free_table_info(info);
	return 0;

free_newinfo:
	xt_free_table_info(newinfo);
out:
	/* release module refs for the j entries still only pinned */
	xt_entry_foreach(iter0, entry0, total_size) {
		if (j-- == 0)
			break;
		compat_release_entry(iter0);
	}
	return ret;
out_unlock:
	xt_compat_flush_offsets(AF_INET6);
	xt_compat_unlock(AF_INET6);
	goto out;
}
1824
1825 static int
1826 compat_do_replace(struct net *net, void __user *user, unsigned int len)
1827 {
1828 int ret;
1829 struct compat_ip6t_replace tmp;
1830 struct xt_table_info *newinfo;
1831 void *loc_cpu_entry;
1832 struct ip6t_entry *iter;
1833
1834 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1835 return -EFAULT;
1836
1837 /* overflow check */
1838 if (tmp.size >= INT_MAX / num_possible_cpus())
1839 return -ENOMEM;
1840 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1841 return -ENOMEM;
1842
1843 newinfo = xt_alloc_table_info(tmp.size);
1844 if (!newinfo)
1845 return -ENOMEM;
1846
1847 /* choose the copy that is on our node/cpu */
1848 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1849 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1850 tmp.size) != 0) {
1851 ret = -EFAULT;
1852 goto free_newinfo;
1853 }
1854
1855 ret = translate_compat_table(net, tmp.name, tmp.valid_hooks,
1856 &newinfo, &loc_cpu_entry, tmp.size,
1857 tmp.num_entries, tmp.hook_entry,
1858 tmp.underflow);
1859 if (ret != 0)
1860 goto free_newinfo;
1861
1862 duprintf("compat_do_replace: Translated table\n");
1863
1864 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1865 tmp.num_counters, compat_ptr(tmp.counters));
1866 if (ret)
1867 goto free_newinfo_untrans;
1868 return 0;
1869
1870 free_newinfo_untrans:
1871 xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1872 cleanup_entry(iter, net);
1873 free_newinfo:
1874 xt_free_table_info(newinfo);
1875 return ret;
1876 }
1877
1878 static int
1879 compat_do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user,
1880 unsigned int len)
1881 {
1882 int ret;
1883
1884 if (!capable(CAP_NET_ADMIN))
1885 return -EPERM;
1886
1887 switch (cmd) {
1888 case IP6T_SO_SET_REPLACE:
1889 ret = compat_do_replace(sock_net(sk), user, len);
1890 break;
1891
1892 case IP6T_SO_SET_ADD_COUNTERS:
1893 ret = do_add_counters(sock_net(sk), user, len, 1);
1894 break;
1895
1896 default:
1897 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
1898 ret = -EINVAL;
1899 }
1900
1901 return ret;
1902 }
1903
/* 32-bit userland layout of struct ip6t_get_entries, used by
 * IP6T_SO_GET_ENTRIES on the compat path. */
struct compat_ip6t_get_entries {
	char name[IP6T_TABLE_MAXNAMELEN];	/* which table to dump */
	compat_uint_t size;			/* expected blob size */
	struct compat_ip6t_entry entrytable[0];	/* dumped rules go here */
};
1909
1910 static int
1911 compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
1912 void __user *userptr)
1913 {
1914 struct xt_counters *counters;
1915 const struct xt_table_info *private = table->private;
1916 void __user *pos;
1917 unsigned int size;
1918 int ret = 0;
1919 const void *loc_cpu_entry;
1920 unsigned int i = 0;
1921 struct ip6t_entry *iter;
1922
1923 counters = alloc_counters(table);
1924 if (IS_ERR(counters))
1925 return PTR_ERR(counters);
1926
1927 /* choose the copy that is on our node/cpu, ...
1928 * This choice is lazy (because current thread is
1929 * allowed to migrate to another cpu)
1930 */
1931 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1932 pos = userptr;
1933 size = total_size;
1934 xt_entry_foreach(iter, loc_cpu_entry, total_size) {
1935 ret = compat_copy_entry_to_user(iter, &pos,
1936 &size, counters, i++);
1937 if (ret != 0)
1938 break;
1939 }
1940
1941 vfree(counters);
1942 return ret;
1943 }
1944
1945 static int
1946 compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr,
1947 int *len)
1948 {
1949 int ret;
1950 struct compat_ip6t_get_entries get;
1951 struct xt_table *t;
1952
1953 if (*len < sizeof(get)) {
1954 duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
1955 return -EINVAL;
1956 }
1957
1958 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1959 return -EFAULT;
1960
1961 if (*len != sizeof(struct compat_ip6t_get_entries) + get.size) {
1962 duprintf("compat_get_entries: %u != %zu\n",
1963 *len, sizeof(get) + get.size);
1964 return -EINVAL;
1965 }
1966
1967 xt_compat_lock(AF_INET6);
1968 t = xt_find_table_lock(net, AF_INET6, get.name);
1969 if (t && !IS_ERR(t)) {
1970 const struct xt_table_info *private = t->private;
1971 struct xt_table_info info;
1972 duprintf("t->private->number = %u\n", private->number);
1973 ret = compat_table_info(private, &info);
1974 if (!ret && get.size == info.size) {
1975 ret = compat_copy_entries_to_user(private->size,
1976 t, uptr->entrytable);
1977 } else if (!ret) {
1978 duprintf("compat_get_entries: I've got %u not %u!\n",
1979 private->size, get.size);
1980 ret = -EAGAIN;
1981 }
1982 xt_compat_flush_offsets(AF_INET6);
1983 module_put(t->me);
1984 xt_table_unlock(t);
1985 } else
1986 ret = t ? PTR_ERR(t) : -ENOENT;
1987
1988 xt_compat_unlock(AF_INET6);
1989 return ret;
1990 }
1991
1992 static int do_ip6t_get_ctl(struct sock *, int, void __user *, int *);
1993
1994 static int
1995 compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1996 {
1997 int ret;
1998
1999 if (!capable(CAP_NET_ADMIN))
2000 return -EPERM;
2001
2002 switch (cmd) {
2003 case IP6T_SO_GET_INFO:
2004 ret = get_info(sock_net(sk), user, len, 1);
2005 break;
2006 case IP6T_SO_GET_ENTRIES:
2007 ret = compat_get_entries(sock_net(sk), user, len);
2008 break;
2009 default:
2010 ret = do_ip6t_get_ctl(sk, cmd, user, len);
2011 }
2012 return ret;
2013 }
2014 #endif
2015
2016 static int
2017 do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
2018 {
2019 int ret;
2020
2021 if (!capable(CAP_NET_ADMIN))
2022 return -EPERM;
2023
2024 switch (cmd) {
2025 case IP6T_SO_SET_REPLACE:
2026 ret = do_replace(sock_net(sk), user, len);
2027 break;
2028
2029 case IP6T_SO_SET_ADD_COUNTERS:
2030 ret = do_add_counters(sock_net(sk), user, len, 0);
2031 break;
2032
2033 default:
2034 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
2035 ret = -EINVAL;
2036 }
2037
2038 return ret;
2039 }
2040
2041 static int
2042 do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2043 {
2044 int ret;
2045
2046 if (!capable(CAP_NET_ADMIN))
2047 return -EPERM;
2048
2049 switch (cmd) {
2050 case IP6T_SO_GET_INFO:
2051 ret = get_info(sock_net(sk), user, len, 0);
2052 break;
2053
2054 case IP6T_SO_GET_ENTRIES:
2055 ret = get_entries(sock_net(sk), user, len);
2056 break;
2057
2058 case IP6T_SO_GET_REVISION_MATCH:
2059 case IP6T_SO_GET_REVISION_TARGET: {
2060 struct ip6t_get_revision rev;
2061 int target;
2062
2063 if (*len != sizeof(rev)) {
2064 ret = -EINVAL;
2065 break;
2066 }
2067 if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
2068 ret = -EFAULT;
2069 break;
2070 }
2071
2072 if (cmd == IP6T_SO_GET_REVISION_TARGET)
2073 target = 1;
2074 else
2075 target = 0;
2076
2077 try_then_request_module(xt_find_revision(AF_INET6, rev.name,
2078 rev.revision,
2079 target, &ret),
2080 "ip6t_%s", rev.name);
2081 break;
2082 }
2083
2084 default:
2085 duprintf("do_ip6t_get_ctl: unknown request %i\n", cmd);
2086 ret = -EINVAL;
2087 }
2088
2089 return ret;
2090 }
2091
2092 struct xt_table *ip6t_register_table(struct net *net,
2093 const struct xt_table *table,
2094 const struct ip6t_replace *repl)
2095 {
2096 int ret;
2097 struct xt_table_info *newinfo;
2098 struct xt_table_info bootstrap = {0};
2099 void *loc_cpu_entry;
2100 struct xt_table *new_table;
2101
2102 newinfo = xt_alloc_table_info(repl->size);
2103 if (!newinfo) {
2104 ret = -ENOMEM;
2105 goto out;
2106 }
2107
2108 /* choose the copy on our node/cpu, but dont care about preemption */
2109 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
2110 memcpy(loc_cpu_entry, repl->entries, repl->size);
2111
2112 ret = translate_table(net, newinfo, loc_cpu_entry, repl);
2113 if (ret != 0)
2114 goto out_free;
2115
2116 new_table = xt_register_table(net, table, &bootstrap, newinfo);
2117 if (IS_ERR(new_table)) {
2118 ret = PTR_ERR(new_table);
2119 goto out_free;
2120 }
2121 return new_table;
2122
2123 out_free:
2124 xt_free_table_info(newinfo);
2125 out:
2126 return ERR_PTR(ret);
2127 }
2128
2129 void ip6t_unregister_table(struct net *net, struct xt_table *table)
2130 {
2131 struct xt_table_info *private;
2132 void *loc_cpu_entry;
2133 struct module *table_owner = table->me;
2134 struct ip6t_entry *iter;
2135
2136 private = xt_unregister_table(table);
2137
2138 /* Decrease module usage counts and free resources */
2139 loc_cpu_entry = private->entries[raw_smp_processor_id()];
2140 xt_entry_foreach(iter, loc_cpu_entry, private->size)
2141 cleanup_entry(iter, net);
2142 if (private->number > private->initial_entries)
2143 module_put(table_owner);
2144 xt_free_table_info(private);
2145 }
2146
/* Returns true if (type, code) is matched by the range, false otherwise;
 * the result is XOR-ed with the invert flag. */
static inline bool
icmp6_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
		      u_int8_t type, u_int8_t code,
		      bool invert)
{
	bool hit;

	/* hit when the type matches and code lies in [min_code, max_code] */
	hit = type == test_type &&
	      code >= min_code &&
	      code <= max_code;
	return hit != invert;
}
2156
/*
 * Match callback for the built-in "icmp6" match: test the ICMPv6 type
 * and code of the packet against the user's ip6t_icmp range, honouring
 * the invert flag.  Fragments never match; a packet too short to hold
 * an ICMPv6 header is hot-dropped.
 */
static bool
icmp6_match(const struct sk_buff *skb, const struct xt_match_param *par)
{
	const struct icmp6hdr *ic;
	struct icmp6hdr _icmph;
	const struct ip6t_icmp *icmpinfo = par->matchinfo;

	/* Must not be a fragment. */
	if (par->fragoff != 0)
		return false;

	ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
	if (ic == NULL) {
		/* We've been asked to examine this packet, and we
		 * can't. Hence, no choice but to drop.
		 */
		duprintf("Dropping evil ICMP tinygram.\n");
		*par->hotdrop = true;
		return false;
	}

	return icmp6_type_code_match(icmpinfo->type,
				     icmpinfo->code[0],
				     icmpinfo->code[1],
				     ic->icmp6_type, ic->icmp6_code,
				     !!(icmpinfo->invflags&IP6T_ICMP_INV));
}
2184
2185 /* Called when user tries to insert an entry of this type. */
2186 static int icmp6_checkentry(const struct xt_mtchk_param *par)
2187 {
2188 const struct ip6t_icmp *icmpinfo = par->matchinfo;
2189
2190 /* Must specify no unknown invflags */
2191 return (icmpinfo->invflags & ~IP6T_ICMP_INV) ? -EINVAL : 0;
2192 }
2193
/* The built-in targets: standard (NULL) and error.  The standard target
 * has no ->target function; its verdict is the int payload itself. */
static struct xt_target ip6t_standard_target __read_mostly = {
	.name		= IP6T_STANDARD_TARGET,
	.targetsize	= sizeof(int),	/* the verdict value */
	.family		= NFPROTO_IPV6,
#ifdef CONFIG_COMPAT
	/* verdicts need 32/64-bit translation on compat kernels */
	.compatsize	= sizeof(compat_int_t),
	.compat_from_user = compat_standard_from_user,
	.compat_to_user	= compat_standard_to_user,
#endif
};
2205
/* The ERROR target: marks chain heads/ends in the ruleset; its payload
 * is an error/chain name of up to IP6T_FUNCTION_MAXNAMELEN bytes. */
static struct xt_target ip6t_error_target __read_mostly = {
	.name		= IP6T_ERROR_TARGET,
	.target		= ip6t_error,
	.targetsize	= IP6T_FUNCTION_MAXNAMELEN,
	.family		= NFPROTO_IPV6,
};
2212
/* get/setsockopt registration: routes the IP6T_SO_* option range on
 * PF_INET6 sockets to the dispatchers above (plus compat variants). */
static struct nf_sockopt_ops ip6t_sockopts = {
	.pf		= PF_INET6,
	.set_optmin	= IP6T_BASE_CTL,
	.set_optmax	= IP6T_SO_SET_MAX+1,
	.set		= do_ip6t_set_ctl,
#ifdef CONFIG_COMPAT
	.compat_set	= compat_do_ip6t_set_ctl,
#endif
	.get_optmin	= IP6T_BASE_CTL,
	.get_optmax	= IP6T_SO_GET_MAX+1,
	.get		= do_ip6t_get_ctl,
#ifdef CONFIG_COMPAT
	.compat_get	= compat_do_ip6t_get_ctl,
#endif
	.owner		= THIS_MODULE,
};
2229
/* Built-in "icmp6" match: restricted to IPPROTO_ICMPV6 rules on IPv6. */
static struct xt_match icmp6_matchstruct __read_mostly = {
	.name		= "icmp6",
	.match		= icmp6_match,
	.matchsize	= sizeof(struct ip6t_icmp),
	.checkentry	= icmp6_checkentry,
	.proto		= IPPROTO_ICMPV6,
	.family		= NFPROTO_IPV6,
};
2238
/* Per-namespace setup, delegated to the x_tables core. */
static int __net_init ip6_tables_net_init(struct net *net)
{
	return xt_proto_init(net, NFPROTO_IPV6);
}
2243
/* Per-namespace teardown, mirror of ip6_tables_net_init(). */
static void __net_exit ip6_tables_net_exit(struct net *net)
{
	xt_proto_fini(net, NFPROTO_IPV6);
}
2248
/* Hook the per-namespace init/exit pair into the pernet machinery. */
static struct pernet_operations ip6_tables_net_ops = {
	.init = ip6_tables_net_init,
	.exit = ip6_tables_net_exit,
};
2253
/*
 * Module init: register (in order) the pernet ops, the two built-in
 * targets, the icmp6 match and the sockopt interface.  The error labels
 * unwind in exactly the reverse order, so their sequence must mirror
 * the registrations above.
 */
static int __init ip6_tables_init(void)
{
	int ret;

	ret = register_pernet_subsys(&ip6_tables_net_ops);
	if (ret < 0)
		goto err1;

	/* Noone else will be downing sem now, so we won't sleep */
	ret = xt_register_target(&ip6t_standard_target);
	if (ret < 0)
		goto err2;
	ret = xt_register_target(&ip6t_error_target);
	if (ret < 0)
		goto err3;
	ret = xt_register_match(&icmp6_matchstruct);
	if (ret < 0)
		goto err4;

	/* Register setsockopt */
	ret = nf_register_sockopt(&ip6t_sockopts);
	if (ret < 0)
		goto err5;

	pr_info("(C) 2000-2006 Netfilter Core Team\n");
	return 0;

err5:
	xt_unregister_match(&icmp6_matchstruct);
err4:
	xt_unregister_target(&ip6t_error_target);
err3:
	xt_unregister_target(&ip6t_standard_target);
err2:
	unregister_pernet_subsys(&ip6_tables_net_ops);
err1:
	return ret;
}
2292
/* Module exit: unregister everything in reverse order of
 * ip6_tables_init(). */
static void __exit ip6_tables_fini(void)
{
	nf_unregister_sockopt(&ip6t_sockopts);

	xt_unregister_match(&icmp6_matchstruct);
	xt_unregister_target(&ip6t_error_target);
	xt_unregister_target(&ip6t_standard_target);

	unregister_pernet_subsys(&ip6_tables_net_ops);
}
2303
/*
 * find the offset to specified header or the protocol number of last header
 * if target < 0. "last header" is transport protocol header, ESP, or
 * "No next header".
 *
 * If target header is found, its offset is set in *offset and return protocol
 * number. Otherwise, return -1 (in practice: -ENOENT when the target is
 * absent, -EBADMSG when a header cannot be linearized).
 *
 * If the first fragment doesn't contain the final protocol header or
 * NEXTHDR_NONE it is considered invalid.
 *
 * Note that non-1st fragment is special case that "the protocol number
 * of last header" is "next header" field in Fragment header. In this case,
 * *offset is meaningless and fragment offset is stored in *fragoff if fragoff
 * isn't NULL.
 *
 */
int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
		  int target, unsigned short *fragoff)
{
	/* start walking right after the fixed IPv6 header */
	unsigned int start = skb_network_offset(skb) + sizeof(struct ipv6hdr);
	u8 nexthdr = ipv6_hdr(skb)->nexthdr;
	unsigned int len = skb->len - start;

	if (fragoff)
		*fragoff = 0;

	while (nexthdr != target) {
		struct ipv6_opt_hdr _hdr, *hp;
		unsigned int hdrlen;

		/* reached a non-extension header (or "no next header") */
		if ((!ipv6_ext_hdr(nexthdr)) || nexthdr == NEXTHDR_NONE) {
			if (target < 0)
				break;	/* caller wanted the last header */
			return -ENOENT;
		}

		hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr);
		if (hp == NULL)
			return -EBADMSG;
		if (nexthdr == NEXTHDR_FRAGMENT) {
			unsigned short _frag_off;
			__be16 *fp;
			fp = skb_header_pointer(skb,
						start+offsetof(struct frag_hdr,
							       frag_off),
						sizeof(_frag_off),
						&_frag_off);
			if (fp == NULL)
				return -EBADMSG;

			/* mask off the MF/reserved bits */
			_frag_off = ntohs(*fp) & ~0x7;
			if (_frag_off) {
				/* non-first fragment: report the fragment
				 * header's next-header value (see above) */
				if (target < 0 &&
				    ((!ipv6_ext_hdr(hp->nexthdr)) ||
				     hp->nexthdr == NEXTHDR_NONE)) {
					if (fragoff)
						*fragoff = _frag_off;
					return hp->nexthdr;
				}
				return -ENOENT;
			}
			hdrlen = 8;	/* fragment header is fixed-size */
		} else if (nexthdr == NEXTHDR_AUTH)
			hdrlen = (hp->hdrlen + 2) << 2;	/* AH counts in 32-bit words */
		else
			hdrlen = ipv6_optlen(hp);

		nexthdr = hp->nexthdr;
		len -= hdrlen;
		start += hdrlen;
	}

	*offset = start;
	return nexthdr;
}
2380
2381 EXPORT_SYMBOL(ip6t_register_table);
2382 EXPORT_SYMBOL(ip6t_unregister_table);
2383 EXPORT_SYMBOL(ip6t_do_table);
2384 EXPORT_SYMBOL(ip6t_ext_hdr);
2385 EXPORT_SYMBOL(ipv6_find_hdr);
2386
2387 module_init(ip6_tables_init);
2388 module_exit(ip6_tables_fini);
This page took 0.081554 seconds and 6 git commands to generate.