netfilter: xt_statistic: remove nth_lock spinlock
[deliverable/linux.git] / net / ipv6 / netfilter / ip6_tables.c
... / ...
CommitLineData
1/*
2 * Packet matching code.
3 *
4 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
5 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12#include <linux/capability.h>
13#include <linux/in.h>
14#include <linux/skbuff.h>
15#include <linux/kmod.h>
16#include <linux/vmalloc.h>
17#include <linux/netdevice.h>
18#include <linux/module.h>
19#include <linux/poison.h>
20#include <linux/icmpv6.h>
21#include <net/ipv6.h>
22#include <net/compat.h>
23#include <asm/uaccess.h>
24#include <linux/mutex.h>
25#include <linux/proc_fs.h>
26#include <linux/err.h>
27#include <linux/cpumask.h>
28
29#include <linux/netfilter_ipv6/ip6_tables.h>
30#include <linux/netfilter/x_tables.h>
31#include <net/netfilter/nf_log.h>
32#include "../../netfilter/xt_repldata.h"
33
34MODULE_LICENSE("GPL");
35MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
36MODULE_DESCRIPTION("IPv6 packet filter");
37
38/*#define DEBUG_IP_FIREWALL*/
39/*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
40/*#define DEBUG_IP_FIREWALL_USER*/
41
42#ifdef DEBUG_IP_FIREWALL
43#define dprintf(format, args...) pr_info(format , ## args)
44#else
45#define dprintf(format, args...)
46#endif
47
48#ifdef DEBUG_IP_FIREWALL_USER
49#define duprintf(format, args...) pr_info(format , ## args)
50#else
51#define duprintf(format, args...)
52#endif
53
54#ifdef CONFIG_NETFILTER_DEBUG
55#define IP_NF_ASSERT(x) WARN_ON(!(x))
56#else
57#define IP_NF_ASSERT(x)
58#endif
59
60#if 0
61/* All the better to debug you with... */
62#define static
63#define inline
64#endif
65
/*
 * ip6t_alloc_initial_table - allocate the bootstrap replace blob for @info
 *
 * Thin wrapper around the xt_alloc_initial_table() helper macro, which
 * token-pastes the "ip6t"/"IP6T" prefixes to select the ip6tables-specific
 * repldata layout (see ../../netfilter/xt_repldata.h).  Caller owns the
 * returned blob.
 */
void *ip6t_alloc_initial_table(const struct xt_table *info)
{
	return xt_alloc_initial_table(ip6t, IP6T);
}
EXPORT_SYMBOL_GPL(ip6t_alloc_initial_table);
71
72/*
73 We keep a set of rules for each CPU, so we can avoid write-locking
74 them in the softirq when updating the counters and therefore
75 only need to read-lock in the softirq; doing a write_lock_bh() in user
76 context stops packets coming through and allows user context to read
77 the counters or update the rules.
78
79 Hence the start of any table is given by get_table() below. */
80
81/* Check for an extension */
82int
83ip6t_ext_hdr(u8 nexthdr)
84{
85 return ( (nexthdr == IPPROTO_HOPOPTS) ||
86 (nexthdr == IPPROTO_ROUTING) ||
87 (nexthdr == IPPROTO_FRAGMENT) ||
88 (nexthdr == IPPROTO_ESP) ||
89 (nexthdr == IPPROTO_AH) ||
90 (nexthdr == IPPROTO_NONE) ||
91 (nexthdr == IPPROTO_DSTOPTS) );
92}
93
/* Returns whether matches rule or not. */
/* Performance critical - called for every packet */
static inline bool
ip6_packet_match(const struct sk_buff *skb,
		 const char *indev,
		 const char *outdev,
		 const struct ip6t_ip6 *ip6info,
		 unsigned int *protoff,
		 int *fragoff, bool *hotdrop)
{
	unsigned long ret;
	const struct ipv6hdr *ipv6 = ipv6_hdr(skb);

/* XOR the raw test result with the rule's inversion bit, so '!' rules
 * invert the sense of the comparison. */
#define FWINV(bool, invflg) ((bool) ^ !!(ip6info->invflags & (invflg)))

	/* Source/destination address match under the rule's mask. */
	if (FWINV(ipv6_masked_addr_cmp(&ipv6->saddr, &ip6info->smsk,
				       &ip6info->src), IP6T_INV_SRCIP) ||
	    FWINV(ipv6_masked_addr_cmp(&ipv6->daddr, &ip6info->dmsk,
				       &ip6info->dst), IP6T_INV_DSTIP)) {
		dprintf("Source or dest mismatch.\n");
/*
		dprintf("SRC: %u. Mask: %u. Target: %u.%s\n", ip->saddr,
			ipinfo->smsk.s_addr, ipinfo->src.s_addr,
			ipinfo->invflags & IP6T_INV_SRCIP ? " (INV)" : "");
		dprintf("DST: %u. Mask: %u. Target: %u.%s\n", ip->daddr,
			ipinfo->dmsk.s_addr, ipinfo->dst.s_addr,
			ipinfo->invflags & IP6T_INV_DSTIP ? " (INV)" : "");*/
		return false;
	}

	/* Incoming interface name comparison (mask supports '+' wildcards). */
	ret = ifname_compare_aligned(indev, ip6info->iniface, ip6info->iniface_mask);

	if (FWINV(ret != 0, IP6T_INV_VIA_IN)) {
		dprintf("VIA in mismatch (%s vs %s).%s\n",
			indev, ip6info->iniface,
			ip6info->invflags&IP6T_INV_VIA_IN ?" (INV)":"");
		return false;
	}

	/* Outgoing interface name comparison. */
	ret = ifname_compare_aligned(outdev, ip6info->outiface, ip6info->outiface_mask);

	if (FWINV(ret != 0, IP6T_INV_VIA_OUT)) {
		dprintf("VIA out mismatch (%s vs %s).%s\n",
			outdev, ip6info->outiface,
			ip6info->invflags&IP6T_INV_VIA_OUT ?" (INV)":"");
		return false;
	}

/* ... might want to do something with class and flowlabel here ... */

	/* look for the desired protocol header */
	if((ip6info->flags & IP6T_F_PROTO)) {
		int protohdr;
		unsigned short _frag_off;

		protohdr = ipv6_find_hdr(skb, protoff, -1, &_frag_off);
		if (protohdr < 0) {
			/* Header chain could not be parsed on a non-fragment:
			 * ask the caller to drop the packet outright. */
			if (_frag_off == 0)
				*hotdrop = true;
			return false;
		}
		*fragoff = _frag_off;

		dprintf("Packet protocol %hi ?= %s%hi.\n",
			protohdr,
			ip6info->invflags & IP6T_INV_PROTO ? "!":"",
			ip6info->proto);

		if (ip6info->proto == protohdr) {
			if(ip6info->invflags & IP6T_INV_PROTO) {
				return false;
			}
			return true;
		}

		/* We need match for the '-p all', too! */
		if ((ip6info->proto != 0) &&
			!(ip6info->invflags & IP6T_INV_PROTO))
			return false;
	}
	return true;
}
176
177/* should be ip6 safe */
178static bool
179ip6_checkentry(const struct ip6t_ip6 *ipv6)
180{
181 if (ipv6->flags & ~IP6T_F_MASK) {
182 duprintf("Unknown flag bits set: %08X\n",
183 ipv6->flags & ~IP6T_F_MASK);
184 return false;
185 }
186 if (ipv6->invflags & ~IP6T_INV_MASK) {
187 duprintf("Unknown invflag bits set: %08X\n",
188 ipv6->invflags & ~IP6T_INV_MASK);
189 return false;
190 }
191 return true;
192}
193
/*
 * Target hook for ERROR entries: rate-limited log of the error name
 * carried in targinfo, then drop the packet.
 */
static unsigned int
ip6t_error(struct sk_buff *skb, const struct xt_action_param *par)
{
	if (net_ratelimit())
		pr_info("error: `%s'\n", (const char *)par->targinfo);

	return NF_DROP;
}
202
/* Return the rule entry that lives @offset bytes into the blob @base. */
static inline struct ip6t_entry *
get_entry(const void *base, unsigned int offset)
{
	return (struct ip6t_entry *)((char *)base + offset);
}
208
209/* All zeroes == unconditional rule. */
210/* Mildly perf critical (only if packet tracing is on) */
211static inline bool unconditional(const struct ip6t_ip6 *ipv6)
212{
213 static const struct ip6t_ip6 uncond;
214
215 return memcmp(ipv6, &uncond, sizeof(uncond)) == 0;
216}
217
/* const-correct wrapper: ip6t_get_target() wants a non-const entry,
 * but reading the target of a const entry is safe. */
static inline const struct ip6t_entry_target *
ip6t_get_target_c(const struct ip6t_entry *e)
{
	struct ip6t_entry *mutable_e = (struct ip6t_entry *)e;

	return ip6t_get_target(mutable_e);
}
223
#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
/* This cries for unification! */
/* Human-readable names for the netfilter hook points, indexed by hook. */
static const char *const hooknames[] = {
	[NF_INET_PRE_ROUTING]		= "PREROUTING",
	[NF_INET_LOCAL_IN]		= "INPUT",
	[NF_INET_FORWARD]		= "FORWARD",
	[NF_INET_LOCAL_OUT]		= "OUTPUT",
	[NF_INET_POST_ROUTING]		= "POSTROUTING",
};

/* Which kind of rule a TRACE line refers to. */
enum nf_ip_trace_comments {
	NF_IP6_TRACE_COMMENT_RULE,
	NF_IP6_TRACE_COMMENT_RETURN,
	NF_IP6_TRACE_COMMENT_POLICY,
};

static const char *const comments[] = {
	[NF_IP6_TRACE_COMMENT_RULE]	= "rule",
	[NF_IP6_TRACE_COMMENT_RETURN]	= "return",
	[NF_IP6_TRACE_COMMENT_POLICY]	= "policy",
};

/* Logging parameters for TRACE output: kern.warning, all log flags on. */
static struct nf_loginfo trace_loginfo = {
	.type = NF_LOG_TYPE_LOG,
	.u = {
		.log = {
			.level = 4,
			.logflags = NF_LOG_MASK,
		},
	},
};

/* Mildly perf critical (only if packet tracing is on) */
/* Walk one rule @s of a chain while searching for the matched rule @e.
 * Updates *chainname when a user-chain head (ERROR target) is seen and
 * *rulenum as rules go by; returns 1 once @e itself is reached, with
 * *comment set to rule/return/policy as appropriate. */
static inline int
get_chainname_rulenum(const struct ip6t_entry *s, const struct ip6t_entry *e,
		      const char *hookname, const char **chainname,
		      const char **comment, unsigned int *rulenum)
{
	const struct ip6t_standard_target *t = (void *)ip6t_get_target_c(s);

	if (strcmp(t->target.u.kernel.target->name, IP6T_ERROR_TARGET) == 0) {
		/* Head of user chain: ERROR target with chainname */
		*chainname = t->target.data;
		(*rulenum) = 0;
	} else if (s == e) {
		(*rulenum)++;

		if (s->target_offset == sizeof(struct ip6t_entry) &&
		    strcmp(t->target.u.kernel.target->name,
			   IP6T_STANDARD_TARGET) == 0 &&
		    t->verdict < 0 &&
		    unconditional(&s->ipv6)) {
			/* Tail of chains: STANDARD target (return/policy) */
			*comment = *chainname == hookname
				? comments[NF_IP6_TRACE_COMMENT_POLICY]
				: comments[NF_IP6_TRACE_COMMENT_RETURN];
		}
		return 1;
	} else
		(*rulenum)++;

	return 0;
}

/* Emit one "TRACE: table:chain:comment:rulenum" log line for a packet
 * that matched entry @e.  Scans this CPU's copy of the table from the
 * hook's entry point to recover chain name and rule number. */
static void trace_packet(const struct sk_buff *skb,
			 unsigned int hook,
			 const struct net_device *in,
			 const struct net_device *out,
			 const char *tablename,
			 const struct xt_table_info *private,
			 const struct ip6t_entry *e)
{
	const void *table_base;
	const struct ip6t_entry *root;
	const char *hookname, *chainname, *comment;
	const struct ip6t_entry *iter;
	unsigned int rulenum = 0;

	table_base = private->entries[smp_processor_id()];
	root = get_entry(table_base, private->hook_entry[hook]);

	hookname = chainname = hooknames[hook];
	comment = comments[NF_IP6_TRACE_COMMENT_RULE];

	xt_entry_foreach(iter, root, private->size - private->hook_entry[hook])
		if (get_chainname_rulenum(iter, e, hookname,
		    &chainname, &comment, &rulenum) != 0)
			break;

	nf_log_packet(AF_INET6, hook, skb, in, out, &trace_loginfo,
		      "TRACE: %s:%s:%s:%u ",
		      tablename, chainname, comment, rulenum);
}
#endif
319
320static inline __pure struct ip6t_entry *
321ip6t_next_entry(const struct ip6t_entry *entry)
322{
323 return (void *)entry + entry->next_offset;
324}
325
/* Returns one of the generic firewall policies, like NF_ACCEPT. */
unsigned int
ip6t_do_table(struct sk_buff *skb,
	      unsigned int hook,
	      const struct net_device *in,
	      const struct net_device *out,
	      struct xt_table *table)
{
	static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
	/* Initializing verdict to NF_DROP keeps gcc happy. */
	unsigned int verdict = NF_DROP;
	const char *indev, *outdev;
	const void *table_base;
	struct ip6t_entry *e, **jumpstack;
	unsigned int *stackptr, origptr, cpu;
	const struct xt_table_info *private;
	struct xt_action_param acpar;

	/* Initialization */
	indev = in ? in->name : nulldevname;
	outdev = out ? out->name : nulldevname;
	/* We handle fragments by dealing with the first fragment as
	 * if it was a normal packet. All other fragments are treated
	 * normally, except that they will NEVER match rules that ask
	 * things we don't know, ie. tcp syn flag or ports). If the
	 * rule is also a fragment-specific rule, non-fragments won't
	 * match it. */
	acpar.hotdrop = false;
	acpar.in = in;
	acpar.out = out;
	acpar.family = NFPROTO_IPV6;
	acpar.hooknum = hook;

	IP_NF_ASSERT(table->valid_hooks & (1 << hook));

	/* Per-cpu recursion-safe read lock; writers (counter readers /
	 * table replace) take the write side per cpu. */
	xt_info_rdlock_bh();
	private = table->private;
	cpu        = smp_processor_id();
	table_base = private->entries[cpu];
	/* Per-cpu stack of return addresses for chain jumps. */
	jumpstack  = (struct ip6t_entry **)private->jumpstack[cpu];
	stackptr   = &private->stackptr[cpu];
	origptr    = *stackptr;

	e = get_entry(table_base, private->hook_entry[hook]);

	do {
		const struct ip6t_entry_target *t;
		const struct xt_entry_match *ematch;

		IP_NF_ASSERT(e);
		if (!ip6_packet_match(skb, indev, outdev, &e->ipv6,
		    &acpar.thoff, &acpar.fragoff, &acpar.hotdrop)) {
 no_match:
			e = ip6t_next_entry(e);
			continue;
		}

		/* IP header matched; every extension match must agree too. */
		xt_ematch_foreach(ematch, e) {
			acpar.match     = ematch->u.kernel.match;
			acpar.matchinfo = ematch->data;
			if (!acpar.match->match(skb, &acpar))
				goto no_match;
		}

		/* Account bytes (IPv6 header + payload) and one packet. */
		ADD_COUNTER(e->counters,
			    ntohs(ipv6_hdr(skb)->payload_len) +
			    sizeof(struct ipv6hdr), 1);

		t = ip6t_get_target_c(e);
		IP_NF_ASSERT(t->u.kernel.target);

#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
		/* The packet is traced: log it */
		if (unlikely(skb->nf_trace))
			trace_packet(skb, hook, in, out,
				     table->name, private, e);
#endif
		/* Standard target? */
		if (!t->u.kernel.target->target) {
			int v;

			v = ((struct ip6t_standard_target *)t)->verdict;
			if (v < 0) {
				/* Pop from stack? */
				if (v != IP6T_RETURN) {
					/* Absolute verdict (ACCEPT/DROP/...). */
					verdict = (unsigned)(-v) - 1;
					break;
				}
				if (*stackptr == 0)
					/* RETURN from a base chain: fall
					 * through to the hook's policy. */
					e = get_entry(table_base,
					    private->underflow[hook]);
				else
					e = ip6t_next_entry(jumpstack[--*stackptr]);
				continue;
			}
			/* Jump (not a goto, not a mere fall-through):
			 * push the return address. */
			if (table_base + v != ip6t_next_entry(e) &&
			    !(e->ipv6.flags & IP6T_F_GOTO)) {
				if (*stackptr >= private->stacksize) {
					verdict = NF_DROP;
					break;
				}
				jumpstack[(*stackptr)++] = e;
			}

			e = get_entry(table_base, v);
			continue;
		}

		/* Extension target: let it decide the verdict. */
		acpar.target   = t->u.kernel.target;
		acpar.targinfo = t->data;

		verdict = t->u.kernel.target->target(skb, &acpar);
		if (verdict == IP6T_CONTINUE)
			e = ip6t_next_entry(e);
		else
			/* Verdict */
			break;
	} while (!acpar.hotdrop);

	xt_info_rdunlock_bh();
	*stackptr = origptr;

#ifdef DEBUG_ALLOW_ALL
	return NF_ACCEPT;
#else
	if (acpar.hotdrop)
		return NF_DROP;
	else return verdict;
#endif
}
457
/* Figures out from what hook each rule can be called: returns 0 if
   there are loops. Puts hook bitmask in comefrom. */
static int
mark_source_chains(const struct xt_table_info *newinfo,
		   unsigned int valid_hooks, void *entry0)
{
	unsigned int hook;

	/* No recursion; use packet counter to save back ptrs (reset
	   to 0 as we leave), and comefrom to save source hook bitmask */
	for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
		unsigned int pos = newinfo->hook_entry[hook];
		struct ip6t_entry *e = (struct ip6t_entry *)(entry0 + pos);

		if (!(valid_hooks & (1 << hook)))
			continue;

		/* Set initial back pointer. */
		e->counters.pcnt = pos;

		for (;;) {
			const struct ip6t_standard_target *t
				= (void *)ip6t_get_target_c(e);
			int visited = e->comefrom & (1 << hook);

			/* Bit NF_INET_NUMHOOKS marks "on the current DFS
			 * path"; meeting it again means the chain loops. */
			if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
				pr_err("iptables: loop hook %u pos %u %08X.\n",
				       hook, pos, e->comefrom);
				return 0;
			}
			e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));

			/* Unconditional return/END. */
			if ((e->target_offset == sizeof(struct ip6t_entry) &&
			     (strcmp(t->target.u.user.name,
				     IP6T_STANDARD_TARGET) == 0) &&
			     t->verdict < 0 &&
			     unconditional(&e->ipv6)) || visited) {
				unsigned int oldpos, size;

				if ((strcmp(t->target.u.user.name,
					    IP6T_STANDARD_TARGET) == 0) &&
				    t->verdict < -NF_MAX_VERDICT - 1) {
					duprintf("mark_source_chains: bad "
						"negative verdict (%i)\n",
								t->verdict);
					return 0;
				}

				/* Return: backtrack through the last
				   big jump. */
				do {
					e->comefrom ^= (1<<NF_INET_NUMHOOKS);
#ifdef DEBUG_IP_FIREWALL_USER
					if (e->comefrom
					    & (1 << NF_INET_NUMHOOKS)) {
						duprintf("Back unset "
							 "on hook %u "
							 "rule %u\n",
							 hook, pos);
					}
#endif
					oldpos = pos;
					/* pcnt doubles as the saved back
					 * pointer during this pass. */
					pos = e->counters.pcnt;
					e->counters.pcnt = 0;

					/* We're at the start. */
					if (pos == oldpos)
						goto next;

					e = (struct ip6t_entry *)
						(entry0 + pos);
				} while (oldpos == pos + e->next_offset);

				/* Move along one */
				size = e->next_offset;
				e = (struct ip6t_entry *)
					(entry0 + pos + size);
				e->counters.pcnt = pos;
				pos += size;
			} else {
				int newpos = t->verdict;

				if (strcmp(t->target.u.user.name,
					   IP6T_STANDARD_TARGET) == 0 &&
				    newpos >= 0) {
					if (newpos > newinfo->size -
						sizeof(struct ip6t_entry)) {
						duprintf("mark_source_chains: "
							"bad verdict (%i)\n",
								newpos);
						return 0;
					}
					/* This a jump; chase it. */
					duprintf("Jump rule %u -> %u\n",
						 pos, newpos);
				} else {
					/* ... this is a fallthru */
					newpos = pos + e->next_offset;
				}
				e = (struct ip6t_entry *)
					(entry0 + newpos);
				e->counters.pcnt = pos;
				pos = newpos;
			}
		}
		next:
		duprintf("Finished chain %u\n", hook);
	}
	return 1;
}
569
570static void cleanup_match(struct ip6t_entry_match *m, struct net *net)
571{
572 struct xt_mtdtor_param par;
573
574 par.net = net;
575 par.match = m->u.kernel.match;
576 par.matchinfo = m->data;
577 par.family = NFPROTO_IPV6;
578 if (par.match->destroy != NULL)
579 par.match->destroy(&par);
580 module_put(par.match->me);
581}
582
583static int
584check_entry(const struct ip6t_entry *e, const char *name)
585{
586 const struct ip6t_entry_target *t;
587
588 if (!ip6_checkentry(&e->ipv6)) {
589 duprintf("ip_tables: ip check failed %p %s.\n", e, name);
590 return -EINVAL;
591 }
592
593 if (e->target_offset + sizeof(struct ip6t_entry_target) >
594 e->next_offset)
595 return -EINVAL;
596
597 t = ip6t_get_target_c(e);
598 if (e->target_offset + t->u.target_size > e->next_offset)
599 return -EINVAL;
600
601 return 0;
602}
603
604static int check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par)
605{
606 const struct ip6t_ip6 *ipv6 = par->entryinfo;
607 int ret;
608
609 par->match = m->u.kernel.match;
610 par->matchinfo = m->data;
611
612 ret = xt_check_match(par, m->u.match_size - sizeof(*m),
613 ipv6->proto, ipv6->invflags & IP6T_INV_PROTO);
614 if (ret < 0) {
615 duprintf("ip_tables: check failed for `%s'.\n",
616 par.match->name);
617 return ret;
618 }
619 return 0;
620}
621
622static int
623find_check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par)
624{
625 struct xt_match *match;
626 int ret;
627
628 match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name,
629 m->u.user.revision);
630 if (IS_ERR(match)) {
631 duprintf("find_check_match: `%s' not found\n", m->u.user.name);
632 return PTR_ERR(match);
633 }
634 m->u.kernel.match = match;
635
636 ret = check_match(m, par);
637 if (ret)
638 goto err;
639
640 return 0;
641err:
642 module_put(m->u.kernel.match->me);
643 return ret;
644}
645
646static int check_target(struct ip6t_entry *e, struct net *net, const char *name)
647{
648 struct ip6t_entry_target *t = ip6t_get_target(e);
649 struct xt_tgchk_param par = {
650 .net = net,
651 .table = name,
652 .entryinfo = e,
653 .target = t->u.kernel.target,
654 .targinfo = t->data,
655 .hook_mask = e->comefrom,
656 .family = NFPROTO_IPV6,
657 };
658 int ret;
659
660 t = ip6t_get_target(e);
661 ret = xt_check_target(&par, t->u.target_size - sizeof(*t),
662 e->ipv6.proto, e->ipv6.invflags & IP6T_INV_PROTO);
663 if (ret < 0) {
664 duprintf("ip_tables: check failed for `%s'.\n",
665 t->u.kernel.target->name);
666 return ret;
667 }
668 return 0;
669}
670
/* Fully validate one entry: structural checks, then resolve+check every
 * match and finally the target.  On any failure, all matches that were
 * successfully checked (the first j) are cleaned up again. */
static int
find_check_entry(struct ip6t_entry *e, struct net *net, const char *name,
		 unsigned int size)
{
	struct ip6t_entry_target *t;
	struct xt_target *target;
	int ret;
	unsigned int j;
	struct xt_mtchk_param mtpar;
	struct xt_entry_match *ematch;

	ret = check_entry(e, name);
	if (ret)
		return ret;

	/* j counts matches whose checkentry succeeded, for rollback. */
	j = 0;
	mtpar.net	= net;
	mtpar.table     = name;
	mtpar.entryinfo = &e->ipv6;
	mtpar.hook_mask = e->comefrom;
	mtpar.family    = NFPROTO_IPV6;
	xt_ematch_foreach(ematch, e) {
		ret = find_check_match(ematch, &mtpar);
		if (ret != 0)
			goto cleanup_matches;
		++j;
	}

	t = ip6t_get_target(e);
	target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name,
					t->u.user.revision);
	if (IS_ERR(target)) {
		duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
		ret = PTR_ERR(target);
		goto cleanup_matches;
	}
	t->u.kernel.target = target;

	ret = check_target(e, net, name);
	if (ret)
		goto err;
	return 0;
 err:
	module_put(t->u.kernel.target->me);
 cleanup_matches:
	/* Undo exactly the first j matches; later ones were never set up. */
	xt_ematch_foreach(ematch, e) {
		if (j-- == 0)
			break;
		cleanup_match(ematch, net);
	}
	return ret;
}
723
724static bool check_underflow(const struct ip6t_entry *e)
725{
726 const struct ip6t_entry_target *t;
727 unsigned int verdict;
728
729 if (!unconditional(&e->ipv6))
730 return false;
731 t = ip6t_get_target_c(e);
732 if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
733 return false;
734 verdict = ((struct ip6t_standard_target *)t)->verdict;
735 verdict = -verdict - 1;
736 return verdict == NF_DROP || verdict == NF_ACCEPT;
737}
738
739static int
740check_entry_size_and_hooks(struct ip6t_entry *e,
741 struct xt_table_info *newinfo,
742 const unsigned char *base,
743 const unsigned char *limit,
744 const unsigned int *hook_entries,
745 const unsigned int *underflows,
746 unsigned int valid_hooks)
747{
748 unsigned int h;
749
750 if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0 ||
751 (unsigned char *)e + sizeof(struct ip6t_entry) >= limit) {
752 duprintf("Bad offset %p\n", e);
753 return -EINVAL;
754 }
755
756 if (e->next_offset
757 < sizeof(struct ip6t_entry) + sizeof(struct ip6t_entry_target)) {
758 duprintf("checking: element %p size %u\n",
759 e, e->next_offset);
760 return -EINVAL;
761 }
762
763 /* Check hooks & underflows */
764 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
765 if (!(valid_hooks & (1 << h)))
766 continue;
767 if ((unsigned char *)e - base == hook_entries[h])
768 newinfo->hook_entry[h] = hook_entries[h];
769 if ((unsigned char *)e - base == underflows[h]) {
770 if (!check_underflow(e)) {
771 pr_err("Underflows must be unconditional and "
772 "use the STANDARD target with "
773 "ACCEPT/DROP\n");
774 return -EINVAL;
775 }
776 newinfo->underflow[h] = underflows[h];
777 }
778 }
779
780 /* Clear counters and comefrom */
781 e->counters = ((struct xt_counters) { 0, 0 });
782 e->comefrom = 0;
783 return 0;
784}
785
786static void cleanup_entry(struct ip6t_entry *e, struct net *net)
787{
788 struct xt_tgdtor_param par;
789 struct ip6t_entry_target *t;
790 struct xt_entry_match *ematch;
791
792 /* Cleanup all matches */
793 xt_ematch_foreach(ematch, e)
794 cleanup_match(ematch, net);
795 t = ip6t_get_target(e);
796
797 par.net = net;
798 par.target = t->u.kernel.target;
799 par.targinfo = t->data;
800 par.family = NFPROTO_IPV6;
801 if (par.target->destroy != NULL)
802 par.target->destroy(&par);
803 module_put(par.target->me);
804}
805
/* Checks and translates the user-supplied table segment (held in
   newinfo) */
static int
translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
		const struct ip6t_replace *repl)
{
	struct ip6t_entry *iter;
	unsigned int i;
	int ret = 0;

	newinfo->size = repl->size;
	newinfo->number = repl->num_entries;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = 0xFFFFFFFF;
		newinfo->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_table: size %u\n", newinfo->size);
	i = 0;
	/* Walk through entries, checking offsets. */
	xt_entry_foreach(iter, entry0, newinfo->size) {
		ret = check_entry_size_and_hooks(iter, newinfo, entry0,
						 entry0 + repl->size,
						 repl->hook_entry,
						 repl->underflow,
						 repl->valid_hooks);
		if (ret != 0)
			return ret;
		++i;
		/* Each user-chain head (ERROR target) can be jumped into,
		 * so it needs a slot on the per-cpu jump stack. */
		if (strcmp(ip6t_get_target(iter)->u.user.name,
		    XT_ERROR_TARGET) == 0)
			++newinfo->stacksize;
	}

	if (i != repl->num_entries) {
		duprintf("translate_table: %u not %u entries\n",
			 i, repl->num_entries);
		return -EINVAL;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(repl->valid_hooks & (1 << i)))
			continue;
		if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, repl->hook_entry[i]);
			return -EINVAL;
		}
		if (newinfo->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, repl->underflow[i]);
			return -EINVAL;
		}
	}

	if (!mark_source_chains(newinfo, repl->valid_hooks, entry0))
		return -ELOOP;

	/* Finally, each sanity check must pass */
	i = 0;
	xt_entry_foreach(iter, entry0, newinfo->size) {
		ret = find_check_entry(iter, net, repl->name, repl->size);
		if (ret != 0)
			break;
		++i;
	}

	if (ret != 0) {
		/* Roll back exactly the i entries that passed checking. */
		xt_entry_foreach(iter, entry0, newinfo->size) {
			if (i-- == 0)
				break;
			cleanup_entry(iter, net);
		}
		return ret;
	}

	/* And one copy for every other CPU */
	for_each_possible_cpu(i) {
		if (newinfo->entries[i] && newinfo->entries[i] != entry0)
			memcpy(newinfo->entries[i], entry0, newinfo->size);
	}

	return ret;
}
894
/* Snapshot per-rule packet/byte counters from every CPU's table copy
 * into @counters (one slot per rule). */
static void
get_counters(const struct xt_table_info *t,
	     struct xt_counters counters[])
{
	struct ip6t_entry *iter;
	unsigned int cpu;
	unsigned int i;
	unsigned int curcpu;

	/* Instead of clearing (by a previous call to memset())
	 * the counters and using adds, we set the counters
	 * with data used by 'current' CPU
	 *
	 * Bottom half has to be disabled to prevent deadlock
	 * if new softirq were to run and call ipt_do_table
	 */
	local_bh_disable();
	curcpu = smp_processor_id();

	/* Seed with the local CPU's values (no extra locking needed
	 * beyond BH-off on this CPU). */
	i = 0;
	xt_entry_foreach(iter, t->entries[curcpu], t->size) {
		SET_COUNTER(counters[i], iter->counters.bcnt,
			    iter->counters.pcnt);
		++i;
	}

	/* Then add every other CPU's values under that CPU's write lock,
	 * which excludes its packet-path readers while we read. */
	for_each_possible_cpu(cpu) {
		if (cpu == curcpu)
			continue;
		i = 0;
		xt_info_wrlock(cpu);
		xt_entry_foreach(iter, t->entries[cpu], t->size) {
			ADD_COUNTER(counters[i], iter->counters.bcnt,
				    iter->counters.pcnt);
			++i;
		}
		xt_info_wrunlock(cpu);
	}
	local_bh_enable();
}
935
936static struct xt_counters *alloc_counters(const struct xt_table *table)
937{
938 unsigned int countersize;
939 struct xt_counters *counters;
940 const struct xt_table_info *private = table->private;
941
942 /* We need atomic snapshot of counters: rest doesn't change
943 (other than comefrom, which userspace doesn't care
944 about). */
945 countersize = sizeof(struct xt_counters) * private->number;
946 counters = vmalloc_node(countersize, numa_node_id());
947
948 if (counters == NULL)
949 return ERR_PTR(-ENOMEM);
950
951 get_counters(private, counters);
952
953 return counters;
954}
955
/* Copy the whole rule blob to userspace, then patch in the snapshotted
 * counters and replace kernel-internal match/target pointers with the
 * user-visible names. */
static int
copy_entries_to_user(unsigned int total_size,
		     const struct xt_table *table,
		     void __user *userptr)
{
	unsigned int off, num;
	const struct ip6t_entry *e;
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;
	int ret = 0;
	const void *loc_cpu_entry;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	/* choose the copy that is on our node/cpu, ...
	 * This choice is lazy (because current thread is
	 * allowed to migrate to another cpu)
	 */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
		ret = -EFAULT;
		goto free_counters;
	}

	/* FIXME: use iterator macros --RR */
	/* ... then go back and fix counters and names */
	for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
		unsigned int i;
		const struct ip6t_entry_match *m;
		const struct ip6t_entry_target *t;

		e = (struct ip6t_entry *)(loc_cpu_entry + off);
		/* Overwrite the copied (raw) counters with the snapshot. */
		if (copy_to_user(userptr + off
				 + offsetof(struct ip6t_entry, counters),
				 &counters[num],
				 sizeof(counters[num])) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}

		/* Rewrite each match's kernel pointer as its user name. */
		for (i = sizeof(struct ip6t_entry);
		     i < e->target_offset;
		     i += m->u.match_size) {
			m = (void *)e + i;

			if (copy_to_user(userptr + off + i
					 + offsetof(struct ip6t_entry_match,
						    u.user.name),
					 m->u.kernel.match->name,
					 strlen(m->u.kernel.match->name)+1)
			    != 0) {
				ret = -EFAULT;
				goto free_counters;
			}
		}

		/* Same for the target. */
		t = ip6t_get_target_c(e);
		if (copy_to_user(userptr + off + e->target_offset
				 + offsetof(struct ip6t_entry_target,
					    u.user.name),
				 t->u.kernel.target->name,
				 strlen(t->u.kernel.target->name)+1) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}
	}

 free_counters:
	vfree(counters);
	return ret;
}
1029
1030#ifdef CONFIG_COMPAT
1031static void compat_standard_from_user(void *dst, const void *src)
1032{
1033 int v = *(compat_int_t *)src;
1034
1035 if (v > 0)
1036 v += xt_compat_calc_jump(AF_INET6, v);
1037 memcpy(dst, &v, sizeof(v));
1038}
1039
1040static int compat_standard_to_user(void __user *dst, const void *src)
1041{
1042 compat_int_t cv = *(int *)src;
1043
1044 if (cv > 0)
1045 cv -= xt_compat_calc_jump(AF_INET6, cv);
1046 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
1047}
1048
/* Compute how much smaller one entry is in compat layout, record the
 * per-entry delta with xt_compat_add_offset(), and shrink newinfo's
 * size and any hook/underflow offsets that lie past this entry. */
static int compat_calc_entry(const struct ip6t_entry *e,
			     const struct xt_table_info *info,
			     const void *base, struct xt_table_info *newinfo)
{
	const struct xt_entry_match *ematch;
	const struct ip6t_entry_target *t;
	unsigned int entry_offset;
	int off, i, ret;

	/* Fixed-header shrinkage plus each extension's own delta. */
	off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
	entry_offset = (void *)e - base;
	xt_ematch_foreach(ematch, e)
		off += xt_compat_match_offset(ematch->u.kernel.match);
	t = ip6t_get_target_c(e);
	off += xt_compat_target_offset(t->u.kernel.target);
	newinfo->size -= off;
	ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
	if (ret)
		return ret;

	/* Entries before a hook/underflow point shift it down by off. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		if (info->hook_entry[i] &&
		    (e < (struct ip6t_entry *)(base + info->hook_entry[i])))
			newinfo->hook_entry[i] -= off;
		if (info->underflow[i] &&
		    (e < (struct ip6t_entry *)(base + info->underflow[i])))
			newinfo->underflow[i] -= off;
	}
	return 0;
}
1079
1080static int compat_table_info(const struct xt_table_info *info,
1081 struct xt_table_info *newinfo)
1082{
1083 struct ip6t_entry *iter;
1084 void *loc_cpu_entry;
1085 int ret;
1086
1087 if (!newinfo || !info)
1088 return -EINVAL;
1089
1090 /* we dont care about newinfo->entries[] */
1091 memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
1092 newinfo->initial_entries = 0;
1093 loc_cpu_entry = info->entries[raw_smp_processor_id()];
1094 xt_entry_foreach(iter, loc_cpu_entry, info->size) {
1095 ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
1096 if (ret != 0)
1097 return ret;
1098 }
1099 return 0;
1100}
1101#endif
1102
/* IP6T_SO_GET_INFO handler: look up a table by name (loading its module
 * on demand) and copy its hook offsets, entry count and size to user.
 * In compat mode the sizes/offsets are first recomputed for the 32-bit
 * layout under the compat lock. */
static int get_info(struct net *net, void __user *user,
		    const int *len, int compat)
{
	char name[IP6T_TABLE_MAXNAMELEN];
	struct xt_table *t;
	int ret;

	if (*len != sizeof(struct ip6t_getinfo)) {
		duprintf("length %u != %zu\n", *len,
			 sizeof(struct ip6t_getinfo));
		return -EINVAL;
	}

	if (copy_from_user(name, user, sizeof(name)) != 0)
		return -EFAULT;

	/* Force NUL termination of the user-supplied table name. */
	name[IP6T_TABLE_MAXNAMELEN-1] = '\0';
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_lock(AF_INET6);
#endif
	t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
				    "ip6table_%s", name);
	if (t && !IS_ERR(t)) {
		struct ip6t_getinfo info;
		const struct xt_table_info *private = t->private;
#ifdef CONFIG_COMPAT
		struct xt_table_info tmp;

		if (compat) {
			/* Report compat-layout sizes, not native ones. */
			ret = compat_table_info(private, &tmp);
			xt_compat_flush_offsets(AF_INET6);
			private = &tmp;
		}
#endif
		info.valid_hooks = t->valid_hooks;
		memcpy(info.hook_entry, private->hook_entry,
		       sizeof(info.hook_entry));
		memcpy(info.underflow, private->underflow,
		       sizeof(info.underflow));
		info.num_entries = private->number;
		info.size = private->size;
		strcpy(info.name, name);

		if (copy_to_user(user, &info, *len) != 0)
			ret = -EFAULT;
		else
			ret = 0;

		xt_table_unlock(t);
		module_put(t->me);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_unlock(AF_INET6);
#endif
	return ret;
}
1162
/* IP6T_SO_GET_ENTRIES handler: copy a table's rule blob (with fixed-up
 * counters and names) to userspace.  The user must pass the exact size
 * previously reported by get_info(); -EAGAIN signals a size race. */
static int
get_entries(struct net *net, struct ip6t_get_entries __user *uptr,
	    const int *len)
{
	int ret;
	struct ip6t_get_entries get;
	struct xt_table *t;

	if (*len < sizeof(get)) {
		duprintf("get_entries: %u < %zu\n", *len, sizeof(get));
		return -EINVAL;
	}
	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;
	/* Total length must be header plus exactly the claimed blob size. */
	if (*len != sizeof(struct ip6t_get_entries) + get.size) {
		duprintf("get_entries: %u != %zu\n",
			 *len, sizeof(get) + get.size);
		return -EINVAL;
	}

	t = xt_find_table_lock(net, AF_INET6, get.name);
	if (t && !IS_ERR(t)) {
		struct xt_table_info *private = t->private;
		duprintf("t->private->number = %u\n", private->number);
		if (get.size == private->size)
			ret = copy_entries_to_user(private->size,
						   t, uptr->entrytable);
		else {
			/* Table was replaced since get_info(): retry. */
			duprintf("get_entries: I've got %u not %u!\n",
				 private->size, get.size);
			ret = -EAGAIN;
		}
		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;

	return ret;
}
1202
/*
 * __do_replace - atomically swap a table's rule blob for @newinfo.
 * @net:          network namespace of the table
 * @name:         table name (must already be NUL-terminated by the caller)
 * @valid_hooks:  hook mask the caller claims; must match the table's
 * @newinfo:      fully translated replacement blob (ownership passes to the
 *                table on success; on failure the caller must free it)
 * @num_counters: number of counter slots userspace expects back
 * @counters_ptr: userspace buffer receiving the OLD table's final counters
 *
 * Returns 0 on success or a negative errno.
 */
static int
__do_replace(struct net *net, const char *name, unsigned int valid_hooks,
	     struct xt_table_info *newinfo, unsigned int num_counters,
	     void __user *counters_ptr)
{
	int ret;
	struct xt_table *t;
	struct xt_table_info *oldinfo;
	struct xt_counters *counters;
	const void *loc_cpu_old_entry;
	struct ip6t_entry *iter;

	ret = 0;
	/* Allocate the counter snapshot buffer up front so an OOM is
	 * detected before the table is touched at all. */
	counters = vmalloc_node(num_counters * sizeof(struct xt_counters),
				numa_node_id());
	if (!counters) {
		ret = -ENOMEM;
		goto out;
	}

	t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
				    "ip6table_%s", name);
	if (!t || IS_ERR(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free_newinfo_counters_untrans;
	}

	/* You lied! */
	if (valid_hooks != t->valid_hooks) {
		duprintf("Valid hook crap: %08X vs %08X\n",
			 valid_hooks, t->valid_hooks);
		ret = -EINVAL;
		goto put_module;
	}

	/* Swap happens here; on failure xt_replace_table sets ret. */
	oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
	if (!oldinfo)
		goto put_module;

	/* Update module usage count based on number of rules */
	duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
		 oldinfo->number, oldinfo->initial_entries, newinfo->number);
	/* Drop the reference pinned by user-added rules in the old table,
	 * and keep (or drop) one for the new table depending on whether it
	 * carries more than the built-in entries. The two conditionals
	 * together implement a net change of -1, 0 or -2 references. */
	if ((oldinfo->number > oldinfo->initial_entries) ||
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);
	if ((oldinfo->number > oldinfo->initial_entries) &&
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);

	/* Get the old counters, and synchronize with replace */
	get_counters(oldinfo, counters);

	/* Decrease module usage counts and free resource */
	loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
	xt_entry_foreach(iter, loc_cpu_old_entry, oldinfo->size)
		cleanup_entry(iter, net);

	xt_free_table_info(oldinfo);
	/* A failed copy_to_user here does NOT roll back the replace;
	 * the new table stays installed and -EFAULT is reported. */
	if (copy_to_user(counters_ptr, counters,
			 sizeof(struct xt_counters) * num_counters) != 0)
		ret = -EFAULT;
	vfree(counters);
	xt_table_unlock(t);
	return ret;

 put_module:
	module_put(t->me);
	xt_table_unlock(t);
 free_newinfo_counters_untrans:
	vfree(counters);
 out:
	return ret;
}
1276
1277static int
1278do_replace(struct net *net, const void __user *user, unsigned int len)
1279{
1280 int ret;
1281 struct ip6t_replace tmp;
1282 struct xt_table_info *newinfo;
1283 void *loc_cpu_entry;
1284 struct ip6t_entry *iter;
1285
1286 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1287 return -EFAULT;
1288
1289 /* overflow check */
1290 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1291 return -ENOMEM;
1292
1293 newinfo = xt_alloc_table_info(tmp.size);
1294 if (!newinfo)
1295 return -ENOMEM;
1296
1297 /* choose the copy that is on our node/cpu */
1298 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1299 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1300 tmp.size) != 0) {
1301 ret = -EFAULT;
1302 goto free_newinfo;
1303 }
1304
1305 ret = translate_table(net, newinfo, loc_cpu_entry, &tmp);
1306 if (ret != 0)
1307 goto free_newinfo;
1308
1309 duprintf("ip_tables: Translated table\n");
1310
1311 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1312 tmp.num_counters, tmp.counters);
1313 if (ret)
1314 goto free_newinfo_untrans;
1315 return 0;
1316
1317 free_newinfo_untrans:
1318 xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1319 cleanup_entry(iter, net);
1320 free_newinfo:
1321 xt_free_table_info(newinfo);
1322 return ret;
1323}
1324
1325static int
1326do_add_counters(struct net *net, const void __user *user, unsigned int len,
1327 int compat)
1328{
1329 unsigned int i, curcpu;
1330 struct xt_counters_info tmp;
1331 struct xt_counters *paddc;
1332 unsigned int num_counters;
1333 char *name;
1334 int size;
1335 void *ptmp;
1336 struct xt_table *t;
1337 const struct xt_table_info *private;
1338 int ret = 0;
1339 const void *loc_cpu_entry;
1340 struct ip6t_entry *iter;
1341#ifdef CONFIG_COMPAT
1342 struct compat_xt_counters_info compat_tmp;
1343
1344 if (compat) {
1345 ptmp = &compat_tmp;
1346 size = sizeof(struct compat_xt_counters_info);
1347 } else
1348#endif
1349 {
1350 ptmp = &tmp;
1351 size = sizeof(struct xt_counters_info);
1352 }
1353
1354 if (copy_from_user(ptmp, user, size) != 0)
1355 return -EFAULT;
1356
1357#ifdef CONFIG_COMPAT
1358 if (compat) {
1359 num_counters = compat_tmp.num_counters;
1360 name = compat_tmp.name;
1361 } else
1362#endif
1363 {
1364 num_counters = tmp.num_counters;
1365 name = tmp.name;
1366 }
1367
1368 if (len != size + num_counters * sizeof(struct xt_counters))
1369 return -EINVAL;
1370
1371 paddc = vmalloc_node(len - size, numa_node_id());
1372 if (!paddc)
1373 return -ENOMEM;
1374
1375 if (copy_from_user(paddc, user + size, len - size) != 0) {
1376 ret = -EFAULT;
1377 goto free;
1378 }
1379
1380 t = xt_find_table_lock(net, AF_INET6, name);
1381 if (!t || IS_ERR(t)) {
1382 ret = t ? PTR_ERR(t) : -ENOENT;
1383 goto free;
1384 }
1385
1386
1387 local_bh_disable();
1388 private = t->private;
1389 if (private->number != num_counters) {
1390 ret = -EINVAL;
1391 goto unlock_up_free;
1392 }
1393
1394 i = 0;
1395 /* Choose the copy that is on our node */
1396 curcpu = smp_processor_id();
1397 xt_info_wrlock(curcpu);
1398 loc_cpu_entry = private->entries[curcpu];
1399 xt_entry_foreach(iter, loc_cpu_entry, private->size) {
1400 ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt);
1401 ++i;
1402 }
1403 xt_info_wrunlock(curcpu);
1404
1405 unlock_up_free:
1406 local_bh_enable();
1407 xt_table_unlock(t);
1408 module_put(t->me);
1409 free:
1410 vfree(paddc);
1411
1412 return ret;
1413}
1414
1415#ifdef CONFIG_COMPAT
/* 32-bit layout of struct ip6t_replace as seen by compat userspace.
 * Field meanings mirror the native struct; only pointer width differs. */
struct compat_ip6t_replace {
	char name[IP6T_TABLE_MAXNAMELEN];
	u32 valid_hooks;
	u32 num_entries;
	u32 size;
	u32 hook_entry[NF_INET_NUMHOOKS];
	u32 underflow[NF_INET_NUMHOOKS];
	u32 num_counters;
	compat_uptr_t counters;	/* struct ip6t_counters * */
	struct compat_ip6t_entry entries[0];	/* rule blob follows inline */
};
1427
/*
 * compat_copy_entry_to_user - convert one native entry to compat layout
 * directly into a userspace buffer.
 * @e:        native entry to convert
 * @dstptr:   in/out userspace write cursor, advanced past the converted entry
 * @size:     in/out remaining size budget; shrunk by the native-vs-compat
 *            layout difference as matches/target are converted
 * @counters: counter snapshot array; entry @i is copied into ce->counters
 * @i:        index of @e within the table (for the counters array)
 *
 * Returns 0 on success or a negative errno.
 */
static int
compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr,
			  unsigned int *size, struct xt_counters *counters,
			  unsigned int i)
{
	struct ip6t_entry_target *t;
	struct compat_ip6t_entry __user *ce;
	u_int16_t target_offset, next_offset;
	compat_uint_t origsize;
	const struct xt_entry_match *ematch;
	int ret = 0;

	origsize = *size;
	ce = (struct compat_ip6t_entry __user *)*dstptr;
	/* Copy the fixed header first; offsets are patched at the end
	 * once the actual compat sizes of matches/target are known. */
	if (copy_to_user(ce, e, sizeof(struct ip6t_entry)) != 0 ||
	    copy_to_user(&ce->counters, &counters[i],
			 sizeof(counters[i])) != 0)
		return -EFAULT;

	*dstptr += sizeof(struct compat_ip6t_entry);
	*size -= sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);

	xt_ematch_foreach(ematch, e) {
		ret = xt_compat_match_to_user(ematch, dstptr, size);
		if (ret != 0)
			return ret;
	}
	/* (origsize - *size) is the total shrinkage so far; subtract it
	 * from the native offsets to get the compat offsets. */
	target_offset = e->target_offset - (origsize - *size);
	t = ip6t_get_target(e);
	ret = xt_compat_target_to_user(t, dstptr, size);
	if (ret)
		return ret;
	next_offset = e->next_offset - (origsize - *size);
	if (put_user(target_offset, &ce->target_offset) != 0 ||
	    put_user(next_offset, &ce->next_offset) != 0)
		return -EFAULT;
	return 0;
}
1466
1467static int
1468compat_find_calc_match(struct ip6t_entry_match *m,
1469 const char *name,
1470 const struct ip6t_ip6 *ipv6,
1471 unsigned int hookmask,
1472 int *size)
1473{
1474 struct xt_match *match;
1475
1476 match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name,
1477 m->u.user.revision);
1478 if (IS_ERR(match)) {
1479 duprintf("compat_check_calc_match: `%s' not found\n",
1480 m->u.user.name);
1481 return PTR_ERR(match);
1482 }
1483 m->u.kernel.match = match;
1484 *size += xt_compat_match_offset(match);
1485 return 0;
1486}
1487
/* Drop the module references taken for a compat entry whose matches and
 * target were resolved but whose ->check hooks have NOT run yet (entries
 * past that point need cleanup_entry() instead). */
static void compat_release_entry(struct compat_ip6t_entry *e)
{
	struct ip6t_entry_target *t;
	struct xt_entry_match *ematch;

	/* Cleanup all matches */
	xt_ematch_foreach(ematch, e)
		module_put(ematch->u.kernel.match->me);
	t = compat_ip6t_get_target(e);
	module_put(t->u.kernel.target->me);
}
1499
/*
 * check_compat_entry_size_and_hooks - first-pass validation of one compat
 * entry: bounds/alignment checks, resolve match and target modules, record
 * the entry's size delta for the later compat->native conversion, and note
 * hook entry/underflow positions.
 *
 * On success, match/target module references are held (released later by
 * compat_release_entry() or cleanup_entry()). Returns 0 or negative errno.
 */
static int
check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
				  struct xt_table_info *newinfo,
				  unsigned int *size,
				  const unsigned char *base,
				  const unsigned char *limit,
				  const unsigned int *hook_entries,
				  const unsigned int *underflows,
				  const char *name)
{
	struct xt_entry_match *ematch;
	struct ip6t_entry_target *t;
	struct xt_target *target;
	unsigned int entry_offset;
	unsigned int j;
	int ret, off, h;

	duprintf("check_compat_entry_size_and_hooks %p\n", e);
	/* Entry must be properly aligned and leave room inside the blob. */
	if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0 ||
	    (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit) {
		duprintf("Bad offset %p, limit = %p\n", e, limit);
		return -EINVAL;
	}

	/* next_offset must at least cover the entry plus a minimal target. */
	if (e->next_offset < sizeof(struct compat_ip6t_entry) +
			     sizeof(struct compat_xt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	/* For purposes of check_entry casting the compat entry is fine */
	ret = check_entry((struct ip6t_entry *)e, name);
	if (ret)
		return ret;

	/* off accumulates how much bigger the native entry will be. */
	off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
	entry_offset = (void *)e - (void *)base;
	j = 0;	/* counts matches successfully resolved, for unwind */
	xt_ematch_foreach(ematch, e) {
		ret = compat_find_calc_match(ematch, name,
					     &e->ipv6, e->comefrom, &off);
		if (ret != 0)
			goto release_matches;
		++j;
	}

	t = compat_ip6t_get_target(e);
	target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name,
					t->u.user.revision);
	if (IS_ERR(target)) {
		duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
			 t->u.user.name);
		ret = PTR_ERR(target);
		goto release_matches;
	}
	t->u.kernel.target = target;

	off += xt_compat_target_offset(target);
	*size += off;
	/* Remember this entry's cumulative delta so offsets can be
	 * translated when the native blob is built. */
	ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
	if (ret)
		goto out;

	/* Check hooks & underflows */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h])
			newinfo->underflow[h] = underflows[h];
	}

	/* Clear counters and comefrom */
	memset(&e->counters, 0, sizeof(e->counters));
	e->comefrom = 0;
	return 0;

out:
	module_put(t->u.kernel.target->me);
release_matches:
	/* Release only the first j matches that were resolved. */
	xt_ematch_foreach(ematch, e) {
		if (j-- == 0)
			break;
		module_put(ematch->u.kernel.match->me);
	}
	return ret;
}
1587
/*
 * compat_copy_entry_from_user - second pass: expand one validated compat
 * entry into native layout at *dstptr, converting matches and target and
 * adjusting the recorded hook entry/underflow offsets for the growth.
 * Match/target modules were already resolved in the first pass.
 */
static int
compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
			    unsigned int *size, const char *name,
			    struct xt_table_info *newinfo, unsigned char *base)
{
	struct ip6t_entry_target *t;
	struct xt_target *target;
	struct ip6t_entry *de;
	unsigned int origsize;
	int ret, h;
	struct xt_entry_match *ematch;

	ret = 0;
	origsize = *size;
	de = (struct ip6t_entry *)*dstptr;
	memcpy(de, e, sizeof(struct ip6t_entry));
	memcpy(&de->counters, &e->counters, sizeof(e->counters));

	*dstptr += sizeof(struct ip6t_entry);
	/* *size grows here: the native entry is larger than the compat one. */
	*size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);

	xt_ematch_foreach(ematch, e) {
		ret = xt_compat_match_from_user(ematch, dstptr, size);
		if (ret != 0)
			return ret;
	}
	/* (origsize - *size) is negative growth; subtracting it widens
	 * the offsets to their native values. */
	de->target_offset = e->target_offset - (origsize - *size);
	t = compat_ip6t_get_target(e);
	target = t->u.kernel.target;
	xt_compat_target_from_user(t, dstptr, size);

	de->next_offset = e->next_offset - (origsize - *size);
	/* Shift every hook/underflow marker that lies past this entry. */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if ((unsigned char *)de - base < newinfo->hook_entry[h])
			newinfo->hook_entry[h] -= origsize - *size;
		if ((unsigned char *)de - base < newinfo->underflow[h])
			newinfo->underflow[h] -= origsize - *size;
	}
	return ret;
}
1628
/*
 * compat_check_entry - run the ->checkentry hooks of a converted entry's
 * matches and target. On failure, undoes (cleanup_match) only the matches
 * whose check already succeeded; the remainder are released by the caller.
 */
static int compat_check_entry(struct ip6t_entry *e, struct net *net,
			      const char *name)
{
	unsigned int j;
	int ret = 0;
	struct xt_mtchk_param mtpar;
	struct xt_entry_match *ematch;

	j = 0;	/* number of matches successfully checked, for unwind */
	mtpar.net	= net;
	mtpar.table     = name;
	mtpar.entryinfo = &e->ipv6;
	mtpar.hook_mask = e->comefrom;
	mtpar.family    = NFPROTO_IPV6;
	xt_ematch_foreach(ematch, e) {
		ret = check_match(ematch, &mtpar);
		if (ret != 0)
			goto cleanup_matches;
		++j;
	}

	ret = check_target(e, net, name);
	if (ret)
		goto cleanup_matches;
	return 0;

 cleanup_matches:
	xt_ematch_foreach(ematch, e) {
		if (j-- == 0)
			break;
		cleanup_match(ematch, net);
	}
	return ret;
}
1663
/*
 * translate_compat_table - convert a 32-bit userspace rule blob to the
 * native layout and fully validate it.
 *
 * Two passes under xt_compat_lock: pass 1 validates each compat entry and
 * records per-entry size deltas; pass 2 expands the blob into a freshly
 * allocated native xt_table_info. Afterwards the chains are loop-checked
 * and every entry's ->check hooks are run. On success *pinfo/*pentry0 are
 * replaced with the native table (the compat info is freed); on failure
 * all module references and allocations are released.
 */
static int
translate_compat_table(struct net *net,
		       const char *name,
		       unsigned int valid_hooks,
		       struct xt_table_info **pinfo,
		       void **pentry0,
		       unsigned int total_size,
		       unsigned int number,
		       unsigned int *hook_entries,
		       unsigned int *underflows)
{
	unsigned int i, j;
	struct xt_table_info *newinfo, *info;
	void *pos, *entry0, *entry1;
	struct compat_ip6t_entry *iter0;
	struct ip6t_entry *iter1;
	unsigned int size;
	int ret = 0;

	info = *pinfo;
	entry0 = *pentry0;
	size = total_size;
	info->number = number;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		info->hook_entry[i] = 0xFFFFFFFF;
		info->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_compat_table: size %u\n", info->size);
	j = 0;	/* entries validated so far; drives error unwinding */
	xt_compat_lock(AF_INET6);
	/* Walk through entries, checking offsets. */
	xt_entry_foreach(iter0, entry0, total_size) {
		ret = check_compat_entry_size_and_hooks(iter0, info, &size,
							entry0,
							entry0 + total_size,
							hook_entries,
							underflows,
							name);
		if (ret != 0)
			goto out_unlock;
		++j;
	}

	ret = -EINVAL;
	if (j != number) {
		duprintf("translate_compat_table: %u not %u entries\n",
			 j, number);
		goto out_unlock;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(valid_hooks & (1 << i)))
			continue;
		if (info->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, hook_entries[i]);
			goto out_unlock;
		}
		if (info->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, underflows[i]);
			goto out_unlock;
		}
	}

	ret = -ENOMEM;
	/* size now includes the accumulated native-layout growth. */
	newinfo = xt_alloc_table_info(size);
	if (!newinfo)
		goto out_unlock;

	newinfo->number = number;
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = info->hook_entry[i];
		newinfo->underflow[i] = info->underflow[i];
	}
	entry1 = newinfo->entries[raw_smp_processor_id()];
	pos = entry1;
	size = total_size;
	/* Pass 2: expand compat entries into the native blob. */
	xt_entry_foreach(iter0, entry0, total_size) {
		ret = compat_copy_entry_from_user(iter0, &pos, &size,
						  name, newinfo, entry1);
		if (ret != 0)
			break;
	}
	xt_compat_flush_offsets(AF_INET6);
	xt_compat_unlock(AF_INET6);
	if (ret)
		goto free_newinfo;

	ret = -ELOOP;
	if (!mark_source_chains(newinfo, valid_hooks, entry1))
		goto free_newinfo;

	i = 0;	/* entries whose ->check hooks succeeded */
	xt_entry_foreach(iter1, entry1, newinfo->size) {
		ret = compat_check_entry(iter1, net, name);
		if (ret != 0)
			break;
		++i;
	}
	if (ret) {
		/*
		 * The first i matches need cleanup_entry (calls ->destroy)
		 * because they had called ->check already. The other j-i
		 * entries need only release.
		 */
		int skip = i;
		j -= i;
		xt_entry_foreach(iter0, entry0, newinfo->size) {
			if (skip-- > 0)
				continue;
			if (j-- == 0)
				break;
			compat_release_entry(iter0);
		}
		xt_entry_foreach(iter1, entry1, newinfo->size) {
			if (i-- == 0)
				break;
			cleanup_entry(iter1, net);
		}
		xt_free_table_info(newinfo);
		return ret;
	}

	/* And one copy for every other CPU */
	for_each_possible_cpu(i)
		if (newinfo->entries[i] && newinfo->entries[i] != entry1)
			memcpy(newinfo->entries[i], entry1, newinfo->size);

	*pinfo = newinfo;
	*pentry0 = entry1;
	xt_free_table_info(info);
	return 0;

free_newinfo:
	xt_free_table_info(newinfo);
out:
	/* Release module references for the j entries not yet unwound. */
	xt_entry_foreach(iter0, entry0, total_size) {
		if (j-- == 0)
			break;
		compat_release_entry(iter0);
	}
	return ret;
out_unlock:
	xt_compat_flush_offsets(AF_INET6);
	xt_compat_unlock(AF_INET6);
	goto out;
}
1817
1818static int
1819compat_do_replace(struct net *net, void __user *user, unsigned int len)
1820{
1821 int ret;
1822 struct compat_ip6t_replace tmp;
1823 struct xt_table_info *newinfo;
1824 void *loc_cpu_entry;
1825 struct ip6t_entry *iter;
1826
1827 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1828 return -EFAULT;
1829
1830 /* overflow check */
1831 if (tmp.size >= INT_MAX / num_possible_cpus())
1832 return -ENOMEM;
1833 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1834 return -ENOMEM;
1835
1836 newinfo = xt_alloc_table_info(tmp.size);
1837 if (!newinfo)
1838 return -ENOMEM;
1839
1840 /* choose the copy that is on our node/cpu */
1841 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1842 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1843 tmp.size) != 0) {
1844 ret = -EFAULT;
1845 goto free_newinfo;
1846 }
1847
1848 ret = translate_compat_table(net, tmp.name, tmp.valid_hooks,
1849 &newinfo, &loc_cpu_entry, tmp.size,
1850 tmp.num_entries, tmp.hook_entry,
1851 tmp.underflow);
1852 if (ret != 0)
1853 goto free_newinfo;
1854
1855 duprintf("compat_do_replace: Translated table\n");
1856
1857 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1858 tmp.num_counters, compat_ptr(tmp.counters));
1859 if (ret)
1860 goto free_newinfo_untrans;
1861 return 0;
1862
1863 free_newinfo_untrans:
1864 xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1865 cleanup_entry(iter, net);
1866 free_newinfo:
1867 xt_free_table_info(newinfo);
1868 return ret;
1869}
1870
1871static int
1872compat_do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user,
1873 unsigned int len)
1874{
1875 int ret;
1876
1877 if (!capable(CAP_NET_ADMIN))
1878 return -EPERM;
1879
1880 switch (cmd) {
1881 case IP6T_SO_SET_REPLACE:
1882 ret = compat_do_replace(sock_net(sk), user, len);
1883 break;
1884
1885 case IP6T_SO_SET_ADD_COUNTERS:
1886 ret = do_add_counters(sock_net(sk), user, len, 1);
1887 break;
1888
1889 default:
1890 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
1891 ret = -EINVAL;
1892 }
1893
1894 return ret;
1895}
1896
/* 32-bit layout of struct ip6t_get_entries for the GET_ENTRIES sockopt. */
struct compat_ip6t_get_entries {
	char name[IP6T_TABLE_MAXNAMELEN];
	compat_uint_t size;
	struct compat_ip6t_entry entrytable[0];	/* rule blob follows */
};
1902
/*
 * compat_copy_entries_to_user - dump a table's rules in compat layout.
 * Snapshots the counters, then converts each entry in place into the
 * userspace buffer via compat_copy_entry_to_user().
 */
static int
compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
			    void __user *userptr)
{
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;
	void __user *pos;
	unsigned int size;
	int ret = 0;
	const void *loc_cpu_entry;
	unsigned int i = 0;
	struct ip6t_entry *iter;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	/* choose the copy that is on our node/cpu, ...
	 * This choice is lazy (because current thread is
	 * allowed to migrate to another cpu)
	 */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	pos = userptr;
	size = total_size;
	xt_entry_foreach(iter, loc_cpu_entry, total_size) {
		ret = compat_copy_entry_to_user(iter, &pos,
						&size, counters, i++);
		if (ret != 0)
			break;
	}

	vfree(counters);
	return ret;
}
1937
1938static int
1939compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr,
1940 int *len)
1941{
1942 int ret;
1943 struct compat_ip6t_get_entries get;
1944 struct xt_table *t;
1945
1946 if (*len < sizeof(get)) {
1947 duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
1948 return -EINVAL;
1949 }
1950
1951 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1952 return -EFAULT;
1953
1954 if (*len != sizeof(struct compat_ip6t_get_entries) + get.size) {
1955 duprintf("compat_get_entries: %u != %zu\n",
1956 *len, sizeof(get) + get.size);
1957 return -EINVAL;
1958 }
1959
1960 xt_compat_lock(AF_INET6);
1961 t = xt_find_table_lock(net, AF_INET6, get.name);
1962 if (t && !IS_ERR(t)) {
1963 const struct xt_table_info *private = t->private;
1964 struct xt_table_info info;
1965 duprintf("t->private->number = %u\n", private->number);
1966 ret = compat_table_info(private, &info);
1967 if (!ret && get.size == info.size) {
1968 ret = compat_copy_entries_to_user(private->size,
1969 t, uptr->entrytable);
1970 } else if (!ret) {
1971 duprintf("compat_get_entries: I've got %u not %u!\n",
1972 private->size, get.size);
1973 ret = -EAGAIN;
1974 }
1975 xt_compat_flush_offsets(AF_INET6);
1976 module_put(t->me);
1977 xt_table_unlock(t);
1978 } else
1979 ret = t ? PTR_ERR(t) : -ENOENT;
1980
1981 xt_compat_unlock(AF_INET6);
1982 return ret;
1983}
1984
1985static int do_ip6t_get_ctl(struct sock *, int, void __user *, int *);
1986
1987static int
1988compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1989{
1990 int ret;
1991
1992 if (!capable(CAP_NET_ADMIN))
1993 return -EPERM;
1994
1995 switch (cmd) {
1996 case IP6T_SO_GET_INFO:
1997 ret = get_info(sock_net(sk), user, len, 1);
1998 break;
1999 case IP6T_SO_GET_ENTRIES:
2000 ret = compat_get_entries(sock_net(sk), user, len);
2001 break;
2002 default:
2003 ret = do_ip6t_get_ctl(sk, cmd, user, len);
2004 }
2005 return ret;
2006}
2007#endif
2008
2009static int
2010do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
2011{
2012 int ret;
2013
2014 if (!capable(CAP_NET_ADMIN))
2015 return -EPERM;
2016
2017 switch (cmd) {
2018 case IP6T_SO_SET_REPLACE:
2019 ret = do_replace(sock_net(sk), user, len);
2020 break;
2021
2022 case IP6T_SO_SET_ADD_COUNTERS:
2023 ret = do_add_counters(sock_net(sk), user, len, 0);
2024 break;
2025
2026 default:
2027 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
2028 ret = -EINVAL;
2029 }
2030
2031 return ret;
2032}
2033
2034static int
2035do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2036{
2037 int ret;
2038
2039 if (!capable(CAP_NET_ADMIN))
2040 return -EPERM;
2041
2042 switch (cmd) {
2043 case IP6T_SO_GET_INFO:
2044 ret = get_info(sock_net(sk), user, len, 0);
2045 break;
2046
2047 case IP6T_SO_GET_ENTRIES:
2048 ret = get_entries(sock_net(sk), user, len);
2049 break;
2050
2051 case IP6T_SO_GET_REVISION_MATCH:
2052 case IP6T_SO_GET_REVISION_TARGET: {
2053 struct ip6t_get_revision rev;
2054 int target;
2055
2056 if (*len != sizeof(rev)) {
2057 ret = -EINVAL;
2058 break;
2059 }
2060 if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
2061 ret = -EFAULT;
2062 break;
2063 }
2064
2065 if (cmd == IP6T_SO_GET_REVISION_TARGET)
2066 target = 1;
2067 else
2068 target = 0;
2069
2070 try_then_request_module(xt_find_revision(AF_INET6, rev.name,
2071 rev.revision,
2072 target, &ret),
2073 "ip6t_%s", rev.name);
2074 break;
2075 }
2076
2077 default:
2078 duprintf("do_ip6t_get_ctl: unknown request %i\n", cmd);
2079 ret = -EINVAL;
2080 }
2081
2082 return ret;
2083}
2084
2085struct xt_table *ip6t_register_table(struct net *net,
2086 const struct xt_table *table,
2087 const struct ip6t_replace *repl)
2088{
2089 int ret;
2090 struct xt_table_info *newinfo;
2091 struct xt_table_info bootstrap = {0};
2092 void *loc_cpu_entry;
2093 struct xt_table *new_table;
2094
2095 newinfo = xt_alloc_table_info(repl->size);
2096 if (!newinfo) {
2097 ret = -ENOMEM;
2098 goto out;
2099 }
2100
2101 /* choose the copy on our node/cpu, but dont care about preemption */
2102 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
2103 memcpy(loc_cpu_entry, repl->entries, repl->size);
2104
2105 ret = translate_table(net, newinfo, loc_cpu_entry, repl);
2106 if (ret != 0)
2107 goto out_free;
2108
2109 new_table = xt_register_table(net, table, &bootstrap, newinfo);
2110 if (IS_ERR(new_table)) {
2111 ret = PTR_ERR(new_table);
2112 goto out_free;
2113 }
2114 return new_table;
2115
2116out_free:
2117 xt_free_table_info(newinfo);
2118out:
2119 return ERR_PTR(ret);
2120}
2121
/*
 * ip6t_unregister_table - tear down a table: unregister it from the
 * x_tables core, run cleanup on every rule, drop the module reference
 * held for user-added rules, and free the table info.
 */
void ip6t_unregister_table(struct net *net, struct xt_table *table)
{
	struct xt_table_info *private;
	void *loc_cpu_entry;
	struct module *table_owner = table->me;
	struct ip6t_entry *iter;

	private = xt_unregister_table(table);

	/* Decrease module usage counts and free resources */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	xt_entry_foreach(iter, loc_cpu_entry, private->size)
		cleanup_entry(iter, net);
	/* A table with user-added rules pinned its owner module;
	 * release that reference now (see __do_replace). */
	if (private->number > private->initial_entries)
		module_put(table_owner);
	xt_free_table_info(private);
}
2139
/* Returns 1 if the type and code is matched by the range, 0 otherwise */
static inline bool
icmp6_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
		      u_int8_t type, u_int8_t code,
		      bool invert)
{
	bool hit = type == test_type &&
		   code >= min_code &&
		   code <= max_code;

	/* "hit != invert" is the boolean XOR of the raw match and the
	 * user's invert flag. */
	return hit != invert;
}
2149
/*
 * icmp6_match - the built-in "icmp6" match: test an ICMPv6 packet's
 * type/code against the user-configured range, honouring the invert flag.
 * Non-first fragments never match; a packet too short to hold the ICMPv6
 * header is hot-dropped.
 */
static bool
icmp6_match(const struct sk_buff *skb, struct xt_action_param *par)
{
	const struct icmp6hdr *ic;
	struct icmp6hdr _icmph;
	const struct ip6t_icmp *icmpinfo = par->matchinfo;

	/* Must not be a fragment. */
	if (par->fragoff != 0)
		return false;

	ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
	if (ic == NULL) {
		/* We've been asked to examine this packet, and we
		 * can't. Hence, no choice but to drop.
		 */
		duprintf("Dropping evil ICMP tinygram.\n");
		par->hotdrop = true;
		return false;
	}

	return icmp6_type_code_match(icmpinfo->type,
				     icmpinfo->code[0],
				     icmpinfo->code[1],
				     ic->icmp6_type, ic->icmp6_code,
				     !!(icmpinfo->invflags&IP6T_ICMP_INV));
}
2177
2178/* Called when user tries to insert an entry of this type. */
2179static int icmp6_checkentry(const struct xt_mtchk_param *par)
2180{
2181 const struct ip6t_icmp *icmpinfo = par->matchinfo;
2182
2183 /* Must specify no unknown invflags */
2184 return (icmpinfo->invflags & ~IP6T_ICMP_INV) ? -EINVAL : 0;
2185}
2186
/* The built-in targets: standard (NULL) and error. */
/* The standard target intentionally has no .target function — verdicts
 * appear to be handled directly by the traversal code rather than via a
 * callback (not visible in this chunk; confirm in ip6t_do_table). */
static struct xt_target ip6t_builtin_tg[] __read_mostly = {
	{
		.name             = IP6T_STANDARD_TARGET,
		.targetsize       = sizeof(int),
		.family           = NFPROTO_IPV6,
#ifdef CONFIG_COMPAT
		.compatsize       = sizeof(compat_int_t),
		.compat_from_user = compat_standard_from_user,
		.compat_to_user   = compat_standard_to_user,
#endif
	},
	{
		.name             = IP6T_ERROR_TARGET,
		.target           = ip6t_error,
		.targetsize       = IP6T_FUNCTION_MAXNAMELEN,
		.family           = NFPROTO_IPV6,
	},
};
2206
/* Socket-option registration: wires the IP6T_SO_* set/getsockopt ranges
 * on PF_INET6 sockets to the dispatchers above. */
static struct nf_sockopt_ops ip6t_sockopts = {
	.pf		= PF_INET6,
	.set_optmin	= IP6T_BASE_CTL,
	.set_optmax	= IP6T_SO_SET_MAX+1,
	.set		= do_ip6t_set_ctl,
#ifdef CONFIG_COMPAT
	.compat_set	= compat_do_ip6t_set_ctl,
#endif
	.get_optmin	= IP6T_BASE_CTL,
	.get_optmax	= IP6T_SO_GET_MAX+1,
	.get		= do_ip6t_get_ctl,
#ifdef CONFIG_COMPAT
	.compat_get	= compat_do_ip6t_get_ctl,
#endif
	.owner		= THIS_MODULE,
};
2223
/* Built-in matches: only "icmp6", implemented by the functions above. */
static struct xt_match ip6t_builtin_mt[] __read_mostly = {
	{
		.name       = "icmp6",
		.match      = icmp6_match,
		.matchsize  = sizeof(struct ip6t_icmp),
		.checkentry = icmp6_checkentry,
		.proto      = IPPROTO_ICMPV6,
		.family     = NFPROTO_IPV6,
	},
};
2234
/* Per-namespace init: set up the IPv6 x_tables state for @net. */
static int __net_init ip6_tables_net_init(struct net *net)
{
	return xt_proto_init(net, NFPROTO_IPV6);
}
2239
/* Per-namespace teardown counterpart of ip6_tables_net_init(). */
static void __net_exit ip6_tables_net_exit(struct net *net)
{
	xt_proto_fini(net, NFPROTO_IPV6);
}
2244
/* Hooks the per-namespace init/exit above into the pernet machinery. */
static struct pernet_operations ip6_tables_net_ops = {
	.init = ip6_tables_net_init,
	.exit = ip6_tables_net_exit,
};
2249
/*
 * ip6_tables_init - module init: register pernet ops, built-in targets
 * and matches, then the sockopt interface. Each error label unwinds
 * exactly the steps that succeeded, in reverse order. (The label numbers
 * are historical and non-contiguous — err3 does not exist.)
 */
static int __init ip6_tables_init(void)
{
	int ret;

	ret = register_pernet_subsys(&ip6_tables_net_ops);
	if (ret < 0)
		goto err1;

	/* Noone else will be downing sem now, so we won't sleep */
	ret = xt_register_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
	if (ret < 0)
		goto err2;
	ret = xt_register_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt));
	if (ret < 0)
		goto err4;

	/* Register setsockopt */
	ret = nf_register_sockopt(&ip6t_sockopts);
	if (ret < 0)
		goto err5;

	pr_info("(C) 2000-2006 Netfilter Core Team\n");
	return 0;

err5:
	xt_unregister_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt));
err4:
	xt_unregister_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
err2:
	unregister_pernet_subsys(&ip6_tables_net_ops);
err1:
	return ret;
}
2283
/* Module exit: unregister everything in the reverse order of init. */
static void __exit ip6_tables_fini(void)
{
	nf_unregister_sockopt(&ip6t_sockopts);

	xt_unregister_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt));
	xt_unregister_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
	unregister_pernet_subsys(&ip6_tables_net_ops);
}
2292
2293/*
2294 * find the offset to specified header or the protocol number of last header
2295 * if target < 0. "last header" is transport protocol header, ESP, or
2296 * "No next header".
2297 *
2298 * If target header is found, its offset is set in *offset and return protocol
2299 * number. Otherwise, return -1.
2300 *
2301 * If the first fragment doesn't contain the final protocol header or
2302 * NEXTHDR_NONE it is considered invalid.
2303 *
2304 * Note that non-1st fragment is special case that "the protocol number
2305 * of last header" is "next header" field in Fragment header. In this case,
2306 * *offset is meaningless and fragment offset is stored in *fragoff if fragoff
2307 * isn't NULL.
2308 *
2309 */
int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
		  int target, unsigned short *fragoff)
{
	/* Start scanning right after the fixed IPv6 header. */
	unsigned int start = skb_network_offset(skb) + sizeof(struct ipv6hdr);
	u8 nexthdr = ipv6_hdr(skb)->nexthdr;
	unsigned int len = skb->len - start;

	if (fragoff)
		*fragoff = 0;

	while (nexthdr != target) {
		struct ipv6_opt_hdr _hdr, *hp;
		unsigned int hdrlen;

		/* Reached a non-extension header (or "no next header"):
		 * with target < 0 that IS the answer; otherwise the
		 * requested header does not exist in this packet. */
		if ((!ipv6_ext_hdr(nexthdr)) || nexthdr == NEXTHDR_NONE) {
			if (target < 0)
				break;
			return -ENOENT;
		}

		hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr);
		if (hp == NULL)
			return -EBADMSG;
		if (nexthdr == NEXTHDR_FRAGMENT) {
			unsigned short _frag_off;
			__be16 *fp;
			fp = skb_header_pointer(skb,
						start+offsetof(struct frag_hdr,
							       frag_off),
						sizeof(_frag_off),
						&_frag_off);
			if (fp == NULL)
				return -EBADMSG;

			/* Mask off the M flag and reserved bits; non-zero
			 * means this is not the first fragment. */
			_frag_off = ntohs(*fp) & ~0x7;
			if (_frag_off) {
				/* Non-first fragment: the "last header" is
				 * whatever the fragment header announces;
				 * report the fragment offset via *fragoff. */
				if (target < 0 &&
				    ((!ipv6_ext_hdr(hp->nexthdr)) ||
				     hp->nexthdr == NEXTHDR_NONE)) {
					if (fragoff)
						*fragoff = _frag_off;
					return hp->nexthdr;
				}
				return -ENOENT;
			}
			hdrlen = 8;	/* fragment header is fixed-size */
		} else if (nexthdr == NEXTHDR_AUTH)
			hdrlen = (hp->hdrlen + 2) << 2;	/* AH counts 32-bit words */
		else
			hdrlen = ipv6_optlen(hp);

		nexthdr = hp->nexthdr;
		len -= hdrlen;
		start += hdrlen;
	}

	*offset = start;
	return nexthdr;
}
2369
/* Exported entry points used by the per-table modules (ip6table_filter,
 * ip6table_mangle, ...) and other IPv6 netfilter code. */
EXPORT_SYMBOL(ip6t_register_table);
EXPORT_SYMBOL(ip6t_unregister_table);
EXPORT_SYMBOL(ip6t_do_table);
EXPORT_SYMBOL(ip6t_ext_hdr);
EXPORT_SYMBOL(ipv6_find_hdr);

module_init(ip6_tables_init);
module_exit(ip6_tables_fini);
/* This page took 0.030814 seconds and 5 git commands to generate. */