1 /*
2 * Packet matching code.
3 *
4 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
5 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12 #include <linux/capability.h>
13 #include <linux/in.h>
14 #include <linux/skbuff.h>
15 #include <linux/kmod.h>
16 #include <linux/vmalloc.h>
17 #include <linux/netdevice.h>
18 #include <linux/module.h>
19 #include <linux/poison.h>
20 #include <linux/icmpv6.h>
21 #include <net/ipv6.h>
22 #include <net/compat.h>
23 #include <asm/uaccess.h>
24 #include <linux/mutex.h>
25 #include <linux/proc_fs.h>
26 #include <linux/err.h>
27 #include <linux/cpumask.h>
28
29 #include <linux/netfilter_ipv6/ip6_tables.h>
30 #include <linux/netfilter/x_tables.h>
31 #include <net/netfilter/nf_log.h>
32 #include "../../netfilter/xt_repldata.h"
33
34 MODULE_LICENSE("GPL");
35 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
36 MODULE_DESCRIPTION("IPv6 packet filter");
37
38 /*#define DEBUG_IP_FIREWALL*/
39 /*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
40 /*#define DEBUG_IP_FIREWALL_USER*/
41
42 #ifdef DEBUG_IP_FIREWALL
43 #define dprintf(format, args...) printk(format , ## args)
44 #else
45 #define dprintf(format, args...)
46 #endif
47
48 #ifdef DEBUG_IP_FIREWALL_USER
49 #define duprintf(format, args...) printk(format , ## args)
50 #else
51 #define duprintf(format, args...)
52 #endif
53
54 #ifdef CONFIG_NETFILTER_DEBUG
55 #define IP_NF_ASSERT(x) \
56 do { \
57 if (!(x)) \
58 printk("IP_NF_ASSERT: %s:%s:%u\n", \
59 __func__, __FILE__, __LINE__); \
60 } while(0)
61 #else
62 #define IP_NF_ASSERT(x)
63 #endif
64
65 #if 0
66 /* All the better to debug you with... */
67 #define static
68 #define inline
69 #endif
70
71 void *ip6t_alloc_initial_table(const struct xt_table *info)
72 {
73 return xt_alloc_initial_table(ip6t, IP6T);
74 }
75 EXPORT_SYMBOL_GPL(ip6t_alloc_initial_table);
76
77 /*
78 We keep a set of rules for each CPU, so the softirq path only needs
79 the per-CPU read lock (xt_info_rdlock_bh()) while it updates the
80 counters; user context takes the per-CPU write locks (see
81 get_counters()) to read the counters or synchronize with a rule
82 replacement, without stopping packets from coming through.
83
84 Hence the start of any table is given by get_entry() below. */
85
86 /* Check for an extension */
87 int
88 ip6t_ext_hdr(u8 nexthdr)
89 {
90 return ( (nexthdr == IPPROTO_HOPOPTS) ||
91 (nexthdr == IPPROTO_ROUTING) ||
92 (nexthdr == IPPROTO_FRAGMENT) ||
93 (nexthdr == IPPROTO_ESP) ||
94 (nexthdr == IPPROTO_AH) ||
95 (nexthdr == IPPROTO_NONE) ||
96 (nexthdr == IPPROTO_DSTOPTS) );
97 }
98
99 /* Returns whether matches rule or not. */
100 /* Performance critical - called for every packet */
101 static inline bool
102 ip6_packet_match(const struct sk_buff *skb,
103 const char *indev,
104 const char *outdev,
105 const struct ip6t_ip6 *ip6info,
106 unsigned int *protoff,
107 int *fragoff, bool *hotdrop)
108 {
109 unsigned long ret;
110 const struct ipv6hdr *ipv6 = ipv6_hdr(skb);
111
112 #define FWINV(bool, invflg) ((bool) ^ !!(ip6info->invflags & (invflg)))
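/* FWINV(cond, flag) flips the sense of a test when the rule's '!'
 * invert flag is set.  ipv6_masked_addr_cmp() returns non-zero on a
 * mismatch, so without IP6T_INV_SRCIP a source mismatch makes the rule
 * fail, while with IP6T_INV_SRCIP ("! -s ...") a source *match* makes
 * it fail. */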
113
114 if (FWINV(ipv6_masked_addr_cmp(&ipv6->saddr, &ip6info->smsk,
115 &ip6info->src), IP6T_INV_SRCIP) ||
116 FWINV(ipv6_masked_addr_cmp(&ipv6->daddr, &ip6info->dmsk,
117 &ip6info->dst), IP6T_INV_DSTIP)) {
118 dprintf("Source or dest mismatch.\n");
119 /*
120 dprintf("SRC: %u. Mask: %u. Target: %u.%s\n", ip->saddr,
121 ipinfo->smsk.s_addr, ipinfo->src.s_addr,
122 ipinfo->invflags & IP6T_INV_SRCIP ? " (INV)" : "");
123 dprintf("DST: %u. Mask: %u. Target: %u.%s\n", ip->daddr,
124 ipinfo->dmsk.s_addr, ipinfo->dst.s_addr,
125 ipinfo->invflags & IP6T_INV_DSTIP ? " (INV)" : "");*/
126 return false;
127 }
128
129 ret = ifname_compare_aligned(indev, ip6info->iniface, ip6info->iniface_mask);
130
131 if (FWINV(ret != 0, IP6T_INV_VIA_IN)) {
132 dprintf("VIA in mismatch (%s vs %s).%s\n",
133 indev, ip6info->iniface,
134 ip6info->invflags&IP6T_INV_VIA_IN ?" (INV)":"");
135 return false;
136 }
137
138 ret = ifname_compare_aligned(outdev, ip6info->outiface, ip6info->outiface_mask);
139
140 if (FWINV(ret != 0, IP6T_INV_VIA_OUT)) {
141 dprintf("VIA out mismatch (%s vs %s).%s\n",
142 outdev, ip6info->outiface,
143 ip6info->invflags&IP6T_INV_VIA_OUT ?" (INV)":"");
144 return false;
145 }
146
147 /* ... might want to do something with class and flowlabel here ... */
148
149 /* look for the desired protocol header */
150 if((ip6info->flags & IP6T_F_PROTO)) {
151 int protohdr;
152 unsigned short _frag_off;
153
154 protohdr = ipv6_find_hdr(skb, protoff, -1, &_frag_off);
155 if (protohdr < 0) {
156 if (_frag_off == 0)
157 *hotdrop = true;
158 return false;
159 }
160 *fragoff = _frag_off;
161
162 dprintf("Packet protocol %hi ?= %s%hi.\n",
163 protohdr,
164 ip6info->invflags & IP6T_INV_PROTO ? "!":"",
165 ip6info->proto);
166
167 if (ip6info->proto == protohdr) {
168 if(ip6info->invflags & IP6T_INV_PROTO) {
169 return false;
170 }
171 return true;
172 }
173
174 /* We need a match for '-p all', too! */
175 if ((ip6info->proto != 0) &&
176 !(ip6info->invflags & IP6T_INV_PROTO))
177 return false;
178 }
179 return true;
180 }
181
182 /* should be ip6 safe */
183 static bool
184 ip6_checkentry(const struct ip6t_ip6 *ipv6)
185 {
186 if (ipv6->flags & ~IP6T_F_MASK) {
187 duprintf("Unknown flag bits set: %08X\n",
188 ipv6->flags & ~IP6T_F_MASK);
189 return false;
190 }
191 if (ipv6->invflags & ~IP6T_INV_MASK) {
192 duprintf("Unknown invflag bits set: %08X\n",
193 ipv6->invflags & ~IP6T_INV_MASK);
194 return false;
195 }
196 return true;
197 }
198
199 static unsigned int
200 ip6t_error(struct sk_buff *skb, const struct xt_target_param *par)
201 {
202 if (net_ratelimit())
203 printk("ip6_tables: error: `%s'\n",
204 (const char *)par->targinfo);
205
206 return NF_DROP;
207 }
208
209 /* Performance critical - called for every packet */
210 static inline bool
211 do_match(const struct ip6t_entry_match *m, const struct sk_buff *skb,
212 struct xt_match_param *par)
213 {
214 par->match = m->u.kernel.match;
215 par->matchinfo = m->data;
216
217 /* Stop iteration if it doesn't match */
218 if (!m->u.kernel.match->match(skb, par))
219 return true;
220 else
221 return false;
222 }
223
224 static inline struct ip6t_entry *
225 get_entry(const void *base, unsigned int offset)
226 {
227 return (struct ip6t_entry *)(base + offset);
228 }
229
230 /* All zeroes == unconditional rule. */
231 /* Mildly perf critical (only if packet tracing is on) */
232 static inline bool unconditional(const struct ip6t_ip6 *ipv6)
233 {
234 static const struct ip6t_ip6 uncond;
235
236 return memcmp(ipv6, &uncond, sizeof(uncond)) == 0;
237 }
238
239 static inline const struct ip6t_entry_target *
240 ip6t_get_target_c(const struct ip6t_entry *e)
241 {
242 return ip6t_get_target((struct ip6t_entry *)e);
243 }
244
245 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
246 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
247 /* This cries for unification! */
248 static const char *const hooknames[] = {
249 [NF_INET_PRE_ROUTING] = "PREROUTING",
250 [NF_INET_LOCAL_IN] = "INPUT",
251 [NF_INET_FORWARD] = "FORWARD",
252 [NF_INET_LOCAL_OUT] = "OUTPUT",
253 [NF_INET_POST_ROUTING] = "POSTROUTING",
254 };
255
256 enum nf_ip_trace_comments {
257 NF_IP6_TRACE_COMMENT_RULE,
258 NF_IP6_TRACE_COMMENT_RETURN,
259 NF_IP6_TRACE_COMMENT_POLICY,
260 };
261
262 static const char *const comments[] = {
263 [NF_IP6_TRACE_COMMENT_RULE] = "rule",
264 [NF_IP6_TRACE_COMMENT_RETURN] = "return",
265 [NF_IP6_TRACE_COMMENT_POLICY] = "policy",
266 };
267
268 static struct nf_loginfo trace_loginfo = {
269 .type = NF_LOG_TYPE_LOG,
270 .u = {
271 .log = {
272 .level = 4,
273 .logflags = NF_LOG_MASK,
274 },
275 },
276 };
277
278 /* Mildly perf critical (only if packet tracing is on) */
279 static inline int
280 get_chainname_rulenum(const struct ip6t_entry *s, const struct ip6t_entry *e,
281 const char *hookname, const char **chainname,
282 const char **comment, unsigned int *rulenum)
283 {
284 const struct ip6t_standard_target *t = (void *)ip6t_get_target_c(s);
285
286 if (strcmp(t->target.u.kernel.target->name, IP6T_ERROR_TARGET) == 0) {
287 /* Head of user chain: ERROR target with chainname */
288 *chainname = t->target.data;
289 (*rulenum) = 0;
290 } else if (s == e) {
291 (*rulenum)++;
292
293 if (s->target_offset == sizeof(struct ip6t_entry) &&
294 strcmp(t->target.u.kernel.target->name,
295 IP6T_STANDARD_TARGET) == 0 &&
296 t->verdict < 0 &&
297 unconditional(&s->ipv6)) {
298 /* Tail of chains: STANDARD target (return/policy) */
299 *comment = *chainname == hookname
300 ? comments[NF_IP6_TRACE_COMMENT_POLICY]
301 : comments[NF_IP6_TRACE_COMMENT_RETURN];
302 }
303 return 1;
304 } else
305 (*rulenum)++;
306
307 return 0;
308 }
309
310 static void trace_packet(const struct sk_buff *skb,
311 unsigned int hook,
312 const struct net_device *in,
313 const struct net_device *out,
314 const char *tablename,
315 const struct xt_table_info *private,
316 const struct ip6t_entry *e)
317 {
318 const void *table_base;
319 const struct ip6t_entry *root;
320 const char *hookname, *chainname, *comment;
321 unsigned int rulenum = 0;
322
323 table_base = private->entries[smp_processor_id()];
324 root = get_entry(table_base, private->hook_entry[hook]);
325
326 hookname = chainname = hooknames[hook];
327 comment = comments[NF_IP6_TRACE_COMMENT_RULE];
328
329 IP6T_ENTRY_ITERATE(root,
330 private->size - private->hook_entry[hook],
331 get_chainname_rulenum,
332 e, hookname, &chainname, &comment, &rulenum);
333
334 nf_log_packet(AF_INET6, hook, skb, in, out, &trace_loginfo,
335 "TRACE: %s:%s:%s:%u ",
336 tablename, chainname, comment, rulenum);
337 }
338 #endif
339
340 static inline __pure struct ip6t_entry *
341 ip6t_next_entry(const struct ip6t_entry *entry)
342 {
343 return (void *)entry + entry->next_offset;
344 }
345
346 /* Returns one of the generic firewall policies, like NF_ACCEPT. */
347 unsigned int
348 ip6t_do_table(struct sk_buff *skb,
349 unsigned int hook,
350 const struct net_device *in,
351 const struct net_device *out,
352 struct xt_table *table)
353 {
354 #define tb_comefrom ((struct ip6t_entry *)table_base)->comefrom
355
356 static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
357 bool hotdrop = false;
358 /* Initializing verdict to NF_DROP keeps gcc happy. */
359 unsigned int verdict = NF_DROP;
360 const char *indev, *outdev;
361 const void *table_base;
362 struct ip6t_entry *e, *back;
363 const struct xt_table_info *private;
364 struct xt_match_param mtpar;
365 struct xt_target_param tgpar;
366
367 /* Initialization */
368 indev = in ? in->name : nulldevname;
369 outdev = out ? out->name : nulldevname;
370 /* We handle fragments by dealing with the first fragment as
371 * if it was a normal packet. All other fragments are treated
372 * normally, except that they will NEVER match rules that ask
373 * things we don't know, ie. tcp syn flag or ports). If the
374 * rule is also a fragment-specific rule, non-fragments won't
375 * match it. */
376 mtpar.hotdrop = &hotdrop;
377 mtpar.in = tgpar.in = in;
378 mtpar.out = tgpar.out = out;
379 mtpar.family = tgpar.family = NFPROTO_IPV6;
380 mtpar.hooknum = tgpar.hooknum = hook;
381
382 IP_NF_ASSERT(table->valid_hooks & (1 << hook));
383
384 xt_info_rdlock_bh();
385 private = table->private;
386 table_base = private->entries[smp_processor_id()];
387
388 e = get_entry(table_base, private->hook_entry[hook]);
389
390 /* For return from builtin chain */
391 back = get_entry(table_base, private->underflow[hook]);
392
393 do {
394 const struct ip6t_entry_target *t;
395
396 IP_NF_ASSERT(e);
397 IP_NF_ASSERT(back);
398 if (!ip6_packet_match(skb, indev, outdev, &e->ipv6,
399 &mtpar.thoff, &mtpar.fragoff, &hotdrop) ||
400 IP6T_MATCH_ITERATE(e, do_match, skb, &mtpar) != 0) {
401 e = ip6t_next_entry(e);
402 continue;
403 }
404
405 ADD_COUNTER(e->counters,
406 ntohs(ipv6_hdr(skb)->payload_len) +
407 sizeof(struct ipv6hdr), 1);
408
409 t = ip6t_get_target_c(e);
410 IP_NF_ASSERT(t->u.kernel.target);
411
412 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
413 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
414 /* The packet is traced: log it */
415 if (unlikely(skb->nf_trace))
416 trace_packet(skb, hook, in, out,
417 table->name, private, e);
418 #endif
419 /* Standard target? */
420 if (!t->u.kernel.target->target) {
421 int v;
422
423 v = ((struct ip6t_standard_target *)t)->verdict;
424 if (v < 0) {
425 /* Pop from stack? */
426 if (v != IP6T_RETURN) {
427 verdict = (unsigned)(-v) - 1;
428 break;
429 }
430 e = back;
431 back = get_entry(table_base, back->comefrom);
432 continue;
433 }
434 if (table_base + v != ip6t_next_entry(e) &&
435 !(e->ipv6.flags & IP6T_F_GOTO)) {
436 /* Save old back ptr in next entry */
437 struct ip6t_entry *next = ip6t_next_entry(e);
438 next->comefrom = (void *)back - table_base;
439 /* set back pointer to next entry */
440 back = next;
441 }
442
443 e = get_entry(table_base, v);
444 continue;
445 }
446
447 /* Targets which reenter must return
448 abs. verdicts */
449 tgpar.target = t->u.kernel.target;
450 tgpar.targinfo = t->data;
451
452 #ifdef CONFIG_NETFILTER_DEBUG
453 tb_comefrom = 0xeeeeeeec;
454 #endif
455 verdict = t->u.kernel.target->target(skb, &tgpar);
456
457 #ifdef CONFIG_NETFILTER_DEBUG
458 if (tb_comefrom != 0xeeeeeeec && verdict == IP6T_CONTINUE) {
459 printk("Target %s reentered!\n",
460 t->u.kernel.target->name);
461 verdict = NF_DROP;
462 }
463 tb_comefrom = 0x57acc001;
464 #endif
465 if (verdict == IP6T_CONTINUE)
466 e = ip6t_next_entry(e);
467 else
468 /* Verdict */
469 break;
470 } while (!hotdrop);
471
472 #ifdef CONFIG_NETFILTER_DEBUG
473 tb_comefrom = NETFILTER_LINK_POISON;
474 #endif
475 xt_info_rdunlock_bh();
476
477 #ifdef DEBUG_ALLOW_ALL
478 return NF_ACCEPT;
479 #else
480 if (hotdrop)
481 return NF_DROP;
482 else return verdict;
483 #endif
484
485 #undef tb_comefrom
486 }
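/*
 * For context: a table module's netfilter hook is normally just a thin
 * wrapper around ip6t_do_table().  A minimal sketch, loosely modeled on
 * ip6table_filter (the hook name and the ip6table_filter pointer below
 * are illustrative, not part of this file):
 *
 *	static unsigned int
 *	ip6t_hook(unsigned int hook, struct sk_buff *skb,
 *		  const struct net_device *in, const struct net_device *out,
 *		  int (*okfn)(struct sk_buff *))
 *	{
 *		return ip6t_do_table(skb, hook, in, out,
 *				     dev_net(in)->ipv6.ip6table_filter);
 *	}
 */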
487
488 /* Figures out from what hook each rule can be called: returns 0 if
489 there are loops. Puts hook bitmask in comefrom. */
490 static int
491 mark_source_chains(const struct xt_table_info *newinfo,
492 unsigned int valid_hooks, void *entry0)
493 {
494 unsigned int hook;
495
496 /* No recursion; use packet counter to save back ptrs (reset
497 to 0 as we leave), and comefrom to save source hook bitmask */
498 for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
499 unsigned int pos = newinfo->hook_entry[hook];
500 struct ip6t_entry *e = (struct ip6t_entry *)(entry0 + pos);
501
502 if (!(valid_hooks & (1 << hook)))
503 continue;
504
505 /* Set initial back pointer. */
506 e->counters.pcnt = pos;
507
508 for (;;) {
509 const struct ip6t_standard_target *t
510 = (void *)ip6t_get_target_c(e);
511 int visited = e->comefrom & (1 << hook);
512
513 if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
514 printk("ip6tables: loop hook %u pos %u %08X.\n",
515 hook, pos, e->comefrom);
516 return 0;
517 }
518 e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));
519
520 /* Unconditional return/END. */
521 if ((e->target_offset == sizeof(struct ip6t_entry) &&
522 (strcmp(t->target.u.user.name,
523 IP6T_STANDARD_TARGET) == 0) &&
524 t->verdict < 0 &&
525 unconditional(&e->ipv6)) || visited) {
526 unsigned int oldpos, size;
527
528 if ((strcmp(t->target.u.user.name,
529 IP6T_STANDARD_TARGET) == 0) &&
530 t->verdict < -NF_MAX_VERDICT - 1) {
531 duprintf("mark_source_chains: bad "
532 "negative verdict (%i)\n",
533 t->verdict);
534 return 0;
535 }
536
537 /* Return: backtrack through the last
538 big jump. */
539 do {
540 e->comefrom ^= (1<<NF_INET_NUMHOOKS);
541 #ifdef DEBUG_IP_FIREWALL_USER
542 if (e->comefrom
543 & (1 << NF_INET_NUMHOOKS)) {
544 duprintf("Back unset "
545 "on hook %u "
546 "rule %u\n",
547 hook, pos);
548 }
549 #endif
550 oldpos = pos;
551 pos = e->counters.pcnt;
552 e->counters.pcnt = 0;
553
554 /* We're at the start. */
555 if (pos == oldpos)
556 goto next;
557
558 e = (struct ip6t_entry *)
559 (entry0 + pos);
560 } while (oldpos == pos + e->next_offset);
561
562 /* Move along one */
563 size = e->next_offset;
564 e = (struct ip6t_entry *)
565 (entry0 + pos + size);
566 e->counters.pcnt = pos;
567 pos += size;
568 } else {
569 int newpos = t->verdict;
570
571 if (strcmp(t->target.u.user.name,
572 IP6T_STANDARD_TARGET) == 0 &&
573 newpos >= 0) {
574 if (newpos > newinfo->size -
575 sizeof(struct ip6t_entry)) {
576 duprintf("mark_source_chains: "
577 "bad verdict (%i)\n",
578 newpos);
579 return 0;
580 }
581 /* This a jump; chase it. */
582 duprintf("Jump rule %u -> %u\n",
583 pos, newpos);
584 } else {
585 /* ... this is a fallthru */
586 newpos = pos + e->next_offset;
587 }
588 e = (struct ip6t_entry *)
589 (entry0 + newpos);
590 e->counters.pcnt = pos;
591 pos = newpos;
592 }
593 }
594 next:
595 duprintf("Finished chain %u\n", hook);
596 }
597 return 1;
598 }
599
600 static int
601 cleanup_match(struct ip6t_entry_match *m, struct net *net, unsigned int *i)
602 {
603 struct xt_mtdtor_param par;
604
605 if (i && (*i)-- == 0)
606 return 1;
607
608 par.net = net;
609 par.match = m->u.kernel.match;
610 par.matchinfo = m->data;
611 par.family = NFPROTO_IPV6;
612 if (par.match->destroy != NULL)
613 par.match->destroy(&par);
614 module_put(par.match->me);
615 return 0;
616 }
617
618 static int
619 check_entry(const struct ip6t_entry *e, const char *name)
620 {
621 const struct ip6t_entry_target *t;
622
623 if (!ip6_checkentry(&e->ipv6)) {
624 duprintf("ip_tables: ip check failed %p %s.\n", e, name);
625 return -EINVAL;
626 }
627
628 if (e->target_offset + sizeof(struct ip6t_entry_target) >
629 e->next_offset)
630 return -EINVAL;
631
632 t = ip6t_get_target_c(e);
633 if (e->target_offset + t->u.target_size > e->next_offset)
634 return -EINVAL;
635
636 return 0;
637 }
638
639 static int check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par,
640 unsigned int *i)
641 {
642 const struct ip6t_ip6 *ipv6 = par->entryinfo;
643 int ret;
644
645 par->match = m->u.kernel.match;
646 par->matchinfo = m->data;
647
648 ret = xt_check_match(par, m->u.match_size - sizeof(*m),
649 ipv6->proto, ipv6->invflags & IP6T_INV_PROTO);
650 if (ret < 0) {
651 duprintf("ip_tables: check failed for `%s'.\n",
652 par->match->name);
653 return ret;
654 }
655 ++*i;
656 return 0;
657 }
658
659 static int
660 find_check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par,
661 unsigned int *i)
662 {
663 struct xt_match *match;
664 int ret;
665
666 match = try_then_request_module(xt_find_match(AF_INET6, m->u.user.name,
667 m->u.user.revision),
668 "ip6t_%s", m->u.user.name);
669 if (IS_ERR(match) || !match) {
670 duprintf("find_check_match: `%s' not found\n", m->u.user.name);
671 return match ? PTR_ERR(match) : -ENOENT;
672 }
673 m->u.kernel.match = match;
674
675 ret = check_match(m, par, i);
676 if (ret)
677 goto err;
678
679 return 0;
680 err:
681 module_put(m->u.kernel.match->me);
682 return ret;
683 }
684
685 static int check_target(struct ip6t_entry *e, struct net *net, const char *name)
686 {
687 struct ip6t_entry_target *t = ip6t_get_target(e);
688 struct xt_tgchk_param par = {
689 .net = net,
690 .table = name,
691 .entryinfo = e,
692 .target = t->u.kernel.target,
693 .targinfo = t->data,
694 .hook_mask = e->comefrom,
695 .family = NFPROTO_IPV6,
696 };
697 int ret;
698
699 t = ip6t_get_target(e);
700 ret = xt_check_target(&par, t->u.target_size - sizeof(*t),
701 e->ipv6.proto, e->ipv6.invflags & IP6T_INV_PROTO);
702 if (ret < 0) {
703 duprintf("ip_tables: check failed for `%s'.\n",
704 t->u.kernel.target->name);
705 return ret;
706 }
707 return 0;
708 }
709
710 static int
711 find_check_entry(struct ip6t_entry *e, struct net *net, const char *name,
712 unsigned int size, unsigned int *i)
713 {
714 struct ip6t_entry_target *t;
715 struct xt_target *target;
716 int ret;
717 unsigned int j;
718 struct xt_mtchk_param mtpar;
719
720 ret = check_entry(e, name);
721 if (ret)
722 return ret;
723
724 j = 0;
725 mtpar.net = net;
726 mtpar.table = name;
727 mtpar.entryinfo = &e->ipv6;
728 mtpar.hook_mask = e->comefrom;
729 mtpar.family = NFPROTO_IPV6;
730 ret = IP6T_MATCH_ITERATE(e, find_check_match, &mtpar, &j);
731 if (ret != 0)
732 goto cleanup_matches;
733
734 t = ip6t_get_target(e);
735 target = try_then_request_module(xt_find_target(AF_INET6,
736 t->u.user.name,
737 t->u.user.revision),
738 "ip6t_%s", t->u.user.name);
739 if (IS_ERR(target) || !target) {
740 duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
741 ret = target ? PTR_ERR(target) : -ENOENT;
742 goto cleanup_matches;
743 }
744 t->u.kernel.target = target;
745
746 ret = check_target(e, net, name);
747 if (ret)
748 goto err;
749
750 (*i)++;
751 return 0;
752 err:
753 module_put(t->u.kernel.target->me);
754 cleanup_matches:
755 IP6T_MATCH_ITERATE(e, cleanup_match, net, &j);
756 return ret;
757 }
758
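/* Standard-target verdicts are stored as -(verdict + 1): NF_DROP (0)
 * becomes -1 and NF_ACCEPT (1) becomes -2, while non-negative values
 * are jump offsets and IP6T_RETURN is handled as its own special
 * negative value.  check_underflow() below undoes that encoding to
 * make sure a base chain's policy is a plain ACCEPT or DROP. */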
759 static bool check_underflow(const struct ip6t_entry *e)
760 {
761 const struct ip6t_entry_target *t;
762 unsigned int verdict;
763
764 if (!unconditional(&e->ipv6))
765 return false;
766 t = ip6t_get_target_c(e);
767 if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
768 return false;
769 verdict = ((struct ip6t_standard_target *)t)->verdict;
770 verdict = -verdict - 1;
771 return verdict == NF_DROP || verdict == NF_ACCEPT;
772 }
773
774 static int
775 check_entry_size_and_hooks(struct ip6t_entry *e,
776 struct xt_table_info *newinfo,
777 const unsigned char *base,
778 const unsigned char *limit,
779 const unsigned int *hook_entries,
780 const unsigned int *underflows,
781 unsigned int valid_hooks,
782 unsigned int *i)
783 {
784 unsigned int h;
785
786 if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0 ||
787 (unsigned char *)e + sizeof(struct ip6t_entry) >= limit) {
788 duprintf("Bad offset %p\n", e);
789 return -EINVAL;
790 }
791
792 if (e->next_offset
793 < sizeof(struct ip6t_entry) + sizeof(struct ip6t_entry_target)) {
794 duprintf("checking: element %p size %u\n",
795 e, e->next_offset);
796 return -EINVAL;
797 }
798
799 /* Check hooks & underflows */
800 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
801 if (!(valid_hooks & (1 << h)))
802 continue;
803 if ((unsigned char *)e - base == hook_entries[h])
804 newinfo->hook_entry[h] = hook_entries[h];
805 if ((unsigned char *)e - base == underflows[h]) {
806 if (!check_underflow(e)) {
807 pr_err("Underflows must be unconditional and "
808 "use the STANDARD target with "
809 "ACCEPT/DROP\n");
810 return -EINVAL;
811 }
812 newinfo->underflow[h] = underflows[h];
813 }
814 }
815
816 /* Clear counters and comefrom */
817 e->counters = ((struct xt_counters) { 0, 0 });
818 e->comefrom = 0;
819
820 (*i)++;
821 return 0;
822 }
823
824 static int
825 cleanup_entry(struct ip6t_entry *e, struct net *net, unsigned int *i)
826 {
827 struct xt_tgdtor_param par;
828 struct ip6t_entry_target *t;
829
830 if (i && (*i)-- == 0)
831 return 1;
832
833 /* Cleanup all matches */
834 IP6T_MATCH_ITERATE(e, cleanup_match, net, NULL);
835 t = ip6t_get_target(e);
836
837 par.net = net;
838 par.target = t->u.kernel.target;
839 par.targinfo = t->data;
840 par.family = NFPROTO_IPV6;
841 if (par.target->destroy != NULL)
842 par.target->destroy(&par);
843 module_put(par.target->me);
844 return 0;
845 }
846
847 /* Checks and translates the user-supplied table segment (held in
848 newinfo) */
849 static int
850 translate_table(struct net *net,
851 const char *name,
852 unsigned int valid_hooks,
853 struct xt_table_info *newinfo,
854 void *entry0,
855 unsigned int size,
856 unsigned int number,
857 const unsigned int *hook_entries,
858 const unsigned int *underflows)
859 {
860 unsigned int i;
861 int ret;
862
863 newinfo->size = size;
864 newinfo->number = number;
865
866 /* Init all hooks to impossible value. */
867 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
868 newinfo->hook_entry[i] = 0xFFFFFFFF;
869 newinfo->underflow[i] = 0xFFFFFFFF;
870 }
871
872 duprintf("translate_table: size %u\n", newinfo->size);
873 i = 0;
874 /* Walk through entries, checking offsets. */
875 ret = IP6T_ENTRY_ITERATE(entry0, newinfo->size,
876 check_entry_size_and_hooks,
877 newinfo,
878 entry0,
879 entry0 + size,
880 hook_entries, underflows, valid_hooks, &i);
881 if (ret != 0)
882 return ret;
883
884 if (i != number) {
885 duprintf("translate_table: %u not %u entries\n",
886 i, number);
887 return -EINVAL;
888 }
889
890 /* Check hooks all assigned */
891 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
892 /* Only hooks which are valid */
893 if (!(valid_hooks & (1 << i)))
894 continue;
895 if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
896 duprintf("Invalid hook entry %u %u\n",
897 i, hook_entries[i]);
898 return -EINVAL;
899 }
900 if (newinfo->underflow[i] == 0xFFFFFFFF) {
901 duprintf("Invalid underflow %u %u\n",
902 i, underflows[i]);
903 return -EINVAL;
904 }
905 }
906
907 if (!mark_source_chains(newinfo, valid_hooks, entry0))
908 return -ELOOP;
909
910 /* Finally, each sanity check must pass */
911 i = 0;
912 ret = IP6T_ENTRY_ITERATE(entry0, newinfo->size,
913 find_check_entry, net, name, size, &i);
914
915 if (ret != 0) {
916 IP6T_ENTRY_ITERATE(entry0, newinfo->size,
917 cleanup_entry, net, &i);
918 return ret;
919 }
920
921 /* And one copy for every other CPU */
922 for_each_possible_cpu(i) {
923 if (newinfo->entries[i] && newinfo->entries[i] != entry0)
924 memcpy(newinfo->entries[i], entry0, newinfo->size);
925 }
926
927 return ret;
928 }
929
930 /* Gets counters. */
931 static inline int
932 add_entry_to_counter(const struct ip6t_entry *e,
933 struct xt_counters total[],
934 unsigned int *i)
935 {
936 ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
937
938 (*i)++;
939 return 0;
940 }
941
942 static inline int
943 set_entry_to_counter(const struct ip6t_entry *e,
944 struct ip6t_counters total[],
945 unsigned int *i)
946 {
947 SET_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
948
949 (*i)++;
950 return 0;
951 }
952
953 static void
954 get_counters(const struct xt_table_info *t,
955 struct xt_counters counters[])
956 {
957 unsigned int cpu;
958 unsigned int i;
959 unsigned int curcpu;
960
961 /* Instead of clearing (by a previous call to memset())
962 * the counters and using adds, we set the counters
963 * with data used by 'current' CPU
964 *
965 * Bottom half has to be disabled to prevent deadlock
966 * if new softirq were to run and call ipt_do_table
967 */
968 local_bh_disable();
969 curcpu = smp_processor_id();
970
971 i = 0;
972 IP6T_ENTRY_ITERATE(t->entries[curcpu],
973 t->size,
974 set_entry_to_counter,
975 counters,
976 &i);
977
978 for_each_possible_cpu(cpu) {
979 if (cpu == curcpu)
980 continue;
981 i = 0;
982 xt_info_wrlock(cpu);
983 IP6T_ENTRY_ITERATE(t->entries[cpu],
984 t->size,
985 add_entry_to_counter,
986 counters,
987 &i);
988 xt_info_wrunlock(cpu);
989 }
990 local_bh_enable();
991 }
992
993 static struct xt_counters *alloc_counters(const struct xt_table *table)
994 {
995 unsigned int countersize;
996 struct xt_counters *counters;
997 const struct xt_table_info *private = table->private;
998
999 /* We need atomic snapshot of counters: rest doesn't change
1000 (other than comefrom, which userspace doesn't care
1001 about). */
1002 countersize = sizeof(struct xt_counters) * private->number;
1003 counters = vmalloc_node(countersize, numa_node_id());
1004
1005 if (counters == NULL)
1006 return ERR_PTR(-ENOMEM);
1007
1008 get_counters(private, counters);
1009
1010 return counters;
1011 }
1012
1013 static int
1014 copy_entries_to_user(unsigned int total_size,
1015 const struct xt_table *table,
1016 void __user *userptr)
1017 {
1018 unsigned int off, num;
1019 const struct ip6t_entry *e;
1020 struct xt_counters *counters;
1021 const struct xt_table_info *private = table->private;
1022 int ret = 0;
1023 const void *loc_cpu_entry;
1024
1025 counters = alloc_counters(table);
1026 if (IS_ERR(counters))
1027 return PTR_ERR(counters);
1028
1029 /* choose the copy that is on our node/cpu, ...
1030 * This choice is lazy (because current thread is
1031 * allowed to migrate to another cpu)
1032 */
1033 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1034 if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
1035 ret = -EFAULT;
1036 goto free_counters;
1037 }
1038
1039 /* FIXME: use iterator macros --RR */
1040 /* ... then go back and fix counters and names */
1041 for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
1042 unsigned int i;
1043 const struct ip6t_entry_match *m;
1044 const struct ip6t_entry_target *t;
1045
1046 e = (struct ip6t_entry *)(loc_cpu_entry + off);
1047 if (copy_to_user(userptr + off
1048 + offsetof(struct ip6t_entry, counters),
1049 &counters[num],
1050 sizeof(counters[num])) != 0) {
1051 ret = -EFAULT;
1052 goto free_counters;
1053 }
1054
1055 for (i = sizeof(struct ip6t_entry);
1056 i < e->target_offset;
1057 i += m->u.match_size) {
1058 m = (void *)e + i;
1059
1060 if (copy_to_user(userptr + off + i
1061 + offsetof(struct ip6t_entry_match,
1062 u.user.name),
1063 m->u.kernel.match->name,
1064 strlen(m->u.kernel.match->name)+1)
1065 != 0) {
1066 ret = -EFAULT;
1067 goto free_counters;
1068 }
1069 }
1070
1071 t = ip6t_get_target_c(e);
1072 if (copy_to_user(userptr + off + e->target_offset
1073 + offsetof(struct ip6t_entry_target,
1074 u.user.name),
1075 t->u.kernel.target->name,
1076 strlen(t->u.kernel.target->name)+1) != 0) {
1077 ret = -EFAULT;
1078 goto free_counters;
1079 }
1080 }
1081
1082 free_counters:
1083 vfree(counters);
1084 return ret;
1085 }
1086
1087 #ifdef CONFIG_COMPAT
1088 static void compat_standard_from_user(void *dst, const void *src)
1089 {
1090 int v = *(compat_int_t *)src;
1091
1092 if (v > 0)
1093 v += xt_compat_calc_jump(AF_INET6, v);
1094 memcpy(dst, &v, sizeof(v));
1095 }
1096
1097 static int compat_standard_to_user(void __user *dst, const void *src)
1098 {
1099 compat_int_t cv = *(int *)src;
1100
1101 if (cv > 0)
1102 cv -= xt_compat_calc_jump(AF_INET6, cv);
1103 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
1104 }
1105
1106 static inline int
1107 compat_calc_match(const struct ip6t_entry_match *m, int *size)
1108 {
1109 *size += xt_compat_match_offset(m->u.kernel.match);
1110 return 0;
1111 }
1112
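/* On a 64-bit kernel a 32-bit userland sees the smaller compat layout:
 * struct compat_ip6t_entry and the compat match/target headers take
 * less room than their native counterparts, so each entry shrinks by
 * "off" bytes when converted.  compat_calc_entry() works out that
 * delta per entry and records it with xt_compat_add_offset() so jump
 * targets (see compat_standard_from_user/_to_user above) and the
 * hook_entry/underflow offsets can be adjusted later. */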
1113 static int compat_calc_entry(const struct ip6t_entry *e,
1114 const struct xt_table_info *info,
1115 const void *base, struct xt_table_info *newinfo)
1116 {
1117 const struct ip6t_entry_target *t;
1118 unsigned int entry_offset;
1119 int off, i, ret;
1120
1121 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1122 entry_offset = (void *)e - base;
1123 IP6T_MATCH_ITERATE(e, compat_calc_match, &off);
1124 t = ip6t_get_target_c(e);
1125 off += xt_compat_target_offset(t->u.kernel.target);
1126 newinfo->size -= off;
1127 ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
1128 if (ret)
1129 return ret;
1130
1131 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1132 if (info->hook_entry[i] &&
1133 (e < (struct ip6t_entry *)(base + info->hook_entry[i])))
1134 newinfo->hook_entry[i] -= off;
1135 if (info->underflow[i] &&
1136 (e < (struct ip6t_entry *)(base + info->underflow[i])))
1137 newinfo->underflow[i] -= off;
1138 }
1139 return 0;
1140 }
1141
1142 static int compat_table_info(const struct xt_table_info *info,
1143 struct xt_table_info *newinfo)
1144 {
1145 void *loc_cpu_entry;
1146
1147 if (!newinfo || !info)
1148 return -EINVAL;
1149
1150 /* we don't care about newinfo->entries[] */
1151 memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
1152 newinfo->initial_entries = 0;
1153 loc_cpu_entry = info->entries[raw_smp_processor_id()];
1154 return IP6T_ENTRY_ITERATE(loc_cpu_entry, info->size,
1155 compat_calc_entry, info, loc_cpu_entry,
1156 newinfo);
1157 }
1158 #endif
1159
1160 static int get_info(struct net *net, void __user *user,
1161 const int *len, int compat)
1162 {
1163 char name[IP6T_TABLE_MAXNAMELEN];
1164 struct xt_table *t;
1165 int ret;
1166
1167 if (*len != sizeof(struct ip6t_getinfo)) {
1168 duprintf("length %u != %zu\n", *len,
1169 sizeof(struct ip6t_getinfo));
1170 return -EINVAL;
1171 }
1172
1173 if (copy_from_user(name, user, sizeof(name)) != 0)
1174 return -EFAULT;
1175
1176 name[IP6T_TABLE_MAXNAMELEN-1] = '\0';
1177 #ifdef CONFIG_COMPAT
1178 if (compat)
1179 xt_compat_lock(AF_INET6);
1180 #endif
1181 t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
1182 "ip6table_%s", name);
1183 if (t && !IS_ERR(t)) {
1184 struct ip6t_getinfo info;
1185 const struct xt_table_info *private = t->private;
1186 #ifdef CONFIG_COMPAT
1187 struct xt_table_info tmp;
1188
1189 if (compat) {
1190 ret = compat_table_info(private, &tmp);
1191 xt_compat_flush_offsets(AF_INET6);
1192 private = &tmp;
1193 }
1194 #endif
1195 info.valid_hooks = t->valid_hooks;
1196 memcpy(info.hook_entry, private->hook_entry,
1197 sizeof(info.hook_entry));
1198 memcpy(info.underflow, private->underflow,
1199 sizeof(info.underflow));
1200 info.num_entries = private->number;
1201 info.size = private->size;
1202 strcpy(info.name, name);
1203
1204 if (copy_to_user(user, &info, *len) != 0)
1205 ret = -EFAULT;
1206 else
1207 ret = 0;
1208
1209 xt_table_unlock(t);
1210 module_put(t->me);
1211 } else
1212 ret = t ? PTR_ERR(t) : -ENOENT;
1213 #ifdef CONFIG_COMPAT
1214 if (compat)
1215 xt_compat_unlock(AF_INET6);
1216 #endif
1217 return ret;
1218 }
1219
1220 static int
1221 get_entries(struct net *net, struct ip6t_get_entries __user *uptr,
1222 const int *len)
1223 {
1224 int ret;
1225 struct ip6t_get_entries get;
1226 struct xt_table *t;
1227
1228 if (*len < sizeof(get)) {
1229 duprintf("get_entries: %u < %zu\n", *len, sizeof(get));
1230 return -EINVAL;
1231 }
1232 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1233 return -EFAULT;
1234 if (*len != sizeof(struct ip6t_get_entries) + get.size) {
1235 duprintf("get_entries: %u != %zu\n",
1236 *len, sizeof(get) + get.size);
1237 return -EINVAL;
1238 }
1239
1240 t = xt_find_table_lock(net, AF_INET6, get.name);
1241 if (t && !IS_ERR(t)) {
1242 struct xt_table_info *private = t->private;
1243 duprintf("t->private->number = %u\n", private->number);
1244 if (get.size == private->size)
1245 ret = copy_entries_to_user(private->size,
1246 t, uptr->entrytable);
1247 else {
1248 duprintf("get_entries: I've got %u not %u!\n",
1249 private->size, get.size);
1250 ret = -EAGAIN;
1251 }
1252 module_put(t->me);
1253 xt_table_unlock(t);
1254 } else
1255 ret = t ? PTR_ERR(t) : -ENOENT;
1256
1257 return ret;
1258 }
1259
1260 static int
1261 __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
1262 struct xt_table_info *newinfo, unsigned int num_counters,
1263 void __user *counters_ptr)
1264 {
1265 int ret;
1266 struct xt_table *t;
1267 struct xt_table_info *oldinfo;
1268 struct xt_counters *counters;
1269 const void *loc_cpu_old_entry;
1270
1271 ret = 0;
1272 counters = vmalloc_node(num_counters * sizeof(struct xt_counters),
1273 numa_node_id());
1274 if (!counters) {
1275 ret = -ENOMEM;
1276 goto out;
1277 }
1278
1279 t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
1280 "ip6table_%s", name);
1281 if (!t || IS_ERR(t)) {
1282 ret = t ? PTR_ERR(t) : -ENOENT;
1283 goto free_newinfo_counters_untrans;
1284 }
1285
1286 /* You lied! */
1287 if (valid_hooks != t->valid_hooks) {
1288 duprintf("Valid hook crap: %08X vs %08X\n",
1289 valid_hooks, t->valid_hooks);
1290 ret = -EINVAL;
1291 goto put_module;
1292 }
1293
1294 oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
1295 if (!oldinfo)
1296 goto put_module;
1297
1298 /* Update module usage count based on number of rules */
1299 duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
1300 oldinfo->number, oldinfo->initial_entries, newinfo->number);
1301 if ((oldinfo->number > oldinfo->initial_entries) ||
1302 (newinfo->number <= oldinfo->initial_entries))
1303 module_put(t->me);
1304 if ((oldinfo->number > oldinfo->initial_entries) &&
1305 (newinfo->number <= oldinfo->initial_entries))
1306 module_put(t->me);
1307
1308 /* Get the old counters, and synchronize with replace */
1309 get_counters(oldinfo, counters);
1310
1311 /* Decrease module usage counts and free resource */
1312 loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
1313 IP6T_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry,
1314 net, NULL);
1315 xt_free_table_info(oldinfo);
1316 if (copy_to_user(counters_ptr, counters,
1317 sizeof(struct xt_counters) * num_counters) != 0)
1318 ret = -EFAULT;
1319 vfree(counters);
1320 xt_table_unlock(t);
1321 return ret;
1322
1323 put_module:
1324 module_put(t->me);
1325 xt_table_unlock(t);
1326 free_newinfo_counters_untrans:
1327 vfree(counters);
1328 out:
1329 return ret;
1330 }
1331
1332 static int
1333 do_replace(struct net *net, const void __user *user, unsigned int len)
1334 {
1335 int ret;
1336 struct ip6t_replace tmp;
1337 struct xt_table_info *newinfo;
1338 void *loc_cpu_entry;
1339
1340 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1341 return -EFAULT;
1342
1343 /* overflow check */
1344 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1345 return -ENOMEM;
1346
1347 newinfo = xt_alloc_table_info(tmp.size);
1348 if (!newinfo)
1349 return -ENOMEM;
1350
1351 /* choose the copy that is on our node/cpu */
1352 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1353 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1354 tmp.size) != 0) {
1355 ret = -EFAULT;
1356 goto free_newinfo;
1357 }
1358
1359 ret = translate_table(net, tmp.name, tmp.valid_hooks,
1360 newinfo, loc_cpu_entry, tmp.size, tmp.num_entries,
1361 tmp.hook_entry, tmp.underflow);
1362 if (ret != 0)
1363 goto free_newinfo;
1364
1365 duprintf("ip_tables: Translated table\n");
1366
1367 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1368 tmp.num_counters, tmp.counters);
1369 if (ret)
1370 goto free_newinfo_untrans;
1371 return 0;
1372
1373 free_newinfo_untrans:
1374 IP6T_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, net, NULL);
1375 free_newinfo:
1376 xt_free_table_info(newinfo);
1377 return ret;
1378 }
1379
1380 /* We're lazy, and add to the first CPU; overflow works its fey magic
1381 * and everything is OK. */
1382 static int
1383 add_counter_to_entry(struct ip6t_entry *e,
1384 const struct xt_counters addme[],
1385 unsigned int *i)
1386 {
1387 ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
1388
1389 (*i)++;
1390 return 0;
1391 }
1392
1393 static int
1394 do_add_counters(struct net *net, const void __user *user, unsigned int len,
1395 int compat)
1396 {
1397 unsigned int i, curcpu;
1398 struct xt_counters_info tmp;
1399 struct xt_counters *paddc;
1400 unsigned int num_counters;
1401 char *name;
1402 int size;
1403 void *ptmp;
1404 struct xt_table *t;
1405 const struct xt_table_info *private;
1406 int ret = 0;
1407 const void *loc_cpu_entry;
1408 #ifdef CONFIG_COMPAT
1409 struct compat_xt_counters_info compat_tmp;
1410
1411 if (compat) {
1412 ptmp = &compat_tmp;
1413 size = sizeof(struct compat_xt_counters_info);
1414 } else
1415 #endif
1416 {
1417 ptmp = &tmp;
1418 size = sizeof(struct xt_counters_info);
1419 }
1420
1421 if (copy_from_user(ptmp, user, size) != 0)
1422 return -EFAULT;
1423
1424 #ifdef CONFIG_COMPAT
1425 if (compat) {
1426 num_counters = compat_tmp.num_counters;
1427 name = compat_tmp.name;
1428 } else
1429 #endif
1430 {
1431 num_counters = tmp.num_counters;
1432 name = tmp.name;
1433 }
1434
1435 if (len != size + num_counters * sizeof(struct xt_counters))
1436 return -EINVAL;
1437
1438 paddc = vmalloc_node(len - size, numa_node_id());
1439 if (!paddc)
1440 return -ENOMEM;
1441
1442 if (copy_from_user(paddc, user + size, len - size) != 0) {
1443 ret = -EFAULT;
1444 goto free;
1445 }
1446
1447 t = xt_find_table_lock(net, AF_INET6, name);
1448 if (!t || IS_ERR(t)) {
1449 ret = t ? PTR_ERR(t) : -ENOENT;
1450 goto free;
1451 }
1452
1453
1454 local_bh_disable();
1455 private = t->private;
1456 if (private->number != num_counters) {
1457 ret = -EINVAL;
1458 goto unlock_up_free;
1459 }
1460
1461 i = 0;
1462 /* Choose the copy that is on our node */
1463 curcpu = smp_processor_id();
1464 xt_info_wrlock(curcpu);
1465 loc_cpu_entry = private->entries[curcpu];
1466 IP6T_ENTRY_ITERATE(loc_cpu_entry,
1467 private->size,
1468 add_counter_to_entry,
1469 paddc,
1470 &i);
1471 xt_info_wrunlock(curcpu);
1472
1473 unlock_up_free:
1474 local_bh_enable();
1475 xt_table_unlock(t);
1476 module_put(t->me);
1477 free:
1478 vfree(paddc);
1479
1480 return ret;
1481 }
1482
1483 #ifdef CONFIG_COMPAT
1484 struct compat_ip6t_replace {
1485 char name[IP6T_TABLE_MAXNAMELEN];
1486 u32 valid_hooks;
1487 u32 num_entries;
1488 u32 size;
1489 u32 hook_entry[NF_INET_NUMHOOKS];
1490 u32 underflow[NF_INET_NUMHOOKS];
1491 u32 num_counters;
1492 compat_uptr_t counters; /* struct ip6t_counters * */
1493 struct compat_ip6t_entry entries[0];
1494 };
1495
1496 static int
1497 compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr,
1498 unsigned int *size, struct xt_counters *counters,
1499 unsigned int *i)
1500 {
1501 struct ip6t_entry_target *t;
1502 struct compat_ip6t_entry __user *ce;
1503 u_int16_t target_offset, next_offset;
1504 compat_uint_t origsize;
1505 int ret;
1506
1507 ret = -EFAULT;
1508 origsize = *size;
1509 ce = (struct compat_ip6t_entry __user *)*dstptr;
1510 if (copy_to_user(ce, e, sizeof(struct ip6t_entry)))
1511 goto out;
1512
1513 if (copy_to_user(&ce->counters, &counters[*i], sizeof(counters[*i])))
1514 goto out;
1515
1516 *dstptr += sizeof(struct compat_ip6t_entry);
1517 *size -= sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1518
1519 ret = IP6T_MATCH_ITERATE(e, xt_compat_match_to_user, dstptr, size);
1520 target_offset = e->target_offset - (origsize - *size);
1521 if (ret)
1522 goto out;
1523 t = ip6t_get_target(e);
1524 ret = xt_compat_target_to_user(t, dstptr, size);
1525 if (ret)
1526 goto out;
1527 ret = -EFAULT;
1528 next_offset = e->next_offset - (origsize - *size);
1529 if (put_user(target_offset, &ce->target_offset))
1530 goto out;
1531 if (put_user(next_offset, &ce->next_offset))
1532 goto out;
1533
1534 (*i)++;
1535 return 0;
1536 out:
1537 return ret;
1538 }
1539
1540 static int
1541 compat_find_calc_match(struct ip6t_entry_match *m,
1542 const char *name,
1543 const struct ip6t_ip6 *ipv6,
1544 unsigned int hookmask,
1545 int *size, unsigned int *i)
1546 {
1547 struct xt_match *match;
1548
1549 match = try_then_request_module(xt_find_match(AF_INET6, m->u.user.name,
1550 m->u.user.revision),
1551 "ip6t_%s", m->u.user.name);
1552 if (IS_ERR(match) || !match) {
1553 duprintf("compat_check_calc_match: `%s' not found\n",
1554 m->u.user.name);
1555 return match ? PTR_ERR(match) : -ENOENT;
1556 }
1557 m->u.kernel.match = match;
1558 *size += xt_compat_match_offset(match);
1559
1560 (*i)++;
1561 return 0;
1562 }
1563
1564 static int
1565 compat_release_match(struct ip6t_entry_match *m, unsigned int *i)
1566 {
1567 if (i && (*i)-- == 0)
1568 return 1;
1569
1570 module_put(m->u.kernel.match->me);
1571 return 0;
1572 }
1573
1574 static int
1575 compat_release_entry(struct compat_ip6t_entry *e, unsigned int *i)
1576 {
1577 struct ip6t_entry_target *t;
1578
1579 if (i && (*i)-- == 0)
1580 return 1;
1581
1582 /* Cleanup all matches */
1583 COMPAT_IP6T_MATCH_ITERATE(e, compat_release_match, NULL);
1584 t = compat_ip6t_get_target(e);
1585 module_put(t->u.kernel.target->me);
1586 return 0;
1587 }
1588
1589 static int
1590 check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
1591 struct xt_table_info *newinfo,
1592 unsigned int *size,
1593 const unsigned char *base,
1594 const unsigned char *limit,
1595 const unsigned int *hook_entries,
1596 const unsigned int *underflows,
1597 unsigned int *i,
1598 const char *name)
1599 {
1600 struct ip6t_entry_target *t;
1601 struct xt_target *target;
1602 unsigned int entry_offset;
1603 unsigned int j;
1604 int ret, off, h;
1605
1606 duprintf("check_compat_entry_size_and_hooks %p\n", e);
1607 if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0 ||
1608 (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit) {
1609 duprintf("Bad offset %p, limit = %p\n", e, limit);
1610 return -EINVAL;
1611 }
1612
1613 if (e->next_offset < sizeof(struct compat_ip6t_entry) +
1614 sizeof(struct compat_xt_entry_target)) {
1615 duprintf("checking: element %p size %u\n",
1616 e, e->next_offset);
1617 return -EINVAL;
1618 }
1619
1620 /* For purposes of check_entry casting the compat entry is fine */
1621 ret = check_entry((struct ip6t_entry *)e, name);
1622 if (ret)
1623 return ret;
1624
1625 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1626 entry_offset = (void *)e - (void *)base;
1627 j = 0;
1628 ret = COMPAT_IP6T_MATCH_ITERATE(e, compat_find_calc_match, name,
1629 &e->ipv6, e->comefrom, &off, &j);
1630 if (ret != 0)
1631 goto release_matches;
1632
1633 t = compat_ip6t_get_target(e);
1634 target = try_then_request_module(xt_find_target(AF_INET6,
1635 t->u.user.name,
1636 t->u.user.revision),
1637 "ip6t_%s", t->u.user.name);
1638 if (IS_ERR(target) || !target) {
1639 duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
1640 t->u.user.name);
1641 ret = target ? PTR_ERR(target) : -ENOENT;
1642 goto release_matches;
1643 }
1644 t->u.kernel.target = target;
1645
1646 off += xt_compat_target_offset(target);
1647 *size += off;
1648 ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
1649 if (ret)
1650 goto out;
1651
1652 /* Check hooks & underflows */
1653 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1654 if ((unsigned char *)e - base == hook_entries[h])
1655 newinfo->hook_entry[h] = hook_entries[h];
1656 if ((unsigned char *)e - base == underflows[h])
1657 newinfo->underflow[h] = underflows[h];
1658 }
1659
1660 /* Clear counters and comefrom */
1661 memset(&e->counters, 0, sizeof(e->counters));
1662 e->comefrom = 0;
1663
1664 (*i)++;
1665 return 0;
1666
1667 out:
1668 module_put(t->u.kernel.target->me);
1669 release_matches:
1670 IP6T_MATCH_ITERATE(e, compat_release_match, &j);
1671 return ret;
1672 }
1673
1674 static int
1675 compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
1676 unsigned int *size, const char *name,
1677 struct xt_table_info *newinfo, unsigned char *base)
1678 {
1679 struct ip6t_entry_target *t;
1680 struct xt_target *target;
1681 struct ip6t_entry *de;
1682 unsigned int origsize;
1683 int ret, h;
1684
1685 ret = 0;
1686 origsize = *size;
1687 de = (struct ip6t_entry *)*dstptr;
1688 memcpy(de, e, sizeof(struct ip6t_entry));
1689 memcpy(&de->counters, &e->counters, sizeof(e->counters));
1690
1691 *dstptr += sizeof(struct ip6t_entry);
1692 *size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1693
1694 ret = COMPAT_IP6T_MATCH_ITERATE(e, xt_compat_match_from_user,
1695 dstptr, size);
1696 if (ret)
1697 return ret;
1698 de->target_offset = e->target_offset - (origsize - *size);
1699 t = compat_ip6t_get_target(e);
1700 target = t->u.kernel.target;
1701 xt_compat_target_from_user(t, dstptr, size);
1702
1703 de->next_offset = e->next_offset - (origsize - *size);
1704 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1705 if ((unsigned char *)de - base < newinfo->hook_entry[h])
1706 newinfo->hook_entry[h] -= origsize - *size;
1707 if ((unsigned char *)de - base < newinfo->underflow[h])
1708 newinfo->underflow[h] -= origsize - *size;
1709 }
1710 return ret;
1711 }
1712
1713 static int compat_check_entry(struct ip6t_entry *e, struct net *net,
1714 const char *name, unsigned int *i)
1715 {
1716 unsigned int j;
1717 int ret;
1718 struct xt_mtchk_param mtpar;
1719
1720 j = 0;
1721 mtpar.net = net;
1722 mtpar.table = name;
1723 mtpar.entryinfo = &e->ipv6;
1724 mtpar.hook_mask = e->comefrom;
1725 mtpar.family = NFPROTO_IPV6;
1726 ret = IP6T_MATCH_ITERATE(e, check_match, &mtpar, &j);
1727 if (ret)
1728 goto cleanup_matches;
1729
1730 ret = check_target(e, net, name);
1731 if (ret)
1732 goto cleanup_matches;
1733
1734 (*i)++;
1735 return 0;
1736
1737 cleanup_matches:
1738 IP6T_MATCH_ITERATE(e, cleanup_match, net, &j);
1739 return ret;
1740 }
1741
1742 static int
1743 translate_compat_table(struct net *net,
1744 const char *name,
1745 unsigned int valid_hooks,
1746 struct xt_table_info **pinfo,
1747 void **pentry0,
1748 unsigned int total_size,
1749 unsigned int number,
1750 unsigned int *hook_entries,
1751 unsigned int *underflows)
1752 {
1753 unsigned int i, j;
1754 struct xt_table_info *newinfo, *info;
1755 void *pos, *entry0, *entry1;
1756 unsigned int size;
1757 int ret;
1758
1759 info = *pinfo;
1760 entry0 = *pentry0;
1761 size = total_size;
1762 info->number = number;
1763
1764 /* Init all hooks to impossible value. */
1765 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1766 info->hook_entry[i] = 0xFFFFFFFF;
1767 info->underflow[i] = 0xFFFFFFFF;
1768 }
1769
1770 duprintf("translate_compat_table: size %u\n", info->size);
1771 j = 0;
1772 xt_compat_lock(AF_INET6);
1773 /* Walk through entries, checking offsets. */
1774 ret = COMPAT_IP6T_ENTRY_ITERATE(entry0, total_size,
1775 check_compat_entry_size_and_hooks,
1776 info, &size, entry0,
1777 entry0 + total_size,
1778 hook_entries, underflows, &j, name);
1779 if (ret != 0)
1780 goto out_unlock;
1781
1782 ret = -EINVAL;
1783 if (j != number) {
1784 duprintf("translate_compat_table: %u not %u entries\n",
1785 j, number);
1786 goto out_unlock;
1787 }
1788
1789 /* Check hooks all assigned */
1790 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1791 /* Only hooks which are valid */
1792 if (!(valid_hooks & (1 << i)))
1793 continue;
1794 if (info->hook_entry[i] == 0xFFFFFFFF) {
1795 duprintf("Invalid hook entry %u %u\n",
1796 i, hook_entries[i]);
1797 goto out_unlock;
1798 }
1799 if (info->underflow[i] == 0xFFFFFFFF) {
1800 duprintf("Invalid underflow %u %u\n",
1801 i, underflows[i]);
1802 goto out_unlock;
1803 }
1804 }
1805
1806 ret = -ENOMEM;
1807 newinfo = xt_alloc_table_info(size);
1808 if (!newinfo)
1809 goto out_unlock;
1810
1811 newinfo->number = number;
1812 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1813 newinfo->hook_entry[i] = info->hook_entry[i];
1814 newinfo->underflow[i] = info->underflow[i];
1815 }
1816 entry1 = newinfo->entries[raw_smp_processor_id()];
1817 pos = entry1;
1818 size = total_size;
1819 ret = COMPAT_IP6T_ENTRY_ITERATE(entry0, total_size,
1820 compat_copy_entry_from_user,
1821 &pos, &size, name, newinfo, entry1);
1822 xt_compat_flush_offsets(AF_INET6);
1823 xt_compat_unlock(AF_INET6);
1824 if (ret)
1825 goto free_newinfo;
1826
1827 ret = -ELOOP;
1828 if (!mark_source_chains(newinfo, valid_hooks, entry1))
1829 goto free_newinfo;
1830
1831 i = 0;
1832 ret = IP6T_ENTRY_ITERATE(entry1, newinfo->size, compat_check_entry,
1833 net, name, &i);
1834 if (ret) {
1835 j -= i;
1836 COMPAT_IP6T_ENTRY_ITERATE_CONTINUE(entry0, newinfo->size, i,
1837 compat_release_entry, &j);
1838 IP6T_ENTRY_ITERATE(entry1, newinfo->size, cleanup_entry, net, &i);
1839 xt_free_table_info(newinfo);
1840 return ret;
1841 }
1842
1843 /* And one copy for every other CPU */
1844 for_each_possible_cpu(i)
1845 if (newinfo->entries[i] && newinfo->entries[i] != entry1)
1846 memcpy(newinfo->entries[i], entry1, newinfo->size);
1847
1848 *pinfo = newinfo;
1849 *pentry0 = entry1;
1850 xt_free_table_info(info);
1851 return 0;
1852
1853 free_newinfo:
1854 xt_free_table_info(newinfo);
1855 out:
1856 COMPAT_IP6T_ENTRY_ITERATE(entry0, total_size, compat_release_entry, &j);
1857 return ret;
1858 out_unlock:
1859 xt_compat_flush_offsets(AF_INET6);
1860 xt_compat_unlock(AF_INET6);
1861 goto out;
1862 }
1863
1864 static int
1865 compat_do_replace(struct net *net, void __user *user, unsigned int len)
1866 {
1867 int ret;
1868 struct compat_ip6t_replace tmp;
1869 struct xt_table_info *newinfo;
1870 void *loc_cpu_entry;
1871
1872 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1873 return -EFAULT;
1874
1875 /* overflow check */
1876 if (tmp.size >= INT_MAX / num_possible_cpus())
1877 return -ENOMEM;
1878 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1879 return -ENOMEM;
1880
1881 newinfo = xt_alloc_table_info(tmp.size);
1882 if (!newinfo)
1883 return -ENOMEM;
1884
1885 /* choose the copy that is on our node/cpu */
1886 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1887 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1888 tmp.size) != 0) {
1889 ret = -EFAULT;
1890 goto free_newinfo;
1891 }
1892
1893 ret = translate_compat_table(net, tmp.name, tmp.valid_hooks,
1894 &newinfo, &loc_cpu_entry, tmp.size,
1895 tmp.num_entries, tmp.hook_entry,
1896 tmp.underflow);
1897 if (ret != 0)
1898 goto free_newinfo;
1899
1900 duprintf("compat_do_replace: Translated table\n");
1901
1902 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1903 tmp.num_counters, compat_ptr(tmp.counters));
1904 if (ret)
1905 goto free_newinfo_untrans;
1906 return 0;
1907
1908 free_newinfo_untrans:
1909 IP6T_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, net, NULL);
1910 free_newinfo:
1911 xt_free_table_info(newinfo);
1912 return ret;
1913 }
1914
1915 static int
1916 compat_do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user,
1917 unsigned int len)
1918 {
1919 int ret;
1920
1921 if (!capable(CAP_NET_ADMIN))
1922 return -EPERM;
1923
1924 switch (cmd) {
1925 case IP6T_SO_SET_REPLACE:
1926 ret = compat_do_replace(sock_net(sk), user, len);
1927 break;
1928
1929 case IP6T_SO_SET_ADD_COUNTERS:
1930 ret = do_add_counters(sock_net(sk), user, len, 1);
1931 break;
1932
1933 default:
1934 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
1935 ret = -EINVAL;
1936 }
1937
1938 return ret;
1939 }
1940
1941 struct compat_ip6t_get_entries {
1942 char name[IP6T_TABLE_MAXNAMELEN];
1943 compat_uint_t size;
1944 struct compat_ip6t_entry entrytable[0];
1945 };
1946
1947 static int
1948 compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
1949 void __user *userptr)
1950 {
1951 struct xt_counters *counters;
1952 const struct xt_table_info *private = table->private;
1953 void __user *pos;
1954 unsigned int size;
1955 int ret = 0;
1956 const void *loc_cpu_entry;
1957 unsigned int i = 0;
1958
1959 counters = alloc_counters(table);
1960 if (IS_ERR(counters))
1961 return PTR_ERR(counters);
1962
1963 /* choose the copy that is on our node/cpu, ...
1964 * This choice is lazy (because current thread is
1965 * allowed to migrate to another cpu)
1966 */
1967 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1968 pos = userptr;
1969 size = total_size;
1970 ret = IP6T_ENTRY_ITERATE(loc_cpu_entry, total_size,
1971 compat_copy_entry_to_user,
1972 &pos, &size, counters, &i);
1973
1974 vfree(counters);
1975 return ret;
1976 }
1977
1978 static int
1979 compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr,
1980 int *len)
1981 {
1982 int ret;
1983 struct compat_ip6t_get_entries get;
1984 struct xt_table *t;
1985
1986 if (*len < sizeof(get)) {
1987 duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
1988 return -EINVAL;
1989 }
1990
1991 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1992 return -EFAULT;
1993
1994 if (*len != sizeof(struct compat_ip6t_get_entries) + get.size) {
1995 duprintf("compat_get_entries: %u != %zu\n",
1996 *len, sizeof(get) + get.size);
1997 return -EINVAL;
1998 }
1999
2000 xt_compat_lock(AF_INET6);
2001 t = xt_find_table_lock(net, AF_INET6, get.name);
2002 if (t && !IS_ERR(t)) {
2003 const struct xt_table_info *private = t->private;
2004 struct xt_table_info info;
2005 duprintf("t->private->number = %u\n", private->number);
2006 ret = compat_table_info(private, &info);
2007 if (!ret && get.size == info.size) {
2008 ret = compat_copy_entries_to_user(private->size,
2009 t, uptr->entrytable);
2010 } else if (!ret) {
2011 duprintf("compat_get_entries: I've got %u not %u!\n",
2012 private->size, get.size);
2013 ret = -EAGAIN;
2014 }
2015 xt_compat_flush_offsets(AF_INET6);
2016 module_put(t->me);
2017 xt_table_unlock(t);
2018 } else
2019 ret = t ? PTR_ERR(t) : -ENOENT;
2020
2021 xt_compat_unlock(AF_INET6);
2022 return ret;
2023 }
2024
2025 static int do_ip6t_get_ctl(struct sock *, int, void __user *, int *);
2026
2027 static int
2028 compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2029 {
2030 int ret;
2031
2032 if (!capable(CAP_NET_ADMIN))
2033 return -EPERM;
2034
2035 switch (cmd) {
2036 case IP6T_SO_GET_INFO:
2037 ret = get_info(sock_net(sk), user, len, 1);
2038 break;
2039 case IP6T_SO_GET_ENTRIES:
2040 ret = compat_get_entries(sock_net(sk), user, len);
2041 break;
2042 default:
2043 ret = do_ip6t_get_ctl(sk, cmd, user, len);
2044 }
2045 return ret;
2046 }
2047 #endif
2048
2049 static int
2050 do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
2051 {
2052 int ret;
2053
2054 if (!capable(CAP_NET_ADMIN))
2055 return -EPERM;
2056
2057 switch (cmd) {
2058 case IP6T_SO_SET_REPLACE:
2059 ret = do_replace(sock_net(sk), user, len);
2060 break;
2061
2062 case IP6T_SO_SET_ADD_COUNTERS:
2063 ret = do_add_counters(sock_net(sk), user, len, 0);
2064 break;
2065
2066 default:
2067 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
2068 ret = -EINVAL;
2069 }
2070
2071 return ret;
2072 }
2073
2074 static int
2075 do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2076 {
2077 int ret;
2078
2079 if (!capable(CAP_NET_ADMIN))
2080 return -EPERM;
2081
2082 switch (cmd) {
2083 case IP6T_SO_GET_INFO:
2084 ret = get_info(sock_net(sk), user, len, 0);
2085 break;
2086
2087 case IP6T_SO_GET_ENTRIES:
2088 ret = get_entries(sock_net(sk), user, len);
2089 break;
2090
2091 case IP6T_SO_GET_REVISION_MATCH:
2092 case IP6T_SO_GET_REVISION_TARGET: {
2093 struct ip6t_get_revision rev;
2094 int target;
2095
2096 if (*len != sizeof(rev)) {
2097 ret = -EINVAL;
2098 break;
2099 }
2100 if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
2101 ret = -EFAULT;
2102 break;
2103 }
2104
2105 if (cmd == IP6T_SO_GET_REVISION_TARGET)
2106 target = 1;
2107 else
2108 target = 0;
2109
2110 try_then_request_module(xt_find_revision(AF_INET6, rev.name,
2111 rev.revision,
2112 target, &ret),
2113 "ip6t_%s", rev.name);
2114 break;
2115 }
2116
2117 default:
2118 duprintf("do_ip6t_get_ctl: unknown request %i\n", cmd);
2119 ret = -EINVAL;
2120 }
2121
2122 return ret;
2123 }
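
/*
 * Example (userspace sketch, not part of this file): probing a match
 * revision through the IP6T_SO_GET_REVISION_MATCH handler above.  The raw
 * AF_INET6 socket, the IPPROTO_IPV6 sockopt level and the helper name are
 * assumptions following the usual ip6tables conventions.  getsockopt()
 * succeeding means the requested revision is supported; EPROTONOSUPPORT
 * means the extension exists but not at that revision, ENOENT means no
 * such extension at all.
 */
#if 0
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/netfilter_ipv6/ip6_tables.h>

static int ip6t_match_revision_ok(const char *name, unsigned int revision)
{
	struct ip6t_get_revision rev;
	socklen_t len = sizeof(rev);
	int fd, ret;

	fd = socket(AF_INET6, SOCK_RAW, IPPROTO_RAW);	/* needs CAP_NET_ADMIN */
	if (fd < 0)
		return 0;

	memset(&rev, 0, sizeof(rev));
	strncpy(rev.name, name, sizeof(rev.name) - 1);
	rev.revision = revision;

	/* *len must be exactly sizeof(rev); see do_ip6t_get_ctl() above */
	ret = getsockopt(fd, IPPROTO_IPV6, IP6T_SO_GET_REVISION_MATCH,
			 &rev, &len);
	close(fd);
	return ret >= 0;
}
#endif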
2124
2125 struct xt_table *ip6t_register_table(struct net *net,
2126 const struct xt_table *table,
2127 const struct ip6t_replace *repl)
2128 {
2129 int ret;
2130 struct xt_table_info *newinfo;
2131 struct xt_table_info bootstrap
2132 = { 0, 0, 0, { 0 }, { 0 }, { } };
2133 void *loc_cpu_entry;
2134 struct xt_table *new_table;
2135
2136 newinfo = xt_alloc_table_info(repl->size);
2137 if (!newinfo) {
2138 ret = -ENOMEM;
2139 goto out;
2140 }
2141
2142 	/* choose the copy on our node/cpu, but don't care about preemption */
2143 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
2144 memcpy(loc_cpu_entry, repl->entries, repl->size);
2145
2146 ret = translate_table(net, table->name, table->valid_hooks,
2147 newinfo, loc_cpu_entry, repl->size,
2148 repl->num_entries,
2149 repl->hook_entry,
2150 repl->underflow);
2151 if (ret != 0)
2152 goto out_free;
2153
2154 new_table = xt_register_table(net, table, &bootstrap, newinfo);
2155 if (IS_ERR(new_table)) {
2156 ret = PTR_ERR(new_table);
2157 goto out_free;
2158 }
2159 return new_table;
2160
2161 out_free:
2162 xt_free_table_info(newinfo);
2163 out:
2164 return ERR_PTR(ret);
2165 }
2166
2167 void ip6t_unregister_table(struct net *net, struct xt_table *table)
2168 {
2169 struct xt_table_info *private;
2170 void *loc_cpu_entry;
2171 struct module *table_owner = table->me;
2172
2173 private = xt_unregister_table(table);
2174
2175 /* Decrease module usage counts and free resources */
2176 loc_cpu_entry = private->entries[raw_smp_processor_id()];
2177 IP6T_ENTRY_ITERATE(loc_cpu_entry, private->size, cleanup_entry, net, NULL);
2178 if (private->number > private->initial_entries)
2179 module_put(table_owner);
2180 xt_free_table_info(private);
2181 }
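
/*
 * Example (illustrative sketch, not part of this file): how a table module
 * typically drives ip6t_register_table()/ip6t_unregister_table() from its
 * pernet init/exit hooks, modeled on the "filter" table.  The identifiers
 * packet_filter, FILTER_VALID_HOOKS and the example_* function names are
 * assumptions; error handling is trimmed to the essentials.
 */
#if 0
static const struct xt_table packet_filter = {
	.name		= "filter",
	.valid_hooks	= FILTER_VALID_HOOKS,
	.me		= THIS_MODULE,
	.af		= NFPROTO_IPV6,
};

static int __net_init example_filter_net_init(struct net *net)
{
	struct ip6t_replace *repl;

	/* build an initial ruleset: one accept policy per hook + ERROR rule */
	repl = ip6t_alloc_initial_table(&packet_filter);
	if (repl == NULL)
		return -ENOMEM;

	net->ipv6.ip6table_filter =
		ip6t_register_table(net, &packet_filter, repl);
	kfree(repl);
	if (IS_ERR(net->ipv6.ip6table_filter))
		return PTR_ERR(net->ipv6.ip6table_filter);
	return 0;
}

static void __net_exit example_filter_net_exit(struct net *net)
{
	ip6t_unregister_table(net, net->ipv6.ip6table_filter);
}
#endif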
2182
2183 /* Returns true if the type and code are matched by the range, false otherwise */
2184 static inline bool
2185 icmp6_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
2186 u_int8_t type, u_int8_t code,
2187 bool invert)
2188 {
2189 return (type == test_type && code >= min_code && code <= max_code)
2190 ^ invert;
2191 }
2192
2193 static bool
2194 icmp6_match(const struct sk_buff *skb, const struct xt_match_param *par)
2195 {
2196 const struct icmp6hdr *ic;
2197 struct icmp6hdr _icmph;
2198 const struct ip6t_icmp *icmpinfo = par->matchinfo;
2199
2200 /* Must not be a fragment. */
2201 if (par->fragoff != 0)
2202 return false;
2203
2204 ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
2205 if (ic == NULL) {
2206 /* We've been asked to examine this packet, and we
2207 * can't. Hence, no choice but to drop.
2208 */
2209 		duprintf("Dropping evil ICMPv6 tinygram.\n");
2210 *par->hotdrop = true;
2211 return false;
2212 }
2213
2214 return icmp6_type_code_match(icmpinfo->type,
2215 icmpinfo->code[0],
2216 icmpinfo->code[1],
2217 ic->icmp6_type, ic->icmp6_code,
2218 !!(icmpinfo->invflags&IP6T_ICMP_INV));
2219 }
2220
2221 /* Called when user tries to insert an entry of this type. */
2222 static bool icmp6_checkentry(const struct xt_mtchk_param *par)
2223 {
2224 const struct ip6t_icmp *icmpinfo = par->matchinfo;
2225
2226 /* Must specify no unknown invflags */
2227 return !(icmpinfo->invflags & ~IP6T_ICMP_INV);
2228 }
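
/*
 * Worked example (illustrative, not part of this file): the match data a
 * rule such as "-p icmpv6 --icmpv6-type echo-request" would hand to
 * icmp6_match() above.  With this matchinfo, icmp6_type_code_match()
 * accepts ICMPv6 type 128 with any code (0..255) and does not invert the
 * result; the variable name is an assumption.
 */
#if 0
static const struct ip6t_icmp echo_request_matchinfo = {
	.type	  = ICMPV6_ECHO_REQUEST,	/* 128 */
	.code	  = { 0, 0xff },		/* min_code .. max_code */
	.invflags = 0,				/* IP6T_ICMP_INV not set */
};
#endif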
2229
2230 /* The built-in targets: standard (NULL) and error. */
2231 static struct xt_target ip6t_standard_target __read_mostly = {
2232 .name = IP6T_STANDARD_TARGET,
2233 .targetsize = sizeof(int),
2234 .family = NFPROTO_IPV6,
2235 #ifdef CONFIG_COMPAT
2236 .compatsize = sizeof(compat_int_t),
2237 .compat_from_user = compat_standard_from_user,
2238 .compat_to_user = compat_standard_to_user,
2239 #endif
2240 };
2241
2242 static struct xt_target ip6t_error_target __read_mostly = {
2243 .name = IP6T_ERROR_TARGET,
2244 .target = ip6t_error,
2245 .targetsize = IP6T_FUNCTION_MAXNAMELEN,
2246 .family = NFPROTO_IPV6,
2247 };
2248
2249 static struct nf_sockopt_ops ip6t_sockopts = {
2250 .pf = PF_INET6,
2251 .set_optmin = IP6T_BASE_CTL,
2252 .set_optmax = IP6T_SO_SET_MAX+1,
2253 .set = do_ip6t_set_ctl,
2254 #ifdef CONFIG_COMPAT
2255 .compat_set = compat_do_ip6t_set_ctl,
2256 #endif
2257 .get_optmin = IP6T_BASE_CTL,
2258 .get_optmax = IP6T_SO_GET_MAX+1,
2259 .get = do_ip6t_get_ctl,
2260 #ifdef CONFIG_COMPAT
2261 .compat_get = compat_do_ip6t_get_ctl,
2262 #endif
2263 .owner = THIS_MODULE,
2264 };
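
/*
 * Example (userspace sketch, not built here): how an ip6tables-style tool
 * is expected to reach the handlers wired up in ip6t_sockopts above.  It
 * assumes the usual conventions of a raw AF_INET6 socket, CAP_NET_ADMIN and
 * the IPPROTO_IPV6 sockopt level; the table name "filter" and the helper
 * name are only illustrations.
 */
#if 0
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/netfilter_ipv6/ip6_tables.h>

static int get_filter_table_info(struct ip6t_getinfo *info)
{
	socklen_t len = sizeof(*info);
	int fd, ret;

	fd = socket(AF_INET6, SOCK_RAW, IPPROTO_RAW);
	if (fd < 0)
		return -1;

	memset(info, 0, sizeof(*info));
	strcpy(info->name, "filter");		/* table to query */

	/* dispatched to do_ip6t_get_ctl() -> get_info() in this file */
	ret = getsockopt(fd, IPPROTO_IPV6, IP6T_SO_GET_INFO, info, &len);
	close(fd);
	return ret;				/* 0 on success, -1 + errno otherwise */
}
#endif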
2265
2266 static struct xt_match icmp6_matchstruct __read_mostly = {
2267 .name = "icmp6",
2268 .match = icmp6_match,
2269 .matchsize = sizeof(struct ip6t_icmp),
2270 .checkentry = icmp6_checkentry,
2271 .proto = IPPROTO_ICMPV6,
2272 .family = NFPROTO_IPV6,
2273 };
2274
2275 static int __net_init ip6_tables_net_init(struct net *net)
2276 {
2277 return xt_proto_init(net, NFPROTO_IPV6);
2278 }
2279
2280 static void __net_exit ip6_tables_net_exit(struct net *net)
2281 {
2282 xt_proto_fini(net, NFPROTO_IPV6);
2283 }
2284
2285 static struct pernet_operations ip6_tables_net_ops = {
2286 .init = ip6_tables_net_init,
2287 .exit = ip6_tables_net_exit,
2288 };
2289
2290 static int __init ip6_tables_init(void)
2291 {
2292 int ret;
2293
2294 ret = register_pernet_subsys(&ip6_tables_net_ops);
2295 if (ret < 0)
2296 goto err1;
2297
2298 	/* No one else will be downing the sem now, so we won't sleep */
2299 ret = xt_register_target(&ip6t_standard_target);
2300 if (ret < 0)
2301 goto err2;
2302 ret = xt_register_target(&ip6t_error_target);
2303 if (ret < 0)
2304 goto err3;
2305 ret = xt_register_match(&icmp6_matchstruct);
2306 if (ret < 0)
2307 goto err4;
2308
2309 /* Register setsockopt */
2310 ret = nf_register_sockopt(&ip6t_sockopts);
2311 if (ret < 0)
2312 goto err5;
2313
2314 printk(KERN_INFO "ip6_tables: (C) 2000-2006 Netfilter Core Team\n");
2315 return 0;
2316
2317 err5:
2318 xt_unregister_match(&icmp6_matchstruct);
2319 err4:
2320 xt_unregister_target(&ip6t_error_target);
2321 err3:
2322 xt_unregister_target(&ip6t_standard_target);
2323 err2:
2324 unregister_pernet_subsys(&ip6_tables_net_ops);
2325 err1:
2326 return ret;
2327 }
2328
2329 static void __exit ip6_tables_fini(void)
2330 {
2331 nf_unregister_sockopt(&ip6t_sockopts);
2332
2333 xt_unregister_match(&icmp6_matchstruct);
2334 xt_unregister_target(&ip6t_error_target);
2335 xt_unregister_target(&ip6t_standard_target);
2336
2337 unregister_pernet_subsys(&ip6_tables_net_ops);
2338 }
2339
2340 /*
2341  * Find the offset of the specified header, or the protocol number of the
2342  * last header if target < 0. "Last header" means a transport protocol
2343  * header, ESP, or "No next header".
2344  *
2345  * If the target header is found, its offset is stored in *offset and its
2346  * protocol number is returned. Otherwise, a negative errno is returned.
2347  *
2348  * If the first fragment doesn't contain the final protocol header or
2349  * NEXTHDR_NONE, it is considered invalid.
2350  *
2351  * Note that a non-first fragment is a special case: "the protocol number
2352  * of the last header" is the "next header" field of the Fragment header.
2353  * In this case *offset is meaningless, and the fragment offset is stored
2354  * in *fragoff if fragoff isn't NULL.
2355  *
2356  */
2357 int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
2358 int target, unsigned short *fragoff)
2359 {
2360 unsigned int start = skb_network_offset(skb) + sizeof(struct ipv6hdr);
2361 u8 nexthdr = ipv6_hdr(skb)->nexthdr;
2362 unsigned int len = skb->len - start;
2363
2364 if (fragoff)
2365 *fragoff = 0;
2366
2367 while (nexthdr != target) {
2368 struct ipv6_opt_hdr _hdr, *hp;
2369 unsigned int hdrlen;
2370
2371 if ((!ipv6_ext_hdr(nexthdr)) || nexthdr == NEXTHDR_NONE) {
2372 if (target < 0)
2373 break;
2374 return -ENOENT;
2375 }
2376
2377 hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr);
2378 if (hp == NULL)
2379 return -EBADMSG;
2380 if (nexthdr == NEXTHDR_FRAGMENT) {
2381 unsigned short _frag_off;
2382 __be16 *fp;
2383 fp = skb_header_pointer(skb,
2384 start+offsetof(struct frag_hdr,
2385 frag_off),
2386 sizeof(_frag_off),
2387 &_frag_off);
2388 if (fp == NULL)
2389 return -EBADMSG;
2390
2391 _frag_off = ntohs(*fp) & ~0x7;
2392 if (_frag_off) {
2393 if (target < 0 &&
2394 ((!ipv6_ext_hdr(hp->nexthdr)) ||
2395 hp->nexthdr == NEXTHDR_NONE)) {
2396 if (fragoff)
2397 *fragoff = _frag_off;
2398 return hp->nexthdr;
2399 }
2400 return -ENOENT;
2401 }
2402 hdrlen = 8;
2403 } else if (nexthdr == NEXTHDR_AUTH)
2404 hdrlen = (hp->hdrlen + 2) << 2;
2405 else
2406 hdrlen = ipv6_optlen(hp);
2407
2408 nexthdr = hp->nexthdr;
2409 len -= hdrlen;
2410 start += hdrlen;
2411 }
2412
2413 *offset = start;
2414 return nexthdr;
2415 }
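
/*
 * Example (illustrative kernel-side sketch, not part of this file): using
 * ipv6_find_hdr() to locate the TCP header of an IPv6 packet, skipping any
 * extension headers on the way.  The helper name and the _tcph buffer are
 * assumptions; a missing header or malformed packet shows up as the
 * negative return values documented above.
 */
#if 0
static const struct tcphdr *example_find_tcp(const struct sk_buff *skb,
					     struct tcphdr *_tcph)
{
	unsigned int thoff;
	int proto;

	proto = ipv6_find_hdr(skb, &thoff, IPPROTO_TCP, NULL);
	if (proto != IPPROTO_TCP)	/* -ENOENT or -EBADMSG */
		return NULL;

	return skb_header_pointer(skb, thoff, sizeof(*_tcph), _tcph);
}
#endif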
2416
2417 EXPORT_SYMBOL(ip6t_register_table);
2418 EXPORT_SYMBOL(ip6t_unregister_table);
2419 EXPORT_SYMBOL(ip6t_do_table);
2420 EXPORT_SYMBOL(ip6t_ext_hdr);
2421 EXPORT_SYMBOL(ipv6_find_hdr);
2422
2423 module_init(ip6_tables_init);
2424 module_exit(ip6_tables_fini);