/* netfilter: Use LOGLEVEL_<FOO> defines
 * net/ipv6/netfilter/ip6_tables.c
 */
1 /*
2 * Packet matching code.
3 *
4 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
5 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
6 * Copyright (c) 2006-2010 Patrick McHardy <kaber@trash.net>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14
15 #include <linux/kernel.h>
16 #include <linux/capability.h>
17 #include <linux/in.h>
18 #include <linux/skbuff.h>
19 #include <linux/kmod.h>
20 #include <linux/vmalloc.h>
21 #include <linux/netdevice.h>
22 #include <linux/module.h>
23 #include <linux/poison.h>
24 #include <linux/icmpv6.h>
25 #include <net/ipv6.h>
26 #include <net/compat.h>
27 #include <asm/uaccess.h>
28 #include <linux/mutex.h>
29 #include <linux/proc_fs.h>
30 #include <linux/err.h>
31 #include <linux/cpumask.h>
32
33 #include <linux/netfilter_ipv6/ip6_tables.h>
34 #include <linux/netfilter/x_tables.h>
35 #include <net/netfilter/nf_log.h>
36 #include "../../netfilter/xt_repldata.h"
37
38 MODULE_LICENSE("GPL");
39 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
40 MODULE_DESCRIPTION("IPv6 packet filter");
41
/*#define DEBUG_IP_FIREWALL*/
/*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
/*#define DEBUG_IP_FIREWALL_USER*/

/* dprintf(): packet-path debug output; compiled out unless
 * DEBUG_IP_FIREWALL is defined above. */
#ifdef DEBUG_IP_FIREWALL
#define dprintf(format, args...) pr_info(format , ## args)
#else
#define dprintf(format, args...)
#endif

/* duprintf(): ruleset/userspace-interface debug output; compiled out
 * unless DEBUG_IP_FIREWALL_USER is defined above. */
#ifdef DEBUG_IP_FIREWALL_USER
#define duprintf(format, args...) pr_info(format , ## args)
#else
#define duprintf(format, args...)
#endif

/* IP_NF_ASSERT(): WARN_ON-backed assertion, active only when the
 * kernel is built with CONFIG_NETFILTER_DEBUG. */
#ifdef CONFIG_NETFILTER_DEBUG
#define IP_NF_ASSERT(x)	WARN_ON(!(x))
#else
#define IP_NF_ASSERT(x)
#endif

#if 0
/* All the better to debug you with... */
#define static
#define inline
#endif
69
/* Allocate and fill in the built-in replacement blob for table @info.
 * Thin wrapper over the xt_repldata.h helper macro (ip6t/IP6T select
 * the IPv6 entry layout). Caller owns the returned allocation. */
void *ip6t_alloc_initial_table(const struct xt_table *info)
{
	return xt_alloc_initial_table(ip6t, IP6T);
}
EXPORT_SYMBOL_GPL(ip6t_alloc_initial_table);
75
76 /*
77 We keep a set of rules for each CPU, so we can avoid write-locking
78 them in the softirq when updating the counters and therefore
79 only need to read-lock in the softirq; doing a write_lock_bh() in user
80 context stops packets coming through and allows user context to read
81 the counters or update the rules.
82
83 Hence the start of any table is given by get_table() below. */
84
/* Returns whether matches rule or not. */
/* Performance critical - called for every packet */
static inline bool
ip6_packet_match(const struct sk_buff *skb,
		 const char *indev,
		 const char *outdev,
		 const struct ip6t_ip6 *ip6info,
		 unsigned int *protoff,
		 int *fragoff, bool *hotdrop)
{
	unsigned long ret;
	const struct ipv6hdr *ipv6 = ipv6_hdr(skb);

/* XOR the raw comparison result with the rule's corresponding
 * IP6T_INV_* bit, so a set invert flag flips the sense of the test. */
#define FWINV(bool, invflg) ((bool) ^ !!(ip6info->invflags & (invflg)))

	/* Masked source/destination address comparison, honouring the
	 * SRCIP/DSTIP invert flags. */
	if (FWINV(ipv6_masked_addr_cmp(&ipv6->saddr, &ip6info->smsk,
				       &ip6info->src), IP6T_INV_SRCIP) ||
	    FWINV(ipv6_masked_addr_cmp(&ipv6->daddr, &ip6info->dmsk,
				       &ip6info->dst), IP6T_INV_DSTIP)) {
		dprintf("Source or dest mismatch.\n");
/*
		dprintf("SRC: %u. Mask: %u. Target: %u.%s\n", ip->saddr,
			ipinfo->smsk.s_addr, ipinfo->src.s_addr,
			ipinfo->invflags & IP6T_INV_SRCIP ? " (INV)" : "");
		dprintf("DST: %u. Mask: %u. Target: %u.%s\n", ip->daddr,
			ipinfo->dmsk.s_addr, ipinfo->dst.s_addr,
			ipinfo->invflags & IP6T_INV_DSTIP ? " (INV)" : "");*/
		return false;
	}

	/* Input interface name match (mask allows wildcards). */
	ret = ifname_compare_aligned(indev, ip6info->iniface, ip6info->iniface_mask);

	if (FWINV(ret != 0, IP6T_INV_VIA_IN)) {
		dprintf("VIA in mismatch (%s vs %s).%s\n",
			indev, ip6info->iniface,
			ip6info->invflags&IP6T_INV_VIA_IN ?" (INV)":"");
		return false;
	}

	/* Output interface name match. */
	ret = ifname_compare_aligned(outdev, ip6info->outiface, ip6info->outiface_mask);

	if (FWINV(ret != 0, IP6T_INV_VIA_OUT)) {
		dprintf("VIA out mismatch (%s vs %s).%s\n",
			outdev, ip6info->outiface,
			ip6info->invflags&IP6T_INV_VIA_OUT ?" (INV)":"");
		return false;
	}

/* ... might want to do something with class and flowlabel here ... */

	/* look for the desired protocol header */
	if((ip6info->flags & IP6T_F_PROTO)) {
		int protohdr;
		unsigned short _frag_off;

		/* Walk the extension-header chain; fills *protoff with
		 * the transport header offset. */
		protohdr = ipv6_find_hdr(skb, protoff, -1, &_frag_off, NULL);
		if (protohdr < 0) {
			/* Header walk failed on a non-fragment: the
			 * packet is malformed, drop it hard. */
			if (_frag_off == 0)
				*hotdrop = true;
			return false;
		}
		*fragoff = _frag_off;

		dprintf("Packet protocol %hi ?= %s%hi.\n",
			protohdr,
			ip6info->invflags & IP6T_INV_PROTO ? "!":"",
			ip6info->proto);

		if (ip6info->proto == protohdr) {
			if(ip6info->invflags & IP6T_INV_PROTO) {
				return false;
			}
			return true;
		}

		/* We need match for the '-p all', too! */
		if ((ip6info->proto != 0) &&
			!(ip6info->invflags & IP6T_INV_PROTO))
			return false;
	}
	return true;
}
167
168 /* should be ip6 safe */
169 static bool
170 ip6_checkentry(const struct ip6t_ip6 *ipv6)
171 {
172 if (ipv6->flags & ~IP6T_F_MASK) {
173 duprintf("Unknown flag bits set: %08X\n",
174 ipv6->flags & ~IP6T_F_MASK);
175 return false;
176 }
177 if (ipv6->invflags & ~IP6T_INV_MASK) {
178 duprintf("Unknown invflag bits set: %08X\n",
179 ipv6->invflags & ~IP6T_INV_MASK);
180 return false;
181 }
182 return true;
183 }
184
/* Target handler for built-in ERROR entries (chain heads / table end
 * markers). Reaching one at runtime indicates a broken ruleset: log
 * (ratelimited) and drop the packet. */
static unsigned int
ip6t_error(struct sk_buff *skb, const struct xt_action_param *par)
{
	net_info_ratelimited("error: `%s'\n", (const char *)par->targinfo);

	return NF_DROP;
}
192
/* Return the rule located @offset bytes from the start of the table blob. */
static inline struct ip6t_entry *
get_entry(const void *base, unsigned int offset)
{
	return (struct ip6t_entry *)(base + offset);
}
198
/* All zeroes == unconditional rule. */
/* Mildly perf critical (only if packet tracing is on) */
static inline bool unconditional(const struct ip6t_ip6 *ipv6)
{
	/* zero-initialized template to compare against */
	static const struct ip6t_ip6 uncond;

	return memcmp(ipv6, &uncond, sizeof(uncond)) == 0;
}
207
/* const-correct wrapper around ip6t_get_target(): fetch the target
 * record of entry @e without dropping constness at the call site. */
static inline const struct xt_entry_target *
ip6t_get_target_c(const struct ip6t_entry *e)
{
	return ip6t_get_target((struct ip6t_entry *)e);
}
213
#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
/* This cries for unification! */
/* Hook number -> built-in chain name used in TRACE log lines. */
static const char *const hooknames[] = {
	[NF_INET_PRE_ROUTING]		= "PREROUTING",
	[NF_INET_LOCAL_IN]		= "INPUT",
	[NF_INET_FORWARD]		= "FORWARD",
	[NF_INET_LOCAL_OUT]		= "OUTPUT",
	[NF_INET_POST_ROUTING]		= "POSTROUTING",
};

/* Index into comments[] describing why a rule terminated the trace. */
enum nf_ip_trace_comments {
	NF_IP6_TRACE_COMMENT_RULE,
	NF_IP6_TRACE_COMMENT_RETURN,
	NF_IP6_TRACE_COMMENT_POLICY,
};

static const char *const comments[] = {
	[NF_IP6_TRACE_COMMENT_RULE]	= "rule",
	[NF_IP6_TRACE_COMMENT_RETURN]	= "return",
	[NF_IP6_TRACE_COMMENT_POLICY]	= "policy",
};

/* Fixed log parameters for TRACE output: warning level, all log flags. */
static struct nf_loginfo trace_loginfo = {
	.type = NF_LOG_TYPE_LOG,
	.u = {
		.log = {
			.level = LOGLEVEL_WARNING,
			.logflags = NF_LOG_MASK,
		},
	},
};
245
/* Mildly perf critical (only if packet tracing is on) */
/*
 * Walk helper for trace_packet(): for the current entry @s on the way to
 * the matched entry @e, keep *chainname and *rulenum up to date and pick
 * the right comment (rule/return/policy). Returns 1 once @e is reached,
 * 0 to keep walking.
 */
static inline int
get_chainname_rulenum(const struct ip6t_entry *s, const struct ip6t_entry *e,
		      const char *hookname, const char **chainname,
		      const char **comment, unsigned int *rulenum)
{
	const struct xt_standard_target *t = (void *)ip6t_get_target_c(s);

	if (strcmp(t->target.u.kernel.target->name, XT_ERROR_TARGET) == 0) {
		/* Head of user chain: ERROR target with chainname */
		*chainname = t->target.data;
		(*rulenum) = 0;
	} else if (s == e) {
		(*rulenum)++;

		if (s->target_offset == sizeof(struct ip6t_entry) &&
		    strcmp(t->target.u.kernel.target->name,
			   XT_STANDARD_TARGET) == 0 &&
		    t->verdict < 0 &&
		    unconditional(&s->ipv6)) {
			/* Tail of chains: STANDARD target (return/policy) */
			*comment = *chainname == hookname
				? comments[NF_IP6_TRACE_COMMENT_POLICY]
				: comments[NF_IP6_TRACE_COMMENT_RETURN];
		}
		return 1;
	} else
		(*rulenum)++;

	return 0;
}
277
/* Emit a "TRACE: table:chain:comment:rulenum" log line for @skb,
 * identifying the rule @e that matched. Called from ip6t_do_table()
 * when skb->nf_trace is set. */
static void trace_packet(const struct sk_buff *skb,
			 unsigned int hook,
			 const struct net_device *in,
			 const struct net_device *out,
			 const char *tablename,
			 const struct xt_table_info *private,
			 const struct ip6t_entry *e)
{
	const void *table_base;
	const struct ip6t_entry *root;
	const char *hookname, *chainname, *comment;
	const struct ip6t_entry *iter;
	unsigned int rulenum = 0;
	struct net *net = dev_net(in ? in : out);

	/* Walk this CPU's copy from the hook's entry point up to @e,
	 * accumulating chain name and rule number. */
	table_base = private->entries[smp_processor_id()];
	root = get_entry(table_base, private->hook_entry[hook]);

	hookname = chainname = hooknames[hook];
	comment = comments[NF_IP6_TRACE_COMMENT_RULE];

	xt_entry_foreach(iter, root, private->size - private->hook_entry[hook])
		if (get_chainname_rulenum(iter, e, hookname,
		    &chainname, &comment, &rulenum) != 0)
			break;

	nf_log_trace(net, AF_INET6, hook, skb, in, out, &trace_loginfo,
		     "TRACE: %s:%s:%s:%u ",
		     tablename, chainname, comment, rulenum);
}
#endif
308 #endif
309
/* Advance to the entry immediately following @entry in the blob. */
static inline __pure struct ip6t_entry *
ip6t_next_entry(const struct ip6t_entry *entry)
{
	return (void *)entry + entry->next_offset;
}
315
/* Returns one of the generic firewall policies, like NF_ACCEPT.
 *
 * Core traversal loop: walks this CPU's copy of @table's ruleset for
 * @hook, evaluating matches and running targets until a verdict is
 * reached. Runs with BHs disabled and inside the xt_write_recseq
 * window so counter readers can detect in-flight updates.
 */
unsigned int
ip6t_do_table(struct sk_buff *skb,
	      unsigned int hook,
	      const struct net_device *in,
	      const struct net_device *out,
	      struct xt_table *table)
{
	static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
	/* Initializing verdict to NF_DROP keeps gcc happy. */
	unsigned int verdict = NF_DROP;
	const char *indev, *outdev;
	const void *table_base;
	struct ip6t_entry *e, **jumpstack;
	unsigned int *stackptr, origptr, cpu;
	const struct xt_table_info *private;
	struct xt_action_param acpar;
	unsigned int addend;

	/* Initialization */
	indev = in ? in->name : nulldevname;
	outdev = out ? out->name : nulldevname;
	/* We handle fragments by dealing with the first fragment as
	 * if it was a normal packet.  All other fragments are treated
	 * normally, except that they will NEVER match rules that ask
	 * things we don't know, ie. tcp syn flag or ports).  If the
	 * rule is also a fragment-specific rule, non-fragments won't
	 * match it. */
	acpar.hotdrop = false;
	acpar.in      = in;
	acpar.out     = out;
	acpar.family  = NFPROTO_IPV6;
	acpar.hooknum = hook;

	IP_NF_ASSERT(table->valid_hooks & (1 << hook));

	local_bh_disable();
	addend = xt_write_recseq_begin();
	private = table->private;
	/*
	 * Ensure we load private-> members after we've fetched the base
	 * pointer.
	 */
	smp_read_barrier_depends();
	cpu        = smp_processor_id();
	table_base = private->entries[cpu];
	/* per-cpu stack of return addresses for jumps into user chains */
	jumpstack  = (struct ip6t_entry **)private->jumpstack[cpu];
	stackptr   = per_cpu_ptr(private->stackptr, cpu);
	origptr    = *stackptr;

	e = get_entry(table_base, private->hook_entry[hook]);

	do {
		const struct xt_entry_target *t;
		const struct xt_entry_match *ematch;

		IP_NF_ASSERT(e);
		acpar.thoff = 0;
		if (!ip6_packet_match(skb, indev, outdev, &e->ipv6,
		    &acpar.thoff, &acpar.fragoff, &acpar.hotdrop)) {
 no_match:
			e = ip6t_next_entry(e);
			continue;
		}

		/* Run every extension match on the rule; any failure
		 * means the rule as a whole does not match. */
		xt_ematch_foreach(ematch, e) {
			acpar.match     = ematch->u.kernel.match;
			acpar.matchinfo = ematch->data;
			if (!acpar.match->match(skb, &acpar))
				goto no_match;
		}

		ADD_COUNTER(e->counters, skb->len, 1);

		t = ip6t_get_target_c(e);
		IP_NF_ASSERT(t->u.kernel.target);

#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
		/* The packet is traced: log it */
		if (unlikely(skb->nf_trace))
			trace_packet(skb, hook, in, out,
				     table->name, private, e);
#endif
		/* Standard target? */
		if (!t->u.kernel.target->target) {
			int v;

			v = ((struct xt_standard_target *)t)->verdict;
			if (v < 0) {
				/* Pop from stack? */
				if (v != XT_RETURN) {
					/* absolute verdict (ACCEPT/DROP/...) */
					verdict = (unsigned int)(-v) - 1;
					break;
				}
				/* RETURN at stack bottom falls through to
				 * the hook's underflow (policy) rule. */
				if (*stackptr <= origptr)
					e = get_entry(table_base,
					    private->underflow[hook]);
				else
					e = ip6t_next_entry(jumpstack[--*stackptr]);
				continue;
			}
			/* A jump (not a GOTO, not a fall-through to the
			 * next rule) pushes a return address. */
			if (table_base + v != ip6t_next_entry(e) &&
			    !(e->ipv6.flags & IP6T_F_GOTO)) {
				if (*stackptr >= private->stacksize) {
					verdict = NF_DROP;
					break;
				}
				jumpstack[(*stackptr)++] = e;
			}

			e = get_entry(table_base, v);
			continue;
		}

		/* Non-standard target: run its handler. */
		acpar.target   = t->u.kernel.target;
		acpar.targinfo = t->data;

		verdict = t->u.kernel.target->target(skb, &acpar);
		if (verdict == XT_CONTINUE)
			e = ip6t_next_entry(e);
		else
			/* Verdict */
			break;
	} while (!acpar.hotdrop);

	*stackptr = origptr;

	xt_write_recseq_end(addend);
	local_bh_enable();

#ifdef DEBUG_ALLOW_ALL
	return NF_ACCEPT;
#else
	if (acpar.hotdrop)
		return NF_DROP;
	else return verdict;
#endif
}
454
/* Figures out from what hook each rule can be called: returns 0 if
   there are loops.  Puts hook bitmask in comefrom. */
static int
mark_source_chains(const struct xt_table_info *newinfo,
		   unsigned int valid_hooks, void *entry0)
{
	unsigned int hook;

	/* No recursion; use packet counter to save back ptrs (reset
	   to 0 as we leave), and comefrom to save source hook bitmask */
	for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
		unsigned int pos = newinfo->hook_entry[hook];
		struct ip6t_entry *e = (struct ip6t_entry *)(entry0 + pos);

		if (!(valid_hooks & (1 << hook)))
			continue;

		/* Set initial back pointer. */
		e->counters.pcnt = pos;

		for (;;) {
			const struct xt_standard_target *t
				= (void *)ip6t_get_target_c(e);
			int visited = e->comefrom & (1 << hook);

			/* Bit NF_INET_NUMHOOKS flags "on the current
			 * walk"; hitting it again means a cycle. */
			if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
				pr_err("iptables: loop hook %u pos %u %08X.\n",
				       hook, pos, e->comefrom);
				return 0;
			}
			e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));

			/* Unconditional return/END. */
			if ((e->target_offset == sizeof(struct ip6t_entry) &&
			     (strcmp(t->target.u.user.name,
				     XT_STANDARD_TARGET) == 0) &&
			     t->verdict < 0 &&
			     unconditional(&e->ipv6)) || visited) {
				unsigned int oldpos, size;

				if ((strcmp(t->target.u.user.name,
					    XT_STANDARD_TARGET) == 0) &&
				    t->verdict < -NF_MAX_VERDICT - 1) {
					duprintf("mark_source_chains: bad "
						"negative verdict (%i)\n",
								t->verdict);
					return 0;
				}

				/* Return: backtrack through the last
				   big jump. */
				do {
					e->comefrom ^= (1<<NF_INET_NUMHOOKS);
#ifdef DEBUG_IP_FIREWALL_USER
					if (e->comefrom
					    & (1 << NF_INET_NUMHOOKS)) {
						duprintf("Back unset "
							 "on hook %u "
							 "rule %u\n",
							 hook, pos);
					}
#endif
					/* pcnt stashes the back pointer
					 * while walking; clear on the
					 * way out. */
					oldpos = pos;
					pos = e->counters.pcnt;
					e->counters.pcnt = 0;

					/* We're at the start. */
					if (pos == oldpos)
						goto next;

					e = (struct ip6t_entry *)
						(entry0 + pos);
				} while (oldpos == pos + e->next_offset);

				/* Move along one */
				size = e->next_offset;
				e = (struct ip6t_entry *)
					(entry0 + pos + size);
				e->counters.pcnt = pos;
				pos += size;
			} else {
				int newpos = t->verdict;

				if (strcmp(t->target.u.user.name,
					   XT_STANDARD_TARGET) == 0 &&
				    newpos >= 0) {
					if (newpos > newinfo->size -
						sizeof(struct ip6t_entry)) {
						duprintf("mark_source_chains: "
							"bad verdict (%i)\n",
								newpos);
						return 0;
					}
					/* This a jump; chase it. */
					duprintf("Jump rule %u -> %u\n",
						 pos, newpos);
				} else {
					/* ... this is a fallthru */
					newpos = pos + e->next_offset;
				}
				e = (struct ip6t_entry *)
					(entry0 + newpos);
				e->counters.pcnt = pos;
				pos = newpos;
			}
		}
		next:
		duprintf("Finished chain %u\n", hook);
	}
	return 1;
}
566
567 static void cleanup_match(struct xt_entry_match *m, struct net *net)
568 {
569 struct xt_mtdtor_param par;
570
571 par.net = net;
572 par.match = m->u.kernel.match;
573 par.matchinfo = m->data;
574 par.family = NFPROTO_IPV6;
575 if (par.match->destroy != NULL)
576 par.match->destroy(&par);
577 module_put(par.match->me);
578 }
579
/* Structural sanity checks on rule @e that need no extension modules:
 * valid ip6 flags and a target record that fits within the entry.
 * Returns 0 on success, -EINVAL on malformed input. */
static int
check_entry(const struct ip6t_entry *e, const char *name)
{
	const struct xt_entry_target *t;

	if (!ip6_checkentry(&e->ipv6)) {
		duprintf("ip_tables: ip check failed %p %s.\n", e, name);
		return -EINVAL;
	}

	/* at least a minimal target header must fit before next_offset */
	if (e->target_offset + sizeof(struct xt_entry_target) >
	    e->next_offset)
		return -EINVAL;

	t = ip6t_get_target_c(e);
	if (e->target_offset + t->u.target_size > e->next_offset)
		return -EINVAL;

	return 0;
}
600
601 static int check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
602 {
603 const struct ip6t_ip6 *ipv6 = par->entryinfo;
604 int ret;
605
606 par->match = m->u.kernel.match;
607 par->matchinfo = m->data;
608
609 ret = xt_check_match(par, m->u.match_size - sizeof(*m),
610 ipv6->proto, ipv6->invflags & IP6T_INV_PROTO);
611 if (ret < 0) {
612 duprintf("ip_tables: check failed for `%s'.\n",
613 par.match->name);
614 return ret;
615 }
616 return 0;
617 }
618
619 static int
620 find_check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
621 {
622 struct xt_match *match;
623 int ret;
624
625 match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name,
626 m->u.user.revision);
627 if (IS_ERR(match)) {
628 duprintf("find_check_match: `%s' not found\n", m->u.user.name);
629 return PTR_ERR(match);
630 }
631 m->u.kernel.match = match;
632
633 ret = check_match(m, par);
634 if (ret)
635 goto err;
636
637 return 0;
638 err:
639 module_put(m->u.kernel.match->me);
640 return ret;
641 }
642
643 static int check_target(struct ip6t_entry *e, struct net *net, const char *name)
644 {
645 struct xt_entry_target *t = ip6t_get_target(e);
646 struct xt_tgchk_param par = {
647 .net = net,
648 .table = name,
649 .entryinfo = e,
650 .target = t->u.kernel.target,
651 .targinfo = t->data,
652 .hook_mask = e->comefrom,
653 .family = NFPROTO_IPV6,
654 };
655 int ret;
656
657 t = ip6t_get_target(e);
658 ret = xt_check_target(&par, t->u.target_size - sizeof(*t),
659 e->ipv6.proto, e->ipv6.invflags & IP6T_INV_PROTO);
660 if (ret < 0) {
661 duprintf("ip_tables: check failed for `%s'.\n",
662 t->u.kernel.target->name);
663 return ret;
664 }
665 return 0;
666 }
667
/* Fully resolve and validate one rule: look up and check every match,
 * then the target. On any failure, everything bound so far is unwound
 * (matches cleaned up in order, target module ref dropped). */
static int
find_check_entry(struct ip6t_entry *e, struct net *net, const char *name,
		 unsigned int size)
{
	struct xt_entry_target *t;
	struct xt_target *target;
	int ret;
	unsigned int j;
	struct xt_mtchk_param mtpar;
	struct xt_entry_match *ematch;

	ret = check_entry(e, name);
	if (ret)
		return ret;

	/* j counts successfully checked matches, so the cleanup loop
	 * below unwinds exactly those. */
	j = 0;
	mtpar.net	= net;
	mtpar.table     = name;
	mtpar.entryinfo = &e->ipv6;
	mtpar.hook_mask = e->comefrom;
	mtpar.family    = NFPROTO_IPV6;
	xt_ematch_foreach(ematch, e) {
		ret = find_check_match(ematch, &mtpar);
		if (ret != 0)
			goto cleanup_matches;
		++j;
	}

	t = ip6t_get_target(e);
	target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name,
					t->u.user.revision);
	if (IS_ERR(target)) {
		duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
		ret = PTR_ERR(target);
		goto cleanup_matches;
	}
	t->u.kernel.target = target;

	ret = check_target(e, net, name);
	if (ret)
		goto err;
	return 0;
 err:
	module_put(t->u.kernel.target->me);
 cleanup_matches:
	xt_ematch_foreach(ematch, e) {
		if (j-- == 0)
			break;
		cleanup_match(ematch, net);
	}
	return ret;
}
720
721 static bool check_underflow(const struct ip6t_entry *e)
722 {
723 const struct xt_entry_target *t;
724 unsigned int verdict;
725
726 if (!unconditional(&e->ipv6))
727 return false;
728 t = ip6t_get_target_c(e);
729 if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
730 return false;
731 verdict = ((struct xt_standard_target *)t)->verdict;
732 verdict = -verdict - 1;
733 return verdict == NF_DROP || verdict == NF_ACCEPT;
734 }
735
/* Offset/alignment validation for one entry plus hook bookkeeping:
 * record in @newinfo which entries sit at the user-supplied hook entry
 * points and underflows (underflows must be valid policy rules).
 * Also zeroes the entry's counters and comefrom for later passes. */
static int
check_entry_size_and_hooks(struct ip6t_entry *e,
			   struct xt_table_info *newinfo,
			   const unsigned char *base,
			   const unsigned char *limit,
			   const unsigned int *hook_entries,
			   const unsigned int *underflows,
			   unsigned int valid_hooks)
{
	unsigned int h;

	if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0 ||
	    (unsigned char *)e + sizeof(struct ip6t_entry) >= limit) {
		duprintf("Bad offset %p\n", e);
		return -EINVAL;
	}

	/* entry must be large enough for its header plus a target */
	if (e->next_offset
	    < sizeof(struct ip6t_entry) + sizeof(struct xt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	/* Check hooks & underflows */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if (!(valid_hooks & (1 << h)))
			continue;
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h]) {
			if (!check_underflow(e)) {
				pr_err("Underflows must be unconditional and "
				       "use the STANDARD target with "
				       "ACCEPT/DROP\n");
				return -EINVAL;
			}
			newinfo->underflow[h] = underflows[h];
		}
	}

	/* Clear counters and comefrom */
	e->counters = ((struct xt_counters) { 0, 0 });
	e->comefrom = 0;
	return 0;
}
782
/* Release everything a fully-checked rule holds: destroy all matches,
 * run the target's destructor and drop its module reference. */
static void cleanup_entry(struct ip6t_entry *e, struct net *net)
{
	struct xt_tgdtor_param par;
	struct xt_entry_target *t;
	struct xt_entry_match *ematch;

	/* Cleanup all matches */
	xt_ematch_foreach(ematch, e)
		cleanup_match(ematch, net);
	t = ip6t_get_target(e);

	par.net      = net;
	par.target   = t->u.kernel.target;
	par.targinfo = t->data;
	par.family   = NFPROTO_IPV6;
	if (par.target->destroy != NULL)
		par.target->destroy(&par);
	module_put(par.target->me);
}
802
/* Checks and translates the user-supplied table segment (held in
   newinfo).
 *
 * Pipeline: structural pass over every entry (sizes, hook/underflow
 * bookkeeping), then loop detection via mark_source_chains(), then full
 * match/target validation, and finally replication of the checked blob
 * to every other CPU's copy. Unwinds checked entries on failure. */
static int
translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
		const struct ip6t_replace *repl)
{
	struct ip6t_entry *iter;
	unsigned int i;
	int ret = 0;

	newinfo->size = repl->size;
	newinfo->number = repl->num_entries;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = 0xFFFFFFFF;
		newinfo->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_table: size %u\n", newinfo->size);
	i = 0;
	/* Walk through entries, checking offsets. */
	xt_entry_foreach(iter, entry0, newinfo->size) {
		ret = check_entry_size_and_hooks(iter, newinfo, entry0,
						 entry0 + repl->size,
						 repl->hook_entry,
						 repl->underflow,
						 repl->valid_hooks);
		if (ret != 0)
			return ret;
		++i;
		/* each user chain head (ERROR target) adds a jumpstack slot */
		if (strcmp(ip6t_get_target(iter)->u.user.name,
		    XT_ERROR_TARGET) == 0)
			++newinfo->stacksize;
	}

	if (i != repl->num_entries) {
		duprintf("translate_table: %u not %u entries\n",
			 i, repl->num_entries);
		return -EINVAL;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(repl->valid_hooks & (1 << i)))
			continue;
		if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, repl->hook_entry[i]);
			return -EINVAL;
		}
		if (newinfo->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, repl->underflow[i]);
			return -EINVAL;
		}
	}

	if (!mark_source_chains(newinfo, repl->valid_hooks, entry0))
		return -ELOOP;

	/* Finally, each sanity check must pass */
	i = 0;
	xt_entry_foreach(iter, entry0, newinfo->size) {
		ret = find_check_entry(iter, net, repl->name, repl->size);
		if (ret != 0)
			break;
		++i;
	}

	if (ret != 0) {
		/* unwind only the i entries that were fully checked */
		xt_entry_foreach(iter, entry0, newinfo->size) {
			if (i-- == 0)
				break;
			cleanup_entry(iter, net);
		}
		return ret;
	}

	/* And one copy for every other CPU */
	for_each_possible_cpu(i) {
		if (newinfo->entries[i] && newinfo->entries[i] != entry0)
			memcpy(newinfo->entries[i], entry0, newinfo->size);
	}

	return ret;
}
891
/* Sum the per-cpu packet/byte counters of every rule in @t into the
 * caller-supplied @counters array (one slot per rule). Each per-cpu
 * read is retried under that CPU's xt_recseq seqcount so a counter
 * pair is never torn by a concurrent writer. */
static void
get_counters(const struct xt_table_info *t,
	     struct xt_counters counters[])
{
	struct ip6t_entry *iter;
	unsigned int cpu;
	unsigned int i;

	for_each_possible_cpu(cpu) {
		seqcount_t *s = &per_cpu(xt_recseq, cpu);

		i = 0;
		xt_entry_foreach(iter, t->entries[cpu], t->size) {
			u64 bcnt, pcnt;
			unsigned int start;

			/* retry until a consistent bcnt/pcnt snapshot */
			do {
				start = read_seqcount_begin(s);
				bcnt = iter->counters.bcnt;
				pcnt = iter->counters.pcnt;
			} while (read_seqcount_retry(s, start));

			ADD_COUNTER(counters[i], bcnt, pcnt);
			++i;
		}
	}
}
919
920 static struct xt_counters *alloc_counters(const struct xt_table *table)
921 {
922 unsigned int countersize;
923 struct xt_counters *counters;
924 const struct xt_table_info *private = table->private;
925
926 /* We need atomic snapshot of counters: rest doesn't change
927 (other than comefrom, which userspace doesn't care
928 about). */
929 countersize = sizeof(struct xt_counters) * private->number;
930 counters = vzalloc(countersize);
931
932 if (counters == NULL)
933 return ERR_PTR(-ENOMEM);
934
935 get_counters(private, counters);
936
937 return counters;
938 }
939
/* Copy the whole ruleset blob of @table to @userptr, then patch each
 * entry in the userspace copy: fill in the summed counters, and replace
 * kernel match/target pointers with their user-visible names. */
static int
copy_entries_to_user(unsigned int total_size,
		     const struct xt_table *table,
		     void __user *userptr)
{
	unsigned int off, num;
	const struct ip6t_entry *e;
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;
	int ret = 0;
	const void *loc_cpu_entry;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	/* choose the copy that is on our node/cpu, ...
	 * This choice is lazy (because current thread is
	 * allowed to migrate to another cpu)
	 */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
		ret = -EFAULT;
		goto free_counters;
	}

	/* FIXME: use iterator macros --RR */
	/* ... then go back and fix counters and names */
	for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
		unsigned int i;
		const struct xt_entry_match *m;
		const struct xt_entry_target *t;

		e = (struct ip6t_entry *)(loc_cpu_entry + off);
		/* overwrite the counters field with the summed totals */
		if (copy_to_user(userptr + off
				 + offsetof(struct ip6t_entry, counters),
				 &counters[num],
				 sizeof(counters[num])) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}

		/* rewrite each match's name field in the user copy */
		for (i = sizeof(struct ip6t_entry);
		     i < e->target_offset;
		     i += m->u.match_size) {
			m = (void *)e + i;

			if (copy_to_user(userptr + off + i
					 + offsetof(struct xt_entry_match,
						    u.user.name),
					 m->u.kernel.match->name,
					 strlen(m->u.kernel.match->name)+1)
			    != 0) {
				ret = -EFAULT;
				goto free_counters;
			}
		}

		/* and the target's name */
		t = ip6t_get_target_c(e);
		if (copy_to_user(userptr + off + e->target_offset
				 + offsetof(struct xt_entry_target,
					    u.user.name),
				 t->u.kernel.target->name,
				 strlen(t->u.kernel.target->name)+1) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}
	}

 free_counters:
	vfree(counters);
	return ret;
}
1013
1014 #ifdef CONFIG_COMPAT
1015 static void compat_standard_from_user(void *dst, const void *src)
1016 {
1017 int v = *(compat_int_t *)src;
1018
1019 if (v > 0)
1020 v += xt_compat_calc_jump(AF_INET6, v);
1021 memcpy(dst, &v, sizeof(v));
1022 }
1023
1024 static int compat_standard_to_user(void __user *dst, const void *src)
1025 {
1026 compat_int_t cv = *(int *)src;
1027
1028 if (cv > 0)
1029 cv -= xt_compat_calc_jump(AF_INET6, cv);
1030 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
1031 }
1032
/* Compute how much smaller entry @e is in the compat (32-bit) layout:
 * accumulate the per-match/target offsets, shrink newinfo->size, record
 * the delta for later jump translation, and pull every hook entry /
 * underflow that lies beyond @e back by that delta. */
static int compat_calc_entry(const struct ip6t_entry *e,
			     const struct xt_table_info *info,
			     const void *base, struct xt_table_info *newinfo)
{
	const struct xt_entry_match *ematch;
	const struct xt_entry_target *t;
	unsigned int entry_offset;
	int off, i, ret;

	off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
	entry_offset = (void *)e - base;
	xt_ematch_foreach(ematch, e)
		off += xt_compat_match_offset(ematch->u.kernel.match);
	t = ip6t_get_target_c(e);
	off += xt_compat_target_offset(t->u.kernel.target);
	newinfo->size -= off;
	ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
	if (ret)
		return ret;

	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		if (info->hook_entry[i] &&
		    (e < (struct ip6t_entry *)(base + info->hook_entry[i])))
			newinfo->hook_entry[i] -= off;
		if (info->underflow[i] &&
		    (e < (struct ip6t_entry *)(base + info->underflow[i])))
			newinfo->underflow[i] -= off;
	}
	return 0;
}
1063
/* Build a compat-layout view of @info in @newinfo: copy the metadata,
 * then walk this CPU's entry blob adjusting sizes and offsets via
 * compat_calc_entry(). Returns 0 or a negative error. */
static int compat_table_info(const struct xt_table_info *info,
			     struct xt_table_info *newinfo)
{
	struct ip6t_entry *iter;
	void *loc_cpu_entry;
	int ret;

	if (!newinfo || !info)
		return -EINVAL;

	/* we dont care about newinfo->entries[] */
	memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
	newinfo->initial_entries = 0;
	loc_cpu_entry = info->entries[raw_smp_processor_id()];
	xt_compat_init_offsets(AF_INET6, info->number);
	xt_entry_foreach(iter, loc_cpu_entry, info->size) {
		ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
		if (ret != 0)
			return ret;
	}
	return 0;
}
1086 #endif
1087
/* IP6T_SO_GET_INFO handler: copy a table's metadata (valid hooks, hook
 * entry points, underflows, entry count, blob size) to userspace.
 * @compat selects translation to the 32-bit-on-64-bit layout, done
 * under the compat lock. Auto-loads "ip6table_<name>" if needed. */
static int get_info(struct net *net, void __user *user,
		    const int *len, int compat)
{
	char name[XT_TABLE_MAXNAMELEN];
	struct xt_table *t;
	int ret;

	if (*len != sizeof(struct ip6t_getinfo)) {
		duprintf("length %u != %zu\n", *len,
			 sizeof(struct ip6t_getinfo));
		return -EINVAL;
	}

	if (copy_from_user(name, user, sizeof(name)) != 0)
		return -EFAULT;

	name[XT_TABLE_MAXNAMELEN-1] = '\0';
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_lock(AF_INET6);
#endif
	t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
				    "ip6table_%s", name);
	if (!IS_ERR_OR_NULL(t)) {
		struct ip6t_getinfo info;
		const struct xt_table_info *private = t->private;
#ifdef CONFIG_COMPAT
		struct xt_table_info tmp;

		if (compat) {
			/* report the compat-layout sizes/offsets instead */
			ret = compat_table_info(private, &tmp);
			xt_compat_flush_offsets(AF_INET6);
			private = &tmp;
		}
#endif
		memset(&info, 0, sizeof(info));
		info.valid_hooks = t->valid_hooks;
		memcpy(info.hook_entry, private->hook_entry,
		       sizeof(info.hook_entry));
		memcpy(info.underflow, private->underflow,
		       sizeof(info.underflow));
		info.num_entries = private->number;
		info.size = private->size;
		strcpy(info.name, name);

		if (copy_to_user(user, &info, *len) != 0)
			ret = -EFAULT;
		else
			ret = 0;

		xt_table_unlock(t);
		module_put(t->me);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_unlock(AF_INET6);
#endif
	return ret;
}
1148
/* IP6T_SO_GET_ENTRIES handler: validate the user's declared size
 * against the live table, then dump the full ruleset (entries plus
 * counters and extension names) via copy_entries_to_user(). */
static int
get_entries(struct net *net, struct ip6t_get_entries __user *uptr,
	    const int *len)
{
	int ret;
	struct ip6t_get_entries get;
	struct xt_table *t;

	if (*len < sizeof(get)) {
		duprintf("get_entries: %u < %zu\n", *len, sizeof(get));
		return -EINVAL;
	}
	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;
	if (*len != sizeof(struct ip6t_get_entries) + get.size) {
		duprintf("get_entries: %u != %zu\n",
			 *len, sizeof(get) + get.size);
		return -EINVAL;
	}

	t = xt_find_table_lock(net, AF_INET6, get.name);
	if (!IS_ERR_OR_NULL(t)) {
		struct xt_table_info *private = t->private;
		duprintf("t->private->number = %u\n", private->number);
		if (get.size == private->size)
			ret = copy_entries_to_user(private->size,
						   t, uptr->entrytable);
		else {
			/* table changed since userspace asked for its size */
			duprintf("get_entries: I've got %u not %u!\n",
				 private->size, get.size);
			ret = -EAGAIN;
		}
		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;

	return ret;
}
1188
/* Common tail of table replacement (shared by native and compat paths):
 * swap @newinfo in for the named table, harvest the old table's counters
 * into @counters_ptr (userspace), and tear down the old rule blob.
 * @newinfo ownership transfers to the xtables core on success; on failure
 * the caller still owns it and must free it.
 */
static int
__do_replace(struct net *net, const char *name, unsigned int valid_hooks,
	     struct xt_table_info *newinfo, unsigned int num_counters,
	     void __user *counters_ptr)
{
	int ret;
	struct xt_table *t;
	struct xt_table_info *oldinfo;
	struct xt_counters *counters;
	const void *loc_cpu_old_entry;
	struct ip6t_entry *iter;

	ret = 0;
	counters = vzalloc(num_counters * sizeof(struct xt_counters));
	if (!counters) {
		ret = -ENOMEM;
		goto out;
	}

	t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
				    "ip6table_%s", name);
	if (IS_ERR_OR_NULL(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free_newinfo_counters_untrans;
	}

	/* You lied! */
	if (valid_hooks != t->valid_hooks) {
		duprintf("Valid hook crap: %08X vs %08X\n",
			 valid_hooks, t->valid_hooks);
		ret = -EINVAL;
		goto put_module;
	}

	oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
	if (!oldinfo)
		goto put_module;

	/* Update module usage count based on number of rules */
	duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
		oldinfo->number, oldinfo->initial_entries, newinfo->number);
	/* The two conditional puts below drop the find_table reference and,
	 * when user rules went away entirely, the extra "has user rules"
	 * reference as well.
	 */
	if ((oldinfo->number > oldinfo->initial_entries) ||
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);
	if ((oldinfo->number > oldinfo->initial_entries) &&
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);

	/* Get the old counters, and synchronize with replace */
	get_counters(oldinfo, counters);

	/* Decrease module usage counts and free resource */
	loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
	xt_entry_foreach(iter, loc_cpu_old_entry, oldinfo->size)
		cleanup_entry(iter, net);

	xt_free_table_info(oldinfo);
	if (copy_to_user(counters_ptr, counters,
			 sizeof(struct xt_counters) * num_counters) != 0) {
		/* Silent error, can't fail, new table is already in place */
		net_warn_ratelimited("ip6tables: counters copy to user failed while replacing table\n");
	}
	vfree(counters);
	xt_table_unlock(t);
	return ret;

 put_module:
	module_put(t->me);
	xt_table_unlock(t);
 free_newinfo_counters_untrans:
	vfree(counters);
 out:
	return ret;
}
1263
1264 static int
1265 do_replace(struct net *net, const void __user *user, unsigned int len)
1266 {
1267 int ret;
1268 struct ip6t_replace tmp;
1269 struct xt_table_info *newinfo;
1270 void *loc_cpu_entry;
1271 struct ip6t_entry *iter;
1272
1273 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1274 return -EFAULT;
1275
1276 /* overflow check */
1277 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1278 return -ENOMEM;
1279 tmp.name[sizeof(tmp.name)-1] = 0;
1280
1281 newinfo = xt_alloc_table_info(tmp.size);
1282 if (!newinfo)
1283 return -ENOMEM;
1284
1285 /* choose the copy that is on our node/cpu */
1286 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1287 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1288 tmp.size) != 0) {
1289 ret = -EFAULT;
1290 goto free_newinfo;
1291 }
1292
1293 ret = translate_table(net, newinfo, loc_cpu_entry, &tmp);
1294 if (ret != 0)
1295 goto free_newinfo;
1296
1297 duprintf("ip_tables: Translated table\n");
1298
1299 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1300 tmp.num_counters, tmp.counters);
1301 if (ret)
1302 goto free_newinfo_untrans;
1303 return 0;
1304
1305 free_newinfo_untrans:
1306 xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1307 cleanup_entry(iter, net);
1308 free_newinfo:
1309 xt_free_table_info(newinfo);
1310 return ret;
1311 }
1312
1313 static int
1314 do_add_counters(struct net *net, const void __user *user, unsigned int len,
1315 int compat)
1316 {
1317 unsigned int i, curcpu;
1318 struct xt_counters_info tmp;
1319 struct xt_counters *paddc;
1320 unsigned int num_counters;
1321 char *name;
1322 int size;
1323 void *ptmp;
1324 struct xt_table *t;
1325 const struct xt_table_info *private;
1326 int ret = 0;
1327 const void *loc_cpu_entry;
1328 struct ip6t_entry *iter;
1329 unsigned int addend;
1330 #ifdef CONFIG_COMPAT
1331 struct compat_xt_counters_info compat_tmp;
1332
1333 if (compat) {
1334 ptmp = &compat_tmp;
1335 size = sizeof(struct compat_xt_counters_info);
1336 } else
1337 #endif
1338 {
1339 ptmp = &tmp;
1340 size = sizeof(struct xt_counters_info);
1341 }
1342
1343 if (copy_from_user(ptmp, user, size) != 0)
1344 return -EFAULT;
1345
1346 #ifdef CONFIG_COMPAT
1347 if (compat) {
1348 num_counters = compat_tmp.num_counters;
1349 name = compat_tmp.name;
1350 } else
1351 #endif
1352 {
1353 num_counters = tmp.num_counters;
1354 name = tmp.name;
1355 }
1356
1357 if (len != size + num_counters * sizeof(struct xt_counters))
1358 return -EINVAL;
1359
1360 paddc = vmalloc(len - size);
1361 if (!paddc)
1362 return -ENOMEM;
1363
1364 if (copy_from_user(paddc, user + size, len - size) != 0) {
1365 ret = -EFAULT;
1366 goto free;
1367 }
1368
1369 t = xt_find_table_lock(net, AF_INET6, name);
1370 if (IS_ERR_OR_NULL(t)) {
1371 ret = t ? PTR_ERR(t) : -ENOENT;
1372 goto free;
1373 }
1374
1375
1376 local_bh_disable();
1377 private = t->private;
1378 if (private->number != num_counters) {
1379 ret = -EINVAL;
1380 goto unlock_up_free;
1381 }
1382
1383 i = 0;
1384 /* Choose the copy that is on our node */
1385 curcpu = smp_processor_id();
1386 addend = xt_write_recseq_begin();
1387 loc_cpu_entry = private->entries[curcpu];
1388 xt_entry_foreach(iter, loc_cpu_entry, private->size) {
1389 ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt);
1390 ++i;
1391 }
1392 xt_write_recseq_end(addend);
1393
1394 unlock_up_free:
1395 local_bh_enable();
1396 xt_table_unlock(t);
1397 module_put(t->me);
1398 free:
1399 vfree(paddc);
1400
1401 return ret;
1402 }
1403
1404 #ifdef CONFIG_COMPAT
/* 32-bit userspace layout of struct ip6t_replace: identical except that
 * the counters pointer is a compat_uptr_t and the trailing entries use
 * the compat entry layout.
 */
struct compat_ip6t_replace {
	char			name[XT_TABLE_MAXNAMELEN];
	u32			valid_hooks;
	u32			num_entries;
	u32			size;
	u32			hook_entry[NF_INET_NUMHOOKS];
	u32			underflow[NF_INET_NUMHOOKS];
	u32			num_counters;
	compat_uptr_t		counters;	/* struct xt_counters * */
	struct compat_ip6t_entry entries[0];
};
1416
/* Copy one kernel rule @e to 32-bit userspace at *@dstptr, shrinking it
 * to the compat layout.  @counters[@i] supplies the rule's counters.
 * *@dstptr and *@size are advanced/reduced as data is emitted; the
 * target/next offsets are rewritten to match the shrunken layout.
 */
static int
compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr,
			  unsigned int *size, struct xt_counters *counters,
			  unsigned int i)
{
	struct xt_entry_target *t;
	struct compat_ip6t_entry __user *ce;
	u_int16_t target_offset, next_offset;
	compat_uint_t origsize;
	const struct xt_entry_match *ematch;
	int ret = 0;

	origsize = *size;
	ce = (struct compat_ip6t_entry __user *)*dstptr;
	if (copy_to_user(ce, e, sizeof(struct ip6t_entry)) != 0 ||
	    copy_to_user(&ce->counters, &counters[i],
	    sizeof(counters[i])) != 0)
		return -EFAULT;

	*dstptr += sizeof(struct compat_ip6t_entry);
	*size -= sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);

	/* Matches and target each convert themselves to compat form. */
	xt_ematch_foreach(ematch, e) {
		ret = xt_compat_match_to_user(ematch, dstptr, size);
		if (ret != 0)
			return ret;
	}
	/* (origsize - *size) is the total shrinkage so far; offsets in the
	 * compat entry must account for it.
	 */
	target_offset = e->target_offset - (origsize - *size);
	t = ip6t_get_target(e);
	ret = xt_compat_target_to_user(t, dstptr, size);
	if (ret)
		return ret;
	next_offset = e->next_offset - (origsize - *size);
	if (put_user(target_offset, &ce->target_offset) != 0 ||
	    put_user(next_offset, &ce->next_offset) != 0)
		return -EFAULT;
	return 0;
}
1455
1456 static int
1457 compat_find_calc_match(struct xt_entry_match *m,
1458 const char *name,
1459 const struct ip6t_ip6 *ipv6,
1460 unsigned int hookmask,
1461 int *size)
1462 {
1463 struct xt_match *match;
1464
1465 match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name,
1466 m->u.user.revision);
1467 if (IS_ERR(match)) {
1468 duprintf("compat_check_calc_match: `%s' not found\n",
1469 m->u.user.name);
1470 return PTR_ERR(match);
1471 }
1472 m->u.kernel.match = match;
1473 *size += xt_compat_match_offset(match);
1474 return 0;
1475 }
1476
1477 static void compat_release_entry(struct compat_ip6t_entry *e)
1478 {
1479 struct xt_entry_target *t;
1480 struct xt_entry_match *ematch;
1481
1482 /* Cleanup all matches */
1483 xt_ematch_foreach(ematch, e)
1484 module_put(ematch->u.kernel.match->me);
1485 t = compat_ip6t_get_target(e);
1486 module_put(t->u.kernel.target->me);
1487 }
1488
1489 static int
1490 check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
1491 struct xt_table_info *newinfo,
1492 unsigned int *size,
1493 const unsigned char *base,
1494 const unsigned char *limit,
1495 const unsigned int *hook_entries,
1496 const unsigned int *underflows,
1497 const char *name)
1498 {
1499 struct xt_entry_match *ematch;
1500 struct xt_entry_target *t;
1501 struct xt_target *target;
1502 unsigned int entry_offset;
1503 unsigned int j;
1504 int ret, off, h;
1505
1506 duprintf("check_compat_entry_size_and_hooks %p\n", e);
1507 if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0 ||
1508 (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit) {
1509 duprintf("Bad offset %p, limit = %p\n", e, limit);
1510 return -EINVAL;
1511 }
1512
1513 if (e->next_offset < sizeof(struct compat_ip6t_entry) +
1514 sizeof(struct compat_xt_entry_target)) {
1515 duprintf("checking: element %p size %u\n",
1516 e, e->next_offset);
1517 return -EINVAL;
1518 }
1519
1520 /* For purposes of check_entry casting the compat entry is fine */
1521 ret = check_entry((struct ip6t_entry *)e, name);
1522 if (ret)
1523 return ret;
1524
1525 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1526 entry_offset = (void *)e - (void *)base;
1527 j = 0;
1528 xt_ematch_foreach(ematch, e) {
1529 ret = compat_find_calc_match(ematch, name,
1530 &e->ipv6, e->comefrom, &off);
1531 if (ret != 0)
1532 goto release_matches;
1533 ++j;
1534 }
1535
1536 t = compat_ip6t_get_target(e);
1537 target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name,
1538 t->u.user.revision);
1539 if (IS_ERR(target)) {
1540 duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
1541 t->u.user.name);
1542 ret = PTR_ERR(target);
1543 goto release_matches;
1544 }
1545 t->u.kernel.target = target;
1546
1547 off += xt_compat_target_offset(target);
1548 *size += off;
1549 ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
1550 if (ret)
1551 goto out;
1552
1553 /* Check hooks & underflows */
1554 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1555 if ((unsigned char *)e - base == hook_entries[h])
1556 newinfo->hook_entry[h] = hook_entries[h];
1557 if ((unsigned char *)e - base == underflows[h])
1558 newinfo->underflow[h] = underflows[h];
1559 }
1560
1561 /* Clear counters and comefrom */
1562 memset(&e->counters, 0, sizeof(e->counters));
1563 e->comefrom = 0;
1564 return 0;
1565
1566 out:
1567 module_put(t->u.kernel.target->me);
1568 release_matches:
1569 xt_ematch_foreach(ematch, e) {
1570 if (j-- == 0)
1571 break;
1572 module_put(ematch->u.kernel.match->me);
1573 }
1574 return ret;
1575 }
1576
/* Second pass of compat translation: expand one validated compat entry
 * @e into a kernel-layout entry at *@dstptr, growing matches and target
 * via their compat_from_user handlers and fixing up target/next offsets
 * plus any hook entry/underflow positions that sit beyond this entry.
 * Note: *size is *increased* here (kernel layout is larger than compat).
 */
static int
compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
			    unsigned int *size, const char *name,
			    struct xt_table_info *newinfo, unsigned char *base)
{
	struct xt_entry_target *t;
	struct ip6t_entry *de;
	unsigned int origsize;
	int ret, h;
	struct xt_entry_match *ematch;

	ret = 0;
	origsize = *size;
	de = (struct ip6t_entry *)*dstptr;
	memcpy(de, e, sizeof(struct ip6t_entry));
	memcpy(&de->counters, &e->counters, sizeof(e->counters));

	*dstptr += sizeof(struct ip6t_entry);
	*size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);

	xt_ematch_foreach(ematch, e) {
		ret = xt_compat_match_from_user(ematch, dstptr, size);
		if (ret != 0)
			return ret;
	}
	/* (origsize - *size) is negative growth; offsets move accordingly. */
	de->target_offset = e->target_offset - (origsize - *size);
	t = compat_ip6t_get_target(e);
	xt_compat_target_from_user(t, dstptr, size);

	de->next_offset = e->next_offset - (origsize - *size);
	/* Shift hook entry/underflow offsets that lie past this entry. */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if ((unsigned char *)de - base < newinfo->hook_entry[h])
			newinfo->hook_entry[h] -= origsize - *size;
		if ((unsigned char *)de - base < newinfo->underflow[h])
			newinfo->underflow[h] -= origsize - *size;
	}
	return ret;
}
1615
/* Run the ->checkentry hooks of every match and of the target of an
 * already-translated entry @e.  On failure, cleanup_match() is invoked
 * for exactly the j matches whose check succeeded.
 */
static int compat_check_entry(struct ip6t_entry *e, struct net *net,
			      const char *name)
{
	unsigned int j;
	int ret = 0;
	struct xt_mtchk_param mtpar;
	struct xt_entry_match *ematch;

	j = 0;
	mtpar.net	= net;
	mtpar.table     = name;
	mtpar.entryinfo = &e->ipv6;
	mtpar.hook_mask = e->comefrom;
	mtpar.family    = NFPROTO_IPV6;
	xt_ematch_foreach(ematch, e) {
		ret = check_match(ematch, &mtpar);
		if (ret != 0)
			goto cleanup_matches;
		++j;
	}

	ret = check_target(e, net, name);
	if (ret)
		goto cleanup_matches;
	return 0;

 cleanup_matches:
	xt_ematch_foreach(ematch, e) {
		if (j-- == 0)
			break;
		cleanup_match(ematch, net);
	}
	return ret;
}
1650
/* Translate a 32-bit userspace rule blob into the kernel layout.
 *
 * Two passes under xt_compat_lock(AF_INET6):
 *   1. check_compat_entry_size_and_hooks() validates each compat entry,
 *      resolves modules and accumulates the layout size delta;
 *   2. compat_copy_entry_from_user() expands the entries into a freshly
 *      allocated kernel-layout table.
 * Afterwards (lock dropped) chains are verified loop-free and every
 * entry's ->checkentry hooks run via compat_check_entry().
 *
 * On success *pinfo/*pentry0 are replaced by the new table (the old info
 * is freed); on failure all module references taken so far are dropped.
 */
static int
translate_compat_table(struct net *net,
		       const char *name,
		       unsigned int valid_hooks,
		       struct xt_table_info **pinfo,
		       void **pentry0,
		       unsigned int total_size,
		       unsigned int number,
		       unsigned int *hook_entries,
		       unsigned int *underflows)
{
	unsigned int i, j;
	struct xt_table_info *newinfo, *info;
	void *pos, *entry0, *entry1;
	struct compat_ip6t_entry *iter0;
	struct ip6t_entry *iter1;
	unsigned int size;
	int ret = 0;

	info = *pinfo;
	entry0 = *pentry0;
	size = total_size;
	info->number = number;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		info->hook_entry[i] = 0xFFFFFFFF;
		info->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_compat_table: size %u\n", info->size);
	j = 0;
	xt_compat_lock(AF_INET6);
	xt_compat_init_offsets(AF_INET6, number);
	/* Walk through entries, checking offsets. */
	xt_entry_foreach(iter0, entry0, total_size) {
		ret = check_compat_entry_size_and_hooks(iter0, info, &size,
							entry0,
							entry0 + total_size,
							hook_entries,
							underflows,
							name);
		if (ret != 0)
			goto out_unlock;
		++j;
	}

	ret = -EINVAL;
	if (j != number) {
		duprintf("translate_compat_table: %u not %u entries\n",
			 j, number);
		goto out_unlock;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(valid_hooks & (1 << i)))
			continue;
		if (info->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, hook_entries[i]);
			goto out_unlock;
		}
		if (info->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, underflows[i]);
			goto out_unlock;
		}
	}

	ret = -ENOMEM;
	/* size now includes the compat->kernel growth from pass 1. */
	newinfo = xt_alloc_table_info(size);
	if (!newinfo)
		goto out_unlock;

	newinfo->number = number;
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = info->hook_entry[i];
		newinfo->underflow[i] = info->underflow[i];
	}
	entry1 = newinfo->entries[raw_smp_processor_id()];
	pos = entry1;
	size = total_size;
	xt_entry_foreach(iter0, entry0, total_size) {
		ret = compat_copy_entry_from_user(iter0, &pos, &size,
						  name, newinfo, entry1);
		if (ret != 0)
			break;
	}
	xt_compat_flush_offsets(AF_INET6);
	xt_compat_unlock(AF_INET6);
	if (ret)
		goto free_newinfo;

	ret = -ELOOP;
	if (!mark_source_chains(newinfo, valid_hooks, entry1))
		goto free_newinfo;

	i = 0;
	xt_entry_foreach(iter1, entry1, newinfo->size) {
		ret = compat_check_entry(iter1, net, name);
		if (ret != 0)
			break;
		++i;
		if (strcmp(ip6t_get_target(iter1)->u.user.name,
		    XT_ERROR_TARGET) == 0)
			++newinfo->stacksize;
	}
	if (ret) {
		/*
		 * The first i matches need cleanup_entry (calls ->destroy)
		 * because they had called ->check already. The other j-i
		 * entries need only release.
		 */
		int skip = i;
		j -= i;
		xt_entry_foreach(iter0, entry0, newinfo->size) {
			if (skip-- > 0)
				continue;
			if (j-- == 0)
				break;
			compat_release_entry(iter0);
		}
		xt_entry_foreach(iter1, entry1, newinfo->size) {
			if (i-- == 0)
				break;
			cleanup_entry(iter1, net);
		}
		xt_free_table_info(newinfo);
		return ret;
	}

	/* And one copy for every other CPU */
	for_each_possible_cpu(i)
		if (newinfo->entries[i] && newinfo->entries[i] != entry1)
			memcpy(newinfo->entries[i], entry1, newinfo->size);

	*pinfo = newinfo;
	*pentry0 = entry1;
	xt_free_table_info(info);
	return 0;

free_newinfo:
	xt_free_table_info(newinfo);
out:
	/* Release the j compat entries that passed pass-1 checking. */
	xt_entry_foreach(iter0, entry0, total_size) {
		if (j-- == 0)
			break;
		compat_release_entry(iter0);
	}
	return ret;
out_unlock:
	xt_compat_flush_offsets(AF_INET6);
	xt_compat_unlock(AF_INET6);
	goto out;
}
1808
1809 static int
1810 compat_do_replace(struct net *net, void __user *user, unsigned int len)
1811 {
1812 int ret;
1813 struct compat_ip6t_replace tmp;
1814 struct xt_table_info *newinfo;
1815 void *loc_cpu_entry;
1816 struct ip6t_entry *iter;
1817
1818 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1819 return -EFAULT;
1820
1821 /* overflow check */
1822 if (tmp.size >= INT_MAX / num_possible_cpus())
1823 return -ENOMEM;
1824 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1825 return -ENOMEM;
1826 tmp.name[sizeof(tmp.name)-1] = 0;
1827
1828 newinfo = xt_alloc_table_info(tmp.size);
1829 if (!newinfo)
1830 return -ENOMEM;
1831
1832 /* choose the copy that is on our node/cpu */
1833 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1834 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1835 tmp.size) != 0) {
1836 ret = -EFAULT;
1837 goto free_newinfo;
1838 }
1839
1840 ret = translate_compat_table(net, tmp.name, tmp.valid_hooks,
1841 &newinfo, &loc_cpu_entry, tmp.size,
1842 tmp.num_entries, tmp.hook_entry,
1843 tmp.underflow);
1844 if (ret != 0)
1845 goto free_newinfo;
1846
1847 duprintf("compat_do_replace: Translated table\n");
1848
1849 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1850 tmp.num_counters, compat_ptr(tmp.counters));
1851 if (ret)
1852 goto free_newinfo_untrans;
1853 return 0;
1854
1855 free_newinfo_untrans:
1856 xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1857 cleanup_entry(iter, net);
1858 free_newinfo:
1859 xt_free_table_info(newinfo);
1860 return ret;
1861 }
1862
1863 static int
1864 compat_do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user,
1865 unsigned int len)
1866 {
1867 int ret;
1868
1869 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1870 return -EPERM;
1871
1872 switch (cmd) {
1873 case IP6T_SO_SET_REPLACE:
1874 ret = compat_do_replace(sock_net(sk), user, len);
1875 break;
1876
1877 case IP6T_SO_SET_ADD_COUNTERS:
1878 ret = do_add_counters(sock_net(sk), user, len, 1);
1879 break;
1880
1881 default:
1882 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
1883 ret = -EINVAL;
1884 }
1885
1886 return ret;
1887 }
1888
/* 32-bit userspace layout of struct ip6t_get_entries: header followed by
 * @size bytes of compat-layout rule entries.
 */
struct compat_ip6t_get_entries {
	char name[XT_TABLE_MAXNAMELEN];
	compat_uint_t size;
	struct compat_ip6t_entry entrytable[0];
};
1894
/* Copy @total_size bytes of @table's rules to 32-bit userspace at
 * @userptr, converting each entry via compat_copy_entry_to_user() and
 * attaching the counters snapshot taken by alloc_counters().
 */
static int
compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
			    void __user *userptr)
{
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;
	void __user *pos;
	unsigned int size;
	int ret = 0;
	const void *loc_cpu_entry;
	unsigned int i = 0;
	struct ip6t_entry *iter;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	/* choose the copy that is on our node/cpu, ...
	 * This choice is lazy (because current thread is
	 * allowed to migrate to another cpu)
	 */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	pos = userptr;
	size = total_size;
	xt_entry_foreach(iter, loc_cpu_entry, total_size) {
		ret = compat_copy_entry_to_user(iter, &pos,
						&size, counters, i++);
		if (ret != 0)
			break;
	}

	vfree(counters);
	return ret;
}
1929
/* IP6T_SO_GET_ENTRIES handler for 32-bit callers: like get_entries() but
 * sizes are compared against the compat layout (via compat_table_info())
 * and entries are shrunk on the way out.  Runs under xt_compat_lock().
 */
static int
compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr,
		   int *len)
{
	int ret;
	struct compat_ip6t_get_entries get;
	struct xt_table *t;

	if (*len < sizeof(get)) {
		duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
		return -EINVAL;
	}

	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;

	if (*len != sizeof(struct compat_ip6t_get_entries) + get.size) {
		duprintf("compat_get_entries: %u != %zu\n",
			 *len, sizeof(get) + get.size);
		return -EINVAL;
	}

	xt_compat_lock(AF_INET6);
	t = xt_find_table_lock(net, AF_INET6, get.name);
	if (!IS_ERR_OR_NULL(t)) {
		const struct xt_table_info *private = t->private;
		struct xt_table_info info;
		duprintf("t->private->number = %u\n", private->number);
		ret = compat_table_info(private, &info);
		if (!ret && get.size == info.size) {
			ret = compat_copy_entries_to_user(private->size,
							  t, uptr->entrytable);
		} else if (!ret) {
			/* Table changed size since GET_INFO; retry. */
			duprintf("compat_get_entries: I've got %u not %u!\n",
				 private->size, get.size);
			ret = -EAGAIN;
		}
		xt_compat_flush_offsets(AF_INET6);
		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;

	xt_compat_unlock(AF_INET6);
	return ret;
}
1976
/* Forward declaration: the compat getter falls through to the native one
 * for commands that need no layout translation.
 */
static int do_ip6t_get_ctl(struct sock *, int, void __user *, int *);

/* getsockopt() dispatcher for 32-bit callers: requires CAP_NET_ADMIN,
 * handles GET_INFO/GET_ENTRIES in compat form, delegates the rest.
 */
static int
compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;

	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IP6T_SO_GET_INFO:
		ret = get_info(sock_net(sk), user, len, 1);
		break;
	case IP6T_SO_GET_ENTRIES:
		ret = compat_get_entries(sock_net(sk), user, len);
		break;
	default:
		ret = do_ip6t_get_ctl(sk, cmd, user, len);
	}
	return ret;
}
1999 #endif
2000
2001 static int
2002 do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
2003 {
2004 int ret;
2005
2006 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
2007 return -EPERM;
2008
2009 switch (cmd) {
2010 case IP6T_SO_SET_REPLACE:
2011 ret = do_replace(sock_net(sk), user, len);
2012 break;
2013
2014 case IP6T_SO_SET_ADD_COUNTERS:
2015 ret = do_add_counters(sock_net(sk), user, len, 0);
2016 break;
2017
2018 default:
2019 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
2020 ret = -EINVAL;
2021 }
2022
2023 return ret;
2024 }
2025
/* getsockopt() dispatcher (native layout): requires CAP_NET_ADMIN.
 * Handles table info/entry dumps and match/target revision queries.
 */
static int
do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;

	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IP6T_SO_GET_INFO:
		ret = get_info(sock_net(sk), user, len, 0);
		break;

	case IP6T_SO_GET_ENTRIES:
		ret = get_entries(sock_net(sk), user, len);
		break;

	case IP6T_SO_GET_REVISION_MATCH:
	case IP6T_SO_GET_REVISION_TARGET: {
		struct xt_get_revision rev;
		int target;

		if (*len != sizeof(rev)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
			ret = -EFAULT;
			break;
		}
		/* Name comes from userspace; force termination. */
		rev.name[sizeof(rev.name)-1] = 0;

		if (cmd == IP6T_SO_GET_REVISION_TARGET)
			target = 1;
		else
			target = 0;

		/* Autoload "ip6t_<name>" if the extension is missing;
		 * xt_find_revision() fills ret via its last argument.
		 */
		try_then_request_module(xt_find_revision(AF_INET6, rev.name,
							 rev.revision,
							 target, &ret),
					"ip6t_%s", rev.name);
		break;
	}

	default:
		duprintf("do_ip6t_get_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}
2077
/* Register an ip6tables table in namespace @net, seeding it with the
 * initial ruleset @repl.  Returns the registered xt_table or ERR_PTR().
 */
struct xt_table *ip6t_register_table(struct net *net,
				     const struct xt_table *table,
				     const struct ip6t_replace *repl)
{
	int ret;
	struct xt_table_info *newinfo;
	struct xt_table_info bootstrap = {0};
	void *loc_cpu_entry;
	struct xt_table *new_table;

	newinfo = xt_alloc_table_info(repl->size);
	if (!newinfo) {
		ret = -ENOMEM;
		goto out;
	}

	/* choose the copy on our node/cpu, but dont care about preemption */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	memcpy(loc_cpu_entry, repl->entries, repl->size);

	ret = translate_table(net, newinfo, loc_cpu_entry, repl);
	if (ret != 0)
		goto out_free;

	new_table = xt_register_table(net, table, &bootstrap, newinfo);
	if (IS_ERR(new_table)) {
		ret = PTR_ERR(new_table);
		goto out_free;
	}
	return new_table;

out_free:
	xt_free_table_info(newinfo);
out:
	return ERR_PTR(ret);
}
2114
/* Unregister @table from @net and free its rules, dropping the module
 * references held by each entry and the table owner's "has user rules"
 * reference if user-added rules were present.
 */
void ip6t_unregister_table(struct net *net, struct xt_table *table)
{
	struct xt_table_info *private;
	void *loc_cpu_entry;
	struct module *table_owner = table->me;
	struct ip6t_entry *iter;

	private = xt_unregister_table(table);

	/* Decrease module usage counts and free resources */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	xt_entry_foreach(iter, loc_cpu_entry, private->size)
		cleanup_entry(iter, net);
	if (private->number > private->initial_entries)
		module_put(table_owner);
	xt_free_table_info(private);
}
2132
/* Returns true when @type equals @test_type and @code falls inside
 * [@min_code, @max_code], XOR'ed with @invert.
 */
static inline bool
icmp6_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
		      u_int8_t type, u_int8_t code,
		      bool invert)
{
	bool hit = type == test_type &&
		   code >= min_code &&
		   code <= max_code;

	return hit ^ invert;
}
2142
2143 static bool
2144 icmp6_match(const struct sk_buff *skb, struct xt_action_param *par)
2145 {
2146 const struct icmp6hdr *ic;
2147 struct icmp6hdr _icmph;
2148 const struct ip6t_icmp *icmpinfo = par->matchinfo;
2149
2150 /* Must not be a fragment. */
2151 if (par->fragoff != 0)
2152 return false;
2153
2154 ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
2155 if (ic == NULL) {
2156 /* We've been asked to examine this packet, and we
2157 * can't. Hence, no choice but to drop.
2158 */
2159 duprintf("Dropping evil ICMP tinygram.\n");
2160 par->hotdrop = true;
2161 return false;
2162 }
2163
2164 return icmp6_type_code_match(icmpinfo->type,
2165 icmpinfo->code[0],
2166 icmpinfo->code[1],
2167 ic->icmp6_type, ic->icmp6_code,
2168 !!(icmpinfo->invflags&IP6T_ICMP_INV));
2169 }
2170
2171 /* Called when user tries to insert an entry of this type. */
2172 static int icmp6_checkentry(const struct xt_mtchk_param *par)
2173 {
2174 const struct ip6t_icmp *icmpinfo = par->matchinfo;
2175
2176 /* Must specify no unknown invflags */
2177 return (icmpinfo->invflags & ~IP6T_ICMP_INV) ? -EINVAL : 0;
2178 }
2179
/* The built-in targets: standard (NULL) and error. */
static struct xt_target ip6t_builtin_tg[] __read_mostly = {
	{
		.name             = XT_STANDARD_TARGET,
		.targetsize       = sizeof(int),	/* verdict value */
		.family           = NFPROTO_IPV6,
#ifdef CONFIG_COMPAT
		.compatsize       = sizeof(compat_int_t),
		.compat_from_user = compat_standard_from_user,
		.compat_to_user   = compat_standard_to_user,
#endif
	},
	{
		.name             = XT_ERROR_TARGET,
		.target           = ip6t_error,
		.targetsize       = XT_FUNCTION_MAXNAMELEN,
		.family           = NFPROTO_IPV6,
	},
};
2199
/* sockopt registration: wires the IP6T_SO_* set/get ranges on PF_INET6
 * sockets to the dispatchers above (plus compat variants when enabled).
 */
static struct nf_sockopt_ops ip6t_sockopts = {
	.pf		= PF_INET6,
	.set_optmin	= IP6T_BASE_CTL,
	.set_optmax	= IP6T_SO_SET_MAX+1,
	.set		= do_ip6t_set_ctl,
#ifdef CONFIG_COMPAT
	.compat_set	= compat_do_ip6t_set_ctl,
#endif
	.get_optmin	= IP6T_BASE_CTL,
	.get_optmax	= IP6T_SO_GET_MAX+1,
	.get		= do_ip6t_get_ctl,
#ifdef CONFIG_COMPAT
	.compat_get	= compat_do_ip6t_get_ctl,
#endif
	.owner		= THIS_MODULE,
};
2216
/* The built-in "icmp6" match, restricted to IPPROTO_ICMPV6 packets. */
static struct xt_match ip6t_builtin_mt[] __read_mostly = {
	{
		.name       = "icmp6",
		.match      = icmp6_match,
		.matchsize  = sizeof(struct ip6t_icmp),
		.checkentry = icmp6_checkentry,
		.proto      = IPPROTO_ICMPV6,
		.family     = NFPROTO_IPV6,
	},
};
2227
/* Per-netns setup: initialise the xtables IPv6 proto state (proc files). */
static int __net_init ip6_tables_net_init(struct net *net)
{
	return xt_proto_init(net, NFPROTO_IPV6);
}
2232
/* Per-netns teardown: undo ip6_tables_net_init(). */
static void __net_exit ip6_tables_net_exit(struct net *net)
{
	xt_proto_fini(net, NFPROTO_IPV6);
}
2237
/* Per-network-namespace init/exit hooks. */
static struct pernet_operations ip6_tables_net_ops = {
	.init = ip6_tables_net_init,
	.exit = ip6_tables_net_exit,
};
2242
/* Module init: register pernet ops, builtin targets/matches and the
 * sockopt interface, unwinding in reverse order on any failure.
 */
static int __init ip6_tables_init(void)
{
	int ret;

	ret = register_pernet_subsys(&ip6_tables_net_ops);
	if (ret < 0)
		goto err1;

	/* No one else will be downing sem now, so we won't sleep */
	ret = xt_register_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
	if (ret < 0)
		goto err2;
	ret = xt_register_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt));
	if (ret < 0)
		goto err4;

	/* Register setsockopt */
	ret = nf_register_sockopt(&ip6t_sockopts);
	if (ret < 0)
		goto err5;

	pr_info("(C) 2000-2006 Netfilter Core Team\n");
	return 0;

err5:
	xt_unregister_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt));
err4:
	xt_unregister_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
err2:
	unregister_pernet_subsys(&ip6_tables_net_ops);
err1:
	return ret;
}
2276
/* Module exit: tear down in strict reverse order of ip6_tables_init(). */
static void __exit ip6_tables_fini(void)
{
	nf_unregister_sockopt(&ip6t_sockopts);

	xt_unregister_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt));
	xt_unregister_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
	unregister_pernet_subsys(&ip6_tables_net_ops);
}
2285
/* Public API used by the ip6table_* table modules (filter, mangle, ...). */
EXPORT_SYMBOL(ip6t_register_table);
EXPORT_SYMBOL(ip6t_unregister_table);
EXPORT_SYMBOL(ip6t_do_table);

module_init(ip6_tables_init);
module_exit(ip6_tables_fini);
This page took 0.157019 seconds and 5 git commands to generate.