/*
 * Packet matching code.
 *
 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
 * Copyright (c) 2006-2010 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
13 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15 #include <linux/kernel.h>
16 #include <linux/capability.h>
18 #include <linux/skbuff.h>
19 #include <linux/kmod.h>
20 #include <linux/vmalloc.h>
21 #include <linux/netdevice.h>
22 #include <linux/module.h>
23 #include <linux/poison.h>
24 #include <linux/icmpv6.h>
26 #include <net/compat.h>
27 #include <asm/uaccess.h>
28 #include <linux/mutex.h>
29 #include <linux/proc_fs.h>
30 #include <linux/err.h>
31 #include <linux/cpumask.h>
33 #include <linux/netfilter_ipv6/ip6_tables.h>
34 #include <linux/netfilter/x_tables.h>
35 #include <net/netfilter/nf_log.h>
36 #include "../../netfilter/xt_repldata.h"
38 MODULE_LICENSE("GPL");
39 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
40 MODULE_DESCRIPTION("IPv6 packet filter");
42 /*#define DEBUG_IP_FIREWALL*/
43 /*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
44 /*#define DEBUG_IP_FIREWALL_USER*/
/* Debug helpers: compile to nothing unless the matching debug switch
 * above is enabled, so the fast path carries no logging cost.
 * The extraction had dropped the #else/#endif lines, leaving the
 * conditionals unbalanced; both arms are visible, so restore them. */
#ifdef DEBUG_IP_FIREWALL
#define dprintf(format, args...) pr_info(format , ## args)
#else
#define dprintf(format, args...)
#endif

#ifdef DEBUG_IP_FIREWALL_USER
#define duprintf(format, args...) pr_info(format , ## args)
#else
#define duprintf(format, args...)
#endif

/* Sanity-check assertion: WARN (not BUG) when netfilter debugging is on. */
#ifdef CONFIG_NETFILTER_DEBUG
#define IP_NF_ASSERT(x) WARN_ON(!(x))
#else
#define IP_NF_ASSERT(x)
#endif
65 /* All the better to debug you with... */
70 void *ip6t_alloc_initial_table(const struct xt_table
*info
)
72 return xt_alloc_initial_table(ip6t
, IP6T
);
74 EXPORT_SYMBOL_GPL(ip6t_alloc_initial_table
);
/*
   We keep a set of rules for each CPU, so we can avoid write-locking
   them in the softirq when updating the counters and therefore
   only need to read-lock in the softirq; doing a write_lock_bh() in user
   context stops packets coming through and allows user context to read
   the counters or update the rules.

   Hence the start of any table is given by get_table() below.  */
85 /* Returns whether matches rule or not. */
86 /* Performance critical - called for every packet */
88 ip6_packet_match(const struct sk_buff
*skb
,
91 const struct ip6t_ip6
*ip6info
,
92 unsigned int *protoff
,
93 int *fragoff
, bool *hotdrop
)
96 const struct ipv6hdr
*ipv6
= ipv6_hdr(skb
);
98 #define FWINV(bool, invflg) ((bool) ^ !!(ip6info->invflags & (invflg)))
100 if (FWINV(ipv6_masked_addr_cmp(&ipv6
->saddr
, &ip6info
->smsk
,
101 &ip6info
->src
), IP6T_INV_SRCIP
) ||
102 FWINV(ipv6_masked_addr_cmp(&ipv6
->daddr
, &ip6info
->dmsk
,
103 &ip6info
->dst
), IP6T_INV_DSTIP
)) {
104 dprintf("Source or dest mismatch.\n");
106 dprintf("SRC: %u. Mask: %u. Target: %u.%s\n", ip->saddr,
107 ipinfo->smsk.s_addr, ipinfo->src.s_addr,
108 ipinfo->invflags & IP6T_INV_SRCIP ? " (INV)" : "");
109 dprintf("DST: %u. Mask: %u. Target: %u.%s\n", ip->daddr,
110 ipinfo->dmsk.s_addr, ipinfo->dst.s_addr,
111 ipinfo->invflags & IP6T_INV_DSTIP ? " (INV)" : "");*/
115 ret
= ifname_compare_aligned(indev
, ip6info
->iniface
, ip6info
->iniface_mask
);
117 if (FWINV(ret
!= 0, IP6T_INV_VIA_IN
)) {
118 dprintf("VIA in mismatch (%s vs %s).%s\n",
119 indev
, ip6info
->iniface
,
120 ip6info
->invflags
&IP6T_INV_VIA_IN
?" (INV)":"");
124 ret
= ifname_compare_aligned(outdev
, ip6info
->outiface
, ip6info
->outiface_mask
);
126 if (FWINV(ret
!= 0, IP6T_INV_VIA_OUT
)) {
127 dprintf("VIA out mismatch (%s vs %s).%s\n",
128 outdev
, ip6info
->outiface
,
129 ip6info
->invflags
&IP6T_INV_VIA_OUT
?" (INV)":"");
133 /* ... might want to do something with class and flowlabel here ... */
135 /* look for the desired protocol header */
136 if((ip6info
->flags
& IP6T_F_PROTO
)) {
138 unsigned short _frag_off
;
140 protohdr
= ipv6_find_hdr(skb
, protoff
, -1, &_frag_off
, NULL
);
146 *fragoff
= _frag_off
;
148 dprintf("Packet protocol %hi ?= %s%hi.\n",
150 ip6info
->invflags
& IP6T_INV_PROTO
? "!":"",
153 if (ip6info
->proto
== protohdr
) {
154 if(ip6info
->invflags
& IP6T_INV_PROTO
) {
160 /* We need match for the '-p all', too! */
161 if ((ip6info
->proto
!= 0) &&
162 !(ip6info
->invflags
& IP6T_INV_PROTO
))
168 /* should be ip6 safe */
170 ip6_checkentry(const struct ip6t_ip6
*ipv6
)
172 if (ipv6
->flags
& ~IP6T_F_MASK
) {
173 duprintf("Unknown flag bits set: %08X\n",
174 ipv6
->flags
& ~IP6T_F_MASK
);
177 if (ipv6
->invflags
& ~IP6T_INV_MASK
) {
178 duprintf("Unknown invflag bits set: %08X\n",
179 ipv6
->invflags
& ~IP6T_INV_MASK
);
186 ip6t_error(struct sk_buff
*skb
, const struct xt_action_param
*par
)
188 net_info_ratelimited("error: `%s'\n", (const char *)par
->targinfo
);
/* Translate a byte offset within a table blob into an entry pointer.
 * Offsets are validated elsewhere (check_entry_size_and_hooks). */
static inline struct ip6t_entry *
get_entry(const void *base, unsigned int offset)
{
	return (struct ip6t_entry *)(base + offset);
}
199 /* All zeroes == unconditional rule. */
200 /* Mildly perf critical (only if packet tracing is on) */
201 static inline bool unconditional(const struct ip6t_ip6
*ipv6
)
203 static const struct ip6t_ip6 uncond
;
205 return memcmp(ipv6
, &uncond
, sizeof(uncond
)) == 0;
/* const-qualified wrapper around ip6t_get_target(); the cast drops
 * const only to satisfy the non-const helper, the target is not
 * modified through the returned pointer. */
static inline const struct xt_entry_target *
ip6t_get_target_c(const struct ip6t_entry *e)
{
	return ip6t_get_target((struct ip6t_entry *)e);
}
214 #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
215 /* This cries for unification! */
216 static const char *const hooknames
[] = {
217 [NF_INET_PRE_ROUTING
] = "PREROUTING",
218 [NF_INET_LOCAL_IN
] = "INPUT",
219 [NF_INET_FORWARD
] = "FORWARD",
220 [NF_INET_LOCAL_OUT
] = "OUTPUT",
221 [NF_INET_POST_ROUTING
] = "POSTROUTING",
224 enum nf_ip_trace_comments
{
225 NF_IP6_TRACE_COMMENT_RULE
,
226 NF_IP6_TRACE_COMMENT_RETURN
,
227 NF_IP6_TRACE_COMMENT_POLICY
,
230 static const char *const comments
[] = {
231 [NF_IP6_TRACE_COMMENT_RULE
] = "rule",
232 [NF_IP6_TRACE_COMMENT_RETURN
] = "return",
233 [NF_IP6_TRACE_COMMENT_POLICY
] = "policy",
236 static struct nf_loginfo trace_loginfo
= {
237 .type
= NF_LOG_TYPE_LOG
,
240 .level
= LOGLEVEL_WARNING
,
241 .logflags
= NF_LOG_MASK
,
246 /* Mildly perf critical (only if packet tracing is on) */
248 get_chainname_rulenum(const struct ip6t_entry
*s
, const struct ip6t_entry
*e
,
249 const char *hookname
, const char **chainname
,
250 const char **comment
, unsigned int *rulenum
)
252 const struct xt_standard_target
*t
= (void *)ip6t_get_target_c(s
);
254 if (strcmp(t
->target
.u
.kernel
.target
->name
, XT_ERROR_TARGET
) == 0) {
255 /* Head of user chain: ERROR target with chainname */
256 *chainname
= t
->target
.data
;
261 if (s
->target_offset
== sizeof(struct ip6t_entry
) &&
262 strcmp(t
->target
.u
.kernel
.target
->name
,
263 XT_STANDARD_TARGET
) == 0 &&
265 unconditional(&s
->ipv6
)) {
266 /* Tail of chains: STANDARD target (return/policy) */
267 *comment
= *chainname
== hookname
268 ? comments
[NF_IP6_TRACE_COMMENT_POLICY
]
269 : comments
[NF_IP6_TRACE_COMMENT_RETURN
];
278 static void trace_packet(const struct sk_buff
*skb
,
280 const struct net_device
*in
,
281 const struct net_device
*out
,
282 const char *tablename
,
283 const struct xt_table_info
*private,
284 const struct ip6t_entry
*e
)
286 const void *table_base
;
287 const struct ip6t_entry
*root
;
288 const char *hookname
, *chainname
, *comment
;
289 const struct ip6t_entry
*iter
;
290 unsigned int rulenum
= 0;
291 struct net
*net
= dev_net(in
? in
: out
);
293 table_base
= private->entries
[smp_processor_id()];
294 root
= get_entry(table_base
, private->hook_entry
[hook
]);
296 hookname
= chainname
= hooknames
[hook
];
297 comment
= comments
[NF_IP6_TRACE_COMMENT_RULE
];
299 xt_entry_foreach(iter
, root
, private->size
- private->hook_entry
[hook
])
300 if (get_chainname_rulenum(iter
, e
, hookname
,
301 &chainname
, &comment
, &rulenum
) != 0)
304 nf_log_trace(net
, AF_INET6
, hook
, skb
, in
, out
, &trace_loginfo
,
305 "TRACE: %s:%s:%s:%u ",
306 tablename
, chainname
, comment
, rulenum
);
310 static inline __pure
struct ip6t_entry
*
311 ip6t_next_entry(const struct ip6t_entry
*entry
)
313 return (void *)entry
+ entry
->next_offset
;
316 /* Returns one of the generic firewall policies, like NF_ACCEPT. */
318 ip6t_do_table(struct sk_buff
*skb
,
320 const struct net_device
*in
,
321 const struct net_device
*out
,
322 struct xt_table
*table
)
324 static const char nulldevname
[IFNAMSIZ
] __attribute__((aligned(sizeof(long))));
325 /* Initializing verdict to NF_DROP keeps gcc happy. */
326 unsigned int verdict
= NF_DROP
;
327 const char *indev
, *outdev
;
328 const void *table_base
;
329 struct ip6t_entry
*e
, **jumpstack
;
330 unsigned int *stackptr
, origptr
, cpu
;
331 const struct xt_table_info
*private;
332 struct xt_action_param acpar
;
336 indev
= in
? in
->name
: nulldevname
;
337 outdev
= out
? out
->name
: nulldevname
;
338 /* We handle fragments by dealing with the first fragment as
339 * if it was a normal packet. All other fragments are treated
340 * normally, except that they will NEVER match rules that ask
341 * things we don't know, ie. tcp syn flag or ports). If the
342 * rule is also a fragment-specific rule, non-fragments won't
344 acpar
.hotdrop
= false;
347 acpar
.family
= NFPROTO_IPV6
;
348 acpar
.hooknum
= hook
;
350 IP_NF_ASSERT(table
->valid_hooks
& (1 << hook
));
353 addend
= xt_write_recseq_begin();
354 private = table
->private;
356 * Ensure we load private-> members after we've fetched the base
359 smp_read_barrier_depends();
360 cpu
= smp_processor_id();
361 table_base
= private->entries
[cpu
];
362 jumpstack
= (struct ip6t_entry
**)private->jumpstack
[cpu
];
363 stackptr
= per_cpu_ptr(private->stackptr
, cpu
);
366 e
= get_entry(table_base
, private->hook_entry
[hook
]);
369 const struct xt_entry_target
*t
;
370 const struct xt_entry_match
*ematch
;
374 if (!ip6_packet_match(skb
, indev
, outdev
, &e
->ipv6
,
375 &acpar
.thoff
, &acpar
.fragoff
, &acpar
.hotdrop
)) {
377 e
= ip6t_next_entry(e
);
381 xt_ematch_foreach(ematch
, e
) {
382 acpar
.match
= ematch
->u
.kernel
.match
;
383 acpar
.matchinfo
= ematch
->data
;
384 if (!acpar
.match
->match(skb
, &acpar
))
388 ADD_COUNTER(e
->counters
, skb
->len
, 1);
390 t
= ip6t_get_target_c(e
);
391 IP_NF_ASSERT(t
->u
.kernel
.target
);
393 #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
394 /* The packet is traced: log it */
395 if (unlikely(skb
->nf_trace
))
396 trace_packet(skb
, hook
, in
, out
,
397 table
->name
, private, e
);
399 /* Standard target? */
400 if (!t
->u
.kernel
.target
->target
) {
403 v
= ((struct xt_standard_target
*)t
)->verdict
;
405 /* Pop from stack? */
406 if (v
!= XT_RETURN
) {
407 verdict
= (unsigned int)(-v
) - 1;
410 if (*stackptr
<= origptr
)
411 e
= get_entry(table_base
,
412 private->underflow
[hook
]);
414 e
= ip6t_next_entry(jumpstack
[--*stackptr
]);
417 if (table_base
+ v
!= ip6t_next_entry(e
) &&
418 !(e
->ipv6
.flags
& IP6T_F_GOTO
)) {
419 if (*stackptr
>= private->stacksize
) {
423 jumpstack
[(*stackptr
)++] = e
;
426 e
= get_entry(table_base
, v
);
430 acpar
.target
= t
->u
.kernel
.target
;
431 acpar
.targinfo
= t
->data
;
433 verdict
= t
->u
.kernel
.target
->target(skb
, &acpar
);
434 if (verdict
== XT_CONTINUE
)
435 e
= ip6t_next_entry(e
);
439 } while (!acpar
.hotdrop
);
443 xt_write_recseq_end(addend
);
446 #ifdef DEBUG_ALLOW_ALL
455 /* Figures out from what hook each rule can be called: returns 0 if
456 there are loops. Puts hook bitmask in comefrom. */
458 mark_source_chains(const struct xt_table_info
*newinfo
,
459 unsigned int valid_hooks
, void *entry0
)
463 /* No recursion; use packet counter to save back ptrs (reset
464 to 0 as we leave), and comefrom to save source hook bitmask */
465 for (hook
= 0; hook
< NF_INET_NUMHOOKS
; hook
++) {
466 unsigned int pos
= newinfo
->hook_entry
[hook
];
467 struct ip6t_entry
*e
= (struct ip6t_entry
*)(entry0
+ pos
);
469 if (!(valid_hooks
& (1 << hook
)))
472 /* Set initial back pointer. */
473 e
->counters
.pcnt
= pos
;
476 const struct xt_standard_target
*t
477 = (void *)ip6t_get_target_c(e
);
478 int visited
= e
->comefrom
& (1 << hook
);
480 if (e
->comefrom
& (1 << NF_INET_NUMHOOKS
)) {
481 pr_err("iptables: loop hook %u pos %u %08X.\n",
482 hook
, pos
, e
->comefrom
);
485 e
->comefrom
|= ((1 << hook
) | (1 << NF_INET_NUMHOOKS
));
487 /* Unconditional return/END. */
488 if ((e
->target_offset
== sizeof(struct ip6t_entry
) &&
489 (strcmp(t
->target
.u
.user
.name
,
490 XT_STANDARD_TARGET
) == 0) &&
492 unconditional(&e
->ipv6
)) || visited
) {
493 unsigned int oldpos
, size
;
495 if ((strcmp(t
->target
.u
.user
.name
,
496 XT_STANDARD_TARGET
) == 0) &&
497 t
->verdict
< -NF_MAX_VERDICT
- 1) {
498 duprintf("mark_source_chains: bad "
499 "negative verdict (%i)\n",
504 /* Return: backtrack through the last
507 e
->comefrom
^= (1<<NF_INET_NUMHOOKS
);
508 #ifdef DEBUG_IP_FIREWALL_USER
510 & (1 << NF_INET_NUMHOOKS
)) {
511 duprintf("Back unset "
518 pos
= e
->counters
.pcnt
;
519 e
->counters
.pcnt
= 0;
521 /* We're at the start. */
525 e
= (struct ip6t_entry
*)
527 } while (oldpos
== pos
+ e
->next_offset
);
530 size
= e
->next_offset
;
531 e
= (struct ip6t_entry
*)
532 (entry0
+ pos
+ size
);
533 e
->counters
.pcnt
= pos
;
536 int newpos
= t
->verdict
;
538 if (strcmp(t
->target
.u
.user
.name
,
539 XT_STANDARD_TARGET
) == 0 &&
541 if (newpos
> newinfo
->size
-
542 sizeof(struct ip6t_entry
)) {
543 duprintf("mark_source_chains: "
544 "bad verdict (%i)\n",
548 /* This a jump; chase it. */
549 duprintf("Jump rule %u -> %u\n",
552 /* ... this is a fallthru */
553 newpos
= pos
+ e
->next_offset
;
555 e
= (struct ip6t_entry
*)
557 e
->counters
.pcnt
= pos
;
562 duprintf("Finished chain %u\n", hook
);
567 static void cleanup_match(struct xt_entry_match
*m
, struct net
*net
)
569 struct xt_mtdtor_param par
;
572 par
.match
= m
->u
.kernel
.match
;
573 par
.matchinfo
= m
->data
;
574 par
.family
= NFPROTO_IPV6
;
575 if (par
.match
->destroy
!= NULL
)
576 par
.match
->destroy(&par
);
577 module_put(par
.match
->me
);
581 check_entry(const struct ip6t_entry
*e
, const char *name
)
583 const struct xt_entry_target
*t
;
585 if (!ip6_checkentry(&e
->ipv6
)) {
586 duprintf("ip_tables: ip check failed %p %s.\n", e
, name
);
590 if (e
->target_offset
+ sizeof(struct xt_entry_target
) >
594 t
= ip6t_get_target_c(e
);
595 if (e
->target_offset
+ t
->u
.target_size
> e
->next_offset
)
601 static int check_match(struct xt_entry_match
*m
, struct xt_mtchk_param
*par
)
603 const struct ip6t_ip6
*ipv6
= par
->entryinfo
;
606 par
->match
= m
->u
.kernel
.match
;
607 par
->matchinfo
= m
->data
;
609 ret
= xt_check_match(par
, m
->u
.match_size
- sizeof(*m
),
610 ipv6
->proto
, ipv6
->invflags
& IP6T_INV_PROTO
);
612 duprintf("ip_tables: check failed for `%s'.\n",
620 find_check_match(struct xt_entry_match
*m
, struct xt_mtchk_param
*par
)
622 struct xt_match
*match
;
625 match
= xt_request_find_match(NFPROTO_IPV6
, m
->u
.user
.name
,
628 duprintf("find_check_match: `%s' not found\n", m
->u
.user
.name
);
629 return PTR_ERR(match
);
631 m
->u
.kernel
.match
= match
;
633 ret
= check_match(m
, par
);
639 module_put(m
->u
.kernel
.match
->me
);
643 static int check_target(struct ip6t_entry
*e
, struct net
*net
, const char *name
)
645 struct xt_entry_target
*t
= ip6t_get_target(e
);
646 struct xt_tgchk_param par
= {
650 .target
= t
->u
.kernel
.target
,
652 .hook_mask
= e
->comefrom
,
653 .family
= NFPROTO_IPV6
,
657 t
= ip6t_get_target(e
);
658 ret
= xt_check_target(&par
, t
->u
.target_size
- sizeof(*t
),
659 e
->ipv6
.proto
, e
->ipv6
.invflags
& IP6T_INV_PROTO
);
661 duprintf("ip_tables: check failed for `%s'.\n",
662 t
->u
.kernel
.target
->name
);
669 find_check_entry(struct ip6t_entry
*e
, struct net
*net
, const char *name
,
672 struct xt_entry_target
*t
;
673 struct xt_target
*target
;
676 struct xt_mtchk_param mtpar
;
677 struct xt_entry_match
*ematch
;
679 ret
= check_entry(e
, name
);
686 mtpar
.entryinfo
= &e
->ipv6
;
687 mtpar
.hook_mask
= e
->comefrom
;
688 mtpar
.family
= NFPROTO_IPV6
;
689 xt_ematch_foreach(ematch
, e
) {
690 ret
= find_check_match(ematch
, &mtpar
);
692 goto cleanup_matches
;
696 t
= ip6t_get_target(e
);
697 target
= xt_request_find_target(NFPROTO_IPV6
, t
->u
.user
.name
,
699 if (IS_ERR(target
)) {
700 duprintf("find_check_entry: `%s' not found\n", t
->u
.user
.name
);
701 ret
= PTR_ERR(target
);
702 goto cleanup_matches
;
704 t
->u
.kernel
.target
= target
;
706 ret
= check_target(e
, net
, name
);
711 module_put(t
->u
.kernel
.target
->me
);
713 xt_ematch_foreach(ematch
, e
) {
716 cleanup_match(ematch
, net
);
721 static bool check_underflow(const struct ip6t_entry
*e
)
723 const struct xt_entry_target
*t
;
724 unsigned int verdict
;
726 if (!unconditional(&e
->ipv6
))
728 t
= ip6t_get_target_c(e
);
729 if (strcmp(t
->u
.user
.name
, XT_STANDARD_TARGET
) != 0)
731 verdict
= ((struct xt_standard_target
*)t
)->verdict
;
732 verdict
= -verdict
- 1;
733 return verdict
== NF_DROP
|| verdict
== NF_ACCEPT
;
737 check_entry_size_and_hooks(struct ip6t_entry
*e
,
738 struct xt_table_info
*newinfo
,
739 const unsigned char *base
,
740 const unsigned char *limit
,
741 const unsigned int *hook_entries
,
742 const unsigned int *underflows
,
743 unsigned int valid_hooks
)
747 if ((unsigned long)e
% __alignof__(struct ip6t_entry
) != 0 ||
748 (unsigned char *)e
+ sizeof(struct ip6t_entry
) >= limit
) {
749 duprintf("Bad offset %p\n", e
);
754 < sizeof(struct ip6t_entry
) + sizeof(struct xt_entry_target
)) {
755 duprintf("checking: element %p size %u\n",
760 /* Check hooks & underflows */
761 for (h
= 0; h
< NF_INET_NUMHOOKS
; h
++) {
762 if (!(valid_hooks
& (1 << h
)))
764 if ((unsigned char *)e
- base
== hook_entries
[h
])
765 newinfo
->hook_entry
[h
] = hook_entries
[h
];
766 if ((unsigned char *)e
- base
== underflows
[h
]) {
767 if (!check_underflow(e
)) {
768 pr_err("Underflows must be unconditional and "
769 "use the STANDARD target with "
773 newinfo
->underflow
[h
] = underflows
[h
];
777 /* Clear counters and comefrom */
778 e
->counters
= ((struct xt_counters
) { 0, 0 });
783 static void cleanup_entry(struct ip6t_entry
*e
, struct net
*net
)
785 struct xt_tgdtor_param par
;
786 struct xt_entry_target
*t
;
787 struct xt_entry_match
*ematch
;
789 /* Cleanup all matches */
790 xt_ematch_foreach(ematch
, e
)
791 cleanup_match(ematch
, net
);
792 t
= ip6t_get_target(e
);
795 par
.target
= t
->u
.kernel
.target
;
796 par
.targinfo
= t
->data
;
797 par
.family
= NFPROTO_IPV6
;
798 if (par
.target
->destroy
!= NULL
)
799 par
.target
->destroy(&par
);
800 module_put(par
.target
->me
);
803 /* Checks and translates the user-supplied table segment (held in
806 translate_table(struct net
*net
, struct xt_table_info
*newinfo
, void *entry0
,
807 const struct ip6t_replace
*repl
)
809 struct ip6t_entry
*iter
;
813 newinfo
->size
= repl
->size
;
814 newinfo
->number
= repl
->num_entries
;
816 /* Init all hooks to impossible value. */
817 for (i
= 0; i
< NF_INET_NUMHOOKS
; i
++) {
818 newinfo
->hook_entry
[i
] = 0xFFFFFFFF;
819 newinfo
->underflow
[i
] = 0xFFFFFFFF;
822 duprintf("translate_table: size %u\n", newinfo
->size
);
824 /* Walk through entries, checking offsets. */
825 xt_entry_foreach(iter
, entry0
, newinfo
->size
) {
826 ret
= check_entry_size_and_hooks(iter
, newinfo
, entry0
,
834 if (strcmp(ip6t_get_target(iter
)->u
.user
.name
,
835 XT_ERROR_TARGET
) == 0)
836 ++newinfo
->stacksize
;
839 if (i
!= repl
->num_entries
) {
840 duprintf("translate_table: %u not %u entries\n",
841 i
, repl
->num_entries
);
845 /* Check hooks all assigned */
846 for (i
= 0; i
< NF_INET_NUMHOOKS
; i
++) {
847 /* Only hooks which are valid */
848 if (!(repl
->valid_hooks
& (1 << i
)))
850 if (newinfo
->hook_entry
[i
] == 0xFFFFFFFF) {
851 duprintf("Invalid hook entry %u %u\n",
852 i
, repl
->hook_entry
[i
]);
855 if (newinfo
->underflow
[i
] == 0xFFFFFFFF) {
856 duprintf("Invalid underflow %u %u\n",
857 i
, repl
->underflow
[i
]);
862 if (!mark_source_chains(newinfo
, repl
->valid_hooks
, entry0
))
865 /* Finally, each sanity check must pass */
867 xt_entry_foreach(iter
, entry0
, newinfo
->size
) {
868 ret
= find_check_entry(iter
, net
, repl
->name
, repl
->size
);
875 xt_entry_foreach(iter
, entry0
, newinfo
->size
) {
878 cleanup_entry(iter
, net
);
883 /* And one copy for every other CPU */
884 for_each_possible_cpu(i
) {
885 if (newinfo
->entries
[i
] && newinfo
->entries
[i
] != entry0
)
886 memcpy(newinfo
->entries
[i
], entry0
, newinfo
->size
);
893 get_counters(const struct xt_table_info
*t
,
894 struct xt_counters counters
[])
896 struct ip6t_entry
*iter
;
900 for_each_possible_cpu(cpu
) {
901 seqcount_t
*s
= &per_cpu(xt_recseq
, cpu
);
904 xt_entry_foreach(iter
, t
->entries
[cpu
], t
->size
) {
909 start
= read_seqcount_begin(s
);
910 bcnt
= iter
->counters
.bcnt
;
911 pcnt
= iter
->counters
.pcnt
;
912 } while (read_seqcount_retry(s
, start
));
914 ADD_COUNTER(counters
[i
], bcnt
, pcnt
);
920 static struct xt_counters
*alloc_counters(const struct xt_table
*table
)
922 unsigned int countersize
;
923 struct xt_counters
*counters
;
924 const struct xt_table_info
*private = table
->private;
926 /* We need atomic snapshot of counters: rest doesn't change
927 (other than comefrom, which userspace doesn't care
929 countersize
= sizeof(struct xt_counters
) * private->number
;
930 counters
= vzalloc(countersize
);
932 if (counters
== NULL
)
933 return ERR_PTR(-ENOMEM
);
935 get_counters(private, counters
);
941 copy_entries_to_user(unsigned int total_size
,
942 const struct xt_table
*table
,
943 void __user
*userptr
)
945 unsigned int off
, num
;
946 const struct ip6t_entry
*e
;
947 struct xt_counters
*counters
;
948 const struct xt_table_info
*private = table
->private;
950 const void *loc_cpu_entry
;
952 counters
= alloc_counters(table
);
953 if (IS_ERR(counters
))
954 return PTR_ERR(counters
);
956 /* choose the copy that is on our node/cpu, ...
957 * This choice is lazy (because current thread is
958 * allowed to migrate to another cpu)
960 loc_cpu_entry
= private->entries
[raw_smp_processor_id()];
961 if (copy_to_user(userptr
, loc_cpu_entry
, total_size
) != 0) {
966 /* FIXME: use iterator macros --RR */
967 /* ... then go back and fix counters and names */
968 for (off
= 0, num
= 0; off
< total_size
; off
+= e
->next_offset
, num
++){
970 const struct xt_entry_match
*m
;
971 const struct xt_entry_target
*t
;
973 e
= (struct ip6t_entry
*)(loc_cpu_entry
+ off
);
974 if (copy_to_user(userptr
+ off
975 + offsetof(struct ip6t_entry
, counters
),
977 sizeof(counters
[num
])) != 0) {
982 for (i
= sizeof(struct ip6t_entry
);
983 i
< e
->target_offset
;
984 i
+= m
->u
.match_size
) {
987 if (copy_to_user(userptr
+ off
+ i
988 + offsetof(struct xt_entry_match
,
990 m
->u
.kernel
.match
->name
,
991 strlen(m
->u
.kernel
.match
->name
)+1)
998 t
= ip6t_get_target_c(e
);
999 if (copy_to_user(userptr
+ off
+ e
->target_offset
1000 + offsetof(struct xt_entry_target
,
1002 t
->u
.kernel
.target
->name
,
1003 strlen(t
->u
.kernel
.target
->name
)+1) != 0) {
1014 #ifdef CONFIG_COMPAT
1015 static void compat_standard_from_user(void *dst
, const void *src
)
1017 int v
= *(compat_int_t
*)src
;
1020 v
+= xt_compat_calc_jump(AF_INET6
, v
);
1021 memcpy(dst
, &v
, sizeof(v
));
1024 static int compat_standard_to_user(void __user
*dst
, const void *src
)
1026 compat_int_t cv
= *(int *)src
;
1029 cv
-= xt_compat_calc_jump(AF_INET6
, cv
);
1030 return copy_to_user(dst
, &cv
, sizeof(cv
)) ? -EFAULT
: 0;
1033 static int compat_calc_entry(const struct ip6t_entry
*e
,
1034 const struct xt_table_info
*info
,
1035 const void *base
, struct xt_table_info
*newinfo
)
1037 const struct xt_entry_match
*ematch
;
1038 const struct xt_entry_target
*t
;
1039 unsigned int entry_offset
;
1042 off
= sizeof(struct ip6t_entry
) - sizeof(struct compat_ip6t_entry
);
1043 entry_offset
= (void *)e
- base
;
1044 xt_ematch_foreach(ematch
, e
)
1045 off
+= xt_compat_match_offset(ematch
->u
.kernel
.match
);
1046 t
= ip6t_get_target_c(e
);
1047 off
+= xt_compat_target_offset(t
->u
.kernel
.target
);
1048 newinfo
->size
-= off
;
1049 ret
= xt_compat_add_offset(AF_INET6
, entry_offset
, off
);
1053 for (i
= 0; i
< NF_INET_NUMHOOKS
; i
++) {
1054 if (info
->hook_entry
[i
] &&
1055 (e
< (struct ip6t_entry
*)(base
+ info
->hook_entry
[i
])))
1056 newinfo
->hook_entry
[i
] -= off
;
1057 if (info
->underflow
[i
] &&
1058 (e
< (struct ip6t_entry
*)(base
+ info
->underflow
[i
])))
1059 newinfo
->underflow
[i
] -= off
;
1064 static int compat_table_info(const struct xt_table_info
*info
,
1065 struct xt_table_info
*newinfo
)
1067 struct ip6t_entry
*iter
;
1068 void *loc_cpu_entry
;
1071 if (!newinfo
|| !info
)
1074 /* we dont care about newinfo->entries[] */
1075 memcpy(newinfo
, info
, offsetof(struct xt_table_info
, entries
));
1076 newinfo
->initial_entries
= 0;
1077 loc_cpu_entry
= info
->entries
[raw_smp_processor_id()];
1078 xt_compat_init_offsets(AF_INET6
, info
->number
);
1079 xt_entry_foreach(iter
, loc_cpu_entry
, info
->size
) {
1080 ret
= compat_calc_entry(iter
, info
, loc_cpu_entry
, newinfo
);
1088 static int get_info(struct net
*net
, void __user
*user
,
1089 const int *len
, int compat
)
1091 char name
[XT_TABLE_MAXNAMELEN
];
1095 if (*len
!= sizeof(struct ip6t_getinfo
)) {
1096 duprintf("length %u != %zu\n", *len
,
1097 sizeof(struct ip6t_getinfo
));
1101 if (copy_from_user(name
, user
, sizeof(name
)) != 0)
1104 name
[XT_TABLE_MAXNAMELEN
-1] = '\0';
1105 #ifdef CONFIG_COMPAT
1107 xt_compat_lock(AF_INET6
);
1109 t
= try_then_request_module(xt_find_table_lock(net
, AF_INET6
, name
),
1110 "ip6table_%s", name
);
1111 if (!IS_ERR_OR_NULL(t
)) {
1112 struct ip6t_getinfo info
;
1113 const struct xt_table_info
*private = t
->private;
1114 #ifdef CONFIG_COMPAT
1115 struct xt_table_info tmp
;
1118 ret
= compat_table_info(private, &tmp
);
1119 xt_compat_flush_offsets(AF_INET6
);
1123 memset(&info
, 0, sizeof(info
));
1124 info
.valid_hooks
= t
->valid_hooks
;
1125 memcpy(info
.hook_entry
, private->hook_entry
,
1126 sizeof(info
.hook_entry
));
1127 memcpy(info
.underflow
, private->underflow
,
1128 sizeof(info
.underflow
));
1129 info
.num_entries
= private->number
;
1130 info
.size
= private->size
;
1131 strcpy(info
.name
, name
);
1133 if (copy_to_user(user
, &info
, *len
) != 0)
1141 ret
= t
? PTR_ERR(t
) : -ENOENT
;
1142 #ifdef CONFIG_COMPAT
1144 xt_compat_unlock(AF_INET6
);
1150 get_entries(struct net
*net
, struct ip6t_get_entries __user
*uptr
,
1154 struct ip6t_get_entries get
;
1157 if (*len
< sizeof(get
)) {
1158 duprintf("get_entries: %u < %zu\n", *len
, sizeof(get
));
1161 if (copy_from_user(&get
, uptr
, sizeof(get
)) != 0)
1163 if (*len
!= sizeof(struct ip6t_get_entries
) + get
.size
) {
1164 duprintf("get_entries: %u != %zu\n",
1165 *len
, sizeof(get
) + get
.size
);
1169 t
= xt_find_table_lock(net
, AF_INET6
, get
.name
);
1170 if (!IS_ERR_OR_NULL(t
)) {
1171 struct xt_table_info
*private = t
->private;
1172 duprintf("t->private->number = %u\n", private->number
);
1173 if (get
.size
== private->size
)
1174 ret
= copy_entries_to_user(private->size
,
1175 t
, uptr
->entrytable
);
1177 duprintf("get_entries: I've got %u not %u!\n",
1178 private->size
, get
.size
);
1184 ret
= t
? PTR_ERR(t
) : -ENOENT
;
1190 __do_replace(struct net
*net
, const char *name
, unsigned int valid_hooks
,
1191 struct xt_table_info
*newinfo
, unsigned int num_counters
,
1192 void __user
*counters_ptr
)
1196 struct xt_table_info
*oldinfo
;
1197 struct xt_counters
*counters
;
1198 const void *loc_cpu_old_entry
;
1199 struct ip6t_entry
*iter
;
1202 counters
= vzalloc(num_counters
* sizeof(struct xt_counters
));
1208 t
= try_then_request_module(xt_find_table_lock(net
, AF_INET6
, name
),
1209 "ip6table_%s", name
);
1210 if (IS_ERR_OR_NULL(t
)) {
1211 ret
= t
? PTR_ERR(t
) : -ENOENT
;
1212 goto free_newinfo_counters_untrans
;
1216 if (valid_hooks
!= t
->valid_hooks
) {
1217 duprintf("Valid hook crap: %08X vs %08X\n",
1218 valid_hooks
, t
->valid_hooks
);
1223 oldinfo
= xt_replace_table(t
, num_counters
, newinfo
, &ret
);
1227 /* Update module usage count based on number of rules */
1228 duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
1229 oldinfo
->number
, oldinfo
->initial_entries
, newinfo
->number
);
1230 if ((oldinfo
->number
> oldinfo
->initial_entries
) ||
1231 (newinfo
->number
<= oldinfo
->initial_entries
))
1233 if ((oldinfo
->number
> oldinfo
->initial_entries
) &&
1234 (newinfo
->number
<= oldinfo
->initial_entries
))
1237 /* Get the old counters, and synchronize with replace */
1238 get_counters(oldinfo
, counters
);
1240 /* Decrease module usage counts and free resource */
1241 loc_cpu_old_entry
= oldinfo
->entries
[raw_smp_processor_id()];
1242 xt_entry_foreach(iter
, loc_cpu_old_entry
, oldinfo
->size
)
1243 cleanup_entry(iter
, net
);
1245 xt_free_table_info(oldinfo
);
1246 if (copy_to_user(counters_ptr
, counters
,
1247 sizeof(struct xt_counters
) * num_counters
) != 0) {
1248 /* Silent error, can't fail, new table is already in place */
1249 net_warn_ratelimited("ip6tables: counters copy to user failed while replacing table\n");
1258 free_newinfo_counters_untrans
:
1265 do_replace(struct net
*net
, const void __user
*user
, unsigned int len
)
1268 struct ip6t_replace tmp
;
1269 struct xt_table_info
*newinfo
;
1270 void *loc_cpu_entry
;
1271 struct ip6t_entry
*iter
;
1273 if (copy_from_user(&tmp
, user
, sizeof(tmp
)) != 0)
1276 /* overflow check */
1277 if (tmp
.num_counters
>= INT_MAX
/ sizeof(struct xt_counters
))
1279 tmp
.name
[sizeof(tmp
.name
)-1] = 0;
1281 newinfo
= xt_alloc_table_info(tmp
.size
);
1285 /* choose the copy that is on our node/cpu */
1286 loc_cpu_entry
= newinfo
->entries
[raw_smp_processor_id()];
1287 if (copy_from_user(loc_cpu_entry
, user
+ sizeof(tmp
),
1293 ret
= translate_table(net
, newinfo
, loc_cpu_entry
, &tmp
);
1297 duprintf("ip_tables: Translated table\n");
1299 ret
= __do_replace(net
, tmp
.name
, tmp
.valid_hooks
, newinfo
,
1300 tmp
.num_counters
, tmp
.counters
);
1302 goto free_newinfo_untrans
;
1305 free_newinfo_untrans
:
1306 xt_entry_foreach(iter
, loc_cpu_entry
, newinfo
->size
)
1307 cleanup_entry(iter
, net
);
1309 xt_free_table_info(newinfo
);
1314 do_add_counters(struct net
*net
, const void __user
*user
, unsigned int len
,
1317 unsigned int i
, curcpu
;
1318 struct xt_counters_info tmp
;
1319 struct xt_counters
*paddc
;
1320 unsigned int num_counters
;
1325 const struct xt_table_info
*private;
1327 const void *loc_cpu_entry
;
1328 struct ip6t_entry
*iter
;
1329 unsigned int addend
;
1330 #ifdef CONFIG_COMPAT
1331 struct compat_xt_counters_info compat_tmp
;
1335 size
= sizeof(struct compat_xt_counters_info
);
1340 size
= sizeof(struct xt_counters_info
);
1343 if (copy_from_user(ptmp
, user
, size
) != 0)
1346 #ifdef CONFIG_COMPAT
1348 num_counters
= compat_tmp
.num_counters
;
1349 name
= compat_tmp
.name
;
1353 num_counters
= tmp
.num_counters
;
1357 if (len
!= size
+ num_counters
* sizeof(struct xt_counters
))
1360 paddc
= vmalloc(len
- size
);
1364 if (copy_from_user(paddc
, user
+ size
, len
- size
) != 0) {
1369 t
= xt_find_table_lock(net
, AF_INET6
, name
);
1370 if (IS_ERR_OR_NULL(t
)) {
1371 ret
= t
? PTR_ERR(t
) : -ENOENT
;
1377 private = t
->private;
1378 if (private->number
!= num_counters
) {
1380 goto unlock_up_free
;
1384 /* Choose the copy that is on our node */
1385 curcpu
= smp_processor_id();
1386 addend
= xt_write_recseq_begin();
1387 loc_cpu_entry
= private->entries
[curcpu
];
1388 xt_entry_foreach(iter
, loc_cpu_entry
, private->size
) {
1389 ADD_COUNTER(iter
->counters
, paddc
[i
].bcnt
, paddc
[i
].pcnt
);
1392 xt_write_recseq_end(addend
);
1404 #ifdef CONFIG_COMPAT
1405 struct compat_ip6t_replace
{
1406 char name
[XT_TABLE_MAXNAMELEN
];
1410 u32 hook_entry
[NF_INET_NUMHOOKS
];
1411 u32 underflow
[NF_INET_NUMHOOKS
];
1413 compat_uptr_t counters
; /* struct xt_counters * */
1414 struct compat_ip6t_entry entries
[0];
1418 compat_copy_entry_to_user(struct ip6t_entry
*e
, void __user
**dstptr
,
1419 unsigned int *size
, struct xt_counters
*counters
,
1422 struct xt_entry_target
*t
;
1423 struct compat_ip6t_entry __user
*ce
;
1424 u_int16_t target_offset
, next_offset
;
1425 compat_uint_t origsize
;
1426 const struct xt_entry_match
*ematch
;
1430 ce
= (struct compat_ip6t_entry __user
*)*dstptr
;
1431 if (copy_to_user(ce
, e
, sizeof(struct ip6t_entry
)) != 0 ||
1432 copy_to_user(&ce
->counters
, &counters
[i
],
1433 sizeof(counters
[i
])) != 0)
1436 *dstptr
+= sizeof(struct compat_ip6t_entry
);
1437 *size
-= sizeof(struct ip6t_entry
) - sizeof(struct compat_ip6t_entry
);
1439 xt_ematch_foreach(ematch
, e
) {
1440 ret
= xt_compat_match_to_user(ematch
, dstptr
, size
);
1444 target_offset
= e
->target_offset
- (origsize
- *size
);
1445 t
= ip6t_get_target(e
);
1446 ret
= xt_compat_target_to_user(t
, dstptr
, size
);
1449 next_offset
= e
->next_offset
- (origsize
- *size
);
1450 if (put_user(target_offset
, &ce
->target_offset
) != 0 ||
1451 put_user(next_offset
, &ce
->next_offset
) != 0)
1457 compat_find_calc_match(struct xt_entry_match
*m
,
1459 const struct ip6t_ip6
*ipv6
,
1460 unsigned int hookmask
,
1463 struct xt_match
*match
;
1465 match
= xt_request_find_match(NFPROTO_IPV6
, m
->u
.user
.name
,
1466 m
->u
.user
.revision
);
1467 if (IS_ERR(match
)) {
1468 duprintf("compat_check_calc_match: `%s' not found\n",
1470 return PTR_ERR(match
);
1472 m
->u
.kernel
.match
= match
;
1473 *size
+= xt_compat_match_offset(match
);
1477 static void compat_release_entry(struct compat_ip6t_entry
*e
)
1479 struct xt_entry_target
*t
;
1480 struct xt_entry_match
*ematch
;
1482 /* Cleanup all matches */
1483 xt_ematch_foreach(ematch
, e
)
1484 module_put(ematch
->u
.kernel
.match
->me
);
1485 t
= compat_ip6t_get_target(e
);
1486 module_put(t
->u
.kernel
.target
->me
);
1490 check_compat_entry_size_and_hooks(struct compat_ip6t_entry
*e
,
1491 struct xt_table_info
*newinfo
,
1493 const unsigned char *base
,
1494 const unsigned char *limit
,
1495 const unsigned int *hook_entries
,
1496 const unsigned int *underflows
,
1499 struct xt_entry_match
*ematch
;
1500 struct xt_entry_target
*t
;
1501 struct xt_target
*target
;
1502 unsigned int entry_offset
;
1506 duprintf("check_compat_entry_size_and_hooks %p\n", e
);
1507 if ((unsigned long)e
% __alignof__(struct compat_ip6t_entry
) != 0 ||
1508 (unsigned char *)e
+ sizeof(struct compat_ip6t_entry
) >= limit
) {
1509 duprintf("Bad offset %p, limit = %p\n", e
, limit
);
1513 if (e
->next_offset
< sizeof(struct compat_ip6t_entry
) +
1514 sizeof(struct compat_xt_entry_target
)) {
1515 duprintf("checking: element %p size %u\n",
1520 /* For purposes of check_entry casting the compat entry is fine */
1521 ret
= check_entry((struct ip6t_entry
*)e
, name
);
1525 off
= sizeof(struct ip6t_entry
) - sizeof(struct compat_ip6t_entry
);
1526 entry_offset
= (void *)e
- (void *)base
;
1528 xt_ematch_foreach(ematch
, e
) {
1529 ret
= compat_find_calc_match(ematch
, name
,
1530 &e
->ipv6
, e
->comefrom
, &off
);
1532 goto release_matches
;
1536 t
= compat_ip6t_get_target(e
);
1537 target
= xt_request_find_target(NFPROTO_IPV6
, t
->u
.user
.name
,
1538 t
->u
.user
.revision
);
1539 if (IS_ERR(target
)) {
1540 duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
1542 ret
= PTR_ERR(target
);
1543 goto release_matches
;
1545 t
->u
.kernel
.target
= target
;
1547 off
+= xt_compat_target_offset(target
);
1549 ret
= xt_compat_add_offset(AF_INET6
, entry_offset
, off
);
1553 /* Check hooks & underflows */
1554 for (h
= 0; h
< NF_INET_NUMHOOKS
; h
++) {
1555 if ((unsigned char *)e
- base
== hook_entries
[h
])
1556 newinfo
->hook_entry
[h
] = hook_entries
[h
];
1557 if ((unsigned char *)e
- base
== underflows
[h
])
1558 newinfo
->underflow
[h
] = underflows
[h
];
1561 /* Clear counters and comefrom */
1562 memset(&e
->counters
, 0, sizeof(e
->counters
));
1567 module_put(t
->u
.kernel
.target
->me
);
1569 xt_ematch_foreach(ematch
, e
) {
1572 module_put(ematch
->u
.kernel
.match
->me
);
1578 compat_copy_entry_from_user(struct compat_ip6t_entry
*e
, void **dstptr
,
1579 unsigned int *size
, const char *name
,
1580 struct xt_table_info
*newinfo
, unsigned char *base
)
1582 struct xt_entry_target
*t
;
1583 struct ip6t_entry
*de
;
1584 unsigned int origsize
;
1586 struct xt_entry_match
*ematch
;
1590 de
= (struct ip6t_entry
*)*dstptr
;
1591 memcpy(de
, e
, sizeof(struct ip6t_entry
));
1592 memcpy(&de
->counters
, &e
->counters
, sizeof(e
->counters
));
1594 *dstptr
+= sizeof(struct ip6t_entry
);
1595 *size
+= sizeof(struct ip6t_entry
) - sizeof(struct compat_ip6t_entry
);
1597 xt_ematch_foreach(ematch
, e
) {
1598 ret
= xt_compat_match_from_user(ematch
, dstptr
, size
);
1602 de
->target_offset
= e
->target_offset
- (origsize
- *size
);
1603 t
= compat_ip6t_get_target(e
);
1604 xt_compat_target_from_user(t
, dstptr
, size
);
1606 de
->next_offset
= e
->next_offset
- (origsize
- *size
);
1607 for (h
= 0; h
< NF_INET_NUMHOOKS
; h
++) {
1608 if ((unsigned char *)de
- base
< newinfo
->hook_entry
[h
])
1609 newinfo
->hook_entry
[h
] -= origsize
- *size
;
1610 if ((unsigned char *)de
- base
< newinfo
->underflow
[h
])
1611 newinfo
->underflow
[h
] -= origsize
- *size
;
1616 static int compat_check_entry(struct ip6t_entry
*e
, struct net
*net
,
1621 struct xt_mtchk_param mtpar
;
1622 struct xt_entry_match
*ematch
;
1627 mtpar
.entryinfo
= &e
->ipv6
;
1628 mtpar
.hook_mask
= e
->comefrom
;
1629 mtpar
.family
= NFPROTO_IPV6
;
1630 xt_ematch_foreach(ematch
, e
) {
1631 ret
= check_match(ematch
, &mtpar
);
1633 goto cleanup_matches
;
1637 ret
= check_target(e
, net
, name
);
1639 goto cleanup_matches
;
1643 xt_ematch_foreach(ematch
, e
) {
1646 cleanup_match(ematch
, net
);
1652 translate_compat_table(struct net
*net
,
1654 unsigned int valid_hooks
,
1655 struct xt_table_info
**pinfo
,
1657 unsigned int total_size
,
1658 unsigned int number
,
1659 unsigned int *hook_entries
,
1660 unsigned int *underflows
)
1663 struct xt_table_info
*newinfo
, *info
;
1664 void *pos
, *entry0
, *entry1
;
1665 struct compat_ip6t_entry
*iter0
;
1666 struct ip6t_entry
*iter1
;
1673 info
->number
= number
;
1675 /* Init all hooks to impossible value. */
1676 for (i
= 0; i
< NF_INET_NUMHOOKS
; i
++) {
1677 info
->hook_entry
[i
] = 0xFFFFFFFF;
1678 info
->underflow
[i
] = 0xFFFFFFFF;
1681 duprintf("translate_compat_table: size %u\n", info
->size
);
1683 xt_compat_lock(AF_INET6
);
1684 xt_compat_init_offsets(AF_INET6
, number
);
1685 /* Walk through entries, checking offsets. */
1686 xt_entry_foreach(iter0
, entry0
, total_size
) {
1687 ret
= check_compat_entry_size_and_hooks(iter0
, info
, &size
,
1689 entry0
+ total_size
,
1700 duprintf("translate_compat_table: %u not %u entries\n",
1705 /* Check hooks all assigned */
1706 for (i
= 0; i
< NF_INET_NUMHOOKS
; i
++) {
1707 /* Only hooks which are valid */
1708 if (!(valid_hooks
& (1 << i
)))
1710 if (info
->hook_entry
[i
] == 0xFFFFFFFF) {
1711 duprintf("Invalid hook entry %u %u\n",
1712 i
, hook_entries
[i
]);
1715 if (info
->underflow
[i
] == 0xFFFFFFFF) {
1716 duprintf("Invalid underflow %u %u\n",
1723 newinfo
= xt_alloc_table_info(size
);
1727 newinfo
->number
= number
;
1728 for (i
= 0; i
< NF_INET_NUMHOOKS
; i
++) {
1729 newinfo
->hook_entry
[i
] = info
->hook_entry
[i
];
1730 newinfo
->underflow
[i
] = info
->underflow
[i
];
1732 entry1
= newinfo
->entries
[raw_smp_processor_id()];
1735 xt_entry_foreach(iter0
, entry0
, total_size
) {
1736 ret
= compat_copy_entry_from_user(iter0
, &pos
, &size
,
1737 name
, newinfo
, entry1
);
1741 xt_compat_flush_offsets(AF_INET6
);
1742 xt_compat_unlock(AF_INET6
);
1747 if (!mark_source_chains(newinfo
, valid_hooks
, entry1
))
1751 xt_entry_foreach(iter1
, entry1
, newinfo
->size
) {
1752 ret
= compat_check_entry(iter1
, net
, name
);
1756 if (strcmp(ip6t_get_target(iter1
)->u
.user
.name
,
1757 XT_ERROR_TARGET
) == 0)
1758 ++newinfo
->stacksize
;
1762 * The first i matches need cleanup_entry (calls ->destroy)
1763 * because they had called ->check already. The other j-i
1764 * entries need only release.
1768 xt_entry_foreach(iter0
, entry0
, newinfo
->size
) {
1773 compat_release_entry(iter0
);
1775 xt_entry_foreach(iter1
, entry1
, newinfo
->size
) {
1778 cleanup_entry(iter1
, net
);
1780 xt_free_table_info(newinfo
);
1784 /* And one copy for every other CPU */
1785 for_each_possible_cpu(i
)
1786 if (newinfo
->entries
[i
] && newinfo
->entries
[i
] != entry1
)
1787 memcpy(newinfo
->entries
[i
], entry1
, newinfo
->size
);
1791 xt_free_table_info(info
);
1795 xt_free_table_info(newinfo
);
1797 xt_entry_foreach(iter0
, entry0
, total_size
) {
1800 compat_release_entry(iter0
);
1804 xt_compat_flush_offsets(AF_INET6
);
1805 xt_compat_unlock(AF_INET6
);
1810 compat_do_replace(struct net
*net
, void __user
*user
, unsigned int len
)
1813 struct compat_ip6t_replace tmp
;
1814 struct xt_table_info
*newinfo
;
1815 void *loc_cpu_entry
;
1816 struct ip6t_entry
*iter
;
1818 if (copy_from_user(&tmp
, user
, sizeof(tmp
)) != 0)
1821 /* overflow check */
1822 if (tmp
.size
>= INT_MAX
/ num_possible_cpus())
1824 if (tmp
.num_counters
>= INT_MAX
/ sizeof(struct xt_counters
))
1826 tmp
.name
[sizeof(tmp
.name
)-1] = 0;
1828 newinfo
= xt_alloc_table_info(tmp
.size
);
1832 /* choose the copy that is on our node/cpu */
1833 loc_cpu_entry
= newinfo
->entries
[raw_smp_processor_id()];
1834 if (copy_from_user(loc_cpu_entry
, user
+ sizeof(tmp
),
1840 ret
= translate_compat_table(net
, tmp
.name
, tmp
.valid_hooks
,
1841 &newinfo
, &loc_cpu_entry
, tmp
.size
,
1842 tmp
.num_entries
, tmp
.hook_entry
,
1847 duprintf("compat_do_replace: Translated table\n");
1849 ret
= __do_replace(net
, tmp
.name
, tmp
.valid_hooks
, newinfo
,
1850 tmp
.num_counters
, compat_ptr(tmp
.counters
));
1852 goto free_newinfo_untrans
;
1855 free_newinfo_untrans
:
1856 xt_entry_foreach(iter
, loc_cpu_entry
, newinfo
->size
)
1857 cleanup_entry(iter
, net
);
1859 xt_free_table_info(newinfo
);
1864 compat_do_ip6t_set_ctl(struct sock
*sk
, int cmd
, void __user
*user
,
1869 if (!ns_capable(sock_net(sk
)->user_ns
, CAP_NET_ADMIN
))
1873 case IP6T_SO_SET_REPLACE
:
1874 ret
= compat_do_replace(sock_net(sk
), user
, len
);
1877 case IP6T_SO_SET_ADD_COUNTERS
:
1878 ret
= do_add_counters(sock_net(sk
), user
, len
, 1);
1882 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd
);
1889 struct compat_ip6t_get_entries
{
1890 char name
[XT_TABLE_MAXNAMELEN
];
1892 struct compat_ip6t_entry entrytable
[0];
1896 compat_copy_entries_to_user(unsigned int total_size
, struct xt_table
*table
,
1897 void __user
*userptr
)
1899 struct xt_counters
*counters
;
1900 const struct xt_table_info
*private = table
->private;
1904 const void *loc_cpu_entry
;
1906 struct ip6t_entry
*iter
;
1908 counters
= alloc_counters(table
);
1909 if (IS_ERR(counters
))
1910 return PTR_ERR(counters
);
1912 /* choose the copy that is on our node/cpu, ...
1913 * This choice is lazy (because current thread is
1914 * allowed to migrate to another cpu)
1916 loc_cpu_entry
= private->entries
[raw_smp_processor_id()];
1919 xt_entry_foreach(iter
, loc_cpu_entry
, total_size
) {
1920 ret
= compat_copy_entry_to_user(iter
, &pos
,
1921 &size
, counters
, i
++);
1931 compat_get_entries(struct net
*net
, struct compat_ip6t_get_entries __user
*uptr
,
1935 struct compat_ip6t_get_entries get
;
1938 if (*len
< sizeof(get
)) {
1939 duprintf("compat_get_entries: %u < %zu\n", *len
, sizeof(get
));
1943 if (copy_from_user(&get
, uptr
, sizeof(get
)) != 0)
1946 if (*len
!= sizeof(struct compat_ip6t_get_entries
) + get
.size
) {
1947 duprintf("compat_get_entries: %u != %zu\n",
1948 *len
, sizeof(get
) + get
.size
);
1952 xt_compat_lock(AF_INET6
);
1953 t
= xt_find_table_lock(net
, AF_INET6
, get
.name
);
1954 if (!IS_ERR_OR_NULL(t
)) {
1955 const struct xt_table_info
*private = t
->private;
1956 struct xt_table_info info
;
1957 duprintf("t->private->number = %u\n", private->number
);
1958 ret
= compat_table_info(private, &info
);
1959 if (!ret
&& get
.size
== info
.size
) {
1960 ret
= compat_copy_entries_to_user(private->size
,
1961 t
, uptr
->entrytable
);
1963 duprintf("compat_get_entries: I've got %u not %u!\n",
1964 private->size
, get
.size
);
1967 xt_compat_flush_offsets(AF_INET6
);
1971 ret
= t
? PTR_ERR(t
) : -ENOENT
;
1973 xt_compat_unlock(AF_INET6
);
1977 static int do_ip6t_get_ctl(struct sock
*, int, void __user
*, int *);
1980 compat_do_ip6t_get_ctl(struct sock
*sk
, int cmd
, void __user
*user
, int *len
)
1984 if (!ns_capable(sock_net(sk
)->user_ns
, CAP_NET_ADMIN
))
1988 case IP6T_SO_GET_INFO
:
1989 ret
= get_info(sock_net(sk
), user
, len
, 1);
1991 case IP6T_SO_GET_ENTRIES
:
1992 ret
= compat_get_entries(sock_net(sk
), user
, len
);
1995 ret
= do_ip6t_get_ctl(sk
, cmd
, user
, len
);
2002 do_ip6t_set_ctl(struct sock
*sk
, int cmd
, void __user
*user
, unsigned int len
)
2006 if (!ns_capable(sock_net(sk
)->user_ns
, CAP_NET_ADMIN
))
2010 case IP6T_SO_SET_REPLACE
:
2011 ret
= do_replace(sock_net(sk
), user
, len
);
2014 case IP6T_SO_SET_ADD_COUNTERS
:
2015 ret
= do_add_counters(sock_net(sk
), user
, len
, 0);
2019 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd
);
2027 do_ip6t_get_ctl(struct sock
*sk
, int cmd
, void __user
*user
, int *len
)
2031 if (!ns_capable(sock_net(sk
)->user_ns
, CAP_NET_ADMIN
))
2035 case IP6T_SO_GET_INFO
:
2036 ret
= get_info(sock_net(sk
), user
, len
, 0);
2039 case IP6T_SO_GET_ENTRIES
:
2040 ret
= get_entries(sock_net(sk
), user
, len
);
2043 case IP6T_SO_GET_REVISION_MATCH
:
2044 case IP6T_SO_GET_REVISION_TARGET
: {
2045 struct xt_get_revision rev
;
2048 if (*len
!= sizeof(rev
)) {
2052 if (copy_from_user(&rev
, user
, sizeof(rev
)) != 0) {
2056 rev
.name
[sizeof(rev
.name
)-1] = 0;
2058 if (cmd
== IP6T_SO_GET_REVISION_TARGET
)
2063 try_then_request_module(xt_find_revision(AF_INET6
, rev
.name
,
2066 "ip6t_%s", rev
.name
);
2071 duprintf("do_ip6t_get_ctl: unknown request %i\n", cmd
);
2078 struct xt_table
*ip6t_register_table(struct net
*net
,
2079 const struct xt_table
*table
,
2080 const struct ip6t_replace
*repl
)
2083 struct xt_table_info
*newinfo
;
2084 struct xt_table_info bootstrap
= {0};
2085 void *loc_cpu_entry
;
2086 struct xt_table
*new_table
;
2088 newinfo
= xt_alloc_table_info(repl
->size
);
2094 /* choose the copy on our node/cpu, but dont care about preemption */
2095 loc_cpu_entry
= newinfo
->entries
[raw_smp_processor_id()];
2096 memcpy(loc_cpu_entry
, repl
->entries
, repl
->size
);
2098 ret
= translate_table(net
, newinfo
, loc_cpu_entry
, repl
);
2102 new_table
= xt_register_table(net
, table
, &bootstrap
, newinfo
);
2103 if (IS_ERR(new_table
)) {
2104 ret
= PTR_ERR(new_table
);
2110 xt_free_table_info(newinfo
);
2112 return ERR_PTR(ret
);
2115 void ip6t_unregister_table(struct net
*net
, struct xt_table
*table
)
2117 struct xt_table_info
*private;
2118 void *loc_cpu_entry
;
2119 struct module
*table_owner
= table
->me
;
2120 struct ip6t_entry
*iter
;
2122 private = xt_unregister_table(table
);
2124 /* Decrease module usage counts and free resources */
2125 loc_cpu_entry
= private->entries
[raw_smp_processor_id()];
2126 xt_entry_foreach(iter
, loc_cpu_entry
, private->size
)
2127 cleanup_entry(iter
, net
);
2128 if (private->number
> private->initial_entries
)
2129 module_put(table_owner
);
2130 xt_free_table_info(private);
/* Returns 1 if the type and code is matched by the range, 0 otherwise */
static inline bool
icmp6_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
		      u_int8_t type, u_int8_t code,
		      bool invert)
{
	bool hit = type == test_type &&
		   code >= min_code &&
		   code <= max_code;

	/* XOR folds in the userspace '!' inversion flag. */
	return hit ^ invert;
}
2144 icmp6_match(const struct sk_buff
*skb
, struct xt_action_param
*par
)
2146 const struct icmp6hdr
*ic
;
2147 struct icmp6hdr _icmph
;
2148 const struct ip6t_icmp
*icmpinfo
= par
->matchinfo
;
2150 /* Must not be a fragment. */
2151 if (par
->fragoff
!= 0)
2154 ic
= skb_header_pointer(skb
, par
->thoff
, sizeof(_icmph
), &_icmph
);
2156 /* We've been asked to examine this packet, and we
2157 * can't. Hence, no choice but to drop.
2159 duprintf("Dropping evil ICMP tinygram.\n");
2160 par
->hotdrop
= true;
2164 return icmp6_type_code_match(icmpinfo
->type
,
2167 ic
->icmp6_type
, ic
->icmp6_code
,
2168 !!(icmpinfo
->invflags
&IP6T_ICMP_INV
));
2171 /* Called when user tries to insert an entry of this type. */
2172 static int icmp6_checkentry(const struct xt_mtchk_param
*par
)
2174 const struct ip6t_icmp
*icmpinfo
= par
->matchinfo
;
2176 /* Must specify no unknown invflags */
2177 return (icmpinfo
->invflags
& ~IP6T_ICMP_INV
) ? -EINVAL
: 0;
2180 /* The built-in targets: standard (NULL) and error. */
2181 static struct xt_target ip6t_builtin_tg
[] __read_mostly
= {
2183 .name
= XT_STANDARD_TARGET
,
2184 .targetsize
= sizeof(int),
2185 .family
= NFPROTO_IPV6
,
2186 #ifdef CONFIG_COMPAT
2187 .compatsize
= sizeof(compat_int_t
),
2188 .compat_from_user
= compat_standard_from_user
,
2189 .compat_to_user
= compat_standard_to_user
,
2193 .name
= XT_ERROR_TARGET
,
2194 .target
= ip6t_error
,
2195 .targetsize
= XT_FUNCTION_MAXNAMELEN
,
2196 .family
= NFPROTO_IPV6
,
2200 static struct nf_sockopt_ops ip6t_sockopts
= {
2202 .set_optmin
= IP6T_BASE_CTL
,
2203 .set_optmax
= IP6T_SO_SET_MAX
+1,
2204 .set
= do_ip6t_set_ctl
,
2205 #ifdef CONFIG_COMPAT
2206 .compat_set
= compat_do_ip6t_set_ctl
,
2208 .get_optmin
= IP6T_BASE_CTL
,
2209 .get_optmax
= IP6T_SO_GET_MAX
+1,
2210 .get
= do_ip6t_get_ctl
,
2211 #ifdef CONFIG_COMPAT
2212 .compat_get
= compat_do_ip6t_get_ctl
,
2214 .owner
= THIS_MODULE
,
2217 static struct xt_match ip6t_builtin_mt
[] __read_mostly
= {
2220 .match
= icmp6_match
,
2221 .matchsize
= sizeof(struct ip6t_icmp
),
2222 .checkentry
= icmp6_checkentry
,
2223 .proto
= IPPROTO_ICMPV6
,
2224 .family
= NFPROTO_IPV6
,
2228 static int __net_init
ip6_tables_net_init(struct net
*net
)
2230 return xt_proto_init(net
, NFPROTO_IPV6
);
2233 static void __net_exit
ip6_tables_net_exit(struct net
*net
)
2235 xt_proto_fini(net
, NFPROTO_IPV6
);
2238 static struct pernet_operations ip6_tables_net_ops
= {
2239 .init
= ip6_tables_net_init
,
2240 .exit
= ip6_tables_net_exit
,
2243 static int __init
ip6_tables_init(void)
2247 ret
= register_pernet_subsys(&ip6_tables_net_ops
);
2251 /* No one else will be downing sem now, so we won't sleep */
2252 ret
= xt_register_targets(ip6t_builtin_tg
, ARRAY_SIZE(ip6t_builtin_tg
));
2255 ret
= xt_register_matches(ip6t_builtin_mt
, ARRAY_SIZE(ip6t_builtin_mt
));
2259 /* Register setsockopt */
2260 ret
= nf_register_sockopt(&ip6t_sockopts
);
2264 pr_info("(C) 2000-2006 Netfilter Core Team\n");
2268 xt_unregister_matches(ip6t_builtin_mt
, ARRAY_SIZE(ip6t_builtin_mt
));
2270 xt_unregister_targets(ip6t_builtin_tg
, ARRAY_SIZE(ip6t_builtin_tg
));
2272 unregister_pernet_subsys(&ip6_tables_net_ops
);
2277 static void __exit
ip6_tables_fini(void)
2279 nf_unregister_sockopt(&ip6t_sockopts
);
2281 xt_unregister_matches(ip6t_builtin_mt
, ARRAY_SIZE(ip6t_builtin_mt
));
2282 xt_unregister_targets(ip6t_builtin_tg
, ARRAY_SIZE(ip6t_builtin_tg
));
2283 unregister_pernet_subsys(&ip6_tables_net_ops
);
EXPORT_SYMBOL(ip6t_register_table);
EXPORT_SYMBOL(ip6t_unregister_table);
EXPORT_SYMBOL(ip6t_do_table);

module_init(ip6_tables_init);
module_exit(ip6_tables_fini);