/*
 * Packet matching code.
 *
 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
 * Copyright (C) 2006-2010 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/cache.h>
#include <linux/capability.h>
#include <linux/skbuff.h>
#include <linux/kmod.h>
#include <linux/vmalloc.h>
#include <linux/netdevice.h>
#include <linux/module.h>
#include <linux/icmp.h>
#include <net/ip.h>
#include <net/compat.h>
#include <asm/uaccess.h>
#include <linux/mutex.h>
#include <linux/proc_fs.h>
#include <linux/err.h>
#include <linux/cpumask.h>

#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <net/netfilter/nf_log.h>
#include "../../netfilter/xt_repldata.h"
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
MODULE_DESCRIPTION("IPv4 packet filter");
/*#define DEBUG_IP_FIREWALL*/
/*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
/*#define DEBUG_IP_FIREWALL_USER*/
#ifdef DEBUG_IP_FIREWALL
#define dprintf(format, args...) pr_info(format , ## args)
#else
#define dprintf(format, args...)
#endif

#ifdef DEBUG_IP_FIREWALL_USER
#define duprintf(format, args...) pr_info(format , ## args)
#else
#define duprintf(format, args...)
#endif

#ifdef CONFIG_NETFILTER_DEBUG
#define IP_NF_ASSERT(x)	WARN_ON(!(x))
#else
#define IP_NF_ASSERT(x)
#endif
#if 0
/* All the better to debug you with... */
#define static
#define inline
#endif
void *ipt_alloc_initial_table(const struct xt_table *info)
{
	return xt_alloc_initial_table(ipt, IPT);
}
EXPORT_SYMBOL_GPL(ipt_alloc_initial_table);
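
/*
 * ipt_alloc_initial_table() above expands the xt_alloc_initial_table()
 * template from xt_repldata.h: it builds the boilerplate initial
 * ruleset (one builtin chain per valid hook, each ending in the table's
 * policy, plus the terminating ERROR rule) that a table later passes to
 * ipt_register_table() below.
 */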

/* Returns whether matches rule or not. */
/* Performance critical - called for every packet */
static inline bool
ip_packet_match(const struct iphdr *ip,
		const char *indev,
		const char *outdev,
		const struct ipt_ip *ipinfo,
		int isfrag)
{
	unsigned long ret;

#define FWINV(bool, invflg) ((bool) ^ !!(ipinfo->invflags & (invflg)))

	if (FWINV((ip->saddr&ipinfo->smsk.s_addr) != ipinfo->src.s_addr,
		  IPT_INV_SRCIP) ||
	    FWINV((ip->daddr&ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr,
		  IPT_INV_DSTIP)) {
		dprintf("Source or dest mismatch.\n");

		dprintf("SRC: %pI4. Mask: %pI4. Target: %pI4.%s\n",
			&ip->saddr, &ipinfo->smsk.s_addr, &ipinfo->src.s_addr,
			ipinfo->invflags & IPT_INV_SRCIP ? " (INV)" : "");
		dprintf("DST: %pI4 Mask: %pI4 Target: %pI4.%s\n",
			&ip->daddr, &ipinfo->dmsk.s_addr, &ipinfo->dst.s_addr,
			ipinfo->invflags & IPT_INV_DSTIP ? " (INV)" : "");
		return false;
	}

	ret = ifname_compare_aligned(indev, ipinfo->iniface, ipinfo->iniface_mask);

	if (FWINV(ret != 0, IPT_INV_VIA_IN)) {
		dprintf("VIA in mismatch (%s vs %s).%s\n",
			indev, ipinfo->iniface,
			ipinfo->invflags&IPT_INV_VIA_IN ?" (INV)":"");
		return false;
	}

	ret = ifname_compare_aligned(outdev, ipinfo->outiface, ipinfo->outiface_mask);

	if (FWINV(ret != 0, IPT_INV_VIA_OUT)) {
		dprintf("VIA out mismatch (%s vs %s).%s\n",
			outdev, ipinfo->outiface,
			ipinfo->invflags&IPT_INV_VIA_OUT ?" (INV)":"");
		return false;
	}

	/* Check specific protocol */
	if (ipinfo->proto &&
	    FWINV(ip->protocol != ipinfo->proto, IPT_INV_PROTO)) {
		dprintf("Packet protocol %hi does not match %hi.%s\n",
			ip->protocol, ipinfo->proto,
			ipinfo->invflags&IPT_INV_PROTO ? " (INV)":"");
		return false;
	}

	/* If we have a fragment rule but the packet is not a fragment
	 * then we return zero */
	if (FWINV((ipinfo->flags&IPT_F_FRAG) && !isfrag, IPT_INV_FRAG)) {
		dprintf("Fragment rule but not fragment.%s\n",
			ipinfo->invflags & IPT_INV_FRAG ? " (INV)" : "");
		return false;
	}

	return true;
}

static bool
ip_checkentry(const struct ipt_ip *ip)
{
	if (ip->flags & ~IPT_F_MASK) {
		duprintf("Unknown flag bits set: %08X\n",
			 ip->flags & ~IPT_F_MASK);
		return false;
	}
	if (ip->invflags & ~IPT_INV_MASK) {
		duprintf("Unknown invflag bits set: %08X\n",
			 ip->invflags & ~IPT_INV_MASK);
		return false;
	}
	return true;
}

static unsigned int
ipt_error(struct sk_buff *skb, const struct xt_action_param *par)
{
	net_info_ratelimited("error: `%s'\n", (const char *)par->targinfo);

	return NF_DROP;
}

/* Performance critical */
static inline struct ipt_entry *
get_entry(const void *base, unsigned int offset)
{
	return (struct ipt_entry *)(base + offset);
}

/* All zeroes == unconditional rule. */
/* Mildly perf critical (only if packet tracing is on) */
static inline bool unconditional(const struct ipt_ip *ip)
{
	static const struct ipt_ip uncond;

	return memcmp(ip, &uncond, sizeof(uncond)) == 0;
}

/* for const-correctness */
static inline const struct xt_entry_target *
ipt_get_target_c(const struct ipt_entry *e)
{
	return ipt_get_target((struct ipt_entry *)e);
}

#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
static const char *const hooknames[] = {
	[NF_INET_PRE_ROUTING]		= "PREROUTING",
	[NF_INET_LOCAL_IN]		= "INPUT",
	[NF_INET_FORWARD]		= "FORWARD",
	[NF_INET_LOCAL_OUT]		= "OUTPUT",
	[NF_INET_POST_ROUTING]		= "POSTROUTING",
};

enum nf_ip_trace_comments {
	NF_IP_TRACE_COMMENT_RULE,
	NF_IP_TRACE_COMMENT_RETURN,
	NF_IP_TRACE_COMMENT_POLICY,
};

static const char *const comments[] = {
	[NF_IP_TRACE_COMMENT_RULE]	= "rule",
	[NF_IP_TRACE_COMMENT_RETURN]	= "return",
	[NF_IP_TRACE_COMMENT_POLICY]	= "policy",
};

static struct nf_loginfo trace_loginfo = {
	.type = NF_LOG_TYPE_LOG,
	.u = {
		.log = {
			.level = 4,
			.logflags = NF_LOG_MASK,
		},
	},
};

/* Mildly perf critical (only if packet tracing is on) */
static inline int
get_chainname_rulenum(const struct ipt_entry *s, const struct ipt_entry *e,
		      const char *hookname, const char **chainname,
		      const char **comment, unsigned int *rulenum)
{
	const struct xt_standard_target *t = (void *)ipt_get_target_c(s);

	if (strcmp(t->target.u.kernel.target->name, XT_ERROR_TARGET) == 0) {
		/* Head of user chain: ERROR target with chainname */
		*chainname = t->target.data;
		(*rulenum) = 0;
	} else if (s == e) {
		(*rulenum)++;

		if (s->target_offset == sizeof(struct ipt_entry) &&
		    strcmp(t->target.u.kernel.target->name,
			   XT_STANDARD_TARGET) == 0 &&
		    t->verdict < 0 &&
		    unconditional(&s->ip)) {
			/* Tail of chains: STANDARD target (return/policy) */
			*comment = *chainname == hookname
				? comments[NF_IP_TRACE_COMMENT_POLICY]
				: comments[NF_IP_TRACE_COMMENT_RETURN];
		}
		return 1;
	} else
		(*rulenum)++;

	return 0;
}
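
/*
 * get_chainname_rulenum() is meant to be the body of the
 * xt_entry_foreach() walk in trace_packet() below: it returns 1 once
 * the walk reaches the entry being traced so that the caller can stop,
 * and 0 otherwise, accumulating the chain name, comment ("rule",
 * "return" or "policy") and rule number along the way.
 */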

static void trace_packet(const struct sk_buff *skb,
			 unsigned int hook,
			 const struct net_device *in,
			 const struct net_device *out,
			 const char *tablename,
			 const struct xt_table_info *private,
			 const struct ipt_entry *e)
{
	const void *table_base;
	const struct ipt_entry *root;
	const char *hookname, *chainname, *comment;
	const struct ipt_entry *iter;
	unsigned int rulenum = 0;
	struct net *net = dev_net(in ? in : out);

	table_base = private->entries[smp_processor_id()];
	root = get_entry(table_base, private->hook_entry[hook]);

	hookname = chainname = hooknames[hook];
	comment = comments[NF_IP_TRACE_COMMENT_RULE];

	xt_entry_foreach(iter, root, private->size - private->hook_entry[hook])
		if (get_chainname_rulenum(iter, e, hookname,
		    &chainname, &comment, &rulenum) != 0)
			break;

	nf_log_packet(net, AF_INET, hook, skb, in, out, &trace_loginfo,
		      "TRACE: %s:%s:%s:%u ",
		      tablename, chainname, comment, rulenum);
}
#endif
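
/*
 * The resulting log line has the form
 *	TRACE: <table>:<chain>:<rule|return|policy>:<rulenum>
 * emitted once per traced rule hit, which is roughly one line per
 * table/chain a traced packet traverses.
 */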

static inline __pure
struct ipt_entry *ipt_next_entry(const struct ipt_entry *entry)
{
	return (void *)entry + entry->next_offset;
}

/* Returns one of the generic firewall policies, like NF_ACCEPT. */
unsigned int
ipt_do_table(struct sk_buff *skb,
	     unsigned int hook,
	     const struct net_device *in,
	     const struct net_device *out,
	     struct xt_table *table)
{
	static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
	const struct iphdr *ip;
	/* Initializing verdict to NF_DROP keeps gcc happy. */
	unsigned int verdict = NF_DROP;
	const char *indev, *outdev;
	const void *table_base;
	struct ipt_entry *e, **jumpstack;
	unsigned int *stackptr, origptr, cpu;
	const struct xt_table_info *private;
	struct xt_action_param acpar;
	unsigned int addend;

	/* Initialization */
	ip = ip_hdr(skb);
	indev = in ? in->name : nulldevname;
	outdev = out ? out->name : nulldevname;
	/* We handle fragments by dealing with the first fragment as
	 * if it was a normal packet.  All other fragments are treated
	 * normally, except that they will NEVER match rules that ask
	 * things we don't know, ie. tcp syn flag or ports).  If the
	 * rule is also a fragment-specific rule, non-fragments won't
	 * match it. */
	acpar.fragoff = ntohs(ip->frag_off) & IP_OFFSET;
	acpar.thoff   = ip_hdrlen(skb);
	acpar.hotdrop = false;
	acpar.in      = in;
	acpar.out     = out;
	acpar.family  = NFPROTO_IPV4;
	acpar.hooknum = hook;

	IP_NF_ASSERT(table->valid_hooks & (1 << hook));
	local_bh_disable();
	addend = xt_write_recseq_begin();
	private = table->private;
	cpu        = smp_processor_id();
	/*
	 * Ensure we load private-> members after we've fetched the base
	 * pointer.
	 */
	smp_read_barrier_depends();
	table_base = private->entries[cpu];
	jumpstack  = (struct ipt_entry **)private->jumpstack[cpu];
	stackptr   = per_cpu_ptr(private->stackptr, cpu);
	origptr    = *stackptr;

	e = get_entry(table_base, private->hook_entry[hook]);

	pr_debug("Entering %s(hook %u); sp at %u (UF %p)\n",
		 table->name, hook, origptr,
		 get_entry(table_base, private->underflow[hook]));

	do {
		const struct xt_entry_target *t;
		const struct xt_entry_match *ematch;

		IP_NF_ASSERT(e);
		if (!ip_packet_match(ip, indev, outdev,
		    &e->ip, acpar.fragoff)) {
 no_match:
			e = ipt_next_entry(e);
			continue;
		}

		xt_ematch_foreach(ematch, e) {
			acpar.match     = ematch->u.kernel.match;
			acpar.matchinfo = ematch->data;
			if (!acpar.match->match(skb, &acpar))
				goto no_match;
		}

		ADD_COUNTER(e->counters, skb->len, 1);

		t = ipt_get_target(e);
		IP_NF_ASSERT(t->u.kernel.target);

#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
		/* The packet is traced: log it */
		if (unlikely(skb->nf_trace))
			trace_packet(skb, hook, in, out,
				     table->name, private, e);
#endif
		/* Standard target? */
		if (!t->u.kernel.target->target) {
			int v;

			v = ((struct xt_standard_target *)t)->verdict;
			if (v < 0) {
				/* Pop from stack? */
				if (v != XT_RETURN) {
					verdict = (unsigned int)(-v) - 1;
					break;
				}
				if (*stackptr <= origptr) {
					e = get_entry(table_base,
					    private->underflow[hook]);
					pr_debug("Underflow (this is normal) "
						 "to %p\n", e);
				} else {
					e = jumpstack[--*stackptr];
					pr_debug("Pulled %p out from pos %u\n",
						 e, *stackptr);
					e = ipt_next_entry(e);
				}
				continue;
			}
			if (table_base + v != ipt_next_entry(e) &&
			    !(e->ip.flags & IPT_F_GOTO)) {
				if (*stackptr >= private->stacksize) {
					verdict = NF_DROP;
					break;
				}
				jumpstack[(*stackptr)++] = e;
				pr_debug("Pushed %p into pos %u\n",
					 e, *stackptr - 1);
			}

			e = get_entry(table_base, v);
			continue;
		}

		acpar.target   = t->u.kernel.target;
		acpar.targinfo = t->data;

		verdict = t->u.kernel.target->target(skb, &acpar);
		/* Target might have changed stuff. */
		ip = ip_hdr(skb);
		if (verdict == XT_CONTINUE)
			e = ipt_next_entry(e);
		else
			/* Verdict */
			break;
	} while (!acpar.hotdrop);
	pr_debug("Exiting %s; resetting sp from %u to %u\n",
		 __func__, *stackptr, origptr);
	*stackptr = origptr;
	xt_write_recseq_end(addend);
	local_bh_enable();

#ifdef DEBUG_ALLOW_ALL
	return NF_ACCEPT;
#else
	if (acpar.hotdrop)
		return NF_DROP;
	else return verdict;
#endif
}
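
/*
 * Verdict encoding used by the loop above: a standard target's verdict
 * field holds either a non-negative offset to jump to within the table
 * blob, or a negative value, where XT_RETURN pops the jump stack and
 * any other negative v decodes to the netfilter verdict
 * (unsigned int)(-v) - 1 (e.g. v == -NF_ACCEPT - 1 decodes back to
 * NF_ACCEPT).  A jump pushes the current entry onto the per-cpu
 * jumpstack unless it is a tail call or an explicit goto (IPT_F_GOTO).
 */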

/* Figures out from what hook each rule can be called: returns 0 if
   there are loops.  Puts hook bitmask in comefrom. */
static int
mark_source_chains(const struct xt_table_info *newinfo,
		   unsigned int valid_hooks, void *entry0)
{
	unsigned int hook;

	/* No recursion; use packet counter to save back ptrs (reset
	   to 0 as we leave), and comefrom to save source hook bitmask */
	for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
		unsigned int pos = newinfo->hook_entry[hook];
		struct ipt_entry *e = (struct ipt_entry *)(entry0 + pos);

		if (!(valid_hooks & (1 << hook)))
			continue;

		/* Set initial back pointer. */
		e->counters.pcnt = pos;

		for (;;) {
			const struct xt_standard_target *t
				= (void *)ipt_get_target_c(e);
			int visited = e->comefrom & (1 << hook);

			if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
				pr_err("iptables: loop hook %u pos %u %08X.\n",
				       hook, pos, e->comefrom);
				return 0;
			}
			e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));

			/* Unconditional return/END. */
			if ((e->target_offset == sizeof(struct ipt_entry) &&
			     (strcmp(t->target.u.user.name,
				     XT_STANDARD_TARGET) == 0) &&
			     t->verdict < 0 && unconditional(&e->ip)) ||
			    visited) {
				unsigned int oldpos, size;

				if ((strcmp(t->target.u.user.name,
					    XT_STANDARD_TARGET) == 0) &&
				    t->verdict < -NF_MAX_VERDICT - 1) {
					duprintf("mark_source_chains: bad "
						"negative verdict (%i)\n",
								t->verdict);
					return 0;
				}

				/* Return: backtrack through the last
				   big jump. */
				do {
					e->comefrom ^= (1<<NF_INET_NUMHOOKS);
#ifdef DEBUG_IP_FIREWALL_USER
					if (e->comefrom
					    & (1 << NF_INET_NUMHOOKS)) {
						duprintf("Back unset "
							 "on hook %u "
							 "rule %u\n",
							 hook, pos);
					}
#endif
					oldpos = pos;
					pos = e->counters.pcnt;
					e->counters.pcnt = 0;

					/* We're at the start. */
					if (pos == oldpos)
						goto next;

					e = (struct ipt_entry *)
						(entry0 + pos);
				} while (oldpos == pos + e->next_offset);

				/* Move along one */
				size = e->next_offset;
				e = (struct ipt_entry *)
					(entry0 + pos + size);
				e->counters.pcnt = pos;
				pos += size;
			} else {
				int newpos = t->verdict;

				if (strcmp(t->target.u.user.name,
					   XT_STANDARD_TARGET) == 0 &&
				    newpos >= 0) {
					if (newpos > newinfo->size -
						sizeof(struct ipt_entry)) {
						duprintf("mark_source_chains: "
							"bad verdict (%i)\n",
								newpos);
						return 0;
					}
					/* This a jump; chase it. */
					duprintf("Jump rule %u -> %u\n",
						 pos, newpos);
				} else {
					/* ... this is a fallthru */
					newpos = pos + e->next_offset;
				}
				e = (struct ipt_entry *)
					(entry0 + newpos);
				e->counters.pcnt = pos;
				pos = newpos;
			}
		}
		next:
		duprintf("Finished chain %u\n", hook);
	}
	return 1;
}
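
/*
 * mark_source_chains() is an iterative depth-first walk that needs no
 * extra allocation: e->counters.pcnt temporarily holds the back pointer
 * to the calling rule (reset to 0 on the way back out), and the spare
 * (1 << NF_INET_NUMHOOKS) bit of e->comefrom marks entries on the
 * current path, so meeting that bit again means a loop and the whole
 * table is rejected (translate_table() then returns -ELOOP).
 */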

static void cleanup_match(struct xt_entry_match *m, struct net *net)
{
	struct xt_mtdtor_param par;

	par.net       = net;
	par.match     = m->u.kernel.match;
	par.matchinfo = m->data;
	par.family    = NFPROTO_IPV4;
	if (par.match->destroy != NULL)
		par.match->destroy(&par);
	module_put(par.match->me);
}

static int
check_entry(const struct ipt_entry *e, const char *name)
{
	const struct xt_entry_target *t;

	if (!ip_checkentry(&e->ip)) {
		duprintf("ip check failed %p %s.\n", e, name);
		return -EINVAL;
	}

	if (e->target_offset + sizeof(struct xt_entry_target) >
	    e->next_offset)
		return -EINVAL;

	t = ipt_get_target_c(e);
	if (e->target_offset + t->u.target_size > e->next_offset)
		return -EINVAL;

	return 0;
}

static int
check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
{
	const struct ipt_ip *ip = par->entryinfo;
	int ret;

	par->match     = m->u.kernel.match;
	par->matchinfo = m->data;

	ret = xt_check_match(par, m->u.match_size - sizeof(*m),
	      ip->proto, ip->invflags & IPT_INV_PROTO);
	if (ret < 0) {
		duprintf("check failed for `%s'.\n", par->match->name);
		return ret;
	}
	return 0;
}

static int
find_check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
{
	struct xt_match *match;
	int ret;

	match = xt_request_find_match(NFPROTO_IPV4, m->u.user.name,
				      m->u.user.revision);
	if (IS_ERR(match)) {
		duprintf("find_check_match: `%s' not found\n", m->u.user.name);
		return PTR_ERR(match);
	}
	m->u.kernel.match = match;

	ret = check_match(m, par);
	if (ret)
		goto err;

	return 0;
err:
	module_put(m->u.kernel.match->me);
	return ret;
}

static int check_target(struct ipt_entry *e, struct net *net, const char *name)
{
	struct xt_entry_target *t = ipt_get_target(e);
	struct xt_tgchk_param par = {
		.net       = net,
		.table     = name,
		.entryinfo = e,
		.target    = t->u.kernel.target,
		.targinfo  = t->data,
		.hook_mask = e->comefrom,
		.family    = NFPROTO_IPV4,
	};
	int ret;

	ret = xt_check_target(&par, t->u.target_size - sizeof(*t),
	      e->ip.proto, e->ip.invflags & IPT_INV_PROTO);
	if (ret < 0) {
		duprintf("check failed for `%s'.\n",
			 t->u.kernel.target->name);
		return ret;
	}
	return 0;
}

static int
find_check_entry(struct ipt_entry *e, struct net *net, const char *name,
		 unsigned int size)
{
	struct xt_entry_target *t;
	struct xt_target *target;
	int ret;
	unsigned int j;
	struct xt_mtchk_param mtpar;
	struct xt_entry_match *ematch;

	ret = check_entry(e, name);
	if (ret)
		return ret;

	j = 0;
	mtpar.net	= net;
	mtpar.table     = name;
	mtpar.entryinfo = &e->ip;
	mtpar.hook_mask = e->comefrom;
	mtpar.family    = NFPROTO_IPV4;
	xt_ematch_foreach(ematch, e) {
		ret = find_check_match(ematch, &mtpar);
		if (ret != 0)
			goto cleanup_matches;
		++j;
	}

	t = ipt_get_target(e);
	target = xt_request_find_target(NFPROTO_IPV4, t->u.user.name,
					t->u.user.revision);
	if (IS_ERR(target)) {
		duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
		ret = PTR_ERR(target);
		goto cleanup_matches;
	}
	t->u.kernel.target = target;

	ret = check_target(e, net, name);
	if (ret)
		goto err;
	return 0;
 err:
	module_put(t->u.kernel.target->me);
 cleanup_matches:
	xt_ematch_foreach(ematch, e) {
		if (j-- == 0)
			break;
		cleanup_match(ematch, net);
	}
	return ret;
}

static bool check_underflow(const struct ipt_entry *e)
{
	const struct xt_entry_target *t;
	unsigned int verdict;

	if (!unconditional(&e->ip))
		return false;
	t = ipt_get_target_c(e);
	if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
		return false;
	verdict = ((struct xt_standard_target *)t)->verdict;
	verdict = -verdict - 1;
	return verdict == NF_DROP || verdict == NF_ACCEPT;
}
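
/*
 * The "verdict = -verdict - 1" above undoes the standard-target verdict
 * encoding, so an underflow (policy) rule is accepted only if it is an
 * unconditional ACCEPT or DROP.
 */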

static int
check_entry_size_and_hooks(struct ipt_entry *e,
			   struct xt_table_info *newinfo,
			   const unsigned char *base,
			   const unsigned char *limit,
			   const unsigned int *hook_entries,
			   const unsigned int *underflows,
			   unsigned int valid_hooks)
{
	unsigned int h;

	if ((unsigned long)e % __alignof__(struct ipt_entry) != 0 ||
	    (unsigned char *)e + sizeof(struct ipt_entry) >= limit) {
		duprintf("Bad offset %p\n", e);
		return -EINVAL;
	}

	if (e->next_offset
	    < sizeof(struct ipt_entry) + sizeof(struct xt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	/* Check hooks & underflows */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if (!(valid_hooks & (1 << h)))
			continue;
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h]) {
			if (!check_underflow(e)) {
				pr_err("Underflows must be unconditional and "
				       "use the STANDARD target with "
				       "ACCEPT/DROP\n");
				return -EINVAL;
			}
			newinfo->underflow[h] = underflows[h];
		}
	}

	/* Clear counters and comefrom */
	e->counters = ((struct xt_counters) { 0, 0 });
	e->comefrom = 0;
	return 0;
}

static void
cleanup_entry(struct ipt_entry *e, struct net *net)
{
	struct xt_tgdtor_param par;
	struct xt_entry_target *t;
	struct xt_entry_match *ematch;

	/* Cleanup all matches */
	xt_ematch_foreach(ematch, e)
		cleanup_match(ematch, net);
	t = ipt_get_target(e);

	par.net      = net;
	par.target   = t->u.kernel.target;
	par.targinfo = t->data;
	par.family   = NFPROTO_IPV4;
	if (par.target->destroy != NULL)
		par.target->destroy(&par);
	module_put(par.target->me);
}

/* Checks and translates the user-supplied table segment (held in
   newinfo) */
static int
translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
		const struct ipt_replace *repl)
{
	struct ipt_entry *iter;
	unsigned int i;
	int ret = 0;

	newinfo->size = repl->size;
	newinfo->number = repl->num_entries;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = 0xFFFFFFFF;
		newinfo->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_table: size %u\n", newinfo->size);
	i = 0;
	/* Walk through entries, checking offsets. */
	xt_entry_foreach(iter, entry0, newinfo->size) {
		ret = check_entry_size_and_hooks(iter, newinfo, entry0,
						 entry0 + repl->size,
						 repl->hook_entry,
						 repl->underflow,
						 repl->valid_hooks);
		if (ret != 0)
			return ret;
		++i;
		if (strcmp(ipt_get_target(iter)->u.user.name,
		    XT_ERROR_TARGET) == 0)
			++newinfo->stacksize;
	}

	if (i != repl->num_entries) {
		duprintf("translate_table: %u not %u entries\n",
			 i, repl->num_entries);
		return -EINVAL;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(repl->valid_hooks & (1 << i)))
			continue;
		if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, repl->hook_entry[i]);
			return -EINVAL;
		}
		if (newinfo->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, repl->underflow[i]);
			return -EINVAL;
		}
	}

	if (!mark_source_chains(newinfo, repl->valid_hooks, entry0))
		return -ELOOP;

	/* Finally, each sanity check must pass */
	i = 0;
	xt_entry_foreach(iter, entry0, newinfo->size) {
		ret = find_check_entry(iter, net, repl->name, repl->size);
		if (ret != 0)
			break;
		++i;
	}

	if (ret != 0) {
		xt_entry_foreach(iter, entry0, newinfo->size) {
			if (i-- == 0)
				break;
			cleanup_entry(iter, net);
		}
		return ret;
	}

	/* And one copy for every other CPU */
	for_each_possible_cpu(i) {
		if (newinfo->entries[i] && newinfo->entries[i] != entry0)
			memcpy(newinfo->entries[i], entry0, newinfo->size);
	}

	return ret;
}

static void
get_counters(const struct xt_table_info *t,
	     struct xt_counters counters[])
{
	struct ipt_entry *iter;
	unsigned int cpu;
	unsigned int i;

	for_each_possible_cpu(cpu) {
		seqcount_t *s = &per_cpu(xt_recseq, cpu);

		i = 0;
		xt_entry_foreach(iter, t->entries[cpu], t->size) {
			u64 bcnt, pcnt;
			unsigned int start;

			do {
				start = read_seqcount_begin(s);
				bcnt = iter->counters.bcnt;
				pcnt = iter->counters.pcnt;
			} while (read_seqcount_retry(s, start));

			ADD_COUNTER(counters[i], bcnt, pcnt);
			++i; /* macro does multi eval of i */
		}
	}
}
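
/*
 * The read_seqcount_begin()/read_seqcount_retry() loop gives a
 * tear-free snapshot of the 64-bit byte/packet counters even on 32-bit
 * hosts: writers bracket their updates with xt_write_recseq_begin()/
 * xt_write_recseq_end() (see ipt_do_table() and do_add_counters()),
 * and a reader simply retries if a writer was active in between.
 */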

static struct xt_counters *alloc_counters(const struct xt_table *table)
{
	unsigned int countersize;
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;

	/* We need atomic snapshot of counters: rest doesn't change
	   (other than comefrom, which userspace doesn't care
	   about). */
	countersize = sizeof(struct xt_counters) * private->number;
	counters = vzalloc(countersize);

	if (counters == NULL)
		return ERR_PTR(-ENOMEM);

	get_counters(private, counters);

	return counters;
}

static int
copy_entries_to_user(unsigned int total_size,
		     const struct xt_table *table,
		     void __user *userptr)
{
	unsigned int off, num;
	const struct ipt_entry *e;
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;
	int ret = 0;
	const void *loc_cpu_entry;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	/* choose the copy that is on our node/cpu, ...
	 * This choice is lazy (because current thread is
	 * allowed to migrate to another cpu)
	 */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
		ret = -EFAULT;
		goto free_counters;
	}

	/* FIXME: use iterator macros --RR */
	/* ... then go back and fix counters and names */
	for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
		unsigned int i;
		const struct xt_entry_match *m;
		const struct xt_entry_target *t;

		e = (struct ipt_entry *)(loc_cpu_entry + off);
		if (copy_to_user(userptr + off
				 + offsetof(struct ipt_entry, counters),
				 &counters[num],
				 sizeof(counters[num])) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}

		for (i = sizeof(struct ipt_entry);
		     i < e->target_offset;
		     i += m->u.match_size) {
			m = (void *)e + i;

			if (copy_to_user(userptr + off + i
					 + offsetof(struct xt_entry_match,
						    u.user.name),
					 m->u.kernel.match->name,
					 strlen(m->u.kernel.match->name)+1)
			    != 0) {
				ret = -EFAULT;
				goto free_counters;
			}
		}

		t = ipt_get_target_c(e);
		if (copy_to_user(userptr + off + e->target_offset
				 + offsetof(struct xt_entry_target,
					    u.user.name),
				 t->u.kernel.target->name,
				 strlen(t->u.kernel.target->name)+1) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}
	}

 free_counters:
	vfree(counters);
	return ret;
}

#ifdef CONFIG_COMPAT
static void compat_standard_from_user(void *dst, const void *src)
{
	int v = *(compat_int_t *)src;

	if (v > 0)
		v += xt_compat_calc_jump(AF_INET, v);
	memcpy(dst, &v, sizeof(v));
}

static int compat_standard_to_user(void __user *dst, const void *src)
{
	compat_int_t cv = *(int *)src;

	if (cv > 0)
		cv -= xt_compat_calc_jump(AF_INET, cv);
	return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
}

static int compat_calc_entry(const struct ipt_entry *e,
			     const struct xt_table_info *info,
			     const void *base, struct xt_table_info *newinfo)
{
	const struct xt_entry_match *ematch;
	const struct xt_entry_target *t;
	unsigned int entry_offset;
	int off, i, ret;

	off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
	entry_offset = (void *)e - base;
	xt_ematch_foreach(ematch, e)
		off += xt_compat_match_offset(ematch->u.kernel.match);
	t = ipt_get_target_c(e);
	off += xt_compat_target_offset(t->u.kernel.target);
	newinfo->size -= off;
	ret = xt_compat_add_offset(AF_INET, entry_offset, off);
	if (ret)
		return ret;

	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		if (info->hook_entry[i] &&
		    (e < (struct ipt_entry *)(base + info->hook_entry[i])))
			newinfo->hook_entry[i] -= off;
		if (info->underflow[i] &&
		    (e < (struct ipt_entry *)(base + info->underflow[i])))
			newinfo->underflow[i] -= off;
	}
	return 0;
}
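
/*
 * compat_calc_entry() records, via xt_compat_add_offset(), how much
 * smaller each entry becomes in the 32-bit layout; xt_compat_calc_jump()
 * later sums these deltas to translate standard-target jump offsets
 * between the native and compat representations (see
 * compat_standard_from_user()/compat_standard_to_user() above).
 */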

static int compat_table_info(const struct xt_table_info *info,
			     struct xt_table_info *newinfo)
{
	struct ipt_entry *iter;
	void *loc_cpu_entry;
	int ret;

	if (!newinfo || !info)
		return -EINVAL;

	/* we dont care about newinfo->entries[] */
	memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
	newinfo->initial_entries = 0;
	loc_cpu_entry = info->entries[raw_smp_processor_id()];
	xt_compat_init_offsets(AF_INET, info->number);
	xt_entry_foreach(iter, loc_cpu_entry, info->size) {
		ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
		if (ret != 0)
			return ret;
	}
	return 0;
}
#endif

static int get_info(struct net *net, void __user *user,
		    const int *len, int compat)
{
	char name[XT_TABLE_MAXNAMELEN];
	struct xt_table *t;
	int ret;

	if (*len != sizeof(struct ipt_getinfo)) {
		duprintf("length %u != %zu\n", *len,
			 sizeof(struct ipt_getinfo));
		return -EINVAL;
	}

	if (copy_from_user(name, user, sizeof(name)) != 0)
		return -EFAULT;

	name[XT_TABLE_MAXNAMELEN-1] = '\0';
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_lock(AF_INET);
#endif
	t = try_then_request_module(xt_find_table_lock(net, AF_INET, name),
				    "iptable_%s", name);
	if (!IS_ERR_OR_NULL(t)) {
		struct ipt_getinfo info;
		const struct xt_table_info *private = t->private;
#ifdef CONFIG_COMPAT
		struct xt_table_info tmp;

		if (compat) {
			ret = compat_table_info(private, &tmp);
			xt_compat_flush_offsets(AF_INET);
			private = &tmp;
		}
#endif
		memset(&info, 0, sizeof(info));
		info.valid_hooks = t->valid_hooks;
		memcpy(info.hook_entry, private->hook_entry,
		       sizeof(info.hook_entry));
		memcpy(info.underflow, private->underflow,
		       sizeof(info.underflow));
		info.num_entries = private->number;
		info.size = private->size;
		strcpy(info.name, name);

		if (copy_to_user(user, &info, *len) != 0)
			ret = -EFAULT;
		else
			ret = 0;

		xt_table_unlock(t);
		module_put(t->me);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_unlock(AF_INET);
#endif
	return ret;
}

static int
get_entries(struct net *net, struct ipt_get_entries __user *uptr,
	    const int *len)
{
	int ret;
	struct ipt_get_entries get;
	struct xt_table *t;

	if (*len < sizeof(get)) {
		duprintf("get_entries: %u < %zu\n", *len, sizeof(get));
		return -EINVAL;
	}
	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;
	if (*len != sizeof(struct ipt_get_entries) + get.size) {
		duprintf("get_entries: %u != %zu\n",
			 *len, sizeof(get) + get.size);
		return -EINVAL;
	}

	t = xt_find_table_lock(net, AF_INET, get.name);
	if (!IS_ERR_OR_NULL(t)) {
		const struct xt_table_info *private = t->private;
		duprintf("t->private->number = %u\n", private->number);
		if (get.size == private->size)
			ret = copy_entries_to_user(private->size,
						   t, uptr->entrytable);
		else {
			duprintf("get_entries: I've got %u not %u!\n",
				 private->size, get.size);
			ret = -EAGAIN;
		}
		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;

	return ret;
}

static int
__do_replace(struct net *net, const char *name, unsigned int valid_hooks,
	     struct xt_table_info *newinfo, unsigned int num_counters,
	     void __user *counters_ptr)
{
	int ret;
	struct xt_table *t;
	struct xt_table_info *oldinfo;
	struct xt_counters *counters;
	void *loc_cpu_old_entry;
	struct ipt_entry *iter;

	ret = 0;
	counters = vzalloc(num_counters * sizeof(struct xt_counters));
	if (!counters) {
		ret = -ENOMEM;
		goto out;
	}

	t = try_then_request_module(xt_find_table_lock(net, AF_INET, name),
				    "iptable_%s", name);
	if (IS_ERR_OR_NULL(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free_newinfo_counters_untrans;
	}

	/* You lied! */
	if (valid_hooks != t->valid_hooks) {
		duprintf("Valid hook crap: %08X vs %08X\n",
			 valid_hooks, t->valid_hooks);
		ret = -EINVAL;
		goto put_module;
	}

	oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
	if (!oldinfo)
		goto put_module;

	/* Update module usage count based on number of rules */
	duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
		oldinfo->number, oldinfo->initial_entries, newinfo->number);
	if ((oldinfo->number > oldinfo->initial_entries) ||
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);
	if ((oldinfo->number > oldinfo->initial_entries) &&
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);

	/* Get the old counters, and synchronize with replace */
	get_counters(oldinfo, counters);

	/* Decrease module usage counts and free resource */
	loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
	xt_entry_foreach(iter, loc_cpu_old_entry, oldinfo->size)
		cleanup_entry(iter, net);

	xt_free_table_info(oldinfo);
	if (copy_to_user(counters_ptr, counters,
			 sizeof(struct xt_counters) * num_counters) != 0) {
		/* Silent error, can't fail, new table is already in place */
		net_warn_ratelimited("iptables: counters copy to user failed while replacing table\n");
	}
	vfree(counters);
	xt_table_unlock(t);
	return ret;

 put_module:
	module_put(t->me);
	xt_table_unlock(t);
 free_newinfo_counters_untrans:
	vfree(counters);
 out:
	return ret;
}

static int
do_replace(struct net *net, const void __user *user, unsigned int len)
{
	int ret;
	struct ipt_replace tmp;
	struct xt_table_info *newinfo;
	void *loc_cpu_entry;
	struct ipt_entry *iter;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* overflow check */
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
		return -ENOMEM;
	tmp.name[sizeof(tmp.name)-1] = 0;

	newinfo = xt_alloc_table_info(tmp.size);
	if (!newinfo)
		return -ENOMEM;

	/* choose the copy that is on our node/cpu */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
			   tmp.size) != 0) {
		ret = -EFAULT;
		goto free_newinfo;
	}

	ret = translate_table(net, newinfo, loc_cpu_entry, &tmp);
	if (ret != 0)
		goto free_newinfo;

	duprintf("Translated table\n");

	ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
			   tmp.num_counters, tmp.counters);
	if (ret)
		goto free_newinfo_untrans;
	return 0;

 free_newinfo_untrans:
	xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
		cleanup_entry(iter, net);
 free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
}

static int
do_add_counters(struct net *net, const void __user *user,
		unsigned int len, int compat)
{
	unsigned int i, curcpu;
	struct xt_counters_info tmp;
	struct xt_counters *paddc;
	unsigned int num_counters;
	const char *name;
	int size;
	void *ptmp;
	struct xt_table *t;
	const struct xt_table_info *private;
	int ret = 0;
	void *loc_cpu_entry;
	struct ipt_entry *iter;
	unsigned int addend;
#ifdef CONFIG_COMPAT
	struct compat_xt_counters_info compat_tmp;

	if (compat) {
		ptmp = &compat_tmp;
		size = sizeof(struct compat_xt_counters_info);
	} else
#endif
	{
		ptmp = &tmp;
		size = sizeof(struct xt_counters_info);
	}

	if (copy_from_user(ptmp, user, size) != 0)
		return -EFAULT;

#ifdef CONFIG_COMPAT
	if (compat) {
		num_counters = compat_tmp.num_counters;
		name = compat_tmp.name;
	} else
#endif
	{
		num_counters = tmp.num_counters;
		name = tmp.name;
	}

	if (len != size + num_counters * sizeof(struct xt_counters))
		return -EINVAL;

	paddc = vmalloc(len - size);
	if (!paddc)
		return -ENOMEM;

	if (copy_from_user(paddc, user + size, len - size) != 0) {
		ret = -EFAULT;
		goto free;
	}

	t = xt_find_table_lock(net, AF_INET, name);
	if (IS_ERR_OR_NULL(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free;
	}

	local_bh_disable();
	private = t->private;
	if (private->number != num_counters) {
		ret = -EINVAL;
		goto unlock_up_free;
	}

	i = 0;
	/* Choose the copy that is on our node */
	curcpu = smp_processor_id();
	loc_cpu_entry = private->entries[curcpu];
	addend = xt_write_recseq_begin();
	xt_entry_foreach(iter, loc_cpu_entry, private->size) {
		ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt);
		++i;
	}
	xt_write_recseq_end(addend);
 unlock_up_free:
	local_bh_enable();
	xt_table_unlock(t);
	module_put(t->me);
 free:
	vfree(paddc);

	return ret;
}

#ifdef CONFIG_COMPAT
struct compat_ipt_replace {
	char			name[XT_TABLE_MAXNAMELEN];
	u32			valid_hooks;
	u32			num_entries;
	u32			size;
	u32			hook_entry[NF_INET_NUMHOOKS];
	u32			underflow[NF_INET_NUMHOOKS];
	u32			num_counters;
	compat_uptr_t		counters;	/* struct xt_counters * */
	struct compat_ipt_entry	entries[0];
};
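
/*
 * This mirrors struct ipt_replace with user pointers shrunk to
 * compat_uptr_t and entries in the 32-bit layout; the compat_* helpers
 * below translate between this representation and the native one.
 */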

static int
compat_copy_entry_to_user(struct ipt_entry *e, void __user **dstptr,
			  unsigned int *size, struct xt_counters *counters,
			  unsigned int i)
{
	struct xt_entry_target *t;
	struct compat_ipt_entry __user *ce;
	u_int16_t target_offset, next_offset;
	compat_uint_t origsize;
	const struct xt_entry_match *ematch;
	int ret = 0;

	origsize = *size;
	ce = (struct compat_ipt_entry __user *)*dstptr;
	if (copy_to_user(ce, e, sizeof(struct ipt_entry)) != 0 ||
	    copy_to_user(&ce->counters, &counters[i],
	    sizeof(counters[i])) != 0)
		return -EFAULT;

	*dstptr += sizeof(struct compat_ipt_entry);
	*size -= sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);

	xt_ematch_foreach(ematch, e) {
		ret = xt_compat_match_to_user(ematch, dstptr, size);
		if (ret != 0)
			return ret;
	}
	target_offset = e->target_offset - (origsize - *size);
	t = ipt_get_target(e);
	ret = xt_compat_target_to_user(t, dstptr, size);
	if (ret)
		return ret;
	next_offset = e->next_offset - (origsize - *size);
	if (put_user(target_offset, &ce->target_offset) != 0 ||
	    put_user(next_offset, &ce->next_offset) != 0)
		return -EFAULT;
	return 0;
}

static int
compat_find_calc_match(struct xt_entry_match *m,
		       const char *name,
		       const struct ipt_ip *ip,
		       unsigned int hookmask,
		       int *size)
{
	struct xt_match *match;

	match = xt_request_find_match(NFPROTO_IPV4, m->u.user.name,
				      m->u.user.revision);
	if (IS_ERR(match)) {
		duprintf("compat_check_calc_match: `%s' not found\n",
			 m->u.user.name);
		return PTR_ERR(match);
	}
	m->u.kernel.match = match;
	*size += xt_compat_match_offset(match);
	return 0;
}

static void compat_release_entry(struct compat_ipt_entry *e)
{
	struct xt_entry_target *t;
	struct xt_entry_match *ematch;

	/* Cleanup all matches */
	xt_ematch_foreach(ematch, e)
		module_put(ematch->u.kernel.match->me);
	t = compat_ipt_get_target(e);
	module_put(t->u.kernel.target->me);
}

static int
check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
				  struct xt_table_info *newinfo,
				  unsigned int *size,
				  const unsigned char *base,
				  const unsigned char *limit,
				  const unsigned int *hook_entries,
				  const unsigned int *underflows,
				  const char *name)
{
	struct xt_entry_match *ematch;
	struct xt_entry_target *t;
	struct xt_target *target;
	unsigned int entry_offset;
	unsigned int j;
	int ret, off, h;

	duprintf("check_compat_entry_size_and_hooks %p\n", e);
	if ((unsigned long)e % __alignof__(struct compat_ipt_entry) != 0 ||
	    (unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit) {
		duprintf("Bad offset %p, limit = %p\n", e, limit);
		return -EINVAL;
	}

	if (e->next_offset < sizeof(struct compat_ipt_entry) +
			     sizeof(struct compat_xt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	/* For purposes of check_entry casting the compat entry is fine */
	ret = check_entry((struct ipt_entry *)e, name);
	if (ret)
		return ret;

	off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
	entry_offset = (void *)e - (void *)base;
	j = 0;
	xt_ematch_foreach(ematch, e) {
		ret = compat_find_calc_match(ematch, name,
					     &e->ip, e->comefrom, &off);
		if (ret != 0)
			goto release_matches;
		++j;
	}

	t = compat_ipt_get_target(e);
	target = xt_request_find_target(NFPROTO_IPV4, t->u.user.name,
					t->u.user.revision);
	if (IS_ERR(target)) {
		duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
			 t->u.user.name);
		ret = PTR_ERR(target);
		goto release_matches;
	}
	t->u.kernel.target = target;

	off += xt_compat_target_offset(target);
	*size += off;
	ret = xt_compat_add_offset(AF_INET, entry_offset, off);
	if (ret)
		goto out;

	/* Check hooks & underflows */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h])
			newinfo->underflow[h] = underflows[h];
	}

	/* Clear counters and comefrom */
	memset(&e->counters, 0, sizeof(e->counters));
	e->comefrom = 0;
	return 0;

out:
	module_put(t->u.kernel.target->me);
release_matches:
	xt_ematch_foreach(ematch, e) {
		if (j-- == 0)
			break;
		module_put(ematch->u.kernel.match->me);
	}
	return ret;
}

static int
compat_copy_entry_from_user(struct compat_ipt_entry *e, void **dstptr,
			    unsigned int *size, const char *name,
			    struct xt_table_info *newinfo, unsigned char *base)
{
	struct xt_entry_target *t;
	struct xt_target *target;
	struct ipt_entry *de;
	unsigned int origsize;
	int ret, h;
	struct xt_entry_match *ematch;

	ret = 0;
	origsize = *size;
	de = (struct ipt_entry *)*dstptr;
	memcpy(de, e, sizeof(struct ipt_entry));
	memcpy(&de->counters, &e->counters, sizeof(e->counters));

	*dstptr += sizeof(struct ipt_entry);
	*size += sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);

	xt_ematch_foreach(ematch, e) {
		ret = xt_compat_match_from_user(ematch, dstptr, size);
		if (ret != 0)
			return ret;
	}
	de->target_offset = e->target_offset - (origsize - *size);
	t = compat_ipt_get_target(e);
	target = t->u.kernel.target;
	xt_compat_target_from_user(t, dstptr, size);

	de->next_offset = e->next_offset - (origsize - *size);
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if ((unsigned char *)de - base < newinfo->hook_entry[h])
			newinfo->hook_entry[h] -= origsize - *size;
		if ((unsigned char *)de - base < newinfo->underflow[h])
			newinfo->underflow[h] -= origsize - *size;
	}
	return ret;
}

static int
compat_check_entry(struct ipt_entry *e, struct net *net, const char *name)
{
	struct xt_entry_match *ematch;
	struct xt_mtchk_param mtpar;
	unsigned int j;
	int ret = 0;

	j = 0;
	mtpar.net	= net;
	mtpar.table     = name;
	mtpar.entryinfo = &e->ip;
	mtpar.hook_mask = e->comefrom;
	mtpar.family    = NFPROTO_IPV4;
	xt_ematch_foreach(ematch, e) {
		ret = check_match(ematch, &mtpar);
		if (ret != 0)
			goto cleanup_matches;
		++j;
	}

	ret = check_target(e, net, name);
	if (ret)
		goto cleanup_matches;
	return 0;

 cleanup_matches:
	xt_ematch_foreach(ematch, e) {
		if (j-- == 0)
			break;
		cleanup_match(ematch, net);
	}
	return ret;
}

static int
translate_compat_table(struct net *net,
		       const char *name,
		       unsigned int valid_hooks,
		       struct xt_table_info **pinfo,
		       void **pentry0,
		       unsigned int total_size,
		       unsigned int number,
		       unsigned int *hook_entries,
		       unsigned int *underflows)
{
	unsigned int i, j;
	struct xt_table_info *newinfo, *info;
	void *pos, *entry0, *entry1;
	struct compat_ipt_entry *iter0;
	struct ipt_entry *iter1;
	unsigned int size;
	int ret;

	info = *pinfo;
	entry0 = *pentry0;
	size = total_size;
	info->number = number;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		info->hook_entry[i] = 0xFFFFFFFF;
		info->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_compat_table: size %u\n", info->size);
	j = 0;
	xt_compat_lock(AF_INET);
	xt_compat_init_offsets(AF_INET, number);
	/* Walk through entries, checking offsets. */
	xt_entry_foreach(iter0, entry0, total_size) {
		ret = check_compat_entry_size_and_hooks(iter0, info, &size,
							entry0,
							entry0 + total_size,
							hook_entries,
							underflows,
							name);
		if (ret != 0)
			goto out_unlock;
		++j;
	}

	ret = -EINVAL;
	if (j != number) {
		duprintf("translate_compat_table: %u not %u entries\n",
			 j, number);
		goto out_unlock;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(valid_hooks & (1 << i)))
			continue;
		if (info->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, hook_entries[i]);
			goto out_unlock;
		}
		if (info->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, underflows[i]);
			goto out_unlock;
		}
	}

	ret = -ENOMEM;
	newinfo = xt_alloc_table_info(size);
	if (!newinfo)
		goto out_unlock;

	newinfo->number = number;
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = info->hook_entry[i];
		newinfo->underflow[i] = info->underflow[i];
	}
	entry1 = newinfo->entries[raw_smp_processor_id()];
	pos = entry1;
	size = total_size;
	xt_entry_foreach(iter0, entry0, total_size) {
		ret = compat_copy_entry_from_user(iter0, &pos, &size,
						  name, newinfo, entry1);
		if (ret != 0)
			break;
	}
	xt_compat_flush_offsets(AF_INET);
	xt_compat_unlock(AF_INET);
	if (ret)
		goto free_newinfo;

	ret = -ELOOP;
	if (!mark_source_chains(newinfo, valid_hooks, entry1))
		goto free_newinfo;

	i = 0;
	xt_entry_foreach(iter1, entry1, newinfo->size) {
		ret = compat_check_entry(iter1, net, name);
		if (ret != 0)
			break;
		++i;
		if (strcmp(ipt_get_target(iter1)->u.user.name,
		    XT_ERROR_TARGET) == 0)
			++newinfo->stacksize;
	}
	if (ret) {
		/*
		 * The first i matches need cleanup_entry (calls ->destroy)
		 * because they had called ->check already. The other j-i
		 * entries need only release.
		 */
		int skip = i;
		j -= i;
		xt_entry_foreach(iter0, entry0, newinfo->size) {
			if (skip-- > 0)
				continue;
			if (j-- == 0)
				break;
			compat_release_entry(iter0);
		}
		xt_entry_foreach(iter1, entry1, newinfo->size) {
			if (i-- == 0)
				break;
			cleanup_entry(iter1, net);
		}
		xt_free_table_info(newinfo);
		return ret;
	}

	/* And one copy for every other CPU */
	for_each_possible_cpu(i)
		if (newinfo->entries[i] && newinfo->entries[i] != entry1)
			memcpy(newinfo->entries[i], entry1, newinfo->size);

	*pinfo = newinfo;
	*pentry0 = entry1;
	xt_free_table_info(info);
	return 0;

free_newinfo:
	xt_free_table_info(newinfo);
out:
	xt_entry_foreach(iter0, entry0, total_size) {
		if (j-- == 0)
			break;
		compat_release_entry(iter0);
	}
	return ret;
out_unlock:
	xt_compat_flush_offsets(AF_INET);
	xt_compat_unlock(AF_INET);
	goto out;
}
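
/*
 * translate_compat_table() works in two passes: first each compat entry
 * is size-checked and its layout delta registered while xt_compat_lock()
 * is held, then everything is copied and expanded into a freshly
 * allocated native table, on which the usual validation
 * (mark_source_chains(), compat_check_entry()) is run.
 */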

static int
compat_do_replace(struct net *net, void __user *user, unsigned int len)
{
	int ret;
	struct compat_ipt_replace tmp;
	struct xt_table_info *newinfo;
	void *loc_cpu_entry;
	struct ipt_entry *iter;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* overflow check */
	if (tmp.size >= INT_MAX / num_possible_cpus())
		return -ENOMEM;
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
		return -ENOMEM;
	tmp.name[sizeof(tmp.name)-1] = 0;

	newinfo = xt_alloc_table_info(tmp.size);
	if (!newinfo)
		return -ENOMEM;

	/* choose the copy that is on our node/cpu */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
			   tmp.size) != 0) {
		ret = -EFAULT;
		goto free_newinfo;
	}

	ret = translate_compat_table(net, tmp.name, tmp.valid_hooks,
				     &newinfo, &loc_cpu_entry, tmp.size,
				     tmp.num_entries, tmp.hook_entry,
				     tmp.underflow);
	if (ret != 0)
		goto free_newinfo;

	duprintf("compat_do_replace: Translated table\n");

	ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
			   tmp.num_counters, compat_ptr(tmp.counters));
	if (ret)
		goto free_newinfo_untrans;
	return 0;

 free_newinfo_untrans:
	xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
		cleanup_entry(iter, net);
 free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
}

static int
compat_do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user,
		      unsigned int len)
{
	int ret;

	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_SET_REPLACE:
		ret = compat_do_replace(sock_net(sk), user, len);
		break;

	case IPT_SO_SET_ADD_COUNTERS:
		ret = do_add_counters(sock_net(sk), user, len, 1);
		break;

	default:
		duprintf("do_ipt_set_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}

struct compat_ipt_get_entries {
	char name[XT_TABLE_MAXNAMELEN];
	compat_uint_t size;
	struct compat_ipt_entry entrytable[0];
};

static int
compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
			    void __user *userptr)
{
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;
	void __user *pos;
	unsigned int size;
	int ret = 0;
	const void *loc_cpu_entry;
	unsigned int i = 0;
	struct ipt_entry *iter;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	/* choose the copy that is on our node/cpu, ...
	 * This choice is lazy (because current thread is
	 * allowed to migrate to another cpu)
	 */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	pos = userptr;
	size = total_size;
	xt_entry_foreach(iter, loc_cpu_entry, total_size) {
		ret = compat_copy_entry_to_user(iter, &pos,
						&size, counters, i++);
		if (ret != 0)
			break;
	}

	vfree(counters);
	return ret;
}

static int
compat_get_entries(struct net *net, struct compat_ipt_get_entries __user *uptr,
		   int *len)
{
	int ret;
	struct compat_ipt_get_entries get;
	struct xt_table *t;

	if (*len < sizeof(get)) {
		duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
		return -EINVAL;
	}

	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;

	if (*len != sizeof(struct compat_ipt_get_entries) + get.size) {
		duprintf("compat_get_entries: %u != %zu\n",
			 *len, sizeof(get) + get.size);
		return -EINVAL;
	}

	xt_compat_lock(AF_INET);
	t = xt_find_table_lock(net, AF_INET, get.name);
	if (!IS_ERR_OR_NULL(t)) {
		const struct xt_table_info *private = t->private;
		struct xt_table_info info;
		duprintf("t->private->number = %u\n", private->number);
		ret = compat_table_info(private, &info);
		if (!ret && get.size == info.size) {
			ret = compat_copy_entries_to_user(private->size,
							  t, uptr->entrytable);
		} else if (!ret) {
			duprintf("compat_get_entries: I've got %u not %u!\n",
				 private->size, get.size);
			ret = -EAGAIN;
		}
		xt_compat_flush_offsets(AF_INET);
		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;

	xt_compat_unlock(AF_INET);
	return ret;
}

static int do_ipt_get_ctl(struct sock *, int, void __user *, int *);

static int
compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;

	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_GET_INFO:
		ret = get_info(sock_net(sk), user, len, 1);
		break;
	case IPT_SO_GET_ENTRIES:
		ret = compat_get_entries(sock_net(sk), user, len);
		break;
	default:
		ret = do_ipt_get_ctl(sk, cmd, user, len);
	}
	return ret;
}
#endif

static int
do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
{
	int ret;

	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_SET_REPLACE:
		ret = do_replace(sock_net(sk), user, len);
		break;

	case IPT_SO_SET_ADD_COUNTERS:
		ret = do_add_counters(sock_net(sk), user, len, 0);
		break;

	default:
		duprintf("do_ipt_set_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}

static int
do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;

	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_GET_INFO:
		ret = get_info(sock_net(sk), user, len, 0);
		break;

	case IPT_SO_GET_ENTRIES:
		ret = get_entries(sock_net(sk), user, len);
		break;

	case IPT_SO_GET_REVISION_MATCH:
	case IPT_SO_GET_REVISION_TARGET: {
		struct xt_get_revision rev;
		int target;

		if (*len != sizeof(rev)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
			ret = -EFAULT;
			break;
		}
		rev.name[sizeof(rev.name)-1] = 0;

		if (cmd == IPT_SO_GET_REVISION_TARGET)
			target = 1;
		else
			target = 0;

		try_then_request_module(xt_find_revision(AF_INET, rev.name,
							 rev.revision,
							 target, &ret),
					"ipt_%s", rev.name);
		break;
	}

	default:
		duprintf("do_ipt_get_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}

struct xt_table *ipt_register_table(struct net *net,
				    const struct xt_table *table,
				    const struct ipt_replace *repl)
{
	int ret;
	struct xt_table_info *newinfo;
	struct xt_table_info bootstrap = {0};
	void *loc_cpu_entry;
	struct xt_table *new_table;

	newinfo = xt_alloc_table_info(repl->size);
	if (!newinfo) {
		ret = -ENOMEM;
		goto out;
	}

	/* choose the copy on our node/cpu, but dont care about preemption */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	memcpy(loc_cpu_entry, repl->entries, repl->size);

	ret = translate_table(net, newinfo, loc_cpu_entry, repl);
	if (ret != 0)
		goto out_free;

	new_table = xt_register_table(net, table, &bootstrap, newinfo);
	if (IS_ERR(new_table)) {
		ret = PTR_ERR(new_table);
		goto out_free;
	}

	return new_table;

out_free:
	xt_free_table_info(newinfo);
out:
	return ERR_PTR(ret);
}

void ipt_unregister_table(struct net *net, struct xt_table *table)
{
	struct xt_table_info *private;
	void *loc_cpu_entry;
	struct module *table_owner = table->me;
	struct ipt_entry *iter;

	private = xt_unregister_table(table);

	/* Decrease module usage counts and free resources */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	xt_entry_foreach(iter, loc_cpu_entry, private->size)
		cleanup_entry(iter, net);
	if (private->number > private->initial_entries)
		module_put(table_owner);
	xt_free_table_info(private);
}

/* Returns 1 if the type and code is matched by the range, 0 otherwise */
static inline bool
icmp_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
		     u_int8_t type, u_int8_t code,
		     bool invert)
{
	return ((test_type == 0xFF) ||
		(type == test_type && code >= min_code && code <= max_code))
		^ invert;
}
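
/*
 * A test_type of 0xFF acts as a wildcard matching any ICMP type;
 * otherwise the type must match exactly and the code must fall within
 * [min_code, max_code].  The result is finally XORed with the rule's
 * IPT_ICMP_INV invert flag.
 */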

static bool
icmp_match(const struct sk_buff *skb, struct xt_action_param *par)
{
	const struct icmphdr *ic;
	struct icmphdr _icmph;
	const struct ipt_icmp *icmpinfo = par->matchinfo;

	/* Must not be a fragment. */
	if (par->fragoff != 0)
		return false;

	ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
	if (ic == NULL) {
		/* We've been asked to examine this packet, and we
		 * can't.  Hence, no choice but to drop.
		 */
		duprintf("Dropping evil ICMP tinygram.\n");
		par->hotdrop = true;
		return false;
	}

	return icmp_type_code_match(icmpinfo->type,
				    icmpinfo->code[0],
				    icmpinfo->code[1],
				    ic->type, ic->code,
				    !!(icmpinfo->invflags&IPT_ICMP_INV));
}

static int icmp_checkentry(const struct xt_mtchk_param *par)
{
	const struct ipt_icmp *icmpinfo = par->matchinfo;

	/* Must specify no unknown invflags */
	return (icmpinfo->invflags & ~IPT_ICMP_INV) ? -EINVAL : 0;
}

static struct xt_target ipt_builtin_tg[] __read_mostly = {
	{
		.name             = XT_STANDARD_TARGET,
		.targetsize       = sizeof(int),
		.family           = NFPROTO_IPV4,
#ifdef CONFIG_COMPAT
		.compatsize       = sizeof(compat_int_t),
		.compat_from_user = compat_standard_from_user,
		.compat_to_user   = compat_standard_to_user,
#endif
	},
	{
		.name             = XT_ERROR_TARGET,
		.target           = ipt_error,
		.targetsize       = XT_FUNCTION_MAXNAMELEN,
		.family           = NFPROTO_IPV4,
	},
};
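
/*
 * Note that the standard target deliberately has no ->target() callback:
 * ipt_do_table() recognises t->u.kernel.target->target == NULL and
 * handles the verdict/jump inline rather than making an indirect call.
 */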

static struct nf_sockopt_ops ipt_sockopts = {
	.pf		= PF_INET,
	.set_optmin	= IPT_BASE_CTL,
	.set_optmax	= IPT_SO_SET_MAX+1,
	.set		= do_ipt_set_ctl,
#ifdef CONFIG_COMPAT
	.compat_set	= compat_do_ipt_set_ctl,
#endif
	.get_optmin	= IPT_BASE_CTL,
	.get_optmax	= IPT_SO_GET_MAX+1,
	.get		= do_ipt_get_ctl,
#ifdef CONFIG_COMPAT
	.compat_get	= compat_do_ipt_get_ctl,
#endif
	.owner		= THIS_MODULE,
};

static struct xt_match ipt_builtin_mt[] __read_mostly = {
	{
		.name       = "icmp",
		.match      = icmp_match,
		.matchsize  = sizeof(struct ipt_icmp),
		.checkentry = icmp_checkentry,
		.proto      = IPPROTO_ICMP,
		.family     = NFPROTO_IPV4,
	},
};

static int __net_init ip_tables_net_init(struct net *net)
{
	return xt_proto_init(net, NFPROTO_IPV4);
}

static void __net_exit ip_tables_net_exit(struct net *net)
{
	xt_proto_fini(net, NFPROTO_IPV4);
}

static struct pernet_operations ip_tables_net_ops = {
	.init = ip_tables_net_init,
	.exit = ip_tables_net_exit,
};

static int __init ip_tables_init(void)
{
	int ret;

	ret = register_pernet_subsys(&ip_tables_net_ops);
	if (ret < 0)
		goto err1;

	/* No one else will be downing sem now, so we won't sleep */
	ret = xt_register_targets(ipt_builtin_tg, ARRAY_SIZE(ipt_builtin_tg));
	if (ret < 0)
		goto err2;
	ret = xt_register_matches(ipt_builtin_mt, ARRAY_SIZE(ipt_builtin_mt));
	if (ret < 0)
		goto err4;

	/* Register setsockopt */
	ret = nf_register_sockopt(&ipt_sockopts);
	if (ret < 0)
		goto err5;

	pr_info("(C) 2000-2006 Netfilter Core Team\n");
	return 0;

err5:
	xt_unregister_matches(ipt_builtin_mt, ARRAY_SIZE(ipt_builtin_mt));
err4:
	xt_unregister_targets(ipt_builtin_tg, ARRAY_SIZE(ipt_builtin_tg));
err2:
	unregister_pernet_subsys(&ip_tables_net_ops);
err1:
	return ret;
}

static void __exit ip_tables_fini(void)
{
	nf_unregister_sockopt(&ipt_sockopts);

	xt_unregister_matches(ipt_builtin_mt, ARRAY_SIZE(ipt_builtin_mt));
	xt_unregister_targets(ipt_builtin_tg, ARRAY_SIZE(ipt_builtin_tg));
	unregister_pernet_subsys(&ip_tables_net_ops);
}

EXPORT_SYMBOL(ipt_register_table);
EXPORT_SYMBOL(ipt_unregister_table);
EXPORT_SYMBOL(ipt_do_table);
module_init(ip_tables_init);
module_exit(ip_tables_fini);