/*
 * Packet matching code.
 *
 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
 * Copyright (C) 2006-2010 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/cache.h>
#include <linux/capability.h>
#include <linux/skbuff.h>
#include <linux/kmod.h>
#include <linux/vmalloc.h>
#include <linux/netdevice.h>
#include <linux/module.h>
#include <linux/icmp.h>
#include <net/ip.h>
#include <net/compat.h>
#include <asm/uaccess.h>
#include <linux/mutex.h>
#include <linux/proc_fs.h>
#include <linux/err.h>
#include <linux/cpumask.h>

#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <net/netfilter/nf_log.h>
#include "../../netfilter/xt_repldata.h"
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
MODULE_DESCRIPTION("IPv4 packet filter");
#ifdef CONFIG_NETFILTER_DEBUG
#define IP_NF_ASSERT(x)		WARN_ON(!(x))
#else
#define IP_NF_ASSERT(x)
#endif
void *ipt_alloc_initial_table(const struct xt_table *info)
{
	return xt_alloc_initial_table(ipt, IPT);
}
EXPORT_SYMBOL_GPL(ipt_alloc_initial_table);
/* Returns whether matches rule or not. */
/* Performance critical - called for every packet */
static inline bool
ip_packet_match(const struct iphdr *ip,
		const char *indev,
		const char *outdev,
		const struct ipt_ip *ipinfo,
		int isfrag)
{
	unsigned long ret;

#define FWINV(bool, invflg) ((bool) ^ !!(ipinfo->invflags & (invflg)))

	if (FWINV((ip->saddr&ipinfo->smsk.s_addr) != ipinfo->src.s_addr,
		  IPT_INV_SRCIP) ||
	    FWINV((ip->daddr&ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr,
		  IPT_INV_DSTIP))
		return false;

	ret = ifname_compare_aligned(indev, ipinfo->iniface, ipinfo->iniface_mask);

	if (FWINV(ret != 0, IPT_INV_VIA_IN))
		return false;

	ret = ifname_compare_aligned(outdev, ipinfo->outiface, ipinfo->outiface_mask);

	if (FWINV(ret != 0, IPT_INV_VIA_OUT))
		return false;

	/* Check specific protocol */
	if (ipinfo->proto &&
	    FWINV(ip->protocol != ipinfo->proto, IPT_INV_PROTO))
		return false;

	/* If we have a fragment rule but the packet is not a fragment
	 * then we return zero */
	if (FWINV((ipinfo->flags&IPT_F_FRAG) && !isfrag, IPT_INV_FRAG))
		return false;

	return true;
}
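/*
 * Illustrative note (not from the original source): FWINV() XORs each test
 * result with the matching IPT_INV_* bit from ipinfo->invflags.  A rule
 * such as "iptables -A INPUT ! -s 10.0.0.0/8 ..." sets IPT_INV_SRCIP, so
 * the source-address comparison above matches everything outside that
 * network.
 */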
static bool
ip_checkentry(const struct ipt_ip *ip)
{
	if (ip->flags & ~IPT_F_MASK)
		return false;
	if (ip->invflags & ~IPT_INV_MASK)
		return false;

	return true;
}
static unsigned int
ipt_error(struct sk_buff *skb, const struct xt_action_param *par)
{
	net_info_ratelimited("error: `%s'\n", (const char *)par->targinfo);

	return NF_DROP;
}
/* Performance critical */
static inline struct ipt_entry *
get_entry(const void *base, unsigned int offset)
{
	return (struct ipt_entry *)(base + offset);
}
/* All zeroes == unconditional rule. */
/* Mildly perf critical (only if packet tracing is on) */
static inline bool unconditional(const struct ipt_entry *e)
{
	static const struct ipt_ip uncond;

	return e->target_offset == sizeof(struct ipt_entry) &&
	       memcmp(&e->ip, &uncond, sizeof(uncond)) == 0;
}
/* for const-correctness */
static inline const struct xt_entry_target *
ipt_get_target_c(const struct ipt_entry *e)
{
	return ipt_get_target((struct ipt_entry *)e);
}
#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
static const char *const hooknames[] = {
	[NF_INET_PRE_ROUTING]		= "PREROUTING",
	[NF_INET_LOCAL_IN]		= "INPUT",
	[NF_INET_FORWARD]		= "FORWARD",
	[NF_INET_LOCAL_OUT]		= "OUTPUT",
	[NF_INET_POST_ROUTING]		= "POSTROUTING",
};

enum nf_ip_trace_comments {
	NF_IP_TRACE_COMMENT_RULE,
	NF_IP_TRACE_COMMENT_RETURN,
	NF_IP_TRACE_COMMENT_POLICY,
};

static const char *const comments[] = {
	[NF_IP_TRACE_COMMENT_RULE]	= "rule",
	[NF_IP_TRACE_COMMENT_RETURN]	= "return",
	[NF_IP_TRACE_COMMENT_POLICY]	= "policy",
};
static struct nf_loginfo trace_loginfo = {
	.type = NF_LOG_TYPE_LOG,
	.u = {
		.log = {
			.level = 4,
			.logflags = NF_LOG_MASK,
		},
	},
};
/* Mildly perf critical (only if packet tracing is on) */
static inline int
get_chainname_rulenum(const struct ipt_entry *s, const struct ipt_entry *e,
		      const char *hookname, const char **chainname,
		      const char **comment, unsigned int *rulenum)
{
	const struct xt_standard_target *t = (void *)ipt_get_target_c(s);

	if (strcmp(t->target.u.kernel.target->name, XT_ERROR_TARGET) == 0) {
		/* Head of user chain: ERROR target with chainname */
		*chainname = t->target.data;
		(*rulenum) = 0;
	} else if (s == e) {
		(*rulenum)++;

		if (unconditional(s) &&
		    strcmp(t->target.u.kernel.target->name,
			   XT_STANDARD_TARGET) == 0 &&
		    t->verdict < 0) {
			/* Tail of chains: STANDARD target (return/policy) */
			*comment = *chainname == hookname
				? comments[NF_IP_TRACE_COMMENT_POLICY]
				: comments[NF_IP_TRACE_COMMENT_RETURN];
		}
		return 1;
	} else
		(*rulenum)++;

	return 0;
}
static void trace_packet(struct net *net,
			 const struct sk_buff *skb,
			 unsigned int hook,
			 const struct net_device *in,
			 const struct net_device *out,
			 const char *tablename,
			 const struct xt_table_info *private,
			 const struct ipt_entry *e)
{
	const struct ipt_entry *root;
	const char *hookname, *chainname, *comment;
	const struct ipt_entry *iter;
	unsigned int rulenum = 0;

	root = get_entry(private->entries, private->hook_entry[hook]);

	hookname = chainname = hooknames[hook];
	comment = comments[NF_IP_TRACE_COMMENT_RULE];

	xt_entry_foreach(iter, root, private->size - private->hook_entry[hook])
		if (get_chainname_rulenum(iter, e, hookname,
		    &chainname, &comment, &rulenum) != 0)
			break;

	nf_log_trace(net, AF_INET, hook, skb, in, out, &trace_loginfo,
		     "TRACE: %s:%s:%s:%u ",
		     tablename, chainname, comment, rulenum);
}
#endif
static inline
struct ipt_entry *ipt_next_entry(const struct ipt_entry *entry)
{
	return (void *)entry + entry->next_offset;
}
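/*
 * Added note: each rule is one contiguous blob: a struct ipt_entry,
 * followed by zero or more xt_entry_match structures, followed by one
 * xt_entry_target.  e->target_offset and e->next_offset are byte offsets
 * from the start of the entry, which is what get_entry() and
 * ipt_next_entry() above rely on.
 */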
/* Returns one of the generic firewall policies, like NF_ACCEPT. */
unsigned int
ipt_do_table(struct sk_buff *skb,
	     const struct nf_hook_state *state,
	     struct xt_table *table)
{
	unsigned int hook = state->hook;
	static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
	const struct iphdr *ip;
	/* Initializing verdict to NF_DROP keeps gcc happy. */
	unsigned int verdict = NF_DROP;
	const char *indev, *outdev;
	const void *table_base;
	struct ipt_entry *e, **jumpstack;
	unsigned int stackidx, cpu;
	const struct xt_table_info *private;
	struct xt_action_param acpar;
	unsigned int addend;

	/* Initialization */
	stackidx = 0;
	ip = ip_hdr(skb);
	indev = state->in ? state->in->name : nulldevname;
	outdev = state->out ? state->out->name : nulldevname;
	/* We handle fragments by dealing with the first fragment as
	 * if it was a normal packet.  All other fragments are treated
	 * normally, except that they will NEVER match rules that ask
	 * things we don't know, ie. tcp syn flag or ports).  If the
	 * rule is also a fragment-specific rule, non-fragments won't
	 * match it. */
	acpar.fragoff = ntohs(ip->frag_off) & IP_OFFSET;
	acpar.thoff   = ip_hdrlen(skb);
	acpar.hotdrop = false;
	acpar.net     = state->net;
	acpar.in      = state->in;
	acpar.out     = state->out;
	acpar.family  = NFPROTO_IPV4;
	acpar.hooknum = hook;

	IP_NF_ASSERT(table->valid_hooks & (1 << hook));
	local_bh_disable();
	addend = xt_write_recseq_begin();
	private = table->private;
	cpu = smp_processor_id();
	/*
	 * Ensure we load private-> members after we've fetched the base
	 * pointer.
	 */
	smp_read_barrier_depends();
	table_base = private->entries;
	jumpstack  = (struct ipt_entry **)private->jumpstack[cpu];

	/* Switch to alternate jumpstack if we're being invoked via TEE.
	 * TEE issues XT_CONTINUE verdict on original skb so we must not
	 * clobber the jumpstack.
	 *
	 * For recursion via REJECT or SYNPROXY the stack will be clobbered
	 * but it is no problem since absolute verdict is issued by these.
	 */
	if (static_key_false(&xt_tee_enabled))
		jumpstack += private->stacksize * __this_cpu_read(nf_skb_duplicated);

	e = get_entry(table_base, private->hook_entry[hook]);
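	/*
	 * Added note: the loop below walks entries starting at the hook's
	 * entry point.  A built-in (standard) target encodes its verdict as
	 * -(NF_*)-1 when negative, XT_RETURN pops the jumpstack, and a
	 * non-negative verdict is the byte offset of the rule to jump to.
	 */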
	do {
		const struct xt_entry_target *t;
		const struct xt_entry_match *ematch;
		struct xt_counters *counter;

		IP_NF_ASSERT(e);
		if (!ip_packet_match(ip, indev, outdev,
		    &e->ip, acpar.fragoff)) {
 no_match:
			e = ipt_next_entry(e);
			continue;
		}

		xt_ematch_foreach(ematch, e) {
			acpar.match     = ematch->u.kernel.match;
			acpar.matchinfo = ematch->data;
			if (!acpar.match->match(skb, &acpar))
				goto no_match;
		}

		counter = xt_get_this_cpu_counter(&e->counters);
		ADD_COUNTER(*counter, skb->len, 1);

		t = ipt_get_target(e);
		IP_NF_ASSERT(t->u.kernel.target);

#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
		/* The packet is traced: log it */
		if (unlikely(skb->nf_trace))
			trace_packet(state->net, skb, hook, state->in,
				     state->out, table->name, private, e);
#endif
		/* Standard target? */
		if (!t->u.kernel.target->target) {
			int v;

			v = ((struct xt_standard_target *)t)->verdict;
			if (v < 0) {
				/* Pop from stack? */
				if (v != XT_RETURN) {
					verdict = (unsigned int)(-v) - 1;
					break;
				}
				if (stackidx == 0) {
					e = get_entry(table_base,
					    private->underflow[hook]);
				} else {
					e = jumpstack[--stackidx];
					e = ipt_next_entry(e);
				}
				continue;
			}
			if (table_base + v != ipt_next_entry(e) &&
			    !(e->ip.flags & IPT_F_GOTO))
				jumpstack[stackidx++] = e;

			e = get_entry(table_base, v);
			continue;
		}

		acpar.target   = t->u.kernel.target;
		acpar.targinfo = t->data;

		verdict = t->u.kernel.target->target(skb, &acpar);
		/* Target might have changed stuff. */
		ip = ip_hdr(skb);
		if (verdict == XT_CONTINUE)
			e = ipt_next_entry(e);
		else
			/* Verdict */
			break;
	} while (!acpar.hotdrop);

	xt_write_recseq_end(addend);
	local_bh_enable();

	if (acpar.hotdrop)
		return NF_DROP;
	else return verdict;
}
static bool find_jump_target(const struct xt_table_info *t,
			     const struct ipt_entry *target)
{
	struct ipt_entry *iter;

	xt_entry_foreach(iter, t->entries, t->size) {
		 if (iter == target)
			return true;
	}
	return false;
}
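/*
 * Added note: mark_source_chains() below does an iterative depth-first
 * walk of the ruleset, temporarily reusing e->counters.pcnt as a back
 * pointer and the comefrom bits as a "visited" marker, so rule loops and
 * invalid jumps are rejected at table-load time.
 */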
/* Figures out from what hook each rule can be called: returns 0 if
   there are loops.  Puts hook bitmask in comefrom. */
static int
mark_source_chains(const struct xt_table_info *newinfo,
		   unsigned int valid_hooks, void *entry0)
{
	unsigned int hook;

	/* No recursion; use packet counter to save back ptrs (reset
	   to 0 as we leave), and comefrom to save source hook bitmask */
	for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
		unsigned int pos = newinfo->hook_entry[hook];
		struct ipt_entry *e = (struct ipt_entry *)(entry0 + pos);

		if (!(valid_hooks & (1 << hook)))
			continue;

		/* Set initial back pointer. */
		e->counters.pcnt = pos;

		for (;;) {
			const struct xt_standard_target *t
				= (void *)ipt_get_target_c(e);
			int visited = e->comefrom & (1 << hook);

			if (e->comefrom & (1 << NF_INET_NUMHOOKS))
				return 0;

			e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));

			/* Unconditional return/END. */
			if ((unconditional(e) &&
			     (strcmp(t->target.u.user.name,
				     XT_STANDARD_TARGET) == 0) &&
			     t->verdict < 0) || visited) {
				unsigned int oldpos, size;

				if ((strcmp(t->target.u.user.name,
					    XT_STANDARD_TARGET) == 0) &&
				    t->verdict < -NF_MAX_VERDICT - 1)
					return 0;

				/* Return: backtrack through the last
				   big jump. */
				do {
					e->comefrom ^= (1<<NF_INET_NUMHOOKS);
					oldpos = pos;
					pos = e->counters.pcnt;
					e->counters.pcnt = 0;

					/* We're at the start. */
					if (pos == oldpos)
						goto next;

					e = (struct ipt_entry *)
						(entry0 + pos);
				} while (oldpos == pos + e->next_offset);

				/* Move along one */
				size = e->next_offset;
				e = (struct ipt_entry *)
					(entry0 + pos + size);
				if (pos + size >= newinfo->size)
					return 0;
				e->counters.pcnt = pos;
				pos += size;
			} else {
				int newpos = t->verdict;

				if (strcmp(t->target.u.user.name,
					   XT_STANDARD_TARGET) == 0 &&
				    newpos >= 0) {
					/* This a jump; chase it. */
					e = (struct ipt_entry *)
						(entry0 + newpos);
					if (!find_jump_target(newinfo, e))
						return 0;
				} else {
					/* ... this is a fallthru */
					newpos = pos + e->next_offset;
					if (newpos >= newinfo->size)
						return 0;
				}
				e = (struct ipt_entry *)
					(entry0 + newpos);
				e->counters.pcnt = pos;
				pos = newpos;
			}
		}
next:
		;
	}
	return 1;
}
static void cleanup_match(struct xt_entry_match *m, struct net *net)
{
	struct xt_mtdtor_param par;

	par.net       = net;
	par.match     = m->u.kernel.match;
	par.matchinfo = m->data;
	par.family    = NFPROTO_IPV4;
	if (par.match->destroy != NULL)
		par.match->destroy(&par);
	module_put(par.match->me);
}
static int
check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
{
	const struct ipt_ip *ip = par->entryinfo;

	par->match     = m->u.kernel.match;
	par->matchinfo = m->data;

	return xt_check_match(par, m->u.match_size - sizeof(*m),
			      ip->proto, ip->invflags & IPT_INV_PROTO);
}
static int
find_check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
{
	struct xt_match *match;
	int ret;

	match = xt_request_find_match(NFPROTO_IPV4, m->u.user.name,
				      m->u.user.revision);
	if (IS_ERR(match))
		return PTR_ERR(match);
	m->u.kernel.match = match;

	ret = check_match(m, par);
	if (ret)
		goto err;

	return 0;
err:
	module_put(m->u.kernel.match->me);
	return ret;
}
static int check_target(struct ipt_entry *e, struct net *net, const char *name)
{
	struct xt_entry_target *t = ipt_get_target(e);
	struct xt_tgchk_param par = {
		.net       = net,
		.table     = name,
		.entryinfo = e,
		.target    = t->u.kernel.target,
		.targinfo  = t->data,
		.hook_mask = e->comefrom,
		.family    = NFPROTO_IPV4,
	};

	return xt_check_target(&par, t->u.target_size - sizeof(*t),
			       e->ip.proto, e->ip.invflags & IPT_INV_PROTO);
}
static int
find_check_entry(struct ipt_entry *e, struct net *net, const char *name,
		 unsigned int size)
{
	struct xt_entry_target *t;
	struct xt_target *target;
	int ret;
	unsigned int j;
	struct xt_mtchk_param mtpar;
	struct xt_entry_match *ematch;
	unsigned long pcnt;

	pcnt = xt_percpu_counter_alloc();
	if (IS_ERR_VALUE(pcnt))
		return -ENOMEM;
	e->counters.pcnt = pcnt;

	j = 0;
	mtpar.net	= net;
	mtpar.table     = name;
	mtpar.entryinfo = &e->ip;
	mtpar.hook_mask = e->comefrom;
	mtpar.family    = NFPROTO_IPV4;
	xt_ematch_foreach(ematch, e) {
		ret = find_check_match(ematch, &mtpar);
		if (ret != 0)
			goto cleanup_matches;
		++j;
	}

	t = ipt_get_target(e);
	target = xt_request_find_target(NFPROTO_IPV4, t->u.user.name,
					t->u.user.revision);
	if (IS_ERR(target)) {
		ret = PTR_ERR(target);
		goto cleanup_matches;
	}
	t->u.kernel.target = target;

	ret = check_target(e, net, name);
	if (ret)
		goto err;

	return 0;
 err:
	module_put(t->u.kernel.target->me);
 cleanup_matches:
	xt_ematch_foreach(ematch, e) {
		if (j-- == 0)
			break;
		cleanup_match(ematch, net);
	}

	xt_percpu_counter_free(e->counters.pcnt);

	return ret;
}
static bool check_underflow(const struct ipt_entry *e)
{
	const struct xt_entry_target *t;
	unsigned int verdict;

	if (!unconditional(e))
		return false;
	t = ipt_get_target_c(e);
	if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
		return false;
	verdict = ((struct xt_standard_target *)t)->verdict;
	verdict = -verdict - 1;
	return verdict == NF_DROP || verdict == NF_ACCEPT;
}
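/*
 * Added note: an "underflow" entry is the base-chain policy rule.  It must
 * be an unconditional standard target whose negative verdict decodes (via
 * -verdict - 1) to NF_DROP or NF_ACCEPT, which is what check_underflow()
 * enforces above.
 */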
static int
check_entry_size_and_hooks(struct ipt_entry *e,
			   struct xt_table_info *newinfo,
			   const unsigned char *base,
			   const unsigned char *limit,
			   const unsigned int *hook_entries,
			   const unsigned int *underflows,
			   unsigned int valid_hooks)
{
	unsigned int h;
	int err;

	if ((unsigned long)e % __alignof__(struct ipt_entry) != 0 ||
	    (unsigned char *)e + sizeof(struct ipt_entry) >= limit ||
	    (unsigned char *)e + e->next_offset > limit)
		return -EINVAL;

	if (e->next_offset
	    < sizeof(struct ipt_entry) + sizeof(struct xt_entry_target))
		return -EINVAL;

	if (!ip_checkentry(&e->ip))
		return -EINVAL;

	err = xt_check_entry_offsets(e, e->elems, e->target_offset,
				     e->next_offset);
	if (err)
		return err;

	/* Check hooks & underflows */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if (!(valid_hooks & (1 << h)))
			continue;
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h]) {
			if (!check_underflow(e))
				return -EINVAL;

			newinfo->underflow[h] = underflows[h];
		}
	}

	/* Clear counters and comefrom */
	e->counters = ((struct xt_counters) { 0, 0 });
	e->comefrom = 0;
	return 0;
}
static void
cleanup_entry(struct ipt_entry *e, struct net *net)
{
	struct xt_tgdtor_param par;
	struct xt_entry_target *t;
	struct xt_entry_match *ematch;

	/* Cleanup all matches */
	xt_ematch_foreach(ematch, e)
		cleanup_match(ematch, net);
	t = ipt_get_target(e);

	par.net      = net;
	par.target   = t->u.kernel.target;
	par.targinfo = t->data;
	par.family   = NFPROTO_IPV4;
	if (par.target->destroy != NULL)
		par.target->destroy(&par);
	module_put(par.target->me);
	xt_percpu_counter_free(e->counters.pcnt);
}
/* Checks and translates the user-supplied table segment (held in
   newinfo) */
static int
translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
		const struct ipt_replace *repl)
{
	struct ipt_entry *iter;
	unsigned int i;
	int ret = 0;

	newinfo->size = repl->size;
	newinfo->number = repl->num_entries;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = 0xFFFFFFFF;
		newinfo->underflow[i] = 0xFFFFFFFF;
	}

	i = 0;
	/* Walk through entries, checking offsets. */
	xt_entry_foreach(iter, entry0, newinfo->size) {
		ret = check_entry_size_and_hooks(iter, newinfo, entry0,
						 entry0 + repl->size,
						 repl->hook_entry,
						 repl->underflow,
						 repl->valid_hooks);
		if (ret != 0)
			return ret;
		++i;
		if (strcmp(ipt_get_target(iter)->u.user.name,
		    XT_ERROR_TARGET) == 0)
			++newinfo->stacksize;
	}

	if (i != repl->num_entries)
		return -EINVAL;

	/* Check hooks all assigned */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(repl->valid_hooks & (1 << i)))
			continue;
		if (newinfo->hook_entry[i] == 0xFFFFFFFF)
			return -EINVAL;
		if (newinfo->underflow[i] == 0xFFFFFFFF)
			return -EINVAL;
	}

	if (!mark_source_chains(newinfo, repl->valid_hooks, entry0))
		return -ELOOP;

	/* Finally, each sanity check must pass */
	i = 0;
	xt_entry_foreach(iter, entry0, newinfo->size) {
		ret = find_check_entry(iter, net, repl->name, repl->size);
		if (ret != 0)
			break;
		++i;
	}

	if (ret != 0) {
		xt_entry_foreach(iter, entry0, newinfo->size) {
			if (i-- == 0)
				break;
			cleanup_entry(iter, net);
		}
		return ret;
	}

	return ret;
}
static void
get_counters(const struct xt_table_info *t,
	     struct xt_counters counters[])
{
	struct ipt_entry *iter;
	unsigned int cpu;
	unsigned int i;

	for_each_possible_cpu(cpu) {
		seqcount_t *s = &per_cpu(xt_recseq, cpu);

		i = 0;
		xt_entry_foreach(iter, t->entries, t->size) {
			struct xt_counters *tmp;
			u64 bcnt, pcnt;
			unsigned int start;

			tmp = xt_get_per_cpu_counter(&iter->counters, cpu);
			do {
				start = read_seqcount_begin(s);
				bcnt = tmp->bcnt;
				pcnt = tmp->pcnt;
			} while (read_seqcount_retry(s, start));

			ADD_COUNTER(counters[i], bcnt, pcnt);
			++i; /* macro does multi eval of i */
		}
	}
}
static struct xt_counters *alloc_counters(const struct xt_table *table)
{
	unsigned int countersize;
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;

	/* We need atomic snapshot of counters: rest doesn't change
	   (other than comefrom, which userspace doesn't care
	   about). */
	countersize = sizeof(struct xt_counters) * private->number;
	counters = vzalloc(countersize);

	if (counters == NULL)
		return ERR_PTR(-ENOMEM);

	get_counters(private, counters);

	return counters;
}
static int
copy_entries_to_user(unsigned int total_size,
		     const struct xt_table *table,
		     void __user *userptr)
{
	unsigned int off, num;
	const struct ipt_entry *e;
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;
	int ret = 0;
	const void *loc_cpu_entry;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	loc_cpu_entry = private->entries;
	if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
		ret = -EFAULT;
		goto free_counters;
	}

	/* FIXME: use iterator macros --RR */
	/* ... then go back and fix counters and names */
	for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
		unsigned int i;
		const struct xt_entry_match *m;
		const struct xt_entry_target *t;

		e = (struct ipt_entry *)(loc_cpu_entry + off);
		if (copy_to_user(userptr + off
				 + offsetof(struct ipt_entry, counters),
				 &counters[num],
				 sizeof(counters[num])) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}

		for (i = sizeof(struct ipt_entry);
		     i < e->target_offset;
		     i += m->u.match_size) {
			m = (void *)e + i;

			if (copy_to_user(userptr + off + i
					 + offsetof(struct xt_entry_match,
						    u.user.name),
					 m->u.kernel.match->name,
					 strlen(m->u.kernel.match->name)+1)
			    != 0) {
				ret = -EFAULT;
				goto free_counters;
			}
		}

		t = ipt_get_target_c(e);
		if (copy_to_user(userptr + off + e->target_offset
				 + offsetof(struct xt_entry_target,
					    u.user.name),
				 t->u.kernel.target->name,
				 strlen(t->u.kernel.target->name)+1) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}
	}

 free_counters:
	vfree(counters);
	return ret;
}
#ifdef CONFIG_COMPAT
static void compat_standard_from_user(void *dst, const void *src)
{
	int v = *(compat_int_t *)src;

	if (v > 0)
		v += xt_compat_calc_jump(AF_INET, v);
	memcpy(dst, &v, sizeof(v));
}

static int compat_standard_to_user(void __user *dst, const void *src)
{
	compat_int_t cv = *(int *)src;

	if (cv > 0)
		cv -= xt_compat_calc_jump(AF_INET, cv);
	return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
}
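/*
 * Added note: 32-bit (compat) entries are smaller than native ones, so
 * jump offsets written by 32-bit userspace do not match the 64-bit
 * layout; xt_compat_calc_jump() returns the accumulated size delta and
 * the two helpers above adjust standard-target verdicts accordingly.
 */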
static int compat_calc_entry(const struct ipt_entry *e,
			     const struct xt_table_info *info,
			     const void *base, struct xt_table_info *newinfo)
{
	const struct xt_entry_match *ematch;
	const struct xt_entry_target *t;
	unsigned int entry_offset;
	int off, i, ret;

	off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
	entry_offset = (void *)e - base;
	xt_ematch_foreach(ematch, e)
		off += xt_compat_match_offset(ematch->u.kernel.match);
	t = ipt_get_target_c(e);
	off += xt_compat_target_offset(t->u.kernel.target);
	newinfo->size -= off;
	ret = xt_compat_add_offset(AF_INET, entry_offset, off);
	if (ret)
		return ret;

	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		if (info->hook_entry[i] &&
		    (e < (struct ipt_entry *)(base + info->hook_entry[i])))
			newinfo->hook_entry[i] -= off;
		if (info->underflow[i] &&
		    (e < (struct ipt_entry *)(base + info->underflow[i])))
			newinfo->underflow[i] -= off;
	}
	return 0;
}
static int compat_table_info(const struct xt_table_info *info,
			     struct xt_table_info *newinfo)
{
	struct ipt_entry *iter;
	const void *loc_cpu_entry;
	int ret;

	if (!newinfo || !info)
		return -EINVAL;

	/* we dont care about newinfo->entries */
	memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
	newinfo->initial_entries = 0;
	loc_cpu_entry = info->entries;
	xt_compat_init_offsets(AF_INET, info->number);
	xt_entry_foreach(iter, loc_cpu_entry, info->size) {
		ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
		if (ret != 0)
			return ret;
	}
	return 0;
}
#endif
static int get_info(struct net *net, void __user *user,
		    const int *len, int compat)
{
	char name[XT_TABLE_MAXNAMELEN];
	struct xt_table *t;
	int ret;

	if (*len != sizeof(struct ipt_getinfo))
		return -EINVAL;

	if (copy_from_user(name, user, sizeof(name)) != 0)
		return -EFAULT;

	name[XT_TABLE_MAXNAMELEN-1] = '\0';
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_lock(AF_INET);
#endif
	t = try_then_request_module(xt_find_table_lock(net, AF_INET, name),
				    "iptable_%s", name);
	if (!IS_ERR_OR_NULL(t)) {
		struct ipt_getinfo info;
		const struct xt_table_info *private = t->private;
#ifdef CONFIG_COMPAT
		struct xt_table_info tmp;

		if (compat) {
			ret = compat_table_info(private, &tmp);
			xt_compat_flush_offsets(AF_INET);
			private = &tmp;
		}
#endif
		memset(&info, 0, sizeof(info));
		info.valid_hooks = t->valid_hooks;
		memcpy(info.hook_entry, private->hook_entry,
		       sizeof(info.hook_entry));
		memcpy(info.underflow, private->underflow,
		       sizeof(info.underflow));
		info.num_entries = private->number;
		info.size = private->size;
		strcpy(info.name, name);

		if (copy_to_user(user, &info, *len) != 0)
			ret = -EFAULT;
		else
			ret = 0;

		xt_table_unlock(t);
		module_put(t->me);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_unlock(AF_INET);
#endif
	return ret;
}
static int
get_entries(struct net *net, struct ipt_get_entries __user *uptr,
	    const int *len)
{
	int ret;
	struct ipt_get_entries get;
	struct xt_table *t;

	if (*len < sizeof(get))
		return -EINVAL;
	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;
	if (*len != sizeof(struct ipt_get_entries) + get.size)
		return -EINVAL;
	get.name[sizeof(get.name) - 1] = '\0';

	t = xt_find_table_lock(net, AF_INET, get.name);
	if (!IS_ERR_OR_NULL(t)) {
		const struct xt_table_info *private = t->private;
		if (get.size == private->size)
			ret = copy_entries_to_user(private->size,
						   t, uptr->entrytable);
		else
			ret = -EAGAIN;

		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;

	return ret;
}
static int
__do_replace(struct net *net, const char *name, unsigned int valid_hooks,
	     struct xt_table_info *newinfo, unsigned int num_counters,
	     void __user *counters_ptr)
{
	int ret;
	struct xt_table *t;
	struct xt_table_info *oldinfo;
	struct xt_counters *counters;
	struct ipt_entry *iter;

	ret = 0;
	counters = vzalloc(num_counters * sizeof(struct xt_counters));
	if (!counters) {
		ret = -ENOMEM;
		goto out;
	}

	t = try_then_request_module(xt_find_table_lock(net, AF_INET, name),
				    "iptable_%s", name);
	if (IS_ERR_OR_NULL(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free_newinfo_counters_untrans;
	}

	/* You lied! */
	if (valid_hooks != t->valid_hooks) {
		ret = -EINVAL;
		goto put_module;
	}

	oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
	if (!oldinfo)
		goto put_module;

	/* Update module usage count based on number of rules */
	if ((oldinfo->number > oldinfo->initial_entries) ||
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);
	if ((oldinfo->number > oldinfo->initial_entries) &&
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);

	/* Get the old counters, and synchronize with replace */
	get_counters(oldinfo, counters);

	/* Decrease module usage counts and free resource */
	xt_entry_foreach(iter, oldinfo->entries, oldinfo->size)
		cleanup_entry(iter, net);

	xt_free_table_info(oldinfo);
	if (copy_to_user(counters_ptr, counters,
			 sizeof(struct xt_counters) * num_counters) != 0) {
		/* Silent error, can't fail, new table is already in place */
		net_warn_ratelimited("iptables: counters copy to user failed while replacing table\n");
	}
	vfree(counters);
	xt_table_unlock(t);
	return ret;

 put_module:
	module_put(t->me);
	xt_table_unlock(t);
 free_newinfo_counters_untrans:
	vfree(counters);
 out:
	return ret;
}
static int
do_replace(struct net *net, const void __user *user, unsigned int len)
{
	int ret;
	struct ipt_replace tmp;
	struct xt_table_info *newinfo;
	void *loc_cpu_entry;
	struct ipt_entry *iter;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* overflow check */
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
		return -ENOMEM;
	if (tmp.num_counters == 0)
		return -EINVAL;

	tmp.name[sizeof(tmp.name)-1] = 0;

	newinfo = xt_alloc_table_info(tmp.size);
	if (!newinfo)
		return -ENOMEM;

	loc_cpu_entry = newinfo->entries;
	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
			   tmp.size) != 0) {
		ret = -EFAULT;
		goto free_newinfo;
	}

	ret = translate_table(net, newinfo, loc_cpu_entry, &tmp);
	if (ret != 0)
		goto free_newinfo;

	ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
			   tmp.num_counters, tmp.counters);
	if (ret)
		goto free_newinfo_untrans;
	return 0;

 free_newinfo_untrans:
	xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
		cleanup_entry(iter, net);
 free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
}
static int
do_add_counters(struct net *net, const void __user *user,
		unsigned int len, int compat)
{
	unsigned int i;
	struct xt_counters_info tmp;
	struct xt_counters *paddc;
	struct xt_table *t;
	const struct xt_table_info *private;
	int ret = 0;
	struct ipt_entry *iter;
	unsigned int addend;

	paddc = xt_copy_counters_from_user(user, len, &tmp, compat);
	if (IS_ERR(paddc))
		return PTR_ERR(paddc);

	t = xt_find_table_lock(net, AF_INET, tmp.name);
	if (IS_ERR_OR_NULL(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free;
	}

	local_bh_disable();
	private = t->private;
	if (private->number != tmp.num_counters) {
		ret = -EINVAL;
		goto unlock_up_free;
	}

	i = 0;
	addend = xt_write_recseq_begin();
	xt_entry_foreach(iter, private->entries, private->size) {
		struct xt_counters *tmp;

		tmp = xt_get_this_cpu_counter(&iter->counters);
		ADD_COUNTER(*tmp, paddc[i].bcnt, paddc[i].pcnt);
		++i;
	}
	xt_write_recseq_end(addend);
 unlock_up_free:
	local_bh_enable();
	xt_table_unlock(t);
	module_put(t->me);
 free:
	vfree(paddc);

	return ret;
}
#ifdef CONFIG_COMPAT
struct compat_ipt_replace {
	char			name[XT_TABLE_MAXNAMELEN];
	u32			valid_hooks;
	u32			num_entries;
	u32			size;
	u32			hook_entry[NF_INET_NUMHOOKS];
	u32			underflow[NF_INET_NUMHOOKS];
	u32			num_counters;
	compat_uptr_t		counters;	/* struct xt_counters * */
	struct compat_ipt_entry	entries[0];
};
static int
compat_copy_entry_to_user(struct ipt_entry *e, void __user **dstptr,
			  unsigned int *size, struct xt_counters *counters,
			  unsigned int i)
{
	struct xt_entry_target *t;
	struct compat_ipt_entry __user *ce;
	u_int16_t target_offset, next_offset;
	compat_uint_t origsize;
	const struct xt_entry_match *ematch;
	int ret = 0;

	origsize = *size;
	ce = (struct compat_ipt_entry __user *)*dstptr;
	if (copy_to_user(ce, e, sizeof(struct ipt_entry)) != 0 ||
	    copy_to_user(&ce->counters, &counters[i],
			 sizeof(counters[i])) != 0)
		return -EFAULT;

	*dstptr += sizeof(struct compat_ipt_entry);
	*size -= sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);

	xt_ematch_foreach(ematch, e) {
		ret = xt_compat_match_to_user(ematch, dstptr, size);
		if (ret != 0)
			return ret;
	}
	target_offset = e->target_offset - (origsize - *size);
	t = ipt_get_target(e);
	ret = xt_compat_target_to_user(t, dstptr, size);
	if (ret)
		return ret;
	next_offset = e->next_offset - (origsize - *size);
	if (put_user(target_offset, &ce->target_offset) != 0 ||
	    put_user(next_offset, &ce->next_offset) != 0)
		return -EFAULT;
	return 0;
}
static int
compat_find_calc_match(struct xt_entry_match *m,
		       const struct ipt_ip *ip,
		       int *size)
{
	struct xt_match *match;

	match = xt_request_find_match(NFPROTO_IPV4, m->u.user.name,
				      m->u.user.revision);
	if (IS_ERR(match))
		return PTR_ERR(match);

	m->u.kernel.match = match;
	*size += xt_compat_match_offset(match);
	return 0;
}
static void compat_release_entry(struct compat_ipt_entry *e)
{
	struct xt_entry_target *t;
	struct xt_entry_match *ematch;

	/* Cleanup all matches */
	xt_ematch_foreach(ematch, e)
		module_put(ematch->u.kernel.match->me);
	t = compat_ipt_get_target(e);
	module_put(t->u.kernel.target->me);
}
static int
check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
				  struct xt_table_info *newinfo,
				  unsigned int *size,
				  const unsigned char *base,
				  const unsigned char *limit)
{
	struct xt_entry_match *ematch;
	struct xt_entry_target *t;
	struct xt_target *target;
	unsigned int entry_offset;
	unsigned int j;
	int ret, off;

	if ((unsigned long)e % __alignof__(struct compat_ipt_entry) != 0 ||
	    (unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit ||
	    (unsigned char *)e + e->next_offset > limit)
		return -EINVAL;

	if (e->next_offset < sizeof(struct compat_ipt_entry) +
			     sizeof(struct compat_xt_entry_target))
		return -EINVAL;

	if (!ip_checkentry(&e->ip))
		return -EINVAL;

	ret = xt_compat_check_entry_offsets(e, e->elems,
					    e->target_offset, e->next_offset);
	if (ret)
		return ret;

	off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
	entry_offset = (void *)e - (void *)base;
	j = 0;
	xt_ematch_foreach(ematch, e) {
		ret = compat_find_calc_match(ematch, &e->ip, &off);
		if (ret != 0)
			goto release_matches;
		++j;
	}

	t = compat_ipt_get_target(e);
	target = xt_request_find_target(NFPROTO_IPV4, t->u.user.name,
					t->u.user.revision);
	if (IS_ERR(target)) {
		ret = PTR_ERR(target);
		goto release_matches;
	}
	t->u.kernel.target = target;

	off += xt_compat_target_offset(target);
	*size += off;
	ret = xt_compat_add_offset(AF_INET, entry_offset, off);
	if (ret)
		goto out;

	return 0;

out:
	module_put(t->u.kernel.target->me);
release_matches:
	xt_ematch_foreach(ematch, e) {
		if (j-- == 0)
			break;
		module_put(ematch->u.kernel.match->me);
	}
	return ret;
}
static void
compat_copy_entry_from_user(struct compat_ipt_entry *e, void **dstptr,
			    unsigned int *size,
			    struct xt_table_info *newinfo, unsigned char *base)
{
	struct xt_entry_target *t;
	struct xt_target *target;
	struct ipt_entry *de;
	unsigned int origsize;
	int h;
	struct xt_entry_match *ematch;

	origsize = *size;
	de = (struct ipt_entry *)*dstptr;
	memcpy(de, e, sizeof(struct ipt_entry));
	memcpy(&de->counters, &e->counters, sizeof(e->counters));

	*dstptr += sizeof(struct ipt_entry);
	*size += sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);

	xt_ematch_foreach(ematch, e)
		xt_compat_match_from_user(ematch, dstptr, size);

	de->target_offset = e->target_offset - (origsize - *size);
	t = compat_ipt_get_target(e);
	target = t->u.kernel.target;
	xt_compat_target_from_user(t, dstptr, size);

	de->next_offset = e->next_offset - (origsize - *size);

	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if ((unsigned char *)de - base < newinfo->hook_entry[h])
			newinfo->hook_entry[h] -= origsize - *size;
		if ((unsigned char *)de - base < newinfo->underflow[h])
			newinfo->underflow[h] -= origsize - *size;
	}
}
static int
translate_compat_table(struct net *net,
		       struct xt_table_info **pinfo,
		       void **pentry0,
		       const struct compat_ipt_replace *compatr)
{
	unsigned int i, j;
	struct xt_table_info *newinfo, *info;
	void *pos, *entry0, *entry1;
	struct compat_ipt_entry *iter0;
	struct ipt_replace repl;
	unsigned int size;
	int ret;

	info = *pinfo;
	entry0 = *pentry0;
	size = compatr->size;
	info->number = compatr->num_entries;

	j = 0;
	xt_compat_lock(AF_INET);
	xt_compat_init_offsets(AF_INET, compatr->num_entries);
	/* Walk through entries, checking offsets. */
	xt_entry_foreach(iter0, entry0, compatr->size) {
		ret = check_compat_entry_size_and_hooks(iter0, info, &size,
							entry0,
							entry0 + compatr->size);
		if (ret != 0)
			goto out_unlock;
		++j;
	}

	ret = -EINVAL;
	if (j != compatr->num_entries)
		goto out_unlock;

	ret = -ENOMEM;
	newinfo = xt_alloc_table_info(size);
	if (!newinfo)
		goto out_unlock;

	newinfo->number = compatr->num_entries;
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = compatr->hook_entry[i];
		newinfo->underflow[i] = compatr->underflow[i];
	}
	entry1 = newinfo->entries;
	pos = entry1;
	size = compatr->size;
	xt_entry_foreach(iter0, entry0, compatr->size)
		compat_copy_entry_from_user(iter0, &pos, &size,
					    newinfo, entry1);

	/* all module references in entry0 are now gone.
	 * entry1/newinfo contains a 64bit ruleset that looks exactly as
	 * generated by 64bit userspace.
	 *
	 * Call standard translate_table() to validate all hook_entrys,
	 * underflows, check for loops, etc.
	 */
	xt_compat_flush_offsets(AF_INET);
	xt_compat_unlock(AF_INET);

	memcpy(&repl, compatr, sizeof(*compatr));

	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		repl.hook_entry[i] = newinfo->hook_entry[i];
		repl.underflow[i] = newinfo->underflow[i];
	}

	repl.num_counters = 0;
	repl.counters = NULL;
	repl.size = newinfo->size;
	ret = translate_table(net, newinfo, entry1, &repl);
	if (ret)
		goto free_newinfo;

	*pinfo = newinfo;
	*pentry0 = entry1;
	xt_free_table_info(info);
	return 0;

free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
out_unlock:
	xt_compat_flush_offsets(AF_INET);
	xt_compat_unlock(AF_INET);
	xt_entry_foreach(iter0, entry0, compatr->size) {
		if (j-- == 0)
			break;
		compat_release_entry(iter0);
	}
	return ret;
}
static int
compat_do_replace(struct net *net, void __user *user, unsigned int len)
{
	int ret;
	struct compat_ipt_replace tmp;
	struct xt_table_info *newinfo;
	void *loc_cpu_entry;
	struct ipt_entry *iter;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* overflow check */
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
		return -ENOMEM;
	if (tmp.num_counters == 0)
		return -EINVAL;

	tmp.name[sizeof(tmp.name)-1] = 0;

	newinfo = xt_alloc_table_info(tmp.size);
	if (!newinfo)
		return -ENOMEM;

	loc_cpu_entry = newinfo->entries;
	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
			   tmp.size) != 0) {
		ret = -EFAULT;
		goto free_newinfo;
	}

	ret = translate_compat_table(net, &newinfo, &loc_cpu_entry, &tmp);
	if (ret != 0)
		goto free_newinfo;

	ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
			   tmp.num_counters, compat_ptr(tmp.counters));
	if (ret)
		goto free_newinfo_untrans;
	return 0;

 free_newinfo_untrans:
	xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
		cleanup_entry(iter, net);
 free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
}
static int
compat_do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user,
		      unsigned int len)
{
	int ret;

	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_SET_REPLACE:
		ret = compat_do_replace(sock_net(sk), user, len);
		break;

	case IPT_SO_SET_ADD_COUNTERS:
		ret = do_add_counters(sock_net(sk), user, len, 1);
		break;

	default:
		ret = -EINVAL;
	}

	return ret;
}
struct compat_ipt_get_entries {
	char name[XT_TABLE_MAXNAMELEN];
	compat_uint_t size;
	struct compat_ipt_entry entrytable[0];
};
static int
compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
			    void __user *userptr)
{
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;
	void __user *pos;
	unsigned int size;
	int ret = 0;
	unsigned int i = 0;
	struct ipt_entry *iter;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	pos = userptr;
	size = total_size;
	xt_entry_foreach(iter, private->entries, total_size) {
		ret = compat_copy_entry_to_user(iter, &pos,
						&size, counters, i++);
		if (ret != 0)
			break;
	}

	vfree(counters);
	return ret;
}
static int
compat_get_entries(struct net *net, struct compat_ipt_get_entries __user *uptr,
		   int *len)
{
	int ret;
	struct compat_ipt_get_entries get;
	struct xt_table *t;

	if (*len < sizeof(get))
		return -EINVAL;

	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;

	if (*len != sizeof(struct compat_ipt_get_entries) + get.size)
		return -EINVAL;

	get.name[sizeof(get.name) - 1] = '\0';

	xt_compat_lock(AF_INET);
	t = xt_find_table_lock(net, AF_INET, get.name);
	if (!IS_ERR_OR_NULL(t)) {
		const struct xt_table_info *private = t->private;
		struct xt_table_info info;
		ret = compat_table_info(private, &info);
		if (!ret && get.size == info.size)
			ret = compat_copy_entries_to_user(private->size,
							  t, uptr->entrytable);
		else
			ret = -EAGAIN;

		xt_compat_flush_offsets(AF_INET);
		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;

	xt_compat_unlock(AF_INET);
	return ret;
}
static int do_ipt_get_ctl(struct sock *, int, void __user *, int *);

static int
compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;

	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_GET_INFO:
		ret = get_info(sock_net(sk), user, len, 1);
		break;
	case IPT_SO_GET_ENTRIES:
		ret = compat_get_entries(sock_net(sk), user, len);
		break;
	default:
		ret = do_ipt_get_ctl(sk, cmd, user, len);
	}
	return ret;
}
#endif
static int
do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
{
	int ret;

	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_SET_REPLACE:
		ret = do_replace(sock_net(sk), user, len);
		break;

	case IPT_SO_SET_ADD_COUNTERS:
		ret = do_add_counters(sock_net(sk), user, len, 0);
		break;

	default:
		ret = -EINVAL;
	}

	return ret;
}
static int
do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;

	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_GET_INFO:
		ret = get_info(sock_net(sk), user, len, 0);
		break;

	case IPT_SO_GET_ENTRIES:
		ret = get_entries(sock_net(sk), user, len);
		break;

	case IPT_SO_GET_REVISION_MATCH:
	case IPT_SO_GET_REVISION_TARGET: {
		struct xt_get_revision rev;
		int target;

		if (*len != sizeof(rev)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
			ret = -EFAULT;
			break;
		}
		rev.name[sizeof(rev.name)-1] = 0;

		if (cmd == IPT_SO_GET_REVISION_TARGET)
			target = 1;
		else
			target = 0;

		try_then_request_module(xt_find_revision(AF_INET, rev.name,
							 rev.revision,
							 target, &ret),
					"ipt_%s", rev.name);
		break;
	}

	default:
		ret = -EINVAL;
	}

	return ret;
}
static void __ipt_unregister_table(struct net *net, struct xt_table *table)
{
	struct xt_table_info *private;
	void *loc_cpu_entry;
	struct module *table_owner = table->me;
	struct ipt_entry *iter;

	private = xt_unregister_table(table);

	/* Decrease module usage counts and free resources */
	loc_cpu_entry = private->entries;
	xt_entry_foreach(iter, loc_cpu_entry, private->size)
		cleanup_entry(iter, net);
	if (private->number > private->initial_entries)
		module_put(table_owner);
	xt_free_table_info(private);
}
int ipt_register_table(struct net *net, const struct xt_table *table,
		       const struct ipt_replace *repl,
		       const struct nf_hook_ops *ops, struct xt_table **res)
{
	int ret;
	struct xt_table_info *newinfo;
	struct xt_table_info bootstrap = {0};
	void *loc_cpu_entry;
	struct xt_table *new_table;

	newinfo = xt_alloc_table_info(repl->size);
	if (!newinfo)
		return -ENOMEM;

	loc_cpu_entry = newinfo->entries;
	memcpy(loc_cpu_entry, repl->entries, repl->size);

	ret = translate_table(net, newinfo, loc_cpu_entry, repl);
	if (ret != 0)
		goto out_free;

	new_table = xt_register_table(net, table, &bootstrap, newinfo);
	if (IS_ERR(new_table)) {
		ret = PTR_ERR(new_table);
		goto out_free;
	}

	/* set res now, will see skbs right after nf_register_net_hooks */
	WRITE_ONCE(*res, new_table);

	ret = nf_register_net_hooks(net, ops, hweight32(table->valid_hooks));
	if (ret != 0) {
		__ipt_unregister_table(net, new_table);
		*res = NULL;
	}

	return ret;

out_free:
	xt_free_table_info(newinfo);
	return ret;
}
void ipt_unregister_table(struct net *net, struct xt_table *table,
			  const struct nf_hook_ops *ops)
{
	nf_unregister_net_hooks(net, ops, hweight32(table->valid_hooks));
	__ipt_unregister_table(net, table);
}
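/*
 * Added note: table modules such as iptable_filter call
 * ipt_register_table()/ipt_unregister_table() with their ipt_replace
 * template and hook ops; the exported ipt_do_table() is then invoked from
 * those netfilter hooks for every packet traversing the table.
 */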
/* Returns 1 if the type and code is matched by the range, 0 otherwise */
static inline bool
icmp_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
		     u_int8_t type, u_int8_t code,
		     bool invert)
{
	return ((test_type == 0xFF) ||
		(type == test_type && code >= min_code && code <= max_code))
		^ invert;
}
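/*
 * Added note: a test_type of 0xFF acts as a wildcard ("any ICMP type");
 * otherwise the type must match exactly and the code must fall within
 * [min_code, max_code].  The result is XORed with 'invert' to implement
 * negated (!) --icmp-type rules.
 */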
static bool
icmp_match(const struct sk_buff *skb, struct xt_action_param *par)
{
	const struct icmphdr *ic;
	struct icmphdr _icmph;
	const struct ipt_icmp *icmpinfo = par->matchinfo;

	/* Must not be a fragment. */
	if (par->fragoff != 0)
		return false;

	ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
	if (ic == NULL) {
		/* We've been asked to examine this packet, and we
		 * can't.  Hence, no choice but to drop.
		 */
		par->hotdrop = true;
		return false;
	}

	return icmp_type_code_match(icmpinfo->type,
				    icmpinfo->code[0],
				    icmpinfo->code[1],
				    ic->type, ic->code,
				    !!(icmpinfo->invflags&IPT_ICMP_INV));
}
static int icmp_checkentry(const struct xt_mtchk_param *par)
{
	const struct ipt_icmp *icmpinfo = par->matchinfo;

	/* Must specify no unknown invflags */
	return (icmpinfo->invflags & ~IPT_ICMP_INV) ? -EINVAL : 0;
}
static struct xt_target ipt_builtin_tg[] __read_mostly = {
	{
		.name             = XT_STANDARD_TARGET,
		.targetsize       = sizeof(int),
		.family           = NFPROTO_IPV4,
#ifdef CONFIG_COMPAT
		.compatsize       = sizeof(compat_int_t),
		.compat_from_user = compat_standard_from_user,
		.compat_to_user   = compat_standard_to_user,
#endif
	},
	{
		.name             = XT_ERROR_TARGET,
		.target           = ipt_error,
		.targetsize       = XT_FUNCTION_MAXNAMELEN,
		.family           = NFPROTO_IPV4,
	},
};
static struct nf_sockopt_ops ipt_sockopts = {
	.pf		= PF_INET,
	.set_optmin	= IPT_BASE_CTL,
	.set_optmax	= IPT_SO_SET_MAX+1,
	.set		= do_ipt_set_ctl,
#ifdef CONFIG_COMPAT
	.compat_set	= compat_do_ipt_set_ctl,
#endif
	.get_optmin	= IPT_BASE_CTL,
	.get_optmax	= IPT_SO_GET_MAX+1,
	.get		= do_ipt_get_ctl,
#ifdef CONFIG_COMPAT
	.compat_get	= compat_do_ipt_get_ctl,
#endif
	.owner		= THIS_MODULE,
};
static struct xt_match ipt_builtin_mt[] __read_mostly = {
	{
		.name       = "icmp",
		.match      = icmp_match,
		.matchsize  = sizeof(struct ipt_icmp),
		.checkentry = icmp_checkentry,
		.proto      = IPPROTO_ICMP,
		.family     = NFPROTO_IPV4,
	},
};
static int __net_init ip_tables_net_init(struct net *net)
{
	return xt_proto_init(net, NFPROTO_IPV4);
}

static void __net_exit ip_tables_net_exit(struct net *net)
{
	xt_proto_fini(net, NFPROTO_IPV4);
}

static struct pernet_operations ip_tables_net_ops = {
	.init = ip_tables_net_init,
	.exit = ip_tables_net_exit,
};
static int __init ip_tables_init(void)
{
	int ret;

	ret = register_pernet_subsys(&ip_tables_net_ops);
	if (ret < 0)
		goto err1;

	/* No one else will be downing sem now, so we won't sleep */
	ret = xt_register_targets(ipt_builtin_tg, ARRAY_SIZE(ipt_builtin_tg));
	if (ret < 0)
		goto err2;
	ret = xt_register_matches(ipt_builtin_mt, ARRAY_SIZE(ipt_builtin_mt));
	if (ret < 0)
		goto err4;

	/* Register setsockopt */
	ret = nf_register_sockopt(&ipt_sockopts);
	if (ret < 0)
		goto err5;

	pr_info("(C) 2000-2006 Netfilter Core Team\n");
	return 0;

err5:
	xt_unregister_matches(ipt_builtin_mt, ARRAY_SIZE(ipt_builtin_mt));
err4:
	xt_unregister_targets(ipt_builtin_tg, ARRAY_SIZE(ipt_builtin_tg));
err2:
	unregister_pernet_subsys(&ip_tables_net_ops);
err1:
	return ret;
}
static void __exit ip_tables_fini(void)
{
	nf_unregister_sockopt(&ipt_sockopts);

	xt_unregister_matches(ipt_builtin_mt, ARRAY_SIZE(ipt_builtin_mt));
	xt_unregister_targets(ipt_builtin_tg, ARRAY_SIZE(ipt_builtin_tg));
	unregister_pernet_subsys(&ip_tables_net_ops);
}
EXPORT_SYMBOL(ipt_register_table);
EXPORT_SYMBOL(ipt_unregister_table);
EXPORT_SYMBOL(ipt_do_table);
module_init(ip_tables_init);
module_exit(ip_tables_fini);