/*
 * Packet matching code.
 *
 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
 * Copyright (c) 2006-2010 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
13 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15 #include <linux/kernel.h>
16 #include <linux/capability.h>
18 #include <linux/skbuff.h>
19 #include <linux/kmod.h>
20 #include <linux/vmalloc.h>
21 #include <linux/netdevice.h>
22 #include <linux/module.h>
23 #include <linux/poison.h>
24 #include <linux/icmpv6.h>
26 #include <net/compat.h>
27 #include <asm/uaccess.h>
28 #include <linux/mutex.h>
29 #include <linux/proc_fs.h>
30 #include <linux/err.h>
31 #include <linux/cpumask.h>
33 #include <linux/netfilter_ipv6/ip6_tables.h>
34 #include <linux/netfilter/x_tables.h>
35 #include <net/netfilter/nf_log.h>
36 #include "../../netfilter/xt_repldata.h"
38 MODULE_LICENSE("GPL");
39 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
40 MODULE_DESCRIPTION("IPv6 packet filter");
42 #ifdef CONFIG_NETFILTER_DEBUG
43 #define IP_NF_ASSERT(x) WARN_ON(!(x))
45 #define IP_NF_ASSERT(x)
48 void *ip6t_alloc_initial_table(const struct xt_table
*info
)
50 return xt_alloc_initial_table(ip6t
, IP6T
);
52 EXPORT_SYMBOL_GPL(ip6t_alloc_initial_table
);
/* We keep a set of rules for each CPU, so we can avoid write-locking
   them in the softirq when updating the counters and therefore
   only need to read-lock in the softirq; doing a write_lock_bh() in user
   context stops packets coming through and allows user context to read
   the counters or update the rules.

   Hence the start of any table is given by get_table() below. */
63 /* Returns whether matches rule or not. */
64 /* Performance critical - called for every packet */
66 ip6_packet_match(const struct sk_buff
*skb
,
69 const struct ip6t_ip6
*ip6info
,
70 unsigned int *protoff
,
71 int *fragoff
, bool *hotdrop
)
74 const struct ipv6hdr
*ipv6
= ipv6_hdr(skb
);
76 #define FWINV(bool, invflg) ((bool) ^ !!(ip6info->invflags & (invflg)))
78 if (FWINV(ipv6_masked_addr_cmp(&ipv6
->saddr
, &ip6info
->smsk
,
79 &ip6info
->src
), IP6T_INV_SRCIP
) ||
80 FWINV(ipv6_masked_addr_cmp(&ipv6
->daddr
, &ip6info
->dmsk
,
81 &ip6info
->dst
), IP6T_INV_DSTIP
))
84 ret
= ifname_compare_aligned(indev
, ip6info
->iniface
, ip6info
->iniface_mask
);
86 if (FWINV(ret
!= 0, IP6T_INV_VIA_IN
))
89 ret
= ifname_compare_aligned(outdev
, ip6info
->outiface
, ip6info
->outiface_mask
);
91 if (FWINV(ret
!= 0, IP6T_INV_VIA_OUT
))
94 /* ... might want to do something with class and flowlabel here ... */
96 /* look for the desired protocol header */
97 if (ip6info
->flags
& IP6T_F_PROTO
) {
99 unsigned short _frag_off
;
101 protohdr
= ipv6_find_hdr(skb
, protoff
, -1, &_frag_off
, NULL
);
107 *fragoff
= _frag_off
;
109 if (ip6info
->proto
== protohdr
) {
110 if (ip6info
->invflags
& IP6T_INV_PROTO
)
116 /* We need match for the '-p all', too! */
117 if ((ip6info
->proto
!= 0) &&
118 !(ip6info
->invflags
& IP6T_INV_PROTO
))
124 /* should be ip6 safe */
126 ip6_checkentry(const struct ip6t_ip6
*ipv6
)
128 if (ipv6
->flags
& ~IP6T_F_MASK
)
130 if (ipv6
->invflags
& ~IP6T_INV_MASK
)
137 ip6t_error(struct sk_buff
*skb
, const struct xt_action_param
*par
)
139 net_info_ratelimited("error: `%s'\n", (const char *)par
->targinfo
);
/* Return a pointer to the rule located @offset bytes into the rule
 * blob starting at @base.  Offsets come from the validated table
 * layout (hook_entry/underflow/next_offset).
 */
static inline struct ip6t_entry *
get_entry(const void *base, unsigned int offset)
{
	const char *blob = base;

	return (struct ip6t_entry *)(blob + offset);
}
150 /* All zeroes == unconditional rule. */
151 /* Mildly perf critical (only if packet tracing is on) */
152 static inline bool unconditional(const struct ip6t_entry
*e
)
154 static const struct ip6t_ip6 uncond
;
156 return e
->target_offset
== sizeof(struct ip6t_entry
) &&
157 memcmp(&e
->ipv6
, &uncond
, sizeof(uncond
)) == 0;
/* const-qualified convenience wrapper around ip6t_get_target(): lets
 * read-only callers fetch an entry's target without spelling out the
 * cast themselves.
 */
static inline const struct xt_entry_target *
ip6t_get_target_c(const struct ip6t_entry *e)
{
	struct ip6t_entry *entry = (struct ip6t_entry *)e;

	return ip6t_get_target(entry);
}
166 #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
167 /* This cries for unification! */
168 static const char *const hooknames
[] = {
169 [NF_INET_PRE_ROUTING
] = "PREROUTING",
170 [NF_INET_LOCAL_IN
] = "INPUT",
171 [NF_INET_FORWARD
] = "FORWARD",
172 [NF_INET_LOCAL_OUT
] = "OUTPUT",
173 [NF_INET_POST_ROUTING
] = "POSTROUTING",
176 enum nf_ip_trace_comments
{
177 NF_IP6_TRACE_COMMENT_RULE
,
178 NF_IP6_TRACE_COMMENT_RETURN
,
179 NF_IP6_TRACE_COMMENT_POLICY
,
182 static const char *const comments
[] = {
183 [NF_IP6_TRACE_COMMENT_RULE
] = "rule",
184 [NF_IP6_TRACE_COMMENT_RETURN
] = "return",
185 [NF_IP6_TRACE_COMMENT_POLICY
] = "policy",
188 static struct nf_loginfo trace_loginfo
= {
189 .type
= NF_LOG_TYPE_LOG
,
192 .level
= LOGLEVEL_WARNING
,
193 .logflags
= NF_LOG_MASK
,
198 /* Mildly perf critical (only if packet tracing is on) */
200 get_chainname_rulenum(const struct ip6t_entry
*s
, const struct ip6t_entry
*e
,
201 const char *hookname
, const char **chainname
,
202 const char **comment
, unsigned int *rulenum
)
204 const struct xt_standard_target
*t
= (void *)ip6t_get_target_c(s
);
206 if (strcmp(t
->target
.u
.kernel
.target
->name
, XT_ERROR_TARGET
) == 0) {
207 /* Head of user chain: ERROR target with chainname */
208 *chainname
= t
->target
.data
;
213 if (unconditional(s
) &&
214 strcmp(t
->target
.u
.kernel
.target
->name
,
215 XT_STANDARD_TARGET
) == 0 &&
217 /* Tail of chains: STANDARD target (return/policy) */
218 *comment
= *chainname
== hookname
219 ? comments
[NF_IP6_TRACE_COMMENT_POLICY
]
220 : comments
[NF_IP6_TRACE_COMMENT_RETURN
];
229 static void trace_packet(struct net
*net
,
230 const struct sk_buff
*skb
,
232 const struct net_device
*in
,
233 const struct net_device
*out
,
234 const char *tablename
,
235 const struct xt_table_info
*private,
236 const struct ip6t_entry
*e
)
238 const struct ip6t_entry
*root
;
239 const char *hookname
, *chainname
, *comment
;
240 const struct ip6t_entry
*iter
;
241 unsigned int rulenum
= 0;
243 root
= get_entry(private->entries
, private->hook_entry
[hook
]);
245 hookname
= chainname
= hooknames
[hook
];
246 comment
= comments
[NF_IP6_TRACE_COMMENT_RULE
];
248 xt_entry_foreach(iter
, root
, private->size
- private->hook_entry
[hook
])
249 if (get_chainname_rulenum(iter
, e
, hookname
,
250 &chainname
, &comment
, &rulenum
) != 0)
253 nf_log_trace(net
, AF_INET6
, hook
, skb
, in
, out
, &trace_loginfo
,
254 "TRACE: %s:%s:%s:%u ",
255 tablename
, chainname
, comment
, rulenum
);
259 static inline struct ip6t_entry
*
260 ip6t_next_entry(const struct ip6t_entry
*entry
)
262 return (void *)entry
+ entry
->next_offset
;
265 /* Returns one of the generic firewall policies, like NF_ACCEPT. */
267 ip6t_do_table(struct sk_buff
*skb
,
268 const struct nf_hook_state
*state
,
269 struct xt_table
*table
)
271 unsigned int hook
= state
->hook
;
272 static const char nulldevname
[IFNAMSIZ
] __attribute__((aligned(sizeof(long))));
273 /* Initializing verdict to NF_DROP keeps gcc happy. */
274 unsigned int verdict
= NF_DROP
;
275 const char *indev
, *outdev
;
276 const void *table_base
;
277 struct ip6t_entry
*e
, **jumpstack
;
278 unsigned int stackidx
, cpu
;
279 const struct xt_table_info
*private;
280 struct xt_action_param acpar
;
285 indev
= state
->in
? state
->in
->name
: nulldevname
;
286 outdev
= state
->out
? state
->out
->name
: nulldevname
;
287 /* We handle fragments by dealing with the first fragment as
288 * if it was a normal packet. All other fragments are treated
289 * normally, except that they will NEVER match rules that ask
290 * things we don't know, ie. tcp syn flag or ports). If the
291 * rule is also a fragment-specific rule, non-fragments won't
293 acpar
.hotdrop
= false;
294 acpar
.net
= state
->net
;
295 acpar
.in
= state
->in
;
296 acpar
.out
= state
->out
;
297 acpar
.family
= NFPROTO_IPV6
;
298 acpar
.hooknum
= hook
;
300 IP_NF_ASSERT(table
->valid_hooks
& (1 << hook
));
303 addend
= xt_write_recseq_begin();
304 private = table
->private;
306 * Ensure we load private-> members after we've fetched the base
309 smp_read_barrier_depends();
310 cpu
= smp_processor_id();
311 table_base
= private->entries
;
312 jumpstack
= (struct ip6t_entry
**)private->jumpstack
[cpu
];
314 /* Switch to alternate jumpstack if we're being invoked via TEE.
315 * TEE issues XT_CONTINUE verdict on original skb so we must not
316 * clobber the jumpstack.
318 * For recursion via REJECT or SYNPROXY the stack will be clobbered
319 * but it is no problem since absolute verdict is issued by these.
321 if (static_key_false(&xt_tee_enabled
))
322 jumpstack
+= private->stacksize
* __this_cpu_read(nf_skb_duplicated
);
324 e
= get_entry(table_base
, private->hook_entry
[hook
]);
327 const struct xt_entry_target
*t
;
328 const struct xt_entry_match
*ematch
;
329 struct xt_counters
*counter
;
333 if (!ip6_packet_match(skb
, indev
, outdev
, &e
->ipv6
,
334 &acpar
.thoff
, &acpar
.fragoff
, &acpar
.hotdrop
)) {
336 e
= ip6t_next_entry(e
);
340 xt_ematch_foreach(ematch
, e
) {
341 acpar
.match
= ematch
->u
.kernel
.match
;
342 acpar
.matchinfo
= ematch
->data
;
343 if (!acpar
.match
->match(skb
, &acpar
))
347 counter
= xt_get_this_cpu_counter(&e
->counters
);
348 ADD_COUNTER(*counter
, skb
->len
, 1);
350 t
= ip6t_get_target_c(e
);
351 IP_NF_ASSERT(t
->u
.kernel
.target
);
353 #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
354 /* The packet is traced: log it */
355 if (unlikely(skb
->nf_trace
))
356 trace_packet(state
->net
, skb
, hook
, state
->in
,
357 state
->out
, table
->name
, private, e
);
359 /* Standard target? */
360 if (!t
->u
.kernel
.target
->target
) {
363 v
= ((struct xt_standard_target
*)t
)->verdict
;
365 /* Pop from stack? */
366 if (v
!= XT_RETURN
) {
367 verdict
= (unsigned int)(-v
) - 1;
371 e
= get_entry(table_base
,
372 private->underflow
[hook
]);
374 e
= ip6t_next_entry(jumpstack
[--stackidx
]);
377 if (table_base
+ v
!= ip6t_next_entry(e
) &&
378 !(e
->ipv6
.flags
& IP6T_F_GOTO
)) {
379 jumpstack
[stackidx
++] = e
;
382 e
= get_entry(table_base
, v
);
386 acpar
.target
= t
->u
.kernel
.target
;
387 acpar
.targinfo
= t
->data
;
389 verdict
= t
->u
.kernel
.target
->target(skb
, &acpar
);
390 if (verdict
== XT_CONTINUE
)
391 e
= ip6t_next_entry(e
);
395 } while (!acpar
.hotdrop
);
397 xt_write_recseq_end(addend
);
405 static bool find_jump_target(const struct xt_table_info
*t
,
406 const struct ip6t_entry
*target
)
408 struct ip6t_entry
*iter
;
410 xt_entry_foreach(iter
, t
->entries
, t
->size
) {
417 /* Figures out from what hook each rule can be called: returns 0 if
418 there are loops. Puts hook bitmask in comefrom. */
420 mark_source_chains(const struct xt_table_info
*newinfo
,
421 unsigned int valid_hooks
, void *entry0
)
425 /* No recursion; use packet counter to save back ptrs (reset
426 to 0 as we leave), and comefrom to save source hook bitmask */
427 for (hook
= 0; hook
< NF_INET_NUMHOOKS
; hook
++) {
428 unsigned int pos
= newinfo
->hook_entry
[hook
];
429 struct ip6t_entry
*e
= (struct ip6t_entry
*)(entry0
+ pos
);
431 if (!(valid_hooks
& (1 << hook
)))
434 /* Set initial back pointer. */
435 e
->counters
.pcnt
= pos
;
438 const struct xt_standard_target
*t
439 = (void *)ip6t_get_target_c(e
);
440 int visited
= e
->comefrom
& (1 << hook
);
442 if (e
->comefrom
& (1 << NF_INET_NUMHOOKS
))
445 e
->comefrom
|= ((1 << hook
) | (1 << NF_INET_NUMHOOKS
));
447 /* Unconditional return/END. */
448 if ((unconditional(e
) &&
449 (strcmp(t
->target
.u
.user
.name
,
450 XT_STANDARD_TARGET
) == 0) &&
451 t
->verdict
< 0) || visited
) {
452 unsigned int oldpos
, size
;
454 if ((strcmp(t
->target
.u
.user
.name
,
455 XT_STANDARD_TARGET
) == 0) &&
456 t
->verdict
< -NF_MAX_VERDICT
- 1)
459 /* Return: backtrack through the last
462 e
->comefrom
^= (1<<NF_INET_NUMHOOKS
);
464 pos
= e
->counters
.pcnt
;
465 e
->counters
.pcnt
= 0;
467 /* We're at the start. */
471 e
= (struct ip6t_entry
*)
473 } while (oldpos
== pos
+ e
->next_offset
);
476 size
= e
->next_offset
;
477 e
= (struct ip6t_entry
*)
478 (entry0
+ pos
+ size
);
479 if (pos
+ size
>= newinfo
->size
)
481 e
->counters
.pcnt
= pos
;
484 int newpos
= t
->verdict
;
486 if (strcmp(t
->target
.u
.user
.name
,
487 XT_STANDARD_TARGET
) == 0 &&
489 /* This a jump; chase it. */
490 e
= (struct ip6t_entry
*)
492 if (!find_jump_target(newinfo
, e
))
495 /* ... this is a fallthru */
496 newpos
= pos
+ e
->next_offset
;
497 if (newpos
>= newinfo
->size
)
500 e
= (struct ip6t_entry
*)
502 e
->counters
.pcnt
= pos
;
511 static void cleanup_match(struct xt_entry_match
*m
, struct net
*net
)
513 struct xt_mtdtor_param par
;
516 par
.match
= m
->u
.kernel
.match
;
517 par
.matchinfo
= m
->data
;
518 par
.family
= NFPROTO_IPV6
;
519 if (par
.match
->destroy
!= NULL
)
520 par
.match
->destroy(&par
);
521 module_put(par
.match
->me
);
524 static int check_match(struct xt_entry_match
*m
, struct xt_mtchk_param
*par
)
526 const struct ip6t_ip6
*ipv6
= par
->entryinfo
;
528 par
->match
= m
->u
.kernel
.match
;
529 par
->matchinfo
= m
->data
;
531 return xt_check_match(par
, m
->u
.match_size
- sizeof(*m
),
532 ipv6
->proto
, ipv6
->invflags
& IP6T_INV_PROTO
);
536 find_check_match(struct xt_entry_match
*m
, struct xt_mtchk_param
*par
)
538 struct xt_match
*match
;
541 match
= xt_request_find_match(NFPROTO_IPV6
, m
->u
.user
.name
,
544 return PTR_ERR(match
);
546 m
->u
.kernel
.match
= match
;
548 ret
= check_match(m
, par
);
554 module_put(m
->u
.kernel
.match
->me
);
558 static int check_target(struct ip6t_entry
*e
, struct net
*net
, const char *name
)
560 struct xt_entry_target
*t
= ip6t_get_target(e
);
561 struct xt_tgchk_param par
= {
565 .target
= t
->u
.kernel
.target
,
567 .hook_mask
= e
->comefrom
,
568 .family
= NFPROTO_IPV6
,
571 t
= ip6t_get_target(e
);
572 return xt_check_target(&par
, t
->u
.target_size
- sizeof(*t
),
574 e
->ipv6
.invflags
& IP6T_INV_PROTO
);
578 find_check_entry(struct ip6t_entry
*e
, struct net
*net
, const char *name
,
581 struct xt_entry_target
*t
;
582 struct xt_target
*target
;
585 struct xt_mtchk_param mtpar
;
586 struct xt_entry_match
*ematch
;
589 pcnt
= xt_percpu_counter_alloc();
590 if (IS_ERR_VALUE(pcnt
))
592 e
->counters
.pcnt
= pcnt
;
597 mtpar
.entryinfo
= &e
->ipv6
;
598 mtpar
.hook_mask
= e
->comefrom
;
599 mtpar
.family
= NFPROTO_IPV6
;
600 xt_ematch_foreach(ematch
, e
) {
601 ret
= find_check_match(ematch
, &mtpar
);
603 goto cleanup_matches
;
607 t
= ip6t_get_target(e
);
608 target
= xt_request_find_target(NFPROTO_IPV6
, t
->u
.user
.name
,
610 if (IS_ERR(target
)) {
611 ret
= PTR_ERR(target
);
612 goto cleanup_matches
;
614 t
->u
.kernel
.target
= target
;
616 ret
= check_target(e
, net
, name
);
621 module_put(t
->u
.kernel
.target
->me
);
623 xt_ematch_foreach(ematch
, e
) {
626 cleanup_match(ematch
, net
);
629 xt_percpu_counter_free(e
->counters
.pcnt
);
634 static bool check_underflow(const struct ip6t_entry
*e
)
636 const struct xt_entry_target
*t
;
637 unsigned int verdict
;
639 if (!unconditional(e
))
641 t
= ip6t_get_target_c(e
);
642 if (strcmp(t
->u
.user
.name
, XT_STANDARD_TARGET
) != 0)
644 verdict
= ((struct xt_standard_target
*)t
)->verdict
;
645 verdict
= -verdict
- 1;
646 return verdict
== NF_DROP
|| verdict
== NF_ACCEPT
;
650 check_entry_size_and_hooks(struct ip6t_entry
*e
,
651 struct xt_table_info
*newinfo
,
652 const unsigned char *base
,
653 const unsigned char *limit
,
654 const unsigned int *hook_entries
,
655 const unsigned int *underflows
,
656 unsigned int valid_hooks
)
661 if ((unsigned long)e
% __alignof__(struct ip6t_entry
) != 0 ||
662 (unsigned char *)e
+ sizeof(struct ip6t_entry
) >= limit
||
663 (unsigned char *)e
+ e
->next_offset
> limit
)
667 < sizeof(struct ip6t_entry
) + sizeof(struct xt_entry_target
))
670 if (!ip6_checkentry(&e
->ipv6
))
673 err
= xt_check_entry_offsets(e
, e
->elems
, e
->target_offset
,
678 /* Check hooks & underflows */
679 for (h
= 0; h
< NF_INET_NUMHOOKS
; h
++) {
680 if (!(valid_hooks
& (1 << h
)))
682 if ((unsigned char *)e
- base
== hook_entries
[h
])
683 newinfo
->hook_entry
[h
] = hook_entries
[h
];
684 if ((unsigned char *)e
- base
== underflows
[h
]) {
685 if (!check_underflow(e
))
688 newinfo
->underflow
[h
] = underflows
[h
];
692 /* Clear counters and comefrom */
693 e
->counters
= ((struct xt_counters
) { 0, 0 });
698 static void cleanup_entry(struct ip6t_entry
*e
, struct net
*net
)
700 struct xt_tgdtor_param par
;
701 struct xt_entry_target
*t
;
702 struct xt_entry_match
*ematch
;
704 /* Cleanup all matches */
705 xt_ematch_foreach(ematch
, e
)
706 cleanup_match(ematch
, net
);
707 t
= ip6t_get_target(e
);
710 par
.target
= t
->u
.kernel
.target
;
711 par
.targinfo
= t
->data
;
712 par
.family
= NFPROTO_IPV6
;
713 if (par
.target
->destroy
!= NULL
)
714 par
.target
->destroy(&par
);
715 module_put(par
.target
->me
);
717 xt_percpu_counter_free(e
->counters
.pcnt
);
720 /* Checks and translates the user-supplied table segment (held in
723 translate_table(struct net
*net
, struct xt_table_info
*newinfo
, void *entry0
,
724 const struct ip6t_replace
*repl
)
726 struct ip6t_entry
*iter
;
730 newinfo
->size
= repl
->size
;
731 newinfo
->number
= repl
->num_entries
;
733 /* Init all hooks to impossible value. */
734 for (i
= 0; i
< NF_INET_NUMHOOKS
; i
++) {
735 newinfo
->hook_entry
[i
] = 0xFFFFFFFF;
736 newinfo
->underflow
[i
] = 0xFFFFFFFF;
740 /* Walk through entries, checking offsets. */
741 xt_entry_foreach(iter
, entry0
, newinfo
->size
) {
742 ret
= check_entry_size_and_hooks(iter
, newinfo
, entry0
,
750 if (strcmp(ip6t_get_target(iter
)->u
.user
.name
,
751 XT_ERROR_TARGET
) == 0)
752 ++newinfo
->stacksize
;
755 if (i
!= repl
->num_entries
)
758 /* Check hooks all assigned */
759 for (i
= 0; i
< NF_INET_NUMHOOKS
; i
++) {
760 /* Only hooks which are valid */
761 if (!(repl
->valid_hooks
& (1 << i
)))
763 if (newinfo
->hook_entry
[i
] == 0xFFFFFFFF)
765 if (newinfo
->underflow
[i
] == 0xFFFFFFFF)
769 if (!mark_source_chains(newinfo
, repl
->valid_hooks
, entry0
))
772 /* Finally, each sanity check must pass */
774 xt_entry_foreach(iter
, entry0
, newinfo
->size
) {
775 ret
= find_check_entry(iter
, net
, repl
->name
, repl
->size
);
782 xt_entry_foreach(iter
, entry0
, newinfo
->size
) {
785 cleanup_entry(iter
, net
);
794 get_counters(const struct xt_table_info
*t
,
795 struct xt_counters counters
[])
797 struct ip6t_entry
*iter
;
801 for_each_possible_cpu(cpu
) {
802 seqcount_t
*s
= &per_cpu(xt_recseq
, cpu
);
805 xt_entry_foreach(iter
, t
->entries
, t
->size
) {
806 struct xt_counters
*tmp
;
810 tmp
= xt_get_per_cpu_counter(&iter
->counters
, cpu
);
812 start
= read_seqcount_begin(s
);
815 } while (read_seqcount_retry(s
, start
));
817 ADD_COUNTER(counters
[i
], bcnt
, pcnt
);
823 static struct xt_counters
*alloc_counters(const struct xt_table
*table
)
825 unsigned int countersize
;
826 struct xt_counters
*counters
;
827 const struct xt_table_info
*private = table
->private;
829 /* We need atomic snapshot of counters: rest doesn't change
830 (other than comefrom, which userspace doesn't care
832 countersize
= sizeof(struct xt_counters
) * private->number
;
833 counters
= vzalloc(countersize
);
835 if (counters
== NULL
)
836 return ERR_PTR(-ENOMEM
);
838 get_counters(private, counters
);
844 copy_entries_to_user(unsigned int total_size
,
845 const struct xt_table
*table
,
846 void __user
*userptr
)
848 unsigned int off
, num
;
849 const struct ip6t_entry
*e
;
850 struct xt_counters
*counters
;
851 const struct xt_table_info
*private = table
->private;
853 const void *loc_cpu_entry
;
855 counters
= alloc_counters(table
);
856 if (IS_ERR(counters
))
857 return PTR_ERR(counters
);
859 loc_cpu_entry
= private->entries
;
860 if (copy_to_user(userptr
, loc_cpu_entry
, total_size
) != 0) {
865 /* FIXME: use iterator macros --RR */
866 /* ... then go back and fix counters and names */
867 for (off
= 0, num
= 0; off
< total_size
; off
+= e
->next_offset
, num
++){
869 const struct xt_entry_match
*m
;
870 const struct xt_entry_target
*t
;
872 e
= (struct ip6t_entry
*)(loc_cpu_entry
+ off
);
873 if (copy_to_user(userptr
+ off
874 + offsetof(struct ip6t_entry
, counters
),
876 sizeof(counters
[num
])) != 0) {
881 for (i
= sizeof(struct ip6t_entry
);
882 i
< e
->target_offset
;
883 i
+= m
->u
.match_size
) {
886 if (copy_to_user(userptr
+ off
+ i
887 + offsetof(struct xt_entry_match
,
889 m
->u
.kernel
.match
->name
,
890 strlen(m
->u
.kernel
.match
->name
)+1)
897 t
= ip6t_get_target_c(e
);
898 if (copy_to_user(userptr
+ off
+ e
->target_offset
899 + offsetof(struct xt_entry_target
,
901 t
->u
.kernel
.target
->name
,
902 strlen(t
->u
.kernel
.target
->name
)+1) != 0) {
914 static void compat_standard_from_user(void *dst
, const void *src
)
916 int v
= *(compat_int_t
*)src
;
919 v
+= xt_compat_calc_jump(AF_INET6
, v
);
920 memcpy(dst
, &v
, sizeof(v
));
923 static int compat_standard_to_user(void __user
*dst
, const void *src
)
925 compat_int_t cv
= *(int *)src
;
928 cv
-= xt_compat_calc_jump(AF_INET6
, cv
);
929 return copy_to_user(dst
, &cv
, sizeof(cv
)) ? -EFAULT
: 0;
932 static int compat_calc_entry(const struct ip6t_entry
*e
,
933 const struct xt_table_info
*info
,
934 const void *base
, struct xt_table_info
*newinfo
)
936 const struct xt_entry_match
*ematch
;
937 const struct xt_entry_target
*t
;
938 unsigned int entry_offset
;
941 off
= sizeof(struct ip6t_entry
) - sizeof(struct compat_ip6t_entry
);
942 entry_offset
= (void *)e
- base
;
943 xt_ematch_foreach(ematch
, e
)
944 off
+= xt_compat_match_offset(ematch
->u
.kernel
.match
);
945 t
= ip6t_get_target_c(e
);
946 off
+= xt_compat_target_offset(t
->u
.kernel
.target
);
947 newinfo
->size
-= off
;
948 ret
= xt_compat_add_offset(AF_INET6
, entry_offset
, off
);
952 for (i
= 0; i
< NF_INET_NUMHOOKS
; i
++) {
953 if (info
->hook_entry
[i
] &&
954 (e
< (struct ip6t_entry
*)(base
+ info
->hook_entry
[i
])))
955 newinfo
->hook_entry
[i
] -= off
;
956 if (info
->underflow
[i
] &&
957 (e
< (struct ip6t_entry
*)(base
+ info
->underflow
[i
])))
958 newinfo
->underflow
[i
] -= off
;
963 static int compat_table_info(const struct xt_table_info
*info
,
964 struct xt_table_info
*newinfo
)
966 struct ip6t_entry
*iter
;
967 const void *loc_cpu_entry
;
970 if (!newinfo
|| !info
)
973 /* we dont care about newinfo->entries */
974 memcpy(newinfo
, info
, offsetof(struct xt_table_info
, entries
));
975 newinfo
->initial_entries
= 0;
976 loc_cpu_entry
= info
->entries
;
977 xt_compat_init_offsets(AF_INET6
, info
->number
);
978 xt_entry_foreach(iter
, loc_cpu_entry
, info
->size
) {
979 ret
= compat_calc_entry(iter
, info
, loc_cpu_entry
, newinfo
);
987 static int get_info(struct net
*net
, void __user
*user
,
988 const int *len
, int compat
)
990 char name
[XT_TABLE_MAXNAMELEN
];
994 if (*len
!= sizeof(struct ip6t_getinfo
))
997 if (copy_from_user(name
, user
, sizeof(name
)) != 0)
1000 name
[XT_TABLE_MAXNAMELEN
-1] = '\0';
1001 #ifdef CONFIG_COMPAT
1003 xt_compat_lock(AF_INET6
);
1005 t
= try_then_request_module(xt_find_table_lock(net
, AF_INET6
, name
),
1006 "ip6table_%s", name
);
1007 if (!IS_ERR_OR_NULL(t
)) {
1008 struct ip6t_getinfo info
;
1009 const struct xt_table_info
*private = t
->private;
1010 #ifdef CONFIG_COMPAT
1011 struct xt_table_info tmp
;
1014 ret
= compat_table_info(private, &tmp
);
1015 xt_compat_flush_offsets(AF_INET6
);
1019 memset(&info
, 0, sizeof(info
));
1020 info
.valid_hooks
= t
->valid_hooks
;
1021 memcpy(info
.hook_entry
, private->hook_entry
,
1022 sizeof(info
.hook_entry
));
1023 memcpy(info
.underflow
, private->underflow
,
1024 sizeof(info
.underflow
));
1025 info
.num_entries
= private->number
;
1026 info
.size
= private->size
;
1027 strcpy(info
.name
, name
);
1029 if (copy_to_user(user
, &info
, *len
) != 0)
1037 ret
= t
? PTR_ERR(t
) : -ENOENT
;
1038 #ifdef CONFIG_COMPAT
1040 xt_compat_unlock(AF_INET6
);
1046 get_entries(struct net
*net
, struct ip6t_get_entries __user
*uptr
,
1050 struct ip6t_get_entries get
;
1053 if (*len
< sizeof(get
))
1055 if (copy_from_user(&get
, uptr
, sizeof(get
)) != 0)
1057 if (*len
!= sizeof(struct ip6t_get_entries
) + get
.size
)
1060 get
.name
[sizeof(get
.name
) - 1] = '\0';
1062 t
= xt_find_table_lock(net
, AF_INET6
, get
.name
);
1063 if (!IS_ERR_OR_NULL(t
)) {
1064 struct xt_table_info
*private = t
->private;
1065 if (get
.size
== private->size
)
1066 ret
= copy_entries_to_user(private->size
,
1067 t
, uptr
->entrytable
);
1074 ret
= t
? PTR_ERR(t
) : -ENOENT
;
1080 __do_replace(struct net
*net
, const char *name
, unsigned int valid_hooks
,
1081 struct xt_table_info
*newinfo
, unsigned int num_counters
,
1082 void __user
*counters_ptr
)
1086 struct xt_table_info
*oldinfo
;
1087 struct xt_counters
*counters
;
1088 struct ip6t_entry
*iter
;
1091 counters
= vzalloc(num_counters
* sizeof(struct xt_counters
));
1097 t
= try_then_request_module(xt_find_table_lock(net
, AF_INET6
, name
),
1098 "ip6table_%s", name
);
1099 if (IS_ERR_OR_NULL(t
)) {
1100 ret
= t
? PTR_ERR(t
) : -ENOENT
;
1101 goto free_newinfo_counters_untrans
;
1105 if (valid_hooks
!= t
->valid_hooks
) {
1110 oldinfo
= xt_replace_table(t
, num_counters
, newinfo
, &ret
);
1114 /* Update module usage count based on number of rules */
1115 if ((oldinfo
->number
> oldinfo
->initial_entries
) ||
1116 (newinfo
->number
<= oldinfo
->initial_entries
))
1118 if ((oldinfo
->number
> oldinfo
->initial_entries
) &&
1119 (newinfo
->number
<= oldinfo
->initial_entries
))
1122 /* Get the old counters, and synchronize with replace */
1123 get_counters(oldinfo
, counters
);
1125 /* Decrease module usage counts and free resource */
1126 xt_entry_foreach(iter
, oldinfo
->entries
, oldinfo
->size
)
1127 cleanup_entry(iter
, net
);
1129 xt_free_table_info(oldinfo
);
1130 if (copy_to_user(counters_ptr
, counters
,
1131 sizeof(struct xt_counters
) * num_counters
) != 0) {
1132 /* Silent error, can't fail, new table is already in place */
1133 net_warn_ratelimited("ip6tables: counters copy to user failed while replacing table\n");
1142 free_newinfo_counters_untrans
:
1149 do_replace(struct net
*net
, const void __user
*user
, unsigned int len
)
1152 struct ip6t_replace tmp
;
1153 struct xt_table_info
*newinfo
;
1154 void *loc_cpu_entry
;
1155 struct ip6t_entry
*iter
;
1157 if (copy_from_user(&tmp
, user
, sizeof(tmp
)) != 0)
1160 /* overflow check */
1161 if (tmp
.num_counters
>= INT_MAX
/ sizeof(struct xt_counters
))
1163 if (tmp
.num_counters
== 0)
1166 tmp
.name
[sizeof(tmp
.name
)-1] = 0;
1168 newinfo
= xt_alloc_table_info(tmp
.size
);
1172 loc_cpu_entry
= newinfo
->entries
;
1173 if (copy_from_user(loc_cpu_entry
, user
+ sizeof(tmp
),
1179 ret
= translate_table(net
, newinfo
, loc_cpu_entry
, &tmp
);
1183 ret
= __do_replace(net
, tmp
.name
, tmp
.valid_hooks
, newinfo
,
1184 tmp
.num_counters
, tmp
.counters
);
1186 goto free_newinfo_untrans
;
1189 free_newinfo_untrans
:
1190 xt_entry_foreach(iter
, loc_cpu_entry
, newinfo
->size
)
1191 cleanup_entry(iter
, net
);
1193 xt_free_table_info(newinfo
);
1198 do_add_counters(struct net
*net
, const void __user
*user
, unsigned int len
,
1202 struct xt_counters_info tmp
;
1203 struct xt_counters
*paddc
;
1205 const struct xt_table_info
*private;
1207 struct ip6t_entry
*iter
;
1208 unsigned int addend
;
1210 paddc
= xt_copy_counters_from_user(user
, len
, &tmp
, compat
);
1212 return PTR_ERR(paddc
);
1213 t
= xt_find_table_lock(net
, AF_INET6
, tmp
.name
);
1214 if (IS_ERR_OR_NULL(t
)) {
1215 ret
= t
? PTR_ERR(t
) : -ENOENT
;
1220 private = t
->private;
1221 if (private->number
!= tmp
.num_counters
) {
1223 goto unlock_up_free
;
1227 addend
= xt_write_recseq_begin();
1228 xt_entry_foreach(iter
, private->entries
, private->size
) {
1229 struct xt_counters
*tmp
;
1231 tmp
= xt_get_this_cpu_counter(&iter
->counters
);
1232 ADD_COUNTER(*tmp
, paddc
[i
].bcnt
, paddc
[i
].pcnt
);
1235 xt_write_recseq_end(addend
);
1246 #ifdef CONFIG_COMPAT
1247 struct compat_ip6t_replace
{
1248 char name
[XT_TABLE_MAXNAMELEN
];
1252 u32 hook_entry
[NF_INET_NUMHOOKS
];
1253 u32 underflow
[NF_INET_NUMHOOKS
];
1255 compat_uptr_t counters
; /* struct xt_counters * */
1256 struct compat_ip6t_entry entries
[0];
1260 compat_copy_entry_to_user(struct ip6t_entry
*e
, void __user
**dstptr
,
1261 unsigned int *size
, struct xt_counters
*counters
,
1264 struct xt_entry_target
*t
;
1265 struct compat_ip6t_entry __user
*ce
;
1266 u_int16_t target_offset
, next_offset
;
1267 compat_uint_t origsize
;
1268 const struct xt_entry_match
*ematch
;
1272 ce
= (struct compat_ip6t_entry __user
*)*dstptr
;
1273 if (copy_to_user(ce
, e
, sizeof(struct ip6t_entry
)) != 0 ||
1274 copy_to_user(&ce
->counters
, &counters
[i
],
1275 sizeof(counters
[i
])) != 0)
1278 *dstptr
+= sizeof(struct compat_ip6t_entry
);
1279 *size
-= sizeof(struct ip6t_entry
) - sizeof(struct compat_ip6t_entry
);
1281 xt_ematch_foreach(ematch
, e
) {
1282 ret
= xt_compat_match_to_user(ematch
, dstptr
, size
);
1286 target_offset
= e
->target_offset
- (origsize
- *size
);
1287 t
= ip6t_get_target(e
);
1288 ret
= xt_compat_target_to_user(t
, dstptr
, size
);
1291 next_offset
= e
->next_offset
- (origsize
- *size
);
1292 if (put_user(target_offset
, &ce
->target_offset
) != 0 ||
1293 put_user(next_offset
, &ce
->next_offset
) != 0)
1299 compat_find_calc_match(struct xt_entry_match
*m
,
1300 const struct ip6t_ip6
*ipv6
,
1303 struct xt_match
*match
;
1305 match
= xt_request_find_match(NFPROTO_IPV6
, m
->u
.user
.name
,
1306 m
->u
.user
.revision
);
1308 return PTR_ERR(match
);
1310 m
->u
.kernel
.match
= match
;
1311 *size
+= xt_compat_match_offset(match
);
1315 static void compat_release_entry(struct compat_ip6t_entry
*e
)
1317 struct xt_entry_target
*t
;
1318 struct xt_entry_match
*ematch
;
1320 /* Cleanup all matches */
1321 xt_ematch_foreach(ematch
, e
)
1322 module_put(ematch
->u
.kernel
.match
->me
);
1323 t
= compat_ip6t_get_target(e
);
1324 module_put(t
->u
.kernel
.target
->me
);
1328 check_compat_entry_size_and_hooks(struct compat_ip6t_entry
*e
,
1329 struct xt_table_info
*newinfo
,
1331 const unsigned char *base
,
1332 const unsigned char *limit
)
1334 struct xt_entry_match
*ematch
;
1335 struct xt_entry_target
*t
;
1336 struct xt_target
*target
;
1337 unsigned int entry_offset
;
1341 if ((unsigned long)e
% __alignof__(struct compat_ip6t_entry
) != 0 ||
1342 (unsigned char *)e
+ sizeof(struct compat_ip6t_entry
) >= limit
||
1343 (unsigned char *)e
+ e
->next_offset
> limit
)
1346 if (e
->next_offset
< sizeof(struct compat_ip6t_entry
) +
1347 sizeof(struct compat_xt_entry_target
))
1350 if (!ip6_checkentry(&e
->ipv6
))
1353 ret
= xt_compat_check_entry_offsets(e
, e
->elems
,
1354 e
->target_offset
, e
->next_offset
);
1358 off
= sizeof(struct ip6t_entry
) - sizeof(struct compat_ip6t_entry
);
1359 entry_offset
= (void *)e
- (void *)base
;
1361 xt_ematch_foreach(ematch
, e
) {
1362 ret
= compat_find_calc_match(ematch
, &e
->ipv6
, &off
);
1364 goto release_matches
;
1368 t
= compat_ip6t_get_target(e
);
1369 target
= xt_request_find_target(NFPROTO_IPV6
, t
->u
.user
.name
,
1370 t
->u
.user
.revision
);
1371 if (IS_ERR(target
)) {
1372 ret
= PTR_ERR(target
);
1373 goto release_matches
;
1375 t
->u
.kernel
.target
= target
;
1377 off
+= xt_compat_target_offset(target
);
1379 ret
= xt_compat_add_offset(AF_INET6
, entry_offset
, off
);
1386 module_put(t
->u
.kernel
.target
->me
);
1388 xt_ematch_foreach(ematch
, e
) {
1391 module_put(ematch
->u
.kernel
.match
->me
);
1397 compat_copy_entry_from_user(struct compat_ip6t_entry
*e
, void **dstptr
,
1399 struct xt_table_info
*newinfo
, unsigned char *base
)
1401 struct xt_entry_target
*t
;
1402 struct ip6t_entry
*de
;
1403 unsigned int origsize
;
1405 struct xt_entry_match
*ematch
;
1408 de
= (struct ip6t_entry
*)*dstptr
;
1409 memcpy(de
, e
, sizeof(struct ip6t_entry
));
1410 memcpy(&de
->counters
, &e
->counters
, sizeof(e
->counters
));
1412 *dstptr
+= sizeof(struct ip6t_entry
);
1413 *size
+= sizeof(struct ip6t_entry
) - sizeof(struct compat_ip6t_entry
);
1415 xt_ematch_foreach(ematch
, e
)
1416 xt_compat_match_from_user(ematch
, dstptr
, size
);
1418 de
->target_offset
= e
->target_offset
- (origsize
- *size
);
1419 t
= compat_ip6t_get_target(e
);
1420 xt_compat_target_from_user(t
, dstptr
, size
);
1422 de
->next_offset
= e
->next_offset
- (origsize
- *size
);
1423 for (h
= 0; h
< NF_INET_NUMHOOKS
; h
++) {
1424 if ((unsigned char *)de
- base
< newinfo
->hook_entry
[h
])
1425 newinfo
->hook_entry
[h
] -= origsize
- *size
;
1426 if ((unsigned char *)de
- base
< newinfo
->underflow
[h
])
1427 newinfo
->underflow
[h
] -= origsize
- *size
;
1432 translate_compat_table(struct net
*net
,
1433 struct xt_table_info
**pinfo
,
1435 const struct compat_ip6t_replace
*compatr
)
1438 struct xt_table_info
*newinfo
, *info
;
1439 void *pos
, *entry0
, *entry1
;
1440 struct compat_ip6t_entry
*iter0
;
1441 struct ip6t_replace repl
;
1447 size
= compatr
->size
;
1448 info
->number
= compatr
->num_entries
;
1451 xt_compat_lock(AF_INET6
);
1452 xt_compat_init_offsets(AF_INET6
, compatr
->num_entries
);
1453 /* Walk through entries, checking offsets. */
1454 xt_entry_foreach(iter0
, entry0
, compatr
->size
) {
1455 ret
= check_compat_entry_size_and_hooks(iter0
, info
, &size
,
1457 entry0
+ compatr
->size
);
1464 if (j
!= compatr
->num_entries
)
1468 newinfo
= xt_alloc_table_info(size
);
1472 newinfo
->number
= compatr
->num_entries
;
1473 for (i
= 0; i
< NF_INET_NUMHOOKS
; i
++) {
1474 newinfo
->hook_entry
[i
] = compatr
->hook_entry
[i
];
1475 newinfo
->underflow
[i
] = compatr
->underflow
[i
];
1477 entry1
= newinfo
->entries
;
1479 size
= compatr
->size
;
1480 xt_entry_foreach(iter0
, entry0
, compatr
->size
)
1481 compat_copy_entry_from_user(iter0
, &pos
, &size
,
1484 /* all module references in entry0 are now gone. */
1485 xt_compat_flush_offsets(AF_INET6
);
1486 xt_compat_unlock(AF_INET6
);
1488 memcpy(&repl
, compatr
, sizeof(*compatr
));
1490 for (i
= 0; i
< NF_INET_NUMHOOKS
; i
++) {
1491 repl
.hook_entry
[i
] = newinfo
->hook_entry
[i
];
1492 repl
.underflow
[i
] = newinfo
->underflow
[i
];
1495 repl
.num_counters
= 0;
1496 repl
.counters
= NULL
;
1497 repl
.size
= newinfo
->size
;
1498 ret
= translate_table(net
, newinfo
, entry1
, &repl
);
1504 xt_free_table_info(info
);
1508 xt_free_table_info(newinfo
);
1511 xt_compat_flush_offsets(AF_INET6
);
1512 xt_compat_unlock(AF_INET6
);
1513 xt_entry_foreach(iter0
, entry0
, compatr
->size
) {
1516 compat_release_entry(iter0
);
1522 compat_do_replace(struct net
*net
, void __user
*user
, unsigned int len
)
1525 struct compat_ip6t_replace tmp
;
1526 struct xt_table_info
*newinfo
;
1527 void *loc_cpu_entry
;
1528 struct ip6t_entry
*iter
;
1530 if (copy_from_user(&tmp
, user
, sizeof(tmp
)) != 0)
1533 /* overflow check */
1534 if (tmp
.num_counters
>= INT_MAX
/ sizeof(struct xt_counters
))
1536 if (tmp
.num_counters
== 0)
1539 tmp
.name
[sizeof(tmp
.name
)-1] = 0;
1541 newinfo
= xt_alloc_table_info(tmp
.size
);
1545 loc_cpu_entry
= newinfo
->entries
;
1546 if (copy_from_user(loc_cpu_entry
, user
+ sizeof(tmp
),
1552 ret
= translate_compat_table(net
, &newinfo
, &loc_cpu_entry
, &tmp
);
1556 ret
= __do_replace(net
, tmp
.name
, tmp
.valid_hooks
, newinfo
,
1557 tmp
.num_counters
, compat_ptr(tmp
.counters
));
1559 goto free_newinfo_untrans
;
1562 free_newinfo_untrans
:
1563 xt_entry_foreach(iter
, loc_cpu_entry
, newinfo
->size
)
1564 cleanup_entry(iter
, net
);
1566 xt_free_table_info(newinfo
);
1571 compat_do_ip6t_set_ctl(struct sock
*sk
, int cmd
, void __user
*user
,
1576 if (!ns_capable(sock_net(sk
)->user_ns
, CAP_NET_ADMIN
))
1580 case IP6T_SO_SET_REPLACE
:
1581 ret
= compat_do_replace(sock_net(sk
), user
, len
);
1584 case IP6T_SO_SET_ADD_COUNTERS
:
1585 ret
= do_add_counters(sock_net(sk
), user
, len
, 1);
1595 struct compat_ip6t_get_entries
{
1596 char name
[XT_TABLE_MAXNAMELEN
];
1598 struct compat_ip6t_entry entrytable
[0];
1602 compat_copy_entries_to_user(unsigned int total_size
, struct xt_table
*table
,
1603 void __user
*userptr
)
1605 struct xt_counters
*counters
;
1606 const struct xt_table_info
*private = table
->private;
1611 struct ip6t_entry
*iter
;
1613 counters
= alloc_counters(table
);
1614 if (IS_ERR(counters
))
1615 return PTR_ERR(counters
);
1619 xt_entry_foreach(iter
, private->entries
, total_size
) {
1620 ret
= compat_copy_entry_to_user(iter
, &pos
,
1621 &size
, counters
, i
++);
1631 compat_get_entries(struct net
*net
, struct compat_ip6t_get_entries __user
*uptr
,
1635 struct compat_ip6t_get_entries get
;
1638 if (*len
< sizeof(get
))
1641 if (copy_from_user(&get
, uptr
, sizeof(get
)) != 0)
1644 if (*len
!= sizeof(struct compat_ip6t_get_entries
) + get
.size
)
1647 get
.name
[sizeof(get
.name
) - 1] = '\0';
1649 xt_compat_lock(AF_INET6
);
1650 t
= xt_find_table_lock(net
, AF_INET6
, get
.name
);
1651 if (!IS_ERR_OR_NULL(t
)) {
1652 const struct xt_table_info
*private = t
->private;
1653 struct xt_table_info info
;
1654 ret
= compat_table_info(private, &info
);
1655 if (!ret
&& get
.size
== info
.size
)
1656 ret
= compat_copy_entries_to_user(private->size
,
1657 t
, uptr
->entrytable
);
1661 xt_compat_flush_offsets(AF_INET6
);
1665 ret
= t
? PTR_ERR(t
) : -ENOENT
;
1667 xt_compat_unlock(AF_INET6
);
1671 static int do_ip6t_get_ctl(struct sock
*, int, void __user
*, int *);
1674 compat_do_ip6t_get_ctl(struct sock
*sk
, int cmd
, void __user
*user
, int *len
)
1678 if (!ns_capable(sock_net(sk
)->user_ns
, CAP_NET_ADMIN
))
1682 case IP6T_SO_GET_INFO
:
1683 ret
= get_info(sock_net(sk
), user
, len
, 1);
1685 case IP6T_SO_GET_ENTRIES
:
1686 ret
= compat_get_entries(sock_net(sk
), user
, len
);
1689 ret
= do_ip6t_get_ctl(sk
, cmd
, user
, len
);
1696 do_ip6t_set_ctl(struct sock
*sk
, int cmd
, void __user
*user
, unsigned int len
)
1700 if (!ns_capable(sock_net(sk
)->user_ns
, CAP_NET_ADMIN
))
1704 case IP6T_SO_SET_REPLACE
:
1705 ret
= do_replace(sock_net(sk
), user
, len
);
1708 case IP6T_SO_SET_ADD_COUNTERS
:
1709 ret
= do_add_counters(sock_net(sk
), user
, len
, 0);
1720 do_ip6t_get_ctl(struct sock
*sk
, int cmd
, void __user
*user
, int *len
)
1724 if (!ns_capable(sock_net(sk
)->user_ns
, CAP_NET_ADMIN
))
1728 case IP6T_SO_GET_INFO
:
1729 ret
= get_info(sock_net(sk
), user
, len
, 0);
1732 case IP6T_SO_GET_ENTRIES
:
1733 ret
= get_entries(sock_net(sk
), user
, len
);
1736 case IP6T_SO_GET_REVISION_MATCH
:
1737 case IP6T_SO_GET_REVISION_TARGET
: {
1738 struct xt_get_revision rev
;
1741 if (*len
!= sizeof(rev
)) {
1745 if (copy_from_user(&rev
, user
, sizeof(rev
)) != 0) {
1749 rev
.name
[sizeof(rev
.name
)-1] = 0;
1751 if (cmd
== IP6T_SO_GET_REVISION_TARGET
)
1756 try_then_request_module(xt_find_revision(AF_INET6
, rev
.name
,
1759 "ip6t_%s", rev
.name
);
1770 static void __ip6t_unregister_table(struct net
*net
, struct xt_table
*table
)
1772 struct xt_table_info
*private;
1773 void *loc_cpu_entry
;
1774 struct module
*table_owner
= table
->me
;
1775 struct ip6t_entry
*iter
;
1777 private = xt_unregister_table(table
);
1779 /* Decrease module usage counts and free resources */
1780 loc_cpu_entry
= private->entries
;
1781 xt_entry_foreach(iter
, loc_cpu_entry
, private->size
)
1782 cleanup_entry(iter
, net
);
1783 if (private->number
> private->initial_entries
)
1784 module_put(table_owner
);
1785 xt_free_table_info(private);
1788 int ip6t_register_table(struct net
*net
, const struct xt_table
*table
,
1789 const struct ip6t_replace
*repl
,
1790 const struct nf_hook_ops
*ops
,
1791 struct xt_table
**res
)
1794 struct xt_table_info
*newinfo
;
1795 struct xt_table_info bootstrap
= {0};
1796 void *loc_cpu_entry
;
1797 struct xt_table
*new_table
;
1799 newinfo
= xt_alloc_table_info(repl
->size
);
1803 loc_cpu_entry
= newinfo
->entries
;
1804 memcpy(loc_cpu_entry
, repl
->entries
, repl
->size
);
1806 ret
= translate_table(net
, newinfo
, loc_cpu_entry
, repl
);
1810 new_table
= xt_register_table(net
, table
, &bootstrap
, newinfo
);
1811 if (IS_ERR(new_table
)) {
1812 ret
= PTR_ERR(new_table
);
1816 /* set res now, will see skbs right after nf_register_net_hooks */
1817 WRITE_ONCE(*res
, new_table
);
1819 ret
= nf_register_net_hooks(net
, ops
, hweight32(table
->valid_hooks
));
1821 __ip6t_unregister_table(net
, new_table
);
1828 xt_free_table_info(newinfo
);
1832 void ip6t_unregister_table(struct net
*net
, struct xt_table
*table
,
1833 const struct nf_hook_ops
*ops
)
1835 nf_unregister_net_hooks(net
, ops
, hweight32(table
->valid_hooks
));
1836 __ip6t_unregister_table(net
, table
);
/* Returns 1 if the type and code is matched by the range, 0 otherwise */
static inline bool
icmp6_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
		      u_int8_t type, u_int8_t code,
		      bool invert)
{
	/* XOR with invert implements the "!" flag in one expression. */
	return (type == test_type && code >= min_code && code <= max_code)
		^ invert;
}
1850 icmp6_match(const struct sk_buff
*skb
, struct xt_action_param
*par
)
1852 const struct icmp6hdr
*ic
;
1853 struct icmp6hdr _icmph
;
1854 const struct ip6t_icmp
*icmpinfo
= par
->matchinfo
;
1856 /* Must not be a fragment. */
1857 if (par
->fragoff
!= 0)
1860 ic
= skb_header_pointer(skb
, par
->thoff
, sizeof(_icmph
), &_icmph
);
1862 /* We've been asked to examine this packet, and we
1863 * can't. Hence, no choice but to drop.
1865 par
->hotdrop
= true;
1869 return icmp6_type_code_match(icmpinfo
->type
,
1872 ic
->icmp6_type
, ic
->icmp6_code
,
1873 !!(icmpinfo
->invflags
&IP6T_ICMP_INV
));
1876 /* Called when user tries to insert an entry of this type. */
1877 static int icmp6_checkentry(const struct xt_mtchk_param
*par
)
1879 const struct ip6t_icmp
*icmpinfo
= par
->matchinfo
;
1881 /* Must specify no unknown invflags */
1882 return (icmpinfo
->invflags
& ~IP6T_ICMP_INV
) ? -EINVAL
: 0;
1885 /* The built-in targets: standard (NULL) and error. */
1886 static struct xt_target ip6t_builtin_tg
[] __read_mostly
= {
1888 .name
= XT_STANDARD_TARGET
,
1889 .targetsize
= sizeof(int),
1890 .family
= NFPROTO_IPV6
,
1891 #ifdef CONFIG_COMPAT
1892 .compatsize
= sizeof(compat_int_t
),
1893 .compat_from_user
= compat_standard_from_user
,
1894 .compat_to_user
= compat_standard_to_user
,
1898 .name
= XT_ERROR_TARGET
,
1899 .target
= ip6t_error
,
1900 .targetsize
= XT_FUNCTION_MAXNAMELEN
,
1901 .family
= NFPROTO_IPV6
,
1905 static struct nf_sockopt_ops ip6t_sockopts
= {
1907 .set_optmin
= IP6T_BASE_CTL
,
1908 .set_optmax
= IP6T_SO_SET_MAX
+1,
1909 .set
= do_ip6t_set_ctl
,
1910 #ifdef CONFIG_COMPAT
1911 .compat_set
= compat_do_ip6t_set_ctl
,
1913 .get_optmin
= IP6T_BASE_CTL
,
1914 .get_optmax
= IP6T_SO_GET_MAX
+1,
1915 .get
= do_ip6t_get_ctl
,
1916 #ifdef CONFIG_COMPAT
1917 .compat_get
= compat_do_ip6t_get_ctl
,
1919 .owner
= THIS_MODULE
,
1922 static struct xt_match ip6t_builtin_mt
[] __read_mostly
= {
1925 .match
= icmp6_match
,
1926 .matchsize
= sizeof(struct ip6t_icmp
),
1927 .checkentry
= icmp6_checkentry
,
1928 .proto
= IPPROTO_ICMPV6
,
1929 .family
= NFPROTO_IPV6
,
1933 static int __net_init
ip6_tables_net_init(struct net
*net
)
1935 return xt_proto_init(net
, NFPROTO_IPV6
);
1938 static void __net_exit
ip6_tables_net_exit(struct net
*net
)
1940 xt_proto_fini(net
, NFPROTO_IPV6
);
1943 static struct pernet_operations ip6_tables_net_ops
= {
1944 .init
= ip6_tables_net_init
,
1945 .exit
= ip6_tables_net_exit
,
1948 static int __init
ip6_tables_init(void)
1952 ret
= register_pernet_subsys(&ip6_tables_net_ops
);
1956 /* No one else will be downing sem now, so we won't sleep */
1957 ret
= xt_register_targets(ip6t_builtin_tg
, ARRAY_SIZE(ip6t_builtin_tg
));
1960 ret
= xt_register_matches(ip6t_builtin_mt
, ARRAY_SIZE(ip6t_builtin_mt
));
1964 /* Register setsockopt */
1965 ret
= nf_register_sockopt(&ip6t_sockopts
);
1969 pr_info("(C) 2000-2006 Netfilter Core Team\n");
1973 xt_unregister_matches(ip6t_builtin_mt
, ARRAY_SIZE(ip6t_builtin_mt
));
1975 xt_unregister_targets(ip6t_builtin_tg
, ARRAY_SIZE(ip6t_builtin_tg
));
1977 unregister_pernet_subsys(&ip6_tables_net_ops
);
1982 static void __exit
ip6_tables_fini(void)
1984 nf_unregister_sockopt(&ip6t_sockopts
);
1986 xt_unregister_matches(ip6t_builtin_mt
, ARRAY_SIZE(ip6t_builtin_mt
));
1987 xt_unregister_targets(ip6t_builtin_tg
, ARRAY_SIZE(ip6t_builtin_tg
));
1988 unregister_pernet_subsys(&ip6_tables_net_ops
);
/* Exported API for table modules (filter, mangle, raw, security). */
EXPORT_SYMBOL(ip6t_register_table);
EXPORT_SYMBOL(ip6t_unregister_table);
EXPORT_SYMBOL(ip6t_do_table);

module_init(ip6_tables_init);
module_exit(ip6_tables_fini);