5 * Bart De Schuymer <bdschuym@pandora.be>
7 * ebtables.c,v 2.0, July, 2002
9 * This code is strongly inspired on the iptables code which is
10 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version
15 * 2 of the License, or (at your option) any later version.
17 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18 #include <linux/kmod.h>
19 #include <linux/module.h>
20 #include <linux/vmalloc.h>
21 #include <linux/netfilter/x_tables.h>
22 #include <linux/netfilter_bridge/ebtables.h>
23 #include <linux/spinlock.h>
24 #include <linux/mutex.h>
25 #include <linux/slab.h>
26 #include <asm/uaccess.h>
27 #include <linux/smp.h>
28 #include <linux/cpumask.h>
29 #include <linux/audit.h>
31 /* needed for logical [in,out]-dev filtering */
32 #include "../br_private.h"
34 #define BUGPRINT(format, args...) printk("kernel msg: ebtables bug: please "\
35 "report to author: "format, ## args)
36 /* #define BUGPRINT(format, args...) */
39 * Each cpu has its own set of counters, so there is no need for write_lock in
41 * For reading or updating the counters, the user context needs to
45 /* The size of each set of counters is altered to get cache alignment */
46 #define SMP_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1))
47 #define COUNTER_OFFSET(n) (SMP_ALIGN(n * sizeof(struct ebt_counter)))
48 #define COUNTER_BASE(c, n, cpu) ((struct ebt_counter *)(((char *)c) + \
49 COUNTER_OFFSET(n) * cpu))
53 static DEFINE_MUTEX(ebt_mutex
);
56 static void ebt_standard_compat_from_user(void *dst
, const void *src
)
58 int v
= *(compat_int_t
*)src
;
61 v
+= xt_compat_calc_jump(NFPROTO_BRIDGE
, v
);
62 memcpy(dst
, &v
, sizeof(v
));
65 static int ebt_standard_compat_to_user(void __user
*dst
, const void *src
)
67 compat_int_t cv
= *(int *)src
;
70 cv
-= xt_compat_calc_jump(NFPROTO_BRIDGE
, cv
);
71 return copy_to_user(dst
, &cv
, sizeof(cv
)) ? -EFAULT
: 0;
76 static struct xt_target ebt_standard_target
= {
79 .family
= NFPROTO_BRIDGE
,
80 .targetsize
= sizeof(int),
82 .compatsize
= sizeof(compat_int_t
),
83 .compat_from_user
= ebt_standard_compat_from_user
,
84 .compat_to_user
= ebt_standard_compat_to_user
,
89 ebt_do_watcher(const struct ebt_entry_watcher
*w
, struct sk_buff
*skb
,
90 struct xt_action_param
*par
)
92 par
->target
= w
->u
.watcher
;
93 par
->targinfo
= w
->data
;
94 w
->u
.watcher
->target(skb
, par
);
95 /* watchers don't give a verdict */
100 ebt_do_match(struct ebt_entry_match
*m
, const struct sk_buff
*skb
,
101 struct xt_action_param
*par
)
103 par
->match
= m
->u
.match
;
104 par
->matchinfo
= m
->data
;
105 return m
->u
.match
->match(skb
, par
) ? EBT_MATCH
: EBT_NOMATCH
;
109 ebt_dev_check(const char *entry
, const struct net_device
*device
)
118 devname
= device
->name
;
119 /* 1 is the wildcard token */
120 while (entry
[i
] != '\0' && entry
[i
] != 1 && entry
[i
] == devname
[i
])
122 return devname
[i
] != entry
[i
] && entry
[i
] != 1;
125 #define FWINV2(bool, invflg) ((bool) ^ !!(e->invflags & invflg))
126 /* process standard matches */
128 ebt_basic_match(const struct ebt_entry
*e
, const struct sk_buff
*skb
,
129 const struct net_device
*in
, const struct net_device
*out
)
131 const struct ethhdr
*h
= eth_hdr(skb
);
132 const struct net_bridge_port
*p
;
136 if (skb_vlan_tag_present(skb
))
137 ethproto
= htons(ETH_P_8021Q
);
139 ethproto
= h
->h_proto
;
141 if (e
->bitmask
& EBT_802_3
) {
142 if (FWINV2(ntohs(ethproto
) >= ETH_P_802_3_MIN
, EBT_IPROTO
))
144 } else if (!(e
->bitmask
& EBT_NOPROTO
) &&
145 FWINV2(e
->ethproto
!= ethproto
, EBT_IPROTO
))
148 if (FWINV2(ebt_dev_check(e
->in
, in
), EBT_IIN
))
150 if (FWINV2(ebt_dev_check(e
->out
, out
), EBT_IOUT
))
152 /* rcu_read_lock()ed by nf_hook_slow */
153 if (in
&& (p
= br_port_get_rcu(in
)) != NULL
&&
154 FWINV2(ebt_dev_check(e
->logical_in
, p
->br
->dev
), EBT_ILOGICALIN
))
156 if (out
&& (p
= br_port_get_rcu(out
)) != NULL
&&
157 FWINV2(ebt_dev_check(e
->logical_out
, p
->br
->dev
), EBT_ILOGICALOUT
))
160 if (e
->bitmask
& EBT_SOURCEMAC
) {
162 for (i
= 0; i
< 6; i
++)
163 verdict
|= (h
->h_source
[i
] ^ e
->sourcemac
[i
]) &
165 if (FWINV2(verdict
!= 0, EBT_ISOURCE
) )
168 if (e
->bitmask
& EBT_DESTMAC
) {
170 for (i
= 0; i
< 6; i
++)
171 verdict
|= (h
->h_dest
[i
] ^ e
->destmac
[i
]) &
173 if (FWINV2(verdict
!= 0, EBT_IDEST
) )
180 struct ebt_entry
*ebt_next_entry(const struct ebt_entry
*entry
)
182 return (void *)entry
+ entry
->next_offset
;
185 /* Do some firewalling */
186 unsigned int ebt_do_table (unsigned int hook
, struct sk_buff
*skb
,
187 const struct net_device
*in
, const struct net_device
*out
,
188 struct ebt_table
*table
)
191 struct ebt_entry
*point
;
192 struct ebt_counter
*counter_base
, *cb_base
;
193 const struct ebt_entry_target
*t
;
195 struct ebt_chainstack
*cs
;
196 struct ebt_entries
*chaininfo
;
198 const struct ebt_table_info
*private;
199 struct xt_action_param acpar
;
201 acpar
.family
= NFPROTO_BRIDGE
;
204 acpar
.hotdrop
= false;
205 acpar
.hooknum
= hook
;
207 read_lock_bh(&table
->lock
);
208 private = table
->private;
209 cb_base
= COUNTER_BASE(private->counters
, private->nentries
,
211 if (private->chainstack
)
212 cs
= private->chainstack
[smp_processor_id()];
215 chaininfo
= private->hook_entry
[hook
];
216 nentries
= private->hook_entry
[hook
]->nentries
;
217 point
= (struct ebt_entry
*)(private->hook_entry
[hook
]->data
);
218 counter_base
= cb_base
+ private->hook_entry
[hook
]->counter_offset
;
219 /* base for chain jumps */
220 base
= private->entries
;
222 while (i
< nentries
) {
223 if (ebt_basic_match(point
, skb
, in
, out
))
226 if (EBT_MATCH_ITERATE(point
, ebt_do_match
, skb
, &acpar
) != 0)
229 read_unlock_bh(&table
->lock
);
233 /* increase counter */
234 (*(counter_base
+ i
)).pcnt
++;
235 (*(counter_base
+ i
)).bcnt
+= skb
->len
;
237 /* these should only watch: not modify, nor tell us
238 what to do with the packet */
239 EBT_WATCHER_ITERATE(point
, ebt_do_watcher
, skb
, &acpar
);
241 t
= (struct ebt_entry_target
*)
242 (((char *)point
) + point
->target_offset
);
243 /* standard target */
244 if (!t
->u
.target
->target
)
245 verdict
= ((struct ebt_standard_target
*)t
)->verdict
;
247 acpar
.target
= t
->u
.target
;
248 acpar
.targinfo
= t
->data
;
249 verdict
= t
->u
.target
->target(skb
, &acpar
);
251 if (verdict
== EBT_ACCEPT
) {
252 read_unlock_bh(&table
->lock
);
255 if (verdict
== EBT_DROP
) {
256 read_unlock_bh(&table
->lock
);
259 if (verdict
== EBT_RETURN
) {
261 #ifdef CONFIG_NETFILTER_DEBUG
263 BUGPRINT("RETURN on base chain");
264 /* act like this is EBT_CONTINUE */
269 /* put all the local variables right */
271 chaininfo
= cs
[sp
].chaininfo
;
272 nentries
= chaininfo
->nentries
;
274 counter_base
= cb_base
+
275 chaininfo
->counter_offset
;
278 if (verdict
== EBT_CONTINUE
)
280 #ifdef CONFIG_NETFILTER_DEBUG
282 BUGPRINT("bogus standard verdict\n");
283 read_unlock_bh(&table
->lock
);
289 cs
[sp
].chaininfo
= chaininfo
;
290 cs
[sp
].e
= ebt_next_entry(point
);
292 chaininfo
= (struct ebt_entries
*) (base
+ verdict
);
293 #ifdef CONFIG_NETFILTER_DEBUG
294 if (chaininfo
->distinguisher
) {
295 BUGPRINT("jump to non-chain\n");
296 read_unlock_bh(&table
->lock
);
300 nentries
= chaininfo
->nentries
;
301 point
= (struct ebt_entry
*)chaininfo
->data
;
302 counter_base
= cb_base
+ chaininfo
->counter_offset
;
306 point
= ebt_next_entry(point
);
310 /* I actually like this :) */
311 if (chaininfo
->policy
== EBT_RETURN
)
313 if (chaininfo
->policy
== EBT_ACCEPT
) {
314 read_unlock_bh(&table
->lock
);
317 read_unlock_bh(&table
->lock
);
321 /* If it succeeds, returns element and locks mutex */
323 find_inlist_lock_noload(struct list_head
*head
, const char *name
, int *error
,
327 struct list_head list
;
328 char name
[EBT_FUNCTION_MAXNAMELEN
];
332 list_for_each_entry(e
, head
, list
) {
333 if (strcmp(e
->name
, name
) == 0)
/* Like find_inlist_lock_noload(), but on a miss tries to load the module
 * "<prefix><name>" via request_module() and searches again.
 */
static void *
find_inlist_lock(struct list_head *head, const char *name, const char *prefix,
		 int *error, struct mutex *mutex)
{
	return try_then_request_module(
			find_inlist_lock_noload(head, name, error, mutex),
			"%s%s", prefix, name);
}
350 static inline struct ebt_table
*
351 find_table_lock(struct net
*net
, const char *name
, int *error
,
354 return find_inlist_lock(&net
->xt
.tables
[NFPROTO_BRIDGE
], name
,
355 "ebtable_", error
, mutex
);
359 ebt_check_match(struct ebt_entry_match
*m
, struct xt_mtchk_param
*par
,
362 const struct ebt_entry
*e
= par
->entryinfo
;
363 struct xt_match
*match
;
364 size_t left
= ((char *)e
+ e
->watchers_offset
) - (char *)m
;
367 if (left
< sizeof(struct ebt_entry_match
) ||
368 left
- sizeof(struct ebt_entry_match
) < m
->match_size
)
371 match
= xt_request_find_match(NFPROTO_BRIDGE
, m
->u
.name
, 0);
373 return PTR_ERR(match
);
377 par
->matchinfo
= m
->data
;
378 ret
= xt_check_match(par
, m
->match_size
,
379 e
->ethproto
, e
->invflags
& EBT_IPROTO
);
381 module_put(match
->me
);
390 ebt_check_watcher(struct ebt_entry_watcher
*w
, struct xt_tgchk_param
*par
,
393 const struct ebt_entry
*e
= par
->entryinfo
;
394 struct xt_target
*watcher
;
395 size_t left
= ((char *)e
+ e
->target_offset
) - (char *)w
;
398 if (left
< sizeof(struct ebt_entry_watcher
) ||
399 left
- sizeof(struct ebt_entry_watcher
) < w
->watcher_size
)
402 watcher
= xt_request_find_target(NFPROTO_BRIDGE
, w
->u
.name
, 0);
404 return PTR_ERR(watcher
);
405 w
->u
.watcher
= watcher
;
407 par
->target
= watcher
;
408 par
->targinfo
= w
->data
;
409 ret
= xt_check_target(par
, w
->watcher_size
,
410 e
->ethproto
, e
->invflags
& EBT_IPROTO
);
412 module_put(watcher
->me
);
420 static int ebt_verify_pointers(const struct ebt_replace
*repl
,
421 struct ebt_table_info
*newinfo
)
423 unsigned int limit
= repl
->entries_size
;
424 unsigned int valid_hooks
= repl
->valid_hooks
;
425 unsigned int offset
= 0;
428 for (i
= 0; i
< NF_BR_NUMHOOKS
; i
++)
429 newinfo
->hook_entry
[i
] = NULL
;
431 newinfo
->entries_size
= repl
->entries_size
;
432 newinfo
->nentries
= repl
->nentries
;
434 while (offset
< limit
) {
435 size_t left
= limit
- offset
;
436 struct ebt_entry
*e
= (void *)newinfo
->entries
+ offset
;
438 if (left
< sizeof(unsigned int))
441 for (i
= 0; i
< NF_BR_NUMHOOKS
; i
++) {
442 if ((valid_hooks
& (1 << i
)) == 0)
444 if ((char __user
*)repl
->hook_entry
[i
] ==
445 repl
->entries
+ offset
)
449 if (i
!= NF_BR_NUMHOOKS
|| !(e
->bitmask
& EBT_ENTRY_OR_ENTRIES
)) {
450 if (e
->bitmask
!= 0) {
451 /* we make userspace set this right,
452 so there is no misunderstanding */
453 BUGPRINT("EBT_ENTRY_OR_ENTRIES shouldn't be set "
454 "in distinguisher\n");
457 if (i
!= NF_BR_NUMHOOKS
)
458 newinfo
->hook_entry
[i
] = (struct ebt_entries
*)e
;
459 if (left
< sizeof(struct ebt_entries
))
461 offset
+= sizeof(struct ebt_entries
);
463 if (left
< sizeof(struct ebt_entry
))
465 if (left
< e
->next_offset
)
467 if (e
->next_offset
< sizeof(struct ebt_entry
))
469 offset
+= e
->next_offset
;
472 if (offset
!= limit
) {
473 BUGPRINT("entries_size too small\n");
477 /* check if all valid hooks have a chain */
478 for (i
= 0; i
< NF_BR_NUMHOOKS
; i
++) {
479 if (!newinfo
->hook_entry
[i
] &&
480 (valid_hooks
& (1 << i
))) {
481 BUGPRINT("Valid hook without chain\n");
489 * this one is very careful, as it is the first function
490 * to parse the userspace data
493 ebt_check_entry_size_and_hooks(const struct ebt_entry
*e
,
494 const struct ebt_table_info
*newinfo
,
495 unsigned int *n
, unsigned int *cnt
,
496 unsigned int *totalcnt
, unsigned int *udc_cnt
)
500 for (i
= 0; i
< NF_BR_NUMHOOKS
; i
++) {
501 if ((void *)e
== (void *)newinfo
->hook_entry
[i
])
504 /* beginning of a new chain
505 if i == NF_BR_NUMHOOKS it must be a user defined chain */
506 if (i
!= NF_BR_NUMHOOKS
|| !e
->bitmask
) {
507 /* this checks if the previous chain has as many entries
510 BUGPRINT("nentries does not equal the nr of entries "
514 if (((struct ebt_entries
*)e
)->policy
!= EBT_DROP
&&
515 ((struct ebt_entries
*)e
)->policy
!= EBT_ACCEPT
) {
516 /* only RETURN from udc */
517 if (i
!= NF_BR_NUMHOOKS
||
518 ((struct ebt_entries
*)e
)->policy
!= EBT_RETURN
) {
519 BUGPRINT("bad policy\n");
523 if (i
== NF_BR_NUMHOOKS
) /* it's a user defined chain */
525 if (((struct ebt_entries
*)e
)->counter_offset
!= *totalcnt
) {
526 BUGPRINT("counter_offset != totalcnt");
529 *n
= ((struct ebt_entries
*)e
)->nentries
;
533 /* a plain old entry, heh */
534 if (sizeof(struct ebt_entry
) > e
->watchers_offset
||
535 e
->watchers_offset
> e
->target_offset
||
536 e
->target_offset
>= e
->next_offset
) {
537 BUGPRINT("entry offsets not in right order\n");
540 /* this is not checked anywhere else */
541 if (e
->next_offset
- e
->target_offset
< sizeof(struct ebt_entry_target
)) {
542 BUGPRINT("target size too small\n");
552 struct ebt_chainstack cs
;
554 unsigned int hookmask
;
558 * we need these positions to check that the jumps to a different part of the
559 * entries is a jump to the beginning of a new chain.
562 ebt_get_udc_positions(struct ebt_entry
*e
, struct ebt_table_info
*newinfo
,
563 unsigned int *n
, struct ebt_cl_stack
*udc
)
567 /* we're only interested in chain starts */
570 for (i
= 0; i
< NF_BR_NUMHOOKS
; i
++) {
571 if (newinfo
->hook_entry
[i
] == (struct ebt_entries
*)e
)
574 /* only care about udc */
575 if (i
!= NF_BR_NUMHOOKS
)
578 udc
[*n
].cs
.chaininfo
= (struct ebt_entries
*)e
;
579 /* these initialisations are depended on later in check_chainloops() */
581 udc
[*n
].hookmask
= 0;
588 ebt_cleanup_match(struct ebt_entry_match
*m
, struct net
*net
, unsigned int *i
)
590 struct xt_mtdtor_param par
;
592 if (i
&& (*i
)-- == 0)
596 par
.match
= m
->u
.match
;
597 par
.matchinfo
= m
->data
;
598 par
.family
= NFPROTO_BRIDGE
;
599 if (par
.match
->destroy
!= NULL
)
600 par
.match
->destroy(&par
);
601 module_put(par
.match
->me
);
606 ebt_cleanup_watcher(struct ebt_entry_watcher
*w
, struct net
*net
, unsigned int *i
)
608 struct xt_tgdtor_param par
;
610 if (i
&& (*i
)-- == 0)
614 par
.target
= w
->u
.watcher
;
615 par
.targinfo
= w
->data
;
616 par
.family
= NFPROTO_BRIDGE
;
617 if (par
.target
->destroy
!= NULL
)
618 par
.target
->destroy(&par
);
619 module_put(par
.target
->me
);
624 ebt_cleanup_entry(struct ebt_entry
*e
, struct net
*net
, unsigned int *cnt
)
626 struct xt_tgdtor_param par
;
627 struct ebt_entry_target
*t
;
632 if (cnt
&& (*cnt
)-- == 0)
634 EBT_WATCHER_ITERATE(e
, ebt_cleanup_watcher
, net
, NULL
);
635 EBT_MATCH_ITERATE(e
, ebt_cleanup_match
, net
, NULL
);
636 t
= (struct ebt_entry_target
*)(((char *)e
) + e
->target_offset
);
639 par
.target
= t
->u
.target
;
640 par
.targinfo
= t
->data
;
641 par
.family
= NFPROTO_BRIDGE
;
642 if (par
.target
->destroy
!= NULL
)
643 par
.target
->destroy(&par
);
644 module_put(par
.target
->me
);
649 ebt_check_entry(struct ebt_entry
*e
, struct net
*net
,
650 const struct ebt_table_info
*newinfo
,
651 const char *name
, unsigned int *cnt
,
652 struct ebt_cl_stack
*cl_s
, unsigned int udc_cnt
)
654 struct ebt_entry_target
*t
;
655 struct xt_target
*target
;
656 unsigned int i
, j
, hook
= 0, hookmask
= 0;
659 struct xt_mtchk_param mtpar
;
660 struct xt_tgchk_param tgpar
;
662 /* don't mess with the struct ebt_entries */
666 if (e
->bitmask
& ~EBT_F_MASK
) {
667 BUGPRINT("Unknown flag for bitmask\n");
670 if (e
->invflags
& ~EBT_INV_MASK
) {
671 BUGPRINT("Unknown flag for inv bitmask\n");
674 if ( (e
->bitmask
& EBT_NOPROTO
) && (e
->bitmask
& EBT_802_3
) ) {
675 BUGPRINT("NOPROTO & 802_3 not allowed\n");
678 /* what hook do we belong to? */
679 for (i
= 0; i
< NF_BR_NUMHOOKS
; i
++) {
680 if (!newinfo
->hook_entry
[i
])
682 if ((char *)newinfo
->hook_entry
[i
] < (char *)e
)
687 /* (1 << NF_BR_NUMHOOKS) tells the check functions the rule is on
689 if (i
< NF_BR_NUMHOOKS
)
690 hookmask
= (1 << hook
) | (1 << NF_BR_NUMHOOKS
);
692 for (i
= 0; i
< udc_cnt
; i
++)
693 if ((char *)(cl_s
[i
].cs
.chaininfo
) > (char *)e
)
696 hookmask
= (1 << hook
) | (1 << NF_BR_NUMHOOKS
);
698 hookmask
= cl_s
[i
- 1].hookmask
;
702 mtpar
.net
= tgpar
.net
= net
;
703 mtpar
.table
= tgpar
.table
= name
;
704 mtpar
.entryinfo
= tgpar
.entryinfo
= e
;
705 mtpar
.hook_mask
= tgpar
.hook_mask
= hookmask
;
706 mtpar
.family
= tgpar
.family
= NFPROTO_BRIDGE
;
707 ret
= EBT_MATCH_ITERATE(e
, ebt_check_match
, &mtpar
, &i
);
709 goto cleanup_matches
;
711 ret
= EBT_WATCHER_ITERATE(e
, ebt_check_watcher
, &tgpar
, &j
);
713 goto cleanup_watchers
;
714 t
= (struct ebt_entry_target
*)(((char *)e
) + e
->target_offset
);
715 gap
= e
->next_offset
- e
->target_offset
;
717 target
= xt_request_find_target(NFPROTO_BRIDGE
, t
->u
.name
, 0);
718 if (IS_ERR(target
)) {
719 ret
= PTR_ERR(target
);
720 goto cleanup_watchers
;
723 t
->u
.target
= target
;
724 if (t
->u
.target
== &ebt_standard_target
) {
725 if (gap
< sizeof(struct ebt_standard_target
)) {
726 BUGPRINT("Standard target size too big\n");
728 goto cleanup_watchers
;
730 if (((struct ebt_standard_target
*)t
)->verdict
<
731 -NUM_STANDARD_TARGETS
) {
732 BUGPRINT("Invalid standard target\n");
734 goto cleanup_watchers
;
736 } else if (t
->target_size
> gap
- sizeof(struct ebt_entry_target
)) {
737 module_put(t
->u
.target
->me
);
739 goto cleanup_watchers
;
742 tgpar
.target
= target
;
743 tgpar
.targinfo
= t
->data
;
744 ret
= xt_check_target(&tgpar
, t
->target_size
,
745 e
->ethproto
, e
->invflags
& EBT_IPROTO
);
747 module_put(target
->me
);
748 goto cleanup_watchers
;
753 EBT_WATCHER_ITERATE(e
, ebt_cleanup_watcher
, net
, &j
);
755 EBT_MATCH_ITERATE(e
, ebt_cleanup_match
, net
, &i
);
760 * checks for loops and sets the hook mask for udc
761 * the hook mask for udc tells us from which base chains the udc can be
762 * accessed. This mask is a parameter to the check() functions of the extensions
764 static int check_chainloops(const struct ebt_entries
*chain
, struct ebt_cl_stack
*cl_s
,
765 unsigned int udc_cnt
, unsigned int hooknr
, char *base
)
767 int i
, chain_nr
= -1, pos
= 0, nentries
= chain
->nentries
, verdict
;
768 const struct ebt_entry
*e
= (struct ebt_entry
*)chain
->data
;
769 const struct ebt_entry_target
*t
;
771 while (pos
< nentries
|| chain_nr
!= -1) {
772 /* end of udc, go back one 'recursion' step */
773 if (pos
== nentries
) {
774 /* put back values of the time when this chain was called */
775 e
= cl_s
[chain_nr
].cs
.e
;
776 if (cl_s
[chain_nr
].from
!= -1)
778 cl_s
[cl_s
[chain_nr
].from
].cs
.chaininfo
->nentries
;
780 nentries
= chain
->nentries
;
781 pos
= cl_s
[chain_nr
].cs
.n
;
782 /* make sure we won't see a loop that isn't one */
783 cl_s
[chain_nr
].cs
.n
= 0;
784 chain_nr
= cl_s
[chain_nr
].from
;
788 t
= (struct ebt_entry_target
*)
789 (((char *)e
) + e
->target_offset
);
790 if (strcmp(t
->u
.name
, EBT_STANDARD_TARGET
))
792 if (e
->target_offset
+ sizeof(struct ebt_standard_target
) >
794 BUGPRINT("Standard target size too big\n");
797 verdict
= ((struct ebt_standard_target
*)t
)->verdict
;
798 if (verdict
>= 0) { /* jump to another chain */
799 struct ebt_entries
*hlp2
=
800 (struct ebt_entries
*)(base
+ verdict
);
801 for (i
= 0; i
< udc_cnt
; i
++)
802 if (hlp2
== cl_s
[i
].cs
.chaininfo
)
804 /* bad destination or loop */
806 BUGPRINT("bad destination\n");
813 if (cl_s
[i
].hookmask
& (1 << hooknr
))
815 /* this can't be 0, so the loop test is correct */
816 cl_s
[i
].cs
.n
= pos
+ 1;
818 cl_s
[i
].cs
.e
= ebt_next_entry(e
);
819 e
= (struct ebt_entry
*)(hlp2
->data
);
820 nentries
= hlp2
->nentries
;
821 cl_s
[i
].from
= chain_nr
;
823 /* this udc is accessible from the base chain for hooknr */
824 cl_s
[i
].hookmask
|= (1 << hooknr
);
828 e
= ebt_next_entry(e
);
834 /* do the parsing of the table/chains/entries/matches/watchers/targets, heh */
835 static int translate_table(struct net
*net
, const char *name
,
836 struct ebt_table_info
*newinfo
)
838 unsigned int i
, j
, k
, udc_cnt
;
840 struct ebt_cl_stack
*cl_s
= NULL
; /* used in the checking for chain loops */
843 while (i
< NF_BR_NUMHOOKS
&& !newinfo
->hook_entry
[i
])
845 if (i
== NF_BR_NUMHOOKS
) {
846 BUGPRINT("No valid hooks specified\n");
849 if (newinfo
->hook_entry
[i
] != (struct ebt_entries
*)newinfo
->entries
) {
850 BUGPRINT("Chains don't start at beginning\n");
853 /* make sure chains are ordered after each other in same order
854 as their corresponding hooks */
855 for (j
= i
+ 1; j
< NF_BR_NUMHOOKS
; j
++) {
856 if (!newinfo
->hook_entry
[j
])
858 if (newinfo
->hook_entry
[j
] <= newinfo
->hook_entry
[i
]) {
859 BUGPRINT("Hook order must be followed\n");
865 /* do some early checkings and initialize some things */
866 i
= 0; /* holds the expected nr. of entries for the chain */
867 j
= 0; /* holds the up to now counted entries for the chain */
868 k
= 0; /* holds the total nr. of entries, should equal
869 newinfo->nentries afterwards */
870 udc_cnt
= 0; /* will hold the nr. of user defined chains (udc) */
871 ret
= EBT_ENTRY_ITERATE(newinfo
->entries
, newinfo
->entries_size
,
872 ebt_check_entry_size_and_hooks
, newinfo
,
873 &i
, &j
, &k
, &udc_cnt
);
879 BUGPRINT("nentries does not equal the nr of entries in the "
883 if (k
!= newinfo
->nentries
) {
884 BUGPRINT("Total nentries is wrong\n");
888 /* get the location of the udc, put them in an array
889 while we're at it, allocate the chainstack */
891 /* this will get free'd in do_replace()/ebt_register_table()
892 if an error occurs */
893 newinfo
->chainstack
=
894 vmalloc(nr_cpu_ids
* sizeof(*(newinfo
->chainstack
)));
895 if (!newinfo
->chainstack
)
897 for_each_possible_cpu(i
) {
898 newinfo
->chainstack
[i
] =
899 vmalloc(udc_cnt
* sizeof(*(newinfo
->chainstack
[0])));
900 if (!newinfo
->chainstack
[i
]) {
902 vfree(newinfo
->chainstack
[--i
]);
903 vfree(newinfo
->chainstack
);
904 newinfo
->chainstack
= NULL
;
909 cl_s
= vmalloc(udc_cnt
* sizeof(*cl_s
));
912 i
= 0; /* the i'th udc */
913 EBT_ENTRY_ITERATE(newinfo
->entries
, newinfo
->entries_size
,
914 ebt_get_udc_positions
, newinfo
, &i
, cl_s
);
917 BUGPRINT("i != udc_cnt\n");
923 /* Check for loops */
924 for (i
= 0; i
< NF_BR_NUMHOOKS
; i
++)
925 if (newinfo
->hook_entry
[i
])
926 if (check_chainloops(newinfo
->hook_entry
[i
],
927 cl_s
, udc_cnt
, i
, newinfo
->entries
)) {
932 /* we now know the following (along with E=mc²):
933 - the nr of entries in each chain is right
934 - the size of the allocated space is right
935 - all valid hooks have a corresponding chain
937 - wrong data can still be on the level of a single entry
938 - could be there are jumps to places that are not the
939 beginning of a chain. This can only occur in chains that
940 are not accessible from any base chains, so we don't care. */
942 /* used to know what we need to clean up if something goes wrong */
944 ret
= EBT_ENTRY_ITERATE(newinfo
->entries
, newinfo
->entries_size
,
945 ebt_check_entry
, net
, newinfo
, name
, &i
, cl_s
, udc_cnt
);
947 EBT_ENTRY_ITERATE(newinfo
->entries
, newinfo
->entries_size
,
948 ebt_cleanup_entry
, net
, &i
);
954 /* called under write_lock */
955 static void get_counters(const struct ebt_counter
*oldcounters
,
956 struct ebt_counter
*counters
, unsigned int nentries
)
959 struct ebt_counter
*counter_base
;
961 /* counters of cpu 0 */
962 memcpy(counters
, oldcounters
,
963 sizeof(struct ebt_counter
) * nentries
);
965 /* add other counters to those of cpu 0 */
966 for_each_possible_cpu(cpu
) {
969 counter_base
= COUNTER_BASE(oldcounters
, nentries
, cpu
);
970 for (i
= 0; i
< nentries
; i
++) {
971 counters
[i
].pcnt
+= counter_base
[i
].pcnt
;
972 counters
[i
].bcnt
+= counter_base
[i
].bcnt
;
977 static int do_replace_finish(struct net
*net
, struct ebt_replace
*repl
,
978 struct ebt_table_info
*newinfo
)
981 struct ebt_counter
*counterstmp
= NULL
;
982 /* used to be able to unlock earlier */
983 struct ebt_table_info
*table
;
986 /* the user wants counters back
987 the check on the size is done later, when we have the lock */
988 if (repl
->num_counters
) {
989 unsigned long size
= repl
->num_counters
* sizeof(*counterstmp
);
990 counterstmp
= vmalloc(size
);
995 newinfo
->chainstack
= NULL
;
996 ret
= ebt_verify_pointers(repl
, newinfo
);
998 goto free_counterstmp
;
1000 ret
= translate_table(net
, repl
->name
, newinfo
);
1003 goto free_counterstmp
;
1005 t
= find_table_lock(net
, repl
->name
, &ret
, &ebt_mutex
);
1011 /* the table doesn't like it */
1012 if (t
->check
&& (ret
= t
->check(newinfo
, repl
->valid_hooks
)))
1015 if (repl
->num_counters
&& repl
->num_counters
!= t
->private->nentries
) {
1016 BUGPRINT("Wrong nr. of counters requested\n");
1021 /* we have the mutex lock, so no danger in reading this pointer */
1023 /* make sure the table can only be rmmod'ed if it contains no rules */
1024 if (!table
->nentries
&& newinfo
->nentries
&& !try_module_get(t
->me
)) {
1027 } else if (table
->nentries
&& !newinfo
->nentries
)
1029 /* we need an atomic snapshot of the counters */
1030 write_lock_bh(&t
->lock
);
1031 if (repl
->num_counters
)
1032 get_counters(t
->private->counters
, counterstmp
,
1033 t
->private->nentries
);
1035 t
->private = newinfo
;
1036 write_unlock_bh(&t
->lock
);
1037 mutex_unlock(&ebt_mutex
);
1038 /* so, a user can change the chains while having messed up her counter
1039 allocation. Only reason why this is done is because this way the lock
1040 is held only once, while this doesn't bring the kernel into a
1042 if (repl
->num_counters
&&
1043 copy_to_user(repl
->counters
, counterstmp
,
1044 repl
->num_counters
* sizeof(struct ebt_counter
))) {
1045 /* Silent error, can't fail, new table is already in place */
1046 net_warn_ratelimited("ebtables: counters copy to user failed while replacing table\n");
1049 /* decrease module count and free resources */
1050 EBT_ENTRY_ITERATE(table
->entries
, table
->entries_size
,
1051 ebt_cleanup_entry
, net
, NULL
);
1053 vfree(table
->entries
);
1054 if (table
->chainstack
) {
1055 for_each_possible_cpu(i
)
1056 vfree(table
->chainstack
[i
]);
1057 vfree(table
->chainstack
);
1064 if (audit_enabled
) {
1065 struct audit_buffer
*ab
;
1067 ab
= audit_log_start(current
->audit_context
, GFP_KERNEL
,
1068 AUDIT_NETFILTER_CFG
);
1070 audit_log_format(ab
, "table=%s family=%u entries=%u",
1071 repl
->name
, AF_BRIDGE
, repl
->nentries
);
1079 mutex_unlock(&ebt_mutex
);
1081 EBT_ENTRY_ITERATE(newinfo
->entries
, newinfo
->entries_size
,
1082 ebt_cleanup_entry
, net
, NULL
);
1085 /* can be initialized in translate_table() */
1086 if (newinfo
->chainstack
) {
1087 for_each_possible_cpu(i
)
1088 vfree(newinfo
->chainstack
[i
]);
1089 vfree(newinfo
->chainstack
);
1094 /* replace the table */
1095 static int do_replace(struct net
*net
, const void __user
*user
,
1098 int ret
, countersize
;
1099 struct ebt_table_info
*newinfo
;
1100 struct ebt_replace tmp
;
1102 if (copy_from_user(&tmp
, user
, sizeof(tmp
)) != 0)
1105 if (len
!= sizeof(tmp
) + tmp
.entries_size
) {
1106 BUGPRINT("Wrong len argument\n");
1110 if (tmp
.entries_size
== 0) {
1111 BUGPRINT("Entries_size never zero\n");
1114 /* overflow check */
1115 if (tmp
.nentries
>= ((INT_MAX
- sizeof(struct ebt_table_info
)) /
1116 NR_CPUS
- SMP_CACHE_BYTES
) / sizeof(struct ebt_counter
))
1118 if (tmp
.num_counters
>= INT_MAX
/ sizeof(struct ebt_counter
))
1120 if (tmp
.num_counters
== 0)
1123 tmp
.name
[sizeof(tmp
.name
) - 1] = 0;
1125 countersize
= COUNTER_OFFSET(tmp
.nentries
) * nr_cpu_ids
;
1126 newinfo
= vmalloc(sizeof(*newinfo
) + countersize
);
1131 memset(newinfo
->counters
, 0, countersize
);
1133 newinfo
->entries
= vmalloc(tmp
.entries_size
);
1134 if (!newinfo
->entries
) {
1139 newinfo
->entries
, tmp
.entries
, tmp
.entries_size
) != 0) {
1140 BUGPRINT("Couldn't copy entries from userspace\n");
1145 ret
= do_replace_finish(net
, &tmp
, newinfo
);
1149 vfree(newinfo
->entries
);
1156 ebt_register_table(struct net
*net
, const struct ebt_table
*input_table
)
1158 struct ebt_table_info
*newinfo
;
1159 struct ebt_table
*t
, *table
;
1160 struct ebt_replace_kernel
*repl
;
1161 int ret
, i
, countersize
;
1164 if (input_table
== NULL
|| (repl
= input_table
->table
) == NULL
||
1165 repl
->entries
== NULL
|| repl
->entries_size
== 0 ||
1166 repl
->counters
!= NULL
|| input_table
->private != NULL
) {
1167 BUGPRINT("Bad table data for ebt_register_table!!!\n");
1168 return ERR_PTR(-EINVAL
);
1171 /* Don't add one table to multiple lists. */
1172 table
= kmemdup(input_table
, sizeof(struct ebt_table
), GFP_KERNEL
);
1178 countersize
= COUNTER_OFFSET(repl
->nentries
) * nr_cpu_ids
;
1179 newinfo
= vmalloc(sizeof(*newinfo
) + countersize
);
1184 p
= vmalloc(repl
->entries_size
);
1188 memcpy(p
, repl
->entries
, repl
->entries_size
);
1189 newinfo
->entries
= p
;
1191 newinfo
->entries_size
= repl
->entries_size
;
1192 newinfo
->nentries
= repl
->nentries
;
1195 memset(newinfo
->counters
, 0, countersize
);
1197 /* fill in newinfo and parse the entries */
1198 newinfo
->chainstack
= NULL
;
1199 for (i
= 0; i
< NF_BR_NUMHOOKS
; i
++) {
1200 if ((repl
->valid_hooks
& (1 << i
)) == 0)
1201 newinfo
->hook_entry
[i
] = NULL
;
1203 newinfo
->hook_entry
[i
] = p
+
1204 ((char *)repl
->hook_entry
[i
] - repl
->entries
);
1206 ret
= translate_table(net
, repl
->name
, newinfo
);
1208 BUGPRINT("Translate_table failed\n");
1209 goto free_chainstack
;
1212 if (table
->check
&& table
->check(newinfo
, table
->valid_hooks
)) {
1213 BUGPRINT("The table doesn't like its own initial data, lol\n");
1215 goto free_chainstack
;
1218 table
->private = newinfo
;
1219 rwlock_init(&table
->lock
);
1220 mutex_lock(&ebt_mutex
);
1221 list_for_each_entry(t
, &net
->xt
.tables
[NFPROTO_BRIDGE
], list
) {
1222 if (strcmp(t
->name
, table
->name
) == 0) {
1224 BUGPRINT("Table name already exists\n");
1229 /* Hold a reference count if the chains aren't empty */
1230 if (newinfo
->nentries
&& !try_module_get(table
->me
)) {
1234 list_add(&table
->list
, &net
->xt
.tables
[NFPROTO_BRIDGE
]);
1235 mutex_unlock(&ebt_mutex
);
1238 mutex_unlock(&ebt_mutex
);
1240 if (newinfo
->chainstack
) {
1241 for_each_possible_cpu(i
)
1242 vfree(newinfo
->chainstack
[i
]);
1243 vfree(newinfo
->chainstack
);
1245 vfree(newinfo
->entries
);
1251 return ERR_PTR(ret
);
1254 void ebt_unregister_table(struct net
*net
, struct ebt_table
*table
)
1259 BUGPRINT("Request to unregister NULL table!!!\n");
1262 mutex_lock(&ebt_mutex
);
1263 list_del(&table
->list
);
1264 mutex_unlock(&ebt_mutex
);
1265 EBT_ENTRY_ITERATE(table
->private->entries
, table
->private->entries_size
,
1266 ebt_cleanup_entry
, net
, NULL
);
1267 if (table
->private->nentries
)
1268 module_put(table
->me
);
1269 vfree(table
->private->entries
);
1270 if (table
->private->chainstack
) {
1271 for_each_possible_cpu(i
)
1272 vfree(table
->private->chainstack
[i
]);
1273 vfree(table
->private->chainstack
);
1275 vfree(table
->private);
1279 /* userspace just supplied us with counters */
1280 static int do_update_counters(struct net
*net
, const char *name
,
1281 struct ebt_counter __user
*counters
,
1282 unsigned int num_counters
,
1283 const void __user
*user
, unsigned int len
)
1286 struct ebt_counter
*tmp
;
1287 struct ebt_table
*t
;
1289 if (num_counters
== 0)
1292 tmp
= vmalloc(num_counters
* sizeof(*tmp
));
1296 t
= find_table_lock(net
, name
, &ret
, &ebt_mutex
);
1300 if (num_counters
!= t
->private->nentries
) {
1301 BUGPRINT("Wrong nr of counters\n");
1306 if (copy_from_user(tmp
, counters
, num_counters
* sizeof(*counters
))) {
1311 /* we want an atomic add of the counters */
1312 write_lock_bh(&t
->lock
);
1314 /* we add to the counters of the first cpu */
1315 for (i
= 0; i
< num_counters
; i
++) {
1316 t
->private->counters
[i
].pcnt
+= tmp
[i
].pcnt
;
1317 t
->private->counters
[i
].bcnt
+= tmp
[i
].bcnt
;
1320 write_unlock_bh(&t
->lock
);
1323 mutex_unlock(&ebt_mutex
);
1329 static int update_counters(struct net
*net
, const void __user
*user
,
1332 struct ebt_replace hlp
;
1334 if (copy_from_user(&hlp
, user
, sizeof(hlp
)))
1337 if (len
!= sizeof(hlp
) + hlp
.num_counters
* sizeof(struct ebt_counter
))
1340 return do_update_counters(net
, hlp
.name
, hlp
.counters
,
1341 hlp
.num_counters
, user
, len
);
1344 static inline int ebt_make_matchname(const struct ebt_entry_match
*m
,
1345 const char *base
, char __user
*ubase
)
1347 char __user
*hlp
= ubase
+ ((char *)m
- base
);
1348 char name
[EBT_FUNCTION_MAXNAMELEN
] = {};
1350 /* ebtables expects 32 bytes long names but xt_match names are 29 bytes
1351 long. Copy 29 bytes and fill remaining bytes with zeroes. */
1352 strlcpy(name
, m
->u
.match
->name
, sizeof(name
));
1353 if (copy_to_user(hlp
, name
, EBT_FUNCTION_MAXNAMELEN
))
1358 static inline int ebt_make_watchername(const struct ebt_entry_watcher
*w
,
1359 const char *base
, char __user
*ubase
)
1361 char __user
*hlp
= ubase
+ ((char *)w
- base
);
1362 char name
[EBT_FUNCTION_MAXNAMELEN
] = {};
1364 strlcpy(name
, w
->u
.watcher
->name
, sizeof(name
));
1365 if (copy_to_user(hlp
, name
, EBT_FUNCTION_MAXNAMELEN
))
/* For one entry, rewrite every match/watcher/target name in the
 * userspace copy at @ubase so userspace sees zero-padded names instead
 * of the kernel's pointer unions. Returns 0 or -EFAULT.
 */
static inline int
ebt_make_names(struct ebt_entry *e, const char *base, char __user *ubase)
{
	int ret;
	char __user *hlp;
	const struct ebt_entry_target *t;
	char name[EBT_FUNCTION_MAXNAMELEN] = {};

	/* bitmask == 0 marks a struct ebt_entries chain header, no names. */
	if (e->bitmask == 0)
		return 0;

	/* Location of the target's name field in the user blob. */
	hlp = ubase + (((char *)e + e->target_offset) - base);
	t = (struct ebt_entry_target *)(((char *)e) + e->target_offset);

	ret = EBT_MATCH_ITERATE(e, ebt_make_matchname, base, ubase);
	if (ret != 0)
		return ret;
	ret = EBT_WATCHER_ITERATE(e, ebt_make_watchername, base, ubase);
	if (ret != 0)
		return ret;
	/* Finally the target: zero-pad to the full 32-byte name field. */
	strlcpy(name, t->u.target->name, sizeof(name));
	if (copy_to_user(hlp, name, EBT_FUNCTION_MAXNAMELEN))
		return -EFAULT;
	return 0;
}
/* Snapshot the table's counters under t->lock and copy them to
 * userspace. @num_counters is what userspace asked for and must match
 * the table's @nentries (or be 0 for "no counters wanted").
 */
static int copy_counters_to_user(struct ebt_table *t,
				 const struct ebt_counter *oldcounters,
				 void __user *user, unsigned int num_counters,
				 unsigned int nentries)
{
	struct ebt_counter *counterstmp;
	int ret = 0;

	/* userspace might not need the counters */
	if (num_counters == 0)
		return 0;

	if (num_counters != nentries) {
		BUGPRINT("Num_counters wrong\n");
		return -EINVAL;
	}

	counterstmp = vmalloc(nentries * sizeof(*counterstmp));
	if (!counterstmp)
		return -ENOMEM;

	/* Sum per-cpu counters into the temp buffer while writers are
	 * excluded; copy_to_user may sleep so it happens after unlock.
	 */
	write_lock_bh(&t->lock);
	get_counters(oldcounters, counterstmp, nentries);
	write_unlock_bh(&t->lock);

	if (copy_to_user(user, counterstmp,
	    nentries * sizeof(struct ebt_counter)))
		ret = -EFAULT;
	vfree(counterstmp);
	return ret;
}
/* called with ebt_mutex locked */
/* Copy the full ruleset (header validation is done against the
 * user-supplied ebt_replace), plus counters if requested, back to
 * userspace. EBT_SO_GET_ENTRIES reads the live table, the _INIT_
 * variant reads the pristine initial table.
 */
static int copy_everything_to_user(struct ebt_table *t, void __user *user,
				   const int *len, int cmd)
{
	struct ebt_replace tmp;
	const struct ebt_counter *oldcounters;
	unsigned int entries_size, nentries;
	int ret;
	char *entries;

	if (cmd == EBT_SO_GET_ENTRIES) {
		entries_size = t->private->entries_size;
		nentries = t->private->nentries;
		entries = t->private->entries;
		oldcounters = t->private->counters;
	} else {
		/* EBT_SO_GET_INIT_ENTRIES: the unmodified initial table. */
		entries_size = t->table->entries_size;
		nentries = t->table->nentries;
		entries = t->table->entries;
		oldcounters = t->table->counters;
	}

	if (copy_from_user(&tmp, user, sizeof(tmp)))
		return -EFAULT;

	/* Userspace must have sized its buffer exactly: header + entries
	 * blob + (optionally) one counter per entry.
	 */
	if (*len != sizeof(struct ebt_replace) + entries_size +
	    (tmp.num_counters ? nentries * sizeof(struct ebt_counter) : 0))
		return -EINVAL;

	if (tmp.nentries != nentries) {
		BUGPRINT("Nentries wrong\n");
		return -EINVAL;
	}

	if (tmp.entries_size != entries_size) {
		BUGPRINT("Wrong size\n");
		return -EINVAL;
	}

	ret = copy_counters_to_user(t, oldcounters, tmp.counters,
				    tmp.num_counters, nentries);
	if (ret)
		return ret;

	if (copy_to_user(tmp.entries, entries, entries_size)) {
		BUGPRINT("Couldn't copy entries to userspace\n");
		return -EFAULT;
	}
	/* set the match/watcher/target names right */
	return EBT_ENTRY_ITERATE(entries, entries_size,
				 ebt_make_names, entries, tmp.entries);
}
/* setsockopt() dispatcher: replace the whole table or add counter
 * deltas. Requires CAP_NET_ADMIN in the socket's network namespace.
 */
static int do_ebt_set_ctl(struct sock *sk,
			  int cmd, void __user *user, unsigned int len)
{
	int ret;
	struct net *net = sock_net(sk);

	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case EBT_SO_SET_ENTRIES:
		ret = do_replace(net, user, len);
		break;
	case EBT_SO_SET_COUNTERS:
		ret = update_counters(net, user, len);
		break;
	default:
		ret = -EINVAL;
	}
	return ret;
}
/* getsockopt() dispatcher: return table metadata (INFO) or the full
 * ruleset (ENTRIES). find_table_lock() takes ebt_mutex on success, so
 * every exit path below must drop it exactly once.
 */
static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;
	struct ebt_replace tmp;
	struct ebt_table *t;
	struct net *net = sock_net(sk);

	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	if (copy_from_user(&tmp, user, sizeof(tmp)))
		return -EFAULT;

	t = find_table_lock(net, tmp.name, &ret, &ebt_mutex);
	if (!t)
		return ret;

	switch (cmd) {
	case EBT_SO_GET_INFO:
	case EBT_SO_GET_INIT_INFO:
		if (*len != sizeof(struct ebt_replace)) {
			ret = -EINVAL;
			mutex_unlock(&ebt_mutex);
			break;
		}
		if (cmd == EBT_SO_GET_INFO) {
			/* Live table state. */
			tmp.nentries = t->private->nentries;
			tmp.entries_size = t->private->entries_size;
			tmp.valid_hooks = t->valid_hooks;
		} else {
			/* Pristine initial table. */
			tmp.nentries = t->table->nentries;
			tmp.entries_size = t->table->entries_size;
			tmp.valid_hooks = t->table->valid_hooks;
		}
		mutex_unlock(&ebt_mutex);
		if (copy_to_user(user, &tmp, *len) != 0) {
			BUGPRINT("c2u Didn't work\n");
			ret = -EFAULT;
			break;
		}
		ret = 0;
		break;

	case EBT_SO_GET_ENTRIES:
	case EBT_SO_GET_INIT_ENTRIES:
		ret = copy_everything_to_user(t, user, len, cmd);
		mutex_unlock(&ebt_mutex);
		break;

	default:
		mutex_unlock(&ebt_mutex);
		ret = -EINVAL;
	}

	return ret;
}
1560 #ifdef CONFIG_COMPAT
/* 32 bit-userspace compatibility definitions. */
/* Mirror of struct ebt_replace as laid out by a 32-bit userspace:
 * every pointer is a compat_uptr_t, so the struct is smaller than the
 * native 64-bit layout starting at hook_entry.
 */
struct compat_ebt_replace {
	char name[EBT_TABLE_MAXNAMELEN];
	compat_uint_t valid_hooks;
	compat_uint_t nentries;
	compat_uint_t entries_size;
	/* start of the chains */
	compat_uptr_t hook_entry[NF_BR_NUMHOOKS];
	/* nr of counters userspace expects back */
	compat_uint_t num_counters;
	/* where the kernel will put the old counters. */
	compat_uptr_t counters;
	compat_uptr_t entries;
};
/* struct ebt_entry_match, _target and _watcher have same layout */
/* 32-bit layout of a match/watcher/target header: the name/pointer
 * union followed by the extension size and the trailing payload.
 * NOTE(review): union interior reconstructed — confirm member names
 * against the original tree.
 */
struct compat_ebt_entry_mwt {
	union {
		char name[EBT_FUNCTION_MAXNAMELEN];
		compat_uptr_t ptr;
	} u;
	compat_uint_t match_size;
	/* payload; aligned as the 32-bit ABI aligns it */
	compat_uint_t data[0] __attribute__ ((aligned (__alignof__(struct compat_ebt_replace))));
};
/* account for possible padding between match_size and ->data */
/* Size difference between the kernel's (64-bit) mwt header and the
 * compat (32-bit) one; added/subtracted when translating blobs.
 * The BUILD_BUG_ON guarantees the result is never negative.
 */
static int ebt_compat_entry_padsize(void)
{
	BUILD_BUG_ON(XT_ALIGN(sizeof(struct ebt_entry_match)) <
		     COMPAT_XT_ALIGN(sizeof(struct compat_ebt_entry_mwt)));
	return (int) XT_ALIGN(sizeof(struct ebt_entry_match)) -
		     COMPAT_XT_ALIGN(sizeof(struct compat_ebt_entry_mwt));
}
/* Per-match payload size delta between compat and native layout. */
static int ebt_compat_match_offset(const struct xt_match *match,
				   unsigned int userlen)
{
	/*
	 * ebt_among needs special handling. The kernel .matchsize is
	 * set to -1 at registration time; at runtime an EBT_ALIGN()ed
	 * value is expected.
	 * Example: userspace sends 4500, ebt_among.c wants 4504.
	 */
	if (unlikely(match->matchsize == -1))
		return XT_ALIGN(userlen) - COMPAT_XT_ALIGN(userlen);
	return xt_compat_match_offset(match);
}
/* Emit one match into the 32-bit userspace blob: plain-text name,
 * compat-adjusted size, then the (possibly translated) payload.
 * Advances *dstptr past what was written and shrinks *size by the
 * native-vs-compat difference.
 */
static int compat_match_to_user(struct ebt_entry_match *m, void __user **dstptr,
				unsigned int *size)
{
	const struct xt_match *match = m->u.match;
	struct compat_ebt_entry_mwt __user *cm = *dstptr;
	int off = ebt_compat_match_offset(match, m->match_size);
	/* compat payload is the kernel payload minus the layout delta */
	compat_uint_t msize = m->match_size - off;

	BUG_ON(off >= m->match_size);

	if (copy_to_user(cm->u.name, match->name,
	    strlen(match->name) + 1) || put_user(msize, &cm->match_size))
		return -EFAULT;

	if (match->compat_to_user) {
		/* extension knows how to shrink its own payload */
		if (match->compat_to_user(cm->data, m->data))
			return -EFAULT;
	} else if (copy_to_user(cm->data, m->data, msize))
		return -EFAULT;

	*size -= ebt_compat_entry_padsize() + off;
	*dstptr = cm->data;
	*dstptr += msize;
	return 0;
}
/* Same as compat_match_to_user(), but for a target: write name,
 * compat size and payload into the 32-bit blob, advancing *dstptr
 * and adjusting *size.
 */
static int compat_target_to_user(struct ebt_entry_target *t,
				 void __user **dstptr,
				 unsigned int *size)
{
	const struct xt_target *target = t->u.target;
	struct compat_ebt_entry_mwt __user *cm = *dstptr;
	int off = xt_compat_target_offset(target);
	compat_uint_t tsize = t->target_size - off;

	BUG_ON(off >= t->target_size);

	/* note: size goes into the shared match_size field of the
	 * compat header (matches/watchers/targets share one layout).
	 */
	if (copy_to_user(cm->u.name, target->name,
	    strlen(target->name) + 1) || put_user(tsize, &cm->match_size))
		return -EFAULT;

	if (target->compat_to_user) {
		if (target->compat_to_user(cm->data, t->data))
			return -EFAULT;
	} else if (copy_to_user(cm->data, t->data, tsize))
		return -EFAULT;

	*size -= ebt_compat_entry_padsize() + off;
	*dstptr = cm->data;
	*dstptr += tsize;
	return 0;
}
/* Watchers have the same layout as targets, so reuse the target
 * serializer via a cast.
 */
static int compat_watcher_to_user(struct ebt_entry_watcher *w,
				  void __user **dstptr,
				  unsigned int *size)
{
	return compat_target_to_user((struct ebt_entry_target *)w,
				     dstptr, size);
}
/* Serialize one ebt_entry (or chain header) into the compat blob.
 * The entry's three offsets must be rewritten because each
 * match/watcher/target shrinks when converted to the 32-bit layout;
 * (origsize - *size) tracks the cumulative shrinkage so far.
 */
static int compat_copy_entry_to_user(struct ebt_entry *e, void __user **dstptr,
				     unsigned int *size)
{
	struct ebt_entry_target *t;
	struct ebt_entry __user *ce;
	u32 watchers_offset, target_offset, next_offset;
	compat_uint_t origsize;
	int ret;

	if (e->bitmask == 0) {
		/* chain header (struct ebt_entries): copy verbatim */
		if (*size < sizeof(struct ebt_entries))
			return -EINVAL;
		if (copy_to_user(*dstptr, e, sizeof(struct ebt_entries)))
			return -EFAULT;

		*dstptr += sizeof(struct ebt_entries);
		*size -= sizeof(struct ebt_entries);
		return 0;
	}

	if (*size < sizeof(*ce))
		return -EINVAL;

	/* copy the entry header first; offsets are patched via put_user
	 * at the end, once their compat values are known.
	 */
	ce = (struct ebt_entry __user *)*dstptr;
	if (copy_to_user(ce, e, sizeof(*ce)))
		return -EFAULT;

	origsize = *size;
	*dstptr += sizeof(*ce);

	ret = EBT_MATCH_ITERATE(e, compat_match_to_user, dstptr, size);
	if (ret)
		return ret;
	watchers_offset = e->watchers_offset - (origsize - *size);

	ret = EBT_WATCHER_ITERATE(e, compat_watcher_to_user, dstptr, size);
	if (ret)
		return ret;
	target_offset = e->target_offset - (origsize - *size);

	t = (struct ebt_entry_target *) ((char *) e + e->target_offset);

	ret = compat_target_to_user(t, dstptr, size);
	if (ret)
		return ret;
	next_offset = e->next_offset - (origsize - *size);

	if (put_user(watchers_offset, &ce->watchers_offset) ||
	    put_user(target_offset, &ce->target_offset) ||
	    put_user(next_offset, &ce->next_offset))
		return -EFAULT;

	*size -= sizeof(*ce);
	return 0;
}
1726 static int compat_calc_match(struct ebt_entry_match
*m
, int *off
)
1728 *off
+= ebt_compat_match_offset(m
->u
.match
, m
->match_size
);
1729 *off
+= ebt_compat_entry_padsize();
1733 static int compat_calc_watcher(struct ebt_entry_watcher
*w
, int *off
)
1735 *off
+= xt_compat_target_offset(w
->u
.watcher
);
1736 *off
+= ebt_compat_entry_padsize();
/* Compute how much one entry shrinks in the compat layout, record that
 * delta in the xt compat offset table, and shrink newinfo's size and
 * any hook entry points that lie after this entry.
 */
static int compat_calc_entry(const struct ebt_entry *e,
			     const struct ebt_table_info *info,
			     const void *base,
			     struct compat_ebt_replace *newinfo)
{
	const struct ebt_entry_target *t;
	unsigned int entry_offset;
	int off, ret, i;

	/* chain headers keep their size; nothing to adjust */
	if (e->bitmask == 0)
		return 0;

	off = 0;
	entry_offset = (void *)e - base;

	EBT_MATCH_ITERATE(e, compat_calc_match, &off);
	EBT_WATCHER_ITERATE(e, compat_calc_watcher, &off);

	t = (const struct ebt_entry_target *) ((char *) e + e->target_offset);

	off += xt_compat_target_offset(t->u.target);
	off += ebt_compat_entry_padsize();

	newinfo->entries_size -= off;

	ret = xt_compat_add_offset(NFPROTO_BRIDGE, entry_offset, off);
	if (ret)
		return ret;

	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
		const void *hookptr = info->hook_entry[i];
		/* NOTE(review): the `base - hookptr` pointer arithmetic
		 * looks inverted (one would expect the hook start
		 * address); confirm against the upstream tree before
		 * touching it.
		 */
		if (info->hook_entry[i] &&
		    (e < (struct ebt_entry *)(base - hookptr))) {
			newinfo->hook_entry[i] -= off;
			pr_debug("0x%08X -> 0x%08X\n",
				 newinfo->hook_entry[i] + off,
				 newinfo->hook_entry[i]);
		}
	}

	return 0;
}
/* Fill @newinfo with the compat-sized view of @info: walk every entry,
 * registering per-entry offset deltas and shrinking entries_size.
 * Caller must hold the xt compat lock for NFPROTO_BRIDGE.
 */
static int compat_table_info(const struct ebt_table_info *info,
			     struct compat_ebt_replace *newinfo)
{
	unsigned int size = info->entries_size;
	const void *entries = info->entries;

	/* start from the native size; compat_calc_entry subtracts */
	newinfo->entries_size = size;

	xt_compat_init_offsets(NFPROTO_BRIDGE, info->nentries);
	return EBT_ENTRY_ITERATE(entries, size, compat_calc_entry, info,
				 entries, newinfo);
}
/* Compat counterpart of copy_everything_to_user(): validate the
 * 32-bit header, compute the compat-sized blob via compat_table_info(),
 * then stream counters and translated entries to userspace.
 * Called with ebt_mutex and the xt compat lock held.
 */
static int compat_copy_everything_to_user(struct ebt_table *t,
					  void __user *user, int *len, int cmd)
{
	struct compat_ebt_replace repl, tmp;
	struct ebt_counter *oldcounters;
	struct ebt_table_info tinfo;
	int ret;
	void __user *pos;

	memset(&tinfo, 0, sizeof(tinfo));

	if (cmd == EBT_SO_GET_ENTRIES) {
		/* live table */
		tinfo.entries_size = t->private->entries_size;
		tinfo.nentries = t->private->nentries;
		tinfo.entries = t->private->entries;
		oldcounters = t->private->counters;
	} else {
		/* pristine initial table */
		tinfo.entries_size = t->table->entries_size;
		tinfo.nentries = t->table->nentries;
		tinfo.entries = t->table->entries;
		oldcounters = t->table->counters;
	}

	if (copy_from_user(&tmp, user, sizeof(tmp)))
		return -EFAULT;

	if (tmp.nentries != tinfo.nentries ||
	    (tmp.num_counters && tmp.num_counters != tinfo.nentries))
		return -EINVAL;

	memcpy(&repl, &tmp, sizeof(repl));
	if (cmd == EBT_SO_GET_ENTRIES)
		ret = compat_table_info(t->private, &repl);
	else
		ret = compat_table_info(&tinfo, &repl);
	if (ret)
		return ret;

	/* userspace buffer must match the compat-adjusted total size */
	if (*len != sizeof(tmp) + repl.entries_size +
	    (tmp.num_counters ? tinfo.nentries * sizeof(struct ebt_counter) : 0)) {
		pr_err("wrong size: *len %d, entries_size %u, replsz %d\n",
		       *len, tinfo.entries_size, repl.entries_size);
		return -EINVAL;
	}

	/* userspace might not need the counters */
	ret = copy_counters_to_user(t, oldcounters, compat_ptr(tmp.counters),
				    tmp.num_counters, tinfo.nentries);
	if (ret)
		return ret;

	pos = compat_ptr(tmp.entries);
	return EBT_ENTRY_ITERATE(tinfo.entries, tinfo.entries_size,
				 compat_copy_entry_to_user, &pos,
				 &tmp.entries_size);
}
/* Bookkeeping for the two-pass compat translation: pass 1 runs with
 * buf_kern_start == NULL and only measures, pass 2 writes into the
 * allocated kernel buffer.
 */
struct ebt_entries_buf_state {
	char *buf_kern_start;	/* kernel buffer to copy (translated) data to */
	u32 buf_kern_len;	/* total size of kernel buffer */
	u32 buf_kern_offset;	/* amount of data copied so far */
	u32 buf_user_offset;	/* read position in userspace buffer */
};
/* Advance the kernel-side write offset by @sz. The `>= sz` comparison
 * after the addition deliberately detects u32 wraparound of the
 * accumulated offset and reports it as -EINVAL.
 */
static int ebt_buf_count(struct ebt_entries_buf_state *state, unsigned int sz)
{
	state->buf_kern_offset += sz;
	return state->buf_kern_offset >= sz ? 0 : -EINVAL;
}
/* Append @sz bytes of @data to the translated buffer. During the
 * measuring pass (no kernel buffer yet) only the offsets advance.
 */
static int ebt_buf_add(struct ebt_entries_buf_state *state,
		       void *data, unsigned int sz)
{
	if (state->buf_kern_start == NULL)
		goto count_only;

	BUG_ON(state->buf_kern_offset + sz > state->buf_kern_len);

	memcpy(state->buf_kern_start + state->buf_kern_offset, data, sz);

 count_only:
	/* data came from the user blob, so the read cursor moves too */
	state->buf_user_offset += sz;
	return ebt_buf_count(state, sz);
}
/* Append @sz zero bytes of padding to the kernel-side buffer only. */
static int ebt_buf_add_pad(struct ebt_entries_buf_state *state, unsigned int sz)
{
	char *b = state->buf_kern_start;

	BUG_ON(b && state->buf_kern_offset > state->buf_kern_len);

	if (b != NULL && sz > 0)
		memset(b + state->buf_kern_offset, 0, sz);
	/* do not adjust ->buf_user_offset here, we added kernel-side padding */
	return ebt_buf_count(state, sz);
}
/* Translate one compat match/watcher/target payload into the kernel
 * buffer (or just measure it, when state->buf_kern_start is NULL).
 * Looks up the extension by name, runs its compat_from_user hook if it
 * has one, and zero-fills alignment padding. Returns the number of
 * user-blob bytes consumed plus the size delta, or a negative errno.
 */
static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt,
				enum compat_mwt compat_mwt,
				struct ebt_entries_buf_state *state,
				const unsigned char *base)
{
	char name[EBT_FUNCTION_MAXNAMELEN];
	struct xt_match *match;
	struct xt_target *wt;
	void *dst = NULL;
	int off, pad = 0;
	unsigned int size_kern, match_size = mwt->match_size;

	strlcpy(name, mwt->u.name, sizeof(name));

	if (state->buf_kern_start)
		dst = state->buf_kern_start + state->buf_kern_offset;

	switch (compat_mwt) {
	case EBT_COMPAT_MATCH:
		match = xt_request_find_match(NFPROTO_BRIDGE, name, 0);
		if (IS_ERR(match))
			return PTR_ERR(match);

		off = ebt_compat_match_offset(match, match_size);
		if (dst) {
			if (match->compat_from_user)
				match->compat_from_user(dst, mwt->data);
			else
				memcpy(dst, mwt->data, match_size);
		}

		size_kern = match->matchsize;
		/* ebt_among registers matchsize == -1 (variable size) */
		if (unlikely(size_kern == -1))
			size_kern = match_size;
		module_put(match->me);
		break;
	case EBT_COMPAT_WATCHER: /* fallthrough */
	case EBT_COMPAT_TARGET:
		wt = xt_request_find_target(NFPROTO_BRIDGE, name, 0);
		if (IS_ERR(wt))
			return PTR_ERR(wt);
		off = xt_compat_target_offset(wt);

		if (dst) {
			if (wt->compat_from_user)
				wt->compat_from_user(dst, mwt->data);
			else
				memcpy(dst, mwt->data, match_size);
		}

		size_kern = wt->targetsize;
		module_put(wt->me);
		break;

	default:
		return -EINVAL;
	}

	/* kernel side grows by the layout delta; user side does not */
	state->buf_kern_offset += match_size + off;
	state->buf_user_offset += match_size;
	pad = XT_ALIGN(size_kern) - size_kern;

	if (pad > 0 && dst) {
		BUG_ON(state->buf_kern_len <= pad);
		BUG_ON(state->buf_kern_offset - (match_size + off) + size_kern > state->buf_kern_len - pad);
		memset(dst + size_kern, 0, pad);
	}
	return off + match_size;
}
/*
 * return size of all matches, watchers or target, including necessary
 * alignment and padding.
 */
static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32,
			unsigned int size_left, enum compat_mwt type,
			struct ebt_entries_buf_state *state, const void *base)
{
	int growth = 0;
	char *buf;

	if (size_left == 0)
		return 0;

	buf = (char *) match32;

	while (size_left >= sizeof(*match32)) {
		struct ebt_entry_match *match_kern;
		int ret;

		/* remember where the kernel-side header lands so its
		 * match_size can be patched after translation.
		 */
		match_kern = (struct ebt_entry_match *) state->buf_kern_start;
		if (match_kern) {
			char *tmp;

			tmp = state->buf_kern_start + state->buf_kern_offset;
			match_kern = (struct ebt_entry_match *) tmp;
		}
		ret = ebt_buf_add(state, buf, sizeof(*match32));
		if (ret < 0)
			return ret;
		size_left -= sizeof(*match32);

		/* add padding before match->data (if any) */
		ret = ebt_buf_add_pad(state, ebt_compat_entry_padsize());
		if (ret < 0)
			return ret;

		/* user-supplied payload size must fit what remains */
		if (match32->match_size > size_left)
			return -EINVAL;

		size_left -= match32->match_size;

		ret = compat_mtw_from_user(match32, type, state, base);
		if (ret < 0)
			return ret;

		BUG_ON(ret < match32->match_size);
		growth += ret - match32->match_size;
		growth += ebt_compat_entry_padsize();

		buf += sizeof(*match32);
		buf += match32->match_size;

		if (match_kern)
			match_kern->match_size = ret;

		/* a rule has exactly one target; leftover bytes here
		 * indicate a malformed blob.
		 */
		WARN_ON(type == EBT_COMPAT_TARGET && size_left);
		match32 = (struct compat_ebt_entry_mwt *) buf;
	}

	return growth;
}
/* called for all ebt_entry structures. */
/* Translate (or measure) one compat ebt_entry: copy the fixed header,
 * walk its matches/watchers/target sections via ebt_size_mwt(), and
 * rewrite the three section offsets to their grown kernel values.
 */
static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base,
			  unsigned int *total,
			  struct ebt_entries_buf_state *state)
{
	unsigned int i, j, startoff, new_offset = 0;
	/* stores match/watchers/targets & offset of next struct ebt_entry: */
	unsigned int offsets[4];
	unsigned int *offsets_update = NULL;
	int ret;
	char *buf_start;

	if (*total < sizeof(struct ebt_entries))
		return -EINVAL;

	if (!entry->bitmask) {
		/* chain header: copied verbatim, no translation */
		*total -= sizeof(struct ebt_entries);
		return ebt_buf_add(state, entry, sizeof(struct ebt_entries));
	}
	if (*total < sizeof(*entry) || entry->next_offset < sizeof(*entry))
		return -EINVAL;

	startoff = state->buf_user_offset;
	/* pull in most part of ebt_entry, it does not need to be changed. */
	ret = ebt_buf_add(state, entry,
			  offsetof(struct ebt_entry, watchers_offset));
	if (ret < 0)
		return ret;

	offsets[0] = sizeof(struct ebt_entry); /* matches come first */
	memcpy(&offsets[1], &entry->watchers_offset,
	       sizeof(offsets) - sizeof(offsets[0]));

	if (state->buf_kern_start) {
		/* offsets live at the current write position; keep a
		 * pointer so they can be patched after translation.
		 */
		buf_start = state->buf_kern_start + state->buf_kern_offset;
		offsets_update = (unsigned int *) buf_start;
	}
	ret = ebt_buf_add(state, &offsets[1],
			  sizeof(offsets) - sizeof(offsets[0]));
	if (ret < 0)
		return ret;
	buf_start = (char *) entry;
	/*
	 * 0: matches offset, always follows ebt_entry.
	 * 1: watchers offset, from ebt_entry structure
	 * 2: target offset, from ebt_entry structure
	 * 3: next ebt_entry offset, from ebt_entry structure
	 *
	 * offsets are relative to beginning of struct ebt_entry (i.e., 0).
	 */
	for (i = 0, j = 1 ; j < 4 ; j++, i++) {
		struct compat_ebt_entry_mwt *match32;
		unsigned int size;
		char *buf = buf_start;

		buf = buf_start + offsets[i];
		if (offsets[i] > offsets[j])
			return -EINVAL;

		match32 = (struct compat_ebt_entry_mwt *) buf;
		size = offsets[j] - offsets[i];
		/* i doubles as the enum compat_mwt section type here */
		ret = ebt_size_mwt(match32, size, i, state, base);
		if (ret < 0)
			return ret;
		new_offset += ret;
		if (offsets_update && new_offset) {
			pr_debug("change offset %d to %d\n",
				 offsets_update[i], offsets[j] + new_offset);
			offsets_update[i] = offsets[j] + new_offset;
		}
	}

	if (state->buf_kern_start == NULL) {
		/* measuring pass: record this entry's growth for
		 * later hook-pointer fixups.
		 */
		unsigned int offset = buf_start - (char *) base;

		ret = xt_compat_add_offset(NFPROTO_BRIDGE, offset, new_offset);
		if (ret < 0)
			return ret;
	}

	startoff = state->buf_user_offset - startoff;

	BUG_ON(*total < startoff);
	*total -= startoff;
	return 0;
}
/*
 * repl->entries_size is the size of the ebt_entry blob in userspace.
 * It might need more memory when copied to a 64 bit kernel in case
 * userspace is 32-bit. So, first task: find out how much memory is needed.
 *
 * Called before validation is performed.
 */
static int compat_copy_entries(unsigned char *data, unsigned int size_user,
			       struct ebt_entries_buf_state *state)
{
	unsigned int size_remaining = size_user;
	int ret;

	ret = EBT_ENTRY_ITERATE(data, size_user, size_entry_mwt, data,
				&size_remaining, state);
	if (ret < 0)
		return ret;

	/* every byte of the user blob should have been consumed */
	WARN_ON(size_remaining);
	/* total kernel-side size of the translated blob */
	return state->buf_kern_offset;
}
/* Convert a 32-bit ebt_replace header into the native layout, with the
 * same sanity limits do_replace() applies to native headers.
 */
static int compat_copy_ebt_replace_from_user(struct ebt_replace *repl,
					     void __user *user, unsigned int len)
{
	struct compat_ebt_replace tmp;
	int i;

	if (len < sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(&tmp, user, sizeof(tmp)))
		return -EFAULT;

	if (len != sizeof(tmp) + tmp.entries_size)
		return -EINVAL;

	if (tmp.entries_size == 0)
		return -EINVAL;

	/* overflow limits mirrored from the native do_replace() path */
	if (tmp.nentries >= ((INT_MAX - sizeof(struct ebt_table_info)) /
			NR_CPUS - SMP_CACHE_BYTES) / sizeof(struct ebt_counter))
		return -ENOMEM;
	if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter))
		return -ENOMEM;
	/* NOTE(review): zero num_counters is rejected here — confirm the
	 * intended errno against the native path's convention.
	 */
	if (tmp.num_counters == 0)
		return -EINVAL;

	memcpy(repl, &tmp, offsetof(struct ebt_replace, hook_entry));

	/* starting with hook_entry, 32 vs. 64 bit structures are different */
	for (i = 0; i < NF_BR_NUMHOOKS; i++)
		repl->hook_entry[i] = compat_ptr(tmp.hook_entry[i]);

	repl->num_counters = tmp.num_counters;
	repl->counters = compat_ptr(tmp.counters);
	repl->entries = compat_ptr(tmp.entries);
	return 0;
}
/* Compat table replacement: convert the header, run the two-pass
 * entry translation (measure, then copy into a right-sized buffer),
 * fix up hook pointers by the accumulated growth, and hand the native
 * representation to do_replace_finish().
 */
static int compat_do_replace(struct net *net, void __user *user,
			     unsigned int len)
{
	int ret, i, countersize, size64;
	struct ebt_table_info *newinfo;
	struct ebt_replace tmp;
	struct ebt_entries_buf_state state;
	void *entries_tmp;

	ret = compat_copy_ebt_replace_from_user(&tmp, user, len);
	if (ret) {
		/* try real handler in case userland supplied needed padding */
		if (ret == -EINVAL && do_replace(net, user, len) == 0)
			ret = 0;
		return ret;
	}

	countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids;
	newinfo = vmalloc(sizeof(*newinfo) + countersize);
	if (!newinfo)
		return -ENOMEM;

	if (countersize)
		memset(newinfo->counters, 0, countersize);

	memset(&state, 0, sizeof(state));

	newinfo->entries = vmalloc(tmp.entries_size);
	if (!newinfo->entries) {
		ret = -ENOMEM;
		goto free_newinfo;
	}
	if (copy_from_user(
	    newinfo->entries, tmp.entries, tmp.entries_size) != 0) {
		ret = -EFAULT;
		goto free_entries;
	}

	entries_tmp = newinfo->entries;

	xt_compat_lock(NFPROTO_BRIDGE);

	xt_compat_init_offsets(NFPROTO_BRIDGE, tmp.nentries);
	/* pass 1: measure how large the native blob will be */
	ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
	if (ret < 0)
		goto out_unlock;

	pr_debug("tmp.entries_size %d, kern off %d, user off %d delta %d\n",
		 tmp.entries_size, state.buf_kern_offset, state.buf_user_offset,
		 xt_compat_calc_jump(NFPROTO_BRIDGE, tmp.entries_size));

	size64 = ret;
	newinfo->entries = vmalloc(size64);
	if (!newinfo->entries) {
		vfree(entries_tmp);
		ret = -ENOMEM;
		goto out_unlock;
	}

	/* pass 2: actually translate into the new buffer */
	memset(&state, 0, sizeof(state));
	state.buf_kern_start = newinfo->entries;
	state.buf_kern_len = size64;

	ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
	BUG_ON(ret < 0);	/* parses same data again */

	vfree(entries_tmp);
	tmp.entries_size = size64;

	/* shift hook pointers by the growth accumulated before them */
	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
		char __user *usrptr;
		if (tmp.hook_entry[i]) {
			unsigned int delta;

			usrptr = (char __user *) tmp.hook_entry[i];
			delta = usrptr - tmp.entries;
			usrptr += xt_compat_calc_jump(NFPROTO_BRIDGE, delta);
			tmp.hook_entry[i] = (struct ebt_entries __user *)usrptr;
		}
	}

	xt_compat_flush_offsets(NFPROTO_BRIDGE);
	xt_compat_unlock(NFPROTO_BRIDGE);

	ret = do_replace_finish(net, &tmp, newinfo);
	if (ret == 0)
		return ret;
free_entries:
	vfree(newinfo->entries);
free_newinfo:
	vfree(newinfo);
	return ret;
out_unlock:
	xt_compat_flush_offsets(NFPROTO_BRIDGE);
	xt_compat_unlock(NFPROTO_BRIDGE);
	goto free_entries;
}
2276 static int compat_update_counters(struct net
*net
, void __user
*user
,
2279 struct compat_ebt_replace hlp
;
2281 if (copy_from_user(&hlp
, user
, sizeof(hlp
)))
2284 /* try real handler in case userland supplied needed padding */
2285 if (len
!= sizeof(hlp
) + hlp
.num_counters
* sizeof(struct ebt_counter
))
2286 return update_counters(net
, user
, len
);
2288 return do_update_counters(net
, hlp
.name
, compat_ptr(hlp
.counters
),
2289 hlp
.num_counters
, user
, len
);
/* Compat setsockopt() dispatcher; mirrors do_ebt_set_ctl(). */
static int compat_do_ebt_set_ctl(struct sock *sk,
				 int cmd, void __user *user, unsigned int len)
{
	int ret;
	struct net *net = sock_net(sk);

	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case EBT_SO_SET_ENTRIES:
		ret = compat_do_replace(net, user, len);
		break;
	case EBT_SO_SET_COUNTERS:
		ret = compat_update_counters(net, user, len);
		break;
	default:
		ret = -EINVAL;
	}
	return ret;
}
/* Compat getsockopt() dispatcher. Falls back to the native handler
 * when the supplied length indicates userland used native padding.
 * Holds ebt_mutex (via find_table_lock) and the xt compat lock.
 */
static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
				 void __user *user, int *len)
{
	int ret;
	struct compat_ebt_replace tmp;
	struct ebt_table *t;
	struct net *net = sock_net(sk);

	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	/* try real handler in case userland supplied needed padding */
	if ((cmd == EBT_SO_GET_INFO ||
	     cmd == EBT_SO_GET_INIT_INFO) && *len != sizeof(tmp))
		return do_ebt_get_ctl(sk, cmd, user, len);

	if (copy_from_user(&tmp, user, sizeof(tmp)))
		return -EFAULT;

	t = find_table_lock(net, tmp.name, &ret, &ebt_mutex);
	if (!t)
		return ret;

	xt_compat_lock(NFPROTO_BRIDGE);
	switch (cmd) {
	case EBT_SO_GET_INFO:
		tmp.nentries = t->private->nentries;
		/* compute compat entries_size as a side effect */
		ret = compat_table_info(t->private, &tmp);
		if (ret)
			goto out;
		tmp.valid_hooks = t->valid_hooks;

		if (copy_to_user(user, &tmp, *len) != 0) {
			ret = -EFAULT;
			break;
		}
		ret = 0;
		break;
	case EBT_SO_GET_INIT_INFO:
		tmp.nentries = t->table->nentries;
		tmp.entries_size = t->table->entries_size;
		tmp.valid_hooks = t->table->valid_hooks;

		if (copy_to_user(user, &tmp, *len) != 0) {
			ret = -EFAULT;
			break;
		}
		ret = 0;
		break;
	case EBT_SO_GET_ENTRIES:
	case EBT_SO_GET_INIT_ENTRIES:
		/*
		 * try real handler first in case of userland-side padding.
		 * in case we are dealing with an 'ordinary' 32 bit binary
		 * without 64bit compatibility padding, this will fail right
		 * after copy_from_user when the *len argument is validated.
		 *
		 * the compat_ variant needs to do one pass over the kernel
		 * data set to adjust for size differences before it the check.
		 */
		if (copy_everything_to_user(t, user, len, cmd) == 0)
			ret = 0;
		else
			ret = compat_copy_everything_to_user(t, user, len, cmd);
		break;
	default:
		ret = -EINVAL;
	}
 out:
	xt_compat_flush_offsets(NFPROTO_BRIDGE);
	xt_compat_unlock(NFPROTO_BRIDGE);
	mutex_unlock(&ebt_mutex);
	return ret;
}
/* Sockopt registration: routes the EBT_SO_* get/set range to the
 * handlers above, with compat entry points for 32-bit userspace.
 */
static struct nf_sockopt_ops ebt_sockopts = {
	.pf		= PF_INET,
	.set_optmin	= EBT_BASE_CTL,
	.set_optmax	= EBT_SO_SET_MAX + 1,
	.set		= do_ebt_set_ctl,
#ifdef CONFIG_COMPAT
	.compat_set	= compat_do_ebt_set_ctl,
#endif
	.get_optmin	= EBT_BASE_CTL,
	.get_optmax	= EBT_SO_GET_MAX + 1,
	.get		= do_ebt_get_ctl,
#ifdef CONFIG_COMPAT
	.compat_get	= compat_do_ebt_get_ctl,
#endif
	.owner		= THIS_MODULE,
};
2407 static int __init
ebtables_init(void)
2411 ret
= xt_register_target(&ebt_standard_target
);
2414 ret
= nf_register_sockopt(&ebt_sockopts
);
2416 xt_unregister_target(&ebt_standard_target
);
2420 printk(KERN_INFO
"Ebtables v2.0 registered\n");
2424 static void __exit
ebtables_fini(void)
2426 nf_unregister_sockopt(&ebt_sockopts
);
2427 xt_unregister_target(&ebt_standard_target
);
2428 printk(KERN_INFO
"Ebtables v2.0 unregistered\n");
/* Exported for the per-table modules (ebtable_filter, ebtable_nat,
 * ebtable_broute) that register their tables and run them.
 */
EXPORT_SYMBOL(ebt_register_table);
EXPORT_SYMBOL(ebt_unregister_table);
EXPORT_SYMBOL(ebt_do_table);
module_init(ebtables_init);
module_exit(ebtables_fini);
MODULE_LICENSE("GPL");