/*
 * netfilter: ensure number of counters is >0 in do_replace()
 * [deliverable/linux.git] / net / bridge / netfilter / ebtables.c
 */
1 /*
2 * ebtables
3 *
4 * Author:
5 * Bart De Schuymer <bdschuym@pandora.be>
6 *
7 * ebtables.c,v 2.0, July, 2002
8 *
9 * This code is stongly inspired on the iptables code which is
10 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version
15 * 2 of the License, or (at your option) any later version.
16 */
17 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18 #include <linux/kmod.h>
19 #include <linux/module.h>
20 #include <linux/vmalloc.h>
21 #include <linux/netfilter/x_tables.h>
22 #include <linux/netfilter_bridge/ebtables.h>
23 #include <linux/spinlock.h>
24 #include <linux/mutex.h>
25 #include <linux/slab.h>
26 #include <asm/uaccess.h>
27 #include <linux/smp.h>
28 #include <linux/cpumask.h>
29 #include <linux/audit.h>
30 #include <net/sock.h>
31 /* needed for logical [in,out]-dev filtering */
32 #include "../br_private.h"
33
/* Loud diagnostic for conditions that indicate buggy/malformed input from
 * userspace tools; compiled out by switching to the commented variant. */
#define BUGPRINT(format, args...) printk("kernel msg: ebtables bug: please "\
					 "report to author: "format, ## args)
/* #define BUGPRINT(format, args...) */
37
38 /*
39 * Each cpu has its own set of counters, so there is no need for write_lock in
40 * the softirq
41 * For reading or updating the counters, the user context needs to
42 * get a write_lock
43 */
44
/* The size of each set of counters is altered to get cache alignment */
#define SMP_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1))
/* byte size of one per-cpu slice of n counters, cache-line aligned */
#define COUNTER_OFFSET(n) (SMP_ALIGN(n * sizeof(struct ebt_counter)))
/* address of cpu's private counter slice inside the flat counter array c */
#define COUNTER_BASE(c, n, cpu) ((struct ebt_counter *)(((char *)c) + \
				 COUNTER_OFFSET(n) * cpu))
50
51
52
/* serializes table lookup/replacement and the registration lists */
static DEFINE_MUTEX(ebt_mutex);
54
#ifdef CONFIG_COMPAT
/* Convert a 32-bit standard verdict to native form: non-negative verdicts
 * are jump offsets and must be shifted by the compat layout delta recorded
 * for NFPROTO_BRIDGE (negative verdicts are action codes, left alone). */
static void ebt_standard_compat_from_user(void *dst, const void *src)
{
	int v = *(compat_int_t *)src;

	if (v >= 0)
		v += xt_compat_calc_jump(NFPROTO_BRIDGE, v);
	memcpy(dst, &v, sizeof(v));
}

/* Inverse of the above: strip the compat delta from jump verdicts before
 * copying back to 32-bit userspace.  Returns 0 or -EFAULT. */
static int ebt_standard_compat_to_user(void __user *dst, const void *src)
{
	compat_int_t cv = *(int *)src;

	if (cv >= 0)
		cv -= xt_compat_calc_jump(NFPROTO_BRIDGE, cv);
	return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
}
#endif
74
75
/* Built-in "standard" target carrying a plain verdict (ACCEPT/DROP/
 * CONTINUE/RETURN or a chain-jump offset).  Its ->target is NULL, which is
 * how ebt_do_table() recognizes it on the fast path. */
static struct xt_target ebt_standard_target = {
	.name       = "standard",
	.revision   = 0,
	.family     = NFPROTO_BRIDGE,
	.targetsize = sizeof(int),
#ifdef CONFIG_COMPAT
	.compatsize = sizeof(compat_int_t),
	.compat_from_user = ebt_standard_compat_from_user,
	.compat_to_user =  ebt_standard_compat_to_user,
#endif
};
87
88 static inline int
89 ebt_do_watcher(const struct ebt_entry_watcher *w, struct sk_buff *skb,
90 struct xt_action_param *par)
91 {
92 par->target = w->u.watcher;
93 par->targinfo = w->data;
94 w->u.watcher->target(skb, par);
95 /* watchers don't give a verdict */
96 return 0;
97 }
98
99 static inline int
100 ebt_do_match(struct ebt_entry_match *m, const struct sk_buff *skb,
101 struct xt_action_param *par)
102 {
103 par->match = m->u.match;
104 par->matchinfo = m->data;
105 return m->u.match->match(skb, par) ? EBT_MATCH : EBT_NOMATCH;
106 }
107
108 static inline int
109 ebt_dev_check(const char *entry, const struct net_device *device)
110 {
111 int i = 0;
112 const char *devname;
113
114 if (*entry == '\0')
115 return 0;
116 if (!device)
117 return 1;
118 devname = device->name;
119 /* 1 is the wildcard token */
120 while (entry[i] != '\0' && entry[i] != 1 && entry[i] == devname[i])
121 i++;
122 return devname[i] != entry[i] && entry[i] != 1;
123 }
124
/* Apply the per-entry inversion flag: XOR the raw test with the invflags
 * bit.  NOTE: expands using the local variable 'e'. */
#define FWINV2(bool, invflg) ((bool) ^ !!(e->invflags & invflg))
/* process standard matches */
/* Evaluate the built-in (non-extension) match fields of @e against @skb:
 * ethernet protocol, in/out device, logical bridge in/out device and
 * source/destination MAC (with masks).  Returns 0 if the entry matches,
 * 1 if it does not. */
static inline int
ebt_basic_match(const struct ebt_entry *e, const struct sk_buff *skb,
		const struct net_device *in, const struct net_device *out)
{
	const struct ethhdr *h = eth_hdr(skb);
	const struct net_bridge_port *p;
	__be16 ethproto;
	int verdict, i;

	/* VLAN-tagged frames are matched as protocol 802.1Q */
	if (skb_vlan_tag_present(skb))
		ethproto = htons(ETH_P_8021Q);
	else
		ethproto = h->h_proto;

	if (e->bitmask & EBT_802_3) {
		/* 802.3 length field: anything >= ETH_P_802_3_MIN is not 802.3 */
		if (FWINV2(ntohs(ethproto) >= ETH_P_802_3_MIN, EBT_IPROTO))
			return 1;
	} else if (!(e->bitmask & EBT_NOPROTO) &&
	   FWINV2(e->ethproto != ethproto, EBT_IPROTO))
		return 1;

	if (FWINV2(ebt_dev_check(e->in, in), EBT_IIN))
		return 1;
	if (FWINV2(ebt_dev_check(e->out, out), EBT_IOUT))
		return 1;
	/* rcu_read_lock()ed by nf_hook_slow */
	if (in && (p = br_port_get_rcu(in)) != NULL &&
	    FWINV2(ebt_dev_check(e->logical_in, p->br->dev), EBT_ILOGICALIN))
		return 1;
	if (out && (p = br_port_get_rcu(out)) != NULL &&
	    FWINV2(ebt_dev_check(e->logical_out, p->br->dev), EBT_ILOGICALOUT))
		return 1;

	if (e->bitmask & EBT_SOURCEMAC) {
		/* masked MAC compare: verdict stays 0 only on full match */
		verdict = 0;
		for (i = 0; i < 6; i++)
			verdict |= (h->h_source[i] ^ e->sourcemac[i]) &
			   e->sourcemsk[i];
		if (FWINV2(verdict != 0, EBT_ISOURCE) )
			return 1;
	}
	if (e->bitmask & EBT_DESTMAC) {
		verdict = 0;
		for (i = 0; i < 6; i++)
			verdict |= (h->h_dest[i] ^ e->destmac[i]) &
			   e->destmsk[i];
		if (FWINV2(verdict != 0, EBT_IDEST) )
			return 1;
	}
	return 0;
}
178
179 static inline __pure
180 struct ebt_entry *ebt_next_entry(const struct ebt_entry *entry)
181 {
182 return (void *)entry + entry->next_offset;
183 }
184
/* Do some firewalling */
/* Traverse the chain registered for @hook and return an NF_* verdict for
 * @skb.  Runs under the table read lock; per-cpu hit counters are bumped
 * as entries match.  Jumps into user-defined chains are implemented with
 * an explicit per-cpu stack (cs) saved/restored on EBT_RETURN.
 * NOTE: cs is NULL when the table has no user-defined chains; in that
 * case translate_table() guarantees no jump verdicts exist. */
unsigned int ebt_do_table (unsigned int hook, struct sk_buff *skb,
   const struct net_device *in, const struct net_device *out,
   struct ebt_table *table)
{
	int i, nentries;
	struct ebt_entry *point;
	struct ebt_counter *counter_base, *cb_base;
	const struct ebt_entry_target *t;
	int verdict, sp = 0;
	struct ebt_chainstack *cs;
	struct ebt_entries *chaininfo;
	const char *base;
	const struct ebt_table_info *private;
	struct xt_action_param acpar;

	acpar.family  = NFPROTO_BRIDGE;
	acpar.in      = in;
	acpar.out     = out;
	acpar.hotdrop = false;
	acpar.hooknum = hook;

	read_lock_bh(&table->lock);
	private = table->private;
	/* this cpu's slice of the counter array */
	cb_base = COUNTER_BASE(private->counters, private->nentries,
	   smp_processor_id());
	if (private->chainstack)
		cs = private->chainstack[smp_processor_id()];
	else
		cs = NULL;
	chaininfo = private->hook_entry[hook];
	nentries = private->hook_entry[hook]->nentries;
	point = (struct ebt_entry *)(private->hook_entry[hook]->data);
	counter_base = cb_base + private->hook_entry[hook]->counter_offset;
	/* base for chain jumps */
	base = private->entries;
	i = 0;
	while (i < nentries) {
		/* built-in fields first; cheap reject before extensions */
		if (ebt_basic_match(point, skb, in, out))
			goto letscontinue;

		if (EBT_MATCH_ITERATE(point, ebt_do_match, skb, &acpar) != 0)
			goto letscontinue;
		if (acpar.hotdrop) {
			read_unlock_bh(&table->lock);
			return NF_DROP;
		}

		/* increase counter */
		(*(counter_base + i)).pcnt++;
		(*(counter_base + i)).bcnt += skb->len;

		/* these should only watch: not modify, nor tell us
		   what to do with the packet */
		EBT_WATCHER_ITERATE(point, ebt_do_watcher, skb, &acpar);

		t = (struct ebt_entry_target *)
		   (((char *)point) + point->target_offset);
		/* standard target */
		if (!t->u.target->target)
			verdict = ((struct ebt_standard_target *)t)->verdict;
		else {
			acpar.target   = t->u.target;
			acpar.targinfo = t->data;
			verdict = t->u.target->target(skb, &acpar);
		}
		if (verdict == EBT_ACCEPT) {
			read_unlock_bh(&table->lock);
			return NF_ACCEPT;
		}
		if (verdict == EBT_DROP) {
			read_unlock_bh(&table->lock);
			return NF_DROP;
		}
		if (verdict == EBT_RETURN) {
letsreturn:
#ifdef CONFIG_NETFILTER_DEBUG
			if (sp == 0) {
				BUGPRINT("RETURN on base chain");
				/* act like this is EBT_CONTINUE */
				goto letscontinue;
			}
#endif
			/* pop the caller chain's state off the stack */
			sp--;
			/* put all the local variables right */
			i = cs[sp].n;
			chaininfo = cs[sp].chaininfo;
			nentries = chaininfo->nentries;
			point = cs[sp].e;
			counter_base = cb_base +
			   chaininfo->counter_offset;
			continue;
		}
		if (verdict == EBT_CONTINUE)
			goto letscontinue;
#ifdef CONFIG_NETFILTER_DEBUG
		if (verdict < 0) {
			BUGPRINT("bogus standard verdict\n");
			read_unlock_bh(&table->lock);
			return NF_DROP;
		}
#endif
		/* jump to a udc: verdict >= 0 is an offset from 'base' */
		cs[sp].n = i + 1;
		cs[sp].chaininfo = chaininfo;
		cs[sp].e = ebt_next_entry(point);
		i = 0;
		chaininfo = (struct ebt_entries *) (base + verdict);
#ifdef CONFIG_NETFILTER_DEBUG
		if (chaininfo->distinguisher) {
			BUGPRINT("jump to non-chain\n");
			read_unlock_bh(&table->lock);
			return NF_DROP;
		}
#endif
		nentries = chaininfo->nentries;
		point = (struct ebt_entry *)chaininfo->data;
		counter_base = cb_base + chaininfo->counter_offset;
		sp++;
		continue;
letscontinue:
		point = ebt_next_entry(point);
		i++;
	}

	/* I actually like this :) */
	/* chain exhausted: apply the chain policy */
	if (chaininfo->policy == EBT_RETURN)
		goto letsreturn;
	if (chaininfo->policy == EBT_ACCEPT) {
		read_unlock_bh(&table->lock);
		return NF_ACCEPT;
	}
	read_unlock_bh(&table->lock);
	return NF_DROP;
}
320
/* If it succeeds, returns element and locks mutex */
/* Look up @name in a list whose elements start with {list_head, name[]}.
 * On success the element is returned WITH @mutex held (caller must
 * unlock); on failure *error is set to -ENOENT, the mutex is released
 * and NULL is returned. */
static inline void *
find_inlist_lock_noload(struct list_head *head, const char *name, int *error,
   struct mutex *mutex)
{
	struct {
		struct list_head list;
		char name[EBT_FUNCTION_MAXNAMELEN];
	} *e;

	mutex_lock(mutex);
	list_for_each_entry(e, head, list) {
		if (strcmp(e->name, name) == 0)
			return e;
	}
	*error = -ENOENT;
	mutex_unlock(mutex);
	return NULL;
}
340
/* Like find_inlist_lock_noload(), but on a miss requests the module
 * "<prefix><name>" once and retries the lookup. */
static void *
find_inlist_lock(struct list_head *head, const char *name, const char *prefix,
   int *error, struct mutex *mutex)
{
	return try_then_request_module(
			find_inlist_lock_noload(head, name, error, mutex),
			"%s%s", prefix, name);
}
349
/* Find an ebtables table by name in @net, auto-loading "ebtable_<name>"
 * if needed.  Returns the table with ebt_mutex held, or NULL with *error
 * set (mutex released). */
static inline struct ebt_table *
find_table_lock(struct net *net, const char *name, int *error,
		struct mutex *mutex)
{
	return find_inlist_lock(&net->xt.tables[NFPROTO_BRIDGE], name,
				"ebtable_", error, mutex);
}
357
/* Validate one match extension of an entry: bounds-check its size
 * against the space before watchers_offset, look up (and pin) the xt
 * match module, and run xt_check_match().  On success *cnt is bumped so
 * the caller knows how many matches to clean up on later failure. */
static inline int
ebt_check_match(struct ebt_entry_match *m, struct xt_mtchk_param *par,
		unsigned int *cnt)
{
	const struct ebt_entry *e = par->entryinfo;
	struct xt_match *match;
	/* space remaining up to the watchers section */
	size_t left = ((char *)e + e->watchers_offset) - (char *)m;
	int ret;

	if (left < sizeof(struct ebt_entry_match) ||
	    left - sizeof(struct ebt_entry_match) < m->match_size)
		return -EINVAL;

	match = xt_request_find_match(NFPROTO_BRIDGE, m->u.name, 0);
	if (IS_ERR(match))
		return PTR_ERR(match);
	m->u.match = match;

	par->match     = match;
	par->matchinfo = m->data;
	ret = xt_check_match(par, m->match_size,
	      e->ethproto, e->invflags & EBT_IPROTO);
	if (ret < 0) {
		/* drop the reference taken by xt_request_find_match() */
		module_put(match->me);
		return ret;
	}

	(*cnt)++;
	return 0;
}
388
/* Validate one watcher of an entry: bounds-check its size against the
 * space before target_offset, look up (and pin) the xt target module,
 * and run xt_check_target().  *cnt counts successfully checked watchers
 * for error-path cleanup. */
static inline int
ebt_check_watcher(struct ebt_entry_watcher *w, struct xt_tgchk_param *par,
		  unsigned int *cnt)
{
	const struct ebt_entry *e = par->entryinfo;
	struct xt_target *watcher;
	/* space remaining up to the target record */
	size_t left = ((char *)e + e->target_offset) - (char *)w;
	int ret;

	if (left < sizeof(struct ebt_entry_watcher) ||
	   left - sizeof(struct ebt_entry_watcher) < w->watcher_size)
		return -EINVAL;

	watcher = xt_request_find_target(NFPROTO_BRIDGE, w->u.name, 0);
	if (IS_ERR(watcher))
		return PTR_ERR(watcher);
	w->u.watcher = watcher;

	par->target   = watcher;
	par->targinfo = w->data;
	ret = xt_check_target(par, w->watcher_size,
	      e->ethproto, e->invflags & EBT_IPROTO);
	if (ret < 0) {
		/* drop the module reference on validation failure */
		module_put(watcher->me);
		return ret;
	}

	(*cnt)++;
	return 0;
}
419
/* Resolve the userspace hook_entry pointers of @repl into kernel
 * pointers in newinfo->hook_entry[], while verifying that the copied
 * blob is a well-formed sequence of chain headers (struct ebt_entries)
 * and rules (struct ebt_entry) that exactly fills entries_size, and that
 * every hook enabled in valid_hooks got a chain.
 * Returns 0 on success, -EINVAL on malformed input. */
static int ebt_verify_pointers(const struct ebt_replace *repl,
			       struct ebt_table_info *newinfo)
{
	unsigned int limit = repl->entries_size;
	unsigned int valid_hooks = repl->valid_hooks;
	unsigned int offset = 0;
	int i;

	for (i = 0; i < NF_BR_NUMHOOKS; i++)
		newinfo->hook_entry[i] = NULL;

	newinfo->entries_size = repl->entries_size;
	newinfo->nentries = repl->nentries;

	while (offset < limit) {
		size_t left = limit - offset;
		struct ebt_entry *e = (void *)newinfo->entries + offset;

		/* need at least the bitmask/distinguisher word */
		if (left < sizeof(unsigned int))
			break;

		/* does userspace claim a base chain starts here? */
		for (i = 0; i < NF_BR_NUMHOOKS; i++) {
			if ((valid_hooks & (1 << i)) == 0)
				continue;
			if ((char __user *)repl->hook_entry[i] ==
			     repl->entries + offset)
				break;
		}

		if (i != NF_BR_NUMHOOKS || !(e->bitmask & EBT_ENTRY_OR_ENTRIES)) {
			/* this is a chain header, not a rule */
			if (e->bitmask != 0) {
				/* we make userspace set this right,
				   so there is no misunderstanding */
				BUGPRINT("EBT_ENTRY_OR_ENTRIES shouldn't be set "
					 "in distinguisher\n");
				return -EINVAL;
			}
			if (i != NF_BR_NUMHOOKS)
				newinfo->hook_entry[i] = (struct ebt_entries *)e;
			if (left < sizeof(struct ebt_entries))
				break;
			offset += sizeof(struct ebt_entries);
		} else {
			/* a rule: next_offset is checked so the walk always
			 * advances and never leaves the blob */
			if (left < sizeof(struct ebt_entry))
				break;
			if (left < e->next_offset)
				break;
			if (e->next_offset < sizeof(struct ebt_entry))
				return -EINVAL;
			offset += e->next_offset;
		}
	}
	if (offset != limit) {
		BUGPRINT("entries_size too small\n");
		return -EINVAL;
	}

	/* check if all valid hooks have a chain */
	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
		if (!newinfo->hook_entry[i] &&
		   (valid_hooks & (1 << i))) {
			BUGPRINT("Valid hook without chain\n");
			return -EINVAL;
		}
	}
	return 0;
}
487
/*
 * this one is very careful, as it is the first function
 * to parse the userspace data
 */
/* Called once per blob record.  For a chain header: verify the previous
 * chain's advertised entry count (*n vs *cnt), validate the policy and
 * the running counter_offset, and start counting the new chain.  For a
 * rule: sanity-check the internal offsets and bump the per-chain (*cnt)
 * and total (*totalcnt) counters.  *udc_cnt accumulates the number of
 * user-defined chains. */
static inline int
ebt_check_entry_size_and_hooks(const struct ebt_entry *e,
   const struct ebt_table_info *newinfo,
   unsigned int *n, unsigned int *cnt,
   unsigned int *totalcnt, unsigned int *udc_cnt)
{
	int i;

	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
		if ((void *)e == (void *)newinfo->hook_entry[i])
			break;
	}
	/* beginning of a new chain
	   if i == NF_BR_NUMHOOKS it must be a user defined chain */
	if (i != NF_BR_NUMHOOKS || !e->bitmask) {
		/* this checks if the previous chain has as many entries
		   as it said it has */
		if (*n != *cnt) {
			BUGPRINT("nentries does not equal the nr of entries "
				 "in the chain\n");
			return -EINVAL;
		}
		if (((struct ebt_entries *)e)->policy != EBT_DROP &&
		   ((struct ebt_entries *)e)->policy != EBT_ACCEPT) {
			/* only RETURN from udc */
			if (i != NF_BR_NUMHOOKS ||
			   ((struct ebt_entries *)e)->policy != EBT_RETURN) {
				BUGPRINT("bad policy\n");
				return -EINVAL;
			}
		}
		if (i == NF_BR_NUMHOOKS) /* it's a user defined chain */
			(*udc_cnt)++;
		/* chain headers must carry the running total as offset */
		if (((struct ebt_entries *)e)->counter_offset != *totalcnt) {
			BUGPRINT("counter_offset != totalcnt");
			return -EINVAL;
		}
		*n = ((struct ebt_entries *)e)->nentries;
		*cnt = 0;
		return 0;
	}
	/* a plain old entry, heh */
	if (sizeof(struct ebt_entry) > e->watchers_offset ||
	   e->watchers_offset > e->target_offset ||
	   e->target_offset >= e->next_offset) {
		BUGPRINT("entry offsets not in right order\n");
		return -EINVAL;
	}
	/* this is not checked anywhere else */
	if (e->next_offset - e->target_offset < sizeof(struct ebt_entry_target)) {
		BUGPRINT("target size too small\n");
		return -EINVAL;
	}
	(*cnt)++;
	(*totalcnt)++;
	return 0;
}
549
/* Per-user-defined-chain bookkeeping used by check_chainloops():
 * cs       - saved traversal position (chain, entry, index)
 * from     - index of the chain we were called from (-1 = base chain)
 * hookmask - base-chain hooks from which this udc is reachable */
struct ebt_cl_stack
{
	struct ebt_chainstack cs;
	int from;
	unsigned int hookmask;
};
556
557 /*
558 * we need these positions to check that the jumps to a different part of the
559 * entries is a jump to the beginning of a new chain.
560 */
561 static inline int
562 ebt_get_udc_positions(struct ebt_entry *e, struct ebt_table_info *newinfo,
563 unsigned int *n, struct ebt_cl_stack *udc)
564 {
565 int i;
566
567 /* we're only interested in chain starts */
568 if (e->bitmask)
569 return 0;
570 for (i = 0; i < NF_BR_NUMHOOKS; i++) {
571 if (newinfo->hook_entry[i] == (struct ebt_entries *)e)
572 break;
573 }
574 /* only care about udc */
575 if (i != NF_BR_NUMHOOKS)
576 return 0;
577
578 udc[*n].cs.chaininfo = (struct ebt_entries *)e;
579 /* these initialisations are depended on later in check_chainloops() */
580 udc[*n].cs.n = 0;
581 udc[*n].hookmask = 0;
582
583 (*n)++;
584 return 0;
585 }
586
587 static inline int
588 ebt_cleanup_match(struct ebt_entry_match *m, struct net *net, unsigned int *i)
589 {
590 struct xt_mtdtor_param par;
591
592 if (i && (*i)-- == 0)
593 return 1;
594
595 par.net = net;
596 par.match = m->u.match;
597 par.matchinfo = m->data;
598 par.family = NFPROTO_BRIDGE;
599 if (par.match->destroy != NULL)
600 par.match->destroy(&par);
601 module_put(par.match->me);
602 return 0;
603 }
604
605 static inline int
606 ebt_cleanup_watcher(struct ebt_entry_watcher *w, struct net *net, unsigned int *i)
607 {
608 struct xt_tgdtor_param par;
609
610 if (i && (*i)-- == 0)
611 return 1;
612
613 par.net = net;
614 par.target = w->u.watcher;
615 par.targinfo = w->data;
616 par.family = NFPROTO_BRIDGE;
617 if (par.target->destroy != NULL)
618 par.target->destroy(&par);
619 module_put(par.target->me);
620 return 0;
621 }
622
/* Undo ebt_check_entry() for one rule: destroy all its watchers and
 * matches, then its target, releasing every module reference.  Chain
 * headers (bitmask == 0) are skipped.  @cnt, when non-NULL, limits the
 * cleanup to the first *cnt successfully-checked entries. */
static inline int
ebt_cleanup_entry(struct ebt_entry *e, struct net *net, unsigned int *cnt)
{
	struct xt_tgdtor_param par;
	struct ebt_entry_target *t;

	if (e->bitmask == 0)
		return 0;
	/* we're done */
	if (cnt && (*cnt)-- == 0)
		return 1;
	EBT_WATCHER_ITERATE(e, ebt_cleanup_watcher, net, NULL);
	EBT_MATCH_ITERATE(e, ebt_cleanup_match, net, NULL);
	t = (struct ebt_entry_target *)(((char *)e) + e->target_offset);

	par.net      = net;
	par.target   = t->u.target;
	par.targinfo = t->data;
	par.family   = NFPROTO_BRIDGE;
	if (par.target->destroy != NULL)
		par.target->destroy(&par);
	module_put(par.target->me);
	return 0;
}
647
/* Fully validate one rule: flag sanity, hook-mask derivation, and the
 * checkentry hooks of all matches, watchers and the target (pinning
 * their modules).  On success *cnt is incremented so the caller knows
 * how many entries to clean up if a later one fails; on failure the
 * already-checked matches/watchers of THIS entry are cleaned here. */
static inline int
ebt_check_entry(struct ebt_entry *e, struct net *net,
   const struct ebt_table_info *newinfo,
   const char *name, unsigned int *cnt,
   struct ebt_cl_stack *cl_s, unsigned int udc_cnt)
{
	struct ebt_entry_target *t;
	struct xt_target *target;
	unsigned int i, j, hook = 0, hookmask = 0;
	size_t gap;
	int ret;
	struct xt_mtchk_param mtpar;
	struct xt_tgchk_param tgpar;

	/* don't mess with the struct ebt_entries */
	if (e->bitmask == 0)
		return 0;

	if (e->bitmask & ~EBT_F_MASK) {
		BUGPRINT("Unknown flag for bitmask\n");
		return -EINVAL;
	}
	if (e->invflags & ~EBT_INV_MASK) {
		BUGPRINT("Unknown flag for inv bitmask\n");
		return -EINVAL;
	}
	if ( (e->bitmask & EBT_NOPROTO) && (e->bitmask & EBT_802_3) ) {
		BUGPRINT("NOPROTO & 802_3 not allowed\n");
		return -EINVAL;
	}
	/* what hook do we belong to? */
	/* find the last base chain starting before this entry */
	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
		if (!newinfo->hook_entry[i])
			continue;
		if ((char *)newinfo->hook_entry[i] < (char *)e)
			hook = i;
		else
			break;
	}
	/* (1 << NF_BR_NUMHOOKS) tells the check functions the rule is on
	   a base chain */
	if (i < NF_BR_NUMHOOKS)
		hookmask = (1 << hook) | (1 << NF_BR_NUMHOOKS);
	else {
		/* inside a udc: inherit the reachability mask computed by
		 * check_chainloops() for the enclosing chain */
		for (i = 0; i < udc_cnt; i++)
			if ((char *)(cl_s[i].cs.chaininfo) > (char *)e)
				break;
		if (i == 0)
			hookmask = (1 << hook) | (1 << NF_BR_NUMHOOKS);
		else
			hookmask = cl_s[i - 1].hookmask;
	}
	i = 0;

	mtpar.net	= tgpar.net       = net;
	mtpar.table     = tgpar.table     = name;
	mtpar.entryinfo = tgpar.entryinfo = e;
	mtpar.hook_mask = tgpar.hook_mask = hookmask;
	mtpar.family    = tgpar.family    = NFPROTO_BRIDGE;
	/* i / j track how many matches / watchers passed, for cleanup */
	ret = EBT_MATCH_ITERATE(e, ebt_check_match, &mtpar, &i);
	if (ret != 0)
		goto cleanup_matches;
	j = 0;
	ret = EBT_WATCHER_ITERATE(e, ebt_check_watcher, &tgpar, &j);
	if (ret != 0)
		goto cleanup_watchers;
	t = (struct ebt_entry_target *)(((char *)e) + e->target_offset);
	gap = e->next_offset - e->target_offset;

	target = xt_request_find_target(NFPROTO_BRIDGE, t->u.name, 0);
	if (IS_ERR(target)) {
		ret = PTR_ERR(target);
		goto cleanup_watchers;
	}

	t->u.target = target;
	if (t->u.target == &ebt_standard_target) {
		/* standard target: the whole record must fit and the
		 * verdict must be a valid action or jump offset */
		if (gap < sizeof(struct ebt_standard_target)) {
			BUGPRINT("Standard target size too big\n");
			ret = -EFAULT;
			goto cleanup_watchers;
		}
		if (((struct ebt_standard_target *)t)->verdict <
		   -NUM_STANDARD_TARGETS) {
			BUGPRINT("Invalid standard target\n");
			ret = -EFAULT;
			goto cleanup_watchers;
		}
	} else if (t->target_size > gap - sizeof(struct ebt_entry_target)) {
		module_put(t->u.target->me);
		ret = -EFAULT;
		goto cleanup_watchers;
	}

	tgpar.target   = target;
	tgpar.targinfo = t->data;
	ret = xt_check_target(&tgpar, t->target_size,
	      e->ethproto, e->invflags & EBT_IPROTO);
	if (ret < 0) {
		module_put(target->me);
		goto cleanup_watchers;
	}
	(*cnt)++;
	return 0;
cleanup_watchers:
	EBT_WATCHER_ITERATE(e, ebt_cleanup_watcher, net, &j);
cleanup_matches:
	EBT_MATCH_ITERATE(e, ebt_cleanup_match, net, &i);
	return ret;
}
758
/*
 * checks for loops and sets the hook mask for udc
 * the hook mask for udc tells us from which base chains the udc can be
 * accessed. This mask is a parameter to the check() functions of the extensions
 */
/* Depth-first walk of all jumps reachable from base chain @hooknr, using
 * cl_s[] as an explicit recursion stack (cs.n doubles as the visited
 * marker).  Returns 0 if no loop / bad jump target was found, -1
 * otherwise. */
static int check_chainloops(const struct ebt_entries *chain, struct ebt_cl_stack *cl_s,
   unsigned int udc_cnt, unsigned int hooknr, char *base)
{
	int i, chain_nr = -1, pos = 0, nentries = chain->nentries, verdict;
	const struct ebt_entry *e = (struct ebt_entry *)chain->data;
	const struct ebt_entry_target *t;

	while (pos < nentries || chain_nr != -1) {
		/* end of udc, go back one 'recursion' step */
		if (pos == nentries) {
			/* put back values of the time when this chain was called */
			e = cl_s[chain_nr].cs.e;
			if (cl_s[chain_nr].from != -1)
				nentries =
				cl_s[cl_s[chain_nr].from].cs.chaininfo->nentries;
			else
				nentries = chain->nentries;
			pos = cl_s[chain_nr].cs.n;
			/* make sure we won't see a loop that isn't one */
			cl_s[chain_nr].cs.n = 0;
			chain_nr = cl_s[chain_nr].from;
			if (pos == nentries)
				continue;
		}
		t = (struct ebt_entry_target *)
		   (((char *)e) + e->target_offset);
		/* only standard targets can jump */
		if (strcmp(t->u.name, EBT_STANDARD_TARGET))
			goto letscontinue;
		if (e->target_offset + sizeof(struct ebt_standard_target) >
		   e->next_offset) {
			BUGPRINT("Standard target size too big\n");
			return -1;
		}
		verdict = ((struct ebt_standard_target *)t)->verdict;
		if (verdict >= 0) { /* jump to another chain */
			struct ebt_entries *hlp2 =
			   (struct ebt_entries *)(base + verdict);
			/* the destination must be a known udc header */
			for (i = 0; i < udc_cnt; i++)
				if (hlp2 == cl_s[i].cs.chaininfo)
					break;
			/* bad destination or loop */
			if (i == udc_cnt) {
				BUGPRINT("bad destination\n");
				return -1;
			}
			/* nonzero cs.n means the chain is on our path: loop */
			if (cl_s[i].cs.n) {
				BUGPRINT("loop\n");
				return -1;
			}
			if (cl_s[i].hookmask & (1 << hooknr))
				goto letscontinue;
			/* this can't be 0, so the loop test is correct */
			cl_s[i].cs.n = pos + 1;
			pos = 0;
			cl_s[i].cs.e = ebt_next_entry(e);
			e = (struct ebt_entry *)(hlp2->data);
			nentries = hlp2->nentries;
			cl_s[i].from = chain_nr;
			chain_nr = i;
			/* this udc is accessible from the base chain for hooknr */
			cl_s[i].hookmask |= (1 << hooknr);
			continue;
		}
letscontinue:
		e = ebt_next_entry(e);
		pos++;
	}
	return 0;
}
833
/* do the parsing of the table/chains/entries/matches/watchers/targets, heh */
/* Full semantic validation of a table image that already passed
 * ebt_verify_pointers(): chain ordering, entry counts, loop detection,
 * and per-entry extension checks.  Allocates newinfo->chainstack when
 * user-defined chains exist; on error that allocation is left for the
 * caller to free (see do_replace_finish()/ebt_register_table()). */
static int translate_table(struct net *net, const char *name,
			   struct ebt_table_info *newinfo)
{
	unsigned int i, j, k, udc_cnt;
	int ret;
	struct ebt_cl_stack *cl_s = NULL; /* used in the checking for chain loops */

	i = 0;
	while (i < NF_BR_NUMHOOKS && !newinfo->hook_entry[i])
		i++;
	if (i == NF_BR_NUMHOOKS) {
		BUGPRINT("No valid hooks specified\n");
		return -EINVAL;
	}
	if (newinfo->hook_entry[i] != (struct ebt_entries *)newinfo->entries) {
		BUGPRINT("Chains don't start at beginning\n");
		return -EINVAL;
	}
	/* make sure chains are ordered after each other in same order
	   as their corresponding hooks */
	for (j = i + 1; j < NF_BR_NUMHOOKS; j++) {
		if (!newinfo->hook_entry[j])
			continue;
		if (newinfo->hook_entry[j] <= newinfo->hook_entry[i]) {
			BUGPRINT("Hook order must be followed\n");
			return -EINVAL;
		}
		i = j;
	}

	/* do some early checkings and initialize some things */
	i = 0; /* holds the expected nr. of entries for the chain */
	j = 0; /* holds the up to now counted entries for the chain */
	k = 0; /* holds the total nr. of entries, should equal
		  newinfo->nentries afterwards */
	udc_cnt = 0; /* will hold the nr. of user defined chains (udc) */
	ret = EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
	   ebt_check_entry_size_and_hooks, newinfo,
	   &i, &j, &k, &udc_cnt);

	if (ret != 0)
		return ret;

	if (i != j) {
		BUGPRINT("nentries does not equal the nr of entries in the "
			 "(last) chain\n");
		return -EINVAL;
	}
	if (k != newinfo->nentries) {
		BUGPRINT("Total nentries is wrong\n");
		return -EINVAL;
	}

	/* get the location of the udc, put them in an array
	   while we're at it, allocate the chainstack */
	if (udc_cnt) {
		/* this will get free'd in do_replace()/ebt_register_table()
		   if an error occurs */
		newinfo->chainstack =
			vmalloc(nr_cpu_ids * sizeof(*(newinfo->chainstack)));
		if (!newinfo->chainstack)
			return -ENOMEM;
		for_each_possible_cpu(i) {
			newinfo->chainstack[i] =
			  vmalloc(udc_cnt * sizeof(*(newinfo->chainstack[0])));
			if (!newinfo->chainstack[i]) {
				/* unwind the per-cpu stacks allocated so far */
				while (i)
					vfree(newinfo->chainstack[--i]);
				vfree(newinfo->chainstack);
				newinfo->chainstack = NULL;
				return -ENOMEM;
			}
		}

		cl_s = vmalloc(udc_cnt * sizeof(*cl_s));
		if (!cl_s)
			return -ENOMEM;
		i = 0; /* the i'th udc */
		EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
		   ebt_get_udc_positions, newinfo, &i, cl_s);
		/* sanity check */
		if (i != udc_cnt) {
			BUGPRINT("i != udc_cnt\n");
			vfree(cl_s);
			return -EFAULT;
		}
	}

	/* Check for loops */
	for (i = 0; i < NF_BR_NUMHOOKS; i++)
		if (newinfo->hook_entry[i])
			if (check_chainloops(newinfo->hook_entry[i],
			   cl_s, udc_cnt, i, newinfo->entries)) {
				vfree(cl_s);
				return -EINVAL;
			}

	/* we now know the following (along with E=mc²):
	   - the nr of entries in each chain is right
	   - the size of the allocated space is right
	   - all valid hooks have a corresponding chain
	   - there are no loops
	   - wrong data can still be on the level of a single entry
	   - could be there are jumps to places that are not the
	     beginning of a chain. This can only occur in chains that
	     are not accessible from any base chains, so we don't care. */

	/* used to know what we need to clean up if something goes wrong */
	i = 0;
	ret = EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
	   ebt_check_entry, net, newinfo, name, &i, cl_s, udc_cnt);
	if (ret != 0) {
		/* roll back only the i entries that passed */
		EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
				  ebt_cleanup_entry, net, &i);
	}
	vfree(cl_s);
	return ret;
}
953
954 /* called under write_lock */
955 static void get_counters(const struct ebt_counter *oldcounters,
956 struct ebt_counter *counters, unsigned int nentries)
957 {
958 int i, cpu;
959 struct ebt_counter *counter_base;
960
961 /* counters of cpu 0 */
962 memcpy(counters, oldcounters,
963 sizeof(struct ebt_counter) * nentries);
964
965 /* add other counters to those of cpu 0 */
966 for_each_possible_cpu(cpu) {
967 if (cpu == 0)
968 continue;
969 counter_base = COUNTER_BASE(oldcounters, nentries, cpu);
970 for (i = 0; i < nentries; i++) {
971 counters[i].pcnt += counter_base[i].pcnt;
972 counters[i].bcnt += counter_base[i].bcnt;
973 }
974 }
975 }
976
977 static int do_replace_finish(struct net *net, struct ebt_replace *repl,
978 struct ebt_table_info *newinfo)
979 {
980 int ret, i;
981 struct ebt_counter *counterstmp = NULL;
982 /* used to be able to unlock earlier */
983 struct ebt_table_info *table;
984 struct ebt_table *t;
985
986 /* the user wants counters back
987 the check on the size is done later, when we have the lock */
988 if (repl->num_counters) {
989 unsigned long size = repl->num_counters * sizeof(*counterstmp);
990 counterstmp = vmalloc(size);
991 if (!counterstmp)
992 return -ENOMEM;
993 }
994
995 newinfo->chainstack = NULL;
996 ret = ebt_verify_pointers(repl, newinfo);
997 if (ret != 0)
998 goto free_counterstmp;
999
1000 ret = translate_table(net, repl->name, newinfo);
1001
1002 if (ret != 0)
1003 goto free_counterstmp;
1004
1005 t = find_table_lock(net, repl->name, &ret, &ebt_mutex);
1006 if (!t) {
1007 ret = -ENOENT;
1008 goto free_iterate;
1009 }
1010
1011 /* the table doesn't like it */
1012 if (t->check && (ret = t->check(newinfo, repl->valid_hooks)))
1013 goto free_unlock;
1014
1015 if (repl->num_counters && repl->num_counters != t->private->nentries) {
1016 BUGPRINT("Wrong nr. of counters requested\n");
1017 ret = -EINVAL;
1018 goto free_unlock;
1019 }
1020
1021 /* we have the mutex lock, so no danger in reading this pointer */
1022 table = t->private;
1023 /* make sure the table can only be rmmod'ed if it contains no rules */
1024 if (!table->nentries && newinfo->nentries && !try_module_get(t->me)) {
1025 ret = -ENOENT;
1026 goto free_unlock;
1027 } else if (table->nentries && !newinfo->nentries)
1028 module_put(t->me);
1029 /* we need an atomic snapshot of the counters */
1030 write_lock_bh(&t->lock);
1031 if (repl->num_counters)
1032 get_counters(t->private->counters, counterstmp,
1033 t->private->nentries);
1034
1035 t->private = newinfo;
1036 write_unlock_bh(&t->lock);
1037 mutex_unlock(&ebt_mutex);
1038 /* so, a user can change the chains while having messed up her counter
1039 allocation. Only reason why this is done is because this way the lock
1040 is held only once, while this doesn't bring the kernel into a
1041 dangerous state. */
1042 if (repl->num_counters &&
1043 copy_to_user(repl->counters, counterstmp,
1044 repl->num_counters * sizeof(struct ebt_counter))) {
1045 /* Silent error, can't fail, new table is already in place */
1046 net_warn_ratelimited("ebtables: counters copy to user failed while replacing table\n");
1047 }
1048
1049 /* decrease module count and free resources */
1050 EBT_ENTRY_ITERATE(table->entries, table->entries_size,
1051 ebt_cleanup_entry, net, NULL);
1052
1053 vfree(table->entries);
1054 if (table->chainstack) {
1055 for_each_possible_cpu(i)
1056 vfree(table->chainstack[i]);
1057 vfree(table->chainstack);
1058 }
1059 vfree(table);
1060
1061 vfree(counterstmp);
1062
1063 #ifdef CONFIG_AUDIT
1064 if (audit_enabled) {
1065 struct audit_buffer *ab;
1066
1067 ab = audit_log_start(current->audit_context, GFP_KERNEL,
1068 AUDIT_NETFILTER_CFG);
1069 if (ab) {
1070 audit_log_format(ab, "table=%s family=%u entries=%u",
1071 repl->name, AF_BRIDGE, repl->nentries);
1072 audit_log_end(ab);
1073 }
1074 }
1075 #endif
1076 return ret;
1077
1078 free_unlock:
1079 mutex_unlock(&ebt_mutex);
1080 free_iterate:
1081 EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
1082 ebt_cleanup_entry, net, NULL);
1083 free_counterstmp:
1084 vfree(counterstmp);
1085 /* can be initialized in translate_table() */
1086 if (newinfo->chainstack) {
1087 for_each_possible_cpu(i)
1088 vfree(newinfo->chainstack[i]);
1089 vfree(newinfo->chainstack);
1090 }
1091 return ret;
1092 }
1093
/* replace the table: EBT_SO_SET_ENTRIES handler.
 * Copies the ebt_replace header and the entries blob from userspace,
 * sanity-checks sizes and counts, then hands off to do_replace_finish()
 * which translates the ruleset and swaps it into the table.
 */
static int do_replace(struct net *net, const void __user *user,
	unsigned int len)
{
	int ret, countersize;
	struct ebt_table_info *newinfo;
	struct ebt_replace tmp;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* len must cover the header plus exactly entries_size bytes */
	if (len != sizeof(tmp) + tmp.entries_size) {
		BUGPRINT("Wrong len argument\n");
		return -EINVAL;
	}

	if (tmp.entries_size == 0) {
		BUGPRINT("Entries_size never zero\n");
		return -EINVAL;
	}
	/* overflow check */
	if (tmp.nentries >= ((INT_MAX - sizeof(struct ebt_table_info)) /
			NR_CPUS - SMP_CACHE_BYTES) / sizeof(struct ebt_counter))
		return -ENOMEM;
	if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter))
		return -ENOMEM;
	/* a replace must always carry at least one counter slot; the same
	 * check exists in the compat path */
	if (tmp.num_counters == 0)
		return -EINVAL;

	/* make sure the table name is NUL-terminated before any lookup */
	tmp.name[sizeof(tmp.name) - 1] = 0;

	/* one cache-aligned counter set per entry, per possible cpu */
	countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids;
	newinfo = vmalloc(sizeof(*newinfo) + countersize);
	if (!newinfo)
		return -ENOMEM;

	if (countersize)
		memset(newinfo->counters, 0, countersize);

	newinfo->entries = vmalloc(tmp.entries_size);
	if (!newinfo->entries) {
		ret = -ENOMEM;
		goto free_newinfo;
	}
	if (copy_from_user(
	   newinfo->entries, tmp.entries, tmp.entries_size) != 0) {
		BUGPRINT("Couldn't copy entries from userspace\n");
		ret = -EFAULT;
		goto free_entries;
	}

	ret = do_replace_finish(net, &tmp, newinfo);
	if (ret == 0)
		return ret;	/* success: newinfo now owned by the table */
free_entries:
	vfree(newinfo->entries);
free_newinfo:
	vfree(newinfo);
	return ret;
}
1154
/* register a table built from an in-kernel ebt_replace_kernel template;
 * returns the registered (duplicated) table or an ERR_PTR on failure.
 */
struct ebt_table *
ebt_register_table(struct net *net, const struct ebt_table *input_table)
{
	struct ebt_table_info *newinfo;
	struct ebt_table *t, *table;
	struct ebt_replace_kernel *repl;
	int ret, i, countersize;
	void *p;

	/* a template must have entries, no counters, and no pre-set
	 * private runtime state */
	if (input_table == NULL || (repl = input_table->table) == NULL ||
	    repl->entries == NULL || repl->entries_size == 0 ||
	    repl->counters != NULL || input_table->private != NULL) {
		BUGPRINT("Bad table data for ebt_register_table!!!\n");
		return ERR_PTR(-EINVAL);
	}

	/* Don't add one table to multiple lists. */
	table = kmemdup(input_table, sizeof(struct ebt_table), GFP_KERNEL);
	if (!table) {
		ret = -ENOMEM;
		goto out;
	}

	countersize = COUNTER_OFFSET(repl->nentries) * nr_cpu_ids;
	newinfo = vmalloc(sizeof(*newinfo) + countersize);
	ret = -ENOMEM;
	if (!newinfo)
		goto free_table;

	/* private copy of the template's entries blob */
	p = vmalloc(repl->entries_size);
	if (!p)
		goto free_newinfo;

	memcpy(p, repl->entries, repl->entries_size);
	newinfo->entries = p;

	newinfo->entries_size = repl->entries_size;
	newinfo->nentries = repl->nentries;

	if (countersize)
		memset(newinfo->counters, 0, countersize);

	/* fill in newinfo and parse the entries */
	newinfo->chainstack = NULL;
	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
		if ((repl->valid_hooks & (1 << i)) == 0)
			newinfo->hook_entry[i] = NULL;
		else
			/* rebase the hook pointer onto our private copy */
			newinfo->hook_entry[i] = p +
				((char *)repl->hook_entry[i] - repl->entries);
	}
	ret = translate_table(net, repl->name, newinfo);
	if (ret != 0) {
		BUGPRINT("Translate_table failed\n");
		goto free_chainstack;
	}

	if (table->check && table->check(newinfo, table->valid_hooks)) {
		BUGPRINT("The table doesn't like its own initial data, lol\n");
		ret = -EINVAL;
		goto free_chainstack;
	}

	table->private = newinfo;
	rwlock_init(&table->lock);
	mutex_lock(&ebt_mutex);
	/* reject duplicate table names within this netns */
	list_for_each_entry(t, &net->xt.tables[NFPROTO_BRIDGE], list) {
		if (strcmp(t->name, table->name) == 0) {
			ret = -EEXIST;
			BUGPRINT("Table name already exists\n");
			goto free_unlock;
		}
	}

	/* Hold a reference count if the chains aren't empty */
	if (newinfo->nentries && !try_module_get(table->me)) {
		ret = -ENOENT;
		goto free_unlock;
	}
	list_add(&table->list, &net->xt.tables[NFPROTO_BRIDGE]);
	mutex_unlock(&ebt_mutex);
	return table;
free_unlock:
	mutex_unlock(&ebt_mutex);
free_chainstack:
	/* chainstack can be allocated by translate_table() */
	if (newinfo->chainstack) {
		for_each_possible_cpu(i)
			vfree(newinfo->chainstack[i]);
		vfree(newinfo->chainstack);
	}
	vfree(newinfo->entries);
free_newinfo:
	vfree(newinfo);
free_table:
	kfree(table);
out:
	return ERR_PTR(ret);
}
1253
/* remove a table from this netns' list and free everything it owns */
void ebt_unregister_table(struct net *net, struct ebt_table *table)
{
	int i;

	if (!table) {
		BUGPRINT("Request to unregister NULL table!!!\n");
		return;
	}
	mutex_lock(&ebt_mutex);
	list_del(&table->list);
	mutex_unlock(&ebt_mutex);
	/* release per-entry match/watcher/target resources */
	EBT_ENTRY_ITERATE(table->private->entries, table->private->entries_size,
			  ebt_cleanup_entry, net, NULL);
	/* drop the module ref taken at registration for non-empty tables */
	if (table->private->nentries)
		module_put(table->me);
	vfree(table->private->entries);
	if (table->private->chainstack) {
		for_each_possible_cpu(i)
			vfree(table->private->chainstack[i]);
		vfree(table->private->chainstack);
	}
	vfree(table->private);
	kfree(table);
}
1278
/* userspace just supplied us with counters: add the given deltas to the
 * first cpu's counter set of the named table.
 */
static int do_update_counters(struct net *net, const char *name,
				struct ebt_counter __user *counters,
				unsigned int num_counters,
				const void __user *user, unsigned int len)
{
	int i, ret;
	struct ebt_counter *tmp;
	struct ebt_table *t;

	if (num_counters == 0)
		return -EINVAL;

	tmp = vmalloc(num_counters * sizeof(*tmp));
	if (!tmp)
		return -ENOMEM;

	/* on success t is returned with ebt_mutex held */
	t = find_table_lock(net, name, &ret, &ebt_mutex);
	if (!t)
		goto free_tmp;

	/* caller must supply exactly one counter per entry */
	if (num_counters != t->private->nentries) {
		BUGPRINT("Wrong nr of counters\n");
		ret = -EINVAL;
		goto unlock_mutex;
	}

	if (copy_from_user(tmp, counters, num_counters * sizeof(*counters))) {
		ret = -EFAULT;
		goto unlock_mutex;
	}

	/* we want an atomic add of the counters */
	write_lock_bh(&t->lock);

	/* we add to the counters of the first cpu */
	for (i = 0; i < num_counters; i++) {
		t->private->counters[i].pcnt += tmp[i].pcnt;
		t->private->counters[i].bcnt += tmp[i].bcnt;
	}

	write_unlock_bh(&t->lock);
	ret = 0;
unlock_mutex:
	mutex_unlock(&ebt_mutex);
free_tmp:
	vfree(tmp);
	return ret;
}
1328
1329 static int update_counters(struct net *net, const void __user *user,
1330 unsigned int len)
1331 {
1332 struct ebt_replace hlp;
1333
1334 if (copy_from_user(&hlp, user, sizeof(hlp)))
1335 return -EFAULT;
1336
1337 if (len != sizeof(hlp) + hlp.num_counters * sizeof(struct ebt_counter))
1338 return -EINVAL;
1339
1340 return do_update_counters(net, hlp.name, hlp.counters,
1341 hlp.num_counters, user, len);
1342 }
1343
1344 static inline int ebt_make_matchname(const struct ebt_entry_match *m,
1345 const char *base, char __user *ubase)
1346 {
1347 char __user *hlp = ubase + ((char *)m - base);
1348 char name[EBT_FUNCTION_MAXNAMELEN] = {};
1349
1350 /* ebtables expects 32 bytes long names but xt_match names are 29 bytes
1351 long. Copy 29 bytes and fill remaining bytes with zeroes. */
1352 strlcpy(name, m->u.match->name, sizeof(name));
1353 if (copy_to_user(hlp, name, EBT_FUNCTION_MAXNAMELEN))
1354 return -EFAULT;
1355 return 0;
1356 }
1357
1358 static inline int ebt_make_watchername(const struct ebt_entry_watcher *w,
1359 const char *base, char __user *ubase)
1360 {
1361 char __user *hlp = ubase + ((char *)w - base);
1362 char name[EBT_FUNCTION_MAXNAMELEN] = {};
1363
1364 strlcpy(name, w->u.watcher->name, sizeof(name));
1365 if (copy_to_user(hlp , name, EBT_FUNCTION_MAXNAMELEN))
1366 return -EFAULT;
1367 return 0;
1368 }
1369
/* write the match/watcher/target names of one entry into the userspace
 * copy at their corresponding offsets; chain descriptors are skipped.
 */
static inline int
ebt_make_names(struct ebt_entry *e, const char *base, char __user *ubase)
{
	int ret;
	char __user *hlp;
	const struct ebt_entry_target *t;
	char name[EBT_FUNCTION_MAXNAMELEN] = {};

	/* bitmask == 0 marks an ebt_entries chain header, not a rule */
	if (e->bitmask == 0)
		return 0;

	hlp = ubase + (((char *)e + e->target_offset) - base);
	t = (struct ebt_entry_target *)(((char *)e) + e->target_offset);

	ret = EBT_MATCH_ITERATE(e, ebt_make_matchname, base, ubase);
	if (ret != 0)
		return ret;
	ret = EBT_WATCHER_ITERATE(e, ebt_make_watchername, base, ubase);
	if (ret != 0)
		return ret;
	/* pad the target name to the full name length with zeroes */
	strlcpy(name, t->u.target->name, sizeof(name));
	if (copy_to_user(hlp, name, EBT_FUNCTION_MAXNAMELEN))
		return -EFAULT;
	return 0;
}
1395
/* snapshot the table's counters and copy them to userspace.
 * num_counters == 0 means userspace does not want counters; otherwise
 * it must match nentries exactly.
 */
static int copy_counters_to_user(struct ebt_table *t,
				 const struct ebt_counter *oldcounters,
				 void __user *user, unsigned int num_counters,
				 unsigned int nentries)
{
	struct ebt_counter *counterstmp;
	int ret = 0;

	/* userspace might not need the counters */
	if (num_counters == 0)
		return 0;

	if (num_counters != nentries) {
		BUGPRINT("Num_counters wrong\n");
		return -EINVAL;
	}

	counterstmp = vmalloc(nentries * sizeof(*counterstmp));
	if (!counterstmp)
		return -ENOMEM;

	/* take the write lock so the snapshot is atomic wrt. updates */
	write_lock_bh(&t->lock);
	get_counters(oldcounters, counterstmp, nentries);
	write_unlock_bh(&t->lock);

	if (copy_to_user(user, counterstmp,
	   nentries * sizeof(struct ebt_counter)))
		ret = -EFAULT;
	vfree(counterstmp);
	return ret;
}
1427
/* called with ebt_mutex locked.
 * EBT_SO_GET_ENTRIES dumps the live ruleset (t->private);
 * EBT_SO_GET_INIT_ENTRIES dumps the table's initial ruleset (t->table).
 */
static int copy_everything_to_user(struct ebt_table *t, void __user *user,
				   const int *len, int cmd)
{
	struct ebt_replace tmp;
	const struct ebt_counter *oldcounters;
	unsigned int entries_size, nentries;
	int ret;
	char *entries;

	if (cmd == EBT_SO_GET_ENTRIES) {
		entries_size = t->private->entries_size;
		nentries = t->private->nentries;
		entries = t->private->entries;
		oldcounters = t->private->counters;
	} else {
		entries_size = t->table->entries_size;
		nentries = t->table->nentries;
		entries = t->table->entries;
		oldcounters = t->table->counters;
	}

	if (copy_from_user(&tmp, user, sizeof(tmp)))
		return -EFAULT;

	/* buffer must fit header, entries and (optionally) counters */
	if (*len != sizeof(struct ebt_replace) + entries_size +
	   (tmp.num_counters ? nentries * sizeof(struct ebt_counter) : 0))
		return -EINVAL;

	if (tmp.nentries != nentries) {
		BUGPRINT("Nentries wrong\n");
		return -EINVAL;
	}

	if (tmp.entries_size != entries_size) {
		BUGPRINT("Wrong size\n");
		return -EINVAL;
	}

	ret = copy_counters_to_user(t, oldcounters, tmp.counters,
					tmp.num_counters, nentries);
	if (ret)
		return ret;

	if (copy_to_user(tmp.entries, entries, entries_size)) {
		BUGPRINT("Couldn't copy entries to userspace\n");
		return -EFAULT;
	}
	/* set the match/watcher/target names right */
	return EBT_ENTRY_ITERATE(entries, entries_size,
	   ebt_make_names, entries, tmp.entries);
}
1480
1481 static int do_ebt_set_ctl(struct sock *sk,
1482 int cmd, void __user *user, unsigned int len)
1483 {
1484 int ret;
1485 struct net *net = sock_net(sk);
1486
1487 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
1488 return -EPERM;
1489
1490 switch (cmd) {
1491 case EBT_SO_SET_ENTRIES:
1492 ret = do_replace(net, user, len);
1493 break;
1494 case EBT_SO_SET_COUNTERS:
1495 ret = update_counters(net, user, len);
1496 break;
1497 default:
1498 ret = -EINVAL;
1499 }
1500 return ret;
1501 }
1502
/* getsockopt dispatcher for ebtables; requires CAP_NET_ADMIN.
 * Looks up the table named in the user-supplied ebt_replace header and
 * serves info or entry dumps for the live or initial ruleset.
 */
static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;
	struct ebt_replace tmp;
	struct ebt_table *t;
	struct net *net = sock_net(sk);

	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	if (copy_from_user(&tmp, user, sizeof(tmp)))
		return -EFAULT;

	/* on success the table is returned with ebt_mutex held; every
	 * switch arm below must drop it */
	t = find_table_lock(net, tmp.name, &ret, &ebt_mutex);
	if (!t)
		return ret;

	switch (cmd) {
	case EBT_SO_GET_INFO:
	case EBT_SO_GET_INIT_INFO:
		if (*len != sizeof(struct ebt_replace)) {
			ret = -EINVAL;
			mutex_unlock(&ebt_mutex);
			break;
		}
		if (cmd == EBT_SO_GET_INFO) {
			tmp.nentries = t->private->nentries;
			tmp.entries_size = t->private->entries_size;
			tmp.valid_hooks = t->valid_hooks;
		} else {
			tmp.nentries = t->table->nentries;
			tmp.entries_size = t->table->entries_size;
			tmp.valid_hooks = t->table->valid_hooks;
		}
		mutex_unlock(&ebt_mutex);
		if (copy_to_user(user, &tmp, *len) != 0) {
			BUGPRINT("c2u Didn't work\n");
			ret = -EFAULT;
			break;
		}
		ret = 0;
		break;

	case EBT_SO_GET_ENTRIES:
	case EBT_SO_GET_INIT_ENTRIES:
		ret = copy_everything_to_user(t, user, len, cmd);
		mutex_unlock(&ebt_mutex);
		break;

	default:
		mutex_unlock(&ebt_mutex);
		ret = -EINVAL;
	}

	return ret;
}
1559
1560 #ifdef CONFIG_COMPAT
/* 32 bit-userspace compatibility definitions: same layout as struct
 * ebt_replace, but with user pointers shrunk to compat_uptr_t. */
struct compat_ebt_replace {
	char name[EBT_TABLE_MAXNAMELEN];
	compat_uint_t valid_hooks;
	compat_uint_t nentries;
	compat_uint_t entries_size;
	/* start of the chains */
	compat_uptr_t hook_entry[NF_BR_NUMHOOKS];
	/* nr of counters userspace expects back */
	compat_uint_t num_counters;
	/* where the kernel will put the old counters. */
	compat_uptr_t counters;
	/* the ruleset blob in userspace */
	compat_uptr_t entries;
};
1575
/* struct ebt_entry_match, _target and _watcher have same layout */
struct compat_ebt_entry_mwt {
	union {
		char name[EBT_FUNCTION_MAXNAMELEN];
		compat_uptr_t ptr;
	} u;
	compat_uint_t match_size;	/* size of ->data in the compat layout */
	compat_uint_t data[0];		/* match/watcher/target payload */
};
1585
/* account for possible padding between match_size and ->data */
static int ebt_compat_entry_padsize(void)
{
	/* the native header must never be smaller than the compat one */
	BUILD_BUG_ON(XT_ALIGN(sizeof(struct ebt_entry_match)) <
			COMPAT_XT_ALIGN(sizeof(struct compat_ebt_entry_mwt)));
	return (int) XT_ALIGN(sizeof(struct ebt_entry_match)) -
			COMPAT_XT_ALIGN(sizeof(struct compat_ebt_entry_mwt));
}
1594
/* size difference between the native and compat representation of the
 * given match's payload */
static int ebt_compat_match_offset(const struct xt_match *match,
				   unsigned int userlen)
{
	/*
	 * ebt_among needs special handling. The kernel .matchsize is
	 * set to -1 at registration time; at runtime an EBT_ALIGN()ed
	 * value is expected.
	 * Example: userspace sends 4500, ebt_among.c wants 4504.
	 */
	if (unlikely(match->matchsize == -1))
		return XT_ALIGN(userlen) - COMPAT_XT_ALIGN(userlen);
	return xt_compat_match_offset(match);
}
1608
/* copy one match from kernel layout to the 32-bit user layout at *dstptr,
 * shrinking it by the compat offset; advances *dstptr, shrinks *size.
 */
static int compat_match_to_user(struct ebt_entry_match *m, void __user **dstptr,
				unsigned int *size)
{
	const struct xt_match *match = m->u.match;
	struct compat_ebt_entry_mwt __user *cm = *dstptr;
	int off = ebt_compat_match_offset(match, m->match_size);
	compat_uint_t msize = m->match_size - off;

	BUG_ON(off >= m->match_size);

	if (copy_to_user(cm->u.name, match->name,
	    strlen(match->name) + 1) || put_user(msize, &cm->match_size))
		return -EFAULT;

	if (match->compat_to_user) {
		/* match provides its own layout conversion */
		if (match->compat_to_user(cm->data, m->data))
			return -EFAULT;
	} else if (copy_to_user(cm->data, m->data, msize))
		return -EFAULT;

	*size -= ebt_compat_entry_padsize() + off;
	*dstptr = cm->data;
	*dstptr += msize;
	return 0;
}
1634
/* copy one target (or watcher; same layout) from kernel layout to the
 * 32-bit user layout at *dstptr; advances *dstptr, shrinks *size.
 */
static int compat_target_to_user(struct ebt_entry_target *t,
				 void __user **dstptr,
				 unsigned int *size)
{
	const struct xt_target *target = t->u.target;
	struct compat_ebt_entry_mwt __user *cm = *dstptr;
	int off = xt_compat_target_offset(target);
	compat_uint_t tsize = t->target_size - off;

	BUG_ON(off >= t->target_size);

	if (copy_to_user(cm->u.name, target->name,
	    strlen(target->name) + 1) || put_user(tsize, &cm->match_size))
		return -EFAULT;

	if (target->compat_to_user) {
		/* target provides its own layout conversion */
		if (target->compat_to_user(cm->data, t->data))
			return -EFAULT;
	} else if (copy_to_user(cm->data, t->data, tsize))
		return -EFAULT;

	*size -= ebt_compat_entry_padsize() + off;
	*dstptr = cm->data;
	*dstptr += tsize;
	return 0;
}
1661
1662 static int compat_watcher_to_user(struct ebt_entry_watcher *w,
1663 void __user **dstptr,
1664 unsigned int *size)
1665 {
1666 return compat_target_to_user((struct ebt_entry_target *)w,
1667 dstptr, size);
1668 }
1669
/* copy one ebt_entry (or chain descriptor) to the 32-bit user layout,
 * rewriting the watcher/target/next offsets to account for shrinkage.
 */
static int compat_copy_entry_to_user(struct ebt_entry *e, void __user **dstptr,
				     unsigned int *size)
{
	struct ebt_entry_target *t;
	struct ebt_entry __user *ce;
	u32 watchers_offset, target_offset, next_offset;
	compat_uint_t origsize;
	int ret;

	/* bitmask == 0: struct ebt_entries chain descriptor, copied as-is */
	if (e->bitmask == 0) {
		if (*size < sizeof(struct ebt_entries))
			return -EINVAL;
		if (copy_to_user(*dstptr, e, sizeof(struct ebt_entries)))
			return -EFAULT;

		*dstptr += sizeof(struct ebt_entries);
		*size -= sizeof(struct ebt_entries);
		return 0;
	}

	if (*size < sizeof(*ce))
		return -EINVAL;

	ce = (struct ebt_entry __user *)*dstptr;
	if (copy_to_user(ce, e, sizeof(*ce)))
		return -EFAULT;

	origsize = *size;
	*dstptr += sizeof(*ce);

	ret = EBT_MATCH_ITERATE(e, compat_match_to_user, dstptr, size);
	if (ret)
		return ret;
	/* (origsize - *size) = how much the copied data shrank so far */
	watchers_offset = e->watchers_offset - (origsize - *size);

	ret = EBT_WATCHER_ITERATE(e, compat_watcher_to_user, dstptr, size);
	if (ret)
		return ret;
	target_offset = e->target_offset - (origsize - *size);

	t = (struct ebt_entry_target *) ((char *) e + e->target_offset);

	ret = compat_target_to_user(t, dstptr, size);
	if (ret)
		return ret;
	next_offset = e->next_offset - (origsize - *size);

	/* fix up the offsets in the already-copied entry header */
	if (put_user(watchers_offset, &ce->watchers_offset) ||
	   put_user(target_offset, &ce->target_offset) ||
	   put_user(next_offset, &ce->next_offset))
		return -EFAULT;

	*size -= sizeof(*ce);
	return 0;
}
1725
1726 static int compat_calc_match(struct ebt_entry_match *m, int *off)
1727 {
1728 *off += ebt_compat_match_offset(m->u.match, m->match_size);
1729 *off += ebt_compat_entry_padsize();
1730 return 0;
1731 }
1732
1733 static int compat_calc_watcher(struct ebt_entry_watcher *w, int *off)
1734 {
1735 *off += xt_compat_target_offset(w->u.watcher);
1736 *off += ebt_compat_entry_padsize();
1737 return 0;
1738 }
1739
/* compute how much one entry shrinks in the 32-bit view and adjust the
 * affected hook offsets in *newinfo accordingly.
 */
static int compat_calc_entry(const struct ebt_entry *e,
			     const struct ebt_table_info *info,
			     const void *base,
			     struct compat_ebt_replace *newinfo)
{
	const struct ebt_entry_target *t;
	unsigned int entry_offset;
	int off, ret, i;

	/* chain descriptors have identical layout in both ABIs */
	if (e->bitmask == 0)
		return 0;

	off = 0;
	entry_offset = (void *)e - base;

	EBT_MATCH_ITERATE(e, compat_calc_match, &off);
	EBT_WATCHER_ITERATE(e, compat_calc_watcher, &off);

	t = (const struct ebt_entry_target *) ((char *) e + e->target_offset);

	off += xt_compat_target_offset(t->u.target);
	off += ebt_compat_entry_padsize();

	newinfo->entries_size -= off;

	/* remember the delta so jumps can be translated later */
	ret = xt_compat_add_offset(NFPROTO_BRIDGE, entry_offset, off);
	if (ret)
		return ret;

	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
		const void *hookptr = info->hook_entry[i];
		if (info->hook_entry[i] &&
		   (e < (struct ebt_entry *)(base - hookptr))) {
			/* NOTE(review): `base - hookptr` reads oddly (one
			 * would expect hookptr - base style math here);
			 * kept verbatim — confirm against current mainline
			 * before changing. */
			newinfo->hook_entry[i] -= off;
			pr_debug("0x%08X -> 0x%08X\n",
					newinfo->hook_entry[i] + off,
					newinfo->hook_entry[i]);
		}
	}

	return 0;
}
1782
1783
/* compute the compat (32-bit) view of a table: shrinks entries_size and
 * the hook offsets in *newinfo based on the native table info.
 */
static int compat_table_info(const struct ebt_table_info *info,
			     struct compat_ebt_replace *newinfo)
{
	unsigned int size = info->entries_size;
	const void *entries = info->entries;

	newinfo->entries_size = size;

	xt_compat_init_offsets(NFPROTO_BRIDGE, info->nentries);
	return EBT_ENTRY_ITERATE(entries, size, compat_calc_entry, info,
							entries, newinfo);
}
1796
/* dump a table to 32-bit userspace; compat counterpart of
 * copy_everything_to_user(), called with ebt_mutex held.
 */
static int compat_copy_everything_to_user(struct ebt_table *t,
					  void __user *user, int *len, int cmd)
{
	struct compat_ebt_replace repl, tmp;
	struct ebt_counter *oldcounters;
	struct ebt_table_info tinfo;
	int ret;
	void __user *pos;

	memset(&tinfo, 0, sizeof(tinfo));

	/* GET_ENTRIES dumps the live ruleset, GET_INIT_ENTRIES the
	 * table's initial one */
	if (cmd == EBT_SO_GET_ENTRIES) {
		tinfo.entries_size = t->private->entries_size;
		tinfo.nentries = t->private->nentries;
		tinfo.entries = t->private->entries;
		oldcounters = t->private->counters;
	} else {
		tinfo.entries_size = t->table->entries_size;
		tinfo.nentries = t->table->nentries;
		tinfo.entries = t->table->entries;
		oldcounters = t->table->counters;
	}

	if (copy_from_user(&tmp, user, sizeof(tmp)))
		return -EFAULT;

	if (tmp.nentries != tinfo.nentries ||
	   (tmp.num_counters && tmp.num_counters != tinfo.nentries))
		return -EINVAL;

	/* shrink repl.entries_size to the 32-bit layout's size */
	memcpy(&repl, &tmp, sizeof(repl));
	if (cmd == EBT_SO_GET_ENTRIES)
		ret = compat_table_info(t->private, &repl);
	else
		ret = compat_table_info(&tinfo, &repl);
	if (ret)
		return ret;

	if (*len != sizeof(tmp) + repl.entries_size +
	   (tmp.num_counters? tinfo.nentries * sizeof(struct ebt_counter): 0)) {
		pr_err("wrong size: *len %d, entries_size %u, replsz %d\n",
				*len, tinfo.entries_size, repl.entries_size);
		return -EINVAL;
	}

	/* userspace might not need the counters */
	ret = copy_counters_to_user(t, oldcounters, compat_ptr(tmp.counters),
					tmp.num_counters, tinfo.nentries);
	if (ret)
		return ret;

	pos = compat_ptr(tmp.entries);
	return EBT_ENTRY_ITERATE(tinfo.entries, tinfo.entries_size,
			compat_copy_entry_to_user, &pos, &tmp.entries_size);
}
1852
/* bookkeeping while translating a 32-bit entries blob into native layout;
 * a first pass with buf_kern_start == NULL only measures the needed size.
 */
struct ebt_entries_buf_state {
	char *buf_kern_start;	/* kernel buffer to copy (translated) data to */
	u32 buf_kern_len;	/* total size of kernel buffer */
	u32 buf_kern_offset;	/* amount of data copied so far */
	u32 buf_user_offset;	/* read position in userspace buffer */
};
1859
1860 static int ebt_buf_count(struct ebt_entries_buf_state *state, unsigned int sz)
1861 {
1862 state->buf_kern_offset += sz;
1863 return state->buf_kern_offset >= sz ? 0 : -EINVAL;
1864 }
1865
/* append sz bytes from data to the translated buffer, or just account
 * for them when only measuring (buf_kern_start == NULL).
 */
static int ebt_buf_add(struct ebt_entries_buf_state *state,
		       void *data, unsigned int sz)
{
	if (state->buf_kern_start == NULL)
		goto count_only;

	BUG_ON(state->buf_kern_offset + sz > state->buf_kern_len);

	memcpy(state->buf_kern_start + state->buf_kern_offset, data, sz);

count_only:
	/* user data is consumed in both passes */
	state->buf_user_offset += sz;
	return ebt_buf_count(state, sz);
}
1880
/* append sz zero bytes of kernel-side alignment padding; no user data
 * is consumed, so buf_user_offset stays put.
 */
static int ebt_buf_add_pad(struct ebt_entries_buf_state *state, unsigned int sz)
{
	char *b = state->buf_kern_start;

	BUG_ON(b && state->buf_kern_offset > state->buf_kern_len);

	if (b != NULL && sz > 0)
		memset(b + state->buf_kern_offset, 0, sz);
	/* do not adjust ->buf_user_offset here, we added kernel-side padding */
	return ebt_buf_count(state, sz);
}
1892
/* which kind of extension a compat_ebt_entry_mwt describes */
enum compat_mwt {
	EBT_COMPAT_MATCH,
	EBT_COMPAT_WATCHER,
	EBT_COMPAT_TARGET,
};
1898
/* translate one 32-bit match/watcher/target into native layout, or just
 * compute its size when state->buf_kern_start is NULL.
 * Returns the space the native representation occupies (match_size plus
 * the compat offset), or a negative errno.
 */
static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt,
				enum compat_mwt compat_mwt,
				struct ebt_entries_buf_state *state,
				const unsigned char *base)
{
	char name[EBT_FUNCTION_MAXNAMELEN];
	struct xt_match *match;
	struct xt_target *wt;
	void *dst = NULL;
	int off, pad = 0;
	unsigned int size_kern, match_size = mwt->match_size;

	strlcpy(name, mwt->u.name, sizeof(name));

	if (state->buf_kern_start)
		dst = state->buf_kern_start + state->buf_kern_offset;

	switch (compat_mwt) {
	case EBT_COMPAT_MATCH:
		match = xt_request_find_match(NFPROTO_BRIDGE, name, 0);
		if (IS_ERR(match))
			return PTR_ERR(match);

		off = ebt_compat_match_offset(match, match_size);
		if (dst) {
			if (match->compat_from_user)
				match->compat_from_user(dst, mwt->data);
			else
				memcpy(dst, mwt->data, match_size);
		}

		size_kern = match->matchsize;
		/* variable-size match (e.g. matchsize set to -1) */
		if (unlikely(size_kern == -1))
			size_kern = match_size;
		module_put(match->me);
		break;
	case EBT_COMPAT_WATCHER: /* fallthrough */
	case EBT_COMPAT_TARGET:
		wt = xt_request_find_target(NFPROTO_BRIDGE, name, 0);
		if (IS_ERR(wt))
			return PTR_ERR(wt);
		off = xt_compat_target_offset(wt);

		if (dst) {
			if (wt->compat_from_user)
				wt->compat_from_user(dst, mwt->data);
			else
				memcpy(dst, mwt->data, match_size);
		}

		size_kern = wt->targetsize;
		module_put(wt->me);
		break;

	default:
		return -EINVAL;
	}

	state->buf_kern_offset += match_size + off;
	state->buf_user_offset += match_size;
	pad = XT_ALIGN(size_kern) - size_kern;

	/* zero the alignment padding after the payload */
	if (pad > 0 && dst) {
		BUG_ON(state->buf_kern_len <= pad);
		BUG_ON(state->buf_kern_offset - (match_size + off) + size_kern > state->buf_kern_len - pad);
		memset(dst + size_kern, 0, pad);
	}
	return off + match_size;
}
1968
/*
 * return size of all matches, watchers or target, including necessary
 * alignment and padding.
 * Walks the chain of compat_ebt_entry_mwt structures at match32, feeding
 * each through compat_mtw_from_user(); returns the accumulated growth
 * (native size minus compat size) or a negative errno.
 */
static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32,
			unsigned int size_left, enum compat_mwt type,
			struct ebt_entries_buf_state *state, const void *base)
{
	int growth = 0;
	char *buf;

	if (size_left == 0)
		return 0;

	buf = (char *) match32;

	while (size_left >= sizeof(*match32)) {
		struct ebt_entry_match *match_kern;
		int ret;

		/* where the native header will land, if we are copying */
		match_kern = (struct ebt_entry_match *) state->buf_kern_start;
		if (match_kern) {
			char *tmp;
			tmp = state->buf_kern_start + state->buf_kern_offset;
			match_kern = (struct ebt_entry_match *) tmp;
		}
		ret = ebt_buf_add(state, buf, sizeof(*match32));
		if (ret < 0)
			return ret;
		size_left -= sizeof(*match32);

		/* add padding before match->data (if any) */
		ret = ebt_buf_add_pad(state, ebt_compat_entry_padsize());
		if (ret < 0)
			return ret;

		if (match32->match_size > size_left)
			return -EINVAL;

		size_left -= match32->match_size;

		ret = compat_mtw_from_user(match32, type, state, base);
		if (ret < 0)
			return ret;

		BUG_ON(ret < match32->match_size);
		growth += ret - match32->match_size;
		growth += ebt_compat_entry_padsize();

		buf += sizeof(*match32);
		buf += match32->match_size;

		/* fix the size in the native header to the kernel size */
		if (match_kern)
			match_kern->match_size = ret;

		/* a target section holds exactly one element */
		WARN_ON(type == EBT_COMPAT_TARGET && size_left);
		match32 = (struct compat_ebt_entry_mwt *) buf;
	}

	return growth;
}
2030
/* called for all ebt_entry structures.
 * Translates (or measures) one entry's matches, watchers and target, and
 * patches the entry's offsets for the larger native representation.
 */
static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base,
			  unsigned int *total,
			  struct ebt_entries_buf_state *state)
{
	unsigned int i, j, startoff, new_offset = 0;
	/* stores match/watchers/targets & offset of next struct ebt_entry: */
	unsigned int offsets[4];
	unsigned int *offsets_update = NULL;
	int ret;
	char *buf_start;

	if (*total < sizeof(struct ebt_entries))
		return -EINVAL;

	/* bitmask == 0: chain descriptor, copied verbatim */
	if (!entry->bitmask) {
		*total -= sizeof(struct ebt_entries);
		return ebt_buf_add(state, entry, sizeof(struct ebt_entries));
	}
	if (*total < sizeof(*entry) || entry->next_offset < sizeof(*entry))
		return -EINVAL;

	startoff = state->buf_user_offset;
	/* pull in most part of ebt_entry, it does not need to be changed. */
	ret = ebt_buf_add(state, entry,
			offsetof(struct ebt_entry, watchers_offset));
	if (ret < 0)
		return ret;

	offsets[0] = sizeof(struct ebt_entry); /* matches come first */
	memcpy(&offsets[1], &entry->watchers_offset,
			sizeof(offsets) - sizeof(offsets[0]));

	if (state->buf_kern_start) {
		/* the offsets copied just below get patched after the loop */
		buf_start = state->buf_kern_start + state->buf_kern_offset;
		offsets_update = (unsigned int *) buf_start;
	}
	ret = ebt_buf_add(state, &offsets[1],
			sizeof(offsets) - sizeof(offsets[0]));
	if (ret < 0)
		return ret;
	buf_start = (char *) entry;
	/*
	 * 0: matches offset, always follows ebt_entry.
	 * 1: watchers offset, from ebt_entry structure
	 * 2: target offset, from ebt_entry structure
	 * 3: next ebt_entry offset, from ebt_entry structure
	 *
	 * offsets are relative to beginning of struct ebt_entry (i.e., 0).
	 */
	for (i = 0, j = 1 ; j < 4 ; j++, i++) {
		struct compat_ebt_entry_mwt *match32;
		unsigned int size;
		char *buf = buf_start;

		buf = buf_start + offsets[i];
		if (offsets[i] > offsets[j])
			return -EINVAL;

		match32 = (struct compat_ebt_entry_mwt *) buf;
		size = offsets[j] - offsets[i];
		/* i doubles as the enum compat_mwt of this section */
		ret = ebt_size_mwt(match32, size, i, state, base);
		if (ret < 0)
			return ret;
		new_offset += ret;
		if (offsets_update && new_offset) {
			pr_debug("change offset %d to %d\n",
				offsets_update[i], offsets[j] + new_offset);
			offsets_update[i] = offsets[j] + new_offset;
		}
	}

	if (state->buf_kern_start == NULL) {
		/* measuring pass: record the delta for jump translation */
		unsigned int offset = buf_start - (char *) base;

		ret = xt_compat_add_offset(NFPROTO_BRIDGE, offset, new_offset);
		if (ret < 0)
			return ret;
	}

	startoff = state->buf_user_offset - startoff;

	BUG_ON(*total < startoff);
	*total -= startoff;
	return 0;
}
2117
/*
 * repl->entries_size is the size of the ebt_entry blob in userspace.
 * It might need more memory when copied to a 64 bit kernel in case
 * userspace is 32-bit. So, first task: find out how much memory is needed.
 *
 * Called before validation is performed.
 * Returns the kernel-side size of the blob, or a negative errno.
 */
static int compat_copy_entries(unsigned char *data, unsigned int size_user,
			       struct ebt_entries_buf_state *state)
{
	unsigned int size_remaining = size_user;
	int ret;

	ret = EBT_ENTRY_ITERATE(data, size_user, size_entry_mwt, data,
					&size_remaining, state);
	if (ret < 0)
		return ret;

	/* a well-formed blob must be consumed completely */
	WARN_ON(size_remaining);
	return state->buf_kern_offset;
}
2139
2140
/* convert a compat_ebt_replace header from userspace into the native
 * struct ebt_replace, applying the same sanity checks as do_replace().
 */
static int compat_copy_ebt_replace_from_user(struct ebt_replace *repl,
					     void __user *user, unsigned int len)
{
	struct compat_ebt_replace tmp;
	int i;

	if (len < sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(&tmp, user, sizeof(tmp)))
		return -EFAULT;

	if (len != sizeof(tmp) + tmp.entries_size)
		return -EINVAL;

	if (tmp.entries_size == 0)
		return -EINVAL;

	/* overflow checks, mirroring do_replace() */
	if (tmp.nentries >= ((INT_MAX - sizeof(struct ebt_table_info)) /
			NR_CPUS - SMP_CACHE_BYTES) / sizeof(struct ebt_counter))
		return -ENOMEM;
	if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter))
		return -ENOMEM;
	/* reject a zero counter count, like the native path does */
	if (tmp.num_counters == 0)
		return -EINVAL;

	memcpy(repl, &tmp, offsetof(struct ebt_replace, hook_entry));

	/* starting with hook_entry, 32 vs. 64 bit structures are different */
	for (i = 0; i < NF_BR_NUMHOOKS; i++)
		repl->hook_entry[i] = compat_ptr(tmp.hook_entry[i]);

	repl->num_counters = tmp.num_counters;
	repl->counters = compat_ptr(tmp.counters);
	repl->entries = compat_ptr(tmp.entries);
	return 0;
}
2178
/* EBT_SO_SET_ENTRIES handler for 32-bit userland on a 64-bit kernel.
 * Converts the compat header, copies the user entry blob, translates it
 * in two passes (measure, then convert) under the xt compat lock, and
 * finally hands the 64-bit table to the native do_replace_finish().
 */
static int compat_do_replace(struct net *net, void __user *user,
			     unsigned int len)
{
	int ret, i, countersize, size64;
	struct ebt_table_info *newinfo;
	struct ebt_replace tmp;
	struct ebt_entries_buf_state state;
	void *entries_tmp;

	ret = compat_copy_ebt_replace_from_user(&tmp, user, len);
	if (ret) {
		/* try real handler in case userland supplied needed padding */
		if (ret == -EINVAL && do_replace(net, user, len) == 0)
			ret = 0;
		return ret;
	}

	/* per-cpu, cache-aligned counter area appended to the table info */
	countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids;
	newinfo = vmalloc(sizeof(*newinfo) + countersize);
	if (!newinfo)
		return -ENOMEM;

	if (countersize)
		memset(newinfo->counters, 0, countersize);

	memset(&state, 0, sizeof(state));

	/* temporary buffer holding the raw 32-bit entries from userland */
	newinfo->entries = vmalloc(tmp.entries_size);
	if (!newinfo->entries) {
		ret = -ENOMEM;
		goto free_newinfo;
	}
	if (copy_from_user(
	    newinfo->entries, tmp.entries, tmp.entries_size) != 0) {
		ret = -EFAULT;
		goto free_entries;
	}

	entries_tmp = newinfo->entries;

	xt_compat_lock(NFPROTO_BRIDGE);

	xt_compat_init_offsets(NFPROTO_BRIDGE, tmp.nentries);
	/* pass 1: state has no kernel buffer, so this only computes the
	 * 64-bit size and records the per-entry offset deltas
	 */
	ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
	if (ret < 0)
		goto out_unlock;

	pr_debug("tmp.entries_size %d, kern off %d, user off %d delta %d\n",
		tmp.entries_size, state.buf_kern_offset, state.buf_user_offset,
		xt_compat_calc_jump(NFPROTO_BRIDGE, tmp.entries_size));

	/* allocate the real (64-bit sized) entry buffer */
	size64 = ret;
	newinfo->entries = vmalloc(size64);
	if (!newinfo->entries) {
		vfree(entries_tmp);
		ret = -ENOMEM;
		goto out_unlock;
	}

	memset(&state, 0, sizeof(state));
	state.buf_kern_start = newinfo->entries;
	state.buf_kern_len = size64;

	/* pass 2: same walk, this time actually writing converted entries */
	ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
	BUG_ON(ret < 0);	/* parses same data again */

	vfree(entries_tmp);
	tmp.entries_size = size64;

	/* relocate the hook entry pointers by the accumulated size delta
	 * so they point into the converted (64-bit) layout
	 */
	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
		char __user *usrptr;
		if (tmp.hook_entry[i]) {
			unsigned int delta;
			usrptr = (char __user *) tmp.hook_entry[i];
			delta = usrptr - tmp.entries;
			usrptr += xt_compat_calc_jump(NFPROTO_BRIDGE, delta);
			tmp.hook_entry[i] = (struct ebt_entries __user *)usrptr;
		}
	}

	xt_compat_flush_offsets(NFPROTO_BRIDGE);
	xt_compat_unlock(NFPROTO_BRIDGE);

	/* do_replace_finish() owns newinfo on success */
	ret = do_replace_finish(net, &tmp, newinfo);
	if (ret == 0)
		return ret;
free_entries:
	vfree(newinfo->entries);
free_newinfo:
	vfree(newinfo);
	return ret;
out_unlock:
	xt_compat_flush_offsets(NFPROTO_BRIDGE);
	xt_compat_unlock(NFPROTO_BRIDGE);
	goto free_entries;
}
2275
2276 static int compat_update_counters(struct net *net, void __user *user,
2277 unsigned int len)
2278 {
2279 struct compat_ebt_replace hlp;
2280
2281 if (copy_from_user(&hlp, user, sizeof(hlp)))
2282 return -EFAULT;
2283
2284 /* try real handler in case userland supplied needed padding */
2285 if (len != sizeof(hlp) + hlp.num_counters * sizeof(struct ebt_counter))
2286 return update_counters(net, user, len);
2287
2288 return do_update_counters(net, hlp.name, compat_ptr(hlp.counters),
2289 hlp.num_counters, user, len);
2290 }
2291
2292 static int compat_do_ebt_set_ctl(struct sock *sk,
2293 int cmd, void __user *user, unsigned int len)
2294 {
2295 int ret;
2296 struct net *net = sock_net(sk);
2297
2298 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
2299 return -EPERM;
2300
2301 switch (cmd) {
2302 case EBT_SO_SET_ENTRIES:
2303 ret = compat_do_replace(net, user, len);
2304 break;
2305 case EBT_SO_SET_COUNTERS:
2306 ret = compat_update_counters(net, user, len);
2307 break;
2308 default:
2309 ret = -EINVAL;
2310 }
2311 return ret;
2312 }
2313
/* Compat getsockopt entry point: serves EBT_SO_GET_* requests to 32-bit
 * userland. Runs with the table locked (find_table_lock takes ebt_mutex)
 * and the xt compat lock held across the switch.
 */
static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
				 void __user *user, int *len)
{
	int ret;
	struct compat_ebt_replace tmp;
	struct ebt_table *t;
	struct net *net = sock_net(sk);

	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	/* try real handler in case userland supplied needed padding */
	if ((cmd == EBT_SO_GET_INFO ||
	     cmd == EBT_SO_GET_INIT_INFO) && *len != sizeof(tmp))
		return do_ebt_get_ctl(sk, cmd, user, len);

	if (copy_from_user(&tmp, user, sizeof(tmp)))
		return -EFAULT;

	/* on success this leaves ebt_mutex held; released at the bottom */
	t = find_table_lock(net, tmp.name, &ret, &ebt_mutex);
	if (!t)
		return ret;

	xt_compat_lock(NFPROTO_BRIDGE);
	switch (cmd) {
	case EBT_SO_GET_INFO:
		tmp.nentries = t->private->nentries;
		/* compute compat entries_size; may register offsets */
		ret = compat_table_info(t->private, &tmp);
		if (ret)
			goto out;
		tmp.valid_hooks = t->valid_hooks;

		/* *len == sizeof(tmp) was enforced above for this cmd */
		if (copy_to_user(user, &tmp, *len) != 0) {
			ret = -EFAULT;
			break;
		}
		ret = 0;
		break;
	case EBT_SO_GET_INIT_INFO:
		/* initial (pristine) table, not the live private copy */
		tmp.nentries = t->table->nentries;
		tmp.entries_size = t->table->entries_size;
		tmp.valid_hooks = t->table->valid_hooks;

		if (copy_to_user(user, &tmp, *len) != 0) {
			ret = -EFAULT;
			break;
		}
		ret = 0;
		break;
	case EBT_SO_GET_ENTRIES:
	case EBT_SO_GET_INIT_ENTRIES:
		/*
		 * try real handler first in case of userland-side padding.
		 * in case we are dealing with an 'ordinary' 32 bit binary
		 * without 64bit compatibility padding, this will fail right
		 * after copy_from_user when the *len argument is validated.
		 *
		 * the compat_ variant needs to do one pass over the kernel
		 * data set to adjust for size differences before it the check.
		 */
		if (copy_everything_to_user(t, user, len, cmd) == 0)
			ret = 0;
		else
			ret = compat_copy_everything_to_user(t, user, len, cmd);
		break;
	default:
		ret = -EINVAL;
	}
 out:
	/* unwind in reverse acquisition order: compat lock, then ebt_mutex */
	xt_compat_flush_offsets(NFPROTO_BRIDGE);
	xt_compat_unlock(NFPROTO_BRIDGE);
	mutex_unlock(&ebt_mutex);
	return ret;
}
2388 #endif
2389
/* Socket-option registration: routes the EBT_SO_SET_*/EBT_SO_GET_*
 * command range to the handlers above, with separate entry points for
 * 32-bit callers on a 64-bit kernel when CONFIG_COMPAT is enabled.
 */
static struct nf_sockopt_ops ebt_sockopts = {
	.pf = PF_INET,
	.set_optmin = EBT_BASE_CTL,
	.set_optmax = EBT_SO_SET_MAX + 1,
	.set = do_ebt_set_ctl,
#ifdef CONFIG_COMPAT
	.compat_set = compat_do_ebt_set_ctl,
#endif
	.get_optmin = EBT_BASE_CTL,
	.get_optmax = EBT_SO_GET_MAX + 1,
	.get = do_ebt_get_ctl,
#ifdef CONFIG_COMPAT
	.compat_get = compat_do_ebt_get_ctl,
#endif
	.owner = THIS_MODULE,
};
2406
2407 static int __init ebtables_init(void)
2408 {
2409 int ret;
2410
2411 ret = xt_register_target(&ebt_standard_target);
2412 if (ret < 0)
2413 return ret;
2414 ret = nf_register_sockopt(&ebt_sockopts);
2415 if (ret < 0) {
2416 xt_unregister_target(&ebt_standard_target);
2417 return ret;
2418 }
2419
2420 printk(KERN_INFO "Ebtables v2.0 registered\n");
2421 return 0;
2422 }
2423
2424 static void __exit ebtables_fini(void)
2425 {
2426 nf_unregister_sockopt(&ebt_sockopts);
2427 xt_unregister_target(&ebt_standard_target);
2428 printk(KERN_INFO "Ebtables v2.0 unregistered\n");
2429 }
2430
/* Exported for the per-table modules (ebtable_filter, ebtable_nat, ...) */
EXPORT_SYMBOL(ebt_register_table);
EXPORT_SYMBOL(ebt_unregister_table);
EXPORT_SYMBOL(ebt_do_table);
module_init(ebtables_init);
module_exit(ebtables_fini);
MODULE_LICENSE("GPL");