net/ipv4/netfilter/ip_tables.c
1/*
2 * Packet matching code.
3 *
4 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
2e4e6a17 5 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * 19 Jan 2002 Harald Welte <laforge@gnumonks.org>
12 * - increase module usage count as soon as we have rules inside
13 * a table
14 * 08 Oct 2005 Harald Welte <laforge@netfilter.org>
15 * - Generalize into "x_tables" layer and "{ip,ip6,arp}_tables"
1da177e4 16 */
1da177e4 17#include <linux/cache.h>
4fc268d2 18#include <linux/capability.h>
19#include <linux/skbuff.h>
20#include <linux/kmod.h>
21#include <linux/vmalloc.h>
22#include <linux/netdevice.h>
23#include <linux/module.h>
24#include <linux/icmp.h>
25#include <net/ip.h>
2722971c 26#include <net/compat.h>
1da177e4 27#include <asm/uaccess.h>
57b47a53 28#include <linux/mutex.h>
29#include <linux/proc_fs.h>
30#include <linux/err.h>
c8923c6b 31#include <linux/cpumask.h>
1da177e4 32
2e4e6a17 33#include <linux/netfilter/x_tables.h>
34#include <linux/netfilter_ipv4/ip_tables.h>
35
36MODULE_LICENSE("GPL");
37MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
38MODULE_DESCRIPTION("IPv4 packet filter");
39
40/*#define DEBUG_IP_FIREWALL*/
41/*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
42/*#define DEBUG_IP_FIREWALL_USER*/
43
44#ifdef DEBUG_IP_FIREWALL
45#define dprintf(format, args...) printk(format , ## args)
46#else
47#define dprintf(format, args...)
48#endif
49
50#ifdef DEBUG_IP_FIREWALL_USER
51#define duprintf(format, args...) printk(format , ## args)
52#else
53#define duprintf(format, args...)
54#endif
55
56#ifdef CONFIG_NETFILTER_DEBUG
57#define IP_NF_ASSERT(x) \
58do { \
59 if (!(x)) \
60 printk("IP_NF_ASSERT: %s:%s:%u\n", \
61 __FUNCTION__, __FILE__, __LINE__); \
62} while(0)
63#else
64#define IP_NF_ASSERT(x)
65#endif
66
67#if 0
68/* All the better to debug you with... */
69#define static
70#define inline
71#endif
72
73/*
74 We keep a set of rules for each CPU, so we can avoid write-locking
75 them in the softirq when updating the counters and therefore
76 only need to read-lock in the softirq; doing a write_lock_bh() in user
77 context stops packets coming through and allows user context to read
78 the counters or update the rules.
79
80 Hence the start of any table is given by get_table() below. */
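/* Concretely: ipt_do_table() takes read_lock_bh(&table->lock) while walking
   the rules, and the userspace paths (alloc_counters(), do_add_counters())
   take write_lock_bh() before touching the per-CPU entry copies. */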
81
82/* Returns whether matches rule or not. */
83static inline int
84ip_packet_match(const struct iphdr *ip,
85 const char *indev,
86 const char *outdev,
87 const struct ipt_ip *ipinfo,
88 int isfrag)
89{
90 size_t i;
91 unsigned long ret;
92
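/* FWINV(): XOR the raw test result with the corresponding IPT_INV_* flag,
   so a rule with the inversion bit set matches exactly when the plain
   test fails. */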
93#define FWINV(bool,invflg) ((bool) ^ !!(ipinfo->invflags & invflg))
94
95 if (FWINV((ip->saddr&ipinfo->smsk.s_addr) != ipinfo->src.s_addr,
96 IPT_INV_SRCIP)
97 || FWINV((ip->daddr&ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr,
98 IPT_INV_DSTIP)) {
99 dprintf("Source or dest mismatch.\n");
100
101 dprintf("SRC: %u.%u.%u.%u. Mask: %u.%u.%u.%u. Target: %u.%u.%u.%u.%s\n",
102 NIPQUAD(ip->saddr),
103 NIPQUAD(ipinfo->smsk.s_addr),
104 NIPQUAD(ipinfo->src.s_addr),
105 ipinfo->invflags & IPT_INV_SRCIP ? " (INV)" : "");
106 dprintf("DST: %u.%u.%u.%u Mask: %u.%u.%u.%u Target: %u.%u.%u.%u.%s\n",
107 NIPQUAD(ip->daddr),
108 NIPQUAD(ipinfo->dmsk.s_addr),
109 NIPQUAD(ipinfo->dst.s_addr),
110 ipinfo->invflags & IPT_INV_DSTIP ? " (INV)" : "");
111 return 0;
112 }
113
114 /* Look for ifname matches; this should unroll nicely. */
115 for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
116 ret |= (((const unsigned long *)indev)[i]
117 ^ ((const unsigned long *)ipinfo->iniface)[i])
118 & ((const unsigned long *)ipinfo->iniface_mask)[i];
119 }
120
121 if (FWINV(ret != 0, IPT_INV_VIA_IN)) {
122 dprintf("VIA in mismatch (%s vs %s).%s\n",
123 indev, ipinfo->iniface,
124 ipinfo->invflags&IPT_INV_VIA_IN ?" (INV)":"");
125 return 0;
126 }
127
128 for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
129 ret |= (((const unsigned long *)outdev)[i]
130 ^ ((const unsigned long *)ipinfo->outiface)[i])
131 & ((const unsigned long *)ipinfo->outiface_mask)[i];
132 }
133
134 if (FWINV(ret != 0, IPT_INV_VIA_OUT)) {
135 dprintf("VIA out mismatch (%s vs %s).%s\n",
136 outdev, ipinfo->outiface,
137 ipinfo->invflags&IPT_INV_VIA_OUT ?" (INV)":"");
138 return 0;
139 }
140
141 /* Check specific protocol */
142 if (ipinfo->proto
143 && FWINV(ip->protocol != ipinfo->proto, IPT_INV_PROTO)) {
144 dprintf("Packet protocol %hi does not match %hi.%s\n",
145 ip->protocol, ipinfo->proto,
146 ipinfo->invflags&IPT_INV_PROTO ? " (INV)":"");
147 return 0;
148 }
149
150 /* If we have a fragment rule but the packet is not a fragment
151 * then we return zero */
152 if (FWINV((ipinfo->flags&IPT_F_FRAG) && !isfrag, IPT_INV_FRAG)) {
153 dprintf("Fragment rule but not fragment.%s\n",
154 ipinfo->invflags & IPT_INV_FRAG ? " (INV)" : "");
155 return 0;
156 }
157
158 return 1;
159}
160
161static inline int
162ip_checkentry(const struct ipt_ip *ip)
163{
164 if (ip->flags & ~IPT_F_MASK) {
165 duprintf("Unknown flag bits set: %08X\n",
166 ip->flags & ~IPT_F_MASK);
167 return 0;
168 }
169 if (ip->invflags & ~IPT_INV_MASK) {
170 duprintf("Unknown invflag bits set: %08X\n",
171 ip->invflags & ~IPT_INV_MASK);
172 return 0;
173 }
174 return 1;
175}
176
177static unsigned int
178ipt_error(struct sk_buff **pskb,
179 const struct net_device *in,
180 const struct net_device *out,
181 unsigned int hooknum,
c4986734 182 const struct xt_target *target,
fe1cb108 183 const void *targinfo)
184{
185 if (net_ratelimit())
186 printk("ip_tables: error: `%s'\n", (char *)targinfo);
187
188 return NF_DROP;
189}
190
191static inline
192int do_match(struct ipt_entry_match *m,
193 const struct sk_buff *skb,
194 const struct net_device *in,
195 const struct net_device *out,
196 int offset,
197 int *hotdrop)
198{
199 /* Stop iteration if it doesn't match */
200 if (!m->u.kernel.match->match(skb, in, out, m->u.kernel.match, m->data,
201 offset, skb->nh.iph->ihl*4, hotdrop))
202 return 1;
203 else
204 return 0;
205}
206
207static inline struct ipt_entry *
208get_entry(void *base, unsigned int offset)
209{
210 return (struct ipt_entry *)(base + offset);
211}
212
213/* Returns one of the generic firewall policies, like NF_ACCEPT. */
214unsigned int
215ipt_do_table(struct sk_buff **pskb,
216 unsigned int hook,
217 const struct net_device *in,
218 const struct net_device *out,
fe1cb108 219 struct ipt_table *table)
220{
221 static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
222 u_int16_t offset;
223 struct iphdr *ip;
224 u_int16_t datalen;
225 int hotdrop = 0;
226 /* Initializing verdict to NF_DROP keeps gcc happy. */
227 unsigned int verdict = NF_DROP;
228 const char *indev, *outdev;
229 void *table_base;
230 struct ipt_entry *e, *back;
8311731a 231 struct xt_table_info *private;
232
233 /* Initialization */
234 ip = (*pskb)->nh.iph;
235 datalen = (*pskb)->len - ip->ihl * 4;
236 indev = in ? in->name : nulldevname;
237 outdev = out ? out->name : nulldevname;
238 /* We handle fragments by dealing with the first fragment as
239 * if it was a normal packet. All other fragments are treated
240 * normally, except that they will NEVER match rules that ask
241 * things we don't know, ie. tcp syn flag or ports). If the
242 * rule is also a fragment-specific rule, non-fragments won't
243 * match it. */
244 offset = ntohs(ip->frag_off) & IP_OFFSET;
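 /* offset is non-zero only for the second and later fragments; the first
  * fragment (and unfragmented packets) go through the normal match path. */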
245
246 read_lock_bh(&table->lock);
247 IP_NF_ASSERT(table->valid_hooks & (1 << hook));
8311731a 248 private = table->private;
249 table_base = (void *)private->entries[smp_processor_id()];
250 e = get_entry(table_base, private->hook_entry[hook]);
251
252 /* For return from builtin chain */
2e4e6a17 253 back = get_entry(table_base, private->underflow[hook]);
254
255 do {
256 IP_NF_ASSERT(e);
257 IP_NF_ASSERT(back);
258 if (ip_packet_match(ip, indev, outdev, &e->ip, offset)) {
259 struct ipt_entry_target *t;
260
261 if (IPT_MATCH_ITERATE(e, do_match,
262 *pskb, in, out,
263 offset, &hotdrop) != 0)
264 goto no_match;
265
266 ADD_COUNTER(e->counters, ntohs(ip->tot_len), 1);
267
268 t = ipt_get_target(e);
269 IP_NF_ASSERT(t->u.kernel.target);
270 /* Standard target? */
271 if (!t->u.kernel.target->target) {
272 int v;
273
274 v = ((struct ipt_standard_target *)t)->verdict;
275 if (v < 0) {
276 /* Pop from stack? */
277 if (v != IPT_RETURN) {
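 /* Userspace encodes absolute verdicts (NF_ACCEPT, NF_DROP, ...)
  * as -verdict - 1; undo that mapping here. */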
278 verdict = (unsigned)(-v) - 1;
279 break;
280 }
281 e = back;
282 back = get_entry(table_base,
283 back->comefrom);
284 continue;
285 }
286 if (table_base + v != (void *)e + e->next_offset
287 && !(e->ip.flags & IPT_F_GOTO)) {
288 /* Save old back ptr in next entry */
289 struct ipt_entry *next
290 = (void *)e + e->next_offset;
291 next->comefrom
292 = (void *)back - table_base;
293 /* set back pointer to next entry */
294 back = next;
295 }
296
297 e = get_entry(table_base, v);
298 } else {
299 /* Targets which reenter must return
300 abs. verdicts */
301#ifdef CONFIG_NETFILTER_DEBUG
302 ((struct ipt_entry *)table_base)->comefrom
303 = 0xeeeeeeec;
304#endif
305 verdict = t->u.kernel.target->target(pskb,
306 in, out,
307 hook,
1c524830 308 t->u.kernel.target,
fe1cb108 309 t->data);
310
311#ifdef CONFIG_NETFILTER_DEBUG
312 if (((struct ipt_entry *)table_base)->comefrom
313 != 0xeeeeeeec
314 && verdict == IPT_CONTINUE) {
315 printk("Target %s reentered!\n",
316 t->u.kernel.target->name);
317 verdict = NF_DROP;
318 }
319 ((struct ipt_entry *)table_base)->comefrom
320 = 0x57acc001;
321#endif
322 /* Target might have changed stuff. */
323 ip = (*pskb)->nh.iph;
324 datalen = (*pskb)->len - ip->ihl * 4;
325
326 if (verdict == IPT_CONTINUE)
327 e = (void *)e + e->next_offset;
328 else
329 /* Verdict */
330 break;
331 }
332 } else {
333
334 no_match:
335 e = (void *)e + e->next_offset;
336 }
337 } while (!hotdrop);
338
339 read_unlock_bh(&table->lock);
340
341#ifdef DEBUG_ALLOW_ALL
342 return NF_ACCEPT;
343#else
344 if (hotdrop)
345 return NF_DROP;
346 else return verdict;
347#endif
348}
349
350/* All zeroes == unconditional rule. */
351static inline int
352unconditional(const struct ipt_ip *ip)
353{
354 unsigned int i;
355
356 for (i = 0; i < sizeof(*ip)/sizeof(__u32); i++)
357 if (((__u32 *)ip)[i])
358 return 0;
359
360 return 1;
361}
362
363/* Figures out from what hook each rule can be called: returns 0 if
364 there are loops. Puts hook bitmask in comefrom. */
365static int
2e4e6a17 366mark_source_chains(struct xt_table_info *newinfo,
31836064 367 unsigned int valid_hooks, void *entry0)
368{
369 unsigned int hook;
370
371 /* No recursion; use packet counter to save back ptrs (reset
372 to 0 as we leave), and comefrom to save source hook bitmask */
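/* The walk below is a depth-first traversal of every chain reachable from
   each hook entry point: counters.pcnt temporarily stores the back pointer
   to the previous rule on the path (reset to 0 on the way back), and
   comefrom accumulates the set of hooks from which a rule can be reached. */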
373 for (hook = 0; hook < NF_IP_NUMHOOKS; hook++) {
374 unsigned int pos = newinfo->hook_entry[hook];
375 struct ipt_entry *e
31836064 376 = (struct ipt_entry *)(entry0 + pos);
377
378 if (!(valid_hooks & (1 << hook)))
379 continue;
380
381 /* Set initial back pointer. */
382 e->counters.pcnt = pos;
383
384 for (;;) {
385 struct ipt_standard_target *t
386 = (void *)ipt_get_target(e);
387
388 if (e->comefrom & (1 << NF_IP_NUMHOOKS)) {
389 printk("iptables: loop hook %u pos %u %08X.\n",
390 hook, pos, e->comefrom);
391 return 0;
392 }
393 e->comefrom
394 |= ((1 << hook) | (1 << NF_IP_NUMHOOKS));
395
396 /* Unconditional return/END. */
397 if (e->target_offset == sizeof(struct ipt_entry)
398 && (strcmp(t->target.u.user.name,
399 IPT_STANDARD_TARGET) == 0)
400 && t->verdict < 0
401 && unconditional(&e->ip)) {
402 unsigned int oldpos, size;
403
404 /* Return: backtrack through the last
405 big jump. */
406 do {
407 e->comefrom ^= (1<<NF_IP_NUMHOOKS);
408#ifdef DEBUG_IP_FIREWALL_USER
409 if (e->comefrom
410 & (1 << NF_IP_NUMHOOKS)) {
411 duprintf("Back unset "
412 "on hook %u "
413 "rule %u\n",
414 hook, pos);
415 }
416#endif
417 oldpos = pos;
418 pos = e->counters.pcnt;
419 e->counters.pcnt = 0;
420
421 /* We're at the start. */
422 if (pos == oldpos)
423 goto next;
424
425 e = (struct ipt_entry *)
31836064 426 (entry0 + pos);
427 } while (oldpos == pos + e->next_offset);
428
429 /* Move along one */
430 size = e->next_offset;
431 e = (struct ipt_entry *)
31836064 432 (entry0 + pos + size);
433 e->counters.pcnt = pos;
434 pos += size;
435 } else {
436 int newpos = t->verdict;
437
438 if (strcmp(t->target.u.user.name,
439 IPT_STANDARD_TARGET) == 0
440 && newpos >= 0) {
441 /* This a jump; chase it. */
442 duprintf("Jump rule %u -> %u\n",
443 pos, newpos);
444 } else {
445 /* ... this is a fallthru */
446 newpos = pos + e->next_offset;
447 }
448 e = (struct ipt_entry *)
31836064 449 (entry0 + newpos);
450 e->counters.pcnt = pos;
451 pos = newpos;
452 }
453 }
454 next:
455 duprintf("Finished chain %u\n", hook);
456 }
457 return 1;
458}
459
460static inline int
461cleanup_match(struct ipt_entry_match *m, unsigned int *i)
462{
463 if (i && (*i)-- == 0)
464 return 1;
465
466 if (m->u.kernel.match->destroy)
efa74165 467 m->u.kernel.match->destroy(m->u.kernel.match, m->data);
468 module_put(m->u.kernel.match->me);
469 return 0;
470}
471
472static inline int
473standard_check(const struct ipt_entry_target *t,
474 unsigned int max_offset)
475{
476 struct ipt_standard_target *targ = (void *)t;
477
478 /* Check standard info. */
479 if (targ->verdict >= 0
480 && targ->verdict > max_offset - sizeof(struct ipt_entry)) {
481 duprintf("ipt_standard_check: bad verdict (%i)\n",
482 targ->verdict);
483 return 0;
484 }
485 if (targ->verdict < -NF_MAX_VERDICT - 1) {
486 duprintf("ipt_standard_check: bad negative verdict (%i)\n",
487 targ->verdict);
488 return 0;
489 }
490 return 1;
491}
492
493static inline int
494check_match(struct ipt_entry_match *m,
495 const char *name,
496 const struct ipt_ip *ip,
497 unsigned int hookmask,
498 unsigned int *i)
499{
500 struct ipt_match *match;
3cdc7c95 501 int ret;
1da177e4 502
2e4e6a17 503 match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name,
504 m->u.user.revision),
505 "ipt_%s", m->u.user.name);
506 if (IS_ERR(match) || !match) {
507 duprintf("check_match: `%s' not found\n", m->u.user.name);
508 return match ? PTR_ERR(match) : -ENOENT;
509 }
510 m->u.kernel.match = match;
511
512 ret = xt_check_match(match, AF_INET, m->u.match_size - sizeof(*m),
513 name, hookmask, ip->proto,
514 ip->invflags & IPT_INV_PROTO);
515 if (ret)
516 goto err;
517
1da177e4 518 if (m->u.kernel.match->checkentry
1c524830 519 && !m->u.kernel.match->checkentry(name, ip, match, m->data,
1da177e4 520 hookmask)) {
521 duprintf("ip_tables: check failed for `%s'.\n",
522 m->u.kernel.match->name);
523 ret = -EINVAL;
524 goto err;
525 }
526
527 (*i)++;
528 return 0;
529err:
530 module_put(m->u.kernel.match->me);
531 return ret;
532}
533
534static struct ipt_target ipt_standard_target;
535
536static inline int
537check_entry(struct ipt_entry *e, const char *name, unsigned int size,
538 unsigned int *i)
539{
540 struct ipt_entry_target *t;
541 struct ipt_target *target;
542 int ret;
543 unsigned int j;
544
545 if (!ip_checkentry(&e->ip)) {
546 duprintf("ip_tables: ip check failed %p %s.\n", e, name);
547 return -EINVAL;
548 }
549
550 if (e->target_offset + sizeof(struct ipt_entry_target) > e->next_offset)
551 return -EINVAL;
552
553 j = 0;
554 ret = IPT_MATCH_ITERATE(e, check_match, name, &e->ip, e->comefrom, &j);
555 if (ret != 0)
556 goto cleanup_matches;
557
558 t = ipt_get_target(e);
559 ret = -EINVAL;
560 if (e->target_offset + t->u.target_size > e->next_offset)
561 goto cleanup_matches;
562 target = try_then_request_module(xt_find_target(AF_INET,
563 t->u.user.name,
564 t->u.user.revision),
565 "ipt_%s", t->u.user.name);
566 if (IS_ERR(target) || !target) {
567 duprintf("check_entry: `%s' not found\n", t->u.user.name);
568 ret = target ? PTR_ERR(target) : -ENOENT;
569 goto cleanup_matches;
570 }
571 t->u.kernel.target = target;
572
573 ret = xt_check_target(target, AF_INET, t->u.target_size - sizeof(*t),
574 name, e->comefrom, e->ip.proto,
575 e->ip.invflags & IPT_INV_PROTO);
576 if (ret)
577 goto err;
578
579 if (t->u.kernel.target == &ipt_standard_target) {
580 if (!standard_check(t, size)) {
581 ret = -EINVAL;
90d47db4 582 goto err;
583 }
584 } else if (t->u.kernel.target->checkentry
1c524830 585 && !t->u.kernel.target->checkentry(name, e, target, t->data,
1da177e4 586 e->comefrom)) {
587 duprintf("ip_tables: check failed for `%s'.\n",
588 t->u.kernel.target->name);
589 ret = -EINVAL;
3cdc7c95 590 goto err;
591 }
592
593 (*i)++;
594 return 0;
595 err:
596 module_put(t->u.kernel.target->me);
597 cleanup_matches:
598 IPT_MATCH_ITERATE(e, cleanup_match, &j);
599 return ret;
600}
601
602static inline int
603check_entry_size_and_hooks(struct ipt_entry *e,
2e4e6a17 604 struct xt_table_info *newinfo,
605 unsigned char *base,
606 unsigned char *limit,
607 const unsigned int *hook_entries,
608 const unsigned int *underflows,
609 unsigned int *i)
610{
611 unsigned int h;
612
613 if ((unsigned long)e % __alignof__(struct ipt_entry) != 0
614 || (unsigned char *)e + sizeof(struct ipt_entry) >= limit) {
615 duprintf("Bad offset %p\n", e);
616 return -EINVAL;
617 }
618
619 if (e->next_offset
620 < sizeof(struct ipt_entry) + sizeof(struct ipt_entry_target)) {
621 duprintf("checking: element %p size %u\n",
622 e, e->next_offset);
623 return -EINVAL;
624 }
625
626 /* Check hooks & underflows */
627 for (h = 0; h < NF_IP_NUMHOOKS; h++) {
628 if ((unsigned char *)e - base == hook_entries[h])
629 newinfo->hook_entry[h] = hook_entries[h];
630 if ((unsigned char *)e - base == underflows[h])
631 newinfo->underflow[h] = underflows[h];
632 }
633
634 /* FIXME: underflows must be unconditional, standard verdicts
635 < 0 (not IPT_RETURN). --RR */
636
637 /* Clear counters and comefrom */
2e4e6a17 638 e->counters = ((struct xt_counters) { 0, 0 });
639 e->comefrom = 0;
640
641 (*i)++;
642 return 0;
643}
644
645static inline int
646cleanup_entry(struct ipt_entry *e, unsigned int *i)
647{
648 struct ipt_entry_target *t;
649
650 if (i && (*i)-- == 0)
651 return 1;
652
653 /* Cleanup all matches */
654 IPT_MATCH_ITERATE(e, cleanup_match, NULL);
655 t = ipt_get_target(e);
656 if (t->u.kernel.target->destroy)
efa74165 657 t->u.kernel.target->destroy(t->u.kernel.target, t->data);
658 module_put(t->u.kernel.target->me);
659 return 0;
660}
661
662/* Checks and translates the user-supplied table segment (held in
663 newinfo) */
664static int
665translate_table(const char *name,
666 unsigned int valid_hooks,
2e4e6a17 667 struct xt_table_info *newinfo,
31836064 668 void *entry0,
669 unsigned int size,
670 unsigned int number,
671 const unsigned int *hook_entries,
672 const unsigned int *underflows)
673{
674 unsigned int i;
675 int ret;
676
677 newinfo->size = size;
678 newinfo->number = number;
679
680 /* Init all hooks to impossible value. */
681 for (i = 0; i < NF_IP_NUMHOOKS; i++) {
682 newinfo->hook_entry[i] = 0xFFFFFFFF;
683 newinfo->underflow[i] = 0xFFFFFFFF;
684 }
685
686 duprintf("translate_table: size %u\n", newinfo->size);
687 i = 0;
688 /* Walk through entries, checking offsets. */
31836064 689 ret = IPT_ENTRY_ITERATE(entry0, newinfo->size,
690 check_entry_size_and_hooks,
691 newinfo,
692 entry0,
693 entry0 + size,
694 hook_entries, underflows, &i);
695 if (ret != 0)
696 return ret;
697
698 if (i != number) {
699 duprintf("translate_table: %u not %u entries\n",
700 i, number);
701 return -EINVAL;
702 }
703
704 /* Check hooks all assigned */
705 for (i = 0; i < NF_IP_NUMHOOKS; i++) {
706 /* Only hooks which are valid */
707 if (!(valid_hooks & (1 << i)))
708 continue;
709 if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
710 duprintf("Invalid hook entry %u %u\n",
711 i, hook_entries[i]);
712 return -EINVAL;
713 }
714 if (newinfo->underflow[i] == 0xFFFFFFFF) {
715 duprintf("Invalid underflow %u %u\n",
716 i, underflows[i]);
717 return -EINVAL;
718 }
719 }
720
721 /* Finally, each sanity check must pass */
722 i = 0;
31836064 723 ret = IPT_ENTRY_ITERATE(entry0, newinfo->size,
724 check_entry, name, size, &i);
725
726 if (ret != 0)
727 goto cleanup;
728
729 ret = -ELOOP;
730 if (!mark_source_chains(newinfo, valid_hooks, entry0))
731 goto cleanup;
732
733 /* And one copy for every other CPU */
6f912042 734 for_each_possible_cpu(i) {
735 if (newinfo->entries[i] && newinfo->entries[i] != entry0)
736 memcpy(newinfo->entries[i], entry0, newinfo->size);
737 }
738
739 return 0;
740cleanup:
741 IPT_ENTRY_ITERATE(entry0, newinfo->size, cleanup_entry, &i);
742 return ret;
743}
744
745/* Gets counters. */
746static inline int
747add_entry_to_counter(const struct ipt_entry *e,
2e4e6a17 748 struct xt_counters total[],
749 unsigned int *i)
750{
751 ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
752
753 (*i)++;
754 return 0;
755}
756
757static inline int
758set_entry_to_counter(const struct ipt_entry *e,
759 struct ipt_counters total[],
760 unsigned int *i)
761{
762 SET_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
763
764 (*i)++;
765 return 0;
766}
767
1da177e4 768static void
769get_counters(const struct xt_table_info *t,
770 struct xt_counters counters[])
771{
772 unsigned int cpu;
773 unsigned int i;
774 unsigned int curcpu;
775
776 /* Instead of clearing (by a previous call to memset())
777 * the counters and using adds, we set the counters
778 * with data used by 'current' CPU
779 * We don't care about preemption here.
780 */
781 curcpu = raw_smp_processor_id();
782
783 i = 0;
784 IPT_ENTRY_ITERATE(t->entries[curcpu],
785 t->size,
786 set_entry_to_counter,
787 counters,
788 &i);
1da177e4 789
6f912042 790 for_each_possible_cpu(cpu) {
791 if (cpu == curcpu)
792 continue;
1da177e4 793 i = 0;
31836064 794 IPT_ENTRY_ITERATE(t->entries[cpu],
795 t->size,
796 add_entry_to_counter,
797 counters,
798 &i);
799 }
800}
801
2722971c 802static inline struct xt_counters * alloc_counters(struct ipt_table *table)
1da177e4 803{
2722971c 804 unsigned int countersize;
805 struct xt_counters *counters;
806 struct xt_table_info *private = table->private;
807
808 /* We need atomic snapshot of counters: rest doesn't change
809 (other than comefrom, which userspace doesn't care
810 about). */
2e4e6a17 811 countersize = sizeof(struct xt_counters) * private->number;
31836064 812 counters = vmalloc_node(countersize, numa_node_id());
813
814 if (counters == NULL)
2722971c 815 return ERR_PTR(-ENOMEM);
816
817 /* First, sum counters... */
1da177e4 818 write_lock_bh(&table->lock);
2e4e6a17 819 get_counters(private, counters);
820 write_unlock_bh(&table->lock);
821
822 return counters;
823}
824
825static int
826copy_entries_to_user(unsigned int total_size,
827 struct ipt_table *table,
828 void __user *userptr)
829{
830 unsigned int off, num;
831 struct ipt_entry *e;
832 struct xt_counters *counters;
833 struct xt_table_info *private = table->private;
834 int ret = 0;
835 void *loc_cpu_entry;
836
837 counters = alloc_counters(table);
838 if (IS_ERR(counters))
839 return PTR_ERR(counters);
840
841 /* choose the copy that is on our node/cpu, ...
842 * This choice is lazy (because current thread is
843 * allowed to migrate to another cpu)
844 */
2e4e6a17 845 loc_cpu_entry = private->entries[raw_smp_processor_id()];
846 /* ... then copy entire thing ... */
847 if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
848 ret = -EFAULT;
849 goto free_counters;
850 }
851
852 /* FIXME: use iterator macros --RR */
853 /* ... then go back and fix counters and names */
854 for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
855 unsigned int i;
856 struct ipt_entry_match *m;
857 struct ipt_entry_target *t;
858
31836064 859 e = (struct ipt_entry *)(loc_cpu_entry + off);
860 if (copy_to_user(userptr + off
861 + offsetof(struct ipt_entry, counters),
862 &counters[num],
863 sizeof(counters[num])) != 0) {
864 ret = -EFAULT;
865 goto free_counters;
866 }
867
868 for (i = sizeof(struct ipt_entry);
869 i < e->target_offset;
870 i += m->u.match_size) {
871 m = (void *)e + i;
872
873 if (copy_to_user(userptr + off + i
874 + offsetof(struct ipt_entry_match,
875 u.user.name),
876 m->u.kernel.match->name,
877 strlen(m->u.kernel.match->name)+1)
878 != 0) {
879 ret = -EFAULT;
880 goto free_counters;
881 }
882 }
883
884 t = ipt_get_target(e);
885 if (copy_to_user(userptr + off + e->target_offset
886 + offsetof(struct ipt_entry_target,
887 u.user.name),
888 t->u.kernel.target->name,
889 strlen(t->u.kernel.target->name)+1) != 0) {
890 ret = -EFAULT;
891 goto free_counters;
892 }
893 }
894
895 free_counters:
896 vfree(counters);
897 return ret;
898}
899
900#ifdef CONFIG_COMPAT
901struct compat_delta {
902 struct compat_delta *next;
903 u_int16_t offset;
904 short delta;
905};
906
907static struct compat_delta *compat_offsets = NULL;
908
909static int compat_add_offset(u_int16_t offset, short delta)
910{
911 struct compat_delta *tmp;
912
913 tmp = kmalloc(sizeof(struct compat_delta), GFP_KERNEL);
914 if (!tmp)
915 return -ENOMEM;
916 tmp->offset = offset;
917 tmp->delta = delta;
918 if (compat_offsets) {
919 tmp->next = compat_offsets->next;
920 compat_offsets->next = tmp;
921 } else {
922 compat_offsets = tmp;
923 tmp->next = NULL;
924 }
925 return 0;
926}
927
928static void compat_flush_offsets(void)
929{
930 struct compat_delta *tmp, *next;
931
932 if (compat_offsets) {
933 for(tmp = compat_offsets; tmp; tmp = next) {
934 next = tmp->next;
935 kfree(tmp);
936 }
937 compat_offsets = NULL;
938 }
939}
940
941static short compat_calc_jump(u_int16_t offset)
942{
943 struct compat_delta *tmp;
944 short delta;
945
946 for(tmp = compat_offsets, delta = 0; tmp; tmp = tmp->next)
947 if (tmp->offset < offset)
948 delta += tmp->delta;
949 return delta;
950}
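/* Each compat_delta records, for one rule, how much the entry size differs
   between the kernel's native layout and the 32-bit compat layout;
   compat_calc_jump() sums the deltas of all entries located before a given
   offset, which the helpers below use to translate standard-target jump
   offsets between the two layouts. */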
951
9fa492cd 952static void compat_standard_from_user(void *dst, void *src)
2722971c 953{
9fa492cd 954 int v = *(compat_int_t *)src;
2722971c 955
956 if (v > 0)
957 v += compat_calc_jump(v);
958 memcpy(dst, &v, sizeof(v));
959}
46c5ea3c 960
9fa492cd 961static int compat_standard_to_user(void __user *dst, void *src)
2722971c 962{
9fa492cd 963 compat_int_t cv = *(int *)src;
2722971c 964
965 if (cv > 0)
966 cv -= compat_calc_jump(cv);
967 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
968}
969
970static inline int
971compat_calc_match(struct ipt_entry_match *m, int * size)
972{
9fa492cd 973 *size += xt_compat_match_offset(m->u.kernel.match);
974 return 0;
975}
976
977static int compat_calc_entry(struct ipt_entry *e, struct xt_table_info *info,
978 void *base, struct xt_table_info *newinfo)
979{
980 struct ipt_entry_target *t;
981 u_int16_t entry_offset;
982 int off, i, ret;
983
984 off = 0;
985 entry_offset = (void *)e - base;
986 IPT_MATCH_ITERATE(e, compat_calc_match, &off);
987 t = ipt_get_target(e);
9fa492cd 988 off += xt_compat_target_offset(t->u.kernel.target);
989 newinfo->size -= off;
990 ret = compat_add_offset(entry_offset, off);
991 if (ret)
992 return ret;
993
994 for (i = 0; i< NF_IP_NUMHOOKS; i++) {
995 if (info->hook_entry[i] && (e < (struct ipt_entry *)
996 (base + info->hook_entry[i])))
997 newinfo->hook_entry[i] -= off;
998 if (info->underflow[i] && (e < (struct ipt_entry *)
999 (base + info->underflow[i])))
1000 newinfo->underflow[i] -= off;
1001 }
1002 return 0;
1003}
1004
1005static int compat_table_info(struct xt_table_info *info,
1006 struct xt_table_info *newinfo)
1007{
1008 void *loc_cpu_entry;
1009 int i;
1010
1011 if (!newinfo || !info)
1012 return -EINVAL;
1013
1014 memset(newinfo, 0, sizeof(struct xt_table_info));
1015 newinfo->size = info->size;
1016 newinfo->number = info->number;
1017 for (i = 0; i < NF_IP_NUMHOOKS; i++) {
1018 newinfo->hook_entry[i] = info->hook_entry[i];
1019 newinfo->underflow[i] = info->underflow[i];
1020 }
1021 loc_cpu_entry = info->entries[raw_smp_processor_id()];
1022 return IPT_ENTRY_ITERATE(loc_cpu_entry, info->size,
1023 compat_calc_entry, info, loc_cpu_entry, newinfo);
1024}
1025#endif
1026
1027static int get_info(void __user *user, int *len, int compat)
1028{
1029 char name[IPT_TABLE_MAXNAMELEN];
1030 struct ipt_table *t;
1031 int ret;
1032
1033 if (*len != sizeof(struct ipt_getinfo)) {
1034 duprintf("length %u != %u\n", *len,
1035 (unsigned int)sizeof(struct ipt_getinfo));
1036 return -EINVAL;
1037 }
1038
1039 if (copy_from_user(name, user, sizeof(name)) != 0)
1040 return -EFAULT;
1041
1042 name[IPT_TABLE_MAXNAMELEN-1] = '\0';
1043#ifdef CONFIG_COMPAT
1044 if (compat)
1045 xt_compat_lock(AF_INET);
1046#endif
1047 t = try_then_request_module(xt_find_table_lock(AF_INET, name),
1048 "iptable_%s", name);
1049 if (t && !IS_ERR(t)) {
1050 struct ipt_getinfo info;
1051 struct xt_table_info *private = t->private;
1052
1053#ifdef CONFIG_COMPAT
1054 if (compat) {
1055 struct xt_table_info tmp;
1056 ret = compat_table_info(private, &tmp);
1057 compat_flush_offsets();
1058 private = &tmp;
1059 }
1060#endif
1061 info.valid_hooks = t->valid_hooks;
1062 memcpy(info.hook_entry, private->hook_entry,
1063 sizeof(info.hook_entry));
1064 memcpy(info.underflow, private->underflow,
1065 sizeof(info.underflow));
1066 info.num_entries = private->number;
1067 info.size = private->size;
1068 strcpy(info.name, name);
1069
1070 if (copy_to_user(user, &info, *len) != 0)
1071 ret = -EFAULT;
1072 else
1073 ret = 0;
1074
1075 xt_table_unlock(t);
1076 module_put(t->me);
1077 } else
1078 ret = t ? PTR_ERR(t) : -ENOENT;
1079#ifdef CONFIG_COMPAT
1080 if (compat)
1081 xt_compat_unlock(AF_INET);
1082#endif
1083 return ret;
1084}
1085
1086static int
1087get_entries(struct ipt_get_entries __user *uptr, int *len)
1088{
1089 int ret;
1090 struct ipt_get_entries get;
1091 struct ipt_table *t;
1092
1093 if (*len < sizeof(get)) {
1094 duprintf("get_entries: %u < %d\n", *len,
1095 (unsigned int)sizeof(get));
1096 return -EINVAL;
1097 }
1098 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1099 return -EFAULT;
1100 if (*len != sizeof(struct ipt_get_entries) + get.size) {
1101 duprintf("get_entries: %u != %u\n", *len,
1102 (unsigned int)(sizeof(struct ipt_get_entries) +
1103 get.size));
1104 return -EINVAL;
1105 }
1106
1107 t = xt_find_table_lock(AF_INET, get.name);
1108 if (t && !IS_ERR(t)) {
1109 struct xt_table_info *private = t->private;
1110 duprintf("t->private->number = %u\n",
1111 private->number);
1112 if (get.size == private->size)
1113 ret = copy_entries_to_user(private->size,
1114 t, uptr->entrytable);
1115 else {
1116 duprintf("get_entries: I've got %u not %u!\n",
1117 private->size,
1118 get.size);
1119 ret = -EINVAL;
1120 }
1121 module_put(t->me);
1122 xt_table_unlock(t);
1123 } else
1124 ret = t ? PTR_ERR(t) : -ENOENT;
1125
1126 return ret;
1127}
1128
1129static int
1130__do_replace(const char *name, unsigned int valid_hooks,
1131 struct xt_table_info *newinfo, unsigned int num_counters,
1132 void __user *counters_ptr)
1133{
1134 int ret;
1135 struct ipt_table *t;
1136 struct xt_table_info *oldinfo;
1137 struct xt_counters *counters;
1138 void *loc_cpu_old_entry;
1139
1140 ret = 0;
1141 counters = vmalloc(num_counters * sizeof(struct xt_counters));
1142 if (!counters) {
1143 ret = -ENOMEM;
1144 goto out;
1145 }
1146
1147 t = try_then_request_module(xt_find_table_lock(AF_INET, name),
1148 "iptable_%s", name);
1149 if (!t || IS_ERR(t)) {
1150 ret = t ? PTR_ERR(t) : -ENOENT;
1151 goto free_newinfo_counters_untrans;
1152 }
1153
1154 /* You lied! */
1155 if (valid_hooks != t->valid_hooks) {
1156 duprintf("Valid hook crap: %08X vs %08X\n",
1157 valid_hooks, t->valid_hooks);
1158 ret = -EINVAL;
1159 goto put_module;
1160 }
1161
1162 oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
1163 if (!oldinfo)
1164 goto put_module;
1165
1166 /* Update module usage count based on number of rules */
1167 duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
1168 oldinfo->number, oldinfo->initial_entries, newinfo->number);
1169 if ((oldinfo->number > oldinfo->initial_entries) ||
1170 (newinfo->number <= oldinfo->initial_entries))
1171 module_put(t->me);
1172 if ((oldinfo->number > oldinfo->initial_entries) &&
1173 (newinfo->number <= oldinfo->initial_entries))
1174 module_put(t->me);
1175
1176 /* Get the old counters. */
1177 get_counters(oldinfo, counters);
1178 /* Decrease module usage counts and free resource */
1179 loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
1180 IPT_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry,NULL);
1181 xt_free_table_info(oldinfo);
1182 if (copy_to_user(counters_ptr, counters,
1183 sizeof(struct xt_counters) * num_counters) != 0)
1184 ret = -EFAULT;
1185 vfree(counters);
1186 xt_table_unlock(t);
1187 return ret;
1188
1189 put_module:
1190 module_put(t->me);
1191 xt_table_unlock(t);
1192 free_newinfo_counters_untrans:
1193 vfree(counters);
1194 out:
1195 return ret;
1196}
1197
1198static int
1199do_replace(void __user *user, unsigned int len)
1200{
1201 int ret;
1202 struct ipt_replace tmp;
1203 struct xt_table_info *newinfo;
1204 void *loc_cpu_entry;
1205
1206 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1207 return -EFAULT;
1208
1209 /* Hack: Causes ipchains to give correct error msg --RR */
1210 if (len != sizeof(tmp) + tmp.size)
1211 return -ENOPROTOOPT;
1212
1213 /* overflow check */
1214 if (tmp.size >= (INT_MAX - sizeof(struct xt_table_info)) / NR_CPUS -
1215 SMP_CACHE_BYTES)
1216 return -ENOMEM;
1217 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1218 return -ENOMEM;
1219
1220 newinfo = xt_alloc_table_info(tmp.size);
1221 if (!newinfo)
1222 return -ENOMEM;
1223
1224 /* choose the copy that is our node/cpu */
1225 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1226 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1227 tmp.size) != 0) {
1228 ret = -EFAULT;
1229 goto free_newinfo;
1230 }
1231
1232 ret = translate_table(tmp.name, tmp.valid_hooks,
1233 newinfo, loc_cpu_entry, tmp.size, tmp.num_entries,
1234 tmp.hook_entry, tmp.underflow);
1235 if (ret != 0)
1236 goto free_newinfo;
1237
1238 duprintf("ip_tables: Translated table\n");
1239
1240 ret = __do_replace(tmp.name, tmp.valid_hooks,
1241 newinfo, tmp.num_counters,
1242 tmp.counters);
1243 if (ret)
1244 goto free_newinfo_untrans;
1245 return 0;
1246
1247 free_newinfo_untrans:
1248 IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry,NULL);
1249 free_newinfo:
1250 xt_free_table_info(newinfo);
1251 return ret;
1252}
1253
1254/* We're lazy, and add to the first CPU; overflow works its fey magic
1255 * and everything is OK. */
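/* (get_counters() later sums all per-CPU copies, so adding the userspace
 * increments to a single copy is sufficient.) */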
1256static inline int
1257add_counter_to_entry(struct ipt_entry *e,
1258 const struct xt_counters addme[],
1259 unsigned int *i)
1260{
1261#if 0
1262 duprintf("add_counter: Entry %u %lu/%lu + %lu/%lu\n",
1263 *i,
1264 (long unsigned int)e->counters.pcnt,
1265 (long unsigned int)e->counters.bcnt,
1266 (long unsigned int)addme[*i].pcnt,
1267 (long unsigned int)addme[*i].bcnt);
1268#endif
1269
1270 ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
1271
1272 (*i)++;
1273 return 0;
1274}
1275
1276static int
1277do_add_counters(void __user *user, unsigned int len, int compat)
1278{
1279 unsigned int i;
1280 struct xt_counters_info tmp;
1281 struct xt_counters *paddc;
1282 unsigned int num_counters;
1283 char *name;
1284 int size;
1285 void *ptmp;
1286 struct ipt_table *t;
1287 struct xt_table_info *private;
1288 int ret = 0;
1289 void *loc_cpu_entry;
1290#ifdef CONFIG_COMPAT
1291 struct compat_xt_counters_info compat_tmp;
1292
1293 if (compat) {
1294 ptmp = &compat_tmp;
1295 size = sizeof(struct compat_xt_counters_info);
1296 } else
1297#endif
1298 {
1299 ptmp = &tmp;
1300 size = sizeof(struct xt_counters_info);
1301 }
1302
1303 if (copy_from_user(ptmp, user, size) != 0)
1304 return -EFAULT;
1305
1306#ifdef CONFIG_COMPAT
1307 if (compat) {
1308 num_counters = compat_tmp.num_counters;
1309 name = compat_tmp.name;
1310 } else
1311#endif
1312 {
1313 num_counters = tmp.num_counters;
1314 name = tmp.name;
1315 }
1316
1317 if (len != size + num_counters * sizeof(struct xt_counters))
1318 return -EINVAL;
1319
1320 paddc = vmalloc_node(len - size, numa_node_id());
1321 if (!paddc)
1322 return -ENOMEM;
1323
1324 if (copy_from_user(paddc, user + size, len - size) != 0) {
1325 ret = -EFAULT;
1326 goto free;
1327 }
1328
1329 t = xt_find_table_lock(AF_INET, name);
1330 if (!t || IS_ERR(t)) {
1331 ret = t ? PTR_ERR(t) : -ENOENT;
1332 goto free;
1333 }
1334
1335 write_lock_bh(&t->lock);
1336 private = t->private;
1337 if (private->number != num_counters) {
1338 ret = -EINVAL;
1339 goto unlock_up_free;
1340 }
1341
1342 i = 0;
1343 /* Choose the copy that is on our node */
1344 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1345 IPT_ENTRY_ITERATE(loc_cpu_entry,
1346 private->size,
1347 add_counter_to_entry,
1348 paddc,
1349 &i);
1350 unlock_up_free:
1351 write_unlock_bh(&t->lock);
1352 xt_table_unlock(t);
1353 module_put(t->me);
1354 free:
1355 vfree(paddc);
1356
1357 return ret;
1358}
1359
1360#ifdef CONFIG_COMPAT
1361struct compat_ipt_replace {
1362 char name[IPT_TABLE_MAXNAMELEN];
1363 u32 valid_hooks;
1364 u32 num_entries;
1365 u32 size;
1366 u32 hook_entry[NF_IP_NUMHOOKS];
1367 u32 underflow[NF_IP_NUMHOOKS];
1368 u32 num_counters;
1369 compat_uptr_t counters; /* struct ipt_counters * */
1370 struct compat_ipt_entry entries[0];
1371};
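/* This mirrors struct ipt_replace as seen by 32-bit userspace: the counters
   pointer shrinks to a compat_uptr_t and the trailing rules use the compat
   entry layout. */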
1372
1373static inline int compat_copy_match_to_user(struct ipt_entry_match *m,
3e597c60 1374 void __user **dstptr, compat_uint_t *size)
2722971c 1375{
9fa492cd 1376 return xt_compat_match_to_user(m, dstptr, size);
1377}
1378
1379static int compat_copy_entry_to_user(struct ipt_entry *e,
3e597c60 1380 void __user **dstptr, compat_uint_t *size)
2722971c 1381{
3e597c60 1382 struct ipt_entry_target *t;
1383 struct compat_ipt_entry __user *ce;
1384 u_int16_t target_offset, next_offset;
1385 compat_uint_t origsize;
1386 int ret;
1387
1388 ret = -EFAULT;
1389 origsize = *size;
1390 ce = (struct compat_ipt_entry __user *)*dstptr;
7800007c 1391 if (copy_to_user(ce, e, sizeof(struct ipt_entry)))
1392 goto out;
1393
1394 *dstptr += sizeof(struct compat_ipt_entry);
1395 ret = IPT_MATCH_ITERATE(e, compat_copy_match_to_user, dstptr, size);
1396 target_offset = e->target_offset - (origsize - *size);
1397 if (ret)
1398 goto out;
1399 t = ipt_get_target(e);
9fa492cd 1400 ret = xt_compat_target_to_user(t, dstptr, size);
1401 if (ret)
1402 goto out;
1403 ret = -EFAULT;
1404 next_offset = e->next_offset - (origsize - *size);
7800007c 1405 if (put_user(target_offset, &ce->target_offset))
2722971c 1406 goto out;
7800007c 1407 if (put_user(next_offset, &ce->next_offset))
1408 goto out;
1409 return 0;
1410out:
1411 return ret;
1412}
1413
1414static inline int
1415compat_check_calc_match(struct ipt_entry_match *m,
1416 const char *name,
1417 const struct ipt_ip *ip,
1418 unsigned int hookmask,
1419 int *size, int *i)
1420{
1421 struct ipt_match *match;
1422
1423 match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name,
1424 m->u.user.revision),
1425 "ipt_%s", m->u.user.name);
1426 if (IS_ERR(match) || !match) {
1427 duprintf("compat_check_calc_match: `%s' not found\n",
1428 m->u.user.name);
1429 return match ? PTR_ERR(match) : -ENOENT;
1430 }
1431 m->u.kernel.match = match;
9fa492cd 1432 *size += xt_compat_match_offset(match);
1433
1434 (*i)++;
1435 return 0;
1436}
1437
1438static inline int
1439check_compat_entry_size_and_hooks(struct ipt_entry *e,
1440 struct xt_table_info *newinfo,
1441 unsigned int *size,
1442 unsigned char *base,
1443 unsigned char *limit,
1444 unsigned int *hook_entries,
1445 unsigned int *underflows,
1446 unsigned int *i,
1447 const char *name)
1448{
1449 struct ipt_entry_target *t;
1450 struct ipt_target *target;
1451 u_int16_t entry_offset;
1452 int ret, off, h, j;
1453
1454 duprintf("check_compat_entry_size_and_hooks %p\n", e);
1455 if ((unsigned long)e % __alignof__(struct compat_ipt_entry) != 0
1456 || (unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit) {
1457 duprintf("Bad offset %p, limit = %p\n", e, limit);
1458 return -EINVAL;
1459 }
1460
1461 if (e->next_offset < sizeof(struct compat_ipt_entry) +
1462 sizeof(struct compat_xt_entry_target)) {
1463 duprintf("checking: element %p size %u\n",
1464 e, e->next_offset);
1465 return -EINVAL;
1466 }
1467
1468 if (!ip_checkentry(&e->ip)) {
1469 duprintf("ip_tables: ip check failed %p %s.\n", e, name);
1470 return -EINVAL;
1471 }
1472
1473 if (e->target_offset + sizeof(struct compat_xt_entry_target) >
1474 e->next_offset)
1475 return -EINVAL;
1476
1477 off = 0;
1478 entry_offset = (void *)e - (void *)base;
1479 j = 0;
1480 ret = IPT_MATCH_ITERATE(e, compat_check_calc_match, name, &e->ip,
1481 e->comefrom, &off, &j);
1482 if (ret != 0)
bec71b16 1483 goto cleanup_matches;
1484
1485 t = ipt_get_target(e);
1486 ret = -EINVAL;
1487 if (e->target_offset + t->u.target_size > e->next_offset)
1488 goto cleanup_matches;
1489 target = try_then_request_module(xt_find_target(AF_INET,
1490 t->u.user.name,
1491 t->u.user.revision),
1492 "ipt_%s", t->u.user.name);
1493 if (IS_ERR(target) || !target) {
1494 duprintf("check_entry: `%s' not found\n", t->u.user.name);
1495 ret = target ? PTR_ERR(target) : -ENOENT;
bec71b16 1496 goto cleanup_matches;
1497 }
1498 t->u.kernel.target = target;
1499
9fa492cd 1500 off += xt_compat_target_offset(target);
1501 *size += off;
1502 ret = compat_add_offset(entry_offset, off);
1503 if (ret)
1504 goto out;
1505
1506 /* Check hooks & underflows */
1507 for (h = 0; h < NF_IP_NUMHOOKS; h++) {
1508 if ((unsigned char *)e - base == hook_entries[h])
1509 newinfo->hook_entry[h] = hook_entries[h];
1510 if ((unsigned char *)e - base == underflows[h])
1511 newinfo->underflow[h] = underflows[h];
1512 }
1513
1514 /* Clear counters and comefrom */
1515 e->counters = ((struct ipt_counters) { 0, 0 });
1516 e->comefrom = 0;
1517
1518 (*i)++;
1519 return 0;
bec71b16 1520
2722971c 1521out:
1522 module_put(t->u.kernel.target->me);
1523cleanup_matches:
1524 IPT_MATCH_ITERATE(e, cleanup_match, &j);
1525 return ret;
1526}
1527
1528static inline int compat_copy_match_from_user(struct ipt_entry_match *m,
1529 void **dstptr, compat_uint_t *size, const char *name,
920b868a 1530 const struct ipt_ip *ip, unsigned int hookmask)
1531{
1532 struct ipt_entry_match *dm;
1533 struct ipt_match *match;
1534 int ret;
1535
1536 dm = (struct ipt_entry_match *)*dstptr;
1537 match = m->u.kernel.match;
9fa492cd 1538 xt_compat_match_from_user(m, dstptr, size);
1539
1540 ret = xt_check_match(match, AF_INET, dm->u.match_size - sizeof(*dm),
1541 name, hookmask, ip->proto,
1542 ip->invflags & IPT_INV_PROTO);
920b868a 1543 if (!ret && m->u.kernel.match->checkentry
2722971c 1544 && !m->u.kernel.match->checkentry(name, ip, match, dm->data,
1545 hookmask)) {
1546 duprintf("ip_tables: check failed for `%s'.\n",
1547 m->u.kernel.match->name);
bec71b16 1548 ret = -EINVAL;
2722971c 1549 }
bec71b16 1550 return ret;
1551}
1552
1553static int compat_copy_entry_from_user(struct ipt_entry *e, void **dstptr,
1554 unsigned int *size, const char *name,
1555 struct xt_table_info *newinfo, unsigned char *base)
1556{
1557 struct ipt_entry_target *t;
1558 struct ipt_target *target;
1559 struct ipt_entry *de;
1560 unsigned int origsize;
920b868a 1561 int ret, h;
1562
1563 ret = 0;
1564 origsize = *size;
1565 de = (struct ipt_entry *)*dstptr;
1566 memcpy(de, e, sizeof(struct ipt_entry));
1567
1568 *dstptr += sizeof(struct compat_ipt_entry);
1569 ret = IPT_MATCH_ITERATE(e, compat_copy_match_from_user, dstptr, size,
920b868a 1570 name, &de->ip, de->comefrom);
2722971c 1571 if (ret)
920b868a 1572 goto err;
1573 de->target_offset = e->target_offset - (origsize - *size);
1574 t = ipt_get_target(e);
1575 target = t->u.kernel.target;
9fa492cd 1576 xt_compat_target_from_user(t, dstptr, size);
1577
1578 de->next_offset = e->next_offset - (origsize - *size);
1579 for (h = 0; h < NF_IP_NUMHOOKS; h++) {
1580 if ((unsigned char *)de - base < newinfo->hook_entry[h])
1581 newinfo->hook_entry[h] -= origsize - *size;
1582 if ((unsigned char *)de - base < newinfo->underflow[h])
1583 newinfo->underflow[h] -= origsize - *size;
1584 }
1585
1586 t = ipt_get_target(de);
1587 target = t->u.kernel.target;
1588 ret = xt_check_target(target, AF_INET, t->u.target_size - sizeof(*t),
1589 name, e->comefrom, e->ip.proto,
1590 e->ip.invflags & IPT_INV_PROTO);
1591 if (ret)
bec71b16 1592 goto err;
1593
1594 ret = -EINVAL;
1595 if (t->u.kernel.target == &ipt_standard_target) {
1596 if (!standard_check(t, *size))
bec71b16 1597 goto err;
1598 } else if (t->u.kernel.target->checkentry
1599 && !t->u.kernel.target->checkentry(name, de, target,
efa74165 1600 t->data, de->comefrom)) {
1601 duprintf("ip_tables: compat: check failed for `%s'.\n",
1602 t->u.kernel.target->name);
bec71b16 1603 goto err;
1604 }
1605 ret = 0;
bec71b16 1606err:
1607 return ret;
1608}
1609
1da177e4 1610static int
1611translate_compat_table(const char *name,
1612 unsigned int valid_hooks,
1613 struct xt_table_info **pinfo,
1614 void **pentry0,
1615 unsigned int total_size,
1616 unsigned int number,
1617 unsigned int *hook_entries,
1618 unsigned int *underflows)
1da177e4 1619{
920b868a 1620 unsigned int i, j;
1621 struct xt_table_info *newinfo, *info;
1622 void *pos, *entry0, *entry1;
1623 unsigned int size;
1da177e4 1624 int ret;
1da177e4 1625
1626 info = *pinfo;
1627 entry0 = *pentry0;
1628 size = total_size;
1629 info->number = number;
1630
1631 /* Init all hooks to impossible value. */
1632 for (i = 0; i < NF_IP_NUMHOOKS; i++) {
1633 info->hook_entry[i] = 0xFFFFFFFF;
1634 info->underflow[i] = 0xFFFFFFFF;
1635 }
1636
1637 duprintf("translate_compat_table: size %u\n", info->size);
920b868a 1638 j = 0;
1639 xt_compat_lock(AF_INET);
1640 /* Walk through entries, checking offsets. */
1641 ret = IPT_ENTRY_ITERATE(entry0, total_size,
1642 check_compat_entry_size_and_hooks,
1643 info, &size, entry0,
1644 entry0 + total_size,
920b868a 1645 hook_entries, underflows, &j, name);
1646 if (ret != 0)
1647 goto out_unlock;
1648
1649 ret = -EINVAL;
920b868a 1650 if (j != number) {
2722971c 1651 duprintf("translate_compat_table: %u not %u entries\n",
920b868a 1652 j, number);
1653 goto out_unlock;
1654 }
1655
1656 /* Check hooks all assigned */
1657 for (i = 0; i < NF_IP_NUMHOOKS; i++) {
1658 /* Only hooks which are valid */
1659 if (!(valid_hooks & (1 << i)))
1660 continue;
1661 if (info->hook_entry[i] == 0xFFFFFFFF) {
1662 duprintf("Invalid hook entry %u %u\n",
1663 i, hook_entries[i]);
1664 goto out_unlock;
1da177e4 1665 }
1666 if (info->underflow[i] == 0xFFFFFFFF) {
1667 duprintf("Invalid underflow %u %u\n",
1668 i, underflows[i]);
1669 goto out_unlock;
1670 }
1671 }
1672
1673 ret = -ENOMEM;
1674 newinfo = xt_alloc_table_info(size);
1675 if (!newinfo)
1676 goto out_unlock;
1677
1678 newinfo->number = number;
1679 for (i = 0; i < NF_IP_NUMHOOKS; i++) {
1680 newinfo->hook_entry[i] = info->hook_entry[i];
1681 newinfo->underflow[i] = info->underflow[i];
1682 }
1683 entry1 = newinfo->entries[raw_smp_processor_id()];
1684 pos = entry1;
1685 size = total_size;
1686 ret = IPT_ENTRY_ITERATE(entry0, total_size,
1687 compat_copy_entry_from_user, &pos, &size,
1688 name, newinfo, entry1);
1689 compat_flush_offsets();
1690 xt_compat_unlock(AF_INET);
1691 if (ret)
1692 goto free_newinfo;
1693
1694 ret = -ELOOP;
1695 if (!mark_source_chains(newinfo, valid_hooks, entry1))
1696 goto free_newinfo;
1697
1698 /* And one copy for every other CPU */
fb1bb34d 1699 for_each_possible_cpu(i)
1700 if (newinfo->entries[i] && newinfo->entries[i] != entry1)
1701 memcpy(newinfo->entries[i], entry1, newinfo->size);
1702
1703 *pinfo = newinfo;
1704 *pentry0 = entry1;
1705 xt_free_table_info(info);
1706 return 0;
1da177e4 1707
1708free_newinfo:
1709 xt_free_table_info(newinfo);
1710out:
920b868a 1711 IPT_ENTRY_ITERATE(entry0, total_size, cleanup_entry, &j);
1da177e4 1712 return ret;
2722971c 1713out_unlock:
ef4512e7 1714 compat_flush_offsets();
1715 xt_compat_unlock(AF_INET);
1716 goto out;
1717}
1718
1719static int
2722971c 1720compat_do_replace(void __user *user, unsigned int len)
1721{
1722 int ret;
1723 struct compat_ipt_replace tmp;
1724 struct xt_table_info *newinfo;
1725 void *loc_cpu_entry;
1726
1727 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1728 return -EFAULT;
1729
1730 /* Hack: Causes ipchains to give correct error msg --RR */
1731 if (len != sizeof(tmp) + tmp.size)
1732 return -ENOPROTOOPT;
1733
1734 /* overflow check */
1735 if (tmp.size >= (INT_MAX - sizeof(struct xt_table_info)) / NR_CPUS -
1736 SMP_CACHE_BYTES)
1737 return -ENOMEM;
1738 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1739 return -ENOMEM;
1740
2e4e6a17 1741 newinfo = xt_alloc_table_info(tmp.size);
1742 if (!newinfo)
1743 return -ENOMEM;
1744
1745 /* choose the copy that is our node/cpu */
1746 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1747 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1748 tmp.size) != 0) {
1749 ret = -EFAULT;
1750 goto free_newinfo;
1751 }
1752
1753 ret = translate_compat_table(tmp.name, tmp.valid_hooks,
1754 &newinfo, &loc_cpu_entry, tmp.size,
1755 tmp.num_entries, tmp.hook_entry, tmp.underflow);
1756 if (ret != 0)
1da177e4 1757 goto free_newinfo;
1da177e4 1758
2722971c 1759 duprintf("compat_do_replace: Translated table\n");
1da177e4 1760
1761 ret = __do_replace(tmp.name, tmp.valid_hooks,
1762 newinfo, tmp.num_counters,
1763 compat_ptr(tmp.counters));
1764 if (ret)
1765 goto free_newinfo_untrans;
1766 return 0;
1da177e4 1767
1768 free_newinfo_untrans:
1769 IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry,NULL);
1770 free_newinfo:
1771 xt_free_table_info(newinfo);
1772 return ret;
1773}
1da177e4 1774
1775static int
1776compat_do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user,
1777 unsigned int len)
1778{
1779 int ret;
1da177e4 1780
1781 if (!capable(CAP_NET_ADMIN))
1782 return -EPERM;
1da177e4 1783
1784 switch (cmd) {
1785 case IPT_SO_SET_REPLACE:
1786 ret = compat_do_replace(user, len);
1787 break;
1da177e4 1788
1789 case IPT_SO_SET_ADD_COUNTERS:
1790 ret = do_add_counters(user, len, 1);
1791 break;
1792
1793 default:
1794 duprintf("do_ipt_set_ctl: unknown request %i\n", cmd);
1795 ret = -EINVAL;
1796 }
1da177e4 1797
1798 return ret;
1799}
1800
2722971c 1801struct compat_ipt_get_entries
1da177e4 1802{
1803 char name[IPT_TABLE_MAXNAMELEN];
1804 compat_uint_t size;
1805 struct compat_ipt_entry entrytable[0];
1806};
1da177e4 1807
1808static int compat_copy_entries_to_user(unsigned int total_size,
1809 struct ipt_table *table, void __user *userptr)
1810{
1811 unsigned int off, num;
1812 struct compat_ipt_entry e;
1813 struct xt_counters *counters;
1814 struct xt_table_info *private = table->private;
1815 void __user *pos;
1816 unsigned int size;
1817 int ret = 0;
1818 void *loc_cpu_entry;
1da177e4 1819
1820 counters = alloc_counters(table);
1821 if (IS_ERR(counters))
1822 return PTR_ERR(counters);
1823
1824 /* choose the copy that is on our node/cpu, ...
1825 * This choice is lazy (because current thread is
1826 * allowed to migrate to another cpu)
1827 */
1828 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1829 pos = userptr;
1830 size = total_size;
1831 ret = IPT_ENTRY_ITERATE(loc_cpu_entry, total_size,
1832 compat_copy_entry_to_user, &pos, &size);
1833 if (ret)
1834 goto free_counters;
1835
1836 /* ... then go back and fix counters and names */
1837 for (off = 0, num = 0; off < size; off += e.next_offset, num++) {
1838 unsigned int i;
1839 struct ipt_entry_match m;
1840 struct ipt_entry_target t;
1841
1842 ret = -EFAULT;
1843 if (copy_from_user(&e, userptr + off,
1844 sizeof(struct compat_ipt_entry)))
1845 goto free_counters;
1846 if (copy_to_user(userptr + off +
1847 offsetof(struct compat_ipt_entry, counters),
1848 &counters[num], sizeof(counters[num])))
1849 goto free_counters;
1850
1851 for (i = sizeof(struct compat_ipt_entry);
1852 i < e.target_offset; i += m.u.match_size) {
1853 if (copy_from_user(&m, userptr + off + i,
1854 sizeof(struct ipt_entry_match)))
1855 goto free_counters;
1856 if (copy_to_user(userptr + off + i +
1857 offsetof(struct ipt_entry_match, u.user.name),
1858 m.u.kernel.match->name,
1859 strlen(m.u.kernel.match->name) + 1))
1860 goto free_counters;
1861 }
1862
1863 if (copy_from_user(&t, userptr + off + e.target_offset,
1864 sizeof(struct ipt_entry_target)))
1865 goto free_counters;
1866 if (copy_to_user(userptr + off + e.target_offset +
1867 offsetof(struct ipt_entry_target, u.user.name),
1868 t.u.kernel.target->name,
1869 strlen(t.u.kernel.target->name) + 1))
1870 goto free_counters;
1871 }
1872 ret = 0;
1873free_counters:
1874 vfree(counters);
1875 return ret;
1876}
1877
1878static int
2722971c 1879compat_get_entries(struct compat_ipt_get_entries __user *uptr, int *len)
1da177e4 1880{
1881 int ret;
1882 struct compat_ipt_get_entries get;
1da177e4 1883 struct ipt_table *t;
1da177e4 1884
1da177e4 1885
1886 if (*len < sizeof(get)) {
1887 duprintf("compat_get_entries: %u < %u\n",
1888 *len, (unsigned int)sizeof(get));
1da177e4 1889 return -EINVAL;
2722971c 1890 }
1da177e4 1891
1892 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1893 return -EFAULT;
1da177e4 1894
1895 if (*len != sizeof(struct compat_ipt_get_entries) + get.size) {
1896 duprintf("compat_get_entries: %u != %u\n", *len,
1897 (unsigned int)(sizeof(struct compat_ipt_get_entries) +
1898 get.size));
1899 return -EINVAL;
1900 }
1901
1902 xt_compat_lock(AF_INET);
1903 t = xt_find_table_lock(AF_INET, get.name);
1904 if (t && !IS_ERR(t)) {
1905 struct xt_table_info *private = t->private;
1906 struct xt_table_info info;
1907 duprintf("t->private->number = %u\n",
1908 private->number);
1909 ret = compat_table_info(private, &info);
1910 if (!ret && get.size == info.size) {
1911 ret = compat_copy_entries_to_user(private->size,
1912 t, uptr->entrytable);
1913 } else if (!ret) {
1914 duprintf("compat_get_entries: I've got %u not %u!\n",
1915 private->size,
1916 get.size);
1917 ret = -EINVAL;
1918 }
1919 compat_flush_offsets();
1920 module_put(t->me);
1921 xt_table_unlock(t);
1922 } else
1da177e4 1923 ret = t ? PTR_ERR(t) : -ENOENT;
1da177e4 1924
1925 xt_compat_unlock(AF_INET);
1926 return ret;
1927}
1da177e4 1928
1929static int do_ipt_get_ctl(struct sock *, int, void __user *, int *);
1930
1931static int
1932compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1933{
1934 int ret;
1da177e4 1935
1936 if (!capable(CAP_NET_ADMIN))
1937 return -EPERM;
1938
1939 switch (cmd) {
1940 case IPT_SO_GET_INFO:
1941 ret = get_info(user, len, 1);
1942 break;
1943 case IPT_SO_GET_ENTRIES:
1944 ret = compat_get_entries(user, len);
1945 break;
1946 default:
79030ed0 1947 ret = do_ipt_get_ctl(sk, cmd, user, len);
2722971c 1948 }
1949 return ret;
1950}
2722971c 1951#endif
1952
1953static int
1954do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
1955{
1956 int ret;
1957
1958 if (!capable(CAP_NET_ADMIN))
1959 return -EPERM;
1960
1961 switch (cmd) {
1962 case IPT_SO_SET_REPLACE:
1963 ret = do_replace(user, len);
1964 break;
1965
1966 case IPT_SO_SET_ADD_COUNTERS:
2722971c 1967 ret = do_add_counters(user, len, 0);
1968 break;
1969
1970 default:
1971 duprintf("do_ipt_set_ctl: unknown request %i\n", cmd);
1972 ret = -EINVAL;
1973 }
1974
1975 return ret;
1976}

static int
do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
        int ret;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        switch (cmd) {
        case IPT_SO_GET_INFO:
                ret = get_info(user, len, 0);
                break;

        case IPT_SO_GET_ENTRIES:
                ret = get_entries(user, len);
                break;

        case IPT_SO_GET_REVISION_MATCH:
        case IPT_SO_GET_REVISION_TARGET: {
                struct ipt_get_revision rev;
                int target;

                if (*len != sizeof(rev)) {
                        ret = -EINVAL;
                        break;
                }
                if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
                        ret = -EFAULT;
                        break;
                }

                if (cmd == IPT_SO_GET_REVISION_TARGET)
                        target = 1;
                else
                        target = 0;

                try_then_request_module(xt_find_revision(AF_INET, rev.name,
                                                         rev.revision,
                                                         target, &ret),
                                        "ipt_%s", rev.name);
                break;
        }

        default:
                duprintf("do_ipt_get_ctl: unknown request %i\n", cmd);
                ret = -EINVAL;
        }

        return ret;
}
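
/*
 * Usage note (illustrative, not part of this file): the get handlers above
 * are reached from userspace via getsockopt() on an IPv4 socket, with the
 * optnames registered through ipt_sockopts further down.  A minimal sketch,
 * assuming a raw socket, CAP_NET_ADMIN, and the stock "filter" table; error
 * handling is omitted:
 *
 *      #include <string.h>
 *      #include <sys/socket.h>
 *      #include <netinet/in.h>
 *      #include <linux/netfilter_ipv4/ip_tables.h>
 *
 *      struct ipt_getinfo info;
 *      socklen_t len = sizeof(info);
 *      int fd = socket(AF_INET, SOCK_RAW, IPPROTO_RAW);
 *
 *      memset(&info, 0, sizeof(info));
 *      strcpy(info.name, "filter");
 *      // get_info() insists on len == sizeof(struct ipt_getinfo)
 *      getsockopt(fd, IPPROTO_IP, IPT_SO_GET_INFO, &info, &len);
 *      // on success, info.num_entries and info.size describe the ruleset
 */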

int ipt_register_table(struct xt_table *table, const struct ipt_replace *repl)
{
        int ret;
        struct xt_table_info *newinfo;
        static struct xt_table_info bootstrap
                = { 0, 0, 0, { 0 }, { 0 }, { } };
        void *loc_cpu_entry;

        newinfo = xt_alloc_table_info(repl->size);
        if (!newinfo)
                return -ENOMEM;

        /* choose the copy on our node/cpu,
         * but don't care about preemption
         */
        loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
        memcpy(loc_cpu_entry, repl->entries, repl->size);

        ret = translate_table(table->name, table->valid_hooks,
                              newinfo, loc_cpu_entry, repl->size,
                              repl->num_entries,
                              repl->hook_entry,
                              repl->underflow);
        if (ret != 0) {
                xt_free_table_info(newinfo);
                return ret;
        }

        ret = xt_register_table(table, &bootstrap, newinfo);
        if (ret != 0) {
                xt_free_table_info(newinfo);
                return ret;
        }

        return 0;
}

void ipt_unregister_table(struct ipt_table *table)
{
        struct xt_table_info *private;
        void *loc_cpu_entry;

        private = xt_unregister_table(table);

        /* Decrease module usage counts and free resources */
        loc_cpu_entry = private->entries[raw_smp_processor_id()];
        IPT_ENTRY_ITERATE(loc_cpu_entry, private->size, cleanup_entry, NULL);
        xt_free_table_info(private);
}
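
/*
 * Usage sketch (hypothetical, not part of this file): a table module such
 * as iptable_filter builds a struct ipt_replace describing its initial
 * ruleset and hands it to ipt_register_table() from its init routine,
 * undoing this with ipt_unregister_table() on exit.  The "demo" names and
 * the prebuilt "initial_replace" blob are assumptions for illustration;
 * constructing the replace blob (one standard entry per hooked chain plus
 * the trailing ERROR policy entry) is omitted here.
 *
 *      static struct ipt_table demo_table = {
 *              .name           = "demo",
 *              .valid_hooks    = 1 << NF_IP_LOCAL_OUT,
 *              .lock           = RW_LOCK_UNLOCKED,
 *              .me             = THIS_MODULE,
 *              .af             = AF_INET,
 *      };
 *
 *      static int __init demo_init(void)
 *      {
 *              return ipt_register_table(&demo_table, &initial_replace);
 *      }
 *
 *      static void __exit demo_exit(void)
 *      {
 *              ipt_unregister_table(&demo_table);
 *      }
 */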

/* Returns 1 if the type and code are matched by the range, 0 otherwise */
static inline int
icmp_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
                     u_int8_t type, u_int8_t code,
                     int invert)
{
        return ((test_type == 0xFF) ||
                (type == test_type && code >= min_code && code <= max_code))
                ^ invert;
}
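
/*
 * Worked example (hypothetical values, not kernel code): for a rule that
 * specifies type 3 with code range [0, 3], the predicate above yields:
 *
 *      icmp_type_code_match(3, 0, 3, 3, 1, 0)       -> 1  dest-unreach, code 1
 *      icmp_type_code_match(3, 0, 3, 8, 0, 0)       -> 0  echo-request
 *      icmp_type_code_match(0xFF, 0, 0xFF, 8, 0, 0) -> 1  0xFF acts as a wildcard
 *      icmp_type_code_match(3, 0, 3, 3, 1, 1)       -> 0  inversion flips the result
 */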

static int
icmp_match(const struct sk_buff *skb,
           const struct net_device *in,
           const struct net_device *out,
           const struct xt_match *match,
           const void *matchinfo,
           int offset,
           unsigned int protoff,
           int *hotdrop)
{
        struct icmphdr _icmph, *ic;
        const struct ipt_icmp *icmpinfo = matchinfo;

        /* Must not be a fragment. */
        if (offset)
                return 0;

        ic = skb_header_pointer(skb, protoff, sizeof(_icmph), &_icmph);
        if (ic == NULL) {
                /* We've been asked to examine this packet, and we
                 * can't.  Hence, no choice but to drop.
                 */
                duprintf("Dropping evil ICMP tinygram.\n");
                *hotdrop = 1;
                return 0;
        }

        return icmp_type_code_match(icmpinfo->type,
                                    icmpinfo->code[0],
                                    icmpinfo->code[1],
                                    ic->type, ic->code,
                                    !!(icmpinfo->invflags & IPT_ICMP_INV));
}

/* Called when user tries to insert an entry of this type. */
static int
icmp_checkentry(const char *tablename,
                const void *info,
                const struct xt_match *match,
                void *matchinfo,
                unsigned int hook_mask)
{
        const struct ipt_icmp *icmpinfo = matchinfo;

        /* Must specify no unknown invflags */
        return !(icmpinfo->invflags & ~IPT_ICMP_INV);
}

/* The built-in targets: standard (NULL) and error. */
static struct ipt_target ipt_standard_target = {
        .name           = IPT_STANDARD_TARGET,
        .targetsize     = sizeof(int),
        .family         = AF_INET,
#ifdef CONFIG_COMPAT
        .compatsize     = sizeof(compat_int_t),
        .compat_from_user = compat_standard_from_user,
        .compat_to_user = compat_standard_to_user,
#endif
};

static struct ipt_target ipt_error_target = {
        .name           = IPT_ERROR_TARGET,
        .target         = ipt_error,
        .targetsize     = IPT_FUNCTION_MAXNAMELEN,
        .family         = AF_INET,
};

static struct nf_sockopt_ops ipt_sockopts = {
        .pf             = PF_INET,
        .set_optmin     = IPT_BASE_CTL,
        .set_optmax     = IPT_SO_SET_MAX + 1,
        .set            = do_ipt_set_ctl,
#ifdef CONFIG_COMPAT
        .compat_set     = compat_do_ipt_set_ctl,
#endif
        .get_optmin     = IPT_BASE_CTL,
        .get_optmax     = IPT_SO_GET_MAX + 1,
        .get            = do_ipt_get_ctl,
#ifdef CONFIG_COMPAT
        .compat_get     = compat_do_ipt_get_ctl,
#endif
};

static struct ipt_match icmp_matchstruct = {
        .name           = "icmp",
        .match          = icmp_match,
        .matchsize      = sizeof(struct ipt_icmp),
        .proto          = IPPROTO_ICMP,
        .family         = AF_INET,
        .checkentry     = icmp_checkentry,
};

static int __init ip_tables_init(void)
{
        int ret;

        ret = xt_proto_init(AF_INET);
        if (ret < 0)
                goto err1;

        /* No one else will be downing sem now, so we won't sleep */
        ret = xt_register_target(&ipt_standard_target);
        if (ret < 0)
                goto err2;
        ret = xt_register_target(&ipt_error_target);
        if (ret < 0)
                goto err3;
        ret = xt_register_match(&icmp_matchstruct);
        if (ret < 0)
                goto err4;

        /* Register setsockopt */
        ret = nf_register_sockopt(&ipt_sockopts);
        if (ret < 0)
                goto err5;

        printk("ip_tables: (C) 2000-2006 Netfilter Core Team\n");
        return 0;

err5:
        xt_unregister_match(&icmp_matchstruct);
err4:
        xt_unregister_target(&ipt_error_target);
err3:
        xt_unregister_target(&ipt_standard_target);
err2:
        xt_proto_fini(AF_INET);
err1:
        return ret;
}

static void __exit ip_tables_fini(void)
{
        nf_unregister_sockopt(&ipt_sockopts);

        xt_unregister_match(&icmp_matchstruct);
        xt_unregister_target(&ipt_error_target);
        xt_unregister_target(&ipt_standard_target);

        xt_proto_fini(AF_INET);
}

EXPORT_SYMBOL(ipt_register_table);
EXPORT_SYMBOL(ipt_unregister_table);
EXPORT_SYMBOL(ipt_do_table);
module_init(ip_tables_init);
module_exit(ip_tables_fini);