[NETFILTER]: H.323 helper: Change author's email address
[deliverable/linux.git] / net / ipv4 / netfilter / ip_tables.c
CommitLineData
1da177e4
LT
1/*
2 * Packet matching code.
3 *
4 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
2e4e6a17 5 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
1da177e4
LT
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * 19 Jan 2002 Harald Welte <laforge@gnumonks.org>
12 * - increase module usage count as soon as we have rules inside
13 * a table
2e4e6a17
HW
 14 * 08 Oct 2005 Harald Welte <laforge@netfilter.org>
15 * - Generalize into "x_tables" layer and "{ip,ip6,arp}_tables"
1da177e4
LT
16 */
17#include <linux/config.h>
18#include <linux/cache.h>
4fc268d2 19#include <linux/capability.h>
1da177e4
LT
20#include <linux/skbuff.h>
21#include <linux/kmod.h>
22#include <linux/vmalloc.h>
23#include <linux/netdevice.h>
24#include <linux/module.h>
1da177e4
LT
25#include <linux/icmp.h>
26#include <net/ip.h>
2722971c 27#include <net/compat.h>
1da177e4 28#include <asm/uaccess.h>
57b47a53 29#include <linux/mutex.h>
1da177e4
LT
30#include <linux/proc_fs.h>
31#include <linux/err.h>
c8923c6b 32#include <linux/cpumask.h>
1da177e4 33
2e4e6a17 34#include <linux/netfilter/x_tables.h>
1da177e4
LT
35#include <linux/netfilter_ipv4/ip_tables.h>
36
37MODULE_LICENSE("GPL");
38MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
39MODULE_DESCRIPTION("IPv4 packet filter");
40
41/*#define DEBUG_IP_FIREWALL*/
42/*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
43/*#define DEBUG_IP_FIREWALL_USER*/
44
45#ifdef DEBUG_IP_FIREWALL
46#define dprintf(format, args...) printk(format , ## args)
47#else
48#define dprintf(format, args...)
49#endif
50
51#ifdef DEBUG_IP_FIREWALL_USER
52#define duprintf(format, args...) printk(format , ## args)
53#else
54#define duprintf(format, args...)
55#endif
56
57#ifdef CONFIG_NETFILTER_DEBUG
58#define IP_NF_ASSERT(x) \
59do { \
60 if (!(x)) \
61 printk("IP_NF_ASSERT: %s:%s:%u\n", \
62 __FUNCTION__, __FILE__, __LINE__); \
63} while(0)
64#else
65#define IP_NF_ASSERT(x)
66#endif
1da177e4
LT
67
68#if 0
69/* All the better to debug you with... */
70#define static
71#define inline
72#endif
73
74/*
75 We keep a set of rules for each CPU, so we can avoid write-locking
76 them in the softirq when updating the counters and therefore
77 only need to read-lock in the softirq; doing a write_lock_bh() in user
78 context stops packets coming through and allows user context to read
79 the counters or update the rules.
80
1da177e4
LT
81 Hence the start of any table is given by get_table() below. */
82
1da177e4
LT
/* Returns whether the packet matches the rule's IP-header part (addresses,
 * interfaces, protocol, fragment flag) or not.  Each test can be inverted
 * by the corresponding IPT_INV_* bit in ipinfo->invflags. */
static inline int
ip_packet_match(const struct iphdr *ip,
		const char *indev,
		const char *outdev,
		const struct ipt_ip *ipinfo,
		int isfrag)
{
	size_t i;
	unsigned long ret;

/* Evaluate a boolean condition, XOR-ing in the matching invert flag. */
#define FWINV(bool,invflg) ((bool) ^ !!(ipinfo->invflags & invflg))

	/* Source/destination address match under the rule's netmasks. */
	if (FWINV((ip->saddr&ipinfo->smsk.s_addr) != ipinfo->src.s_addr,
		  IPT_INV_SRCIP)
	    || FWINV((ip->daddr&ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr,
		     IPT_INV_DSTIP)) {
		dprintf("Source or dest mismatch.\n");

		dprintf("SRC: %u.%u.%u.%u. Mask: %u.%u.%u.%u. Target: %u.%u.%u.%u.%s\n",
			NIPQUAD(ip->saddr),
			NIPQUAD(ipinfo->smsk.s_addr),
			NIPQUAD(ipinfo->src.s_addr),
			ipinfo->invflags & IPT_INV_SRCIP ? " (INV)" : "");
		dprintf("DST: %u.%u.%u.%u Mask: %u.%u.%u.%u Target: %u.%u.%u.%u.%s\n",
			NIPQUAD(ip->daddr),
			NIPQUAD(ipinfo->dmsk.s_addr),
			NIPQUAD(ipinfo->dst.s_addr),
			ipinfo->invflags & IPT_INV_DSTIP ? " (INV)" : "");
		return 0;
	}

	/* Look for ifname matches; this should unroll nicely.
	 * Interface names are compared one unsigned long at a time under
	 * the wildcard mask (the buffers are IFNAMSIZ, long-aligned). */
	for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
		ret |= (((const unsigned long *)indev)[i]
			^ ((const unsigned long *)ipinfo->iniface)[i])
			& ((const unsigned long *)ipinfo->iniface_mask)[i];
	}

	if (FWINV(ret != 0, IPT_INV_VIA_IN)) {
		dprintf("VIA in mismatch (%s vs %s).%s\n",
			indev, ipinfo->iniface,
			ipinfo->invflags&IPT_INV_VIA_IN ?" (INV)":"");
		return 0;
	}

	for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
		ret |= (((const unsigned long *)outdev)[i]
			^ ((const unsigned long *)ipinfo->outiface)[i])
			& ((const unsigned long *)ipinfo->outiface_mask)[i];
	}

	if (FWINV(ret != 0, IPT_INV_VIA_OUT)) {
		dprintf("VIA out mismatch (%s vs %s).%s\n",
			outdev, ipinfo->outiface,
			ipinfo->invflags&IPT_INV_VIA_OUT ?" (INV)":"");
		return 0;
	}

	/* Check specific protocol; proto == 0 means "any". */
	if (ipinfo->proto
	    && FWINV(ip->protocol != ipinfo->proto, IPT_INV_PROTO)) {
		dprintf("Packet protocol %hi does not match %hi.%s\n",
			ip->protocol, ipinfo->proto,
			ipinfo->invflags&IPT_INV_PROTO ? " (INV)":"");
		return 0;
	}

	/* If we have a fragment rule but the packet is not a fragment
	 * then we return zero */
	if (FWINV((ipinfo->flags&IPT_F_FRAG) && !isfrag, IPT_INV_FRAG)) {
		dprintf("Fragment rule but not fragment.%s\n",
			ipinfo->invflags & IPT_INV_FRAG ? " (INV)" : "");
		return 0;
	}

	return 1;
}
161
162static inline int
163ip_checkentry(const struct ipt_ip *ip)
164{
165 if (ip->flags & ~IPT_F_MASK) {
166 duprintf("Unknown flag bits set: %08X\n",
167 ip->flags & ~IPT_F_MASK);
168 return 0;
169 }
170 if (ip->invflags & ~IPT_INV_MASK) {
171 duprintf("Unknown invflag bits set: %08X\n",
172 ip->invflags & ~IPT_INV_MASK);
173 return 0;
174 }
175 return 1;
176}
177
/* Target handler for the built-in ERROR target.  A well-formed table
 * never executes it; if it is reached, log (rate-limited) the error
 * string carried in targinfo and drop the packet. */
static unsigned int
ipt_error(struct sk_buff **pskb,
	  const struct net_device *in,
	  const struct net_device *out,
	  unsigned int hooknum,
	  const struct xt_target *target,
	  const void *targinfo,
	  void *userinfo)
{
	if (net_ratelimit())
		printk("ip_tables: error: `%s'\n", (char *)targinfo);

	return NF_DROP;
}
192
193static inline
194int do_match(struct ipt_entry_match *m,
195 const struct sk_buff *skb,
196 const struct net_device *in,
197 const struct net_device *out,
198 int offset,
199 int *hotdrop)
200{
201 /* Stop iteration if it doesn't match */
1c524830
PM
202 if (!m->u.kernel.match->match(skb, in, out, m->u.kernel.match, m->data,
203 offset, skb->nh.iph->ihl*4, hotdrop))
1da177e4
LT
204 return 1;
205 else
206 return 0;
207}
208
/* Translate a byte offset within a table blob into an entry pointer. */
static inline struct ipt_entry *
get_entry(void *base, unsigned int offset)
{
	char *p = base;

	return (struct ipt_entry *)(p + offset);
}
214
/* Returns one of the generic firewall policies, like NF_ACCEPT.
 *
 * Walks the per-CPU copy of the rule blob for the given hook under the
 * table's read lock: match each rule, bump its counters, and follow
 * standard-target verdicts (jump / RETURN via the back pointer chain /
 * absolute verdict) or invoke extension targets until a final verdict
 * is produced or a match sets *hotdrop. */
unsigned int
ipt_do_table(struct sk_buff **pskb,
	     unsigned int hook,
	     const struct net_device *in,
	     const struct net_device *out,
	     struct ipt_table *table,
	     void *userdata)
{
	/* Long-aligned so the word-wise compares in ip_packet_match work. */
	static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
	u_int16_t offset;
	struct iphdr *ip;
	u_int16_t datalen;
	int hotdrop = 0;
	/* Initializing verdict to NF_DROP keeps gcc happy. */
	unsigned int verdict = NF_DROP;
	const char *indev, *outdev;
	void *table_base;
	struct ipt_entry *e, *back;
	struct xt_table_info *private = table->private;

	/* Initialization */
	ip = (*pskb)->nh.iph;
	datalen = (*pskb)->len - ip->ihl * 4;
	indev = in ? in->name : nulldevname;
	outdev = out ? out->name : nulldevname;
	/* We handle fragments by dealing with the first fragment as
	 * if it was a normal packet.  All other fragments are treated
	 * normally, except that they will NEVER match rules that ask
	 * things we don't know, ie. tcp syn flag or ports).  If the
	 * rule is also a fragment-specific rule, non-fragments won't
	 * match it. */
	offset = ntohs(ip->frag_off) & IP_OFFSET;

	read_lock_bh(&table->lock);
	IP_NF_ASSERT(table->valid_hooks & (1 << hook));
	/* Each CPU has its own copy of the rules (see translate_table). */
	table_base = (void *)private->entries[smp_processor_id()];
	e = get_entry(table_base, private->hook_entry[hook]);

	/* For return from builtin chain */
	back = get_entry(table_base, private->underflow[hook]);

	do {
		IP_NF_ASSERT(e);
		IP_NF_ASSERT(back);
		if (ip_packet_match(ip, indev, outdev, &e->ip, offset)) {
			struct ipt_entry_target *t;

			if (IPT_MATCH_ITERATE(e, do_match,
					      *pskb, in, out,
					      offset, &hotdrop) != 0)
				goto no_match;

			ADD_COUNTER(e->counters, ntohs(ip->tot_len), 1);

			t = ipt_get_target(e);
			IP_NF_ASSERT(t->u.kernel.target);
			/* Standard target? (target function NULL => standard) */
			if (!t->u.kernel.target->target) {
				int v;

				v = ((struct ipt_standard_target *)t)->verdict;
				if (v < 0) {
					/* Pop from stack? */
					if (v != IPT_RETURN) {
						/* Absolute verdict, encoded as -(verdict+1). */
						verdict = (unsigned)(-v) - 1;
						break;
					}
					e = back;
					back = get_entry(table_base,
							 back->comefrom);
					continue;
				}
				if (table_base + v != (void *)e + e->next_offset
				    && !(e->ip.flags & IPT_F_GOTO)) {
					/* Save old back ptr in next entry */
					struct ipt_entry *next
						= (void *)e + e->next_offset;
					next->comefrom
						= (void *)back - table_base;
					/* set back pointer to next entry */
					back = next;
				}

				e = get_entry(table_base, v);
			} else {
				/* Targets which reenter must return
				   abs. verdicts */
#ifdef CONFIG_NETFILTER_DEBUG
				/* Sentinel in entry 0's comefrom detects reentry. */
				((struct ipt_entry *)table_base)->comefrom
					= 0xeeeeeeec;
#endif
				verdict = t->u.kernel.target->target(pskb,
								     in, out,
								     hook,
								     t->u.kernel.target,
								     t->data,
								     userdata);

#ifdef CONFIG_NETFILTER_DEBUG
				if (((struct ipt_entry *)table_base)->comefrom
				    != 0xeeeeeeec
				    && verdict == IPT_CONTINUE) {
					printk("Target %s reentered!\n",
					       t->u.kernel.target->name);
					verdict = NF_DROP;
				}
				((struct ipt_entry *)table_base)->comefrom
					= 0x57acc001;
#endif
				/* Target might have changed stuff. */
				ip = (*pskb)->nh.iph;
				datalen = (*pskb)->len - ip->ihl * 4;

				if (verdict == IPT_CONTINUE)
					e = (void *)e + e->next_offset;
				else
					/* Verdict */
					break;
			}
		} else {

		no_match:
			e = (void *)e + e->next_offset;
		}
	} while (!hotdrop);

	read_unlock_bh(&table->lock);

#ifdef DEBUG_ALLOW_ALL
	return NF_ACCEPT;
#else
	if (hotdrop)
		return NF_DROP;
	else return verdict;
#endif
}
352
1da177e4
LT
353/* All zeroes == unconditional rule. */
354static inline int
355unconditional(const struct ipt_ip *ip)
356{
357 unsigned int i;
358
359 for (i = 0; i < sizeof(*ip)/sizeof(__u32); i++)
360 if (((__u32 *)ip)[i])
361 return 0;
362
363 return 1;
364}
365
/* Figures out from what hook each rule can be called: returns 0 if
   there are loops.  Puts hook bitmask in comefrom.

   Depth-first walk from each hook entry point.  While on the current
   path an entry carries bit (1 << NF_IP_NUMHOOKS) in comefrom; meeting
   that bit again means a loop.  The packet counter field (pcnt) is
   abused to store back pointers during the walk and reset to 0 on the
   way out. */
static int
mark_source_chains(struct xt_table_info *newinfo,
		   unsigned int valid_hooks, void *entry0)
{
	unsigned int hook;

	/* No recursion; use packet counter to save back ptrs (reset
	   to 0 as we leave), and comefrom to save source hook bitmask */
	for (hook = 0; hook < NF_IP_NUMHOOKS; hook++) {
		unsigned int pos = newinfo->hook_entry[hook];
		struct ipt_entry *e
			= (struct ipt_entry *)(entry0 + pos);

		if (!(valid_hooks & (1 << hook)))
			continue;

		/* Set initial back pointer. */
		e->counters.pcnt = pos;

		for (;;) {
			struct ipt_standard_target *t
				= (void *)ipt_get_target(e);

			/* On-path marker already set => loop. */
			if (e->comefrom & (1 << NF_IP_NUMHOOKS)) {
				printk("iptables: loop hook %u pos %u %08X.\n",
				       hook, pos, e->comefrom);
				return 0;
			}
			e->comefrom
				|= ((1 << hook) | (1 << NF_IP_NUMHOOKS));

			/* Unconditional return/END. */
			if (e->target_offset == sizeof(struct ipt_entry)
			    && (strcmp(t->target.u.user.name,
				       IPT_STANDARD_TARGET) == 0)
			    && t->verdict < 0
			    && unconditional(&e->ip)) {
				unsigned int oldpos, size;

				/* Return: backtrack through the last
				   big jump. */
				do {
					e->comefrom ^= (1<<NF_IP_NUMHOOKS);
#ifdef DEBUG_IP_FIREWALL_USER
					if (e->comefrom
					    & (1 << NF_IP_NUMHOOKS)) {
						duprintf("Back unset "
							 "on hook %u "
							 "rule %u\n",
							 hook, pos);
					}
#endif
					oldpos = pos;
					pos = e->counters.pcnt;
					e->counters.pcnt = 0;

					/* We're at the start. */
					if (pos == oldpos)
						goto next;

					e = (struct ipt_entry *)
						(entry0 + pos);
				} while (oldpos == pos + e->next_offset);

				/* Move along one */
				size = e->next_offset;
				e = (struct ipt_entry *)
					(entry0 + pos + size);
				e->counters.pcnt = pos;
				pos += size;
			} else {
				int newpos = t->verdict;

				if (strcmp(t->target.u.user.name,
					   IPT_STANDARD_TARGET) == 0
				    && newpos >= 0) {
					/* This a jump; chase it. */
					duprintf("Jump rule %u -> %u\n",
						 pos, newpos);
				} else {
					/* ... this is a fallthru */
					newpos = pos + e->next_offset;
				}
				e = (struct ipt_entry *)
					(entry0 + newpos);
				e->counters.pcnt = pos;
				pos = newpos;
			}
		}
		next:
		duprintf("Finished chain %u\n", hook);
	}
	return 1;
}
462
463static inline int
464cleanup_match(struct ipt_entry_match *m, unsigned int *i)
465{
466 if (i && (*i)-- == 0)
467 return 1;
468
469 if (m->u.kernel.match->destroy)
1c524830 470 m->u.kernel.match->destroy(m->u.kernel.match, m->data,
1da177e4
LT
471 m->u.match_size - sizeof(*m));
472 module_put(m->u.kernel.match->me);
473 return 0;
474}
475
476static inline int
477standard_check(const struct ipt_entry_target *t,
478 unsigned int max_offset)
479{
480 struct ipt_standard_target *targ = (void *)t;
481
482 /* Check standard info. */
1da177e4
LT
483 if (targ->verdict >= 0
484 && targ->verdict > max_offset - sizeof(struct ipt_entry)) {
485 duprintf("ipt_standard_check: bad verdict (%i)\n",
486 targ->verdict);
487 return 0;
488 }
1da177e4
LT
489 if (targ->verdict < -NF_MAX_VERDICT - 1) {
490 duprintf("ipt_standard_check: bad negative verdict (%i)\n",
491 targ->verdict);
492 return 0;
493 }
494 return 1;
495}
496
/* Look up and validate one extension match for a rule being loaded:
 * find (auto-loading "ipt_<name>" if needed) the match by name and
 * revision, run the generic xt and the match's own checkentry checks,
 * and count it in *i.  Holds a module reference on success; drops it
 * again on any failure after the lookup. */
static inline int
check_match(struct ipt_entry_match *m,
	    const char *name,
	    const struct ipt_ip *ip,
	    unsigned int hookmask,
	    unsigned int *i)
{
	struct ipt_match *match;
	int ret;

	match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name,
						      m->u.user.revision),
					"ipt_%s", m->u.user.name);
	if (IS_ERR(match) || !match) {
		duprintf("check_match: `%s' not found\n", m->u.user.name);
		return match ? PTR_ERR(match) : -ENOENT;
	}
	m->u.kernel.match = match;

	/* Generic x_tables sanity checks (size, hooks, protocol). */
	ret = xt_check_match(match, AF_INET, m->u.match_size - sizeof(*m),
			     name, hookmask, ip->proto,
			     ip->invflags & IPT_INV_PROTO);
	if (ret)
		goto err;

	/* Match-specific validation, if the extension provides one. */
	if (m->u.kernel.match->checkentry
	    && !m->u.kernel.match->checkentry(name, ip, match, m->data,
					      m->u.match_size - sizeof(*m),
					      hookmask)) {
		duprintf("ip_tables: check failed for `%s'.\n",
			 m->u.kernel.match->name);
		ret = -EINVAL;
		goto err;
	}

	(*i)++;
	return 0;
err:
	/* Drop the reference taken by the lookup above. */
	module_put(m->u.kernel.match->me);
	return ret;
}
538
539static struct ipt_target ipt_standard_target;
540
541static inline int
542check_entry(struct ipt_entry *e, const char *name, unsigned int size,
543 unsigned int *i)
544{
545 struct ipt_entry_target *t;
546 struct ipt_target *target;
547 int ret;
548 unsigned int j;
549
550 if (!ip_checkentry(&e->ip)) {
551 duprintf("ip_tables: ip check failed %p %s.\n", e, name);
552 return -EINVAL;
553 }
554
555 j = 0;
556 ret = IPT_MATCH_ITERATE(e, check_match, name, &e->ip, e->comefrom, &j);
557 if (ret != 0)
558 goto cleanup_matches;
559
560 t = ipt_get_target(e);
2e4e6a17
HW
561 target = try_then_request_module(xt_find_target(AF_INET,
562 t->u.user.name,
1da177e4
LT
563 t->u.user.revision),
564 "ipt_%s", t->u.user.name);
565 if (IS_ERR(target) || !target) {
566 duprintf("check_entry: `%s' not found\n", t->u.user.name);
567 ret = target ? PTR_ERR(target) : -ENOENT;
568 goto cleanup_matches;
569 }
570 t->u.kernel.target = target;
571
3cdc7c95
PM
572 ret = xt_check_target(target, AF_INET, t->u.target_size - sizeof(*t),
573 name, e->comefrom, e->ip.proto,
574 e->ip.invflags & IPT_INV_PROTO);
575 if (ret)
576 goto err;
577
1da177e4
LT
578 if (t->u.kernel.target == &ipt_standard_target) {
579 if (!standard_check(t, size)) {
580 ret = -EINVAL;
581 goto cleanup_matches;
582 }
583 } else if (t->u.kernel.target->checkentry
1c524830 584 && !t->u.kernel.target->checkentry(name, e, target, t->data,
1da177e4
LT
585 t->u.target_size
586 - sizeof(*t),
587 e->comefrom)) {
1da177e4
LT
588 duprintf("ip_tables: check failed for `%s'.\n",
589 t->u.kernel.target->name);
590 ret = -EINVAL;
3cdc7c95 591 goto err;
1da177e4
LT
592 }
593
594 (*i)++;
595 return 0;
3cdc7c95
PM
596 err:
597 module_put(t->u.kernel.target->me);
1da177e4
LT
598 cleanup_matches:
599 IPT_MATCH_ITERATE(e, cleanup_match, &j);
600 return ret;
601}
602
/* First-pass structural check on one entry of a user-supplied blob:
 * verify alignment and that the entry fits below limit, verify the
 * advertised next_offset is at least big enough for an entry plus a
 * target, record which hooks/underflows start here, and zero the
 * counters and comefrom fields for the later passes. */
static inline int
check_entry_size_and_hooks(struct ipt_entry *e,
			   struct xt_table_info *newinfo,
			   unsigned char *base,
			   unsigned char *limit,
			   const unsigned int *hook_entries,
			   const unsigned int *underflows,
			   unsigned int *i)
{
	unsigned int h;

	if ((unsigned long)e % __alignof__(struct ipt_entry) != 0
	    || (unsigned char *)e + sizeof(struct ipt_entry) >= limit) {
		duprintf("Bad offset %p\n", e);
		return -EINVAL;
	}

	if (e->next_offset
	    < sizeof(struct ipt_entry) + sizeof(struct ipt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	/* Check hooks & underflows: only offsets that coincide with an
	 * actual entry boundary are accepted into newinfo. */
	for (h = 0; h < NF_IP_NUMHOOKS; h++) {
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h])
			newinfo->underflow[h] = underflows[h];
	}

	/* FIXME: underflows must be unconditional, standard verdicts
	   < 0 (not IPT_RETURN). --RR */

	/* Clear counters and comefrom */
	e->counters = ((struct xt_counters) { 0, 0 });
	e->comefrom = 0;

	(*i)++;
	return 0;
}
645
646static inline int
647cleanup_entry(struct ipt_entry *e, unsigned int *i)
648{
649 struct ipt_entry_target *t;
650
651 if (i && (*i)-- == 0)
652 return 1;
653
654 /* Cleanup all matches */
655 IPT_MATCH_ITERATE(e, cleanup_match, NULL);
656 t = ipt_get_target(e);
657 if (t->u.kernel.target->destroy)
1c524830 658 t->u.kernel.target->destroy(t->u.kernel.target, t->data,
1da177e4
LT
659 t->u.target_size - sizeof(*t));
660 module_put(t->u.kernel.target->me);
661 return 0;
662}
663
/* Checks and translates the user-supplied table segment (held in
   newinfo).

   Passes: (1) structural check of every entry, collecting hook entry
   points and underflows; (2) verify every valid hook got an entry
   point; (3) loop detection via mark_source_chains; (4) full semantic
   check of each rule (check_entry), unwinding on failure; finally the
   validated blob is duplicated into every other CPU's copy. */
static int
translate_table(const char *name,
		unsigned int valid_hooks,
		struct xt_table_info *newinfo,
		void *entry0,
		unsigned int size,
		unsigned int number,
		const unsigned int *hook_entries,
		const unsigned int *underflows)
{
	unsigned int i;
	int ret;

	newinfo->size = size;
	newinfo->number = number;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_IP_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = 0xFFFFFFFF;
		newinfo->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_table: size %u\n", newinfo->size);
	i = 0;
	/* Walk through entries, checking offsets. */
	ret = IPT_ENTRY_ITERATE(entry0, newinfo->size,
				check_entry_size_and_hooks,
				newinfo,
				entry0,
				entry0 + size,
				hook_entries, underflows, &i);
	if (ret != 0)
		return ret;

	if (i != number) {
		duprintf("translate_table: %u not %u entries\n",
			 i, number);
		return -EINVAL;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_IP_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(valid_hooks & (1 << i)))
			continue;
		if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, hook_entries[i]);
			return -EINVAL;
		}
		if (newinfo->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, underflows[i]);
			return -EINVAL;
		}
	}

	if (!mark_source_chains(newinfo, valid_hooks, entry0))
		return -ELOOP;

	/* Finally, each sanity check must pass */
	i = 0;
	ret = IPT_ENTRY_ITERATE(entry0, newinfo->size,
				check_entry, name, size, &i);

	if (ret != 0) {
		/* Unwind only the i entries that passed check_entry. */
		IPT_ENTRY_ITERATE(entry0, newinfo->size,
				  cleanup_entry, &i);
		return ret;
	}

	/* And one copy for every other CPU */
	for_each_possible_cpu(i) {
		if (newinfo->entries[i] && newinfo->entries[i] != entry0)
			memcpy(newinfo->entries[i], entry0, newinfo->size);
	}

	return ret;
}
745
1da177e4
LT
746/* Gets counters. */
747static inline int
748add_entry_to_counter(const struct ipt_entry *e,
2e4e6a17 749 struct xt_counters total[],
1da177e4
LT
750 unsigned int *i)
751{
752 ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
753
754 (*i)++;
755 return 0;
756}
757
31836064
ED
758static inline int
759set_entry_to_counter(const struct ipt_entry *e,
760 struct ipt_counters total[],
761 unsigned int *i)
762{
763 SET_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
764
765 (*i)++;
766 return 0;
767}
768
1da177e4 769static void
2e4e6a17
HW
770get_counters(const struct xt_table_info *t,
771 struct xt_counters counters[])
1da177e4
LT
772{
773 unsigned int cpu;
774 unsigned int i;
31836064
ED
775 unsigned int curcpu;
776
777 /* Instead of clearing (by a previous call to memset())
778 * the counters and using adds, we set the counters
779 * with data used by 'current' CPU
780 * We dont care about preemption here.
781 */
782 curcpu = raw_smp_processor_id();
783
784 i = 0;
785 IPT_ENTRY_ITERATE(t->entries[curcpu],
786 t->size,
787 set_entry_to_counter,
788 counters,
789 &i);
1da177e4 790
6f912042 791 for_each_possible_cpu(cpu) {
31836064
ED
792 if (cpu == curcpu)
793 continue;
1da177e4 794 i = 0;
31836064 795 IPT_ENTRY_ITERATE(t->entries[cpu],
1da177e4
LT
796 t->size,
797 add_entry_to_counter,
798 counters,
799 &i);
800 }
801}
802
2722971c 803static inline struct xt_counters * alloc_counters(struct ipt_table *table)
1da177e4 804{
2722971c 805 unsigned int countersize;
2e4e6a17
HW
806 struct xt_counters *counters;
807 struct xt_table_info *private = table->private;
1da177e4
LT
808
809 /* We need atomic snapshot of counters: rest doesn't change
810 (other than comefrom, which userspace doesn't care
811 about). */
2e4e6a17 812 countersize = sizeof(struct xt_counters) * private->number;
31836064 813 counters = vmalloc_node(countersize, numa_node_id());
1da177e4
LT
814
815 if (counters == NULL)
2722971c 816 return ERR_PTR(-ENOMEM);
1da177e4
LT
817
818 /* First, sum counters... */
1da177e4 819 write_lock_bh(&table->lock);
2e4e6a17 820 get_counters(private, counters);
1da177e4
LT
821 write_unlock_bh(&table->lock);
822
2722971c
DM
823 return counters;
824}
825
/* Copy the table's rule blob to userspace: dump one CPU's copy of the
 * entries wholesale, then patch in (a) the summed counters and (b) the
 * user-visible match/target names, since the kernel blob stores
 * pointers in those unions. */
static int
copy_entries_to_user(unsigned int total_size,
		     struct ipt_table *table,
		     void __user *userptr)
{
	unsigned int off, num;
	struct ipt_entry *e;
	struct xt_counters *counters;
	struct xt_table_info *private = table->private;
	int ret = 0;
	void *loc_cpu_entry;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	/* choose the copy that is on our node/cpu, ...
	 * This choice is lazy (because current thread is
	 * allowed to migrate to another cpu)
	 */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	/* ... then copy entire thing ... */
	if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
		ret = -EFAULT;
		goto free_counters;
	}

	/* FIXME: use iterator macros --RR */
	/* ... then go back and fix counters and names */
	for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
		unsigned int i;
		struct ipt_entry_match *m;
		struct ipt_entry_target *t;

		e = (struct ipt_entry *)(loc_cpu_entry + off);
		/* Overwrite the copied (kernel-internal) counters with
		 * the snapshot for this rule. */
		if (copy_to_user(userptr + off
				 + offsetof(struct ipt_entry, counters),
				 &counters[num],
				 sizeof(counters[num])) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}

		/* Replace each match's kernel pointer with its name. */
		for (i = sizeof(struct ipt_entry);
		     i < e->target_offset;
		     i += m->u.match_size) {
			m = (void *)e + i;

			if (copy_to_user(userptr + off + i
					 + offsetof(struct ipt_entry_match,
						    u.user.name),
					 m->u.kernel.match->name,
					 strlen(m->u.kernel.match->name)+1)
			    != 0) {
				ret = -EFAULT;
				goto free_counters;
			}
		}

		/* Same for the target. */
		t = ipt_get_target(e);
		if (copy_to_user(userptr + off + e->target_offset
				 + offsetof(struct ipt_entry_target,
					    u.user.name),
				 t->u.kernel.target->name,
				 strlen(t->u.kernel.target->name)+1) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}
	}

 free_counters:
	vfree(counters);
	return ret;
}
900
2722971c
DM
#ifdef CONFIG_COMPAT
/* One node per converted entry: offset is the entry's offset in the
 * native blob, delta the number of bytes it shrinks by in the 32-bit
 * (compat) layout. */
struct compat_delta {
	struct compat_delta *next;
	u_int16_t offset;
	short delta;
};

/* Delta list for the table currently being converted.  NOTE(review):
 * file-scope and unlocked here — presumably serialized by the
 * xt_compat_lock taken by callers (see get_info); verify. */
static struct compat_delta *compat_offsets = NULL;

/* Record that the entry at @offset shrinks by @delta bytes in the
 * compat layout.  Returns 0 or -ENOMEM.  Insertion position does not
 * matter: compat_calc_jump() always walks the whole list. */
static int compat_add_offset(u_int16_t offset, short delta)
{
	struct compat_delta *tmp;

	tmp = kmalloc(sizeof(struct compat_delta), GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;
	tmp->offset = offset;
	tmp->delta = delta;
	if (compat_offsets) {
		/* Splice in right after the head. */
		tmp->next = compat_offsets->next;
		compat_offsets->next = tmp;
	} else {
		compat_offsets = tmp;
		tmp->next = NULL;
	}
	return 0;
}
928
929static void compat_flush_offsets(void)
930{
931 struct compat_delta *tmp, *next;
932
933 if (compat_offsets) {
934 for(tmp = compat_offsets; tmp; tmp = next) {
935 next = tmp->next;
936 kfree(tmp);
937 }
938 compat_offsets = NULL;
939 }
940}
941
942static short compat_calc_jump(u_int16_t offset)
943{
944 struct compat_delta *tmp;
945 short delta;
946
947 for(tmp = compat_offsets, delta = 0; tmp; tmp = tmp->next)
948 if (tmp->offset < offset)
949 delta += tmp->delta;
950 return delta;
951}
952
/* 32-bit userland's view of a standard target and a standard entry. */
struct compat_ipt_standard_target
{
	struct compat_xt_entry_target target;
	compat_int_t verdict;
};

struct compat_ipt_standard
{
	struct compat_ipt_entry entry;
	struct compat_ipt_standard_target target;
};

/* Aligned native and compat sizes of a standard target, and the
 * per-entry size difference between the two layouts. */
#define IPT_ST_LEN		XT_ALIGN(sizeof(struct ipt_standard_target))
#define IPT_ST_COMPAT_LEN	COMPAT_XT_ALIGN(sizeof(struct compat_ipt_standard_target))
#define IPT_ST_OFFSET		(IPT_ST_LEN - IPT_ST_COMPAT_LEN)
968
2722971c
DM
/* Convert a standard target between native and compat layouts, or just
 * account for the size difference (COMPAT_CALC_SIZE).  Jump verdicts
 * (> 0) are rebased with compat_calc_jump() because entry offsets
 * differ between the two layouts; *dstptr and *size are advanced by
 * the destination-layout size. */
static int compat_ipt_standard_fn(void *target,
		void **dstptr, int *size, int convert)
{
	struct compat_ipt_standard_target compat_st, *pcompat_st;
	struct ipt_standard_target st, *pst;
	int ret;

	ret = 0;
	switch (convert) {
	case COMPAT_TO_USER:
		pst = target;
		memcpy(&compat_st.target, &pst->target,
		       sizeof(compat_st.target));
		compat_st.verdict = pst->verdict;
		if (compat_st.verdict > 0)
			/* Shrink jump offsets to the compat layout. */
			compat_st.verdict -=
				compat_calc_jump(compat_st.verdict);
		compat_st.target.u.user.target_size = IPT_ST_COMPAT_LEN;
		if (copy_to_user(*dstptr, &compat_st, IPT_ST_COMPAT_LEN))
			ret = -EFAULT;
		*size -= IPT_ST_OFFSET;
		*dstptr += IPT_ST_COMPAT_LEN;
		break;
	case COMPAT_FROM_USER:
		pcompat_st = target;
		memcpy(&st.target, &pcompat_st->target, IPT_ST_COMPAT_LEN);
		st.verdict = pcompat_st->verdict;
		if (st.verdict > 0)
			/* Grow jump offsets to the native layout. */
			st.verdict += compat_calc_jump(st.verdict);
		st.target.u.user.target_size = IPT_ST_LEN;
		memcpy(*dstptr, &st, IPT_ST_LEN);
		*size += IPT_ST_OFFSET;
		*dstptr += IPT_ST_LEN;
		break;
	case COMPAT_CALC_SIZE:
		*size += IPT_ST_OFFSET;
		break;
	default:
		ret = -ENOPROTOOPT;
		break;
	}
	return ret;
}
1012
1013static inline int
1014compat_calc_match(struct ipt_entry_match *m, int * size)
1015{
1016 if (m->u.kernel.match->compat)
1017 m->u.kernel.match->compat(m, NULL, size, COMPAT_CALC_SIZE);
1018 else
1019 xt_compat_match(m, NULL, size, COMPAT_CALC_SIZE);
1020 return 0;
1021}
1022
/* Account for one entry when sizing the compat view of a table:
 * total up how much its matches and target shrink, subtract that from
 * newinfo->size, remember the (offset, delta) pair for later jump
 * rebasing, and shift every hook entry/underflow offset that lies
 * after this entry. */
static int compat_calc_entry(struct ipt_entry *e, struct xt_table_info *info,
		void *base, struct xt_table_info *newinfo)
{
	struct ipt_entry_target *t;
	u_int16_t entry_offset;
	int off, i, ret;

	off = 0;
	entry_offset = (void *)e - base;
	IPT_MATCH_ITERATE(e, compat_calc_match, &off);
	t = ipt_get_target(e);
	if (t->u.kernel.target->compat)
		t->u.kernel.target->compat(t, NULL, &off, COMPAT_CALC_SIZE);
	else
		xt_compat_target(t, NULL, &off, COMPAT_CALC_SIZE);
	newinfo->size -= off;
	ret = compat_add_offset(entry_offset, off);
	if (ret)
		return ret;

	for (i = 0; i< NF_IP_NUMHOOKS; i++) {
		if (info->hook_entry[i] && (e < (struct ipt_entry *)
				(base + info->hook_entry[i])))
			newinfo->hook_entry[i] -= off;
		if (info->underflow[i] && (e < (struct ipt_entry *)
				(base + info->underflow[i])))
			newinfo->underflow[i] -= off;
	}
	return 0;
}
1053
/* Build in newinfo the metadata (size, hook offsets) a 32-bit userland
 * would see for table info: start from the native values and let
 * compat_calc_entry subtract each entry's shrinkage.  Also populates
 * the compat_offsets delta list as a side effect. */
static int compat_table_info(struct xt_table_info *info,
			     struct xt_table_info *newinfo)
{
	void *loc_cpu_entry;
	int i;

	if (!newinfo || !info)
		return -EINVAL;

	memset(newinfo, 0, sizeof(struct xt_table_info));
	newinfo->size = info->size;
	newinfo->number = info->number;
	for (i = 0; i < NF_IP_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = info->hook_entry[i];
		newinfo->underflow[i] = info->underflow[i];
	}
	/* Any CPU's copy will do; they are identical. */
	loc_cpu_entry = info->entries[raw_smp_processor_id()];
	return IPT_ENTRY_ITERATE(loc_cpu_entry, info->size,
			compat_calc_entry, info, loc_cpu_entry, newinfo);
}
1074#endif
1075
/* IPT_SO_GET_INFO handler: copy the named table's metadata (hooks,
 * underflows, entry count, size) to userspace.  When called from the
 * 32-bit compat path, the sizes/offsets are first recomputed for the
 * compat layout under the compat lock. */
static int get_info(void __user *user, int *len, int compat)
{
	char name[IPT_TABLE_MAXNAMELEN];
	struct ipt_table *t;
	int ret;

	if (*len != sizeof(struct ipt_getinfo)) {
		duprintf("length %u != %u\n", *len,
			(unsigned int)sizeof(struct ipt_getinfo));
		return -EINVAL;
	}

	if (copy_from_user(name, user, sizeof(name)) != 0)
		return -EFAULT;

	name[IPT_TABLE_MAXNAMELEN-1] = '\0';
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_lock(AF_INET);
#endif
	t = try_then_request_module(xt_find_table_lock(AF_INET, name),
			"iptable_%s", name);
	if (t && !IS_ERR(t)) {
		struct ipt_getinfo info;
		struct xt_table_info *private = t->private;

#ifdef CONFIG_COMPAT
		if (compat) {
			struct xt_table_info tmp;
			/* NOTE(review): the return value of
			 * compat_table_info() is not checked here;
			 * on failure tmp may be partly filled —
			 * verify whether an error bail-out is needed. */
			ret = compat_table_info(private, &tmp);
			compat_flush_offsets();
			private =  &tmp;
		}
#endif
		info.valid_hooks = t->valid_hooks;
		memcpy(info.hook_entry, private->hook_entry,
				sizeof(info.hook_entry));
		memcpy(info.underflow, private->underflow,
				sizeof(info.underflow));
		info.num_entries = private->number;
		info.size = private->size;
		strcpy(info.name, name);

		if (copy_to_user(user, &info, *len) != 0)
			ret = -EFAULT;
		else
			ret = 0;

		xt_table_unlock(t);
		module_put(t->me);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_unlock(AF_INET);
#endif
	return ret;
}
1134
1135static int
1136get_entries(struct ipt_get_entries __user *uptr, int *len)
1137{
1138 int ret;
1139 struct ipt_get_entries get;
1140 struct ipt_table *t;
1141
1142 if (*len < sizeof(get)) {
1143 duprintf("get_entries: %u < %d\n", *len,
1144 (unsigned int)sizeof(get));
1145 return -EINVAL;
1146 }
1147 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1148 return -EFAULT;
1149 if (*len != sizeof(struct ipt_get_entries) + get.size) {
1150 duprintf("get_entries: %u != %u\n", *len,
1151 (unsigned int)(sizeof(struct ipt_get_entries) +
1152 get.size));
1153 return -EINVAL;
1154 }
1155
1156 t = xt_find_table_lock(AF_INET, get.name);
1157 if (t && !IS_ERR(t)) {
1158 struct xt_table_info *private = t->private;
1159 duprintf("t->private->number = %u\n",
1160 private->number);
1161 if (get.size == private->size)
1162 ret = copy_entries_to_user(private->size,
1163 t, uptr->entrytable);
1164 else {
1165 duprintf("get_entries: I've got %u not %u!\n",
1166 private->size,
1167 get.size);
1168 ret = -EINVAL;
1169 }
1170 module_put(t->me);
1171 xt_table_unlock(t);
1172 } else
1173 ret = t ? PTR_ERR(t) : -ENOENT;
1174
1175 return ret;
1176}
1177
1178static int
1179__do_replace(const char *name, unsigned int valid_hooks,
1180 struct xt_table_info *newinfo, unsigned int num_counters,
1181 void __user *counters_ptr)
1182{
1183 int ret;
1184 struct ipt_table *t;
1185 struct xt_table_info *oldinfo;
1186 struct xt_counters *counters;
1187 void *loc_cpu_old_entry;
1188
1189 ret = 0;
1190 counters = vmalloc(num_counters * sizeof(struct xt_counters));
1191 if (!counters) {
1192 ret = -ENOMEM;
1193 goto out;
1194 }
1195
1196 t = try_then_request_module(xt_find_table_lock(AF_INET, name),
1197 "iptable_%s", name);
1198 if (!t || IS_ERR(t)) {
1199 ret = t ? PTR_ERR(t) : -ENOENT;
1200 goto free_newinfo_counters_untrans;
1201 }
1202
1203 /* You lied! */
1204 if (valid_hooks != t->valid_hooks) {
1205 duprintf("Valid hook crap: %08X vs %08X\n",
1206 valid_hooks, t->valid_hooks);
1207 ret = -EINVAL;
1208 goto put_module;
1209 }
1210
1211 oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
1212 if (!oldinfo)
1213 goto put_module;
1214
1215 /* Update module usage count based on number of rules */
1216 duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
1217 oldinfo->number, oldinfo->initial_entries, newinfo->number);
1218 if ((oldinfo->number > oldinfo->initial_entries) ||
1219 (newinfo->number <= oldinfo->initial_entries))
1220 module_put(t->me);
1221 if ((oldinfo->number > oldinfo->initial_entries) &&
1222 (newinfo->number <= oldinfo->initial_entries))
1223 module_put(t->me);
1224
1225 /* Get the old counters. */
1226 get_counters(oldinfo, counters);
1227 /* Decrease module usage counts and free resource */
1228 loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
1229 IPT_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry,NULL);
1230 xt_free_table_info(oldinfo);
1231 if (copy_to_user(counters_ptr, counters,
1232 sizeof(struct xt_counters) * num_counters) != 0)
1233 ret = -EFAULT;
1234 vfree(counters);
1235 xt_table_unlock(t);
1236 return ret;
1237
1238 put_module:
1239 module_put(t->me);
1240 xt_table_unlock(t);
1241 free_newinfo_counters_untrans:
1242 vfree(counters);
1243 out:
1244 return ret;
1245}
1246
1247static int
1248do_replace(void __user *user, unsigned int len)
1249{
1250 int ret;
1251 struct ipt_replace tmp;
1252 struct xt_table_info *newinfo;
1253 void *loc_cpu_entry;
1254
1255 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1256 return -EFAULT;
1257
1258 /* Hack: Causes ipchains to give correct error msg --RR */
1259 if (len != sizeof(tmp) + tmp.size)
1260 return -ENOPROTOOPT;
1261
1262 /* overflow check */
1263 if (tmp.size >= (INT_MAX - sizeof(struct xt_table_info)) / NR_CPUS -
1264 SMP_CACHE_BYTES)
1265 return -ENOMEM;
1266 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1267 return -ENOMEM;
1268
1269 newinfo = xt_alloc_table_info(tmp.size);
1270 if (!newinfo)
1271 return -ENOMEM;
1272
1273 /* choose the copy that is our node/cpu */
1274 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1275 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1276 tmp.size) != 0) {
1277 ret = -EFAULT;
1278 goto free_newinfo;
1279 }
1280
1281 ret = translate_table(tmp.name, tmp.valid_hooks,
1282 newinfo, loc_cpu_entry, tmp.size, tmp.num_entries,
1283 tmp.hook_entry, tmp.underflow);
1284 if (ret != 0)
1285 goto free_newinfo;
1286
1287 duprintf("ip_tables: Translated table\n");
1288
1289 ret = __do_replace(tmp.name, tmp.valid_hooks,
1290 newinfo, tmp.num_counters,
1291 tmp.counters);
1292 if (ret)
1293 goto free_newinfo_untrans;
1294 return 0;
1295
1296 free_newinfo_untrans:
1297 IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry,NULL);
1298 free_newinfo:
1299 xt_free_table_info(newinfo);
1300 return ret;
1301}
1302
1303/* We're lazy, and add to the first CPU; overflow works its fey magic
1304 * and everything is OK. */
1305static inline int
1306add_counter_to_entry(struct ipt_entry *e,
1307 const struct xt_counters addme[],
1308 unsigned int *i)
1309{
1310#if 0
1311 duprintf("add_counter: Entry %u %lu/%lu + %lu/%lu\n",
1312 *i,
1313 (long unsigned int)e->counters.pcnt,
1314 (long unsigned int)e->counters.bcnt,
1315 (long unsigned int)addme[*i].pcnt,
1316 (long unsigned int)addme[*i].bcnt);
1317#endif
1318
1319 ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
1320
1321 (*i)++;
1322 return 0;
1323}
1324
1325static int
1326do_add_counters(void __user *user, unsigned int len, int compat)
1327{
1328 unsigned int i;
1329 struct xt_counters_info tmp;
1330 struct xt_counters *paddc;
1331 unsigned int num_counters;
1332 char *name;
1333 int size;
1334 void *ptmp;
1335 struct ipt_table *t;
1336 struct xt_table_info *private;
1337 int ret = 0;
1338 void *loc_cpu_entry;
1339#ifdef CONFIG_COMPAT
1340 struct compat_xt_counters_info compat_tmp;
1341
1342 if (compat) {
1343 ptmp = &compat_tmp;
1344 size = sizeof(struct compat_xt_counters_info);
1345 } else
1346#endif
1347 {
1348 ptmp = &tmp;
1349 size = sizeof(struct xt_counters_info);
1350 }
1351
1352 if (copy_from_user(ptmp, user, size) != 0)
1353 return -EFAULT;
1354
1355#ifdef CONFIG_COMPAT
1356 if (compat) {
1357 num_counters = compat_tmp.num_counters;
1358 name = compat_tmp.name;
1359 } else
1360#endif
1361 {
1362 num_counters = tmp.num_counters;
1363 name = tmp.name;
1364 }
1365
1366 if (len != size + num_counters * sizeof(struct xt_counters))
1367 return -EINVAL;
1368
1369 paddc = vmalloc_node(len - size, numa_node_id());
1370 if (!paddc)
1371 return -ENOMEM;
1372
1373 if (copy_from_user(paddc, user + size, len - size) != 0) {
1374 ret = -EFAULT;
1375 goto free;
1376 }
1377
1378 t = xt_find_table_lock(AF_INET, name);
1379 if (!t || IS_ERR(t)) {
1380 ret = t ? PTR_ERR(t) : -ENOENT;
1381 goto free;
1382 }
1383
1384 write_lock_bh(&t->lock);
1385 private = t->private;
1386 if (private->number != num_counters) {
1387 ret = -EINVAL;
1388 goto unlock_up_free;
1389 }
1390
1391 i = 0;
1392 /* Choose the copy that is on our node */
1393 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1394 IPT_ENTRY_ITERATE(loc_cpu_entry,
1395 private->size,
1396 add_counter_to_entry,
1397 paddc,
1398 &i);
1399 unlock_up_free:
1400 write_unlock_bh(&t->lock);
1401 xt_table_unlock(t);
1402 module_put(t->me);
1403 free:
1404 vfree(paddc);
1405
1406 return ret;
1407}
1408
1409#ifdef CONFIG_COMPAT
1410struct compat_ipt_replace {
1411 char name[IPT_TABLE_MAXNAMELEN];
1412 u32 valid_hooks;
1413 u32 num_entries;
1414 u32 size;
1415 u32 hook_entry[NF_IP_NUMHOOKS];
1416 u32 underflow[NF_IP_NUMHOOKS];
1417 u32 num_counters;
1418 compat_uptr_t counters; /* struct ipt_counters * */
1419 struct compat_ipt_entry entries[0];
1420};
1421
1422static inline int compat_copy_match_to_user(struct ipt_entry_match *m,
1423 void __user **dstptr, compat_uint_t *size)
1424{
1425 if (m->u.kernel.match->compat)
1426 return m->u.kernel.match->compat(m, dstptr, size,
1427 COMPAT_TO_USER);
1428 else
1429 return xt_compat_match(m, dstptr, size, COMPAT_TO_USER);
1430}
1431
1432static int compat_copy_entry_to_user(struct ipt_entry *e,
1433 void __user **dstptr, compat_uint_t *size)
1434{
1435 struct ipt_entry_target __user *t;
1436 struct compat_ipt_entry __user *ce;
1437 u_int16_t target_offset, next_offset;
1438 compat_uint_t origsize;
1439 int ret;
1440
1441 ret = -EFAULT;
1442 origsize = *size;
1443 ce = (struct compat_ipt_entry __user *)*dstptr;
1444 if (__copy_to_user(ce, e, sizeof(struct ipt_entry)))
1445 goto out;
1446
1447 *dstptr += sizeof(struct compat_ipt_entry);
1448 ret = IPT_MATCH_ITERATE(e, compat_copy_match_to_user, dstptr, size);
1449 target_offset = e->target_offset - (origsize - *size);
1450 if (ret)
1451 goto out;
1452 t = ipt_get_target(e);
1453 if (t->u.kernel.target->compat)
1454 ret = t->u.kernel.target->compat(t, dstptr, size,
1455 COMPAT_TO_USER);
1456 else
1457 ret = xt_compat_target(t, dstptr, size, COMPAT_TO_USER);
1458 if (ret)
1459 goto out;
1460 ret = -EFAULT;
1461 next_offset = e->next_offset - (origsize - *size);
1462 if (__put_user(target_offset, &ce->target_offset))
1463 goto out;
1464 if (__put_user(next_offset, &ce->next_offset))
1465 goto out;
1466 return 0;
1467out:
1468 return ret;
1469}
1470
1471static inline int
1472compat_check_calc_match(struct ipt_entry_match *m,
1473 const char *name,
1474 const struct ipt_ip *ip,
1475 unsigned int hookmask,
1476 int *size, int *i)
1477{
1478 struct ipt_match *match;
1479
1480 match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name,
1481 m->u.user.revision),
1482 "ipt_%s", m->u.user.name);
1483 if (IS_ERR(match) || !match) {
1484 duprintf("compat_check_calc_match: `%s' not found\n",
1485 m->u.user.name);
1486 return match ? PTR_ERR(match) : -ENOENT;
1487 }
1488 m->u.kernel.match = match;
1489
1490 if (m->u.kernel.match->compat)
1491 m->u.kernel.match->compat(m, NULL, size, COMPAT_CALC_SIZE);
1492 else
1493 xt_compat_match(m, NULL, size, COMPAT_CALC_SIZE);
1494
1495 (*i)++;
1496 return 0;
1497}
1498
1499static inline int
1500check_compat_entry_size_and_hooks(struct ipt_entry *e,
1501 struct xt_table_info *newinfo,
1502 unsigned int *size,
1503 unsigned char *base,
1504 unsigned char *limit,
1505 unsigned int *hook_entries,
1506 unsigned int *underflows,
1507 unsigned int *i,
1508 const char *name)
1509{
1510 struct ipt_entry_target *t;
1511 struct ipt_target *target;
1512 u_int16_t entry_offset;
1513 int ret, off, h, j;
1514
1515 duprintf("check_compat_entry_size_and_hooks %p\n", e);
1516 if ((unsigned long)e % __alignof__(struct compat_ipt_entry) != 0
1517 || (unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit) {
1518 duprintf("Bad offset %p, limit = %p\n", e, limit);
1519 return -EINVAL;
1520 }
1521
1522 if (e->next_offset < sizeof(struct compat_ipt_entry) +
1523 sizeof(struct compat_xt_entry_target)) {
1524 duprintf("checking: element %p size %u\n",
1525 e, e->next_offset);
1526 return -EINVAL;
1527 }
1528
1529 if (!ip_checkentry(&e->ip)) {
1530 duprintf("ip_tables: ip check failed %p %s.\n", e, name);
1531 return -EINVAL;
1532 }
1533
1534 off = 0;
1535 entry_offset = (void *)e - (void *)base;
1536 j = 0;
1537 ret = IPT_MATCH_ITERATE(e, compat_check_calc_match, name, &e->ip,
1538 e->comefrom, &off, &j);
1539 if (ret != 0)
1540 goto out;
1541
1542 t = ipt_get_target(e);
1543 target = try_then_request_module(xt_find_target(AF_INET,
1544 t->u.user.name,
1545 t->u.user.revision),
1546 "ipt_%s", t->u.user.name);
1547 if (IS_ERR(target) || !target) {
1548 duprintf("check_entry: `%s' not found\n", t->u.user.name);
1549 ret = target ? PTR_ERR(target) : -ENOENT;
1550 goto out;
1551 }
1552 t->u.kernel.target = target;
1553
1554 if (t->u.kernel.target->compat)
1555 t->u.kernel.target->compat(t, NULL, &off, COMPAT_CALC_SIZE);
1556 else
1557 xt_compat_target(t, NULL, &off, COMPAT_CALC_SIZE);
1558 *size += off;
1559 ret = compat_add_offset(entry_offset, off);
1560 if (ret)
1561 goto out;
1562
1563 /* Check hooks & underflows */
1564 for (h = 0; h < NF_IP_NUMHOOKS; h++) {
1565 if ((unsigned char *)e - base == hook_entries[h])
1566 newinfo->hook_entry[h] = hook_entries[h];
1567 if ((unsigned char *)e - base == underflows[h])
1568 newinfo->underflow[h] = underflows[h];
1569 }
1570
1571 /* Clear counters and comefrom */
1572 e->counters = ((struct ipt_counters) { 0, 0 });
1573 e->comefrom = 0;
1574
1575 (*i)++;
1576 return 0;
1577out:
1578 IPT_MATCH_ITERATE(e, cleanup_match, &j);
1579 return ret;
1580}
1581
1582static inline int compat_copy_match_from_user(struct ipt_entry_match *m,
1583 void **dstptr, compat_uint_t *size, const char *name,
1584 const struct ipt_ip *ip, unsigned int hookmask)
1585{
1586 struct ipt_entry_match *dm;
1587 struct ipt_match *match;
1588 int ret;
1589
1590 dm = (struct ipt_entry_match *)*dstptr;
1591 match = m->u.kernel.match;
1592 if (match->compat)
1593 match->compat(m, dstptr, size, COMPAT_FROM_USER);
1594 else
1595 xt_compat_match(m, dstptr, size, COMPAT_FROM_USER);
1596
1597 ret = xt_check_match(match, AF_INET, dm->u.match_size - sizeof(*dm),
1598 name, hookmask, ip->proto,
1599 ip->invflags & IPT_INV_PROTO);
1600 if (ret)
1601 return ret;
1602
1603 if (m->u.kernel.match->checkentry
1604 && !m->u.kernel.match->checkentry(name, ip, match, dm->data,
1605 dm->u.match_size - sizeof(*dm),
1606 hookmask)) {
1607 duprintf("ip_tables: check failed for `%s'.\n",
1608 m->u.kernel.match->name);
1609 return -EINVAL;
1610 }
1611 return 0;
1612}
1613
1614static int compat_copy_entry_from_user(struct ipt_entry *e, void **dstptr,
1615 unsigned int *size, const char *name,
1616 struct xt_table_info *newinfo, unsigned char *base)
1617{
1618 struct ipt_entry_target *t;
1619 struct ipt_target *target;
1620 struct ipt_entry *de;
1621 unsigned int origsize;
1622 int ret, h;
1623
1624 ret = 0;
1625 origsize = *size;
1626 de = (struct ipt_entry *)*dstptr;
1627 memcpy(de, e, sizeof(struct ipt_entry));
1628
1629 *dstptr += sizeof(struct compat_ipt_entry);
1630 ret = IPT_MATCH_ITERATE(e, compat_copy_match_from_user, dstptr, size,
1631 name, &de->ip, de->comefrom);
1632 if (ret)
1633 goto out;
1634 de->target_offset = e->target_offset - (origsize - *size);
1635 t = ipt_get_target(e);
1636 target = t->u.kernel.target;
1637 if (target->compat)
1638 target->compat(t, dstptr, size, COMPAT_FROM_USER);
1639 else
1640 xt_compat_target(t, dstptr, size, COMPAT_FROM_USER);
1641
1642 de->next_offset = e->next_offset - (origsize - *size);
1643 for (h = 0; h < NF_IP_NUMHOOKS; h++) {
1644 if ((unsigned char *)de - base < newinfo->hook_entry[h])
1645 newinfo->hook_entry[h] -= origsize - *size;
1646 if ((unsigned char *)de - base < newinfo->underflow[h])
1647 newinfo->underflow[h] -= origsize - *size;
1648 }
1649
1650 t = ipt_get_target(de);
1651 target = t->u.kernel.target;
1652 ret = xt_check_target(target, AF_INET, t->u.target_size - sizeof(*t),
1653 name, e->comefrom, e->ip.proto,
1654 e->ip.invflags & IPT_INV_PROTO);
1655 if (ret)
1656 goto out;
1657
1658 ret = -EINVAL;
1659 if (t->u.kernel.target == &ipt_standard_target) {
1660 if (!standard_check(t, *size))
1661 goto out;
1662 } else if (t->u.kernel.target->checkentry
1663 && !t->u.kernel.target->checkentry(name, de, target,
1664 t->data, t->u.target_size - sizeof(*t),
1665 de->comefrom)) {
1666 duprintf("ip_tables: compat: check failed for `%s'.\n",
1667 t->u.kernel.target->name);
1668 goto out;
1669 }
1670 ret = 0;
1671out:
1672 return ret;
1673}
1674
1da177e4 1675static int
2722971c
DM
1676translate_compat_table(const char *name,
1677 unsigned int valid_hooks,
1678 struct xt_table_info **pinfo,
1679 void **pentry0,
1680 unsigned int total_size,
1681 unsigned int number,
1682 unsigned int *hook_entries,
1683 unsigned int *underflows)
1da177e4 1684{
2722971c
DM
1685 unsigned int i;
1686 struct xt_table_info *newinfo, *info;
1687 void *pos, *entry0, *entry1;
1688 unsigned int size;
1da177e4 1689 int ret;
1da177e4 1690
2722971c
DM
1691 info = *pinfo;
1692 entry0 = *pentry0;
1693 size = total_size;
1694 info->number = number;
1695
1696 /* Init all hooks to impossible value. */
1697 for (i = 0; i < NF_IP_NUMHOOKS; i++) {
1698 info->hook_entry[i] = 0xFFFFFFFF;
1699 info->underflow[i] = 0xFFFFFFFF;
1700 }
1701
1702 duprintf("translate_compat_table: size %u\n", info->size);
1703 i = 0;
1704 xt_compat_lock(AF_INET);
1705 /* Walk through entries, checking offsets. */
1706 ret = IPT_ENTRY_ITERATE(entry0, total_size,
1707 check_compat_entry_size_and_hooks,
1708 info, &size, entry0,
1709 entry0 + total_size,
1710 hook_entries, underflows, &i, name);
1711 if (ret != 0)
1712 goto out_unlock;
1713
1714 ret = -EINVAL;
1715 if (i != number) {
1716 duprintf("translate_compat_table: %u not %u entries\n",
1717 i, number);
1718 goto out_unlock;
1719 }
1720
1721 /* Check hooks all assigned */
1722 for (i = 0; i < NF_IP_NUMHOOKS; i++) {
1723 /* Only hooks which are valid */
1724 if (!(valid_hooks & (1 << i)))
1725 continue;
1726 if (info->hook_entry[i] == 0xFFFFFFFF) {
1727 duprintf("Invalid hook entry %u %u\n",
1728 i, hook_entries[i]);
1729 goto out_unlock;
1da177e4 1730 }
2722971c
DM
1731 if (info->underflow[i] == 0xFFFFFFFF) {
1732 duprintf("Invalid underflow %u %u\n",
1733 i, underflows[i]);
1734 goto out_unlock;
1735 }
1736 }
1737
1738 ret = -ENOMEM;
1739 newinfo = xt_alloc_table_info(size);
1740 if (!newinfo)
1741 goto out_unlock;
1742
1743 newinfo->number = number;
1744 for (i = 0; i < NF_IP_NUMHOOKS; i++) {
1745 newinfo->hook_entry[i] = info->hook_entry[i];
1746 newinfo->underflow[i] = info->underflow[i];
1747 }
1748 entry1 = newinfo->entries[raw_smp_processor_id()];
1749 pos = entry1;
1750 size = total_size;
1751 ret = IPT_ENTRY_ITERATE(entry0, total_size,
1752 compat_copy_entry_from_user, &pos, &size,
1753 name, newinfo, entry1);
1754 compat_flush_offsets();
1755 xt_compat_unlock(AF_INET);
1756 if (ret)
1757 goto free_newinfo;
1758
1759 ret = -ELOOP;
1760 if (!mark_source_chains(newinfo, valid_hooks, entry1))
1761 goto free_newinfo;
1762
1763 /* And one copy for every other CPU */
1764 for_each_cpu(i)
1765 if (newinfo->entries[i] && newinfo->entries[i] != entry1)
1766 memcpy(newinfo->entries[i], entry1, newinfo->size);
1767
1768 *pinfo = newinfo;
1769 *pentry0 = entry1;
1770 xt_free_table_info(info);
1771 return 0;
1da177e4 1772
2722971c
DM
1773free_newinfo:
1774 xt_free_table_info(newinfo);
1775out:
1da177e4 1776 return ret;
2722971c
DM
1777out_unlock:
1778 xt_compat_unlock(AF_INET);
1779 goto out;
1da177e4
LT
1780}
1781
1782static int
2722971c 1783compat_do_replace(void __user *user, unsigned int len)
1da177e4
LT
1784{
1785 int ret;
2722971c
DM
1786 struct compat_ipt_replace tmp;
1787 struct xt_table_info *newinfo;
1788 void *loc_cpu_entry;
1da177e4
LT
1789
1790 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1791 return -EFAULT;
1792
1793 /* Hack: Causes ipchains to give correct error msg --RR */
1794 if (len != sizeof(tmp) + tmp.size)
1795 return -ENOPROTOOPT;
1796
ee4bb818
KK
1797 /* overflow check */
1798 if (tmp.size >= (INT_MAX - sizeof(struct xt_table_info)) / NR_CPUS -
1799 SMP_CACHE_BYTES)
1800 return -ENOMEM;
1801 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1802 return -ENOMEM;
1803
2e4e6a17 1804 newinfo = xt_alloc_table_info(tmp.size);
1da177e4
LT
1805 if (!newinfo)
1806 return -ENOMEM;
1807
31836064
ED
1808 /* choose the copy that is our node/cpu */
1809 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1810 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1da177e4
LT
1811 tmp.size) != 0) {
1812 ret = -EFAULT;
1813 goto free_newinfo;
1814 }
1815
2722971c
DM
1816 ret = translate_compat_table(tmp.name, tmp.valid_hooks,
1817 &newinfo, &loc_cpu_entry, tmp.size,
1818 tmp.num_entries, tmp.hook_entry, tmp.underflow);
1819 if (ret != 0)
1da177e4 1820 goto free_newinfo;
1da177e4 1821
2722971c 1822 duprintf("compat_do_replace: Translated table\n");
1da177e4 1823
2722971c
DM
1824 ret = __do_replace(tmp.name, tmp.valid_hooks,
1825 newinfo, tmp.num_counters,
1826 compat_ptr(tmp.counters));
1827 if (ret)
1828 goto free_newinfo_untrans;
1829 return 0;
1da177e4 1830
2722971c
DM
1831 free_newinfo_untrans:
1832 IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry,NULL);
1833 free_newinfo:
1834 xt_free_table_info(newinfo);
1835 return ret;
1836}
1da177e4 1837
2722971c
DM
1838static int
1839compat_do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user,
1840 unsigned int len)
1841{
1842 int ret;
1da177e4 1843
2722971c
DM
1844 if (!capable(CAP_NET_ADMIN))
1845 return -EPERM;
1da177e4 1846
2722971c
DM
1847 switch (cmd) {
1848 case IPT_SO_SET_REPLACE:
1849 ret = compat_do_replace(user, len);
1850 break;
1da177e4 1851
2722971c
DM
1852 case IPT_SO_SET_ADD_COUNTERS:
1853 ret = do_add_counters(user, len, 1);
1854 break;
1855
1856 default:
1857 duprintf("do_ipt_set_ctl: unknown request %i\n", cmd);
1858 ret = -EINVAL;
1859 }
1da177e4 1860
1da177e4
LT
1861 return ret;
1862}
1863
2722971c 1864struct compat_ipt_get_entries
1da177e4 1865{
2722971c
DM
1866 char name[IPT_TABLE_MAXNAMELEN];
1867 compat_uint_t size;
1868 struct compat_ipt_entry entrytable[0];
1869};
1da177e4 1870
2722971c
DM
1871static int compat_copy_entries_to_user(unsigned int total_size,
1872 struct ipt_table *table, void __user *userptr)
1873{
1874 unsigned int off, num;
1875 struct compat_ipt_entry e;
1876 struct xt_counters *counters;
1877 struct xt_table_info *private = table->private;
1878 void __user *pos;
1879 unsigned int size;
1880 int ret = 0;
1881 void *loc_cpu_entry;
1da177e4 1882
2722971c
DM
1883 counters = alloc_counters(table);
1884 if (IS_ERR(counters))
1885 return PTR_ERR(counters);
1886
1887 /* choose the copy that is on our node/cpu, ...
1888 * This choice is lazy (because current thread is
1889 * allowed to migrate to another cpu)
1890 */
1891 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1892 pos = userptr;
1893 size = total_size;
1894 ret = IPT_ENTRY_ITERATE(loc_cpu_entry, total_size,
1895 compat_copy_entry_to_user, &pos, &size);
1896 if (ret)
1897 goto free_counters;
1898
1899 /* ... then go back and fix counters and names */
1900 for (off = 0, num = 0; off < size; off += e.next_offset, num++) {
1901 unsigned int i;
1902 struct ipt_entry_match m;
1903 struct ipt_entry_target t;
1904
1905 ret = -EFAULT;
1906 if (copy_from_user(&e, userptr + off,
1907 sizeof(struct compat_ipt_entry)))
1908 goto free_counters;
1909 if (copy_to_user(userptr + off +
1910 offsetof(struct compat_ipt_entry, counters),
1911 &counters[num], sizeof(counters[num])))
1912 goto free_counters;
1913
1914 for (i = sizeof(struct compat_ipt_entry);
1915 i < e.target_offset; i += m.u.match_size) {
1916 if (copy_from_user(&m, userptr + off + i,
1917 sizeof(struct ipt_entry_match)))
1918 goto free_counters;
1919 if (copy_to_user(userptr + off + i +
1920 offsetof(struct ipt_entry_match, u.user.name),
1921 m.u.kernel.match->name,
1922 strlen(m.u.kernel.match->name) + 1))
1923 goto free_counters;
1924 }
1925
1926 if (copy_from_user(&t, userptr + off + e.target_offset,
1927 sizeof(struct ipt_entry_target)))
1928 goto free_counters;
1929 if (copy_to_user(userptr + off + e.target_offset +
1930 offsetof(struct ipt_entry_target, u.user.name),
1931 t.u.kernel.target->name,
1932 strlen(t.u.kernel.target->name) + 1))
1933 goto free_counters;
1934 }
1935 ret = 0;
1936free_counters:
1937 vfree(counters);
1938 return ret;
1da177e4
LT
1939}
1940
1941static int
2722971c 1942compat_get_entries(struct compat_ipt_get_entries __user *uptr, int *len)
1da177e4 1943{
2722971c
DM
1944 int ret;
1945 struct compat_ipt_get_entries get;
1da177e4 1946 struct ipt_table *t;
1da177e4 1947
1da177e4 1948
2722971c
DM
1949 if (*len < sizeof(get)) {
1950 duprintf("compat_get_entries: %u < %u\n",
1951 *len, (unsigned int)sizeof(get));
1da177e4 1952 return -EINVAL;
2722971c 1953 }
1da177e4 1954
2722971c
DM
1955 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1956 return -EFAULT;
1da177e4 1957
2722971c
DM
1958 if (*len != sizeof(struct compat_ipt_get_entries) + get.size) {
1959 duprintf("compat_get_entries: %u != %u\n", *len,
1960 (unsigned int)(sizeof(struct compat_ipt_get_entries) +
1961 get.size));
1962 return -EINVAL;
1da177e4
LT
1963 }
1964
2722971c
DM
1965 xt_compat_lock(AF_INET);
1966 t = xt_find_table_lock(AF_INET, get.name);
1967 if (t && !IS_ERR(t)) {
1968 struct xt_table_info *private = t->private;
1969 struct xt_table_info info;
1970 duprintf("t->private->number = %u\n",
1971 private->number);
1972 ret = compat_table_info(private, &info);
1973 if (!ret && get.size == info.size) {
1974 ret = compat_copy_entries_to_user(private->size,
1975 t, uptr->entrytable);
1976 } else if (!ret) {
1977 duprintf("compat_get_entries: I've got %u not %u!\n",
1978 private->size,
1979 get.size);
1980 ret = -EINVAL;
1981 }
1982 compat_flush_offsets();
1983 module_put(t->me);
1984 xt_table_unlock(t);
1985 } else
1da177e4 1986 ret = t ? PTR_ERR(t) : -ENOENT;
1da177e4 1987
2722971c
DM
1988 xt_compat_unlock(AF_INET);
1989 return ret;
1990}
1da177e4 1991
2722971c
DM
1992static int
1993compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1994{
1995 int ret;
1da177e4 1996
2722971c
DM
1997 switch (cmd) {
1998 case IPT_SO_GET_INFO:
1999 ret = get_info(user, len, 1);
2000 break;
2001 case IPT_SO_GET_ENTRIES:
2002 ret = compat_get_entries(user, len);
2003 break;
2004 default:
2005 duprintf("compat_do_ipt_get_ctl: unknown request %i\n", cmd);
2006 ret = -EINVAL;
2007 }
1da177e4
LT
2008 return ret;
2009}
2722971c 2010#endif
1da177e4
LT
2011
2012static int
2013do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
2014{
2015 int ret;
2016
2017 if (!capable(CAP_NET_ADMIN))
2018 return -EPERM;
2019
2020 switch (cmd) {
2021 case IPT_SO_SET_REPLACE:
2022 ret = do_replace(user, len);
2023 break;
2024
2025 case IPT_SO_SET_ADD_COUNTERS:
2722971c 2026 ret = do_add_counters(user, len, 0);
1da177e4
LT
2027 break;
2028
2029 default:
2030 duprintf("do_ipt_set_ctl: unknown request %i\n", cmd);
2031 ret = -EINVAL;
2032 }
2033
2034 return ret;
2035}
2036
2037static int
2038do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2039{
2040 int ret;
2041
2042 if (!capable(CAP_NET_ADMIN))
2043 return -EPERM;
2044
2045 switch (cmd) {
2722971c
DM
2046 case IPT_SO_GET_INFO:
2047 ret = get_info(user, len, 0);
2048 break;
1da177e4 2049
2722971c
DM
2050 case IPT_SO_GET_ENTRIES:
2051 ret = get_entries(user, len);
1da177e4 2052 break;
1da177e4
LT
2053
2054 case IPT_SO_GET_REVISION_MATCH:
2055 case IPT_SO_GET_REVISION_TARGET: {
2056 struct ipt_get_revision rev;
2e4e6a17 2057 int target;
1da177e4
LT
2058
2059 if (*len != sizeof(rev)) {
2060 ret = -EINVAL;
2061 break;
2062 }
2063 if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
2064 ret = -EFAULT;
2065 break;
2066 }
2067
2068 if (cmd == IPT_SO_GET_REVISION_TARGET)
2e4e6a17 2069 target = 1;
1da177e4 2070 else
2e4e6a17 2071 target = 0;
1da177e4 2072
2e4e6a17
HW
2073 try_then_request_module(xt_find_revision(AF_INET, rev.name,
2074 rev.revision,
2075 target, &ret),
1da177e4
LT
2076 "ipt_%s", rev.name);
2077 break;
2078 }
2079
2080 default:
2081 duprintf("do_ipt_get_ctl: unknown request %i\n", cmd);
2082 ret = -EINVAL;
2083 }
2084
2085 return ret;
2086}
2087
2e4e6a17 2088int ipt_register_table(struct xt_table *table, const struct ipt_replace *repl)
1da177e4
LT
2089{
2090 int ret;
2e4e6a17
HW
2091 struct xt_table_info *newinfo;
2092 static struct xt_table_info bootstrap
1da177e4 2093 = { 0, 0, 0, { 0 }, { 0 }, { } };
31836064 2094 void *loc_cpu_entry;
1da177e4 2095
2e4e6a17 2096 newinfo = xt_alloc_table_info(repl->size);
1da177e4
LT
2097 if (!newinfo)
2098 return -ENOMEM;
2099
31836064
ED
2100 /* choose the copy on our node/cpu
2101 * but dont care of preemption
2102 */
2103 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
2104 memcpy(loc_cpu_entry, repl->entries, repl->size);
1da177e4
LT
2105
2106 ret = translate_table(table->name, table->valid_hooks,
31836064 2107 newinfo, loc_cpu_entry, repl->size,
1da177e4
LT
2108 repl->num_entries,
2109 repl->hook_entry,
2110 repl->underflow);
2111 if (ret != 0) {
2e4e6a17 2112 xt_free_table_info(newinfo);
1da177e4
LT
2113 return ret;
2114 }
2115
2e4e6a17
HW
2116 if (xt_register_table(table, &bootstrap, newinfo) != 0) {
2117 xt_free_table_info(newinfo);
1da177e4
LT
2118 return ret;
2119 }
2120
2e4e6a17 2121 return 0;
1da177e4
LT
2122}
2123
2124void ipt_unregister_table(struct ipt_table *table)
2125{
2e4e6a17 2126 struct xt_table_info *private;
31836064
ED
2127 void *loc_cpu_entry;
2128
2e4e6a17 2129 private = xt_unregister_table(table);
1da177e4
LT
2130
2131 /* Decrease module usage counts and free resources */
2e4e6a17
HW
2132 loc_cpu_entry = private->entries[raw_smp_processor_id()];
2133 IPT_ENTRY_ITERATE(loc_cpu_entry, private->size, cleanup_entry, NULL);
2134 xt_free_table_info(private);
1da177e4
LT
2135}
2136
/* Returns 1 if the type and code is matched by the range, 0 otherwise */
static inline int
icmp_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
		     u_int8_t type, u_int8_t code,
		     int invert)
{
	int hit;

	/* 0xFF is the userspace wildcard: match every type/code. */
	hit = test_type == 0xFF
	      || (type == test_type && code >= min_code && code <= max_code);
	return hit ^ invert;
}
2146
2147static int
2148icmp_match(const struct sk_buff *skb,
2149 const struct net_device *in,
2150 const struct net_device *out,
c4986734 2151 const struct xt_match *match,
1da177e4
LT
2152 const void *matchinfo,
2153 int offset,
2e4e6a17 2154 unsigned int protoff,
1da177e4
LT
2155 int *hotdrop)
2156{
2157 struct icmphdr _icmph, *ic;
2158 const struct ipt_icmp *icmpinfo = matchinfo;
2159
2160 /* Must not be a fragment. */
2161 if (offset)
2162 return 0;
2163
2e4e6a17 2164 ic = skb_header_pointer(skb, protoff, sizeof(_icmph), &_icmph);
1da177e4
LT
2165 if (ic == NULL) {
2166 /* We've been asked to examine this packet, and we
2167 * can't. Hence, no choice but to drop.
2168 */
2169 duprintf("Dropping evil ICMP tinygram.\n");
2170 *hotdrop = 1;
2171 return 0;
2172 }
2173
2174 return icmp_type_code_match(icmpinfo->type,
2175 icmpinfo->code[0],
2176 icmpinfo->code[1],
2177 ic->type, ic->code,
2178 !!(icmpinfo->invflags&IPT_ICMP_INV));
2179}
2180
2181/* Called when user tries to insert an entry of this type. */
2182static int
2183icmp_checkentry(const char *tablename,
2e4e6a17 2184 const void *info,
c4986734 2185 const struct xt_match *match,
1da177e4
LT
2186 void *matchinfo,
2187 unsigned int matchsize,
2188 unsigned int hook_mask)
2189{
2190 const struct ipt_icmp *icmpinfo = matchinfo;
2191
1d5cd909
PM
2192 /* Must specify no unknown invflags */
2193 return !(icmpinfo->invflags & ~IPT_ICMP_INV);
1da177e4
LT
2194}
2195
2196/* The built-in targets: standard (NULL) and error. */
2197static struct ipt_target ipt_standard_target = {
2198 .name = IPT_STANDARD_TARGET,
1d5cd909 2199 .targetsize = sizeof(int),
a45049c5 2200 .family = AF_INET,
2722971c
DM
2201#ifdef CONFIG_COMPAT
2202 .compat = &compat_ipt_standard_fn,
2203#endif
1da177e4
LT
2204};
2205
2206static struct ipt_target ipt_error_target = {
2207 .name = IPT_ERROR_TARGET,
2208 .target = ipt_error,
1d5cd909 2209 .targetsize = IPT_FUNCTION_MAXNAMELEN,
a45049c5 2210 .family = AF_INET,
1da177e4
LT
2211};
2212
2213static struct nf_sockopt_ops ipt_sockopts = {
2214 .pf = PF_INET,
2215 .set_optmin = IPT_BASE_CTL,
2216 .set_optmax = IPT_SO_SET_MAX+1,
2217 .set = do_ipt_set_ctl,
2722971c
DM
2218#ifdef CONFIG_COMPAT
2219 .compat_set = compat_do_ipt_set_ctl,
2220#endif
1da177e4
LT
2221 .get_optmin = IPT_BASE_CTL,
2222 .get_optmax = IPT_SO_GET_MAX+1,
2223 .get = do_ipt_get_ctl,
2722971c
DM
2224#ifdef CONFIG_COMPAT
2225 .compat_get = compat_do_ipt_get_ctl,
2226#endif
1da177e4
LT
2227};
2228
1da177e4
LT
2229static struct ipt_match icmp_matchstruct = {
2230 .name = "icmp",
1d5cd909
PM
2231 .match = icmp_match,
2232 .matchsize = sizeof(struct ipt_icmp),
2233 .proto = IPPROTO_ICMP,
a45049c5 2234 .family = AF_INET,
1d5cd909 2235 .checkentry = icmp_checkentry,
1da177e4
LT
2236};
2237
65b4b4e8 2238static int __init ip_tables_init(void)
1da177e4
LT
2239{
2240 int ret;
2241
2e4e6a17
HW
2242 xt_proto_init(AF_INET);
2243
1da177e4 2244 /* Noone else will be downing sem now, so we won't sleep */
a45049c5
PNA
2245 xt_register_target(&ipt_standard_target);
2246 xt_register_target(&ipt_error_target);
2247 xt_register_match(&icmp_matchstruct);
1da177e4
LT
2248
2249 /* Register setsockopt */
2250 ret = nf_register_sockopt(&ipt_sockopts);
2251 if (ret < 0) {
2252 duprintf("Unable to register sockopts.\n");
2253 return ret;
2254 }
2255
2e4e6a17 2256 printk("ip_tables: (C) 2000-2006 Netfilter Core Team\n");
1da177e4
LT
2257 return 0;
2258}
2259
65b4b4e8 2260static void __exit ip_tables_fini(void)
1da177e4
LT
2261{
2262 nf_unregister_sockopt(&ipt_sockopts);
2e4e6a17 2263
a45049c5
PNA
2264 xt_unregister_match(&icmp_matchstruct);
2265 xt_unregister_target(&ipt_error_target);
2266 xt_unregister_target(&ipt_standard_target);
2e4e6a17
HW
2267
2268 xt_proto_fini(AF_INET);
1da177e4
LT
2269}
2270
2271EXPORT_SYMBOL(ipt_register_table);
2272EXPORT_SYMBOL(ipt_unregister_table);
1da177e4 2273EXPORT_SYMBOL(ipt_do_table);
65b4b4e8
AM
2274module_init(ip_tables_init);
2275module_exit(ip_tables_fini);
This page took 0.269653 seconds and 5 git commands to generate.