[NETFILTER]: x_tables: switch xt_match->checkentry to bool
[deliverable/linux.git] / net / ipv4 / netfilter / ip_tables.c
CommitLineData
1da177e4
LT
1/*
2 * Packet matching code.
3 *
4 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
2e4e6a17 5 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
1da177e4
LT
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
1da177e4 10 */
1da177e4 11#include <linux/cache.h>
4fc268d2 12#include <linux/capability.h>
1da177e4
LT
13#include <linux/skbuff.h>
14#include <linux/kmod.h>
15#include <linux/vmalloc.h>
16#include <linux/netdevice.h>
17#include <linux/module.h>
1da177e4
LT
18#include <linux/icmp.h>
19#include <net/ip.h>
2722971c 20#include <net/compat.h>
1da177e4 21#include <asm/uaccess.h>
57b47a53 22#include <linux/mutex.h>
1da177e4
LT
23#include <linux/proc_fs.h>
24#include <linux/err.h>
c8923c6b 25#include <linux/cpumask.h>
1da177e4 26
2e4e6a17 27#include <linux/netfilter/x_tables.h>
1da177e4
LT
28#include <linux/netfilter_ipv4/ip_tables.h>
29
30MODULE_LICENSE("GPL");
31MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
32MODULE_DESCRIPTION("IPv4 packet filter");
33
34/*#define DEBUG_IP_FIREWALL*/
35/*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
36/*#define DEBUG_IP_FIREWALL_USER*/
37
38#ifdef DEBUG_IP_FIREWALL
39#define dprintf(format, args...) printk(format , ## args)
40#else
41#define dprintf(format, args...)
42#endif
43
44#ifdef DEBUG_IP_FIREWALL_USER
45#define duprintf(format, args...) printk(format , ## args)
46#else
47#define duprintf(format, args...)
48#endif
49
50#ifdef CONFIG_NETFILTER_DEBUG
51#define IP_NF_ASSERT(x) \
52do { \
53 if (!(x)) \
54 printk("IP_NF_ASSERT: %s:%s:%u\n", \
55 __FUNCTION__, __FILE__, __LINE__); \
56} while(0)
57#else
58#define IP_NF_ASSERT(x)
59#endif
1da177e4
LT
60
61#if 0
62/* All the better to debug you with... */
63#define static
64#define inline
65#endif
66
67/*
68 We keep a set of rules for each CPU, so we can avoid write-locking
69 them in the softirq when updating the counters and therefore
70 only need to read-lock in the softirq; doing a write_lock_bh() in user
71 context stops packets coming through and allows user context to read
72 the counters or update the rules.
73
1da177e4
LT
74 Hence the start of any table is given by get_table() below. */
75
1da177e4
LT
76/* Returns whether matches rule or not. */
77static inline int
78ip_packet_match(const struct iphdr *ip,
79 const char *indev,
80 const char *outdev,
81 const struct ipt_ip *ipinfo,
82 int isfrag)
83{
84 size_t i;
85 unsigned long ret;
86
87#define FWINV(bool,invflg) ((bool) ^ !!(ipinfo->invflags & invflg))
88
89 if (FWINV((ip->saddr&ipinfo->smsk.s_addr) != ipinfo->src.s_addr,
90 IPT_INV_SRCIP)
91 || FWINV((ip->daddr&ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr,
92 IPT_INV_DSTIP)) {
93 dprintf("Source or dest mismatch.\n");
94
95 dprintf("SRC: %u.%u.%u.%u. Mask: %u.%u.%u.%u. Target: %u.%u.%u.%u.%s\n",
96 NIPQUAD(ip->saddr),
97 NIPQUAD(ipinfo->smsk.s_addr),
98 NIPQUAD(ipinfo->src.s_addr),
99 ipinfo->invflags & IPT_INV_SRCIP ? " (INV)" : "");
100 dprintf("DST: %u.%u.%u.%u Mask: %u.%u.%u.%u Target: %u.%u.%u.%u.%s\n",
101 NIPQUAD(ip->daddr),
102 NIPQUAD(ipinfo->dmsk.s_addr),
103 NIPQUAD(ipinfo->dst.s_addr),
104 ipinfo->invflags & IPT_INV_DSTIP ? " (INV)" : "");
105 return 0;
106 }
107
108 /* Look for ifname matches; this should unroll nicely. */
109 for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
110 ret |= (((const unsigned long *)indev)[i]
111 ^ ((const unsigned long *)ipinfo->iniface)[i])
112 & ((const unsigned long *)ipinfo->iniface_mask)[i];
113 }
114
115 if (FWINV(ret != 0, IPT_INV_VIA_IN)) {
116 dprintf("VIA in mismatch (%s vs %s).%s\n",
117 indev, ipinfo->iniface,
118 ipinfo->invflags&IPT_INV_VIA_IN ?" (INV)":"");
119 return 0;
120 }
121
122 for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
123 ret |= (((const unsigned long *)outdev)[i]
124 ^ ((const unsigned long *)ipinfo->outiface)[i])
125 & ((const unsigned long *)ipinfo->outiface_mask)[i];
126 }
127
128 if (FWINV(ret != 0, IPT_INV_VIA_OUT)) {
129 dprintf("VIA out mismatch (%s vs %s).%s\n",
130 outdev, ipinfo->outiface,
131 ipinfo->invflags&IPT_INV_VIA_OUT ?" (INV)":"");
132 return 0;
133 }
134
135 /* Check specific protocol */
136 if (ipinfo->proto
137 && FWINV(ip->protocol != ipinfo->proto, IPT_INV_PROTO)) {
138 dprintf("Packet protocol %hi does not match %hi.%s\n",
139 ip->protocol, ipinfo->proto,
140 ipinfo->invflags&IPT_INV_PROTO ? " (INV)":"");
141 return 0;
142 }
143
144 /* If we have a fragment rule but the packet is not a fragment
145 * then we return zero */
146 if (FWINV((ipinfo->flags&IPT_F_FRAG) && !isfrag, IPT_INV_FRAG)) {
147 dprintf("Fragment rule but not fragment.%s\n",
148 ipinfo->invflags & IPT_INV_FRAG ? " (INV)" : "");
149 return 0;
150 }
151
152 return 1;
153}
154
ccb79bdc 155static inline bool
1da177e4
LT
156ip_checkentry(const struct ipt_ip *ip)
157{
158 if (ip->flags & ~IPT_F_MASK) {
159 duprintf("Unknown flag bits set: %08X\n",
160 ip->flags & ~IPT_F_MASK);
ccb79bdc 161 return false;
1da177e4
LT
162 }
163 if (ip->invflags & ~IPT_INV_MASK) {
164 duprintf("Unknown invflag bits set: %08X\n",
165 ip->invflags & ~IPT_INV_MASK);
ccb79bdc 166 return false;
1da177e4 167 }
ccb79bdc 168 return true;
1da177e4
LT
169}
170
171static unsigned int
172ipt_error(struct sk_buff **pskb,
173 const struct net_device *in,
174 const struct net_device *out,
175 unsigned int hooknum,
c4986734 176 const struct xt_target *target,
fe1cb108 177 const void *targinfo)
1da177e4
LT
178{
179 if (net_ratelimit())
180 printk("ip_tables: error: `%s'\n", (char *)targinfo);
181
182 return NF_DROP;
183}
184
185static inline
1d93a9cb
JE
186bool do_match(struct ipt_entry_match *m,
187 const struct sk_buff *skb,
188 const struct net_device *in,
189 const struct net_device *out,
190 int offset,
191 bool *hotdrop)
1da177e4
LT
192{
193 /* Stop iteration if it doesn't match */
1c524830 194 if (!m->u.kernel.match->match(skb, in, out, m->u.kernel.match, m->data,
c9bdd4b5 195 offset, ip_hdrlen(skb), hotdrop))
1d93a9cb 196 return true;
1da177e4 197 else
1d93a9cb 198 return false;
1da177e4
LT
199}
200
/* Translate a byte offset within the table blob into an entry pointer. */
static inline struct ipt_entry *
get_entry(void *base, unsigned int offset)
{
	return (struct ipt_entry *)((char *)base + offset);
}
206
207/* Returns one of the generic firewall policies, like NF_ACCEPT. */
208unsigned int
209ipt_do_table(struct sk_buff **pskb,
210 unsigned int hook,
211 const struct net_device *in,
212 const struct net_device *out,
e60a13e0 213 struct xt_table *table)
1da177e4
LT
214{
215 static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
216 u_int16_t offset;
217 struct iphdr *ip;
218 u_int16_t datalen;
cff533ac 219 bool hotdrop = false;
1da177e4
LT
220 /* Initializing verdict to NF_DROP keeps gcc happy. */
221 unsigned int verdict = NF_DROP;
222 const char *indev, *outdev;
223 void *table_base;
224 struct ipt_entry *e, *back;
8311731a 225 struct xt_table_info *private;
1da177e4
LT
226
227 /* Initialization */
eddc9ec5 228 ip = ip_hdr(*pskb);
1da177e4
LT
229 datalen = (*pskb)->len - ip->ihl * 4;
230 indev = in ? in->name : nulldevname;
231 outdev = out ? out->name : nulldevname;
232 /* We handle fragments by dealing with the first fragment as
233 * if it was a normal packet. All other fragments are treated
234 * normally, except that they will NEVER match rules that ask
235 * things we don't know, ie. tcp syn flag or ports). If the
236 * rule is also a fragment-specific rule, non-fragments won't
237 * match it. */
238 offset = ntohs(ip->frag_off) & IP_OFFSET;
239
240 read_lock_bh(&table->lock);
241 IP_NF_ASSERT(table->valid_hooks & (1 << hook));
8311731a 242 private = table->private;
2e4e6a17
HW
243 table_base = (void *)private->entries[smp_processor_id()];
244 e = get_entry(table_base, private->hook_entry[hook]);
1da177e4
LT
245
246 /* For return from builtin chain */
2e4e6a17 247 back = get_entry(table_base, private->underflow[hook]);
1da177e4
LT
248
249 do {
250 IP_NF_ASSERT(e);
251 IP_NF_ASSERT(back);
1da177e4
LT
252 if (ip_packet_match(ip, indev, outdev, &e->ip, offset)) {
253 struct ipt_entry_target *t;
254
255 if (IPT_MATCH_ITERATE(e, do_match,
256 *pskb, in, out,
257 offset, &hotdrop) != 0)
258 goto no_match;
259
260 ADD_COUNTER(e->counters, ntohs(ip->tot_len), 1);
261
262 t = ipt_get_target(e);
263 IP_NF_ASSERT(t->u.kernel.target);
264 /* Standard target? */
265 if (!t->u.kernel.target->target) {
266 int v;
267
268 v = ((struct ipt_standard_target *)t)->verdict;
269 if (v < 0) {
270 /* Pop from stack? */
271 if (v != IPT_RETURN) {
272 verdict = (unsigned)(-v) - 1;
273 break;
274 }
275 e = back;
276 back = get_entry(table_base,
277 back->comefrom);
278 continue;
279 }
05465343
PM
280 if (table_base + v != (void *)e + e->next_offset
281 && !(e->ip.flags & IPT_F_GOTO)) {
1da177e4
LT
282 /* Save old back ptr in next entry */
283 struct ipt_entry *next
284 = (void *)e + e->next_offset;
285 next->comefrom
286 = (void *)back - table_base;
287 /* set back pointer to next entry */
288 back = next;
289 }
290
291 e = get_entry(table_base, v);
292 } else {
293 /* Targets which reenter must return
e905a9ed 294 abs. verdicts */
1da177e4
LT
295#ifdef CONFIG_NETFILTER_DEBUG
296 ((struct ipt_entry *)table_base)->comefrom
297 = 0xeeeeeeec;
298#endif
299 verdict = t->u.kernel.target->target(pskb,
300 in, out,
301 hook,
1c524830 302 t->u.kernel.target,
fe1cb108 303 t->data);
1da177e4
LT
304
305#ifdef CONFIG_NETFILTER_DEBUG
306 if (((struct ipt_entry *)table_base)->comefrom
307 != 0xeeeeeeec
308 && verdict == IPT_CONTINUE) {
309 printk("Target %s reentered!\n",
310 t->u.kernel.target->name);
311 verdict = NF_DROP;
312 }
313 ((struct ipt_entry *)table_base)->comefrom
314 = 0x57acc001;
315#endif
316 /* Target might have changed stuff. */
eddc9ec5 317 ip = ip_hdr(*pskb);
1da177e4
LT
318 datalen = (*pskb)->len - ip->ihl * 4;
319
320 if (verdict == IPT_CONTINUE)
321 e = (void *)e + e->next_offset;
322 else
323 /* Verdict */
324 break;
325 }
326 } else {
327
328 no_match:
329 e = (void *)e + e->next_offset;
330 }
331 } while (!hotdrop);
332
1da177e4
LT
333 read_unlock_bh(&table->lock);
334
335#ifdef DEBUG_ALLOW_ALL
336 return NF_ACCEPT;
337#else
338 if (hotdrop)
339 return NF_DROP;
340 else return verdict;
341#endif
342}
343
1da177e4
LT
344/* All zeroes == unconditional rule. */
345static inline int
346unconditional(const struct ipt_ip *ip)
347{
348 unsigned int i;
349
350 for (i = 0; i < sizeof(*ip)/sizeof(__u32); i++)
351 if (((__u32 *)ip)[i])
352 return 0;
353
354 return 1;
355}
356
357/* Figures out from what hook each rule can be called: returns 0 if
358 there are loops. Puts hook bitmask in comefrom. */
359static int
2e4e6a17 360mark_source_chains(struct xt_table_info *newinfo,
31836064 361 unsigned int valid_hooks, void *entry0)
1da177e4
LT
362{
363 unsigned int hook;
364
365 /* No recursion; use packet counter to save back ptrs (reset
366 to 0 as we leave), and comefrom to save source hook bitmask */
367 for (hook = 0; hook < NF_IP_NUMHOOKS; hook++) {
368 unsigned int pos = newinfo->hook_entry[hook];
369 struct ipt_entry *e
31836064 370 = (struct ipt_entry *)(entry0 + pos);
1da177e4
LT
371
372 if (!(valid_hooks & (1 << hook)))
373 continue;
374
375 /* Set initial back pointer. */
376 e->counters.pcnt = pos;
377
378 for (;;) {
379 struct ipt_standard_target *t
380 = (void *)ipt_get_target(e);
e1b4b9f3 381 int visited = e->comefrom & (1 << hook);
1da177e4
LT
382
383 if (e->comefrom & (1 << NF_IP_NUMHOOKS)) {
384 printk("iptables: loop hook %u pos %u %08X.\n",
385 hook, pos, e->comefrom);
386 return 0;
387 }
388 e->comefrom
389 |= ((1 << hook) | (1 << NF_IP_NUMHOOKS));
390
391 /* Unconditional return/END. */
e1b4b9f3 392 if ((e->target_offset == sizeof(struct ipt_entry)
1da177e4
LT
393 && (strcmp(t->target.u.user.name,
394 IPT_STANDARD_TARGET) == 0)
395 && t->verdict < 0
e1b4b9f3 396 && unconditional(&e->ip)) || visited) {
1da177e4
LT
397 unsigned int oldpos, size;
398
74c9c0c1
DM
399 if (t->verdict < -NF_MAX_VERDICT - 1) {
400 duprintf("mark_source_chains: bad "
401 "negative verdict (%i)\n",
402 t->verdict);
403 return 0;
404 }
405
1da177e4
LT
406 /* Return: backtrack through the last
407 big jump. */
408 do {
409 e->comefrom ^= (1<<NF_IP_NUMHOOKS);
410#ifdef DEBUG_IP_FIREWALL_USER
411 if (e->comefrom
412 & (1 << NF_IP_NUMHOOKS)) {
413 duprintf("Back unset "
414 "on hook %u "
415 "rule %u\n",
416 hook, pos);
417 }
418#endif
419 oldpos = pos;
420 pos = e->counters.pcnt;
421 e->counters.pcnt = 0;
422
423 /* We're at the start. */
424 if (pos == oldpos)
425 goto next;
426
427 e = (struct ipt_entry *)
31836064 428 (entry0 + pos);
1da177e4
LT
429 } while (oldpos == pos + e->next_offset);
430
431 /* Move along one */
432 size = e->next_offset;
433 e = (struct ipt_entry *)
31836064 434 (entry0 + pos + size);
1da177e4
LT
435 e->counters.pcnt = pos;
436 pos += size;
437 } else {
438 int newpos = t->verdict;
439
440 if (strcmp(t->target.u.user.name,
441 IPT_STANDARD_TARGET) == 0
442 && newpos >= 0) {
74c9c0c1
DM
443 if (newpos > newinfo->size -
444 sizeof(struct ipt_entry)) {
445 duprintf("mark_source_chains: "
446 "bad verdict (%i)\n",
447 newpos);
448 return 0;
449 }
1da177e4
LT
450 /* This a jump; chase it. */
451 duprintf("Jump rule %u -> %u\n",
452 pos, newpos);
453 } else {
454 /* ... this is a fallthru */
455 newpos = pos + e->next_offset;
456 }
457 e = (struct ipt_entry *)
31836064 458 (entry0 + newpos);
1da177e4
LT
459 e->counters.pcnt = pos;
460 pos = newpos;
461 }
462 }
463 next:
464 duprintf("Finished chain %u\n", hook);
465 }
466 return 1;
467}
468
469static inline int
470cleanup_match(struct ipt_entry_match *m, unsigned int *i)
471{
472 if (i && (*i)-- == 0)
473 return 1;
474
475 if (m->u.kernel.match->destroy)
efa74165 476 m->u.kernel.match->destroy(m->u.kernel.match, m->data);
1da177e4
LT
477 module_put(m->u.kernel.match->me);
478 return 0;
479}
480
1da177e4 481static inline int
a96be246
DM
482check_entry(struct ipt_entry *e, const char *name)
483{
484 struct ipt_entry_target *t;
485
486 if (!ip_checkentry(&e->ip)) {
487 duprintf("ip_tables: ip check failed %p %s.\n", e, name);
488 return -EINVAL;
489 }
490
491 if (e->target_offset + sizeof(struct ipt_entry_target) > e->next_offset)
492 return -EINVAL;
493
494 t = ipt_get_target(e);
495 if (e->target_offset + t->u.target_size > e->next_offset)
496 return -EINVAL;
497
498 return 0;
499}
500
501static inline int check_match(struct ipt_entry_match *m, const char *name,
4c1b52bc
DM
502 const struct ipt_ip *ip, unsigned int hookmask,
503 unsigned int *i)
a96be246 504{
6709dbbb 505 struct xt_match *match;
a96be246
DM
506 int ret;
507
508 match = m->u.kernel.match;
509 ret = xt_check_match(match, AF_INET, m->u.match_size - sizeof(*m),
510 name, hookmask, ip->proto,
511 ip->invflags & IPT_INV_PROTO);
512 if (!ret && m->u.kernel.match->checkentry
513 && !m->u.kernel.match->checkentry(name, ip, match, m->data,
514 hookmask)) {
515 duprintf("ip_tables: check failed for `%s'.\n",
516 m->u.kernel.match->name);
517 ret = -EINVAL;
518 }
4c1b52bc
DM
519 if (!ret)
520 (*i)++;
a96be246
DM
521 return ret;
522}
523
524static inline int
525find_check_match(struct ipt_entry_match *m,
1da177e4
LT
526 const char *name,
527 const struct ipt_ip *ip,
528 unsigned int hookmask,
529 unsigned int *i)
530{
6709dbbb 531 struct xt_match *match;
3cdc7c95 532 int ret;
1da177e4 533
2e4e6a17 534 match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name,
1da177e4
LT
535 m->u.user.revision),
536 "ipt_%s", m->u.user.name);
537 if (IS_ERR(match) || !match) {
a96be246 538 duprintf("find_check_match: `%s' not found\n", m->u.user.name);
1da177e4
LT
539 return match ? PTR_ERR(match) : -ENOENT;
540 }
541 m->u.kernel.match = match;
542
4c1b52bc 543 ret = check_match(m, name, ip, hookmask, i);
3cdc7c95
PM
544 if (ret)
545 goto err;
546
1da177e4 547 return 0;
3cdc7c95
PM
548err:
549 module_put(m->u.kernel.match->me);
550 return ret;
1da177e4
LT
551}
552
a96be246
DM
553static inline int check_target(struct ipt_entry *e, const char *name)
554{
e905a9ed 555 struct ipt_entry_target *t;
6709dbbb 556 struct xt_target *target;
e905a9ed 557 int ret;
a96be246
DM
558
559 t = ipt_get_target(e);
560 target = t->u.kernel.target;
561 ret = xt_check_target(target, AF_INET, t->u.target_size - sizeof(*t),
562 name, e->comefrom, e->ip.proto,
563 e->ip.invflags & IPT_INV_PROTO);
564 if (!ret && t->u.kernel.target->checkentry
565 && !t->u.kernel.target->checkentry(name, e, target,
566 t->data, e->comefrom)) {
567 duprintf("ip_tables: check failed for `%s'.\n",
568 t->u.kernel.target->name);
569 ret = -EINVAL;
570 }
571 return ret;
572}
1da177e4
LT
573
574static inline int
a96be246 575find_check_entry(struct ipt_entry *e, const char *name, unsigned int size,
1da177e4
LT
576 unsigned int *i)
577{
578 struct ipt_entry_target *t;
6709dbbb 579 struct xt_target *target;
1da177e4
LT
580 int ret;
581 unsigned int j;
582
a96be246
DM
583 ret = check_entry(e, name);
584 if (ret)
585 return ret;
590bdf7f 586
1da177e4 587 j = 0;
a96be246
DM
588 ret = IPT_MATCH_ITERATE(e, find_check_match, name, &e->ip,
589 e->comefrom, &j);
1da177e4
LT
590 if (ret != 0)
591 goto cleanup_matches;
592
593 t = ipt_get_target(e);
2e4e6a17
HW
594 target = try_then_request_module(xt_find_target(AF_INET,
595 t->u.user.name,
1da177e4
LT
596 t->u.user.revision),
597 "ipt_%s", t->u.user.name);
598 if (IS_ERR(target) || !target) {
a96be246 599 duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
1da177e4
LT
600 ret = target ? PTR_ERR(target) : -ENOENT;
601 goto cleanup_matches;
602 }
603 t->u.kernel.target = target;
604
a96be246 605 ret = check_target(e, name);
3cdc7c95
PM
606 if (ret)
607 goto err;
608
1da177e4
LT
609 (*i)++;
610 return 0;
3cdc7c95
PM
611 err:
612 module_put(t->u.kernel.target->me);
1da177e4
LT
613 cleanup_matches:
614 IPT_MATCH_ITERATE(e, cleanup_match, &j);
615 return ret;
616}
617
618static inline int
619check_entry_size_and_hooks(struct ipt_entry *e,
2e4e6a17 620 struct xt_table_info *newinfo,
1da177e4
LT
621 unsigned char *base,
622 unsigned char *limit,
623 const unsigned int *hook_entries,
624 const unsigned int *underflows,
625 unsigned int *i)
626{
627 unsigned int h;
628
629 if ((unsigned long)e % __alignof__(struct ipt_entry) != 0
630 || (unsigned char *)e + sizeof(struct ipt_entry) >= limit) {
631 duprintf("Bad offset %p\n", e);
632 return -EINVAL;
633 }
634
635 if (e->next_offset
636 < sizeof(struct ipt_entry) + sizeof(struct ipt_entry_target)) {
637 duprintf("checking: element %p size %u\n",
638 e, e->next_offset);
639 return -EINVAL;
640 }
641
642 /* Check hooks & underflows */
643 for (h = 0; h < NF_IP_NUMHOOKS; h++) {
644 if ((unsigned char *)e - base == hook_entries[h])
645 newinfo->hook_entry[h] = hook_entries[h];
646 if ((unsigned char *)e - base == underflows[h])
647 newinfo->underflow[h] = underflows[h];
648 }
649
650 /* FIXME: underflows must be unconditional, standard verdicts
e905a9ed 651 < 0 (not IPT_RETURN). --RR */
1da177e4
LT
652
653 /* Clear counters and comefrom */
2e4e6a17 654 e->counters = ((struct xt_counters) { 0, 0 });
1da177e4
LT
655 e->comefrom = 0;
656
657 (*i)++;
658 return 0;
659}
660
661static inline int
662cleanup_entry(struct ipt_entry *e, unsigned int *i)
663{
664 struct ipt_entry_target *t;
665
666 if (i && (*i)-- == 0)
667 return 1;
668
669 /* Cleanup all matches */
670 IPT_MATCH_ITERATE(e, cleanup_match, NULL);
671 t = ipt_get_target(e);
672 if (t->u.kernel.target->destroy)
efa74165 673 t->u.kernel.target->destroy(t->u.kernel.target, t->data);
1da177e4
LT
674 module_put(t->u.kernel.target->me);
675 return 0;
676}
677
678/* Checks and translates the user-supplied table segment (held in
679 newinfo) */
680static int
681translate_table(const char *name,
682 unsigned int valid_hooks,
2e4e6a17 683 struct xt_table_info *newinfo,
31836064 684 void *entry0,
1da177e4
LT
685 unsigned int size,
686 unsigned int number,
687 const unsigned int *hook_entries,
688 const unsigned int *underflows)
689{
690 unsigned int i;
691 int ret;
692
693 newinfo->size = size;
694 newinfo->number = number;
695
696 /* Init all hooks to impossible value. */
697 for (i = 0; i < NF_IP_NUMHOOKS; i++) {
698 newinfo->hook_entry[i] = 0xFFFFFFFF;
699 newinfo->underflow[i] = 0xFFFFFFFF;
700 }
701
702 duprintf("translate_table: size %u\n", newinfo->size);
703 i = 0;
704 /* Walk through entries, checking offsets. */
31836064 705 ret = IPT_ENTRY_ITERATE(entry0, newinfo->size,
1da177e4
LT
706 check_entry_size_and_hooks,
707 newinfo,
31836064
ED
708 entry0,
709 entry0 + size,
1da177e4
LT
710 hook_entries, underflows, &i);
711 if (ret != 0)
712 return ret;
713
714 if (i != number) {
715 duprintf("translate_table: %u not %u entries\n",
716 i, number);
717 return -EINVAL;
718 }
719
720 /* Check hooks all assigned */
721 for (i = 0; i < NF_IP_NUMHOOKS; i++) {
722 /* Only hooks which are valid */
723 if (!(valid_hooks & (1 << i)))
724 continue;
725 if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
726 duprintf("Invalid hook entry %u %u\n",
727 i, hook_entries[i]);
728 return -EINVAL;
729 }
730 if (newinfo->underflow[i] == 0xFFFFFFFF) {
731 duprintf("Invalid underflow %u %u\n",
732 i, underflows[i]);
733 return -EINVAL;
734 }
735 }
736
74c9c0c1
DM
737 if (!mark_source_chains(newinfo, valid_hooks, entry0))
738 return -ELOOP;
739
1da177e4
LT
740 /* Finally, each sanity check must pass */
741 i = 0;
31836064 742 ret = IPT_ENTRY_ITERATE(entry0, newinfo->size,
a96be246 743 find_check_entry, name, size, &i);
1da177e4 744
74c9c0c1
DM
745 if (ret != 0) {
746 IPT_ENTRY_ITERATE(entry0, newinfo->size,
747 cleanup_entry, &i);
748 return ret;
749 }
1da177e4
LT
750
751 /* And one copy for every other CPU */
6f912042 752 for_each_possible_cpu(i) {
31836064
ED
753 if (newinfo->entries[i] && newinfo->entries[i] != entry0)
754 memcpy(newinfo->entries[i], entry0, newinfo->size);
1da177e4
LT
755 }
756
757 return ret;
758}
759
1da177e4
LT
760/* Gets counters. */
761static inline int
762add_entry_to_counter(const struct ipt_entry *e,
2e4e6a17 763 struct xt_counters total[],
1da177e4
LT
764 unsigned int *i)
765{
766 ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
767
768 (*i)++;
769 return 0;
770}
771
31836064
ED
772static inline int
773set_entry_to_counter(const struct ipt_entry *e,
774 struct ipt_counters total[],
775 unsigned int *i)
776{
777 SET_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
778
779 (*i)++;
780 return 0;
781}
782
1da177e4 783static void
2e4e6a17
HW
784get_counters(const struct xt_table_info *t,
785 struct xt_counters counters[])
1da177e4
LT
786{
787 unsigned int cpu;
788 unsigned int i;
31836064
ED
789 unsigned int curcpu;
790
791 /* Instead of clearing (by a previous call to memset())
792 * the counters and using adds, we set the counters
793 * with data used by 'current' CPU
794 * We dont care about preemption here.
795 */
796 curcpu = raw_smp_processor_id();
797
798 i = 0;
799 IPT_ENTRY_ITERATE(t->entries[curcpu],
800 t->size,
801 set_entry_to_counter,
802 counters,
803 &i);
1da177e4 804
6f912042 805 for_each_possible_cpu(cpu) {
31836064
ED
806 if (cpu == curcpu)
807 continue;
1da177e4 808 i = 0;
31836064 809 IPT_ENTRY_ITERATE(t->entries[cpu],
1da177e4
LT
810 t->size,
811 add_entry_to_counter,
812 counters,
813 &i);
814 }
815}
816
e60a13e0 817static inline struct xt_counters * alloc_counters(struct xt_table *table)
1da177e4 818{
2722971c 819 unsigned int countersize;
2e4e6a17
HW
820 struct xt_counters *counters;
821 struct xt_table_info *private = table->private;
1da177e4
LT
822
823 /* We need atomic snapshot of counters: rest doesn't change
824 (other than comefrom, which userspace doesn't care
825 about). */
2e4e6a17 826 countersize = sizeof(struct xt_counters) * private->number;
31836064 827 counters = vmalloc_node(countersize, numa_node_id());
1da177e4
LT
828
829 if (counters == NULL)
2722971c 830 return ERR_PTR(-ENOMEM);
1da177e4
LT
831
832 /* First, sum counters... */
1da177e4 833 write_lock_bh(&table->lock);
2e4e6a17 834 get_counters(private, counters);
1da177e4
LT
835 write_unlock_bh(&table->lock);
836
2722971c
DM
837 return counters;
838}
839
840static int
841copy_entries_to_user(unsigned int total_size,
e60a13e0 842 struct xt_table *table,
2722971c
DM
843 void __user *userptr)
844{
845 unsigned int off, num;
846 struct ipt_entry *e;
847 struct xt_counters *counters;
848 struct xt_table_info *private = table->private;
849 int ret = 0;
850 void *loc_cpu_entry;
851
852 counters = alloc_counters(table);
853 if (IS_ERR(counters))
854 return PTR_ERR(counters);
855
31836064
ED
856 /* choose the copy that is on our node/cpu, ...
857 * This choice is lazy (because current thread is
858 * allowed to migrate to another cpu)
859 */
2e4e6a17 860 loc_cpu_entry = private->entries[raw_smp_processor_id()];
31836064
ED
861 /* ... then copy entire thing ... */
862 if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
1da177e4
LT
863 ret = -EFAULT;
864 goto free_counters;
865 }
866
867 /* FIXME: use iterator macros --RR */
868 /* ... then go back and fix counters and names */
869 for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
870 unsigned int i;
871 struct ipt_entry_match *m;
872 struct ipt_entry_target *t;
873
31836064 874 e = (struct ipt_entry *)(loc_cpu_entry + off);
1da177e4
LT
875 if (copy_to_user(userptr + off
876 + offsetof(struct ipt_entry, counters),
877 &counters[num],
878 sizeof(counters[num])) != 0) {
879 ret = -EFAULT;
880 goto free_counters;
881 }
882
883 for (i = sizeof(struct ipt_entry);
884 i < e->target_offset;
885 i += m->u.match_size) {
886 m = (void *)e + i;
887
888 if (copy_to_user(userptr + off + i
889 + offsetof(struct ipt_entry_match,
890 u.user.name),
891 m->u.kernel.match->name,
892 strlen(m->u.kernel.match->name)+1)
893 != 0) {
894 ret = -EFAULT;
895 goto free_counters;
896 }
897 }
898
899 t = ipt_get_target(e);
900 if (copy_to_user(userptr + off + e->target_offset
901 + offsetof(struct ipt_entry_target,
902 u.user.name),
903 t->u.kernel.target->name,
904 strlen(t->u.kernel.target->name)+1) != 0) {
905 ret = -EFAULT;
906 goto free_counters;
907 }
908 }
909
910 free_counters:
911 vfree(counters);
912 return ret;
913}
914
2722971c
DM
915#ifdef CONFIG_COMPAT
916struct compat_delta {
917 struct compat_delta *next;
e5b5ef7d 918 unsigned int offset;
2722971c
DM
919 short delta;
920};
921
922static struct compat_delta *compat_offsets = NULL;
923
e5b5ef7d 924static int compat_add_offset(unsigned int offset, short delta)
2722971c
DM
925{
926 struct compat_delta *tmp;
927
928 tmp = kmalloc(sizeof(struct compat_delta), GFP_KERNEL);
929 if (!tmp)
930 return -ENOMEM;
931 tmp->offset = offset;
932 tmp->delta = delta;
933 if (compat_offsets) {
934 tmp->next = compat_offsets->next;
935 compat_offsets->next = tmp;
936 } else {
937 compat_offsets = tmp;
938 tmp->next = NULL;
939 }
940 return 0;
941}
942
943static void compat_flush_offsets(void)
944{
945 struct compat_delta *tmp, *next;
946
947 if (compat_offsets) {
948 for(tmp = compat_offsets; tmp; tmp = next) {
949 next = tmp->next;
950 kfree(tmp);
951 }
952 compat_offsets = NULL;
953 }
954}
955
e5b5ef7d 956static short compat_calc_jump(unsigned int offset)
2722971c
DM
957{
958 struct compat_delta *tmp;
959 short delta;
960
961 for(tmp = compat_offsets, delta = 0; tmp; tmp = tmp->next)
962 if (tmp->offset < offset)
963 delta += tmp->delta;
964 return delta;
965}
966
9fa492cd 967static void compat_standard_from_user(void *dst, void *src)
2722971c 968{
9fa492cd 969 int v = *(compat_int_t *)src;
2722971c 970
9fa492cd
PM
971 if (v > 0)
972 v += compat_calc_jump(v);
973 memcpy(dst, &v, sizeof(v));
974}
46c5ea3c 975
9fa492cd 976static int compat_standard_to_user(void __user *dst, void *src)
2722971c 977{
9fa492cd 978 compat_int_t cv = *(int *)src;
2722971c 979
9fa492cd
PM
980 if (cv > 0)
981 cv -= compat_calc_jump(cv);
982 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
2722971c
DM
983}
984
985static inline int
986compat_calc_match(struct ipt_entry_match *m, int * size)
987{
9fa492cd 988 *size += xt_compat_match_offset(m->u.kernel.match);
2722971c
DM
989 return 0;
990}
991
992static int compat_calc_entry(struct ipt_entry *e, struct xt_table_info *info,
993 void *base, struct xt_table_info *newinfo)
994{
995 struct ipt_entry_target *t;
e5b5ef7d 996 unsigned int entry_offset;
2722971c
DM
997 int off, i, ret;
998
999 off = 0;
1000 entry_offset = (void *)e - base;
1001 IPT_MATCH_ITERATE(e, compat_calc_match, &off);
1002 t = ipt_get_target(e);
9fa492cd 1003 off += xt_compat_target_offset(t->u.kernel.target);
2722971c
DM
1004 newinfo->size -= off;
1005 ret = compat_add_offset(entry_offset, off);
1006 if (ret)
1007 return ret;
1008
1009 for (i = 0; i< NF_IP_NUMHOOKS; i++) {
1010 if (info->hook_entry[i] && (e < (struct ipt_entry *)
1011 (base + info->hook_entry[i])))
1012 newinfo->hook_entry[i] -= off;
1013 if (info->underflow[i] && (e < (struct ipt_entry *)
1014 (base + info->underflow[i])))
1015 newinfo->underflow[i] -= off;
1016 }
1017 return 0;
1018}
1019
1020static int compat_table_info(struct xt_table_info *info,
1021 struct xt_table_info *newinfo)
1022{
1023 void *loc_cpu_entry;
1024 int i;
1025
1026 if (!newinfo || !info)
1027 return -EINVAL;
1028
1029 memset(newinfo, 0, sizeof(struct xt_table_info));
1030 newinfo->size = info->size;
1031 newinfo->number = info->number;
1032 for (i = 0; i < NF_IP_NUMHOOKS; i++) {
1033 newinfo->hook_entry[i] = info->hook_entry[i];
1034 newinfo->underflow[i] = info->underflow[i];
1035 }
1036 loc_cpu_entry = info->entries[raw_smp_processor_id()];
1037 return IPT_ENTRY_ITERATE(loc_cpu_entry, info->size,
1038 compat_calc_entry, info, loc_cpu_entry, newinfo);
1039}
1040#endif
1041
1042static int get_info(void __user *user, int *len, int compat)
1043{
1044 char name[IPT_TABLE_MAXNAMELEN];
e60a13e0 1045 struct xt_table *t;
2722971c
DM
1046 int ret;
1047
1048 if (*len != sizeof(struct ipt_getinfo)) {
1049 duprintf("length %u != %u\n", *len,
1050 (unsigned int)sizeof(struct ipt_getinfo));
1051 return -EINVAL;
1052 }
1053
1054 if (copy_from_user(name, user, sizeof(name)) != 0)
1055 return -EFAULT;
1056
1057 name[IPT_TABLE_MAXNAMELEN-1] = '\0';
1058#ifdef CONFIG_COMPAT
1059 if (compat)
1060 xt_compat_lock(AF_INET);
1061#endif
1062 t = try_then_request_module(xt_find_table_lock(AF_INET, name),
1063 "iptable_%s", name);
1064 if (t && !IS_ERR(t)) {
1065 struct ipt_getinfo info;
1066 struct xt_table_info *private = t->private;
1067
1068#ifdef CONFIG_COMPAT
1069 if (compat) {
1070 struct xt_table_info tmp;
1071 ret = compat_table_info(private, &tmp);
1072 compat_flush_offsets();
1073 private = &tmp;
1074 }
1075#endif
1076 info.valid_hooks = t->valid_hooks;
1077 memcpy(info.hook_entry, private->hook_entry,
1078 sizeof(info.hook_entry));
1079 memcpy(info.underflow, private->underflow,
1080 sizeof(info.underflow));
1081 info.num_entries = private->number;
1082 info.size = private->size;
1083 strcpy(info.name, name);
1084
1085 if (copy_to_user(user, &info, *len) != 0)
1086 ret = -EFAULT;
1087 else
1088 ret = 0;
1089
1090 xt_table_unlock(t);
1091 module_put(t->me);
1092 } else
1093 ret = t ? PTR_ERR(t) : -ENOENT;
1094#ifdef CONFIG_COMPAT
1095 if (compat)
1096 xt_compat_unlock(AF_INET);
1097#endif
1098 return ret;
1099}
1100
1101static int
1102get_entries(struct ipt_get_entries __user *uptr, int *len)
1103{
1104 int ret;
1105 struct ipt_get_entries get;
e60a13e0 1106 struct xt_table *t;
2722971c
DM
1107
1108 if (*len < sizeof(get)) {
1109 duprintf("get_entries: %u < %d\n", *len,
1110 (unsigned int)sizeof(get));
1111 return -EINVAL;
1112 }
1113 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1114 return -EFAULT;
1115 if (*len != sizeof(struct ipt_get_entries) + get.size) {
1116 duprintf("get_entries: %u != %u\n", *len,
1117 (unsigned int)(sizeof(struct ipt_get_entries) +
1118 get.size));
1119 return -EINVAL;
1120 }
1121
1122 t = xt_find_table_lock(AF_INET, get.name);
1123 if (t && !IS_ERR(t)) {
1124 struct xt_table_info *private = t->private;
1125 duprintf("t->private->number = %u\n",
1126 private->number);
1127 if (get.size == private->size)
1128 ret = copy_entries_to_user(private->size,
1129 t, uptr->entrytable);
1130 else {
1131 duprintf("get_entries: I've got %u not %u!\n",
1132 private->size,
1133 get.size);
1134 ret = -EINVAL;
1135 }
1136 module_put(t->me);
1137 xt_table_unlock(t);
1138 } else
1139 ret = t ? PTR_ERR(t) : -ENOENT;
1140
1141 return ret;
1142}
1143
1144static int
1145__do_replace(const char *name, unsigned int valid_hooks,
1146 struct xt_table_info *newinfo, unsigned int num_counters,
1147 void __user *counters_ptr)
1148{
1149 int ret;
e60a13e0 1150 struct xt_table *t;
2722971c
DM
1151 struct xt_table_info *oldinfo;
1152 struct xt_counters *counters;
1153 void *loc_cpu_old_entry;
1154
1155 ret = 0;
1156 counters = vmalloc(num_counters * sizeof(struct xt_counters));
1157 if (!counters) {
1158 ret = -ENOMEM;
1159 goto out;
1160 }
1161
1162 t = try_then_request_module(xt_find_table_lock(AF_INET, name),
1163 "iptable_%s", name);
1164 if (!t || IS_ERR(t)) {
1165 ret = t ? PTR_ERR(t) : -ENOENT;
1166 goto free_newinfo_counters_untrans;
1167 }
1168
1169 /* You lied! */
1170 if (valid_hooks != t->valid_hooks) {
1171 duprintf("Valid hook crap: %08X vs %08X\n",
1172 valid_hooks, t->valid_hooks);
1173 ret = -EINVAL;
1174 goto put_module;
1175 }
1176
1177 oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
1178 if (!oldinfo)
1179 goto put_module;
1180
1181 /* Update module usage count based on number of rules */
1182 duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
1183 oldinfo->number, oldinfo->initial_entries, newinfo->number);
1184 if ((oldinfo->number > oldinfo->initial_entries) ||
1185 (newinfo->number <= oldinfo->initial_entries))
1186 module_put(t->me);
1187 if ((oldinfo->number > oldinfo->initial_entries) &&
1188 (newinfo->number <= oldinfo->initial_entries))
1189 module_put(t->me);
1190
1191 /* Get the old counters. */
1192 get_counters(oldinfo, counters);
1193 /* Decrease module usage counts and free resource */
1194 loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
1195 IPT_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry,NULL);
1196 xt_free_table_info(oldinfo);
1197 if (copy_to_user(counters_ptr, counters,
1198 sizeof(struct xt_counters) * num_counters) != 0)
1199 ret = -EFAULT;
1200 vfree(counters);
1201 xt_table_unlock(t);
1202 return ret;
1203
1204 put_module:
1205 module_put(t->me);
1206 xt_table_unlock(t);
1207 free_newinfo_counters_untrans:
1208 vfree(counters);
1209 out:
1210 return ret;
1211}
1212
1213static int
1214do_replace(void __user *user, unsigned int len)
1215{
1216 int ret;
1217 struct ipt_replace tmp;
1218 struct xt_table_info *newinfo;
1219 void *loc_cpu_entry;
1220
1221 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1222 return -EFAULT;
1223
1224 /* Hack: Causes ipchains to give correct error msg --RR */
1225 if (len != sizeof(tmp) + tmp.size)
1226 return -ENOPROTOOPT;
1227
1228 /* overflow check */
1229 if (tmp.size >= (INT_MAX - sizeof(struct xt_table_info)) / NR_CPUS -
1230 SMP_CACHE_BYTES)
1231 return -ENOMEM;
1232 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1233 return -ENOMEM;
1234
1235 newinfo = xt_alloc_table_info(tmp.size);
1236 if (!newinfo)
1237 return -ENOMEM;
1238
1239 /* choose the copy that is our node/cpu */
1240 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1241 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1242 tmp.size) != 0) {
1243 ret = -EFAULT;
1244 goto free_newinfo;
1245 }
1246
1247 ret = translate_table(tmp.name, tmp.valid_hooks,
1248 newinfo, loc_cpu_entry, tmp.size, tmp.num_entries,
1249 tmp.hook_entry, tmp.underflow);
1250 if (ret != 0)
1251 goto free_newinfo;
1252
1253 duprintf("ip_tables: Translated table\n");
1254
1255 ret = __do_replace(tmp.name, tmp.valid_hooks,
1256 newinfo, tmp.num_counters,
1257 tmp.counters);
1258 if (ret)
1259 goto free_newinfo_untrans;
1260 return 0;
1261
1262 free_newinfo_untrans:
1263 IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry,NULL);
1264 free_newinfo:
1265 xt_free_table_info(newinfo);
1266 return ret;
1267}
1268
1269/* We're lazy, and add to the first CPU; overflow works its fey magic
1270 * and everything is OK. */
1271static inline int
1272add_counter_to_entry(struct ipt_entry *e,
1273 const struct xt_counters addme[],
1274 unsigned int *i)
1275{
1276#if 0
1277 duprintf("add_counter: Entry %u %lu/%lu + %lu/%lu\n",
1278 *i,
1279 (long unsigned int)e->counters.pcnt,
1280 (long unsigned int)e->counters.bcnt,
1281 (long unsigned int)addme[*i].pcnt,
1282 (long unsigned int)addme[*i].bcnt);
1283#endif
1284
1285 ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
1286
1287 (*i)++;
1288 return 0;
1289}
1290
1291static int
1292do_add_counters(void __user *user, unsigned int len, int compat)
1293{
1294 unsigned int i;
1295 struct xt_counters_info tmp;
1296 struct xt_counters *paddc;
1297 unsigned int num_counters;
1298 char *name;
1299 int size;
1300 void *ptmp;
e60a13e0 1301 struct xt_table *t;
2722971c
DM
1302 struct xt_table_info *private;
1303 int ret = 0;
1304 void *loc_cpu_entry;
1305#ifdef CONFIG_COMPAT
1306 struct compat_xt_counters_info compat_tmp;
1307
1308 if (compat) {
1309 ptmp = &compat_tmp;
1310 size = sizeof(struct compat_xt_counters_info);
1311 } else
1312#endif
1313 {
1314 ptmp = &tmp;
1315 size = sizeof(struct xt_counters_info);
1316 }
1317
1318 if (copy_from_user(ptmp, user, size) != 0)
1319 return -EFAULT;
1320
1321#ifdef CONFIG_COMPAT
1322 if (compat) {
1323 num_counters = compat_tmp.num_counters;
1324 name = compat_tmp.name;
1325 } else
1326#endif
1327 {
1328 num_counters = tmp.num_counters;
1329 name = tmp.name;
1330 }
1331
1332 if (len != size + num_counters * sizeof(struct xt_counters))
1333 return -EINVAL;
1334
1335 paddc = vmalloc_node(len - size, numa_node_id());
1336 if (!paddc)
1337 return -ENOMEM;
1338
1339 if (copy_from_user(paddc, user + size, len - size) != 0) {
1340 ret = -EFAULT;
1341 goto free;
1342 }
1343
1344 t = xt_find_table_lock(AF_INET, name);
1345 if (!t || IS_ERR(t)) {
1346 ret = t ? PTR_ERR(t) : -ENOENT;
1347 goto free;
1348 }
1349
1350 write_lock_bh(&t->lock);
1351 private = t->private;
1352 if (private->number != num_counters) {
1353 ret = -EINVAL;
1354 goto unlock_up_free;
1355 }
1356
1357 i = 0;
1358 /* Choose the copy that is on our node */
1359 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1360 IPT_ENTRY_ITERATE(loc_cpu_entry,
1361 private->size,
1362 add_counter_to_entry,
1363 paddc,
1364 &i);
1365 unlock_up_free:
1366 write_unlock_bh(&t->lock);
1367 xt_table_unlock(t);
1368 module_put(t->me);
1369 free:
1370 vfree(paddc);
1371
1372 return ret;
1373}
1374
1375#ifdef CONFIG_COMPAT
1376struct compat_ipt_replace {
1377 char name[IPT_TABLE_MAXNAMELEN];
1378 u32 valid_hooks;
1379 u32 num_entries;
1380 u32 size;
1381 u32 hook_entry[NF_IP_NUMHOOKS];
1382 u32 underflow[NF_IP_NUMHOOKS];
1383 u32 num_counters;
1384 compat_uptr_t counters; /* struct ipt_counters * */
1385 struct compat_ipt_entry entries[0];
1386};
1387
1388static inline int compat_copy_match_to_user(struct ipt_entry_match *m,
3e597c60 1389 void __user **dstptr, compat_uint_t *size)
2722971c 1390{
9fa492cd 1391 return xt_compat_match_to_user(m, dstptr, size);
2722971c
DM
1392}
1393
1394static int compat_copy_entry_to_user(struct ipt_entry *e,
3e597c60 1395 void __user **dstptr, compat_uint_t *size)
2722971c 1396{
3e597c60 1397 struct ipt_entry_target *t;
2722971c
DM
1398 struct compat_ipt_entry __user *ce;
1399 u_int16_t target_offset, next_offset;
1400 compat_uint_t origsize;
1401 int ret;
1402
1403 ret = -EFAULT;
1404 origsize = *size;
1405 ce = (struct compat_ipt_entry __user *)*dstptr;
7800007c 1406 if (copy_to_user(ce, e, sizeof(struct ipt_entry)))
2722971c
DM
1407 goto out;
1408
1409 *dstptr += sizeof(struct compat_ipt_entry);
1410 ret = IPT_MATCH_ITERATE(e, compat_copy_match_to_user, dstptr, size);
1411 target_offset = e->target_offset - (origsize - *size);
1412 if (ret)
1413 goto out;
1414 t = ipt_get_target(e);
9fa492cd 1415 ret = xt_compat_target_to_user(t, dstptr, size);
2722971c
DM
1416 if (ret)
1417 goto out;
1418 ret = -EFAULT;
1419 next_offset = e->next_offset - (origsize - *size);
7800007c 1420 if (put_user(target_offset, &ce->target_offset))
2722971c 1421 goto out;
7800007c 1422 if (put_user(next_offset, &ce->next_offset))
2722971c
DM
1423 goto out;
1424 return 0;
1425out:
1426 return ret;
1427}
1428
1429static inline int
4c1b52bc 1430compat_find_calc_match(struct ipt_entry_match *m,
2722971c
DM
1431 const char *name,
1432 const struct ipt_ip *ip,
1433 unsigned int hookmask,
1434 int *size, int *i)
1435{
6709dbbb 1436 struct xt_match *match;
2722971c
DM
1437
1438 match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name,
1439 m->u.user.revision),
1440 "ipt_%s", m->u.user.name);
1441 if (IS_ERR(match) || !match) {
1442 duprintf("compat_check_calc_match: `%s' not found\n",
1443 m->u.user.name);
1444 return match ? PTR_ERR(match) : -ENOENT;
1445 }
1446 m->u.kernel.match = match;
9fa492cd 1447 *size += xt_compat_match_offset(match);
2722971c
DM
1448
1449 (*i)++;
1450 return 0;
1451}
1452
4c1b52bc
DM
1453static inline int
1454compat_release_match(struct ipt_entry_match *m, unsigned int *i)
1455{
1456 if (i && (*i)-- == 0)
1457 return 1;
1458
1459 module_put(m->u.kernel.match->me);
1460 return 0;
1461}
1462
1463static inline int
1464compat_release_entry(struct ipt_entry *e, unsigned int *i)
1465{
1466 struct ipt_entry_target *t;
1467
1468 if (i && (*i)-- == 0)
1469 return 1;
1470
1471 /* Cleanup all matches */
1472 IPT_MATCH_ITERATE(e, compat_release_match, NULL);
1473 t = ipt_get_target(e);
1474 module_put(t->u.kernel.target->me);
1475 return 0;
1476}
1477
2722971c
DM
1478static inline int
1479check_compat_entry_size_and_hooks(struct ipt_entry *e,
1480 struct xt_table_info *newinfo,
1481 unsigned int *size,
1482 unsigned char *base,
1483 unsigned char *limit,
1484 unsigned int *hook_entries,
1485 unsigned int *underflows,
1486 unsigned int *i,
1487 const char *name)
1488{
1489 struct ipt_entry_target *t;
6709dbbb 1490 struct xt_target *target;
e5b5ef7d 1491 unsigned int entry_offset;
2722971c
DM
1492 int ret, off, h, j;
1493
1494 duprintf("check_compat_entry_size_and_hooks %p\n", e);
1495 if ((unsigned long)e % __alignof__(struct compat_ipt_entry) != 0
1496 || (unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit) {
1497 duprintf("Bad offset %p, limit = %p\n", e, limit);
1498 return -EINVAL;
1499 }
1500
1501 if (e->next_offset < sizeof(struct compat_ipt_entry) +
1502 sizeof(struct compat_xt_entry_target)) {
1503 duprintf("checking: element %p size %u\n",
1504 e, e->next_offset);
1505 return -EINVAL;
1506 }
1507
a96be246
DM
1508 ret = check_entry(e, name);
1509 if (ret)
1510 return ret;
590bdf7f 1511
2722971c
DM
1512 off = 0;
1513 entry_offset = (void *)e - (void *)base;
1514 j = 0;
4c1b52bc 1515 ret = IPT_MATCH_ITERATE(e, compat_find_calc_match, name, &e->ip,
2722971c
DM
1516 e->comefrom, &off, &j);
1517 if (ret != 0)
4c1b52bc 1518 goto release_matches;
2722971c
DM
1519
1520 t = ipt_get_target(e);
1521 target = try_then_request_module(xt_find_target(AF_INET,
1522 t->u.user.name,
1523 t->u.user.revision),
1524 "ipt_%s", t->u.user.name);
1525 if (IS_ERR(target) || !target) {
a96be246
DM
1526 duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
1527 t->u.user.name);
2722971c 1528 ret = target ? PTR_ERR(target) : -ENOENT;
4c1b52bc 1529 goto release_matches;
2722971c
DM
1530 }
1531 t->u.kernel.target = target;
1532
9fa492cd 1533 off += xt_compat_target_offset(target);
2722971c
DM
1534 *size += off;
1535 ret = compat_add_offset(entry_offset, off);
1536 if (ret)
1537 goto out;
1538
1539 /* Check hooks & underflows */
1540 for (h = 0; h < NF_IP_NUMHOOKS; h++) {
1541 if ((unsigned char *)e - base == hook_entries[h])
1542 newinfo->hook_entry[h] = hook_entries[h];
1543 if ((unsigned char *)e - base == underflows[h])
1544 newinfo->underflow[h] = underflows[h];
1545 }
1546
1547 /* Clear counters and comefrom */
1548 e->counters = ((struct ipt_counters) { 0, 0 });
1549 e->comefrom = 0;
1550
1551 (*i)++;
1552 return 0;
bec71b16 1553
2722971c 1554out:
bec71b16 1555 module_put(t->u.kernel.target->me);
4c1b52bc
DM
1556release_matches:
1557 IPT_MATCH_ITERATE(e, compat_release_match, &j);
2722971c
DM
1558 return ret;
1559}
1560
1561static inline int compat_copy_match_from_user(struct ipt_entry_match *m,
1562 void **dstptr, compat_uint_t *size, const char *name,
920b868a 1563 const struct ipt_ip *ip, unsigned int hookmask)
2722971c 1564{
9fa492cd 1565 xt_compat_match_from_user(m, dstptr, size);
f6677f43 1566 return 0;
2722971c
DM
1567}
1568
1569static int compat_copy_entry_from_user(struct ipt_entry *e, void **dstptr,
1570 unsigned int *size, const char *name,
1571 struct xt_table_info *newinfo, unsigned char *base)
1572{
1573 struct ipt_entry_target *t;
6709dbbb 1574 struct xt_target *target;
2722971c
DM
1575 struct ipt_entry *de;
1576 unsigned int origsize;
920b868a 1577 int ret, h;
2722971c
DM
1578
1579 ret = 0;
1580 origsize = *size;
1581 de = (struct ipt_entry *)*dstptr;
1582 memcpy(de, e, sizeof(struct ipt_entry));
1583
1584 *dstptr += sizeof(struct compat_ipt_entry);
1585 ret = IPT_MATCH_ITERATE(e, compat_copy_match_from_user, dstptr, size,
920b868a 1586 name, &de->ip, de->comefrom);
2722971c 1587 if (ret)
f6677f43 1588 return ret;
2722971c
DM
1589 de->target_offset = e->target_offset - (origsize - *size);
1590 t = ipt_get_target(e);
1591 target = t->u.kernel.target;
9fa492cd 1592 xt_compat_target_from_user(t, dstptr, size);
2722971c
DM
1593
1594 de->next_offset = e->next_offset - (origsize - *size);
1595 for (h = 0; h < NF_IP_NUMHOOKS; h++) {
1596 if ((unsigned char *)de - base < newinfo->hook_entry[h])
1597 newinfo->hook_entry[h] -= origsize - *size;
1598 if ((unsigned char *)de - base < newinfo->underflow[h])
1599 newinfo->underflow[h] -= origsize - *size;
1600 }
f6677f43
DM
1601 return ret;
1602}
1603
4c1b52bc
DM
1604static inline int compat_check_entry(struct ipt_entry *e, const char *name,
1605 unsigned int *i)
f6677f43 1606{
4c1b52bc 1607 int j, ret;
f6677f43 1608
4c1b52bc
DM
1609 j = 0;
1610 ret = IPT_MATCH_ITERATE(e, check_match, name, &e->ip, e->comefrom, &j);
f6677f43 1611 if (ret)
4c1b52bc
DM
1612 goto cleanup_matches;
1613
1614 ret = check_target(e, name);
1615 if (ret)
1616 goto cleanup_matches;
f6677f43 1617
4c1b52bc
DM
1618 (*i)++;
1619 return 0;
1620
1621 cleanup_matches:
1622 IPT_MATCH_ITERATE(e, cleanup_match, &j);
1623 return ret;
f6677f43
DM
1624}
1625
1da177e4 1626static int
2722971c
DM
1627translate_compat_table(const char *name,
1628 unsigned int valid_hooks,
1629 struct xt_table_info **pinfo,
1630 void **pentry0,
1631 unsigned int total_size,
1632 unsigned int number,
1633 unsigned int *hook_entries,
1634 unsigned int *underflows)
1da177e4 1635{
920b868a 1636 unsigned int i, j;
2722971c
DM
1637 struct xt_table_info *newinfo, *info;
1638 void *pos, *entry0, *entry1;
1639 unsigned int size;
1da177e4 1640 int ret;
1da177e4 1641
2722971c
DM
1642 info = *pinfo;
1643 entry0 = *pentry0;
1644 size = total_size;
1645 info->number = number;
1646
1647 /* Init all hooks to impossible value. */
1648 for (i = 0; i < NF_IP_NUMHOOKS; i++) {
1649 info->hook_entry[i] = 0xFFFFFFFF;
1650 info->underflow[i] = 0xFFFFFFFF;
1651 }
1652
1653 duprintf("translate_compat_table: size %u\n", info->size);
920b868a 1654 j = 0;
2722971c
DM
1655 xt_compat_lock(AF_INET);
1656 /* Walk through entries, checking offsets. */
1657 ret = IPT_ENTRY_ITERATE(entry0, total_size,
1658 check_compat_entry_size_and_hooks,
1659 info, &size, entry0,
1660 entry0 + total_size,
920b868a 1661 hook_entries, underflows, &j, name);
2722971c
DM
1662 if (ret != 0)
1663 goto out_unlock;
1664
1665 ret = -EINVAL;
920b868a 1666 if (j != number) {
2722971c 1667 duprintf("translate_compat_table: %u not %u entries\n",
920b868a 1668 j, number);
2722971c
DM
1669 goto out_unlock;
1670 }
1671
1672 /* Check hooks all assigned */
1673 for (i = 0; i < NF_IP_NUMHOOKS; i++) {
1674 /* Only hooks which are valid */
1675 if (!(valid_hooks & (1 << i)))
1676 continue;
1677 if (info->hook_entry[i] == 0xFFFFFFFF) {
1678 duprintf("Invalid hook entry %u %u\n",
1679 i, hook_entries[i]);
1680 goto out_unlock;
1da177e4 1681 }
2722971c
DM
1682 if (info->underflow[i] == 0xFFFFFFFF) {
1683 duprintf("Invalid underflow %u %u\n",
1684 i, underflows[i]);
1685 goto out_unlock;
1686 }
1687 }
1688
1689 ret = -ENOMEM;
1690 newinfo = xt_alloc_table_info(size);
1691 if (!newinfo)
1692 goto out_unlock;
1693
1694 newinfo->number = number;
1695 for (i = 0; i < NF_IP_NUMHOOKS; i++) {
1696 newinfo->hook_entry[i] = info->hook_entry[i];
1697 newinfo->underflow[i] = info->underflow[i];
1698 }
1699 entry1 = newinfo->entries[raw_smp_processor_id()];
1700 pos = entry1;
1701 size = total_size;
1702 ret = IPT_ENTRY_ITERATE(entry0, total_size,
1703 compat_copy_entry_from_user, &pos, &size,
1704 name, newinfo, entry1);
1705 compat_flush_offsets();
1706 xt_compat_unlock(AF_INET);
1707 if (ret)
1708 goto free_newinfo;
1709
1710 ret = -ELOOP;
1711 if (!mark_source_chains(newinfo, valid_hooks, entry1))
1712 goto free_newinfo;
1713
4c1b52bc 1714 i = 0;
f6677f43 1715 ret = IPT_ENTRY_ITERATE(entry1, newinfo->size, compat_check_entry,
4c1b52bc
DM
1716 name, &i);
1717 if (ret) {
1718 j -= i;
1719 IPT_ENTRY_ITERATE_CONTINUE(entry1, newinfo->size, i,
1720 compat_release_entry, &j);
1721 IPT_ENTRY_ITERATE(entry1, newinfo->size, cleanup_entry, &i);
1722 xt_free_table_info(newinfo);
1723 return ret;
1724 }
f6677f43 1725
2722971c 1726 /* And one copy for every other CPU */
fb1bb34d 1727 for_each_possible_cpu(i)
2722971c
DM
1728 if (newinfo->entries[i] && newinfo->entries[i] != entry1)
1729 memcpy(newinfo->entries[i], entry1, newinfo->size);
1730
1731 *pinfo = newinfo;
1732 *pentry0 = entry1;
1733 xt_free_table_info(info);
1734 return 0;
1da177e4 1735
2722971c
DM
1736free_newinfo:
1737 xt_free_table_info(newinfo);
1738out:
4c1b52bc 1739 IPT_ENTRY_ITERATE(entry0, total_size, compat_release_entry, &j);
1da177e4 1740 return ret;
2722971c 1741out_unlock:
ef4512e7 1742 compat_flush_offsets();
2722971c
DM
1743 xt_compat_unlock(AF_INET);
1744 goto out;
1da177e4
LT
1745}
1746
1747static int
2722971c 1748compat_do_replace(void __user *user, unsigned int len)
1da177e4
LT
1749{
1750 int ret;
2722971c
DM
1751 struct compat_ipt_replace tmp;
1752 struct xt_table_info *newinfo;
1753 void *loc_cpu_entry;
1da177e4
LT
1754
1755 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1756 return -EFAULT;
1757
1758 /* Hack: Causes ipchains to give correct error msg --RR */
1759 if (len != sizeof(tmp) + tmp.size)
1760 return -ENOPROTOOPT;
1761
ee4bb818
KK
1762 /* overflow check */
1763 if (tmp.size >= (INT_MAX - sizeof(struct xt_table_info)) / NR_CPUS -
1764 SMP_CACHE_BYTES)
1765 return -ENOMEM;
1766 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1767 return -ENOMEM;
1768
2e4e6a17 1769 newinfo = xt_alloc_table_info(tmp.size);
1da177e4
LT
1770 if (!newinfo)
1771 return -ENOMEM;
1772
31836064
ED
1773 /* choose the copy that is our node/cpu */
1774 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1775 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1da177e4
LT
1776 tmp.size) != 0) {
1777 ret = -EFAULT;
1778 goto free_newinfo;
1779 }
1780
2722971c
DM
1781 ret = translate_compat_table(tmp.name, tmp.valid_hooks,
1782 &newinfo, &loc_cpu_entry, tmp.size,
1783 tmp.num_entries, tmp.hook_entry, tmp.underflow);
1784 if (ret != 0)
1da177e4 1785 goto free_newinfo;
1da177e4 1786
2722971c 1787 duprintf("compat_do_replace: Translated table\n");
1da177e4 1788
2722971c
DM
1789 ret = __do_replace(tmp.name, tmp.valid_hooks,
1790 newinfo, tmp.num_counters,
1791 compat_ptr(tmp.counters));
1792 if (ret)
1793 goto free_newinfo_untrans;
1794 return 0;
1da177e4 1795
2722971c
DM
1796 free_newinfo_untrans:
1797 IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry,NULL);
1798 free_newinfo:
1799 xt_free_table_info(newinfo);
1800 return ret;
1801}
1da177e4 1802
2722971c
DM
1803static int
1804compat_do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user,
1805 unsigned int len)
1806{
1807 int ret;
1da177e4 1808
2722971c
DM
1809 if (!capable(CAP_NET_ADMIN))
1810 return -EPERM;
1da177e4 1811
2722971c
DM
1812 switch (cmd) {
1813 case IPT_SO_SET_REPLACE:
1814 ret = compat_do_replace(user, len);
1815 break;
1da177e4 1816
2722971c
DM
1817 case IPT_SO_SET_ADD_COUNTERS:
1818 ret = do_add_counters(user, len, 1);
1819 break;
1820
1821 default:
1822 duprintf("do_ipt_set_ctl: unknown request %i\n", cmd);
1823 ret = -EINVAL;
1824 }
1da177e4 1825
1da177e4
LT
1826 return ret;
1827}
1828
2722971c 1829struct compat_ipt_get_entries
1da177e4 1830{
2722971c
DM
1831 char name[IPT_TABLE_MAXNAMELEN];
1832 compat_uint_t size;
1833 struct compat_ipt_entry entrytable[0];
1834};
1da177e4 1835
2722971c 1836static int compat_copy_entries_to_user(unsigned int total_size,
e60a13e0 1837 struct xt_table *table, void __user *userptr)
2722971c
DM
1838{
1839 unsigned int off, num;
1840 struct compat_ipt_entry e;
1841 struct xt_counters *counters;
1842 struct xt_table_info *private = table->private;
1843 void __user *pos;
1844 unsigned int size;
1845 int ret = 0;
1846 void *loc_cpu_entry;
1da177e4 1847
2722971c
DM
1848 counters = alloc_counters(table);
1849 if (IS_ERR(counters))
1850 return PTR_ERR(counters);
1851
1852 /* choose the copy that is on our node/cpu, ...
1853 * This choice is lazy (because current thread is
1854 * allowed to migrate to another cpu)
1855 */
1856 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1857 pos = userptr;
1858 size = total_size;
1859 ret = IPT_ENTRY_ITERATE(loc_cpu_entry, total_size,
1860 compat_copy_entry_to_user, &pos, &size);
1861 if (ret)
1862 goto free_counters;
1863
1864 /* ... then go back and fix counters and names */
1865 for (off = 0, num = 0; off < size; off += e.next_offset, num++) {
1866 unsigned int i;
1867 struct ipt_entry_match m;
1868 struct ipt_entry_target t;
1869
1870 ret = -EFAULT;
1871 if (copy_from_user(&e, userptr + off,
1872 sizeof(struct compat_ipt_entry)))
1873 goto free_counters;
1874 if (copy_to_user(userptr + off +
1875 offsetof(struct compat_ipt_entry, counters),
1876 &counters[num], sizeof(counters[num])))
1877 goto free_counters;
1878
1879 for (i = sizeof(struct compat_ipt_entry);
1880 i < e.target_offset; i += m.u.match_size) {
1881 if (copy_from_user(&m, userptr + off + i,
1882 sizeof(struct ipt_entry_match)))
1883 goto free_counters;
1884 if (copy_to_user(userptr + off + i +
1885 offsetof(struct ipt_entry_match, u.user.name),
1886 m.u.kernel.match->name,
1887 strlen(m.u.kernel.match->name) + 1))
1888 goto free_counters;
1889 }
1890
1891 if (copy_from_user(&t, userptr + off + e.target_offset,
1892 sizeof(struct ipt_entry_target)))
1893 goto free_counters;
1894 if (copy_to_user(userptr + off + e.target_offset +
1895 offsetof(struct ipt_entry_target, u.user.name),
1896 t.u.kernel.target->name,
1897 strlen(t.u.kernel.target->name) + 1))
1898 goto free_counters;
1899 }
1900 ret = 0;
1901free_counters:
1902 vfree(counters);
1903 return ret;
1da177e4
LT
1904}
1905
1906static int
2722971c 1907compat_get_entries(struct compat_ipt_get_entries __user *uptr, int *len)
1da177e4 1908{
2722971c
DM
1909 int ret;
1910 struct compat_ipt_get_entries get;
e60a13e0 1911 struct xt_table *t;
1da177e4 1912
1da177e4 1913
2722971c
DM
1914 if (*len < sizeof(get)) {
1915 duprintf("compat_get_entries: %u < %u\n",
1916 *len, (unsigned int)sizeof(get));
1da177e4 1917 return -EINVAL;
2722971c 1918 }
1da177e4 1919
2722971c
DM
1920 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1921 return -EFAULT;
1da177e4 1922
2722971c
DM
1923 if (*len != sizeof(struct compat_ipt_get_entries) + get.size) {
1924 duprintf("compat_get_entries: %u != %u\n", *len,
1925 (unsigned int)(sizeof(struct compat_ipt_get_entries) +
1926 get.size));
1927 return -EINVAL;
1da177e4
LT
1928 }
1929
2722971c
DM
1930 xt_compat_lock(AF_INET);
1931 t = xt_find_table_lock(AF_INET, get.name);
1932 if (t && !IS_ERR(t)) {
1933 struct xt_table_info *private = t->private;
1934 struct xt_table_info info;
1935 duprintf("t->private->number = %u\n",
1936 private->number);
1937 ret = compat_table_info(private, &info);
1938 if (!ret && get.size == info.size) {
1939 ret = compat_copy_entries_to_user(private->size,
1940 t, uptr->entrytable);
1941 } else if (!ret) {
1942 duprintf("compat_get_entries: I've got %u not %u!\n",
1943 private->size,
1944 get.size);
1945 ret = -EINVAL;
1946 }
1947 compat_flush_offsets();
1948 module_put(t->me);
1949 xt_table_unlock(t);
1950 } else
1da177e4 1951 ret = t ? PTR_ERR(t) : -ENOENT;
1da177e4 1952
2722971c
DM
1953 xt_compat_unlock(AF_INET);
1954 return ret;
1955}
1da177e4 1956
79030ed0
PM
1957static int do_ipt_get_ctl(struct sock *, int, void __user *, int *);
1958
2722971c
DM
1959static int
1960compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1961{
1962 int ret;
1da177e4 1963
82fac054
BS
1964 if (!capable(CAP_NET_ADMIN))
1965 return -EPERM;
1966
2722971c
DM
1967 switch (cmd) {
1968 case IPT_SO_GET_INFO:
1969 ret = get_info(user, len, 1);
1970 break;
1971 case IPT_SO_GET_ENTRIES:
1972 ret = compat_get_entries(user, len);
1973 break;
1974 default:
79030ed0 1975 ret = do_ipt_get_ctl(sk, cmd, user, len);
2722971c 1976 }
1da177e4
LT
1977 return ret;
1978}
2722971c 1979#endif
1da177e4
LT
1980
1981static int
1982do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
1983{
1984 int ret;
1985
1986 if (!capable(CAP_NET_ADMIN))
1987 return -EPERM;
1988
1989 switch (cmd) {
1990 case IPT_SO_SET_REPLACE:
1991 ret = do_replace(user, len);
1992 break;
1993
1994 case IPT_SO_SET_ADD_COUNTERS:
2722971c 1995 ret = do_add_counters(user, len, 0);
1da177e4
LT
1996 break;
1997
1998 default:
1999 duprintf("do_ipt_set_ctl: unknown request %i\n", cmd);
2000 ret = -EINVAL;
2001 }
2002
2003 return ret;
2004}
2005
2006static int
2007do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2008{
2009 int ret;
2010
2011 if (!capable(CAP_NET_ADMIN))
2012 return -EPERM;
2013
2014 switch (cmd) {
2722971c
DM
2015 case IPT_SO_GET_INFO:
2016 ret = get_info(user, len, 0);
2017 break;
1da177e4 2018
2722971c
DM
2019 case IPT_SO_GET_ENTRIES:
2020 ret = get_entries(user, len);
1da177e4 2021 break;
1da177e4
LT
2022
2023 case IPT_SO_GET_REVISION_MATCH:
2024 case IPT_SO_GET_REVISION_TARGET: {
2025 struct ipt_get_revision rev;
2e4e6a17 2026 int target;
1da177e4
LT
2027
2028 if (*len != sizeof(rev)) {
2029 ret = -EINVAL;
2030 break;
2031 }
2032 if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
2033 ret = -EFAULT;
2034 break;
2035 }
2036
2037 if (cmd == IPT_SO_GET_REVISION_TARGET)
2e4e6a17 2038 target = 1;
1da177e4 2039 else
2e4e6a17 2040 target = 0;
1da177e4 2041
2e4e6a17
HW
2042 try_then_request_module(xt_find_revision(AF_INET, rev.name,
2043 rev.revision,
2044 target, &ret),
1da177e4
LT
2045 "ipt_%s", rev.name);
2046 break;
2047 }
2048
2049 default:
2050 duprintf("do_ipt_get_ctl: unknown request %i\n", cmd);
2051 ret = -EINVAL;
2052 }
2053
2054 return ret;
2055}
2056
2e4e6a17 2057int ipt_register_table(struct xt_table *table, const struct ipt_replace *repl)
1da177e4
LT
2058{
2059 int ret;
2e4e6a17
HW
2060 struct xt_table_info *newinfo;
2061 static struct xt_table_info bootstrap
1da177e4 2062 = { 0, 0, 0, { 0 }, { 0 }, { } };
31836064 2063 void *loc_cpu_entry;
1da177e4 2064
2e4e6a17 2065 newinfo = xt_alloc_table_info(repl->size);
1da177e4
LT
2066 if (!newinfo)
2067 return -ENOMEM;
2068
31836064
ED
2069 /* choose the copy on our node/cpu
2070 * but dont care of preemption
2071 */
2072 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
2073 memcpy(loc_cpu_entry, repl->entries, repl->size);
1da177e4
LT
2074
2075 ret = translate_table(table->name, table->valid_hooks,
31836064 2076 newinfo, loc_cpu_entry, repl->size,
1da177e4
LT
2077 repl->num_entries,
2078 repl->hook_entry,
2079 repl->underflow);
2080 if (ret != 0) {
2e4e6a17 2081 xt_free_table_info(newinfo);
1da177e4
LT
2082 return ret;
2083 }
2084
da298d3a
PM
2085 ret = xt_register_table(table, &bootstrap, newinfo);
2086 if (ret != 0) {
2e4e6a17 2087 xt_free_table_info(newinfo);
1da177e4
LT
2088 return ret;
2089 }
2090
2e4e6a17 2091 return 0;
1da177e4
LT
2092}
2093
e60a13e0 2094void ipt_unregister_table(struct xt_table *table)
1da177e4 2095{
2e4e6a17 2096 struct xt_table_info *private;
31836064
ED
2097 void *loc_cpu_entry;
2098
e905a9ed 2099 private = xt_unregister_table(table);
1da177e4
LT
2100
2101 /* Decrease module usage counts and free resources */
2e4e6a17
HW
2102 loc_cpu_entry = private->entries[raw_smp_processor_id()];
2103 IPT_ENTRY_ITERATE(loc_cpu_entry, private->size, cleanup_entry, NULL);
2104 xt_free_table_info(private);
1da177e4
LT
2105}
2106
/* Returns true if the ICMP (type, code) pair is matched by the rule's
 * (test_type, [min_code, max_code]) range, false otherwise.  A test_type
 * of 0xFF is a wildcard matching any type/code.  @invert flips the
 * result. */
static inline bool
icmp_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
		     u_int8_t type, u_int8_t code,
		     bool invert)
{
	return ((test_type == 0xFF) ||
		(type == test_type && code >= min_code && code <= max_code))
		^ invert;
}
2116
1d93a9cb 2117static bool
1da177e4
LT
2118icmp_match(const struct sk_buff *skb,
2119 const struct net_device *in,
2120 const struct net_device *out,
c4986734 2121 const struct xt_match *match,
1da177e4
LT
2122 const void *matchinfo,
2123 int offset,
2e4e6a17 2124 unsigned int protoff,
cff533ac 2125 bool *hotdrop)
1da177e4
LT
2126{
2127 struct icmphdr _icmph, *ic;
2128 const struct ipt_icmp *icmpinfo = matchinfo;
2129
2130 /* Must not be a fragment. */
2131 if (offset)
1d93a9cb 2132 return false;
1da177e4 2133
2e4e6a17 2134 ic = skb_header_pointer(skb, protoff, sizeof(_icmph), &_icmph);
1da177e4
LT
2135 if (ic == NULL) {
2136 /* We've been asked to examine this packet, and we
2137 * can't. Hence, no choice but to drop.
2138 */
2139 duprintf("Dropping evil ICMP tinygram.\n");
cff533ac 2140 *hotdrop = true;
1d93a9cb 2141 return false;
1da177e4
LT
2142 }
2143
2144 return icmp_type_code_match(icmpinfo->type,
2145 icmpinfo->code[0],
2146 icmpinfo->code[1],
2147 ic->type, ic->code,
2148 !!(icmpinfo->invflags&IPT_ICMP_INV));
2149}
2150
2151/* Called when user tries to insert an entry of this type. */
ccb79bdc 2152static bool
1da177e4 2153icmp_checkentry(const char *tablename,
2e4e6a17 2154 const void *info,
c4986734 2155 const struct xt_match *match,
1da177e4 2156 void *matchinfo,
1da177e4
LT
2157 unsigned int hook_mask)
2158{
2159 const struct ipt_icmp *icmpinfo = matchinfo;
2160
1d5cd909
PM
2161 /* Must specify no unknown invflags */
2162 return !(icmpinfo->invflags & ~IPT_ICMP_INV);
1da177e4
LT
2163}
2164
2165/* The built-in targets: standard (NULL) and error. */
6709dbbb 2166static struct xt_target ipt_standard_target = {
1da177e4 2167 .name = IPT_STANDARD_TARGET,
1d5cd909 2168 .targetsize = sizeof(int),
a45049c5 2169 .family = AF_INET,
2722971c 2170#ifdef CONFIG_COMPAT
9fa492cd
PM
2171 .compatsize = sizeof(compat_int_t),
2172 .compat_from_user = compat_standard_from_user,
2173 .compat_to_user = compat_standard_to_user,
2722971c 2174#endif
1da177e4
LT
2175};
2176
6709dbbb 2177static struct xt_target ipt_error_target = {
1da177e4
LT
2178 .name = IPT_ERROR_TARGET,
2179 .target = ipt_error,
1d5cd909 2180 .targetsize = IPT_FUNCTION_MAXNAMELEN,
a45049c5 2181 .family = AF_INET,
1da177e4
LT
2182};
2183
2184static struct nf_sockopt_ops ipt_sockopts = {
2185 .pf = PF_INET,
2186 .set_optmin = IPT_BASE_CTL,
2187 .set_optmax = IPT_SO_SET_MAX+1,
2188 .set = do_ipt_set_ctl,
2722971c
DM
2189#ifdef CONFIG_COMPAT
2190 .compat_set = compat_do_ipt_set_ctl,
2191#endif
1da177e4
LT
2192 .get_optmin = IPT_BASE_CTL,
2193 .get_optmax = IPT_SO_GET_MAX+1,
2194 .get = do_ipt_get_ctl,
2722971c
DM
2195#ifdef CONFIG_COMPAT
2196 .compat_get = compat_do_ipt_get_ctl,
2197#endif
1da177e4
LT
2198};
2199
6709dbbb 2200static struct xt_match icmp_matchstruct = {
1da177e4 2201 .name = "icmp",
1d5cd909
PM
2202 .match = icmp_match,
2203 .matchsize = sizeof(struct ipt_icmp),
2204 .proto = IPPROTO_ICMP,
a45049c5 2205 .family = AF_INET,
1d5cd909 2206 .checkentry = icmp_checkentry,
1da177e4
LT
2207};
2208
65b4b4e8 2209static int __init ip_tables_init(void)
1da177e4
LT
2210{
2211 int ret;
2212
0eff66e6
PM
2213 ret = xt_proto_init(AF_INET);
2214 if (ret < 0)
2215 goto err1;
2e4e6a17 2216
1da177e4 2217 /* Noone else will be downing sem now, so we won't sleep */
0eff66e6
PM
2218 ret = xt_register_target(&ipt_standard_target);
2219 if (ret < 0)
2220 goto err2;
2221 ret = xt_register_target(&ipt_error_target);
2222 if (ret < 0)
2223 goto err3;
2224 ret = xt_register_match(&icmp_matchstruct);
2225 if (ret < 0)
2226 goto err4;
1da177e4
LT
2227
2228 /* Register setsockopt */
2229 ret = nf_register_sockopt(&ipt_sockopts);
0eff66e6
PM
2230 if (ret < 0)
2231 goto err5;
1da177e4 2232
2e4e6a17 2233 printk("ip_tables: (C) 2000-2006 Netfilter Core Team\n");
1da177e4 2234 return 0;
0eff66e6
PM
2235
2236err5:
2237 xt_unregister_match(&icmp_matchstruct);
2238err4:
2239 xt_unregister_target(&ipt_error_target);
2240err3:
2241 xt_unregister_target(&ipt_standard_target);
2242err2:
2243 xt_proto_fini(AF_INET);
2244err1:
2245 return ret;
1da177e4
LT
2246}
2247
65b4b4e8 2248static void __exit ip_tables_fini(void)
1da177e4
LT
2249{
2250 nf_unregister_sockopt(&ipt_sockopts);
2e4e6a17 2251
a45049c5
PNA
2252 xt_unregister_match(&icmp_matchstruct);
2253 xt_unregister_target(&ipt_error_target);
2254 xt_unregister_target(&ipt_standard_target);
2e4e6a17
HW
2255
2256 xt_proto_fini(AF_INET);
1da177e4
LT
2257}
2258
EXPORT_SYMBOL(ipt_register_table);
EXPORT_SYMBOL(ipt_unregister_table);
EXPORT_SYMBOL(ipt_do_table);
module_init(ip_tables_init);
module_exit(ip_tables_fini);
This page took 0.380305 seconds and 5 git commands to generate.