[NETFILTER]: ip_tables: fix compat types
[deliverable/linux.git] / net / ipv4 / netfilter / ip_tables.c
CommitLineData
1da177e4
LT
1/*
2 * Packet matching code.
3 *
4 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
2e4e6a17 5 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
1da177e4
LT
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
1da177e4 10 */
1da177e4 11#include <linux/cache.h>
4fc268d2 12#include <linux/capability.h>
1da177e4
LT
13#include <linux/skbuff.h>
14#include <linux/kmod.h>
15#include <linux/vmalloc.h>
16#include <linux/netdevice.h>
17#include <linux/module.h>
1da177e4
LT
18#include <linux/icmp.h>
19#include <net/ip.h>
2722971c 20#include <net/compat.h>
1da177e4 21#include <asm/uaccess.h>
57b47a53 22#include <linux/mutex.h>
1da177e4
LT
23#include <linux/proc_fs.h>
24#include <linux/err.h>
c8923c6b 25#include <linux/cpumask.h>
1da177e4 26
2e4e6a17 27#include <linux/netfilter/x_tables.h>
1da177e4
LT
28#include <linux/netfilter_ipv4/ip_tables.h>
29
30MODULE_LICENSE("GPL");
31MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
32MODULE_DESCRIPTION("IPv4 packet filter");
33
34/*#define DEBUG_IP_FIREWALL*/
35/*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
36/*#define DEBUG_IP_FIREWALL_USER*/
37
38#ifdef DEBUG_IP_FIREWALL
39#define dprintf(format, args...) printk(format , ## args)
40#else
41#define dprintf(format, args...)
42#endif
43
44#ifdef DEBUG_IP_FIREWALL_USER
45#define duprintf(format, args...) printk(format , ## args)
46#else
47#define duprintf(format, args...)
48#endif
49
50#ifdef CONFIG_NETFILTER_DEBUG
51#define IP_NF_ASSERT(x) \
52do { \
53 if (!(x)) \
54 printk("IP_NF_ASSERT: %s:%s:%u\n", \
55 __FUNCTION__, __FILE__, __LINE__); \
56} while(0)
57#else
58#define IP_NF_ASSERT(x)
59#endif
1da177e4
LT
60
61#if 0
62/* All the better to debug you with... */
63#define static
64#define inline
65#endif
66
67/*
68 We keep a set of rules for each CPU, so we can avoid write-locking
69 them in the softirq when updating the counters and therefore
70 only need to read-lock in the softirq; doing a write_lock_bh() in user
71 context stops packets coming through and allows user context to read
72 the counters or update the rules.
73
1da177e4
LT
74 Hence the start of any table is given by get_table() below. */
75
1da177e4
LT
76/* Returns whether matches rule or not. */
77static inline int
78ip_packet_match(const struct iphdr *ip,
79 const char *indev,
80 const char *outdev,
81 const struct ipt_ip *ipinfo,
82 int isfrag)
83{
84 size_t i;
85 unsigned long ret;
86
87#define FWINV(bool,invflg) ((bool) ^ !!(ipinfo->invflags & invflg))
88
89 if (FWINV((ip->saddr&ipinfo->smsk.s_addr) != ipinfo->src.s_addr,
90 IPT_INV_SRCIP)
91 || FWINV((ip->daddr&ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr,
92 IPT_INV_DSTIP)) {
93 dprintf("Source or dest mismatch.\n");
94
95 dprintf("SRC: %u.%u.%u.%u. Mask: %u.%u.%u.%u. Target: %u.%u.%u.%u.%s\n",
96 NIPQUAD(ip->saddr),
97 NIPQUAD(ipinfo->smsk.s_addr),
98 NIPQUAD(ipinfo->src.s_addr),
99 ipinfo->invflags & IPT_INV_SRCIP ? " (INV)" : "");
100 dprintf("DST: %u.%u.%u.%u Mask: %u.%u.%u.%u Target: %u.%u.%u.%u.%s\n",
101 NIPQUAD(ip->daddr),
102 NIPQUAD(ipinfo->dmsk.s_addr),
103 NIPQUAD(ipinfo->dst.s_addr),
104 ipinfo->invflags & IPT_INV_DSTIP ? " (INV)" : "");
105 return 0;
106 }
107
108 /* Look for ifname matches; this should unroll nicely. */
109 for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
110 ret |= (((const unsigned long *)indev)[i]
111 ^ ((const unsigned long *)ipinfo->iniface)[i])
112 & ((const unsigned long *)ipinfo->iniface_mask)[i];
113 }
114
115 if (FWINV(ret != 0, IPT_INV_VIA_IN)) {
116 dprintf("VIA in mismatch (%s vs %s).%s\n",
117 indev, ipinfo->iniface,
118 ipinfo->invflags&IPT_INV_VIA_IN ?" (INV)":"");
119 return 0;
120 }
121
122 for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
123 ret |= (((const unsigned long *)outdev)[i]
124 ^ ((const unsigned long *)ipinfo->outiface)[i])
125 & ((const unsigned long *)ipinfo->outiface_mask)[i];
126 }
127
128 if (FWINV(ret != 0, IPT_INV_VIA_OUT)) {
129 dprintf("VIA out mismatch (%s vs %s).%s\n",
130 outdev, ipinfo->outiface,
131 ipinfo->invflags&IPT_INV_VIA_OUT ?" (INV)":"");
132 return 0;
133 }
134
135 /* Check specific protocol */
136 if (ipinfo->proto
137 && FWINV(ip->protocol != ipinfo->proto, IPT_INV_PROTO)) {
138 dprintf("Packet protocol %hi does not match %hi.%s\n",
139 ip->protocol, ipinfo->proto,
140 ipinfo->invflags&IPT_INV_PROTO ? " (INV)":"");
141 return 0;
142 }
143
144 /* If we have a fragment rule but the packet is not a fragment
145 * then we return zero */
146 if (FWINV((ipinfo->flags&IPT_F_FRAG) && !isfrag, IPT_INV_FRAG)) {
147 dprintf("Fragment rule but not fragment.%s\n",
148 ipinfo->invflags & IPT_INV_FRAG ? " (INV)" : "");
149 return 0;
150 }
151
152 return 1;
153}
154
ccb79bdc 155static inline bool
1da177e4
LT
156ip_checkentry(const struct ipt_ip *ip)
157{
158 if (ip->flags & ~IPT_F_MASK) {
159 duprintf("Unknown flag bits set: %08X\n",
160 ip->flags & ~IPT_F_MASK);
ccb79bdc 161 return false;
1da177e4
LT
162 }
163 if (ip->invflags & ~IPT_INV_MASK) {
164 duprintf("Unknown invflag bits set: %08X\n",
165 ip->invflags & ~IPT_INV_MASK);
ccb79bdc 166 return false;
1da177e4 167 }
ccb79bdc 168 return true;
1da177e4
LT
169}
170
171static unsigned int
3db05fea 172ipt_error(struct sk_buff *skb,
1da177e4
LT
173 const struct net_device *in,
174 const struct net_device *out,
175 unsigned int hooknum,
c4986734 176 const struct xt_target *target,
fe1cb108 177 const void *targinfo)
1da177e4
LT
178{
179 if (net_ratelimit())
180 printk("ip_tables: error: `%s'\n", (char *)targinfo);
181
182 return NF_DROP;
183}
184
185static inline
1d93a9cb
JE
186bool do_match(struct ipt_entry_match *m,
187 const struct sk_buff *skb,
188 const struct net_device *in,
189 const struct net_device *out,
190 int offset,
191 bool *hotdrop)
1da177e4
LT
192{
193 /* Stop iteration if it doesn't match */
1c524830 194 if (!m->u.kernel.match->match(skb, in, out, m->u.kernel.match, m->data,
c9bdd4b5 195 offset, ip_hdrlen(skb), hotdrop))
1d93a9cb 196 return true;
1da177e4 197 else
1d93a9cb 198 return false;
1da177e4
LT
199}
200
201static inline struct ipt_entry *
202get_entry(void *base, unsigned int offset)
203{
204 return (struct ipt_entry *)(base + offset);
205}
206
ba9dda3a
JK
207/* All zeroes == unconditional rule. */
208static inline int
209unconditional(const struct ipt_ip *ip)
210{
211 unsigned int i;
212
213 for (i = 0; i < sizeof(*ip)/sizeof(__u32); i++)
214 if (((__u32 *)ip)[i])
215 return 0;
216
217 return 1;
218}
219
220#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
221 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
222static const char *hooknames[] = {
6e23ae2a
PM
223 [NF_INET_PRE_ROUTING] = "PREROUTING",
224 [NF_INET_LOCAL_IN] = "INPUT",
225 [NF_INET_FORWARD] = "FORWARD",
226 [NF_INET_LOCAL_OUT] = "OUTPUT",
227 [NF_INET_POST_ROUTING] = "POSTROUTING",
ba9dda3a
JK
228};
229
230enum nf_ip_trace_comments {
231 NF_IP_TRACE_COMMENT_RULE,
232 NF_IP_TRACE_COMMENT_RETURN,
233 NF_IP_TRACE_COMMENT_POLICY,
234};
235
236static const char *comments[] = {
237 [NF_IP_TRACE_COMMENT_RULE] = "rule",
238 [NF_IP_TRACE_COMMENT_RETURN] = "return",
239 [NF_IP_TRACE_COMMENT_POLICY] = "policy",
240};
241
242static struct nf_loginfo trace_loginfo = {
243 .type = NF_LOG_TYPE_LOG,
244 .u = {
245 .log = {
246 .level = 4,
247 .logflags = NF_LOG_MASK,
248 },
249 },
250};
251
252static inline int
253get_chainname_rulenum(struct ipt_entry *s, struct ipt_entry *e,
254 char *hookname, char **chainname,
255 char **comment, unsigned int *rulenum)
256{
257 struct ipt_standard_target *t = (void *)ipt_get_target(s);
258
259 if (strcmp(t->target.u.kernel.target->name, IPT_ERROR_TARGET) == 0) {
260 /* Head of user chain: ERROR target with chainname */
261 *chainname = t->target.data;
262 (*rulenum) = 0;
263 } else if (s == e) {
264 (*rulenum)++;
265
266 if (s->target_offset == sizeof(struct ipt_entry)
267 && strcmp(t->target.u.kernel.target->name,
268 IPT_STANDARD_TARGET) == 0
269 && t->verdict < 0
270 && unconditional(&s->ip)) {
271 /* Tail of chains: STANDARD target (return/policy) */
272 *comment = *chainname == hookname
273 ? (char *)comments[NF_IP_TRACE_COMMENT_POLICY]
274 : (char *)comments[NF_IP_TRACE_COMMENT_RETURN];
275 }
276 return 1;
277 } else
278 (*rulenum)++;
279
280 return 0;
281}
282
283static void trace_packet(struct sk_buff *skb,
284 unsigned int hook,
285 const struct net_device *in,
286 const struct net_device *out,
287 char *tablename,
288 struct xt_table_info *private,
289 struct ipt_entry *e)
290{
291 void *table_base;
292 struct ipt_entry *root;
293 char *hookname, *chainname, *comment;
294 unsigned int rulenum = 0;
295
296 table_base = (void *)private->entries[smp_processor_id()];
297 root = get_entry(table_base, private->hook_entry[hook]);
298
299 hookname = chainname = (char *)hooknames[hook];
300 comment = (char *)comments[NF_IP_TRACE_COMMENT_RULE];
301
302 IPT_ENTRY_ITERATE(root,
303 private->size - private->hook_entry[hook],
304 get_chainname_rulenum,
305 e, hookname, &chainname, &comment, &rulenum);
306
307 nf_log_packet(AF_INET, hook, skb, in, out, &trace_loginfo,
308 "TRACE: %s:%s:%s:%u ",
309 tablename, chainname, comment, rulenum);
310}
311#endif
312
1da177e4
LT
313/* Returns one of the generic firewall policies, like NF_ACCEPT. */
314unsigned int
3db05fea 315ipt_do_table(struct sk_buff *skb,
1da177e4
LT
316 unsigned int hook,
317 const struct net_device *in,
318 const struct net_device *out,
e60a13e0 319 struct xt_table *table)
1da177e4
LT
320{
321 static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
322 u_int16_t offset;
323 struct iphdr *ip;
324 u_int16_t datalen;
cff533ac 325 bool hotdrop = false;
1da177e4
LT
326 /* Initializing verdict to NF_DROP keeps gcc happy. */
327 unsigned int verdict = NF_DROP;
328 const char *indev, *outdev;
329 void *table_base;
330 struct ipt_entry *e, *back;
8311731a 331 struct xt_table_info *private;
1da177e4
LT
332
333 /* Initialization */
3db05fea
HX
334 ip = ip_hdr(skb);
335 datalen = skb->len - ip->ihl * 4;
1da177e4
LT
336 indev = in ? in->name : nulldevname;
337 outdev = out ? out->name : nulldevname;
338 /* We handle fragments by dealing with the first fragment as
339 * if it was a normal packet. All other fragments are treated
340 * normally, except that they will NEVER match rules that ask
341 * things we don't know, ie. tcp syn flag or ports). If the
342 * rule is also a fragment-specific rule, non-fragments won't
343 * match it. */
344 offset = ntohs(ip->frag_off) & IP_OFFSET;
345
346 read_lock_bh(&table->lock);
347 IP_NF_ASSERT(table->valid_hooks & (1 << hook));
8311731a 348 private = table->private;
2e4e6a17
HW
349 table_base = (void *)private->entries[smp_processor_id()];
350 e = get_entry(table_base, private->hook_entry[hook]);
1da177e4
LT
351
352 /* For return from builtin chain */
2e4e6a17 353 back = get_entry(table_base, private->underflow[hook]);
1da177e4
LT
354
355 do {
356 IP_NF_ASSERT(e);
357 IP_NF_ASSERT(back);
1da177e4
LT
358 if (ip_packet_match(ip, indev, outdev, &e->ip, offset)) {
359 struct ipt_entry_target *t;
360
361 if (IPT_MATCH_ITERATE(e, do_match,
3db05fea 362 skb, in, out,
1da177e4
LT
363 offset, &hotdrop) != 0)
364 goto no_match;
365
366 ADD_COUNTER(e->counters, ntohs(ip->tot_len), 1);
367
368 t = ipt_get_target(e);
369 IP_NF_ASSERT(t->u.kernel.target);
ba9dda3a
JK
370
371#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
372 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
373 /* The packet is traced: log it */
3db05fea
HX
374 if (unlikely(skb->nf_trace))
375 trace_packet(skb, hook, in, out,
ba9dda3a
JK
376 table->name, private, e);
377#endif
1da177e4
LT
378 /* Standard target? */
379 if (!t->u.kernel.target->target) {
380 int v;
381
382 v = ((struct ipt_standard_target *)t)->verdict;
383 if (v < 0) {
384 /* Pop from stack? */
385 if (v != IPT_RETURN) {
386 verdict = (unsigned)(-v) - 1;
387 break;
388 }
389 e = back;
390 back = get_entry(table_base,
391 back->comefrom);
392 continue;
393 }
05465343
PM
394 if (table_base + v != (void *)e + e->next_offset
395 && !(e->ip.flags & IPT_F_GOTO)) {
1da177e4
LT
396 /* Save old back ptr in next entry */
397 struct ipt_entry *next
398 = (void *)e + e->next_offset;
399 next->comefrom
400 = (void *)back - table_base;
401 /* set back pointer to next entry */
402 back = next;
403 }
404
405 e = get_entry(table_base, v);
406 } else {
407 /* Targets which reenter must return
e905a9ed 408 abs. verdicts */
1da177e4
LT
409#ifdef CONFIG_NETFILTER_DEBUG
410 ((struct ipt_entry *)table_base)->comefrom
411 = 0xeeeeeeec;
412#endif
3db05fea 413 verdict = t->u.kernel.target->target(skb,
1da177e4
LT
414 in, out,
415 hook,
1c524830 416 t->u.kernel.target,
fe1cb108 417 t->data);
1da177e4
LT
418
419#ifdef CONFIG_NETFILTER_DEBUG
420 if (((struct ipt_entry *)table_base)->comefrom
421 != 0xeeeeeeec
422 && verdict == IPT_CONTINUE) {
423 printk("Target %s reentered!\n",
424 t->u.kernel.target->name);
425 verdict = NF_DROP;
426 }
427 ((struct ipt_entry *)table_base)->comefrom
428 = 0x57acc001;
429#endif
430 /* Target might have changed stuff. */
3db05fea
HX
431 ip = ip_hdr(skb);
432 datalen = skb->len - ip->ihl * 4;
1da177e4
LT
433
434 if (verdict == IPT_CONTINUE)
435 e = (void *)e + e->next_offset;
436 else
437 /* Verdict */
438 break;
439 }
440 } else {
441
442 no_match:
443 e = (void *)e + e->next_offset;
444 }
445 } while (!hotdrop);
446
1da177e4
LT
447 read_unlock_bh(&table->lock);
448
449#ifdef DEBUG_ALLOW_ALL
450 return NF_ACCEPT;
451#else
452 if (hotdrop)
453 return NF_DROP;
454 else return verdict;
455#endif
456}
457
1da177e4
LT
458/* Figures out from what hook each rule can be called: returns 0 if
459 there are loops. Puts hook bitmask in comefrom. */
460static int
2e4e6a17 461mark_source_chains(struct xt_table_info *newinfo,
31836064 462 unsigned int valid_hooks, void *entry0)
1da177e4
LT
463{
464 unsigned int hook;
465
466 /* No recursion; use packet counter to save back ptrs (reset
467 to 0 as we leave), and comefrom to save source hook bitmask */
6e23ae2a 468 for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
1da177e4
LT
469 unsigned int pos = newinfo->hook_entry[hook];
470 struct ipt_entry *e
31836064 471 = (struct ipt_entry *)(entry0 + pos);
1da177e4
LT
472
473 if (!(valid_hooks & (1 << hook)))
474 continue;
475
476 /* Set initial back pointer. */
477 e->counters.pcnt = pos;
478
479 for (;;) {
480 struct ipt_standard_target *t
481 = (void *)ipt_get_target(e);
e1b4b9f3 482 int visited = e->comefrom & (1 << hook);
1da177e4 483
6e23ae2a 484 if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
1da177e4
LT
485 printk("iptables: loop hook %u pos %u %08X.\n",
486 hook, pos, e->comefrom);
487 return 0;
488 }
489 e->comefrom
6e23ae2a 490 |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));
1da177e4
LT
491
492 /* Unconditional return/END. */
e1b4b9f3 493 if ((e->target_offset == sizeof(struct ipt_entry)
1da177e4
LT
494 && (strcmp(t->target.u.user.name,
495 IPT_STANDARD_TARGET) == 0)
496 && t->verdict < 0
e1b4b9f3 497 && unconditional(&e->ip)) || visited) {
1da177e4
LT
498 unsigned int oldpos, size;
499
74c9c0c1
DM
500 if (t->verdict < -NF_MAX_VERDICT - 1) {
501 duprintf("mark_source_chains: bad "
502 "negative verdict (%i)\n",
503 t->verdict);
504 return 0;
505 }
506
1da177e4
LT
507 /* Return: backtrack through the last
508 big jump. */
509 do {
6e23ae2a 510 e->comefrom ^= (1<<NF_INET_NUMHOOKS);
1da177e4
LT
511#ifdef DEBUG_IP_FIREWALL_USER
512 if (e->comefrom
6e23ae2a 513 & (1 << NF_INET_NUMHOOKS)) {
1da177e4
LT
514 duprintf("Back unset "
515 "on hook %u "
516 "rule %u\n",
517 hook, pos);
518 }
519#endif
520 oldpos = pos;
521 pos = e->counters.pcnt;
522 e->counters.pcnt = 0;
523
524 /* We're at the start. */
525 if (pos == oldpos)
526 goto next;
527
528 e = (struct ipt_entry *)
31836064 529 (entry0 + pos);
1da177e4
LT
530 } while (oldpos == pos + e->next_offset);
531
532 /* Move along one */
533 size = e->next_offset;
534 e = (struct ipt_entry *)
31836064 535 (entry0 + pos + size);
1da177e4
LT
536 e->counters.pcnt = pos;
537 pos += size;
538 } else {
539 int newpos = t->verdict;
540
541 if (strcmp(t->target.u.user.name,
542 IPT_STANDARD_TARGET) == 0
543 && newpos >= 0) {
74c9c0c1
DM
544 if (newpos > newinfo->size -
545 sizeof(struct ipt_entry)) {
546 duprintf("mark_source_chains: "
547 "bad verdict (%i)\n",
548 newpos);
549 return 0;
550 }
1da177e4
LT
551 /* This a jump; chase it. */
552 duprintf("Jump rule %u -> %u\n",
553 pos, newpos);
554 } else {
555 /* ... this is a fallthru */
556 newpos = pos + e->next_offset;
557 }
558 e = (struct ipt_entry *)
31836064 559 (entry0 + newpos);
1da177e4
LT
560 e->counters.pcnt = pos;
561 pos = newpos;
562 }
563 }
564 next:
565 duprintf("Finished chain %u\n", hook);
566 }
567 return 1;
568}
569
570static inline int
571cleanup_match(struct ipt_entry_match *m, unsigned int *i)
572{
573 if (i && (*i)-- == 0)
574 return 1;
575
576 if (m->u.kernel.match->destroy)
efa74165 577 m->u.kernel.match->destroy(m->u.kernel.match, m->data);
1da177e4
LT
578 module_put(m->u.kernel.match->me);
579 return 0;
580}
581
1da177e4 582static inline int
a96be246
DM
583check_entry(struct ipt_entry *e, const char *name)
584{
585 struct ipt_entry_target *t;
586
587 if (!ip_checkentry(&e->ip)) {
588 duprintf("ip_tables: ip check failed %p %s.\n", e, name);
589 return -EINVAL;
590 }
591
592 if (e->target_offset + sizeof(struct ipt_entry_target) > e->next_offset)
593 return -EINVAL;
594
595 t = ipt_get_target(e);
596 if (e->target_offset + t->u.target_size > e->next_offset)
597 return -EINVAL;
598
599 return 0;
600}
601
602static inline int check_match(struct ipt_entry_match *m, const char *name,
4b478248
PM
603 const struct ipt_ip *ip,
604 unsigned int hookmask, unsigned int *i)
a96be246 605{
6709dbbb 606 struct xt_match *match;
a96be246
DM
607 int ret;
608
609 match = m->u.kernel.match;
610 ret = xt_check_match(match, AF_INET, m->u.match_size - sizeof(*m),
611 name, hookmask, ip->proto,
612 ip->invflags & IPT_INV_PROTO);
613 if (!ret && m->u.kernel.match->checkentry
614 && !m->u.kernel.match->checkentry(name, ip, match, m->data,
615 hookmask)) {
616 duprintf("ip_tables: check failed for `%s'.\n",
617 m->u.kernel.match->name);
618 ret = -EINVAL;
619 }
4c1b52bc
DM
620 if (!ret)
621 (*i)++;
a96be246
DM
622 return ret;
623}
624
625static inline int
626find_check_match(struct ipt_entry_match *m,
4b478248
PM
627 const char *name,
628 const struct ipt_ip *ip,
629 unsigned int hookmask,
630 unsigned int *i)
1da177e4 631{
6709dbbb 632 struct xt_match *match;
3cdc7c95 633 int ret;
1da177e4 634
2e4e6a17 635 match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name,
1da177e4
LT
636 m->u.user.revision),
637 "ipt_%s", m->u.user.name);
638 if (IS_ERR(match) || !match) {
a96be246 639 duprintf("find_check_match: `%s' not found\n", m->u.user.name);
1da177e4
LT
640 return match ? PTR_ERR(match) : -ENOENT;
641 }
642 m->u.kernel.match = match;
643
4c1b52bc 644 ret = check_match(m, name, ip, hookmask, i);
3cdc7c95
PM
645 if (ret)
646 goto err;
647
1da177e4 648 return 0;
3cdc7c95
PM
649err:
650 module_put(m->u.kernel.match->me);
651 return ret;
1da177e4
LT
652}
653
a96be246
DM
654static inline int check_target(struct ipt_entry *e, const char *name)
655{
e905a9ed 656 struct ipt_entry_target *t;
6709dbbb 657 struct xt_target *target;
e905a9ed 658 int ret;
a96be246
DM
659
660 t = ipt_get_target(e);
661 target = t->u.kernel.target;
662 ret = xt_check_target(target, AF_INET, t->u.target_size - sizeof(*t),
663 name, e->comefrom, e->ip.proto,
664 e->ip.invflags & IPT_INV_PROTO);
665 if (!ret && t->u.kernel.target->checkentry
4b478248
PM
666 && !t->u.kernel.target->checkentry(name, e, target, t->data,
667 e->comefrom)) {
a96be246
DM
668 duprintf("ip_tables: check failed for `%s'.\n",
669 t->u.kernel.target->name);
670 ret = -EINVAL;
671 }
672 return ret;
673}
1da177e4
LT
674
675static inline int
a96be246 676find_check_entry(struct ipt_entry *e, const char *name, unsigned int size,
4b478248 677 unsigned int *i)
1da177e4
LT
678{
679 struct ipt_entry_target *t;
6709dbbb 680 struct xt_target *target;
1da177e4
LT
681 int ret;
682 unsigned int j;
683
a96be246
DM
684 ret = check_entry(e, name);
685 if (ret)
686 return ret;
590bdf7f 687
1da177e4 688 j = 0;
a96be246 689 ret = IPT_MATCH_ITERATE(e, find_check_match, name, &e->ip,
4b478248 690 e->comefrom, &j);
1da177e4
LT
691 if (ret != 0)
692 goto cleanup_matches;
693
694 t = ipt_get_target(e);
2e4e6a17 695 target = try_then_request_module(xt_find_target(AF_INET,
4b478248
PM
696 t->u.user.name,
697 t->u.user.revision),
1da177e4
LT
698 "ipt_%s", t->u.user.name);
699 if (IS_ERR(target) || !target) {
a96be246 700 duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
1da177e4
LT
701 ret = target ? PTR_ERR(target) : -ENOENT;
702 goto cleanup_matches;
703 }
704 t->u.kernel.target = target;
705
a96be246 706 ret = check_target(e, name);
3cdc7c95
PM
707 if (ret)
708 goto err;
709
1da177e4
LT
710 (*i)++;
711 return 0;
3cdc7c95
PM
712 err:
713 module_put(t->u.kernel.target->me);
1da177e4
LT
714 cleanup_matches:
715 IPT_MATCH_ITERATE(e, cleanup_match, &j);
716 return ret;
717}
718
719static inline int
720check_entry_size_and_hooks(struct ipt_entry *e,
2e4e6a17 721 struct xt_table_info *newinfo,
1da177e4
LT
722 unsigned char *base,
723 unsigned char *limit,
724 const unsigned int *hook_entries,
725 const unsigned int *underflows,
726 unsigned int *i)
727{
728 unsigned int h;
729
730 if ((unsigned long)e % __alignof__(struct ipt_entry) != 0
731 || (unsigned char *)e + sizeof(struct ipt_entry) >= limit) {
732 duprintf("Bad offset %p\n", e);
733 return -EINVAL;
734 }
735
736 if (e->next_offset
737 < sizeof(struct ipt_entry) + sizeof(struct ipt_entry_target)) {
738 duprintf("checking: element %p size %u\n",
739 e, e->next_offset);
740 return -EINVAL;
741 }
742
743 /* Check hooks & underflows */
6e23ae2a 744 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1da177e4
LT
745 if ((unsigned char *)e - base == hook_entries[h])
746 newinfo->hook_entry[h] = hook_entries[h];
747 if ((unsigned char *)e - base == underflows[h])
748 newinfo->underflow[h] = underflows[h];
749 }
750
751 /* FIXME: underflows must be unconditional, standard verdicts
e905a9ed 752 < 0 (not IPT_RETURN). --RR */
1da177e4
LT
753
754 /* Clear counters and comefrom */
2e4e6a17 755 e->counters = ((struct xt_counters) { 0, 0 });
1da177e4
LT
756 e->comefrom = 0;
757
758 (*i)++;
759 return 0;
760}
761
762static inline int
763cleanup_entry(struct ipt_entry *e, unsigned int *i)
764{
765 struct ipt_entry_target *t;
766
767 if (i && (*i)-- == 0)
768 return 1;
769
770 /* Cleanup all matches */
771 IPT_MATCH_ITERATE(e, cleanup_match, NULL);
772 t = ipt_get_target(e);
773 if (t->u.kernel.target->destroy)
efa74165 774 t->u.kernel.target->destroy(t->u.kernel.target, t->data);
1da177e4
LT
775 module_put(t->u.kernel.target->me);
776 return 0;
777}
778
779/* Checks and translates the user-supplied table segment (held in
780 newinfo) */
781static int
782translate_table(const char *name,
783 unsigned int valid_hooks,
2e4e6a17 784 struct xt_table_info *newinfo,
31836064 785 void *entry0,
1da177e4
LT
786 unsigned int size,
787 unsigned int number,
788 const unsigned int *hook_entries,
789 const unsigned int *underflows)
790{
791 unsigned int i;
792 int ret;
793
794 newinfo->size = size;
795 newinfo->number = number;
796
797 /* Init all hooks to impossible value. */
6e23ae2a 798 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1da177e4
LT
799 newinfo->hook_entry[i] = 0xFFFFFFFF;
800 newinfo->underflow[i] = 0xFFFFFFFF;
801 }
802
803 duprintf("translate_table: size %u\n", newinfo->size);
804 i = 0;
805 /* Walk through entries, checking offsets. */
31836064 806 ret = IPT_ENTRY_ITERATE(entry0, newinfo->size,
1da177e4
LT
807 check_entry_size_and_hooks,
808 newinfo,
31836064
ED
809 entry0,
810 entry0 + size,
1da177e4
LT
811 hook_entries, underflows, &i);
812 if (ret != 0)
813 return ret;
814
815 if (i != number) {
816 duprintf("translate_table: %u not %u entries\n",
817 i, number);
818 return -EINVAL;
819 }
820
821 /* Check hooks all assigned */
6e23ae2a 822 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1da177e4
LT
823 /* Only hooks which are valid */
824 if (!(valid_hooks & (1 << i)))
825 continue;
826 if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
827 duprintf("Invalid hook entry %u %u\n",
828 i, hook_entries[i]);
829 return -EINVAL;
830 }
831 if (newinfo->underflow[i] == 0xFFFFFFFF) {
832 duprintf("Invalid underflow %u %u\n",
833 i, underflows[i]);
834 return -EINVAL;
835 }
836 }
837
74c9c0c1
DM
838 if (!mark_source_chains(newinfo, valid_hooks, entry0))
839 return -ELOOP;
840
1da177e4
LT
841 /* Finally, each sanity check must pass */
842 i = 0;
31836064 843 ret = IPT_ENTRY_ITERATE(entry0, newinfo->size,
a96be246 844 find_check_entry, name, size, &i);
1da177e4 845
74c9c0c1
DM
846 if (ret != 0) {
847 IPT_ENTRY_ITERATE(entry0, newinfo->size,
848 cleanup_entry, &i);
849 return ret;
850 }
1da177e4
LT
851
852 /* And one copy for every other CPU */
6f912042 853 for_each_possible_cpu(i) {
31836064
ED
854 if (newinfo->entries[i] && newinfo->entries[i] != entry0)
855 memcpy(newinfo->entries[i], entry0, newinfo->size);
1da177e4
LT
856 }
857
858 return ret;
859}
860
1da177e4
LT
861/* Gets counters. */
862static inline int
863add_entry_to_counter(const struct ipt_entry *e,
2e4e6a17 864 struct xt_counters total[],
1da177e4
LT
865 unsigned int *i)
866{
867 ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
868
869 (*i)++;
870 return 0;
871}
872
31836064
ED
873static inline int
874set_entry_to_counter(const struct ipt_entry *e,
875 struct ipt_counters total[],
876 unsigned int *i)
877{
878 SET_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
879
880 (*i)++;
881 return 0;
882}
883
1da177e4 884static void
2e4e6a17
HW
885get_counters(const struct xt_table_info *t,
886 struct xt_counters counters[])
1da177e4
LT
887{
888 unsigned int cpu;
889 unsigned int i;
31836064
ED
890 unsigned int curcpu;
891
892 /* Instead of clearing (by a previous call to memset())
893 * the counters and using adds, we set the counters
894 * with data used by 'current' CPU
895 * We dont care about preemption here.
896 */
897 curcpu = raw_smp_processor_id();
898
899 i = 0;
900 IPT_ENTRY_ITERATE(t->entries[curcpu],
901 t->size,
902 set_entry_to_counter,
903 counters,
904 &i);
1da177e4 905
6f912042 906 for_each_possible_cpu(cpu) {
31836064
ED
907 if (cpu == curcpu)
908 continue;
1da177e4 909 i = 0;
31836064 910 IPT_ENTRY_ITERATE(t->entries[cpu],
1da177e4
LT
911 t->size,
912 add_entry_to_counter,
913 counters,
914 &i);
915 }
916}
917
e60a13e0 918static inline struct xt_counters * alloc_counters(struct xt_table *table)
1da177e4 919{
2722971c 920 unsigned int countersize;
2e4e6a17
HW
921 struct xt_counters *counters;
922 struct xt_table_info *private = table->private;
1da177e4
LT
923
924 /* We need atomic snapshot of counters: rest doesn't change
925 (other than comefrom, which userspace doesn't care
926 about). */
2e4e6a17 927 countersize = sizeof(struct xt_counters) * private->number;
31836064 928 counters = vmalloc_node(countersize, numa_node_id());
1da177e4
LT
929
930 if (counters == NULL)
2722971c 931 return ERR_PTR(-ENOMEM);
1da177e4
LT
932
933 /* First, sum counters... */
1da177e4 934 write_lock_bh(&table->lock);
2e4e6a17 935 get_counters(private, counters);
1da177e4
LT
936 write_unlock_bh(&table->lock);
937
2722971c
DM
938 return counters;
939}
940
941static int
942copy_entries_to_user(unsigned int total_size,
e60a13e0 943 struct xt_table *table,
2722971c
DM
944 void __user *userptr)
945{
946 unsigned int off, num;
947 struct ipt_entry *e;
948 struct xt_counters *counters;
949 struct xt_table_info *private = table->private;
950 int ret = 0;
951 void *loc_cpu_entry;
952
953 counters = alloc_counters(table);
954 if (IS_ERR(counters))
955 return PTR_ERR(counters);
956
31836064
ED
957 /* choose the copy that is on our node/cpu, ...
958 * This choice is lazy (because current thread is
959 * allowed to migrate to another cpu)
960 */
2e4e6a17 961 loc_cpu_entry = private->entries[raw_smp_processor_id()];
31836064
ED
962 /* ... then copy entire thing ... */
963 if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
1da177e4
LT
964 ret = -EFAULT;
965 goto free_counters;
966 }
967
968 /* FIXME: use iterator macros --RR */
969 /* ... then go back and fix counters and names */
970 for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
971 unsigned int i;
972 struct ipt_entry_match *m;
973 struct ipt_entry_target *t;
974
31836064 975 e = (struct ipt_entry *)(loc_cpu_entry + off);
1da177e4
LT
976 if (copy_to_user(userptr + off
977 + offsetof(struct ipt_entry, counters),
978 &counters[num],
979 sizeof(counters[num])) != 0) {
980 ret = -EFAULT;
981 goto free_counters;
982 }
983
984 for (i = sizeof(struct ipt_entry);
985 i < e->target_offset;
986 i += m->u.match_size) {
987 m = (void *)e + i;
988
989 if (copy_to_user(userptr + off + i
990 + offsetof(struct ipt_entry_match,
991 u.user.name),
992 m->u.kernel.match->name,
993 strlen(m->u.kernel.match->name)+1)
994 != 0) {
995 ret = -EFAULT;
996 goto free_counters;
997 }
998 }
999
1000 t = ipt_get_target(e);
1001 if (copy_to_user(userptr + off + e->target_offset
1002 + offsetof(struct ipt_entry_target,
1003 u.user.name),
1004 t->u.kernel.target->name,
1005 strlen(t->u.kernel.target->name)+1) != 0) {
1006 ret = -EFAULT;
1007 goto free_counters;
1008 }
1009 }
1010
1011 free_counters:
1012 vfree(counters);
1013 return ret;
1014}
1015
2722971c
DM
1016#ifdef CONFIG_COMPAT
1017struct compat_delta {
1018 struct compat_delta *next;
e5b5ef7d 1019 unsigned int offset;
2722971c
DM
1020 short delta;
1021};
1022
4b478248 1023static struct compat_delta *compat_offsets;
2722971c 1024
e5b5ef7d 1025static int compat_add_offset(unsigned int offset, short delta)
2722971c
DM
1026{
1027 struct compat_delta *tmp;
1028
1029 tmp = kmalloc(sizeof(struct compat_delta), GFP_KERNEL);
1030 if (!tmp)
1031 return -ENOMEM;
1032 tmp->offset = offset;
1033 tmp->delta = delta;
1034 if (compat_offsets) {
1035 tmp->next = compat_offsets->next;
1036 compat_offsets->next = tmp;
1037 } else {
1038 compat_offsets = tmp;
1039 tmp->next = NULL;
1040 }
1041 return 0;
1042}
1043
1044static void compat_flush_offsets(void)
1045{
1046 struct compat_delta *tmp, *next;
1047
1048 if (compat_offsets) {
4b478248 1049 for (tmp = compat_offsets; tmp; tmp = next) {
2722971c
DM
1050 next = tmp->next;
1051 kfree(tmp);
1052 }
1053 compat_offsets = NULL;
1054 }
1055}
1056
e5b5ef7d 1057static short compat_calc_jump(unsigned int offset)
2722971c
DM
1058{
1059 struct compat_delta *tmp;
1060 short delta;
1061
4b478248 1062 for (tmp = compat_offsets, delta = 0; tmp; tmp = tmp->next)
2722971c
DM
1063 if (tmp->offset < offset)
1064 delta += tmp->delta;
1065 return delta;
1066}
1067
9fa492cd 1068static void compat_standard_from_user(void *dst, void *src)
2722971c 1069{
9fa492cd 1070 int v = *(compat_int_t *)src;
2722971c 1071
9fa492cd
PM
1072 if (v > 0)
1073 v += compat_calc_jump(v);
1074 memcpy(dst, &v, sizeof(v));
1075}
46c5ea3c 1076
9fa492cd 1077static int compat_standard_to_user(void __user *dst, void *src)
2722971c 1078{
9fa492cd 1079 compat_int_t cv = *(int *)src;
2722971c 1080
9fa492cd
PM
1081 if (cv > 0)
1082 cv -= compat_calc_jump(cv);
1083 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
2722971c
DM
1084}
1085
1086static inline int
4b478248 1087compat_calc_match(struct ipt_entry_match *m, int *size)
2722971c 1088{
9fa492cd 1089 *size += xt_compat_match_offset(m->u.kernel.match);
2722971c
DM
1090 return 0;
1091}
1092
259d4e41 1093static int compat_calc_entry(struct ipt_entry *e,
4b478248
PM
1094 const struct xt_table_info *info,
1095 void *base, struct xt_table_info *newinfo)
2722971c
DM
1096{
1097 struct ipt_entry_target *t;
e5b5ef7d 1098 unsigned int entry_offset;
2722971c
DM
1099 int off, i, ret;
1100
30c08c41 1101 off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
2722971c
DM
1102 entry_offset = (void *)e - base;
1103 IPT_MATCH_ITERATE(e, compat_calc_match, &off);
1104 t = ipt_get_target(e);
9fa492cd 1105 off += xt_compat_target_offset(t->u.kernel.target);
2722971c
DM
1106 newinfo->size -= off;
1107 ret = compat_add_offset(entry_offset, off);
1108 if (ret)
1109 return ret;
1110
6e23ae2a 1111 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
4b478248
PM
1112 if (info->hook_entry[i] &&
1113 (e < (struct ipt_entry *)(base + info->hook_entry[i])))
2722971c 1114 newinfo->hook_entry[i] -= off;
4b478248
PM
1115 if (info->underflow[i] &&
1116 (e < (struct ipt_entry *)(base + info->underflow[i])))
2722971c
DM
1117 newinfo->underflow[i] -= off;
1118 }
1119 return 0;
1120}
1121
259d4e41 1122static int compat_table_info(const struct xt_table_info *info,
4b478248 1123 struct xt_table_info *newinfo)
2722971c
DM
1124{
1125 void *loc_cpu_entry;
2722971c
DM
1126
1127 if (!newinfo || !info)
1128 return -EINVAL;
1129
259d4e41
ED
1130 /* we dont care about newinfo->entries[] */
1131 memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
1132 newinfo->initial_entries = 0;
2722971c
DM
1133 loc_cpu_entry = info->entries[raw_smp_processor_id()];
1134 return IPT_ENTRY_ITERATE(loc_cpu_entry, info->size,
4b478248
PM
1135 compat_calc_entry, info, loc_cpu_entry,
1136 newinfo);
2722971c
DM
1137}
1138#endif
1139
1140static int get_info(void __user *user, int *len, int compat)
1141{
1142 char name[IPT_TABLE_MAXNAMELEN];
e60a13e0 1143 struct xt_table *t;
2722971c
DM
1144 int ret;
1145
1146 if (*len != sizeof(struct ipt_getinfo)) {
1147 duprintf("length %u != %u\n", *len,
1148 (unsigned int)sizeof(struct ipt_getinfo));
1149 return -EINVAL;
1150 }
1151
1152 if (copy_from_user(name, user, sizeof(name)) != 0)
1153 return -EFAULT;
1154
1155 name[IPT_TABLE_MAXNAMELEN-1] = '\0';
1156#ifdef CONFIG_COMPAT
1157 if (compat)
1158 xt_compat_lock(AF_INET);
1159#endif
1160 t = try_then_request_module(xt_find_table_lock(AF_INET, name),
4b478248 1161 "iptable_%s", name);
2722971c
DM
1162 if (t && !IS_ERR(t)) {
1163 struct ipt_getinfo info;
1164 struct xt_table_info *private = t->private;
1165
1166#ifdef CONFIG_COMPAT
1167 if (compat) {
1168 struct xt_table_info tmp;
1169 ret = compat_table_info(private, &tmp);
1170 compat_flush_offsets();
4b478248 1171 private = &tmp;
2722971c
DM
1172 }
1173#endif
1174 info.valid_hooks = t->valid_hooks;
1175 memcpy(info.hook_entry, private->hook_entry,
4b478248 1176 sizeof(info.hook_entry));
2722971c 1177 memcpy(info.underflow, private->underflow,
4b478248 1178 sizeof(info.underflow));
2722971c
DM
1179 info.num_entries = private->number;
1180 info.size = private->size;
1181 strcpy(info.name, name);
1182
1183 if (copy_to_user(user, &info, *len) != 0)
1184 ret = -EFAULT;
1185 else
1186 ret = 0;
1187
1188 xt_table_unlock(t);
1189 module_put(t->me);
1190 } else
1191 ret = t ? PTR_ERR(t) : -ENOENT;
1192#ifdef CONFIG_COMPAT
1193 if (compat)
1194 xt_compat_unlock(AF_INET);
1195#endif
1196 return ret;
1197}
1198
1199static int
1200get_entries(struct ipt_get_entries __user *uptr, int *len)
1201{
1202 int ret;
1203 struct ipt_get_entries get;
e60a13e0 1204 struct xt_table *t;
2722971c
DM
1205
1206 if (*len < sizeof(get)) {
1207 duprintf("get_entries: %u < %d\n", *len,
1208 (unsigned int)sizeof(get));
1209 return -EINVAL;
1210 }
1211 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1212 return -EFAULT;
1213 if (*len != sizeof(struct ipt_get_entries) + get.size) {
1214 duprintf("get_entries: %u != %u\n", *len,
1215 (unsigned int)(sizeof(struct ipt_get_entries) +
1216 get.size));
1217 return -EINVAL;
1218 }
1219
1220 t = xt_find_table_lock(AF_INET, get.name);
1221 if (t && !IS_ERR(t)) {
1222 struct xt_table_info *private = t->private;
1223 duprintf("t->private->number = %u\n",
1224 private->number);
1225 if (get.size == private->size)
1226 ret = copy_entries_to_user(private->size,
1227 t, uptr->entrytable);
1228 else {
1229 duprintf("get_entries: I've got %u not %u!\n",
1230 private->size,
1231 get.size);
1232 ret = -EINVAL;
1233 }
1234 module_put(t->me);
1235 xt_table_unlock(t);
1236 } else
1237 ret = t ? PTR_ERR(t) : -ENOENT;
1238
1239 return ret;
1240}
1241
1242static int
1243__do_replace(const char *name, unsigned int valid_hooks,
4b478248
PM
1244 struct xt_table_info *newinfo, unsigned int num_counters,
1245 void __user *counters_ptr)
2722971c
DM
1246{
1247 int ret;
e60a13e0 1248 struct xt_table *t;
2722971c
DM
1249 struct xt_table_info *oldinfo;
1250 struct xt_counters *counters;
1251 void *loc_cpu_old_entry;
1252
1253 ret = 0;
1254 counters = vmalloc(num_counters * sizeof(struct xt_counters));
1255 if (!counters) {
1256 ret = -ENOMEM;
1257 goto out;
1258 }
1259
1260 t = try_then_request_module(xt_find_table_lock(AF_INET, name),
1261 "iptable_%s", name);
1262 if (!t || IS_ERR(t)) {
1263 ret = t ? PTR_ERR(t) : -ENOENT;
1264 goto free_newinfo_counters_untrans;
1265 }
1266
1267 /* You lied! */
1268 if (valid_hooks != t->valid_hooks) {
1269 duprintf("Valid hook crap: %08X vs %08X\n",
1270 valid_hooks, t->valid_hooks);
1271 ret = -EINVAL;
1272 goto put_module;
1273 }
1274
1275 oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
1276 if (!oldinfo)
1277 goto put_module;
1278
1279 /* Update module usage count based on number of rules */
1280 duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
1281 oldinfo->number, oldinfo->initial_entries, newinfo->number);
1282 if ((oldinfo->number > oldinfo->initial_entries) ||
1283 (newinfo->number <= oldinfo->initial_entries))
1284 module_put(t->me);
1285 if ((oldinfo->number > oldinfo->initial_entries) &&
1286 (newinfo->number <= oldinfo->initial_entries))
1287 module_put(t->me);
1288
1289 /* Get the old counters. */
1290 get_counters(oldinfo, counters);
1291 /* Decrease module usage counts and free resource */
1292 loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
4b478248
PM
1293 IPT_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry,
1294 NULL);
2722971c
DM
1295 xt_free_table_info(oldinfo);
1296 if (copy_to_user(counters_ptr, counters,
1297 sizeof(struct xt_counters) * num_counters) != 0)
1298 ret = -EFAULT;
1299 vfree(counters);
1300 xt_table_unlock(t);
1301 return ret;
1302
1303 put_module:
1304 module_put(t->me);
1305 xt_table_unlock(t);
1306 free_newinfo_counters_untrans:
1307 vfree(counters);
1308 out:
1309 return ret;
1310}
1311
1312static int
1313do_replace(void __user *user, unsigned int len)
1314{
1315 int ret;
1316 struct ipt_replace tmp;
1317 struct xt_table_info *newinfo;
1318 void *loc_cpu_entry;
1319
1320 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1321 return -EFAULT;
1322
1323 /* Hack: Causes ipchains to give correct error msg --RR */
1324 if (len != sizeof(tmp) + tmp.size)
1325 return -ENOPROTOOPT;
1326
1327 /* overflow check */
2722971c
DM
1328 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1329 return -ENOMEM;
1330
1331 newinfo = xt_alloc_table_info(tmp.size);
1332 if (!newinfo)
1333 return -ENOMEM;
1334
1335 /* choose the copy that is our node/cpu */
1336 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1337 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1338 tmp.size) != 0) {
1339 ret = -EFAULT;
1340 goto free_newinfo;
1341 }
1342
1343 ret = translate_table(tmp.name, tmp.valid_hooks,
1344 newinfo, loc_cpu_entry, tmp.size, tmp.num_entries,
1345 tmp.hook_entry, tmp.underflow);
1346 if (ret != 0)
1347 goto free_newinfo;
1348
1349 duprintf("ip_tables: Translated table\n");
1350
4b478248
PM
1351 ret = __do_replace(tmp.name, tmp.valid_hooks, newinfo,
1352 tmp.num_counters, tmp.counters);
2722971c
DM
1353 if (ret)
1354 goto free_newinfo_untrans;
1355 return 0;
1356
1357 free_newinfo_untrans:
1358 IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry,NULL);
1359 free_newinfo:
1360 xt_free_table_info(newinfo);
1361 return ret;
1362}
1363
1364/* We're lazy, and add to the first CPU; overflow works its fey magic
1365 * and everything is OK. */
1366static inline int
1367add_counter_to_entry(struct ipt_entry *e,
1368 const struct xt_counters addme[],
1369 unsigned int *i)
1370{
1371#if 0
1372 duprintf("add_counter: Entry %u %lu/%lu + %lu/%lu\n",
1373 *i,
1374 (long unsigned int)e->counters.pcnt,
1375 (long unsigned int)e->counters.bcnt,
1376 (long unsigned int)addme[*i].pcnt,
1377 (long unsigned int)addme[*i].bcnt);
1378#endif
1379
1380 ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
1381
1382 (*i)++;
1383 return 0;
1384}
1385
1386static int
1387do_add_counters(void __user *user, unsigned int len, int compat)
1388{
1389 unsigned int i;
1390 struct xt_counters_info tmp;
1391 struct xt_counters *paddc;
1392 unsigned int num_counters;
1393 char *name;
1394 int size;
1395 void *ptmp;
e60a13e0 1396 struct xt_table *t;
2722971c
DM
1397 struct xt_table_info *private;
1398 int ret = 0;
1399 void *loc_cpu_entry;
1400#ifdef CONFIG_COMPAT
1401 struct compat_xt_counters_info compat_tmp;
1402
1403 if (compat) {
1404 ptmp = &compat_tmp;
1405 size = sizeof(struct compat_xt_counters_info);
1406 } else
1407#endif
1408 {
1409 ptmp = &tmp;
1410 size = sizeof(struct xt_counters_info);
1411 }
1412
1413 if (copy_from_user(ptmp, user, size) != 0)
1414 return -EFAULT;
1415
1416#ifdef CONFIG_COMPAT
1417 if (compat) {
1418 num_counters = compat_tmp.num_counters;
1419 name = compat_tmp.name;
1420 } else
1421#endif
1422 {
1423 num_counters = tmp.num_counters;
1424 name = tmp.name;
1425 }
1426
1427 if (len != size + num_counters * sizeof(struct xt_counters))
1428 return -EINVAL;
1429
1430 paddc = vmalloc_node(len - size, numa_node_id());
1431 if (!paddc)
1432 return -ENOMEM;
1433
1434 if (copy_from_user(paddc, user + size, len - size) != 0) {
1435 ret = -EFAULT;
1436 goto free;
1437 }
1438
1439 t = xt_find_table_lock(AF_INET, name);
1440 if (!t || IS_ERR(t)) {
1441 ret = t ? PTR_ERR(t) : -ENOENT;
1442 goto free;
1443 }
1444
1445 write_lock_bh(&t->lock);
1446 private = t->private;
1447 if (private->number != num_counters) {
1448 ret = -EINVAL;
1449 goto unlock_up_free;
1450 }
1451
1452 i = 0;
1453 /* Choose the copy that is on our node */
1454 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1455 IPT_ENTRY_ITERATE(loc_cpu_entry,
1456 private->size,
1457 add_counter_to_entry,
1458 paddc,
1459 &i);
1460 unlock_up_free:
1461 write_unlock_bh(&t->lock);
1462 xt_table_unlock(t);
1463 module_put(t->me);
1464 free:
1465 vfree(paddc);
1466
1467 return ret;
1468}
1469
1470#ifdef CONFIG_COMPAT
1471struct compat_ipt_replace {
1472 char name[IPT_TABLE_MAXNAMELEN];
1473 u32 valid_hooks;
1474 u32 num_entries;
1475 u32 size;
6e23ae2a
PM
1476 u32 hook_entry[NF_INET_NUMHOOKS];
1477 u32 underflow[NF_INET_NUMHOOKS];
2722971c
DM
1478 u32 num_counters;
1479 compat_uptr_t counters; /* struct ipt_counters * */
1480 struct compat_ipt_entry entries[0];
1481};
1482
a18aa31b
PM
1483static int
1484compat_copy_entry_to_user(struct ipt_entry *e, void __user **dstptr,
1485 compat_uint_t *size, struct xt_counters *counters,
1486 unsigned int *i)
2722971c 1487{
3e597c60 1488 struct ipt_entry_target *t;
2722971c
DM
1489 struct compat_ipt_entry __user *ce;
1490 u_int16_t target_offset, next_offset;
1491 compat_uint_t origsize;
1492 int ret;
1493
1494 ret = -EFAULT;
1495 origsize = *size;
1496 ce = (struct compat_ipt_entry __user *)*dstptr;
7800007c 1497 if (copy_to_user(ce, e, sizeof(struct ipt_entry)))
2722971c
DM
1498 goto out;
1499
a18aa31b
PM
1500 if (copy_to_user(&ce->counters, &counters[*i], sizeof(counters[*i])))
1501 goto out;
1502
2722971c 1503 *dstptr += sizeof(struct compat_ipt_entry);
30c08c41
PM
1504 *size -= sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
1505
ac8e27fd 1506 ret = IPT_MATCH_ITERATE(e, xt_compat_match_to_user, dstptr, size);
2722971c
DM
1507 target_offset = e->target_offset - (origsize - *size);
1508 if (ret)
1509 goto out;
1510 t = ipt_get_target(e);
9fa492cd 1511 ret = xt_compat_target_to_user(t, dstptr, size);
2722971c
DM
1512 if (ret)
1513 goto out;
1514 ret = -EFAULT;
1515 next_offset = e->next_offset - (origsize - *size);
7800007c 1516 if (put_user(target_offset, &ce->target_offset))
2722971c 1517 goto out;
7800007c 1518 if (put_user(next_offset, &ce->next_offset))
2722971c 1519 goto out;
a18aa31b
PM
1520
1521 (*i)++;
2722971c
DM
1522 return 0;
1523out:
1524 return ret;
1525}
1526
1527static inline int
4c1b52bc 1528compat_find_calc_match(struct ipt_entry_match *m,
4b478248
PM
1529 const char *name,
1530 const struct ipt_ip *ip,
1531 unsigned int hookmask,
1532 int *size, int *i)
2722971c 1533{
6709dbbb 1534 struct xt_match *match;
2722971c
DM
1535
1536 match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name,
4b478248 1537 m->u.user.revision),
2722971c
DM
1538 "ipt_%s", m->u.user.name);
1539 if (IS_ERR(match) || !match) {
1540 duprintf("compat_check_calc_match: `%s' not found\n",
4b478248 1541 m->u.user.name);
2722971c
DM
1542 return match ? PTR_ERR(match) : -ENOENT;
1543 }
1544 m->u.kernel.match = match;
9fa492cd 1545 *size += xt_compat_match_offset(match);
2722971c
DM
1546
1547 (*i)++;
1548 return 0;
1549}
1550
4c1b52bc
DM
1551static inline int
1552compat_release_match(struct ipt_entry_match *m, unsigned int *i)
1553{
1554 if (i && (*i)-- == 0)
1555 return 1;
1556
1557 module_put(m->u.kernel.match->me);
1558 return 0;
1559}
1560
1561static inline int
73cd598d 1562compat_release_entry(struct compat_ipt_entry *e, unsigned int *i)
4c1b52bc
DM
1563{
1564 struct ipt_entry_target *t;
1565
1566 if (i && (*i)-- == 0)
1567 return 1;
1568
1569 /* Cleanup all matches */
73cd598d
PM
1570 COMPAT_IPT_MATCH_ITERATE(e, compat_release_match, NULL);
1571 t = compat_ipt_get_target(e);
4c1b52bc
DM
1572 module_put(t->u.kernel.target->me);
1573 return 0;
1574}
1575
2722971c 1576static inline int
73cd598d 1577check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
4b478248
PM
1578 struct xt_table_info *newinfo,
1579 unsigned int *size,
1580 unsigned char *base,
1581 unsigned char *limit,
1582 unsigned int *hook_entries,
1583 unsigned int *underflows,
1584 unsigned int *i,
1585 const char *name)
2722971c
DM
1586{
1587 struct ipt_entry_target *t;
6709dbbb 1588 struct xt_target *target;
e5b5ef7d 1589 unsigned int entry_offset;
2722971c
DM
1590 int ret, off, h, j;
1591
1592 duprintf("check_compat_entry_size_and_hooks %p\n", e);
1593 if ((unsigned long)e % __alignof__(struct compat_ipt_entry) != 0
1594 || (unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit) {
1595 duprintf("Bad offset %p, limit = %p\n", e, limit);
1596 return -EINVAL;
1597 }
1598
1599 if (e->next_offset < sizeof(struct compat_ipt_entry) +
4b478248 1600 sizeof(struct compat_xt_entry_target)) {
2722971c
DM
1601 duprintf("checking: element %p size %u\n",
1602 e, e->next_offset);
1603 return -EINVAL;
1604 }
1605
73cd598d
PM
1606 /* For purposes of check_entry casting the compat entry is fine */
1607 ret = check_entry((struct ipt_entry *)e, name);
a96be246
DM
1608 if (ret)
1609 return ret;
590bdf7f 1610
30c08c41 1611 off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
2722971c
DM
1612 entry_offset = (void *)e - (void *)base;
1613 j = 0;
73cd598d
PM
1614 ret = COMPAT_IPT_MATCH_ITERATE(e, compat_find_calc_match, name,
1615 &e->ip, e->comefrom, &off, &j);
2722971c 1616 if (ret != 0)
4c1b52bc 1617 goto release_matches;
2722971c 1618
73cd598d 1619 t = compat_ipt_get_target(e);
2722971c 1620 target = try_then_request_module(xt_find_target(AF_INET,
4b478248
PM
1621 t->u.user.name,
1622 t->u.user.revision),
2722971c
DM
1623 "ipt_%s", t->u.user.name);
1624 if (IS_ERR(target) || !target) {
a96be246 1625 duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
4b478248 1626 t->u.user.name);
2722971c 1627 ret = target ? PTR_ERR(target) : -ENOENT;
4c1b52bc 1628 goto release_matches;
2722971c
DM
1629 }
1630 t->u.kernel.target = target;
1631
9fa492cd 1632 off += xt_compat_target_offset(target);
2722971c
DM
1633 *size += off;
1634 ret = compat_add_offset(entry_offset, off);
1635 if (ret)
1636 goto out;
1637
1638 /* Check hooks & underflows */
6e23ae2a 1639 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
2722971c
DM
1640 if ((unsigned char *)e - base == hook_entries[h])
1641 newinfo->hook_entry[h] = hook_entries[h];
1642 if ((unsigned char *)e - base == underflows[h])
1643 newinfo->underflow[h] = underflows[h];
1644 }
1645
1646 /* Clear counters and comefrom */
73cd598d 1647 memset(&e->counters, 0, sizeof(e->counters));
2722971c
DM
1648 e->comefrom = 0;
1649
1650 (*i)++;
1651 return 0;
bec71b16 1652
2722971c 1653out:
bec71b16 1654 module_put(t->u.kernel.target->me);
4c1b52bc
DM
1655release_matches:
1656 IPT_MATCH_ITERATE(e, compat_release_match, &j);
2722971c
DM
1657 return ret;
1658}
1659
4b478248 1660static int
73cd598d 1661compat_copy_entry_from_user(struct compat_ipt_entry *e, void **dstptr,
4b478248
PM
1662 unsigned int *size, const char *name,
1663 struct xt_table_info *newinfo, unsigned char *base)
2722971c
DM
1664{
1665 struct ipt_entry_target *t;
6709dbbb 1666 struct xt_target *target;
2722971c
DM
1667 struct ipt_entry *de;
1668 unsigned int origsize;
920b868a 1669 int ret, h;
2722971c
DM
1670
1671 ret = 0;
1672 origsize = *size;
1673 de = (struct ipt_entry *)*dstptr;
1674 memcpy(de, e, sizeof(struct ipt_entry));
73cd598d 1675 memcpy(&de->counters, &e->counters, sizeof(e->counters));
2722971c 1676
73cd598d 1677 *dstptr += sizeof(struct ipt_entry);
30c08c41
PM
1678 *size += sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
1679
73cd598d
PM
1680 ret = COMPAT_IPT_MATCH_ITERATE(e, xt_compat_match_from_user,
1681 dstptr, size);
2722971c 1682 if (ret)
f6677f43 1683 return ret;
2722971c 1684 de->target_offset = e->target_offset - (origsize - *size);
73cd598d 1685 t = compat_ipt_get_target(e);
2722971c 1686 target = t->u.kernel.target;
9fa492cd 1687 xt_compat_target_from_user(t, dstptr, size);
2722971c
DM
1688
1689 de->next_offset = e->next_offset - (origsize - *size);
6e23ae2a 1690 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
2722971c
DM
1691 if ((unsigned char *)de - base < newinfo->hook_entry[h])
1692 newinfo->hook_entry[h] -= origsize - *size;
1693 if ((unsigned char *)de - base < newinfo->underflow[h])
1694 newinfo->underflow[h] -= origsize - *size;
1695 }
f6677f43
DM
1696 return ret;
1697}
1698
4c1b52bc 1699static inline int compat_check_entry(struct ipt_entry *e, const char *name,
4b478248 1700 unsigned int *i)
f6677f43 1701{
4c1b52bc 1702 int j, ret;
f6677f43 1703
4c1b52bc
DM
1704 j = 0;
1705 ret = IPT_MATCH_ITERATE(e, check_match, name, &e->ip, e->comefrom, &j);
f6677f43 1706 if (ret)
4c1b52bc
DM
1707 goto cleanup_matches;
1708
1709 ret = check_target(e, name);
1710 if (ret)
1711 goto cleanup_matches;
f6677f43 1712
4c1b52bc
DM
1713 (*i)++;
1714 return 0;
1715
1716 cleanup_matches:
1717 IPT_MATCH_ITERATE(e, cleanup_match, &j);
1718 return ret;
f6677f43
DM
1719}
1720
1da177e4 1721static int
2722971c 1722translate_compat_table(const char *name,
4b478248
PM
1723 unsigned int valid_hooks,
1724 struct xt_table_info **pinfo,
1725 void **pentry0,
1726 unsigned int total_size,
1727 unsigned int number,
1728 unsigned int *hook_entries,
1729 unsigned int *underflows)
1da177e4 1730{
920b868a 1731 unsigned int i, j;
2722971c
DM
1732 struct xt_table_info *newinfo, *info;
1733 void *pos, *entry0, *entry1;
1734 unsigned int size;
1da177e4 1735 int ret;
1da177e4 1736
2722971c
DM
1737 info = *pinfo;
1738 entry0 = *pentry0;
1739 size = total_size;
1740 info->number = number;
1741
1742 /* Init all hooks to impossible value. */
6e23ae2a 1743 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
2722971c
DM
1744 info->hook_entry[i] = 0xFFFFFFFF;
1745 info->underflow[i] = 0xFFFFFFFF;
1746 }
1747
1748 duprintf("translate_compat_table: size %u\n", info->size);
920b868a 1749 j = 0;
2722971c
DM
1750 xt_compat_lock(AF_INET);
1751 /* Walk through entries, checking offsets. */
73cd598d
PM
1752 ret = COMPAT_IPT_ENTRY_ITERATE(entry0, total_size,
1753 check_compat_entry_size_and_hooks,
1754 info, &size, entry0,
1755 entry0 + total_size,
1756 hook_entries, underflows, &j, name);
2722971c
DM
1757 if (ret != 0)
1758 goto out_unlock;
1759
1760 ret = -EINVAL;
920b868a 1761 if (j != number) {
2722971c 1762 duprintf("translate_compat_table: %u not %u entries\n",
920b868a 1763 j, number);
2722971c
DM
1764 goto out_unlock;
1765 }
1766
1767 /* Check hooks all assigned */
6e23ae2a 1768 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
2722971c
DM
1769 /* Only hooks which are valid */
1770 if (!(valid_hooks & (1 << i)))
1771 continue;
1772 if (info->hook_entry[i] == 0xFFFFFFFF) {
1773 duprintf("Invalid hook entry %u %u\n",
1774 i, hook_entries[i]);
1775 goto out_unlock;
1da177e4 1776 }
2722971c
DM
1777 if (info->underflow[i] == 0xFFFFFFFF) {
1778 duprintf("Invalid underflow %u %u\n",
1779 i, underflows[i]);
1780 goto out_unlock;
1781 }
1782 }
1783
1784 ret = -ENOMEM;
1785 newinfo = xt_alloc_table_info(size);
1786 if (!newinfo)
1787 goto out_unlock;
1788
1789 newinfo->number = number;
6e23ae2a 1790 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
2722971c
DM
1791 newinfo->hook_entry[i] = info->hook_entry[i];
1792 newinfo->underflow[i] = info->underflow[i];
1793 }
1794 entry1 = newinfo->entries[raw_smp_processor_id()];
1795 pos = entry1;
4b478248 1796 size = total_size;
73cd598d
PM
1797 ret = COMPAT_IPT_ENTRY_ITERATE(entry0, total_size,
1798 compat_copy_entry_from_user, &pos, &size,
1799 name, newinfo, entry1);
2722971c
DM
1800 compat_flush_offsets();
1801 xt_compat_unlock(AF_INET);
1802 if (ret)
1803 goto free_newinfo;
1804
1805 ret = -ELOOP;
1806 if (!mark_source_chains(newinfo, valid_hooks, entry1))
1807 goto free_newinfo;
1808
4c1b52bc 1809 i = 0;
f6677f43 1810 ret = IPT_ENTRY_ITERATE(entry1, newinfo->size, compat_check_entry,
4b478248 1811 name, &i);
4c1b52bc
DM
1812 if (ret) {
1813 j -= i;
73cd598d
PM
1814 COMPAT_IPT_ENTRY_ITERATE_CONTINUE(entry0, newinfo->size, i,
1815 compat_release_entry, &j);
4c1b52bc
DM
1816 IPT_ENTRY_ITERATE(entry1, newinfo->size, cleanup_entry, &i);
1817 xt_free_table_info(newinfo);
1818 return ret;
1819 }
f6677f43 1820
2722971c 1821 /* And one copy for every other CPU */
fb1bb34d 1822 for_each_possible_cpu(i)
2722971c
DM
1823 if (newinfo->entries[i] && newinfo->entries[i] != entry1)
1824 memcpy(newinfo->entries[i], entry1, newinfo->size);
1825
1826 *pinfo = newinfo;
1827 *pentry0 = entry1;
1828 xt_free_table_info(info);
1829 return 0;
1da177e4 1830
2722971c
DM
1831free_newinfo:
1832 xt_free_table_info(newinfo);
1833out:
73cd598d 1834 COMPAT_IPT_ENTRY_ITERATE(entry0, total_size, compat_release_entry, &j);
1da177e4 1835 return ret;
2722971c 1836out_unlock:
ef4512e7 1837 compat_flush_offsets();
2722971c
DM
1838 xt_compat_unlock(AF_INET);
1839 goto out;
1da177e4
LT
1840}
1841
1842static int
2722971c 1843compat_do_replace(void __user *user, unsigned int len)
1da177e4
LT
1844{
1845 int ret;
2722971c
DM
1846 struct compat_ipt_replace tmp;
1847 struct xt_table_info *newinfo;
1848 void *loc_cpu_entry;
1da177e4
LT
1849
1850 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1851 return -EFAULT;
1852
1853 /* Hack: Causes ipchains to give correct error msg --RR */
1854 if (len != sizeof(tmp) + tmp.size)
1855 return -ENOPROTOOPT;
1856
ee4bb818 1857 /* overflow check */
259d4e41 1858 if (tmp.size >= INT_MAX / num_possible_cpus())
ee4bb818
KK
1859 return -ENOMEM;
1860 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1861 return -ENOMEM;
1862
2e4e6a17 1863 newinfo = xt_alloc_table_info(tmp.size);
1da177e4
LT
1864 if (!newinfo)
1865 return -ENOMEM;
1866
31836064
ED
1867 /* choose the copy that is our node/cpu */
1868 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1869 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1da177e4
LT
1870 tmp.size) != 0) {
1871 ret = -EFAULT;
1872 goto free_newinfo;
1873 }
1874
2722971c 1875 ret = translate_compat_table(tmp.name, tmp.valid_hooks,
4b478248
PM
1876 &newinfo, &loc_cpu_entry, tmp.size,
1877 tmp.num_entries, tmp.hook_entry,
1878 tmp.underflow);
2722971c 1879 if (ret != 0)
1da177e4 1880 goto free_newinfo;
1da177e4 1881
2722971c 1882 duprintf("compat_do_replace: Translated table\n");
1da177e4 1883
4b478248
PM
1884 ret = __do_replace(tmp.name, tmp.valid_hooks, newinfo,
1885 tmp.num_counters, compat_ptr(tmp.counters));
2722971c
DM
1886 if (ret)
1887 goto free_newinfo_untrans;
1888 return 0;
1da177e4 1889
2722971c 1890 free_newinfo_untrans:
4b478248 1891 IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
2722971c
DM
1892 free_newinfo:
1893 xt_free_table_info(newinfo);
1894 return ret;
1895}
1da177e4 1896
2722971c
DM
1897static int
1898compat_do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user,
4b478248 1899 unsigned int len)
2722971c
DM
1900{
1901 int ret;
1da177e4 1902
2722971c
DM
1903 if (!capable(CAP_NET_ADMIN))
1904 return -EPERM;
1da177e4 1905
2722971c
DM
1906 switch (cmd) {
1907 case IPT_SO_SET_REPLACE:
1908 ret = compat_do_replace(user, len);
1909 break;
1da177e4 1910
2722971c
DM
1911 case IPT_SO_SET_ADD_COUNTERS:
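	/* the trailing 1 tells do_add_counters() the counter data
	 * arrives in the compat (32-bit) layout
	 */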
1912 ret = do_add_counters(user, len, 1);
1913 break;
1914
1915 default:
 1916 duprintf("compat_do_ipt_set_ctl: unknown request %i\n", cmd);
1917 ret = -EINVAL;
1918 }
1da177e4 1919
1da177e4
LT
1920 return ret;
1921}
1922
4b478248 1923struct compat_ipt_get_entries {
2722971c
DM
1924 char name[IPT_TABLE_MAXNAMELEN];
1925 compat_uint_t size;
1926 struct compat_ipt_entry entrytable[0];
1927};
1da177e4 1928
4b478248
PM
1929static int
1930compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
1931 void __user *userptr)
2722971c 1932{
2722971c
DM
1933 struct xt_counters *counters;
1934 struct xt_table_info *private = table->private;
1935 void __user *pos;
1936 unsigned int size;
1937 int ret = 0;
1938 void *loc_cpu_entry;
a18aa31b 1939 unsigned int i = 0;
1da177e4 1940
2722971c
DM
1941 counters = alloc_counters(table);
1942 if (IS_ERR(counters))
1943 return PTR_ERR(counters);
1944
1945 /* choose the copy that is on our node/cpu, ...
 1946 * This choice is lazy (because the current thread is
1947 * allowed to migrate to another cpu)
1948 */
1949 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1950 pos = userptr;
1951 size = total_size;
1952 ret = IPT_ENTRY_ITERATE(loc_cpu_entry, total_size,
a18aa31b
PM
1953 compat_copy_entry_to_user,
1954 &pos, &size, counters, &i);
2722971c 1955
2722971c
DM
1956 vfree(counters);
1957 return ret;
1da177e4
LT
1958}
1959
1960static int
2722971c 1961compat_get_entries(struct compat_ipt_get_entries __user *uptr, int *len)
1da177e4 1962{
2722971c
DM
1963 int ret;
1964 struct compat_ipt_get_entries get;
e60a13e0 1965 struct xt_table *t;
1da177e4 1966
2722971c
DM
1967 if (*len < sizeof(get)) {
1968 duprintf("compat_get_entries: %u < %u\n",
4b478248 1969 *len, (unsigned int)sizeof(get));
1da177e4 1970 return -EINVAL;
2722971c 1971 }
1da177e4 1972
2722971c
DM
1973 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1974 return -EFAULT;
1da177e4 1975
2722971c
DM
1976 if (*len != sizeof(struct compat_ipt_get_entries) + get.size) {
1977 duprintf("compat_get_entries: %u != %u\n", *len,
4b478248
PM
1978 (unsigned int)(sizeof(struct compat_ipt_get_entries) +
1979 get.size));
2722971c 1980 return -EINVAL;
1da177e4
LT
1981 }
1982
2722971c
DM
1983 xt_compat_lock(AF_INET);
1984 t = xt_find_table_lock(AF_INET, get.name);
1985 if (t && !IS_ERR(t)) {
1986 struct xt_table_info *private = t->private;
1987 struct xt_table_info info;
1988 duprintf("t->private->number = %u\n",
1989 private->number);
1990 ret = compat_table_info(private, &info);
1991 if (!ret && get.size == info.size) {
1992 ret = compat_copy_entries_to_user(private->size,
4b478248 1993 t, uptr->entrytable);
2722971c
DM
1994 } else if (!ret) {
1995 duprintf("compat_get_entries: I've got %u not %u!\n",
1996 private->size,
1997 get.size);
1998 ret = -EINVAL;
1999 }
2000 compat_flush_offsets();
2001 module_put(t->me);
2002 xt_table_unlock(t);
2003 } else
1da177e4 2004 ret = t ? PTR_ERR(t) : -ENOENT;
1da177e4 2005
2722971c
DM
2006 xt_compat_unlock(AF_INET);
2007 return ret;
2008}
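/*
 * Locking note: computing the compat view (compat_table_info) and copying
 * entries back to userspace both stash kernel-to-compat offset deltas in
 * global xt compat state, so the whole lookup runs under
 * xt_compat_lock(AF_INET) and must call compat_flush_offsets() to discard
 * that scratch data before the lock is dropped.
 */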
1da177e4 2009
79030ed0
PM
2010static int do_ipt_get_ctl(struct sock *, int, void __user *, int *);
2011
2722971c
DM
2012static int
2013compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2014{
2015 int ret;
1da177e4 2016
82fac054
BS
2017 if (!capable(CAP_NET_ADMIN))
2018 return -EPERM;
2019
2722971c
DM
2020 switch (cmd) {
2021 case IPT_SO_GET_INFO:
2022 ret = get_info(user, len, 1);
2023 break;
2024 case IPT_SO_GET_ENTRIES:
2025 ret = compat_get_entries(user, len);
2026 break;
2027 default:
79030ed0 2028 ret = do_ipt_get_ctl(sk, cmd, user, len);
2722971c 2029 }
1da177e4
LT
2030 return ret;
2031}
2722971c 2032#endif
1da177e4
LT
2033
2034static int
2035do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
2036{
2037 int ret;
2038
2039 if (!capable(CAP_NET_ADMIN))
2040 return -EPERM;
2041
2042 switch (cmd) {
2043 case IPT_SO_SET_REPLACE:
2044 ret = do_replace(user, len);
2045 break;
2046
2047 case IPT_SO_SET_ADD_COUNTERS:
2722971c 2048 ret = do_add_counters(user, len, 0);
1da177e4
LT
2049 break;
2050
2051 default:
2052 duprintf("do_ipt_set_ctl: unknown request %i\n", cmd);
2053 ret = -EINVAL;
2054 }
2055
2056 return ret;
2057}
2058
2059static int
2060do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2061{
2062 int ret;
2063
2064 if (!capable(CAP_NET_ADMIN))
2065 return -EPERM;
2066
2067 switch (cmd) {
2722971c
DM
2068 case IPT_SO_GET_INFO:
2069 ret = get_info(user, len, 0);
2070 break;
1da177e4 2071
2722971c
DM
2072 case IPT_SO_GET_ENTRIES:
2073 ret = get_entries(user, len);
1da177e4 2074 break;
1da177e4
LT
2075
2076 case IPT_SO_GET_REVISION_MATCH:
2077 case IPT_SO_GET_REVISION_TARGET: {
2078 struct ipt_get_revision rev;
2e4e6a17 2079 int target;
1da177e4
LT
2080
2081 if (*len != sizeof(rev)) {
2082 ret = -EINVAL;
2083 break;
2084 }
2085 if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
2086 ret = -EFAULT;
2087 break;
2088 }
2089
2090 if (cmd == IPT_SO_GET_REVISION_TARGET)
2e4e6a17 2091 target = 1;
1da177e4 2092 else
2e4e6a17 2093 target = 0;
1da177e4 2094
2e4e6a17
HW
2095 try_then_request_module(xt_find_revision(AF_INET, rev.name,
2096 rev.revision,
2097 target, &ret),
1da177e4
LT
2098 "ipt_%s", rev.name);
2099 break;
2100 }
2101
2102 default:
2103 duprintf("do_ipt_get_ctl: unknown request %i\n", cmd);
2104 ret = -EINVAL;
2105 }
2106
2107 return ret;
2108}
2109
2e4e6a17 2110int ipt_register_table(struct xt_table *table, const struct ipt_replace *repl)
1da177e4
LT
2111{
2112 int ret;
2e4e6a17 2113 struct xt_table_info *newinfo;
259d4e41 2114 struct xt_table_info bootstrap
1da177e4 2115 = { 0, 0, 0, { 0 }, { 0 }, { } };
31836064 2116 void *loc_cpu_entry;
1da177e4 2117
2e4e6a17 2118 newinfo = xt_alloc_table_info(repl->size);
1da177e4
LT
2119 if (!newinfo)
2120 return -ENOMEM;
2121
31836064
ED
2122 /* choose the copy on our node/cpu
 2123 * but don't care about preemption
2124 */
2125 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
2126 memcpy(loc_cpu_entry, repl->entries, repl->size);
1da177e4
LT
2127
2128 ret = translate_table(table->name, table->valid_hooks,
31836064 2129 newinfo, loc_cpu_entry, repl->size,
1da177e4
LT
2130 repl->num_entries,
2131 repl->hook_entry,
2132 repl->underflow);
2133 if (ret != 0) {
2e4e6a17 2134 xt_free_table_info(newinfo);
1da177e4
LT
2135 return ret;
2136 }
2137
da298d3a
PM
2138 ret = xt_register_table(table, &bootstrap, newinfo);
2139 if (ret != 0) {
2e4e6a17 2140 xt_free_table_info(newinfo);
1da177e4
LT
2141 return ret;
2142 }
2143
2e4e6a17 2144 return 0;
1da177e4
LT
2145}
2146
e60a13e0 2147void ipt_unregister_table(struct xt_table *table)
1da177e4 2148{
2e4e6a17 2149 struct xt_table_info *private;
31836064
ED
2150 void *loc_cpu_entry;
2151
e905a9ed 2152 private = xt_unregister_table(table);
1da177e4
LT
2153
2154 /* Decrease module usage counts and free resources */
2e4e6a17
HW
2155 loc_cpu_entry = private->entries[raw_smp_processor_id()];
2156 IPT_ENTRY_ITERATE(loc_cpu_entry, private->size, cleanup_entry, NULL);
2157 xt_free_table_info(private);
1da177e4
LT
2158}
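/*
 * Usage sketch (illustrative, not from this file): a table module such as
 * iptable_filter carries a built-in ipt_replace template -- one standard
 * entry per hook plus a terminating error entry -- and registers it from
 * module init, undoing the registration on exit:
 *
 *	static int __init iptable_filter_init(void)
 *	{
 *		return ipt_register_table(&packet_filter, &initial_table.repl);
 *	}
 *
 *	static void __exit iptable_filter_fini(void)
 *	{
 *		ipt_unregister_table(&packet_filter);
 *	}
 *
 * packet_filter and initial_table are that module's xt_table and template;
 * treat the names as placeholders.
 */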
2159
 2160/* Returns true if the type and code are matched by the range, false otherwise */
1d93a9cb 2161static inline bool
1da177e4
LT
2162icmp_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
2163 u_int8_t type, u_int8_t code,
1d93a9cb 2164 bool invert)
1da177e4
LT
2165{
2166 return ((test_type == 0xFF) || (type == test_type && code >= min_code && code <= max_code))
2167 ^ invert;
2168}
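/*
 * Examples of the predicate above: "--icmp-type any" is built as test_type
 * 0xFF and matches every ICMP message; "--icmp-type echo-request" becomes
 * test_type 8 with code range 0..0xFF, so only the type has to match; a
 * "!" on the option sets IPT_ICMP_INV, which arrives here as invert and
 * flips the result through the XOR.
 */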
2169
1d93a9cb 2170static bool
1da177e4
LT
2171icmp_match(const struct sk_buff *skb,
2172 const struct net_device *in,
2173 const struct net_device *out,
c4986734 2174 const struct xt_match *match,
1da177e4
LT
2175 const void *matchinfo,
2176 int offset,
2e4e6a17 2177 unsigned int protoff,
cff533ac 2178 bool *hotdrop)
1da177e4
LT
2179{
2180 struct icmphdr _icmph, *ic;
2181 const struct ipt_icmp *icmpinfo = matchinfo;
2182
2183 /* Must not be a fragment. */
2184 if (offset)
1d93a9cb 2185 return false;
1da177e4 2186
2e4e6a17 2187 ic = skb_header_pointer(skb, protoff, sizeof(_icmph), &_icmph);
1da177e4
LT
2188 if (ic == NULL) {
2189 /* We've been asked to examine this packet, and we
2190 * can't. Hence, no choice but to drop.
2191 */
2192 duprintf("Dropping evil ICMP tinygram.\n");
cff533ac 2193 *hotdrop = true;
1d93a9cb 2194 return false;
1da177e4
LT
2195 }
2196
2197 return icmp_type_code_match(icmpinfo->type,
2198 icmpinfo->code[0],
2199 icmpinfo->code[1],
2200 ic->type, ic->code,
2201 !!(icmpinfo->invflags&IPT_ICMP_INV));
2202}
2203
2204/* Called when user tries to insert an entry of this type. */
ccb79bdc 2205static bool
1da177e4 2206icmp_checkentry(const char *tablename,
2e4e6a17 2207 const void *info,
c4986734 2208 const struct xt_match *match,
1da177e4 2209 void *matchinfo,
1da177e4
LT
2210 unsigned int hook_mask)
2211{
2212 const struct ipt_icmp *icmpinfo = matchinfo;
2213
1d5cd909
PM
2214 /* Must specify no unknown invflags */
2215 return !(icmpinfo->invflags & ~IPT_ICMP_INV);
1da177e4
LT
2216}
2217
2218/* The built-in targets: standard (NULL) and error. */
9f15c530 2219static struct xt_target ipt_standard_target __read_mostly = {
1da177e4 2220 .name = IPT_STANDARD_TARGET,
1d5cd909 2221 .targetsize = sizeof(int),
a45049c5 2222 .family = AF_INET,
2722971c 2223#ifdef CONFIG_COMPAT
9fa492cd
PM
2224 .compatsize = sizeof(compat_int_t),
2225 .compat_from_user = compat_standard_from_user,
2226 .compat_to_user = compat_standard_to_user,
2722971c 2227#endif
1da177e4
LT
2228};
2229
9f15c530 2230static struct xt_target ipt_error_target __read_mostly = {
1da177e4
LT
2231 .name = IPT_ERROR_TARGET,
2232 .target = ipt_error,
1d5cd909 2233 .targetsize = IPT_FUNCTION_MAXNAMELEN,
a45049c5 2234 .family = AF_INET,
1da177e4
LT
2235};
2236
2237static struct nf_sockopt_ops ipt_sockopts = {
2238 .pf = PF_INET,
2239 .set_optmin = IPT_BASE_CTL,
2240 .set_optmax = IPT_SO_SET_MAX+1,
2241 .set = do_ipt_set_ctl,
2722971c
DM
2242#ifdef CONFIG_COMPAT
2243 .compat_set = compat_do_ipt_set_ctl,
2244#endif
1da177e4
LT
2245 .get_optmin = IPT_BASE_CTL,
2246 .get_optmax = IPT_SO_GET_MAX+1,
2247 .get = do_ipt_get_ctl,
2722971c
DM
2248#ifdef CONFIG_COMPAT
2249 .compat_get = compat_do_ipt_get_ctl,
2250#endif
16fcec35 2251 .owner = THIS_MODULE,
1da177e4
LT
2252};
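/*
 * Illustrative only: these optmin/optmax ranges are what userspace iptables
 * talks to.  Reading a ruleset is a two-step getsockopt() on a raw socket,
 * e.g. (error handling omitted):
 *
 *	struct ipt_getinfo info = { .name = "filter" };
 *	socklen_t len = sizeof(info);
 *	int fd = socket(AF_INET, SOCK_RAW, IPPROTO_RAW);
 *
 *	getsockopt(fd, IPPROTO_IP, IPT_SO_GET_INFO, &info, &len);
 *	// then allocate sizeof(struct ipt_get_entries) + info.size
 *	// and fetch the rules with IPT_SO_GET_ENTRIES
 */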
2253
9f15c530 2254static struct xt_match icmp_matchstruct __read_mostly = {
1da177e4 2255 .name = "icmp",
1d5cd909
PM
2256 .match = icmp_match,
2257 .matchsize = sizeof(struct ipt_icmp),
2258 .proto = IPPROTO_ICMP,
a45049c5 2259 .family = AF_INET,
1d5cd909 2260 .checkentry = icmp_checkentry,
1da177e4
LT
2261};
2262
65b4b4e8 2263static int __init ip_tables_init(void)
1da177e4
LT
2264{
2265 int ret;
2266
0eff66e6
PM
2267 ret = xt_proto_init(AF_INET);
2268 if (ret < 0)
2269 goto err1;
2e4e6a17 2270
1da177e4 2271 /* Noone else will be downing sem now, so we won't sleep */
0eff66e6
PM
2272 ret = xt_register_target(&ipt_standard_target);
2273 if (ret < 0)
2274 goto err2;
2275 ret = xt_register_target(&ipt_error_target);
2276 if (ret < 0)
2277 goto err3;
2278 ret = xt_register_match(&icmp_matchstruct);
2279 if (ret < 0)
2280 goto err4;
1da177e4
LT
2281
2282 /* Register setsockopt */
2283 ret = nf_register_sockopt(&ipt_sockopts);
0eff66e6
PM
2284 if (ret < 0)
2285 goto err5;
1da177e4 2286
0236e667 2287 printk(KERN_INFO "ip_tables: (C) 2000-2006 Netfilter Core Team\n");
1da177e4 2288 return 0;
0eff66e6
PM
2289
2290err5:
2291 xt_unregister_match(&icmp_matchstruct);
2292err4:
2293 xt_unregister_target(&ipt_error_target);
2294err3:
2295 xt_unregister_target(&ipt_standard_target);
2296err2:
2297 xt_proto_fini(AF_INET);
2298err1:
2299 return ret;
1da177e4
LT
2300}
2301
65b4b4e8 2302static void __exit ip_tables_fini(void)
1da177e4
LT
2303{
2304 nf_unregister_sockopt(&ipt_sockopts);
2e4e6a17 2305
a45049c5
PNA
2306 xt_unregister_match(&icmp_matchstruct);
2307 xt_unregister_target(&ipt_error_target);
2308 xt_unregister_target(&ipt_standard_target);
2e4e6a17
HW
2309
2310 xt_proto_fini(AF_INET);
1da177e4
LT
2311}
2312
2313EXPORT_SYMBOL(ipt_register_table);
2314EXPORT_SYMBOL(ipt_unregister_table);
1da177e4 2315EXPORT_SYMBOL(ipt_do_table);
65b4b4e8
AM
2316module_init(ip_tables_init);
2317module_exit(ip_tables_fini);