87d369244bd9433c431e38c782a5b2690d5efa92
[deliverable/linux.git] / net / ipv4 / netfilter / ip_tables.c
1 /*
2 * Packet matching code.
3 *
4 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
5 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
#include <linux/cache.h>
#include <linux/capability.h>
#include <linux/skbuff.h>
#include <linux/kmod.h>
#include <linux/vmalloc.h>
#include <linux/netdevice.h>
#include <linux/module.h>
#include <linux/icmp.h>
#include <net/ip.h>
#include <net/compat.h>
#include <asm/uaccess.h>
#include <linux/mutex.h>
#include <linux/proc_fs.h>
#include <linux/err.h>
#include <linux/cpumask.h>
#include <linux/string.h>

#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv4/ip_tables.h>
29
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
MODULE_DESCRIPTION("IPv4 packet filter");

/* Compile-time debug switches; all disabled in production builds. */
/*#define DEBUG_IP_FIREWALL*/
/*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
/*#define DEBUG_IP_FIREWALL_USER*/

/* dprintf: packet-path debug output, compiled away unless DEBUG_IP_FIREWALL. */
#ifdef DEBUG_IP_FIREWALL
#define dprintf(format, args...)  printk(format , ## args)
#else
#define dprintf(format, args...)
#endif

/* duprintf: user/ruleset-validation debug output, compiled away unless
 * DEBUG_IP_FIREWALL_USER. */
#ifdef DEBUG_IP_FIREWALL_USER
#define duprintf(format, args...) printk(format , ## args)
#else
#define duprintf(format, args...)
#endif

/* Soft assertion: logs location on failure but does not stop execution. */
#ifdef CONFIG_NETFILTER_DEBUG
#define IP_NF_ASSERT(x)						\
do {								\
	if (!(x))						\
		printk("IP_NF_ASSERT: %s:%s:%u\n",		\
		       __FUNCTION__, __FILE__, __LINE__);	\
} while(0)
#else
#define IP_NF_ASSERT(x)
#endif

#if 0
/* All the better to debug you with... */
#define static
#define inline
#endif
66
67 /*
68 We keep a set of rules for each CPU, so we can avoid write-locking
69 them in the softirq when updating the counters and therefore
70 only need to read-lock in the softirq; doing a write_lock_bh() in user
71 context stops packets coming through and allows user context to read
72 the counters or update the rules.
73
74 Hence the start of any table is given by get_table() below. */
75
76 /* Returns whether matches rule or not. */
/* Returns whether matches rule or not.
 *
 * Checks the packet's IP header against the rule's address masks,
 * in/out interface names, protocol and fragment flag.  Each test can be
 * inverted via the rule's invflags ("!" on the iptables command line).
 * @isfrag is non-zero for non-first fragments, which can never match
 * protocol-specific tests made elsewhere. */
static inline int
ip_packet_match(const struct iphdr *ip,
		const char *indev,
		const char *outdev,
		const struct ipt_ip *ipinfo,
		int isfrag)
{
	size_t i;
	unsigned long ret;

/* Evaluate condition @bool, XOR-ing in the rule's inversion bit @invflg. */
#define FWINV(bool,invflg) ((bool) ^ !!(ipinfo->invflags & invflg))

	if (FWINV((ip->saddr&ipinfo->smsk.s_addr) != ipinfo->src.s_addr,
		  IPT_INV_SRCIP)
	    || FWINV((ip->daddr&ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr,
		     IPT_INV_DSTIP)) {
		dprintf("Source or dest mismatch.\n");

		dprintf("SRC: %u.%u.%u.%u. Mask: %u.%u.%u.%u. Target: %u.%u.%u.%u.%s\n",
			NIPQUAD(ip->saddr),
			NIPQUAD(ipinfo->smsk.s_addr),
			NIPQUAD(ipinfo->src.s_addr),
			ipinfo->invflags & IPT_INV_SRCIP ? " (INV)" : "");
		dprintf("DST: %u.%u.%u.%u Mask: %u.%u.%u.%u Target: %u.%u.%u.%u.%s\n",
			NIPQUAD(ip->daddr),
			NIPQUAD(ipinfo->dmsk.s_addr),
			NIPQUAD(ipinfo->dst.s_addr),
			ipinfo->invflags & IPT_INV_DSTIP ? " (INV)" : "");
		return 0;
	}

	/* Look for ifname matches; this should unroll nicely.
	 * Interface names are compared one unsigned long at a time under the
	 * per-rule wildcard mask (names are IFNAMSIZ-padded, long-aligned). */
	for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
		ret |= (((const unsigned long *)indev)[i]
			^ ((const unsigned long *)ipinfo->iniface)[i])
			& ((const unsigned long *)ipinfo->iniface_mask)[i];
	}

	if (FWINV(ret != 0, IPT_INV_VIA_IN)) {
		dprintf("VIA in mismatch (%s vs %s).%s\n",
			indev, ipinfo->iniface,
			ipinfo->invflags&IPT_INV_VIA_IN ?" (INV)":"");
		return 0;
	}

	/* Same word-at-a-time comparison for the output interface. */
	for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
		ret |= (((const unsigned long *)outdev)[i]
			^ ((const unsigned long *)ipinfo->outiface)[i])
			& ((const unsigned long *)ipinfo->outiface_mask)[i];
	}

	if (FWINV(ret != 0, IPT_INV_VIA_OUT)) {
		dprintf("VIA out mismatch (%s vs %s).%s\n",
			outdev, ipinfo->outiface,
			ipinfo->invflags&IPT_INV_VIA_OUT ?" (INV)":"");
		return 0;
	}

	/* Check specific protocol; proto == 0 means "any protocol". */
	if (ipinfo->proto
	    && FWINV(ip->protocol != ipinfo->proto, IPT_INV_PROTO)) {
		dprintf("Packet protocol %hi does not match %hi.%s\n",
			ip->protocol, ipinfo->proto,
			ipinfo->invflags&IPT_INV_PROTO ? " (INV)":"");
		return 0;
	}

	/* If we have a fragment rule but the packet is not a fragment
	 * then we return zero */
	if (FWINV((ipinfo->flags&IPT_F_FRAG) && !isfrag, IPT_INV_FRAG)) {
		dprintf("Fragment rule but not fragment.%s\n",
			ipinfo->invflags & IPT_INV_FRAG ? " (INV)" : "");
		return 0;
	}

	return 1;
}
154
155 static inline bool
156 ip_checkentry(const struct ipt_ip *ip)
157 {
158 if (ip->flags & ~IPT_F_MASK) {
159 duprintf("Unknown flag bits set: %08X\n",
160 ip->flags & ~IPT_F_MASK);
161 return false;
162 }
163 if (ip->invflags & ~IPT_INV_MASK) {
164 duprintf("Unknown invflag bits set: %08X\n",
165 ip->invflags & ~IPT_INV_MASK);
166 return false;
167 }
168 return true;
169 }
170
/* Target handler for the built-in ERROR target: a correct ruleset never
 * executes it at packet time, so hitting it means the table is corrupt.
 * Logs (rate-limited) the error message carried in @targinfo and drops. */
static unsigned int
ipt_error(struct sk_buff *skb,
	  const struct net_device *in,
	  const struct net_device *out,
	  unsigned int hooknum,
	  const struct xt_target *target,
	  const void *targinfo)
{
	if (net_ratelimit())
		printk("ip_tables: error: `%s'\n", (char *)targinfo);

	return NF_DROP;
}
184
185 static inline
186 bool do_match(struct ipt_entry_match *m,
187 const struct sk_buff *skb,
188 const struct net_device *in,
189 const struct net_device *out,
190 int offset,
191 bool *hotdrop)
192 {
193 /* Stop iteration if it doesn't match */
194 if (!m->u.kernel.match->match(skb, in, out, m->u.kernel.match, m->data,
195 offset, ip_hdrlen(skb), hotdrop))
196 return true;
197 else
198 return false;
199 }
200
/* Rules are stored as a byte blob; translate a byte offset from the
 * table base into an entry pointer. */
static inline struct ipt_entry *
get_entry(void *base, unsigned int offset)
{
	return (struct ipt_entry *)((char *)base + offset);
}
206
207 /* All zeroes == unconditional rule. */
208 static inline int
209 unconditional(const struct ipt_ip *ip)
210 {
211 unsigned int i;
212
213 for (i = 0; i < sizeof(*ip)/sizeof(__u32); i++)
214 if (((__u32 *)ip)[i])
215 return 0;
216
217 return 1;
218 }
219
#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
/* Human-readable chain names for the built-in hooks, used in TRACE output. */
static const char *hooknames[] = {
	[NF_INET_PRE_ROUTING]		= "PREROUTING",
	[NF_INET_LOCAL_IN]		= "INPUT",
	[NF_INET_FORWARD]		= "FORWARD",
	[NF_INET_LOCAL_OUT]		= "OUTPUT",
	[NF_INET_POST_ROUTING]		= "POSTROUTING",
};

/* What kind of rule the traced packet is currently positioned on. */
enum nf_ip_trace_comments {
	NF_IP_TRACE_COMMENT_RULE,
	NF_IP_TRACE_COMMENT_RETURN,
	NF_IP_TRACE_COMMENT_POLICY,
};

static const char *comments[] = {
	[NF_IP_TRACE_COMMENT_RULE]	= "rule",
	[NF_IP_TRACE_COMMENT_RETURN]	= "return",
	[NF_IP_TRACE_COMMENT_POLICY]	= "policy",
};

/* Log parameters for TRACE messages: syslog level 4 (warning), all
 * logging options enabled. */
static struct nf_loginfo trace_loginfo = {
	.type = NF_LOG_TYPE_LOG,
	.u = {
		.log = {
			.level = 4,
			.logflags = NF_LOG_MASK,
		},
	},
};
251
/* IPT_ENTRY_ITERATE callback used by trace_packet(): walk the table from
 * the hook entry, tracking the current chain name and rule number until
 * the matched entry @e is reached.
 *
 * @s: entry currently visited by the iterator
 * @e: the entry that matched the traced packet (search target)
 * Returns 1 (stop iterating) once @s == @e, with *chainname, *comment
 * and *rulenum describing the matched rule's position. */
static inline int
get_chainname_rulenum(struct ipt_entry *s, struct ipt_entry *e,
		      char *hookname, char **chainname,
		      char **comment, unsigned int *rulenum)
{
	struct ipt_standard_target *t = (void *)ipt_get_target(s);

	if (strcmp(t->target.u.kernel.target->name, IPT_ERROR_TARGET) == 0) {
		/* Head of user chain: ERROR target with chainname */
		*chainname = t->target.data;
		(*rulenum) = 0;
	} else if (s == e) {
		(*rulenum)++;

		if (s->target_offset == sizeof(struct ipt_entry)
		    && strcmp(t->target.u.kernel.target->name,
			      IPT_STANDARD_TARGET) == 0
		    && t->verdict < 0
		    && unconditional(&s->ip)) {
			/* Tail of chains: STANDARD target (return/policy) */
			*comment = *chainname == hookname
				? (char *)comments[NF_IP_TRACE_COMMENT_POLICY]
				: (char *)comments[NF_IP_TRACE_COMMENT_RETURN];
		}
		return 1;
	} else
		(*rulenum)++;

	return 0;
}
282
/* Emit a TRACE log line ("TRACE: table:chain:comment:rulenum") for a
 * packet marked with skb->nf_trace, identifying which rule @e matched.
 * Walks this CPU's copy of the table to recover chain name and rule
 * number via get_chainname_rulenum(). */
static void trace_packet(struct sk_buff *skb,
			 unsigned int hook,
			 const struct net_device *in,
			 const struct net_device *out,
			 char *tablename,
			 struct xt_table_info *private,
			 struct ipt_entry *e)
{
	void *table_base;
	struct ipt_entry *root;
	char *hookname, *chainname, *comment;
	unsigned int rulenum = 0;

	/* Per-CPU rule copy; caller already holds the table read lock. */
	table_base = (void *)private->entries[smp_processor_id()];
	root = get_entry(table_base, private->hook_entry[hook]);

	hookname = chainname = (char *)hooknames[hook];
	comment = (char *)comments[NF_IP_TRACE_COMMENT_RULE];

	IPT_ENTRY_ITERATE(root,
			  private->size - private->hook_entry[hook],
			  get_chainname_rulenum,
			  e, hookname, &chainname, &comment, &rulenum);

	nf_log_packet(AF_INET, hook, skb, in, out, &trace_loginfo,
		      "TRACE: %s:%s:%s:%u ",
		      tablename, chainname, comment, rulenum);
}
#endif
312
/* Returns one of the generic firewall policies, like NF_ACCEPT.
 *
 * Core rule-traversal engine: walks this CPU's copy of @table starting
 * at the entry for @hook, matching each rule against @skb and executing
 * its target until an absolute verdict is reached.  Jumps to user chains
 * save a return pointer ("back") in the next entry's comefrom field;
 * IPT_RETURN pops it again.  Runs under the table read lock so counter
 * updates on the per-CPU copy need no further locking. */
unsigned int
ipt_do_table(struct sk_buff *skb,
	     unsigned int hook,
	     const struct net_device *in,
	     const struct net_device *out,
	     struct xt_table *table)
{
	static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
	u_int16_t offset;
	struct iphdr *ip;
	u_int16_t datalen;
	bool hotdrop = false;
	/* Initializing verdict to NF_DROP keeps gcc happy. */
	unsigned int verdict = NF_DROP;
	const char *indev, *outdev;
	void *table_base;
	struct ipt_entry *e, *back;
	struct xt_table_info *private;

	/* Initialization */
	ip = ip_hdr(skb);
	datalen = skb->len - ip->ihl * 4;
	/* nulldevname is long-aligned so ip_packet_match can compare it
	 * word-at-a-time like a real interface name. */
	indev = in ? in->name : nulldevname;
	outdev = out ? out->name : nulldevname;
	/* We handle fragments by dealing with the first fragment as
	 * if it was a normal packet.  All other fragments are treated
	 * normally, except that they will NEVER match rules that ask
	 * things we don't know, ie. tcp syn flag or ports).  If the
	 * rule is also a fragment-specific rule, non-fragments won't
	 * match it. */
	offset = ntohs(ip->frag_off) & IP_OFFSET;

	read_lock_bh(&table->lock);
	IP_NF_ASSERT(table->valid_hooks & (1 << hook));
	private = table->private;
	table_base = (void *)private->entries[smp_processor_id()];
	e = get_entry(table_base, private->hook_entry[hook]);

	/* For return from builtin chain */
	back = get_entry(table_base, private->underflow[hook]);

	do {
		IP_NF_ASSERT(e);
		IP_NF_ASSERT(back);
		if (ip_packet_match(ip, indev, outdev, &e->ip, offset)) {
			struct ipt_entry_target *t;

			/* Non-zero means a match extension said "no". */
			if (IPT_MATCH_ITERATE(e, do_match,
					      skb, in, out,
					      offset, &hotdrop) != 0)
				goto no_match;

			ADD_COUNTER(e->counters, ntohs(ip->tot_len), 1);

			t = ipt_get_target(e);
			IP_NF_ASSERT(t->u.kernel.target);

#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
			/* The packet is traced: log it */
			if (unlikely(skb->nf_trace))
				trace_packet(skb, hook, in, out,
					     table->name, private, e);
#endif
			/* Standard target? (NULL target function means the
			 * verdict is encoded directly in the entry.) */
			if (!t->u.kernel.target->target) {
				int v;

				v = ((struct ipt_standard_target *)t)->verdict;
				if (v < 0) {
					/* Pop from stack? */
					if (v != IPT_RETURN) {
						/* Absolute verdict, encoded
						 * as -verdict - 1. */
						verdict = (unsigned)(-v) - 1;
						break;
					}
					e = back;
					back = get_entry(table_base,
							 back->comefrom);
					continue;
				}
				if (table_base + v != (void *)e + e->next_offset
				    && !(e->ip.flags & IPT_F_GOTO)) {
					/* Save old back ptr in next entry */
					struct ipt_entry *next
						= (void *)e + e->next_offset;
					next->comefrom
						= (void *)back - table_base;
					/* set back pointer to next entry */
					back = next;
				}

				e = get_entry(table_base, v);
			} else {
				/* Targets which reenter must return
				   abs. verdicts */
#ifdef CONFIG_NETFILTER_DEBUG
				((struct ipt_entry *)table_base)->comefrom
					= 0xeeeeeeec;
#endif
				verdict = t->u.kernel.target->target(skb,
								     in, out,
								     hook,
								     t->u.kernel.target,
								     t->data);

#ifdef CONFIG_NETFILTER_DEBUG
				if (((struct ipt_entry *)table_base)->comefrom
				    != 0xeeeeeeec
				    && verdict == IPT_CONTINUE) {
					printk("Target %s reentered!\n",
					       t->u.kernel.target->name);
					verdict = NF_DROP;
				}
				((struct ipt_entry *)table_base)->comefrom
					= 0x57acc001;
#endif
				/* Target might have changed stuff. */
				ip = ip_hdr(skb);
				datalen = skb->len - ip->ihl * 4;

				if (verdict == IPT_CONTINUE)
					e = (void *)e + e->next_offset;
				else
					/* Verdict */
					break;
			}
		} else {

		no_match:
			e = (void *)e + e->next_offset;
		}
	} while (!hotdrop);

	read_unlock_bh(&table->lock);

#ifdef DEBUG_ALLOW_ALL
	return NF_ACCEPT;
#else
	if (hotdrop)
		return NF_DROP;
	else return verdict;
#endif
}
457
/* Figures out from what hook each rule can be called: returns 0 if
   there are loops.  Puts hook bitmask in comefrom.

   Depth-first walk of the ruleset from every valid hook.  Instead of
   recursing, the walk stores its back pointer in each entry's unused
   counters.pcnt field (restored to 0 on the way back out), and uses
   comefrom both as the per-hook reachability bitmask and — via bit
   NF_INET_NUMHOOKS — as the "currently on the walk stack" marker that
   detects loops. */
static int
mark_source_chains(struct xt_table_info *newinfo,
		   unsigned int valid_hooks, void *entry0)
{
	unsigned int hook;

	/* No recursion; use packet counter to save back ptrs (reset
	   to 0 as we leave), and comefrom to save source hook bitmask */
	for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
		unsigned int pos = newinfo->hook_entry[hook];
		struct ipt_entry *e
			= (struct ipt_entry *)(entry0 + pos);

		if (!(valid_hooks & (1 << hook)))
			continue;

		/* Set initial back pointer. */
		e->counters.pcnt = pos;

		for (;;) {
			struct ipt_standard_target *t
				= (void *)ipt_get_target(e);
			int visited = e->comefrom & (1 << hook);

			/* Still on the current walk stack: a loop. */
			if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
				printk("iptables: loop hook %u pos %u %08X.\n",
				       hook, pos, e->comefrom);
				return 0;
			}
			e->comefrom
				|= ((1 << hook) | (1 << NF_INET_NUMHOOKS));

			/* Unconditional return/END. */
			if ((e->target_offset == sizeof(struct ipt_entry)
			     && (strcmp(t->target.u.user.name,
					IPT_STANDARD_TARGET) == 0)
			     && t->verdict < 0
			     && unconditional(&e->ip)) || visited) {
				unsigned int oldpos, size;

				if (t->verdict < -NF_MAX_VERDICT - 1) {
					duprintf("mark_source_chains: bad "
						"negative verdict (%i)\n",
								t->verdict);
					return 0;
				}

				/* Return: backtrack through the last
				   big jump. */
				do {
					/* Leaving this entry: clear the
					 * on-stack marker. */
					e->comefrom ^= (1<<NF_INET_NUMHOOKS);
#ifdef DEBUG_IP_FIREWALL_USER
					if (e->comefrom
					    & (1 << NF_INET_NUMHOOKS)) {
						duprintf("Back unset "
							 "on hook %u "
							 "rule %u\n",
							 hook, pos);
					}
#endif
					oldpos = pos;
					pos = e->counters.pcnt;
					e->counters.pcnt = 0;

					/* We're at the start. */
					if (pos == oldpos)
						goto next;

					e = (struct ipt_entry *)
						(entry0 + pos);
				} while (oldpos == pos + e->next_offset);

				/* Move along one */
				size = e->next_offset;
				e = (struct ipt_entry *)
					(entry0 + pos + size);
				e->counters.pcnt = pos;
				pos += size;
			} else {
				int newpos = t->verdict;

				if (strcmp(t->target.u.user.name,
					   IPT_STANDARD_TARGET) == 0
				    && newpos >= 0) {
					if (newpos > newinfo->size -
						sizeof(struct ipt_entry)) {
						duprintf("mark_source_chains: "
							"bad verdict (%i)\n",
								newpos);
						return 0;
					}
					/* This a jump; chase it. */
					duprintf("Jump rule %u -> %u\n",
						 pos, newpos);
				} else {
					/* ... this is a fallthru */
					newpos = pos + e->next_offset;
				}
				e = (struct ipt_entry *)
					(entry0 + newpos);
				e->counters.pcnt = pos;
				pos = newpos;
			}
		}
		next:
		duprintf("Finished chain %u\n", hook);
	}
	return 1;
}
569
/* IPT_MATCH_ITERATE callback: release one match's checkentry resources
 * and module reference.  When @i is non-NULL it bounds the cleanup to the
 * first *i matches (those that were successfully checked before a later
 * one failed); note (*i)-- tests the value BEFORE decrementing, so *i
 * matches are cleaned, and returning 1 stops the iteration. */
static inline int
cleanup_match(struct ipt_entry_match *m, unsigned int *i)
{
	if (i && (*i)-- == 0)
		return 1;

	if (m->u.kernel.match->destroy)
		m->u.kernel.match->destroy(m->u.kernel.match, m->data);
	module_put(m->u.kernel.match->me);
	return 0;
}
581
/* Basic sanity checks on one user-supplied rule: valid ipt_ip flags and
 * internally consistent offsets (target header and full target must fit
 * between target_offset and next_offset).  Returns 0 or -EINVAL. */
static inline int
check_entry(struct ipt_entry *e, const char *name)
{
	struct ipt_entry_target *t;

	if (!ip_checkentry(&e->ip)) {
		duprintf("ip_tables: ip check failed %p %s.\n", e, name);
		return -EINVAL;
	}

	/* Target header must lie within the entry... */
	if (e->target_offset + sizeof(struct ipt_entry_target) > e->next_offset)
		return -EINVAL;

	/* ...and so must the target's declared payload. */
	t = ipt_get_target(e);
	if (e->target_offset + t->u.target_size > e->next_offset)
		return -EINVAL;

	return 0;
}
601
/* Validate one rule match whose extension has already been resolved into
 * m->u.kernel.match: run the generic xt_check_match() validation, then
 * the extension's own checkentry hook if it has one.  On success the
 * counter *i of successfully-checked matches is bumped (it drives partial
 * cleanup on later failure).  Returns 0 or a negative errno. */
static inline int check_match(struct ipt_entry_match *m, const char *name,
			      const struct ipt_ip *ip, unsigned int hookmask,
			      unsigned int *i)
{
	struct xt_match *match;
	int ret;

	match = m->u.kernel.match;
	ret = xt_check_match(match, AF_INET, m->u.match_size - sizeof(*m),
			     name, hookmask, ip->proto,
			     ip->invflags & IPT_INV_PROTO);
	if (!ret && m->u.kernel.match->checkentry
	    && !m->u.kernel.match->checkentry(name, ip, match, m->data,
					      hookmask)) {
		duprintf("ip_tables: check failed for `%s'.\n",
			 m->u.kernel.match->name);
		ret = -EINVAL;
	}
	if (!ret)
		(*i)++;
	return ret;
}
624
625 static inline int
626 find_check_match(struct ipt_entry_match *m,
627 const char *name,
628 const struct ipt_ip *ip,
629 unsigned int hookmask,
630 unsigned int *i)
631 {
632 struct xt_match *match;
633 int ret;
634
635 match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name,
636 m->u.user.revision),
637 "ipt_%s", m->u.user.name);
638 if (IS_ERR(match) || !match) {
639 duprintf("find_check_match: `%s' not found\n", m->u.user.name);
640 return match ? PTR_ERR(match) : -ENOENT;
641 }
642 m->u.kernel.match = match;
643
644 ret = check_match(m, name, ip, hookmask, i);
645 if (ret)
646 goto err;
647
648 return 0;
649 err:
650 module_put(m->u.kernel.match->me);
651 return ret;
652 }
653
/* Validate one rule's target whose extension has already been resolved
 * into t->u.kernel.target: run the generic xt_check_target() validation,
 * then the target's own checkentry hook if present.  e->comefrom carries
 * the hook mask computed by mark_source_chains().  Returns 0 or errno. */
static inline int check_target(struct ipt_entry *e, const char *name)
{
	struct ipt_entry_target *t;
	struct xt_target *target;
	int ret;

	t = ipt_get_target(e);
	target = t->u.kernel.target;
	ret = xt_check_target(target, AF_INET, t->u.target_size - sizeof(*t),
			      name, e->comefrom, e->ip.proto,
			      e->ip.invflags & IPT_INV_PROTO);
	if (!ret && t->u.kernel.target->checkentry
	    && !t->u.kernel.target->checkentry(name, e, target,
					       t->data, e->comefrom)) {
		duprintf("ip_tables: check failed for `%s'.\n",
			 t->u.kernel.target->name);
		ret = -EINVAL;
	}
	return ret;
}
674
/* Fully validate one rule: basic offsets, then resolve+check every match
 * extension, then resolve+check the target.  On any failure all module
 * references already taken for this rule are released (j counts the
 * matches successfully checked so far).  *i counts fully-checked rules
 * for the caller.  Returns 0 or a negative errno. */
static inline int
find_check_entry(struct ipt_entry *e, const char *name, unsigned int size,
		 unsigned int *i)
{
	struct ipt_entry_target *t;
	struct xt_target *target;
	int ret;
	unsigned int j;

	ret = check_entry(e, name);
	if (ret)
		return ret;

	j = 0;
	ret = IPT_MATCH_ITERATE(e, find_check_match, name, &e->ip,
				e->comefrom, &j);
	if (ret != 0)
		goto cleanup_matches;

	t = ipt_get_target(e);
	target = try_then_request_module(xt_find_target(AF_INET,
							t->u.user.name,
							t->u.user.revision),
					 "ipt_%s", t->u.user.name);
	if (IS_ERR(target) || !target) {
		duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
		ret = target ? PTR_ERR(target) : -ENOENT;
		goto cleanup_matches;
	}
	t->u.kernel.target = target;

	ret = check_target(e, name);
	if (ret)
		goto err;

	(*i)++;
	return 0;
 err:
	module_put(t->u.kernel.target->me);
 cleanup_matches:
	IPT_MATCH_ITERATE(e, cleanup_match, &j);
	return ret;
}
718
/* First-pass walk over a user-supplied blob: verify each entry's
 * alignment, bounds and minimal size, record which entries sit exactly
 * at the user-declared hook entry/underflow offsets, and reset the
 * kernel-owned counters/comefrom fields.  *i counts entries seen.
 * Returns 0 or -EINVAL. */
static inline int
check_entry_size_and_hooks(struct ipt_entry *e,
			   struct xt_table_info *newinfo,
			   unsigned char *base,
			   unsigned char *limit,
			   const unsigned int *hook_entries,
			   const unsigned int *underflows,
			   unsigned int *i)
{
	unsigned int h;

	if ((unsigned long)e % __alignof__(struct ipt_entry) != 0
	    || (unsigned char *)e + sizeof(struct ipt_entry) >= limit) {
		duprintf("Bad offset %p\n", e);
		return -EINVAL;
	}

	/* Must at least hold the fixed header plus a target header. */
	if (e->next_offset
	    < sizeof(struct ipt_entry) + sizeof(struct ipt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	/* Check hooks & underflows */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h])
			newinfo->underflow[h] = underflows[h];
	}

	/* FIXME: underflows must be unconditional, standard verdicts
	   < 0 (not IPT_RETURN). --RR */

	/* Clear counters and comefrom */
	e->counters = ((struct xt_counters) { 0, 0 });
	e->comefrom = 0;

	(*i)++;
	return 0;
}
761
/* IPT_ENTRY_ITERATE callback: undo find_check_entry() for one rule —
 * destroy all its matches, then its target, dropping the module
 * references.  When @i is non-NULL only the first *i rules are cleaned
 * (same pre-decrement bounding as cleanup_match); returning 1 stops
 * the iteration. */
static inline int
cleanup_entry(struct ipt_entry *e, unsigned int *i)
{
	struct ipt_entry_target *t;

	if (i && (*i)-- == 0)
		return 1;

	/* Cleanup all matches */
	IPT_MATCH_ITERATE(e, cleanup_match, NULL);
	t = ipt_get_target(e);
	if (t->u.kernel.target->destroy)
		t->u.kernel.target->destroy(t->u.kernel.target, t->data);
	module_put(t->u.kernel.target->me);
	return 0;
}
778
/* Checks and translates the user-supplied table segment (held in
   newinfo).

   Pipeline: (1) size/offset/hook sanity walk, (2) verify every valid
   hook got an entry point and underflow, (3) loop detection via
   mark_source_chains(), (4) per-rule extension resolution and
   validation, (5) replicate the verified blob to every other CPU's
   copy.  On failure in step 4, the i rules already checked are cleaned
   up.  Returns 0 or a negative errno (-ELOOP for ruleset loops). */
static int
translate_table(const char *name,
		unsigned int valid_hooks,
		struct xt_table_info *newinfo,
		void *entry0,
		unsigned int size,
		unsigned int number,
		const unsigned int *hook_entries,
		const unsigned int *underflows)
{
	unsigned int i;
	int ret;

	newinfo->size = size;
	newinfo->number = number;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = 0xFFFFFFFF;
		newinfo->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_table: size %u\n", newinfo->size);
	i = 0;
	/* Walk through entries, checking offsets. */
	ret = IPT_ENTRY_ITERATE(entry0, newinfo->size,
				check_entry_size_and_hooks,
				newinfo,
				entry0,
				entry0 + size,
				hook_entries, underflows, &i);
	if (ret != 0)
		return ret;

	if (i != number) {
		duprintf("translate_table: %u not %u entries\n",
			 i, number);
		return -EINVAL;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(valid_hooks & (1 << i)))
			continue;
		if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, hook_entries[i]);
			return -EINVAL;
		}
		if (newinfo->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, underflows[i]);
			return -EINVAL;
		}
	}

	if (!mark_source_chains(newinfo, valid_hooks, entry0))
		return -ELOOP;

	/* Finally, each sanity check must pass */
	i = 0;
	ret = IPT_ENTRY_ITERATE(entry0, newinfo->size,
				find_check_entry, name, size, &i);

	if (ret != 0) {
		IPT_ENTRY_ITERATE(entry0, newinfo->size,
				  cleanup_entry, &i);
		return ret;
	}

	/* And one copy for every other CPU */
	for_each_possible_cpu(i) {
		if (newinfo->entries[i] && newinfo->entries[i] != entry0)
			memcpy(newinfo->entries[i], entry0, newinfo->size);
	}

	return ret;
}
860
/* Gets counters. */
/* IPT_ENTRY_ITERATE callback: accumulate one entry's per-CPU counters
 * into total[*i] and advance the index. */
static inline int
add_entry_to_counter(const struct ipt_entry *e,
		     struct xt_counters total[],
		     unsigned int *i)
{
	ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);

	(*i)++;
	return 0;
}
872
/* IPT_ENTRY_ITERATE callback: initialize total[*i] from one entry's
 * counters (overwrite rather than add — used for the first CPU so the
 * output array needs no prior memset) and advance the index. */
static inline int
set_entry_to_counter(const struct ipt_entry *e,
		     struct ipt_counters total[],
		     unsigned int *i)
{
	SET_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);

	(*i)++;
	return 0;
}
883
/* Sum the per-CPU rule counters of table info @t into @counters
 * (one slot per rule).  The current CPU's values are copied with SET,
 * every other CPU's are ADDed on top.  Caller must hold the table
 * write lock to get a consistent snapshot. */
static void
get_counters(const struct xt_table_info *t,
	     struct xt_counters counters[])
{
	unsigned int cpu;
	unsigned int i;
	unsigned int curcpu;

	/* Instead of clearing (by a previous call to memset())
	 * the counters and using adds, we set the counters
	 * with data used by 'current' CPU
	 * We dont care about preemption here.
	 */
	curcpu = raw_smp_processor_id();

	i = 0;
	IPT_ENTRY_ITERATE(t->entries[curcpu],
			  t->size,
			  set_entry_to_counter,
			  counters,
			  &i);

	for_each_possible_cpu(cpu) {
		if (cpu == curcpu)
			continue;
		i = 0;
		IPT_ENTRY_ITERATE(t->entries[cpu],
				  t->size,
				  add_entry_to_counter,
				  counters,
				  &i);
	}
}
917
/* Allocate an array of one xt_counters per rule and fill it with an
 * atomic snapshot of the table's counters (taken under the write lock
 * so no CPU is updating them concurrently).  Returns the vmalloc'd
 * array — caller must vfree() it — or ERR_PTR(-ENOMEM). */
static inline struct xt_counters * alloc_counters(struct xt_table *table)
{
	unsigned int countersize;
	struct xt_counters *counters;
	struct xt_table_info *private = table->private;

	/* We need atomic snapshot of counters: rest doesn't change
	   (other than comefrom, which userspace doesn't care
	   about). */
	countersize = sizeof(struct xt_counters) * private->number;
	counters = vmalloc_node(countersize, numa_node_id());

	if (counters == NULL)
		return ERR_PTR(-ENOMEM);

	/* First, sum counters... */
	write_lock_bh(&table->lock);
	get_counters(private, counters);
	write_unlock_bh(&table->lock);

	return counters;
}
940
/* Copy the whole ruleset blob to userspace, then patch it up in place:
 * insert the snapshotted counters for each rule and replace the kernel
 * pointers in each match/target with the extension's user-visible name.
 * Returns 0 or -EFAULT. */
static int
copy_entries_to_user(unsigned int total_size,
		     struct xt_table *table,
		     void __user *userptr)
{
	unsigned int off, num;
	struct ipt_entry *e;
	struct xt_counters *counters;
	struct xt_table_info *private = table->private;
	int ret = 0;
	void *loc_cpu_entry;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	/* choose the copy that is on our node/cpu, ...
	 * This choice is lazy (because current thread is
	 * allowed to migrate to another cpu)
	 */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	/* ... then copy entire thing ... */
	if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
		ret = -EFAULT;
		goto free_counters;
	}

	/* FIXME: use iterator macros --RR */
	/* ... then go back and fix counters and names */
	for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
		unsigned int i;
		struct ipt_entry_match *m;
		struct ipt_entry_target *t;

		e = (struct ipt_entry *)(loc_cpu_entry + off);
		/* Overwrite the (kernel-internal) counters field with the
		 * snapshot taken above. */
		if (copy_to_user(userptr + off
				 + offsetof(struct ipt_entry, counters),
				 &counters[num],
				 sizeof(counters[num])) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}

		/* Walk the matches between the header and the target,
		 * rewriting each kernel pointer back to the match name. */
		for (i = sizeof(struct ipt_entry);
		     i < e->target_offset;
		     i += m->u.match_size) {
			m = (void *)e + i;

			if (copy_to_user(userptr + off + i
					 + offsetof(struct ipt_entry_match,
						    u.user.name),
					 m->u.kernel.match->name,
					 strlen(m->u.kernel.match->name)+1)
			    != 0) {
				ret = -EFAULT;
				goto free_counters;
			}
		}

		t = ipt_get_target(e);
		if (copy_to_user(userptr + off + e->target_offset
				 + offsetof(struct ipt_entry_target,
					    u.user.name),
				 t->u.kernel.target->name,
				 strlen(t->u.kernel.target->name)+1) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}
	}

 free_counters:
	vfree(counters);
	return ret;
}
1015
#ifdef CONFIG_COMPAT
/* One node of the (singly linked) list recording, per rule offset, how
 * much smaller the 32-bit compat layout is than the native one.  Used to
 * translate jump targets between layouts. */
struct compat_delta {
	struct compat_delta *next;
	unsigned int offset;	/* native offset of the rule */
	short delta;		/* bytes saved up to and incl. this rule */
};

/* Head of the delta list; built under xt_compat_lock(AF_INET).
 * NOTE(review): a single global — assumes only one compat translation
 * is in flight at a time, serialized by that lock. */
static struct compat_delta *compat_offsets = NULL;
1024
1025 static int compat_add_offset(unsigned int offset, short delta)
1026 {
1027 struct compat_delta *tmp;
1028
1029 tmp = kmalloc(sizeof(struct compat_delta), GFP_KERNEL);
1030 if (!tmp)
1031 return -ENOMEM;
1032 tmp->offset = offset;
1033 tmp->delta = delta;
1034 if (compat_offsets) {
1035 tmp->next = compat_offsets->next;
1036 compat_offsets->next = tmp;
1037 } else {
1038 compat_offsets = tmp;
1039 tmp->next = NULL;
1040 }
1041 return 0;
1042 }
1043
1044 static void compat_flush_offsets(void)
1045 {
1046 struct compat_delta *tmp, *next;
1047
1048 if (compat_offsets) {
1049 for(tmp = compat_offsets; tmp; tmp = next) {
1050 next = tmp->next;
1051 kfree(tmp);
1052 }
1053 compat_offsets = NULL;
1054 }
1055 }
1056
1057 static short compat_calc_jump(unsigned int offset)
1058 {
1059 struct compat_delta *tmp;
1060 short delta;
1061
1062 for(tmp = compat_offsets, delta = 0; tmp; tmp = tmp->next)
1063 if (tmp->offset < offset)
1064 delta += tmp->delta;
1065 return delta;
1066 }
1067
1068 static void compat_standard_from_user(void *dst, void *src)
1069 {
1070 int v = *(compat_int_t *)src;
1071
1072 if (v > 0)
1073 v += compat_calc_jump(v);
1074 memcpy(dst, &v, sizeof(v));
1075 }
1076
1077 static int compat_standard_to_user(void __user *dst, void *src)
1078 {
1079 compat_int_t cv = *(int *)src;
1080
1081 if (cv > 0)
1082 cv -= compat_calc_jump(cv);
1083 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
1084 }
1085
/* IPT_MATCH_ITERATE callback: add this match's native-vs-compat size
 * difference to the running per-entry delta *size. */
static inline int
compat_calc_match(struct ipt_entry_match *m, int * size)
{
	*size += xt_compat_match_offset(m->u.kernel.match);
	return 0;
}
1092
/* Compute one rule's total compat size delta (matches + target), record
 * it in the global offset list, shrink newinfo->size accordingly, and
 * shift every hook entry/underflow that lies after this rule.
 * Returns 0 or a negative errno from compat_add_offset(). */
static int compat_calc_entry(struct ipt_entry *e,
			     const struct xt_table_info *info,
			     void *base, struct xt_table_info *newinfo)
{
	struct ipt_entry_target *t;
	unsigned int entry_offset;
	int off, i, ret;

	off = 0;
	entry_offset = (void *)e - base;
	IPT_MATCH_ITERATE(e, compat_calc_match, &off);
	t = ipt_get_target(e);
	off += xt_compat_target_offset(t->u.kernel.target);
	newinfo->size -= off;
	ret = compat_add_offset(entry_offset, off);
	if (ret)
		return ret;

	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		/* Entries before a hook offset are unaffected; those at or
		 * after it pull the hook offset down by this rule's delta. */
		if (info->hook_entry[i] && (e < (struct ipt_entry *)
				(base + info->hook_entry[i])))
			newinfo->hook_entry[i] -= off;
		if (info->underflow[i] && (e < (struct ipt_entry *)
				(base + info->underflow[i])))
			newinfo->underflow[i] -= off;
	}
	return 0;
}
1121
/* Build compat-layout metadata for @info in @newinfo: copy the header
 * fields, then walk this CPU's rule copy adjusting size and hook
 * offsets via compat_calc_entry().  newinfo->entries[] is deliberately
 * left untouched.  Returns 0 or a negative errno. */
static int compat_table_info(const struct xt_table_info *info,
			     struct xt_table_info *newinfo)
{
	void *loc_cpu_entry;

	if (!newinfo || !info)
		return -EINVAL;

	/* we dont care about newinfo->entries[] */
	memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
	newinfo->initial_entries = 0;
	loc_cpu_entry = info->entries[raw_smp_processor_id()];
	return IPT_ENTRY_ITERATE(loc_cpu_entry, info->size,
				 compat_calc_entry, info, loc_cpu_entry, newinfo);
}
#endif
1138
1139 static int get_info(void __user *user, int *len, int compat)
1140 {
1141 char name[IPT_TABLE_MAXNAMELEN];
1142 struct xt_table *t;
1143 int ret;
1144
1145 if (*len != sizeof(struct ipt_getinfo)) {
1146 duprintf("length %u != %u\n", *len,
1147 (unsigned int)sizeof(struct ipt_getinfo));
1148 return -EINVAL;
1149 }
1150
1151 if (copy_from_user(name, user, sizeof(name)) != 0)
1152 return -EFAULT;
1153
1154 name[IPT_TABLE_MAXNAMELEN-1] = '\0';
1155 #ifdef CONFIG_COMPAT
1156 if (compat)
1157 xt_compat_lock(AF_INET);
1158 #endif
1159 t = try_then_request_module(xt_find_table_lock(AF_INET, name),
1160 "iptable_%s", name);
1161 if (t && !IS_ERR(t)) {
1162 struct ipt_getinfo info;
1163 struct xt_table_info *private = t->private;
1164
1165 #ifdef CONFIG_COMPAT
1166 if (compat) {
1167 struct xt_table_info tmp;
1168 ret = compat_table_info(private, &tmp);
1169 compat_flush_offsets();
1170 private = &tmp;
1171 }
1172 #endif
1173 info.valid_hooks = t->valid_hooks;
1174 memcpy(info.hook_entry, private->hook_entry,
1175 sizeof(info.hook_entry));
1176 memcpy(info.underflow, private->underflow,
1177 sizeof(info.underflow));
1178 info.num_entries = private->number;
1179 info.size = private->size;
1180 strcpy(info.name, name);
1181
1182 if (copy_to_user(user, &info, *len) != 0)
1183 ret = -EFAULT;
1184 else
1185 ret = 0;
1186
1187 xt_table_unlock(t);
1188 module_put(t->me);
1189 } else
1190 ret = t ? PTR_ERR(t) : -ENOENT;
1191 #ifdef CONFIG_COMPAT
1192 if (compat)
1193 xt_compat_unlock(AF_INET);
1194 #endif
1195 return ret;
1196 }
1197
1198 static int
1199 get_entries(struct ipt_get_entries __user *uptr, int *len)
1200 {
1201 int ret;
1202 struct ipt_get_entries get;
1203 struct xt_table *t;
1204
1205 if (*len < sizeof(get)) {
1206 duprintf("get_entries: %u < %d\n", *len,
1207 (unsigned int)sizeof(get));
1208 return -EINVAL;
1209 }
1210 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1211 return -EFAULT;
1212 if (*len != sizeof(struct ipt_get_entries) + get.size) {
1213 duprintf("get_entries: %u != %u\n", *len,
1214 (unsigned int)(sizeof(struct ipt_get_entries) +
1215 get.size));
1216 return -EINVAL;
1217 }
1218
1219 t = xt_find_table_lock(AF_INET, get.name);
1220 if (t && !IS_ERR(t)) {
1221 struct xt_table_info *private = t->private;
1222 duprintf("t->private->number = %u\n",
1223 private->number);
1224 if (get.size == private->size)
1225 ret = copy_entries_to_user(private->size,
1226 t, uptr->entrytable);
1227 else {
1228 duprintf("get_entries: I've got %u not %u!\n",
1229 private->size,
1230 get.size);
1231 ret = -EINVAL;
1232 }
1233 module_put(t->me);
1234 xt_table_unlock(t);
1235 } else
1236 ret = t ? PTR_ERR(t) : -ENOENT;
1237
1238 return ret;
1239 }
1240
/* Swap @newinfo in as the rule set of table @name, handing the old
 * set's counters back to userspace at @counters_ptr.  @newinfo must be
 * fully translated; on success the old table memory is torn down and
 * freed here.  Shared by the native and compat replace paths. */
static int
__do_replace(const char *name, unsigned int valid_hooks,
		struct xt_table_info *newinfo, unsigned int num_counters,
		void __user *counters_ptr)
{
	int ret;
	struct xt_table *t;
	struct xt_table_info *oldinfo;
	struct xt_counters *counters;
	void *loc_cpu_old_entry;

	ret = 0;
	counters = vmalloc(num_counters * sizeof(struct xt_counters));
	if (!counters) {
		ret = -ENOMEM;
		goto out;
	}

	t = try_then_request_module(xt_find_table_lock(AF_INET, name),
				    "iptable_%s", name);
	if (!t || IS_ERR(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free_newinfo_counters_untrans;
	}

	/* You lied! */
	if (valid_hooks != t->valid_hooks) {
		duprintf("Valid hook crap: %08X vs %08X\n",
			 valid_hooks, t->valid_hooks);
		ret = -EINVAL;
		goto put_module;
	}

	oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
	if (!oldinfo)
		goto put_module;

	/* Update module usage count based on number of rules */
	duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
		oldinfo->number, oldinfo->initial_entries, newinfo->number);
	/* A non-initial ruleset pins the table module with an extra
	 * reference; drop one ref if the old set held one or the new
	 * set no longer needs one, and a second if both are true. */
	if ((oldinfo->number > oldinfo->initial_entries) ||
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);
	if ((oldinfo->number > oldinfo->initial_entries) &&
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);

	/* Get the old counters. */
	get_counters(oldinfo, counters);
	/* Decrease module usage counts and free resource */
	loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
	IPT_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry,NULL);
	xt_free_table_info(oldinfo);
	if (copy_to_user(counters_ptr, counters,
			 sizeof(struct xt_counters) * num_counters) != 0)
		ret = -EFAULT;
	vfree(counters);
	xt_table_unlock(t);
	return ret;

 put_module:
	module_put(t->me);
	xt_table_unlock(t);
 free_newinfo_counters_untrans:
	vfree(counters);
 out:
	return ret;
}
1309
1310 static int
1311 do_replace(void __user *user, unsigned int len)
1312 {
1313 int ret;
1314 struct ipt_replace tmp;
1315 struct xt_table_info *newinfo;
1316 void *loc_cpu_entry;
1317
1318 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1319 return -EFAULT;
1320
1321 /* Hack: Causes ipchains to give correct error msg --RR */
1322 if (len != sizeof(tmp) + tmp.size)
1323 return -ENOPROTOOPT;
1324
1325 /* overflow check */
1326 if (tmp.size >= INT_MAX / num_possible_cpus())
1327 return -ENOMEM;
1328 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1329 return -ENOMEM;
1330
1331 newinfo = xt_alloc_table_info(tmp.size);
1332 if (!newinfo)
1333 return -ENOMEM;
1334
1335 /* choose the copy that is our node/cpu */
1336 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1337 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1338 tmp.size) != 0) {
1339 ret = -EFAULT;
1340 goto free_newinfo;
1341 }
1342
1343 ret = translate_table(tmp.name, tmp.valid_hooks,
1344 newinfo, loc_cpu_entry, tmp.size, tmp.num_entries,
1345 tmp.hook_entry, tmp.underflow);
1346 if (ret != 0)
1347 goto free_newinfo;
1348
1349 duprintf("ip_tables: Translated table\n");
1350
1351 ret = __do_replace(tmp.name, tmp.valid_hooks,
1352 newinfo, tmp.num_counters,
1353 tmp.counters);
1354 if (ret)
1355 goto free_newinfo_untrans;
1356 return 0;
1357
1358 free_newinfo_untrans:
1359 IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry,NULL);
1360 free_newinfo:
1361 xt_free_table_info(newinfo);
1362 return ret;
1363 }
1364
1365 /* We're lazy, and add to the first CPU; overflow works its fey magic
1366 * and everything is OK. */
1367 static inline int
1368 add_counter_to_entry(struct ipt_entry *e,
1369 const struct xt_counters addme[],
1370 unsigned int *i)
1371 {
1372 #if 0
1373 duprintf("add_counter: Entry %u %lu/%lu + %lu/%lu\n",
1374 *i,
1375 (long unsigned int)e->counters.pcnt,
1376 (long unsigned int)e->counters.bcnt,
1377 (long unsigned int)addme[*i].pcnt,
1378 (long unsigned int)addme[*i].bcnt);
1379 #endif
1380
1381 ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
1382
1383 (*i)++;
1384 return 0;
1385 }
1386
1387 static int
1388 do_add_counters(void __user *user, unsigned int len, int compat)
1389 {
1390 unsigned int i;
1391 struct xt_counters_info tmp;
1392 struct xt_counters *paddc;
1393 unsigned int num_counters;
1394 char *name;
1395 int size;
1396 void *ptmp;
1397 struct xt_table *t;
1398 struct xt_table_info *private;
1399 int ret = 0;
1400 void *loc_cpu_entry;
1401 #ifdef CONFIG_COMPAT
1402 struct compat_xt_counters_info compat_tmp;
1403
1404 if (compat) {
1405 ptmp = &compat_tmp;
1406 size = sizeof(struct compat_xt_counters_info);
1407 } else
1408 #endif
1409 {
1410 ptmp = &tmp;
1411 size = sizeof(struct xt_counters_info);
1412 }
1413
1414 if (copy_from_user(ptmp, user, size) != 0)
1415 return -EFAULT;
1416
1417 #ifdef CONFIG_COMPAT
1418 if (compat) {
1419 num_counters = compat_tmp.num_counters;
1420 name = compat_tmp.name;
1421 } else
1422 #endif
1423 {
1424 num_counters = tmp.num_counters;
1425 name = tmp.name;
1426 }
1427
1428 if (len != size + num_counters * sizeof(struct xt_counters))
1429 return -EINVAL;
1430
1431 paddc = vmalloc_node(len - size, numa_node_id());
1432 if (!paddc)
1433 return -ENOMEM;
1434
1435 if (copy_from_user(paddc, user + size, len - size) != 0) {
1436 ret = -EFAULT;
1437 goto free;
1438 }
1439
1440 t = xt_find_table_lock(AF_INET, name);
1441 if (!t || IS_ERR(t)) {
1442 ret = t ? PTR_ERR(t) : -ENOENT;
1443 goto free;
1444 }
1445
1446 write_lock_bh(&t->lock);
1447 private = t->private;
1448 if (private->number != num_counters) {
1449 ret = -EINVAL;
1450 goto unlock_up_free;
1451 }
1452
1453 i = 0;
1454 /* Choose the copy that is on our node */
1455 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1456 IPT_ENTRY_ITERATE(loc_cpu_entry,
1457 private->size,
1458 add_counter_to_entry,
1459 paddc,
1460 &i);
1461 unlock_up_free:
1462 write_unlock_bh(&t->lock);
1463 xt_table_unlock(t);
1464 module_put(t->me);
1465 free:
1466 vfree(paddc);
1467
1468 return ret;
1469 }
1470
1471 #ifdef CONFIG_COMPAT
/* 32-bit layout of struct ipt_replace as passed by a compat task to
 * IPT_SO_SET_REPLACE.  Field order mirrors the native struct; the
 * counters pointer is carried as a compat_uptr_t. */
struct compat_ipt_replace {
	char			name[IPT_TABLE_MAXNAMELEN];
	u32			valid_hooks;
	u32			num_entries;
	u32			size;		/* size of entries[] blob */
	u32			hook_entry[NF_INET_NUMHOOKS];
	u32			underflow[NF_INET_NUMHOOKS];
	u32			num_counters;
	compat_uptr_t		counters;	/* struct ipt_counters * */
	struct compat_ipt_entry	entries[0];	/* variable-length tail */
};
1483
/* Thin adapter so xt_compat_match_to_user() can serve as an
 * IPT_MATCH_ITERATE callback (copies one match, compat layout,
 * to userspace and advances *dstptr / shrinks *size). */
static inline int compat_copy_match_to_user(struct ipt_entry_match *m,
		void __user **dstptr, compat_uint_t *size)
{
	return xt_compat_match_to_user(m, dstptr, size);
}
1489
/* Copy one native entry @e to userspace in compat layout: header,
 * counters[*i], then each match and the target via the xt compat
 * helpers.  target_offset/next_offset are rewritten to account for
 * the bytes saved by the smaller compat structures (origsize - *size
 * accumulates that shrinkage). */
static int
compat_copy_entry_to_user(struct ipt_entry *e, void __user **dstptr,
			  compat_uint_t *size, struct xt_counters *counters,
			  unsigned int *i)
{
	struct ipt_entry_target *t;
	struct compat_ipt_entry __user *ce;
	u_int16_t target_offset, next_offset;
	compat_uint_t origsize;
	int ret;

	ret = -EFAULT;
	origsize = *size;
	ce = (struct compat_ipt_entry __user *)*dstptr;
	if (copy_to_user(ce, e, sizeof(struct ipt_entry)))
		goto out;

	if (copy_to_user(&ce->counters, &counters[*i], sizeof(counters[*i])))
		goto out;

	*dstptr += sizeof(struct compat_ipt_entry);
	ret = IPT_MATCH_ITERATE(e, compat_copy_match_to_user, dstptr, size);
	/* Compute the compat target_offset after the matches have
	 * updated *size with their shrinkage. */
	target_offset = e->target_offset - (origsize - *size);
	if (ret)
		goto out;
	t = ipt_get_target(e);
	ret = xt_compat_target_to_user(t, dstptr, size);
	if (ret)
		goto out;
	ret = -EFAULT;
	next_offset = e->next_offset - (origsize - *size);
	if (put_user(target_offset, &ce->target_offset))
		goto out;
	if (put_user(next_offset, &ce->next_offset))
		goto out;

	(*i)++;
	return 0;
out:
	return ret;
}
1531
/* Look up (and, if needed, modprobe) the match named in @m, record it
 * in m->u.kernel.match, and add its native-vs-compat size delta to
 * *size.  *i counts successful lookups so compat_release_match() can
 * unwind exactly the references taken here on a later failure. */
static inline int
compat_find_calc_match(struct ipt_entry_match *m,
		       const char *name,
		       const struct ipt_ip *ip,
		       unsigned int hookmask,
		       int *size, int *i)
{
	struct xt_match *match;

	match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name,
						      m->u.user.revision),
					"ipt_%s", m->u.user.name);
	if (IS_ERR(match) || !match) {
		duprintf("compat_check_calc_match: `%s' not found\n",
			 m->u.user.name);
		return match ? PTR_ERR(match) : -ENOENT;
	}
	m->u.kernel.match = match;
	*size += xt_compat_match_offset(match);

	(*i)++;
	return 0;
}
1555
1556 static inline int
1557 compat_release_match(struct ipt_entry_match *m, unsigned int *i)
1558 {
1559 if (i && (*i)-- == 0)
1560 return 1;
1561
1562 module_put(m->u.kernel.match->me);
1563 return 0;
1564 }
1565
/* Release the module references held by one entry: all of its matches
 * plus its target.  When a countdown is supplied, stop the iteration
 * (return 1) once *i reaches zero, limiting the unwind to entries that
 * were fully checked. */
static inline int
compat_release_entry(struct ipt_entry *e, unsigned int *i)
{
	struct ipt_entry_target *t;

	if (i && (*i)-- == 0)
		return 1;

	/* Cleanup all matches */
	IPT_MATCH_ITERATE(e, compat_release_match, NULL);
	t = ipt_get_target(e);
	module_put(t->u.kernel.target->me);
	return 0;
}
1580
/* Pass one of translate_compat_table(): validate a single compat entry
 * (alignment, minimum size), resolve its matches and target (taking
 * module references that are unwound on failure), accumulate the
 * native-vs-compat size delta for pass two, and note which hook/
 * underflow offsets this entry sits at.
 *
 * NOTE(review): only the entry header is bounds-checked against
 * @limit here; e->next_offset's upper bound is presumably enforced by
 * the iteration macro / later checks — confirm. */
static inline int
check_compat_entry_size_and_hooks(struct ipt_entry *e,
			   struct xt_table_info *newinfo,
			   unsigned int *size,
			   unsigned char *base,
			   unsigned char *limit,
			   unsigned int *hook_entries,
			   unsigned int *underflows,
			   unsigned int *i,
			   const char *name)
{
	struct ipt_entry_target *t;
	struct xt_target *target;
	unsigned int entry_offset;
	int ret, off, h, j;

	duprintf("check_compat_entry_size_and_hooks %p\n", e);
	if ((unsigned long)e % __alignof__(struct compat_ipt_entry) != 0
	    || (unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit) {
		duprintf("Bad offset %p, limit = %p\n", e, limit);
		return -EINVAL;
	}

	if (e->next_offset < sizeof(struct compat_ipt_entry) +
			sizeof(struct compat_xt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	ret = check_entry(e, name);
	if (ret)
		return ret;

	off = 0;
	entry_offset = (void *)e - (void *)base;
	j = 0;	/* matches set up so far, for error unwind */
	ret = IPT_MATCH_ITERATE(e, compat_find_calc_match, name, &e->ip,
				e->comefrom, &off, &j);
	if (ret != 0)
		goto release_matches;

	t = ipt_get_target(e);
	target = try_then_request_module(xt_find_target(AF_INET,
							t->u.user.name,
							t->u.user.revision),
					 "ipt_%s", t->u.user.name);
	if (IS_ERR(target) || !target) {
		duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
			 t->u.user.name);
		ret = target ? PTR_ERR(target) : -ENOENT;
		goto release_matches;
	}
	t->u.kernel.target = target;

	/* Remember this entry's size delta so pass two can relocate it. */
	off += xt_compat_target_offset(target);
	*size += off;
	ret = compat_add_offset(entry_offset, off);
	if (ret)
		goto out;

	/* Check hooks & underflows */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h])
			newinfo->underflow[h] = underflows[h];
	}

	/* Clear counters and comefrom */
	e->counters = ((struct ipt_counters) { 0, 0 });
	e->comefrom = 0;

	(*i)++;
	return 0;

out:
	module_put(t->u.kernel.target->me);
release_matches:
	IPT_MATCH_ITERATE(e, compat_release_match, &j);
	return ret;
}
1663
/* Thin adapter so xt_compat_match_from_user() can serve as an
 * IPT_MATCH_ITERATE callback; the extra name/ip/hookmask parameters
 * exist only to satisfy the iterator's argument list. */
static inline int compat_copy_match_from_user(struct ipt_entry_match *m,
	void **dstptr, compat_uint_t *size, const char *name,
	const struct ipt_ip *ip, unsigned int hookmask)
{
	xt_compat_match_from_user(m, dstptr, size);
	return 0;
}
1671
/* Pass two of translate_compat_table(): expand one compat entry @e
 * into native layout at *dstptr.  Matches/target are converted via the
 * xt compat helpers; target_offset/next_offset and any hook/underflow
 * offsets that lie beyond this entry are grown by the size difference
 * (origsize - *size, which is negative growth tracked in *size). */
static int compat_copy_entry_from_user(struct ipt_entry *e, void **dstptr,
	unsigned int *size, const char *name,
	struct xt_table_info *newinfo, unsigned char *base)
{
	struct ipt_entry_target *t;
	struct xt_target *target;
	struct ipt_entry *de;
	unsigned int origsize;
	int ret, h;

	ret = 0;
	origsize = *size;
	de = (struct ipt_entry *)*dstptr;
	memcpy(de, e, sizeof(struct ipt_entry));

	/* Advance past the (smaller) compat header we just expanded. */
	*dstptr += sizeof(struct compat_ipt_entry);
	ret = IPT_MATCH_ITERATE(e, compat_copy_match_from_user, dstptr, size,
			name, &de->ip, de->comefrom);
	if (ret)
		return ret;
	de->target_offset = e->target_offset - (origsize - *size);
	t = ipt_get_target(e);
	target = t->u.kernel.target;
	xt_compat_target_from_user(t, dstptr, size);

	de->next_offset = e->next_offset - (origsize - *size);
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if ((unsigned char *)de - base < newinfo->hook_entry[h])
			newinfo->hook_entry[h] -= origsize - *size;
		if ((unsigned char *)de - base < newinfo->underflow[h])
			newinfo->underflow[h] -= origsize - *size;
	}
	return ret;
}
1706
/* Final verification of a translated entry in its native layout: run
 * each match's and the target's checkentry hooks.  On failure the
 * matches checked so far (counted in j) are cleaned up again. */
static inline int compat_check_entry(struct ipt_entry *e, const char *name,
				     unsigned int *i)
{
	int j, ret;

	j = 0;
	ret = IPT_MATCH_ITERATE(e, check_match, name, &e->ip, e->comefrom, &j);
	if (ret)
		goto cleanup_matches;

	ret = check_target(e, name);
	if (ret)
		goto cleanup_matches;

	(*i)++;
	return 0;

 cleanup_matches:
	IPT_MATCH_ITERATE(e, cleanup_match, &j);
	return ret;
}
1728
/* Translate a compat (32-bit) ruleset blob into native layout.  Two
 * passes under the xt compat lock: first validate every entry and
 * record per-entry size deltas, then allocate a native-sized table and
 * copy the entries across while fixing hook/underflow offsets.  On
 * success *pinfo/*pentry0 are replaced with the native table and the
 * compat input is freed; on failure all module references taken in
 * pass one are released. */
static int
translate_compat_table(const char *name,
		unsigned int valid_hooks,
		struct xt_table_info **pinfo,
		void **pentry0,
		unsigned int total_size,
		unsigned int number,
		unsigned int *hook_entries,
		unsigned int *underflows)
{
	unsigned int i, j;
	struct xt_table_info *newinfo, *info;
	void *pos, *entry0, *entry1;
	unsigned int size;
	int ret;

	info = *pinfo;
	entry0 = *pentry0;
	size = total_size;
	info->number = number;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		info->hook_entry[i] = 0xFFFFFFFF;
		info->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_compat_table: size %u\n", info->size);
	j = 0;	/* entries fully checked so far, for error unwind */
	xt_compat_lock(AF_INET);
	/* Walk through entries, checking offsets. */
	ret = IPT_ENTRY_ITERATE(entry0, total_size,
			check_compat_entry_size_and_hooks,
			info, &size, entry0,
			entry0 + total_size,
			hook_entries, underflows, &j, name);
	if (ret != 0)
		goto out_unlock;

	ret = -EINVAL;
	if (j != number) {
		duprintf("translate_compat_table: %u not %u entries\n",
			 j, number);
		goto out_unlock;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(valid_hooks & (1 << i)))
			continue;
		if (info->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, hook_entries[i]);
			goto out_unlock;
		}
		if (info->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, underflows[i]);
			goto out_unlock;
		}
	}

	ret = -ENOMEM;
	/* "size" now includes the growth accumulated in pass one. */
	newinfo = xt_alloc_table_info(size);
	if (!newinfo)
		goto out_unlock;

	newinfo->number = number;
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = info->hook_entry[i];
		newinfo->underflow[i] = info->underflow[i];
	}
	entry1 = newinfo->entries[raw_smp_processor_id()];
	pos = entry1;
	size = total_size;
	ret = IPT_ENTRY_ITERATE(entry0, total_size,
			compat_copy_entry_from_user, &pos, &size,
			name, newinfo, entry1);
	compat_flush_offsets();
	xt_compat_unlock(AF_INET);
	if (ret)
		goto free_newinfo;

	ret = -ELOOP;
	if (!mark_source_chains(newinfo, valid_hooks, entry1))
		goto free_newinfo;

	i = 0;
	ret = IPT_ENTRY_ITERATE(entry1, newinfo->size, compat_check_entry,
				name, &i);
	if (ret) {
		/* The first i entries passed compat_check_entry; release
		 * module refs on the remaining j-i, then clean up the i
		 * that were fully checked. */
		j -= i;
		IPT_ENTRY_ITERATE_CONTINUE(entry1, newinfo->size, i,
					   compat_release_entry, &j);
		IPT_ENTRY_ITERATE(entry1, newinfo->size, cleanup_entry, &i);
		xt_free_table_info(newinfo);
		return ret;
	}

	/* And one copy for every other CPU */
	for_each_possible_cpu(i)
		if (newinfo->entries[i] && newinfo->entries[i] != entry1)
			memcpy(newinfo->entries[i], entry1, newinfo->size);

	*pinfo = newinfo;
	*pentry0 = entry1;
	xt_free_table_info(info);
	return 0;

free_newinfo:
	xt_free_table_info(newinfo);
out:
	IPT_ENTRY_ITERATE(entry0, total_size, compat_release_entry, &j);
	return ret;
out_unlock:
	compat_flush_offsets();
	xt_compat_unlock(AF_INET);
	goto out;
}
1849
1850 static int
1851 compat_do_replace(void __user *user, unsigned int len)
1852 {
1853 int ret;
1854 struct compat_ipt_replace tmp;
1855 struct xt_table_info *newinfo;
1856 void *loc_cpu_entry;
1857
1858 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1859 return -EFAULT;
1860
1861 /* Hack: Causes ipchains to give correct error msg --RR */
1862 if (len != sizeof(tmp) + tmp.size)
1863 return -ENOPROTOOPT;
1864
1865 /* overflow check */
1866 if (tmp.size >= INT_MAX / num_possible_cpus())
1867 return -ENOMEM;
1868 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1869 return -ENOMEM;
1870
1871 newinfo = xt_alloc_table_info(tmp.size);
1872 if (!newinfo)
1873 return -ENOMEM;
1874
1875 /* choose the copy that is our node/cpu */
1876 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1877 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1878 tmp.size) != 0) {
1879 ret = -EFAULT;
1880 goto free_newinfo;
1881 }
1882
1883 ret = translate_compat_table(tmp.name, tmp.valid_hooks,
1884 &newinfo, &loc_cpu_entry, tmp.size,
1885 tmp.num_entries, tmp.hook_entry, tmp.underflow);
1886 if (ret != 0)
1887 goto free_newinfo;
1888
1889 duprintf("compat_do_replace: Translated table\n");
1890
1891 ret = __do_replace(tmp.name, tmp.valid_hooks,
1892 newinfo, tmp.num_counters,
1893 compat_ptr(tmp.counters));
1894 if (ret)
1895 goto free_newinfo_untrans;
1896 return 0;
1897
1898 free_newinfo_untrans:
1899 IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry,NULL);
1900 free_newinfo:
1901 xt_free_table_info(newinfo);
1902 return ret;
1903 }
1904
1905 static int
1906 compat_do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user,
1907 unsigned int len)
1908 {
1909 int ret;
1910
1911 if (!capable(CAP_NET_ADMIN))
1912 return -EPERM;
1913
1914 switch (cmd) {
1915 case IPT_SO_SET_REPLACE:
1916 ret = compat_do_replace(user, len);
1917 break;
1918
1919 case IPT_SO_SET_ADD_COUNTERS:
1920 ret = do_add_counters(user, len, 1);
1921 break;
1922
1923 default:
1924 duprintf("do_ipt_set_ctl: unknown request %i\n", cmd);
1925 ret = -EINVAL;
1926 }
1927
1928 return ret;
1929 }
1930
/* 32-bit layout of struct ipt_get_entries for IPT_SO_GET_ENTRIES. */
struct compat_ipt_get_entries
{
	char name[IPT_TABLE_MAXNAMELEN];
	compat_uint_t size;			/* size of entrytable[] blob */
	struct compat_ipt_entry entrytable[0];	/* variable-length tail */
};
1937
/* Copy the whole ruleset of @table to userspace in compat layout,
 * pairing each entry with a snapshot of its counters. */
static int compat_copy_entries_to_user(unsigned int total_size,
		     struct xt_table *table, void __user *userptr)
{
	struct xt_counters *counters;
	struct xt_table_info *private = table->private;
	void __user *pos;
	unsigned int size;
	int ret = 0;
	void *loc_cpu_entry;
	unsigned int i = 0;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	/* choose the copy that is on our node/cpu, ...
	 * This choice is lazy (because current thread is
	 * allowed to migrate to another cpu)
	 */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	pos = userptr;
	size = total_size;
	ret = IPT_ENTRY_ITERATE(loc_cpu_entry, total_size,
				compat_copy_entry_to_user,
				&pos, &size, counters, &i);

	vfree(counters);
	return ret;
}
1967
1968 static int
1969 compat_get_entries(struct compat_ipt_get_entries __user *uptr, int *len)
1970 {
1971 int ret;
1972 struct compat_ipt_get_entries get;
1973 struct xt_table *t;
1974
1975
1976 if (*len < sizeof(get)) {
1977 duprintf("compat_get_entries: %u < %u\n",
1978 *len, (unsigned int)sizeof(get));
1979 return -EINVAL;
1980 }
1981
1982 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1983 return -EFAULT;
1984
1985 if (*len != sizeof(struct compat_ipt_get_entries) + get.size) {
1986 duprintf("compat_get_entries: %u != %u\n", *len,
1987 (unsigned int)(sizeof(struct compat_ipt_get_entries) +
1988 get.size));
1989 return -EINVAL;
1990 }
1991
1992 xt_compat_lock(AF_INET);
1993 t = xt_find_table_lock(AF_INET, get.name);
1994 if (t && !IS_ERR(t)) {
1995 struct xt_table_info *private = t->private;
1996 struct xt_table_info info;
1997 duprintf("t->private->number = %u\n",
1998 private->number);
1999 ret = compat_table_info(private, &info);
2000 if (!ret && get.size == info.size) {
2001 ret = compat_copy_entries_to_user(private->size,
2002 t, uptr->entrytable);
2003 } else if (!ret) {
2004 duprintf("compat_get_entries: I've got %u not %u!\n",
2005 private->size,
2006 get.size);
2007 ret = -EINVAL;
2008 }
2009 compat_flush_offsets();
2010 module_put(t->me);
2011 xt_table_unlock(t);
2012 } else
2013 ret = t ? PTR_ERR(t) : -ENOENT;
2014
2015 xt_compat_unlock(AF_INET);
2016 return ret;
2017 }
2018
2019 static int do_ipt_get_ctl(struct sock *, int, void __user *, int *);
2020
2021 static int
2022 compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2023 {
2024 int ret;
2025
2026 if (!capable(CAP_NET_ADMIN))
2027 return -EPERM;
2028
2029 switch (cmd) {
2030 case IPT_SO_GET_INFO:
2031 ret = get_info(user, len, 1);
2032 break;
2033 case IPT_SO_GET_ENTRIES:
2034 ret = compat_get_entries(user, len);
2035 break;
2036 default:
2037 ret = do_ipt_get_ctl(sk, cmd, user, len);
2038 }
2039 return ret;
2040 }
2041 #endif
2042
2043 static int
2044 do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
2045 {
2046 int ret;
2047
2048 if (!capable(CAP_NET_ADMIN))
2049 return -EPERM;
2050
2051 switch (cmd) {
2052 case IPT_SO_SET_REPLACE:
2053 ret = do_replace(user, len);
2054 break;
2055
2056 case IPT_SO_SET_ADD_COUNTERS:
2057 ret = do_add_counters(user, len, 0);
2058 break;
2059
2060 default:
2061 duprintf("do_ipt_set_ctl: unknown request %i\n", cmd);
2062 ret = -EINVAL;
2063 }
2064
2065 return ret;
2066 }
2067
/* Native getsockopt() entry point: table info, full ruleset dump, and
 * match/target revision queries.  Requires CAP_NET_ADMIN. */
static int
do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_GET_INFO:
		ret = get_info(user, len, 0);
		break;

	case IPT_SO_GET_ENTRIES:
		ret = get_entries(user, len);
		break;

	case IPT_SO_GET_REVISION_MATCH:
	case IPT_SO_GET_REVISION_TARGET: {
		struct ipt_get_revision rev;
		int target;

		if (*len != sizeof(rev)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
			ret = -EFAULT;
			break;
		}

		if (cmd == IPT_SO_GET_REVISION_TARGET)
			target = 1;
		else
			target = 0;

		/* xt_find_revision() stores the answer in ret; the module
		 * is autoloaded on a miss and the lookup retried. */
		try_then_request_module(xt_find_revision(AF_INET, rev.name,
							 rev.revision,
							 target, &ret),
					"ipt_%s", rev.name);
		break;
	}

	default:
		duprintf("do_ipt_get_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}
2118
/* Register a new table with its initial ruleset @repl: allocate the
 * per-CPU table, copy and translate the bootstrap entries, then hand
 * the table to the xt core.  Called by the per-table modules
 * (iptable_filter, iptable_nat, ...). */
int ipt_register_table(struct xt_table *table, const struct ipt_replace *repl)
{
	int ret;
	struct xt_table_info *newinfo;
	/* Placeholder xt_table_info; xt_register_table swaps in newinfo. */
	struct xt_table_info bootstrap
		= { 0, 0, 0, { 0 }, { 0 }, { } };
	void *loc_cpu_entry;

	newinfo = xt_alloc_table_info(repl->size);
	if (!newinfo)
		return -ENOMEM;

	/* choose the copy on our node/cpu
	 * but dont care of preemption
	 */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	memcpy(loc_cpu_entry, repl->entries, repl->size);

	ret = translate_table(table->name, table->valid_hooks,
			      newinfo, loc_cpu_entry, repl->size,
			      repl->num_entries,
			      repl->hook_entry,
			      repl->underflow);
	if (ret != 0) {
		xt_free_table_info(newinfo);
		return ret;
	}

	ret = xt_register_table(table, &bootstrap, newinfo);
	if (ret != 0) {
		xt_free_table_info(newinfo);
		return ret;
	}

	return 0;
}
2155
/* Unregister @table from the xt core and free its ruleset, dropping
 * the module references held by every entry's matches and target. */
void ipt_unregister_table(struct xt_table *table)
{
	struct xt_table_info *private;
	void *loc_cpu_entry;

	private = xt_unregister_table(table);

	/* Decrease module usage counts and free resources */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	IPT_ENTRY_ITERATE(loc_cpu_entry, private->size, cleanup_entry, NULL);
	xt_free_table_info(private);
}
2168
/* Returns true if the type and code is matched by the range, xor invert.
 * A test_type of 0xFF acts as a wildcard matching any type/code. */
static inline bool
icmp_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
		     u_int8_t type, u_int8_t code,
		     bool invert)
{
	bool in_range = (test_type == 0xFF) ||
			(type == test_type &&
			 code >= min_code && code <= max_code);

	return in_range ^ invert;
}
2178
/* "icmp" match: compare the packet's ICMP type/code against the rule's
 * configured range.  Fragments can't be inspected (no ICMP header) and
 * never match; a packet too short to carry an ICMP header is dropped
 * via *hotdrop. */
static bool
icmp_match(const struct sk_buff *skb,
	   const struct net_device *in,
	   const struct net_device *out,
	   const struct xt_match *match,
	   const void *matchinfo,
	   int offset,
	   unsigned int protoff,
	   bool *hotdrop)
{
	struct icmphdr _icmph, *ic;
	const struct ipt_icmp *icmpinfo = matchinfo;

	/* Must not be a fragment. */
	if (offset)
		return false;

	ic = skb_header_pointer(skb, protoff, sizeof(_icmph), &_icmph);
	if (ic == NULL) {
		/* We've been asked to examine this packet, and we
		 * can't.  Hence, no choice but to drop.
		 */
		duprintf("Dropping evil ICMP tinygram.\n");
		*hotdrop = true;
		return false;
	}

	return icmp_type_code_match(icmpinfo->type,
				    icmpinfo->code[0],
				    icmpinfo->code[1],
				    ic->type, ic->code,
				    !!(icmpinfo->invflags&IPT_ICMP_INV));
}
2212
2213 /* Called when user tries to insert an entry of this type. */
2214 static bool
2215 icmp_checkentry(const char *tablename,
2216 const void *info,
2217 const struct xt_match *match,
2218 void *matchinfo,
2219 unsigned int hook_mask)
2220 {
2221 const struct ipt_icmp *icmpinfo = matchinfo;
2222
2223 /* Must specify no unknown invflags */
2224 return !(icmpinfo->invflags & ~IPT_ICMP_INV);
2225 }
2226
/* The built-in targets: standard (NULL) and error. */
/* Standard verdict target: no .target hook, its payload is just the
 * verdict int interpreted by ipt_do_table. */
static struct xt_target ipt_standard_target __read_mostly = {
	.name		= IPT_STANDARD_TARGET,
	.targetsize	= sizeof(int),
	.family		= AF_INET,
#ifdef CONFIG_COMPAT
	.compatsize	= sizeof(compat_int_t),
	.compat_from_user = compat_standard_from_user,
	.compat_to_user	= compat_standard_to_user,
#endif
};

/* ERROR target: carries a chain-name string; hitting it at runtime
 * invokes ipt_error(). */
static struct xt_target ipt_error_target __read_mostly = {
	.name		= IPT_ERROR_TARGET,
	.target		= ipt_error,
	.targetsize	= IPT_FUNCTION_MAXNAMELEN,
	.family		= AF_INET,
};
2245
/* sockopt registration: routes the IPT_SO_SET_*/IPT_SO_GET_* range to
 * our handlers, with separate compat entry points for 32-bit tasks. */
static struct nf_sockopt_ops ipt_sockopts = {
	.pf		= PF_INET,
	.set_optmin	= IPT_BASE_CTL,
	.set_optmax	= IPT_SO_SET_MAX+1,
	.set		= do_ipt_set_ctl,
#ifdef CONFIG_COMPAT
	.compat_set	= compat_do_ipt_set_ctl,
#endif
	.get_optmin	= IPT_BASE_CTL,
	.get_optmax	= IPT_SO_GET_MAX+1,
	.get		= do_ipt_get_ctl,
#ifdef CONFIG_COMPAT
	.compat_get	= compat_do_ipt_get_ctl,
#endif
	.owner		= THIS_MODULE,
};
2262
/* Built-in "icmp" match registration (implementation above). */
static struct xt_match icmp_matchstruct __read_mostly = {
	.name		= "icmp",
	.match		= icmp_match,
	.matchsize	= sizeof(struct ipt_icmp),
	.proto		= IPPROTO_ICMP,
	.family		= AF_INET,
	.checkentry	= icmp_checkentry,
};
2271
/* Module init: register with the xt core, then the built-in targets,
 * the icmp match, and finally the sockopt interface.  Each failure
 * unwinds everything registered before it, in reverse order. */
static int __init ip_tables_init(void)
{
	int ret;

	ret = xt_proto_init(AF_INET);
	if (ret < 0)
		goto err1;

	/* Noone else will be downing sem now, so we won't sleep */
	ret = xt_register_target(&ipt_standard_target);
	if (ret < 0)
		goto err2;
	ret = xt_register_target(&ipt_error_target);
	if (ret < 0)
		goto err3;
	ret = xt_register_match(&icmp_matchstruct);
	if (ret < 0)
		goto err4;

	/* Register setsockopt */
	ret = nf_register_sockopt(&ipt_sockopts);
	if (ret < 0)
		goto err5;

	printk(KERN_INFO "ip_tables: (C) 2000-2006 Netfilter Core Team\n");
	return 0;

err5:
	xt_unregister_match(&icmp_matchstruct);
err4:
	xt_unregister_target(&ipt_error_target);
err3:
	xt_unregister_target(&ipt_standard_target);
err2:
	xt_proto_fini(AF_INET);
err1:
	return ret;
}
2310
/* Module exit: tear everything down in exact reverse order of
 * ip_tables_init(). */
static void __exit ip_tables_fini(void)
{
	nf_unregister_sockopt(&ipt_sockopts);

	xt_unregister_match(&icmp_matchstruct);
	xt_unregister_target(&ipt_error_target);
	xt_unregister_target(&ipt_standard_target);

	xt_proto_fini(AF_INET);
}
2321
/* Entry points used by the per-table modules (iptable_filter etc). */
EXPORT_SYMBOL(ipt_register_table);
EXPORT_SYMBOL(ipt_unregister_table);
EXPORT_SYMBOL(ipt_do_table);
module_init(ip_tables_init);
module_exit(ip_tables_fini);
This page took 0.080669 seconds and 5 git commands to generate.