Merge tag 'microblaze-4.7-rc1' of git://git.monstr.eu/linux-2.6-microblaze
[deliverable/linux.git] / net / ipv6 / netfilter / ip6_tables.c
1 /*
2 * Packet matching code.
3 *
4 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
5 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
6 * Copyright (c) 2006-2010 Patrick McHardy <kaber@trash.net>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14
15 #include <linux/kernel.h>
16 #include <linux/capability.h>
17 #include <linux/in.h>
18 #include <linux/skbuff.h>
19 #include <linux/kmod.h>
20 #include <linux/vmalloc.h>
21 #include <linux/netdevice.h>
22 #include <linux/module.h>
23 #include <linux/poison.h>
24 #include <linux/icmpv6.h>
25 #include <net/ipv6.h>
26 #include <net/compat.h>
27 #include <asm/uaccess.h>
28 #include <linux/mutex.h>
29 #include <linux/proc_fs.h>
30 #include <linux/err.h>
31 #include <linux/cpumask.h>
32
33 #include <linux/netfilter_ipv6/ip6_tables.h>
34 #include <linux/netfilter/x_tables.h>
35 #include <net/netfilter/nf_log.h>
36 #include "../../netfilter/xt_repldata.h"
37
38 MODULE_LICENSE("GPL");
39 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
40 MODULE_DESCRIPTION("IPv6 packet filter");
41
/* Debug-only assertion: warn (with stack trace) when the condition is false;
 * compiles away entirely on non-debug builds.
 */
#ifdef CONFIG_NETFILTER_DEBUG
#define IP_NF_ASSERT(x)	WARN_ON(!(x))
#else
#define IP_NF_ASSERT(x)
#endif
47
48 void *ip6t_alloc_initial_table(const struct xt_table *info)
49 {
50 return xt_alloc_initial_table(ip6t, IP6T);
51 }
52 EXPORT_SYMBOL_GPL(ip6t_alloc_initial_table);
53
/*
   Per-CPU rule counters are updated in the softirq under a lightweight
   sequence counter (xt_write_recseq); user context reading the counters
   or replacing the rules synchronizes against that seqcount instead of
   stopping packet flow with a write lock.

   Hence the start of any table is given by get_entry() below. */
63 /* Returns whether matches rule or not. */
64 /* Performance critical - called for every packet */
65 static inline bool
66 ip6_packet_match(const struct sk_buff *skb,
67 const char *indev,
68 const char *outdev,
69 const struct ip6t_ip6 *ip6info,
70 unsigned int *protoff,
71 int *fragoff, bool *hotdrop)
72 {
73 unsigned long ret;
74 const struct ipv6hdr *ipv6 = ipv6_hdr(skb);
75
76 #define FWINV(bool, invflg) ((bool) ^ !!(ip6info->invflags & (invflg)))
77
78 if (FWINV(ipv6_masked_addr_cmp(&ipv6->saddr, &ip6info->smsk,
79 &ip6info->src), IP6T_INV_SRCIP) ||
80 FWINV(ipv6_masked_addr_cmp(&ipv6->daddr, &ip6info->dmsk,
81 &ip6info->dst), IP6T_INV_DSTIP))
82 return false;
83
84 ret = ifname_compare_aligned(indev, ip6info->iniface, ip6info->iniface_mask);
85
86 if (FWINV(ret != 0, IP6T_INV_VIA_IN))
87 return false;
88
89 ret = ifname_compare_aligned(outdev, ip6info->outiface, ip6info->outiface_mask);
90
91 if (FWINV(ret != 0, IP6T_INV_VIA_OUT))
92 return false;
93
94 /* ... might want to do something with class and flowlabel here ... */
95
96 /* look for the desired protocol header */
97 if (ip6info->flags & IP6T_F_PROTO) {
98 int protohdr;
99 unsigned short _frag_off;
100
101 protohdr = ipv6_find_hdr(skb, protoff, -1, &_frag_off, NULL);
102 if (protohdr < 0) {
103 if (_frag_off == 0)
104 *hotdrop = true;
105 return false;
106 }
107 *fragoff = _frag_off;
108
109 if (ip6info->proto == protohdr) {
110 if (ip6info->invflags & IP6T_INV_PROTO)
111 return false;
112
113 return true;
114 }
115
116 /* We need match for the '-p all', too! */
117 if ((ip6info->proto != 0) &&
118 !(ip6info->invflags & IP6T_INV_PROTO))
119 return false;
120 }
121 return true;
122 }
123
124 /* should be ip6 safe */
125 static bool
126 ip6_checkentry(const struct ip6t_ip6 *ipv6)
127 {
128 if (ipv6->flags & ~IP6T_F_MASK)
129 return false;
130 if (ipv6->invflags & ~IP6T_INV_MASK)
131 return false;
132
133 return true;
134 }
135
136 static unsigned int
137 ip6t_error(struct sk_buff *skb, const struct xt_action_param *par)
138 {
139 net_info_ratelimited("error: `%s'\n", (const char *)par->targinfo);
140
141 return NF_DROP;
142 }
143
/* Translate a byte offset inside a table blob into a rule pointer. */
static inline struct ip6t_entry *
get_entry(const void *base, unsigned int offset)
{
	return (struct ip6t_entry *)(base + offset);
}
149
150 /* All zeroes == unconditional rule. */
151 /* Mildly perf critical (only if packet tracing is on) */
152 static inline bool unconditional(const struct ip6t_entry *e)
153 {
154 static const struct ip6t_ip6 uncond;
155
156 return e->target_offset == sizeof(struct ip6t_entry) &&
157 memcmp(&e->ipv6, &uncond, sizeof(uncond)) == 0;
158 }
159
/* const-friendly wrapper around ip6t_get_target(). */
static inline const struct xt_entry_target *
ip6t_get_target_c(const struct ip6t_entry *e)
{
	return ip6t_get_target((struct ip6t_entry *)e);
}
165
166 #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
167 /* This cries for unification! */
168 static const char *const hooknames[] = {
169 [NF_INET_PRE_ROUTING] = "PREROUTING",
170 [NF_INET_LOCAL_IN] = "INPUT",
171 [NF_INET_FORWARD] = "FORWARD",
172 [NF_INET_LOCAL_OUT] = "OUTPUT",
173 [NF_INET_POST_ROUTING] = "POSTROUTING",
174 };
175
176 enum nf_ip_trace_comments {
177 NF_IP6_TRACE_COMMENT_RULE,
178 NF_IP6_TRACE_COMMENT_RETURN,
179 NF_IP6_TRACE_COMMENT_POLICY,
180 };
181
182 static const char *const comments[] = {
183 [NF_IP6_TRACE_COMMENT_RULE] = "rule",
184 [NF_IP6_TRACE_COMMENT_RETURN] = "return",
185 [NF_IP6_TRACE_COMMENT_POLICY] = "policy",
186 };
187
188 static struct nf_loginfo trace_loginfo = {
189 .type = NF_LOG_TYPE_LOG,
190 .u = {
191 .log = {
192 .level = LOGLEVEL_WARNING,
193 .logflags = NF_LOG_MASK,
194 },
195 },
196 };
197
198 /* Mildly perf critical (only if packet tracing is on) */
199 static inline int
200 get_chainname_rulenum(const struct ip6t_entry *s, const struct ip6t_entry *e,
201 const char *hookname, const char **chainname,
202 const char **comment, unsigned int *rulenum)
203 {
204 const struct xt_standard_target *t = (void *)ip6t_get_target_c(s);
205
206 if (strcmp(t->target.u.kernel.target->name, XT_ERROR_TARGET) == 0) {
207 /* Head of user chain: ERROR target with chainname */
208 *chainname = t->target.data;
209 (*rulenum) = 0;
210 } else if (s == e) {
211 (*rulenum)++;
212
213 if (unconditional(s) &&
214 strcmp(t->target.u.kernel.target->name,
215 XT_STANDARD_TARGET) == 0 &&
216 t->verdict < 0) {
217 /* Tail of chains: STANDARD target (return/policy) */
218 *comment = *chainname == hookname
219 ? comments[NF_IP6_TRACE_COMMENT_POLICY]
220 : comments[NF_IP6_TRACE_COMMENT_RETURN];
221 }
222 return 1;
223 } else
224 (*rulenum)++;
225
226 return 0;
227 }
228
229 static void trace_packet(struct net *net,
230 const struct sk_buff *skb,
231 unsigned int hook,
232 const struct net_device *in,
233 const struct net_device *out,
234 const char *tablename,
235 const struct xt_table_info *private,
236 const struct ip6t_entry *e)
237 {
238 const struct ip6t_entry *root;
239 const char *hookname, *chainname, *comment;
240 const struct ip6t_entry *iter;
241 unsigned int rulenum = 0;
242
243 root = get_entry(private->entries, private->hook_entry[hook]);
244
245 hookname = chainname = hooknames[hook];
246 comment = comments[NF_IP6_TRACE_COMMENT_RULE];
247
248 xt_entry_foreach(iter, root, private->size - private->hook_entry[hook])
249 if (get_chainname_rulenum(iter, e, hookname,
250 &chainname, &comment, &rulenum) != 0)
251 break;
252
253 nf_log_trace(net, AF_INET6, hook, skb, in, out, &trace_loginfo,
254 "TRACE: %s:%s:%s:%u ",
255 tablename, chainname, comment, rulenum);
256 }
257 #endif
258
259 static inline struct ip6t_entry *
260 ip6t_next_entry(const struct ip6t_entry *entry)
261 {
262 return (void *)entry + entry->next_offset;
263 }
264
265 /* Returns one of the generic firewall policies, like NF_ACCEPT. */
266 unsigned int
267 ip6t_do_table(struct sk_buff *skb,
268 const struct nf_hook_state *state,
269 struct xt_table *table)
270 {
271 unsigned int hook = state->hook;
272 static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
273 /* Initializing verdict to NF_DROP keeps gcc happy. */
274 unsigned int verdict = NF_DROP;
275 const char *indev, *outdev;
276 const void *table_base;
277 struct ip6t_entry *e, **jumpstack;
278 unsigned int stackidx, cpu;
279 const struct xt_table_info *private;
280 struct xt_action_param acpar;
281 unsigned int addend;
282
283 /* Initialization */
284 stackidx = 0;
285 indev = state->in ? state->in->name : nulldevname;
286 outdev = state->out ? state->out->name : nulldevname;
287 /* We handle fragments by dealing with the first fragment as
288 * if it was a normal packet. All other fragments are treated
289 * normally, except that they will NEVER match rules that ask
290 * things we don't know, ie. tcp syn flag or ports). If the
291 * rule is also a fragment-specific rule, non-fragments won't
292 * match it. */
293 acpar.hotdrop = false;
294 acpar.net = state->net;
295 acpar.in = state->in;
296 acpar.out = state->out;
297 acpar.family = NFPROTO_IPV6;
298 acpar.hooknum = hook;
299
300 IP_NF_ASSERT(table->valid_hooks & (1 << hook));
301
302 local_bh_disable();
303 addend = xt_write_recseq_begin();
304 private = table->private;
305 /*
306 * Ensure we load private-> members after we've fetched the base
307 * pointer.
308 */
309 smp_read_barrier_depends();
310 cpu = smp_processor_id();
311 table_base = private->entries;
312 jumpstack = (struct ip6t_entry **)private->jumpstack[cpu];
313
314 /* Switch to alternate jumpstack if we're being invoked via TEE.
315 * TEE issues XT_CONTINUE verdict on original skb so we must not
316 * clobber the jumpstack.
317 *
318 * For recursion via REJECT or SYNPROXY the stack will be clobbered
319 * but it is no problem since absolute verdict is issued by these.
320 */
321 if (static_key_false(&xt_tee_enabled))
322 jumpstack += private->stacksize * __this_cpu_read(nf_skb_duplicated);
323
324 e = get_entry(table_base, private->hook_entry[hook]);
325
326 do {
327 const struct xt_entry_target *t;
328 const struct xt_entry_match *ematch;
329 struct xt_counters *counter;
330
331 IP_NF_ASSERT(e);
332 acpar.thoff = 0;
333 if (!ip6_packet_match(skb, indev, outdev, &e->ipv6,
334 &acpar.thoff, &acpar.fragoff, &acpar.hotdrop)) {
335 no_match:
336 e = ip6t_next_entry(e);
337 continue;
338 }
339
340 xt_ematch_foreach(ematch, e) {
341 acpar.match = ematch->u.kernel.match;
342 acpar.matchinfo = ematch->data;
343 if (!acpar.match->match(skb, &acpar))
344 goto no_match;
345 }
346
347 counter = xt_get_this_cpu_counter(&e->counters);
348 ADD_COUNTER(*counter, skb->len, 1);
349
350 t = ip6t_get_target_c(e);
351 IP_NF_ASSERT(t->u.kernel.target);
352
353 #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
354 /* The packet is traced: log it */
355 if (unlikely(skb->nf_trace))
356 trace_packet(state->net, skb, hook, state->in,
357 state->out, table->name, private, e);
358 #endif
359 /* Standard target? */
360 if (!t->u.kernel.target->target) {
361 int v;
362
363 v = ((struct xt_standard_target *)t)->verdict;
364 if (v < 0) {
365 /* Pop from stack? */
366 if (v != XT_RETURN) {
367 verdict = (unsigned int)(-v) - 1;
368 break;
369 }
370 if (stackidx == 0)
371 e = get_entry(table_base,
372 private->underflow[hook]);
373 else
374 e = ip6t_next_entry(jumpstack[--stackidx]);
375 continue;
376 }
377 if (table_base + v != ip6t_next_entry(e) &&
378 !(e->ipv6.flags & IP6T_F_GOTO)) {
379 jumpstack[stackidx++] = e;
380 }
381
382 e = get_entry(table_base, v);
383 continue;
384 }
385
386 acpar.target = t->u.kernel.target;
387 acpar.targinfo = t->data;
388
389 verdict = t->u.kernel.target->target(skb, &acpar);
390 if (verdict == XT_CONTINUE)
391 e = ip6t_next_entry(e);
392 else
393 /* Verdict */
394 break;
395 } while (!acpar.hotdrop);
396
397 xt_write_recseq_end(addend);
398 local_bh_enable();
399
400 if (acpar.hotdrop)
401 return NF_DROP;
402 else return verdict;
403 }
404
405 static bool find_jump_target(const struct xt_table_info *t,
406 const struct ip6t_entry *target)
407 {
408 struct ip6t_entry *iter;
409
410 xt_entry_foreach(iter, t->entries, t->size) {
411 if (iter == target)
412 return true;
413 }
414 return false;
415 }
416
417 /* Figures out from what hook each rule can be called: returns 0 if
418 there are loops. Puts hook bitmask in comefrom. */
419 static int
420 mark_source_chains(const struct xt_table_info *newinfo,
421 unsigned int valid_hooks, void *entry0)
422 {
423 unsigned int hook;
424
425 /* No recursion; use packet counter to save back ptrs (reset
426 to 0 as we leave), and comefrom to save source hook bitmask */
427 for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
428 unsigned int pos = newinfo->hook_entry[hook];
429 struct ip6t_entry *e = (struct ip6t_entry *)(entry0 + pos);
430
431 if (!(valid_hooks & (1 << hook)))
432 continue;
433
434 /* Set initial back pointer. */
435 e->counters.pcnt = pos;
436
437 for (;;) {
438 const struct xt_standard_target *t
439 = (void *)ip6t_get_target_c(e);
440 int visited = e->comefrom & (1 << hook);
441
442 if (e->comefrom & (1 << NF_INET_NUMHOOKS))
443 return 0;
444
445 e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));
446
447 /* Unconditional return/END. */
448 if ((unconditional(e) &&
449 (strcmp(t->target.u.user.name,
450 XT_STANDARD_TARGET) == 0) &&
451 t->verdict < 0) || visited) {
452 unsigned int oldpos, size;
453
454 if ((strcmp(t->target.u.user.name,
455 XT_STANDARD_TARGET) == 0) &&
456 t->verdict < -NF_MAX_VERDICT - 1)
457 return 0;
458
459 /* Return: backtrack through the last
460 big jump. */
461 do {
462 e->comefrom ^= (1<<NF_INET_NUMHOOKS);
463 oldpos = pos;
464 pos = e->counters.pcnt;
465 e->counters.pcnt = 0;
466
467 /* We're at the start. */
468 if (pos == oldpos)
469 goto next;
470
471 e = (struct ip6t_entry *)
472 (entry0 + pos);
473 } while (oldpos == pos + e->next_offset);
474
475 /* Move along one */
476 size = e->next_offset;
477 e = (struct ip6t_entry *)
478 (entry0 + pos + size);
479 if (pos + size >= newinfo->size)
480 return 0;
481 e->counters.pcnt = pos;
482 pos += size;
483 } else {
484 int newpos = t->verdict;
485
486 if (strcmp(t->target.u.user.name,
487 XT_STANDARD_TARGET) == 0 &&
488 newpos >= 0) {
489 /* This a jump; chase it. */
490 e = (struct ip6t_entry *)
491 (entry0 + newpos);
492 if (!find_jump_target(newinfo, e))
493 return 0;
494 } else {
495 /* ... this is a fallthru */
496 newpos = pos + e->next_offset;
497 if (newpos >= newinfo->size)
498 return 0;
499 }
500 e = (struct ip6t_entry *)
501 (entry0 + newpos);
502 e->counters.pcnt = pos;
503 pos = newpos;
504 }
505 }
506 next: ;
507 }
508 return 1;
509 }
510
511 static void cleanup_match(struct xt_entry_match *m, struct net *net)
512 {
513 struct xt_mtdtor_param par;
514
515 par.net = net;
516 par.match = m->u.kernel.match;
517 par.matchinfo = m->data;
518 par.family = NFPROTO_IPV6;
519 if (par.match->destroy != NULL)
520 par.match->destroy(&par);
521 module_put(par.match->me);
522 }
523
524 static int check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
525 {
526 const struct ip6t_ip6 *ipv6 = par->entryinfo;
527
528 par->match = m->u.kernel.match;
529 par->matchinfo = m->data;
530
531 return xt_check_match(par, m->u.match_size - sizeof(*m),
532 ipv6->proto, ipv6->invflags & IP6T_INV_PROTO);
533 }
534
535 static int
536 find_check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
537 {
538 struct xt_match *match;
539 int ret;
540
541 match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name,
542 m->u.user.revision);
543 if (IS_ERR(match))
544 return PTR_ERR(match);
545
546 m->u.kernel.match = match;
547
548 ret = check_match(m, par);
549 if (ret)
550 goto err;
551
552 return 0;
553 err:
554 module_put(m->u.kernel.match->me);
555 return ret;
556 }
557
558 static int check_target(struct ip6t_entry *e, struct net *net, const char *name)
559 {
560 struct xt_entry_target *t = ip6t_get_target(e);
561 struct xt_tgchk_param par = {
562 .net = net,
563 .table = name,
564 .entryinfo = e,
565 .target = t->u.kernel.target,
566 .targinfo = t->data,
567 .hook_mask = e->comefrom,
568 .family = NFPROTO_IPV6,
569 };
570
571 t = ip6t_get_target(e);
572 return xt_check_target(&par, t->u.target_size - sizeof(*t),
573 e->ipv6.proto,
574 e->ipv6.invflags & IP6T_INV_PROTO);
575 }
576
577 static int
578 find_check_entry(struct ip6t_entry *e, struct net *net, const char *name,
579 unsigned int size)
580 {
581 struct xt_entry_target *t;
582 struct xt_target *target;
583 int ret;
584 unsigned int j;
585 struct xt_mtchk_param mtpar;
586 struct xt_entry_match *ematch;
587 unsigned long pcnt;
588
589 pcnt = xt_percpu_counter_alloc();
590 if (IS_ERR_VALUE(pcnt))
591 return -ENOMEM;
592 e->counters.pcnt = pcnt;
593
594 j = 0;
595 mtpar.net = net;
596 mtpar.table = name;
597 mtpar.entryinfo = &e->ipv6;
598 mtpar.hook_mask = e->comefrom;
599 mtpar.family = NFPROTO_IPV6;
600 xt_ematch_foreach(ematch, e) {
601 ret = find_check_match(ematch, &mtpar);
602 if (ret != 0)
603 goto cleanup_matches;
604 ++j;
605 }
606
607 t = ip6t_get_target(e);
608 target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name,
609 t->u.user.revision);
610 if (IS_ERR(target)) {
611 ret = PTR_ERR(target);
612 goto cleanup_matches;
613 }
614 t->u.kernel.target = target;
615
616 ret = check_target(e, net, name);
617 if (ret)
618 goto err;
619 return 0;
620 err:
621 module_put(t->u.kernel.target->me);
622 cleanup_matches:
623 xt_ematch_foreach(ematch, e) {
624 if (j-- == 0)
625 break;
626 cleanup_match(ematch, net);
627 }
628
629 xt_percpu_counter_free(e->counters.pcnt);
630
631 return ret;
632 }
633
634 static bool check_underflow(const struct ip6t_entry *e)
635 {
636 const struct xt_entry_target *t;
637 unsigned int verdict;
638
639 if (!unconditional(e))
640 return false;
641 t = ip6t_get_target_c(e);
642 if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
643 return false;
644 verdict = ((struct xt_standard_target *)t)->verdict;
645 verdict = -verdict - 1;
646 return verdict == NF_DROP || verdict == NF_ACCEPT;
647 }
648
649 static int
650 check_entry_size_and_hooks(struct ip6t_entry *e,
651 struct xt_table_info *newinfo,
652 const unsigned char *base,
653 const unsigned char *limit,
654 const unsigned int *hook_entries,
655 const unsigned int *underflows,
656 unsigned int valid_hooks)
657 {
658 unsigned int h;
659 int err;
660
661 if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0 ||
662 (unsigned char *)e + sizeof(struct ip6t_entry) >= limit ||
663 (unsigned char *)e + e->next_offset > limit)
664 return -EINVAL;
665
666 if (e->next_offset
667 < sizeof(struct ip6t_entry) + sizeof(struct xt_entry_target))
668 return -EINVAL;
669
670 if (!ip6_checkentry(&e->ipv6))
671 return -EINVAL;
672
673 err = xt_check_entry_offsets(e, e->elems, e->target_offset,
674 e->next_offset);
675 if (err)
676 return err;
677
678 /* Check hooks & underflows */
679 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
680 if (!(valid_hooks & (1 << h)))
681 continue;
682 if ((unsigned char *)e - base == hook_entries[h])
683 newinfo->hook_entry[h] = hook_entries[h];
684 if ((unsigned char *)e - base == underflows[h]) {
685 if (!check_underflow(e))
686 return -EINVAL;
687
688 newinfo->underflow[h] = underflows[h];
689 }
690 }
691
692 /* Clear counters and comefrom */
693 e->counters = ((struct xt_counters) { 0, 0 });
694 e->comefrom = 0;
695 return 0;
696 }
697
698 static void cleanup_entry(struct ip6t_entry *e, struct net *net)
699 {
700 struct xt_tgdtor_param par;
701 struct xt_entry_target *t;
702 struct xt_entry_match *ematch;
703
704 /* Cleanup all matches */
705 xt_ematch_foreach(ematch, e)
706 cleanup_match(ematch, net);
707 t = ip6t_get_target(e);
708
709 par.net = net;
710 par.target = t->u.kernel.target;
711 par.targinfo = t->data;
712 par.family = NFPROTO_IPV6;
713 if (par.target->destroy != NULL)
714 par.target->destroy(&par);
715 module_put(par.target->me);
716
717 xt_percpu_counter_free(e->counters.pcnt);
718 }
719
720 /* Checks and translates the user-supplied table segment (held in
721 newinfo) */
722 static int
723 translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
724 const struct ip6t_replace *repl)
725 {
726 struct ip6t_entry *iter;
727 unsigned int i;
728 int ret = 0;
729
730 newinfo->size = repl->size;
731 newinfo->number = repl->num_entries;
732
733 /* Init all hooks to impossible value. */
734 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
735 newinfo->hook_entry[i] = 0xFFFFFFFF;
736 newinfo->underflow[i] = 0xFFFFFFFF;
737 }
738
739 i = 0;
740 /* Walk through entries, checking offsets. */
741 xt_entry_foreach(iter, entry0, newinfo->size) {
742 ret = check_entry_size_and_hooks(iter, newinfo, entry0,
743 entry0 + repl->size,
744 repl->hook_entry,
745 repl->underflow,
746 repl->valid_hooks);
747 if (ret != 0)
748 return ret;
749 ++i;
750 if (strcmp(ip6t_get_target(iter)->u.user.name,
751 XT_ERROR_TARGET) == 0)
752 ++newinfo->stacksize;
753 }
754
755 if (i != repl->num_entries)
756 return -EINVAL;
757
758 /* Check hooks all assigned */
759 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
760 /* Only hooks which are valid */
761 if (!(repl->valid_hooks & (1 << i)))
762 continue;
763 if (newinfo->hook_entry[i] == 0xFFFFFFFF)
764 return -EINVAL;
765 if (newinfo->underflow[i] == 0xFFFFFFFF)
766 return -EINVAL;
767 }
768
769 if (!mark_source_chains(newinfo, repl->valid_hooks, entry0))
770 return -ELOOP;
771
772 /* Finally, each sanity check must pass */
773 i = 0;
774 xt_entry_foreach(iter, entry0, newinfo->size) {
775 ret = find_check_entry(iter, net, repl->name, repl->size);
776 if (ret != 0)
777 break;
778 ++i;
779 }
780
781 if (ret != 0) {
782 xt_entry_foreach(iter, entry0, newinfo->size) {
783 if (i-- == 0)
784 break;
785 cleanup_entry(iter, net);
786 }
787 return ret;
788 }
789
790 return ret;
791 }
792
793 static void
794 get_counters(const struct xt_table_info *t,
795 struct xt_counters counters[])
796 {
797 struct ip6t_entry *iter;
798 unsigned int cpu;
799 unsigned int i;
800
801 for_each_possible_cpu(cpu) {
802 seqcount_t *s = &per_cpu(xt_recseq, cpu);
803
804 i = 0;
805 xt_entry_foreach(iter, t->entries, t->size) {
806 struct xt_counters *tmp;
807 u64 bcnt, pcnt;
808 unsigned int start;
809
810 tmp = xt_get_per_cpu_counter(&iter->counters, cpu);
811 do {
812 start = read_seqcount_begin(s);
813 bcnt = tmp->bcnt;
814 pcnt = tmp->pcnt;
815 } while (read_seqcount_retry(s, start));
816
817 ADD_COUNTER(counters[i], bcnt, pcnt);
818 ++i;
819 }
820 }
821 }
822
823 static struct xt_counters *alloc_counters(const struct xt_table *table)
824 {
825 unsigned int countersize;
826 struct xt_counters *counters;
827 const struct xt_table_info *private = table->private;
828
829 /* We need atomic snapshot of counters: rest doesn't change
830 (other than comefrom, which userspace doesn't care
831 about). */
832 countersize = sizeof(struct xt_counters) * private->number;
833 counters = vzalloc(countersize);
834
835 if (counters == NULL)
836 return ERR_PTR(-ENOMEM);
837
838 get_counters(private, counters);
839
840 return counters;
841 }
842
843 static int
844 copy_entries_to_user(unsigned int total_size,
845 const struct xt_table *table,
846 void __user *userptr)
847 {
848 unsigned int off, num;
849 const struct ip6t_entry *e;
850 struct xt_counters *counters;
851 const struct xt_table_info *private = table->private;
852 int ret = 0;
853 const void *loc_cpu_entry;
854
855 counters = alloc_counters(table);
856 if (IS_ERR(counters))
857 return PTR_ERR(counters);
858
859 loc_cpu_entry = private->entries;
860 if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
861 ret = -EFAULT;
862 goto free_counters;
863 }
864
865 /* FIXME: use iterator macros --RR */
866 /* ... then go back and fix counters and names */
867 for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
868 unsigned int i;
869 const struct xt_entry_match *m;
870 const struct xt_entry_target *t;
871
872 e = (struct ip6t_entry *)(loc_cpu_entry + off);
873 if (copy_to_user(userptr + off
874 + offsetof(struct ip6t_entry, counters),
875 &counters[num],
876 sizeof(counters[num])) != 0) {
877 ret = -EFAULT;
878 goto free_counters;
879 }
880
881 for (i = sizeof(struct ip6t_entry);
882 i < e->target_offset;
883 i += m->u.match_size) {
884 m = (void *)e + i;
885
886 if (copy_to_user(userptr + off + i
887 + offsetof(struct xt_entry_match,
888 u.user.name),
889 m->u.kernel.match->name,
890 strlen(m->u.kernel.match->name)+1)
891 != 0) {
892 ret = -EFAULT;
893 goto free_counters;
894 }
895 }
896
897 t = ip6t_get_target_c(e);
898 if (copy_to_user(userptr + off + e->target_offset
899 + offsetof(struct xt_entry_target,
900 u.user.name),
901 t->u.kernel.target->name,
902 strlen(t->u.kernel.target->name)+1) != 0) {
903 ret = -EFAULT;
904 goto free_counters;
905 }
906 }
907
908 free_counters:
909 vfree(counters);
910 return ret;
911 }
912
913 #ifdef CONFIG_COMPAT
914 static void compat_standard_from_user(void *dst, const void *src)
915 {
916 int v = *(compat_int_t *)src;
917
918 if (v > 0)
919 v += xt_compat_calc_jump(AF_INET6, v);
920 memcpy(dst, &v, sizeof(v));
921 }
922
923 static int compat_standard_to_user(void __user *dst, const void *src)
924 {
925 compat_int_t cv = *(int *)src;
926
927 if (cv > 0)
928 cv -= xt_compat_calc_jump(AF_INET6, cv);
929 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
930 }
931
932 static int compat_calc_entry(const struct ip6t_entry *e,
933 const struct xt_table_info *info,
934 const void *base, struct xt_table_info *newinfo)
935 {
936 const struct xt_entry_match *ematch;
937 const struct xt_entry_target *t;
938 unsigned int entry_offset;
939 int off, i, ret;
940
941 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
942 entry_offset = (void *)e - base;
943 xt_ematch_foreach(ematch, e)
944 off += xt_compat_match_offset(ematch->u.kernel.match);
945 t = ip6t_get_target_c(e);
946 off += xt_compat_target_offset(t->u.kernel.target);
947 newinfo->size -= off;
948 ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
949 if (ret)
950 return ret;
951
952 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
953 if (info->hook_entry[i] &&
954 (e < (struct ip6t_entry *)(base + info->hook_entry[i])))
955 newinfo->hook_entry[i] -= off;
956 if (info->underflow[i] &&
957 (e < (struct ip6t_entry *)(base + info->underflow[i])))
958 newinfo->underflow[i] -= off;
959 }
960 return 0;
961 }
962
963 static int compat_table_info(const struct xt_table_info *info,
964 struct xt_table_info *newinfo)
965 {
966 struct ip6t_entry *iter;
967 const void *loc_cpu_entry;
968 int ret;
969
970 if (!newinfo || !info)
971 return -EINVAL;
972
973 /* we dont care about newinfo->entries */
974 memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
975 newinfo->initial_entries = 0;
976 loc_cpu_entry = info->entries;
977 xt_compat_init_offsets(AF_INET6, info->number);
978 xt_entry_foreach(iter, loc_cpu_entry, info->size) {
979 ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
980 if (ret != 0)
981 return ret;
982 }
983 return 0;
984 }
985 #endif
986
987 static int get_info(struct net *net, void __user *user,
988 const int *len, int compat)
989 {
990 char name[XT_TABLE_MAXNAMELEN];
991 struct xt_table *t;
992 int ret;
993
994 if (*len != sizeof(struct ip6t_getinfo))
995 return -EINVAL;
996
997 if (copy_from_user(name, user, sizeof(name)) != 0)
998 return -EFAULT;
999
1000 name[XT_TABLE_MAXNAMELEN-1] = '\0';
1001 #ifdef CONFIG_COMPAT
1002 if (compat)
1003 xt_compat_lock(AF_INET6);
1004 #endif
1005 t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
1006 "ip6table_%s", name);
1007 if (!IS_ERR_OR_NULL(t)) {
1008 struct ip6t_getinfo info;
1009 const struct xt_table_info *private = t->private;
1010 #ifdef CONFIG_COMPAT
1011 struct xt_table_info tmp;
1012
1013 if (compat) {
1014 ret = compat_table_info(private, &tmp);
1015 xt_compat_flush_offsets(AF_INET6);
1016 private = &tmp;
1017 }
1018 #endif
1019 memset(&info, 0, sizeof(info));
1020 info.valid_hooks = t->valid_hooks;
1021 memcpy(info.hook_entry, private->hook_entry,
1022 sizeof(info.hook_entry));
1023 memcpy(info.underflow, private->underflow,
1024 sizeof(info.underflow));
1025 info.num_entries = private->number;
1026 info.size = private->size;
1027 strcpy(info.name, name);
1028
1029 if (copy_to_user(user, &info, *len) != 0)
1030 ret = -EFAULT;
1031 else
1032 ret = 0;
1033
1034 xt_table_unlock(t);
1035 module_put(t->me);
1036 } else
1037 ret = t ? PTR_ERR(t) : -ENOENT;
1038 #ifdef CONFIG_COMPAT
1039 if (compat)
1040 xt_compat_unlock(AF_INET6);
1041 #endif
1042 return ret;
1043 }
1044
1045 static int
1046 get_entries(struct net *net, struct ip6t_get_entries __user *uptr,
1047 const int *len)
1048 {
1049 int ret;
1050 struct ip6t_get_entries get;
1051 struct xt_table *t;
1052
1053 if (*len < sizeof(get))
1054 return -EINVAL;
1055 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1056 return -EFAULT;
1057 if (*len != sizeof(struct ip6t_get_entries) + get.size)
1058 return -EINVAL;
1059
1060 get.name[sizeof(get.name) - 1] = '\0';
1061
1062 t = xt_find_table_lock(net, AF_INET6, get.name);
1063 if (!IS_ERR_OR_NULL(t)) {
1064 struct xt_table_info *private = t->private;
1065 if (get.size == private->size)
1066 ret = copy_entries_to_user(private->size,
1067 t, uptr->entrytable);
1068 else
1069 ret = -EAGAIN;
1070
1071 module_put(t->me);
1072 xt_table_unlock(t);
1073 } else
1074 ret = t ? PTR_ERR(t) : -ENOENT;
1075
1076 return ret;
1077 }
1078
1079 static int
1080 __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
1081 struct xt_table_info *newinfo, unsigned int num_counters,
1082 void __user *counters_ptr)
1083 {
1084 int ret;
1085 struct xt_table *t;
1086 struct xt_table_info *oldinfo;
1087 struct xt_counters *counters;
1088 struct ip6t_entry *iter;
1089
1090 ret = 0;
1091 counters = vzalloc(num_counters * sizeof(struct xt_counters));
1092 if (!counters) {
1093 ret = -ENOMEM;
1094 goto out;
1095 }
1096
1097 t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
1098 "ip6table_%s", name);
1099 if (IS_ERR_OR_NULL(t)) {
1100 ret = t ? PTR_ERR(t) : -ENOENT;
1101 goto free_newinfo_counters_untrans;
1102 }
1103
1104 /* You lied! */
1105 if (valid_hooks != t->valid_hooks) {
1106 ret = -EINVAL;
1107 goto put_module;
1108 }
1109
1110 oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
1111 if (!oldinfo)
1112 goto put_module;
1113
1114 /* Update module usage count based on number of rules */
1115 if ((oldinfo->number > oldinfo->initial_entries) ||
1116 (newinfo->number <= oldinfo->initial_entries))
1117 module_put(t->me);
1118 if ((oldinfo->number > oldinfo->initial_entries) &&
1119 (newinfo->number <= oldinfo->initial_entries))
1120 module_put(t->me);
1121
1122 /* Get the old counters, and synchronize with replace */
1123 get_counters(oldinfo, counters);
1124
1125 /* Decrease module usage counts and free resource */
1126 xt_entry_foreach(iter, oldinfo->entries, oldinfo->size)
1127 cleanup_entry(iter, net);
1128
1129 xt_free_table_info(oldinfo);
1130 if (copy_to_user(counters_ptr, counters,
1131 sizeof(struct xt_counters) * num_counters) != 0) {
1132 /* Silent error, can't fail, new table is already in place */
1133 net_warn_ratelimited("ip6tables: counters copy to user failed while replacing table\n");
1134 }
1135 vfree(counters);
1136 xt_table_unlock(t);
1137 return ret;
1138
1139 put_module:
1140 module_put(t->me);
1141 xt_table_unlock(t);
1142 free_newinfo_counters_untrans:
1143 vfree(counters);
1144 out:
1145 return ret;
1146 }
1147
1148 static int
1149 do_replace(struct net *net, const void __user *user, unsigned int len)
1150 {
1151 int ret;
1152 struct ip6t_replace tmp;
1153 struct xt_table_info *newinfo;
1154 void *loc_cpu_entry;
1155 struct ip6t_entry *iter;
1156
1157 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1158 return -EFAULT;
1159
1160 /* overflow check */
1161 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1162 return -ENOMEM;
1163 if (tmp.num_counters == 0)
1164 return -EINVAL;
1165
1166 tmp.name[sizeof(tmp.name)-1] = 0;
1167
1168 newinfo = xt_alloc_table_info(tmp.size);
1169 if (!newinfo)
1170 return -ENOMEM;
1171
1172 loc_cpu_entry = newinfo->entries;
1173 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1174 tmp.size) != 0) {
1175 ret = -EFAULT;
1176 goto free_newinfo;
1177 }
1178
1179 ret = translate_table(net, newinfo, loc_cpu_entry, &tmp);
1180 if (ret != 0)
1181 goto free_newinfo;
1182
1183 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1184 tmp.num_counters, tmp.counters);
1185 if (ret)
1186 goto free_newinfo_untrans;
1187 return 0;
1188
1189 free_newinfo_untrans:
1190 xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1191 cleanup_entry(iter, net);
1192 free_newinfo:
1193 xt_free_table_info(newinfo);
1194 return ret;
1195 }
1196
/* IP6T_SO_SET_ADD_COUNTERS handler: add user-supplied byte/packet counts
 * to the live per-rule counters of the named table.
 *
 * @compat: non-zero when called from the 32-bit compat sockopt path; it is
 *          forwarded to xt_copy_counters_from_user() which parses the
 *          compat layout of struct xt_counters_info.
 *
 * Returns 0 on success or a negative errno.
 */
static int
do_add_counters(struct net *net, const void __user *user, unsigned int len,
		int compat)
{
	unsigned int i;
	struct xt_counters_info tmp;
	struct xt_counters *paddc;
	struct xt_table *t;
	const struct xt_table_info *private;
	int ret = 0;
	struct ip6t_entry *iter;
	unsigned int addend;

	/* Copies and validates the counter array; returns a vmalloc'd copy. */
	paddc = xt_copy_counters_from_user(user, len, &tmp, compat);
	if (IS_ERR(paddc))
		return PTR_ERR(paddc);
	t = xt_find_table_lock(net, AF_INET6, tmp.name);
	if (IS_ERR_OR_NULL(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free;
	}

	/* Keep the packet path on this CPU from racing the update. */
	local_bh_disable();
	private = t->private;
	if (private->number != tmp.num_counters) {
		ret = -EINVAL;
		goto unlock_up_free;
	}

	i = 0;
	/* Enter the per-CPU write sequence so readers see consistent pairs. */
	addend = xt_write_recseq_begin();
	xt_entry_foreach(iter, private->entries, private->size) {
		struct xt_counters *tmp;

		tmp = xt_get_this_cpu_counter(&iter->counters);
		ADD_COUNTER(*tmp, paddc[i].bcnt, paddc[i].pcnt);
		++i;
	}
	xt_write_recseq_end(addend);
unlock_up_free:
	local_bh_enable();
	xt_table_unlock(t);
	module_put(t->me);
free:
	vfree(paddc);

	return ret;
}
1245
1246 #ifdef CONFIG_COMPAT
/* 32-bit userspace view of struct ip6t_replace.  Field order and widths
 * are ABI; only the counters pointer and the trailing entries differ in
 * layout from the native structure.
 */
struct compat_ip6t_replace {
	char name[XT_TABLE_MAXNAMELEN];
	u32 valid_hooks;
	u32 num_entries;
	u32 size;
	u32 hook_entry[NF_INET_NUMHOOKS];
	u32 underflow[NF_INET_NUMHOOKS];
	u32 num_counters;
	compat_uptr_t counters;	/* struct xt_counters * */
	struct compat_ip6t_entry entries[0];
};
1258
/* Copy one native rule to userspace in compat (32-bit) layout.
 *
 * @dstptr/@size: cursor into and remaining room in the user buffer; both
 *                are advanced/shrunk as matches and the target convert.
 * @counters/@i:  counter snapshot and this entry's index within it.
 *
 * Returns 0 on success or a negative errno.
 */
static int
compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr,
			  unsigned int *size, struct xt_counters *counters,
			  unsigned int i)
{
	struct xt_entry_target *t;
	struct compat_ip6t_entry __user *ce;
	u_int16_t target_offset, next_offset;
	compat_uint_t origsize;
	const struct xt_entry_match *ematch;
	int ret = 0;

	origsize = *size;
	ce = (struct compat_ip6t_entry __user *)*dstptr;
	/* Raw header copy first; offsets are patched up at the end. */
	if (copy_to_user(ce, e, sizeof(struct ip6t_entry)) != 0 ||
	    copy_to_user(&ce->counters, &counters[i],
			 sizeof(counters[i])) != 0)
		return -EFAULT;

	*dstptr += sizeof(struct compat_ip6t_entry);
	*size -= sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);

	/* Each conversion advances dstptr and shrinks *size. */
	xt_ematch_foreach(ematch, e) {
		ret = xt_compat_match_to_user(ematch, dstptr, size);
		if (ret != 0)
			return ret;
	}
	/* origsize - *size is the total native/compat shrinkage so far. */
	target_offset = e->target_offset - (origsize - *size);
	t = ip6t_get_target(e);
	ret = xt_compat_target_to_user(t, dstptr, size);
	if (ret)
		return ret;
	next_offset = e->next_offset - (origsize - *size);
	if (put_user(target_offset, &ce->target_offset) != 0 ||
	    put_user(next_offset, &ce->next_offset) != 0)
		return -EFAULT;
	return 0;
}
1297
1298 static int
1299 compat_find_calc_match(struct xt_entry_match *m,
1300 const struct ip6t_ip6 *ipv6,
1301 int *size)
1302 {
1303 struct xt_match *match;
1304
1305 match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name,
1306 m->u.user.revision);
1307 if (IS_ERR(match))
1308 return PTR_ERR(match);
1309
1310 m->u.kernel.match = match;
1311 *size += xt_compat_match_offset(match);
1312 return 0;
1313 }
1314
1315 static void compat_release_entry(struct compat_ip6t_entry *e)
1316 {
1317 struct xt_entry_target *t;
1318 struct xt_entry_match *ematch;
1319
1320 /* Cleanup all matches */
1321 xt_ematch_foreach(ematch, e)
1322 module_put(ematch->u.kernel.match->me);
1323 t = compat_ip6t_get_target(e);
1324 module_put(t->u.kernel.target->me);
1325 }
1326
/* Validate one compat entry (alignment, bounds, offsets), resolve its
 * matches and target, and record the native/compat size delta for this
 * entry's offset so the blob can be converted in a second pass.
 *
 * On success a module reference is held on every match and the target;
 * on failure everything acquired so far is released again.
 */
static int
check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
				  struct xt_table_info *newinfo,
				  unsigned int *size,
				  const unsigned char *base,
				  const unsigned char *limit)
{
	struct xt_entry_match *ematch;
	struct xt_entry_target *t;
	struct xt_target *target;
	unsigned int entry_offset;
	unsigned int j;
	int ret, off;

	/* Entry must be aligned and lie fully inside the blob. */
	if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0 ||
	    (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit ||
	    (unsigned char *)e + e->next_offset > limit)
		return -EINVAL;

	/* Must have room for at least the entry header plus a target. */
	if (e->next_offset < sizeof(struct compat_ip6t_entry) +
			     sizeof(struct compat_xt_entry_target))
		return -EINVAL;

	if (!ip6_checkentry(&e->ipv6))
		return -EINVAL;

	ret = xt_compat_check_entry_offsets(e, e->elems,
					    e->target_offset, e->next_offset);
	if (ret)
		return ret;

	/* off accumulates how much larger the native layout will be. */
	off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
	entry_offset = (void *)e - (void *)base;
	j = 0;	/* number of matches whose modules we currently hold */
	xt_ematch_foreach(ematch, e) {
		ret = compat_find_calc_match(ematch, &e->ipv6, &off);
		if (ret != 0)
			goto release_matches;
		++j;
	}

	t = compat_ip6t_get_target(e);
	target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name,
					t->u.user.revision);
	if (IS_ERR(target)) {
		ret = PTR_ERR(target);
		goto release_matches;
	}
	t->u.kernel.target = target;

	off += xt_compat_target_offset(target);
	*size += off;
	ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
	if (ret)
		goto out;

	return 0;

out:
	module_put(t->u.kernel.target->me);
release_matches:
	/* Only release the first j matches — the rest were never pinned. */
	xt_ematch_foreach(ematch, e) {
		if (j-- == 0)
			break;
		module_put(ematch->u.kernel.match->me);
	}
	return ret;
}
1395
/* Convert one already-validated compat entry to native layout at *dstptr.
 * Cannot fail: matches and target were verified and their modules pinned
 * by check_compat_entry_size_and_hooks().
 */
static void
compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
			    unsigned int *size,
			    struct xt_table_info *newinfo, unsigned char *base)
{
	struct xt_entry_target *t;
	struct ip6t_entry *de;
	unsigned int origsize;
	int h;
	struct xt_entry_match *ematch;

	origsize = *size;
	de = (struct ip6t_entry *)*dstptr;
	memcpy(de, e, sizeof(struct ip6t_entry));
	memcpy(&de->counters, &e->counters, sizeof(e->counters));

	*dstptr += sizeof(struct ip6t_entry);
	*size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);

	xt_ematch_foreach(ematch, e)
		xt_compat_match_from_user(ematch, dstptr, size);

	/* origsize - *size is (negative) growth; offsets expand by it. */
	de->target_offset = e->target_offset - (origsize - *size);
	t = compat_ip6t_get_target(e);
	xt_compat_target_from_user(t, dstptr, size);

	de->next_offset = e->next_offset - (origsize - *size);
	/* Shift any hook entry/underflow offsets that point past us. */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if ((unsigned char *)de - base < newinfo->hook_entry[h])
			newinfo->hook_entry[h] -= origsize - *size;
		if ((unsigned char *)de - base < newinfo->underflow[h])
			newinfo->underflow[h] -= origsize - *size;
	}
}
1430
/* Convert a complete compat replacement blob into a native xt_table_info.
 *
 * Pass 1 (under the compat lock): bounds-check every compat entry, pin
 * its extensions and build the per-entry offset delta table.
 * Pass 2: copy each entry into a freshly sized native blob, then hand the
 * result to translate_table() for the usual native validation.
 *
 * On success *pinfo/*pentry0 are replaced with the native versions and
 * the old ones freed.
 */
static int
translate_compat_table(struct net *net,
		       struct xt_table_info **pinfo,
		       void **pentry0,
		       const struct compat_ip6t_replace *compatr)
{
	unsigned int i, j;
	struct xt_table_info *newinfo, *info;
	void *pos, *entry0, *entry1;
	struct compat_ip6t_entry *iter0;
	struct ip6t_replace repl;
	unsigned int size;
	int ret = 0;

	info = *pinfo;
	entry0 = *pentry0;
	size = compatr->size;
	info->number = compatr->num_entries;

	j = 0;	/* entries checked so far (their module refs are held) */
	xt_compat_lock(AF_INET6);
	xt_compat_init_offsets(AF_INET6, compatr->num_entries);
	/* Walk through entries, checking offsets. */
	xt_entry_foreach(iter0, entry0, compatr->size) {
		ret = check_compat_entry_size_and_hooks(iter0, info, &size,
							entry0,
							entry0 + compatr->size);
		if (ret != 0)
			goto out_unlock;
		++j;
	}

	ret = -EINVAL;
	if (j != compatr->num_entries)
		goto out_unlock;

	ret = -ENOMEM;
	newinfo = xt_alloc_table_info(size);
	if (!newinfo)
		goto out_unlock;

	newinfo->number = compatr->num_entries;
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = compatr->hook_entry[i];
		newinfo->underflow[i] = compatr->underflow[i];
	}
	entry1 = newinfo->entries;
	pos = entry1;
	size = compatr->size;
	/* Pass 2: cannot fail, everything was verified above. */
	xt_entry_foreach(iter0, entry0, compatr->size)
		compat_copy_entry_from_user(iter0, &pos, &size,
					    newinfo, entry1);

	/* all module references in entry0 are now gone. */
	xt_compat_flush_offsets(AF_INET6);
	xt_compat_unlock(AF_INET6);

	memcpy(&repl, compatr, sizeof(*compatr));

	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		repl.hook_entry[i] = newinfo->hook_entry[i];
		repl.underflow[i] = newinfo->underflow[i];
	}

	repl.num_counters = 0;
	repl.counters = NULL;
	repl.size = newinfo->size;
	ret = translate_table(net, newinfo, entry1, &repl);
	if (ret)
		goto free_newinfo;

	*pinfo = newinfo;
	*pentry0 = entry1;
	xt_free_table_info(info);
	return 0;

free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
out_unlock:
	xt_compat_flush_offsets(AF_INET6);
	xt_compat_unlock(AF_INET6);
	/* Release only the j entries whose extensions we pinned. */
	xt_entry_foreach(iter0, entry0, compatr->size) {
		if (j-- == 0)
			break;
		compat_release_entry(iter0);
	}
	return ret;
}
1520
1521 static int
1522 compat_do_replace(struct net *net, void __user *user, unsigned int len)
1523 {
1524 int ret;
1525 struct compat_ip6t_replace tmp;
1526 struct xt_table_info *newinfo;
1527 void *loc_cpu_entry;
1528 struct ip6t_entry *iter;
1529
1530 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1531 return -EFAULT;
1532
1533 /* overflow check */
1534 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1535 return -ENOMEM;
1536 if (tmp.num_counters == 0)
1537 return -EINVAL;
1538
1539 tmp.name[sizeof(tmp.name)-1] = 0;
1540
1541 newinfo = xt_alloc_table_info(tmp.size);
1542 if (!newinfo)
1543 return -ENOMEM;
1544
1545 loc_cpu_entry = newinfo->entries;
1546 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1547 tmp.size) != 0) {
1548 ret = -EFAULT;
1549 goto free_newinfo;
1550 }
1551
1552 ret = translate_compat_table(net, &newinfo, &loc_cpu_entry, &tmp);
1553 if (ret != 0)
1554 goto free_newinfo;
1555
1556 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1557 tmp.num_counters, compat_ptr(tmp.counters));
1558 if (ret)
1559 goto free_newinfo_untrans;
1560 return 0;
1561
1562 free_newinfo_untrans:
1563 xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1564 cleanup_entry(iter, net);
1565 free_newinfo:
1566 xt_free_table_info(newinfo);
1567 return ret;
1568 }
1569
1570 static int
1571 compat_do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user,
1572 unsigned int len)
1573 {
1574 int ret;
1575
1576 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1577 return -EPERM;
1578
1579 switch (cmd) {
1580 case IP6T_SO_SET_REPLACE:
1581 ret = compat_do_replace(sock_net(sk), user, len);
1582 break;
1583
1584 case IP6T_SO_SET_ADD_COUNTERS:
1585 ret = do_add_counters(sock_net(sk), user, len, 1);
1586 break;
1587
1588 default:
1589 ret = -EINVAL;
1590 }
1591
1592 return ret;
1593 }
1594
/* 32-bit userspace argument block for IP6T_SO_GET_ENTRIES; the dump is
 * written into the trailing entrytable.
 */
struct compat_ip6t_get_entries {
	char name[XT_TABLE_MAXNAMELEN];
	compat_uint_t size;
	struct compat_ip6t_entry entrytable[0];
};
1600
1601 static int
1602 compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
1603 void __user *userptr)
1604 {
1605 struct xt_counters *counters;
1606 const struct xt_table_info *private = table->private;
1607 void __user *pos;
1608 unsigned int size;
1609 int ret = 0;
1610 unsigned int i = 0;
1611 struct ip6t_entry *iter;
1612
1613 counters = alloc_counters(table);
1614 if (IS_ERR(counters))
1615 return PTR_ERR(counters);
1616
1617 pos = userptr;
1618 size = total_size;
1619 xt_entry_foreach(iter, private->entries, total_size) {
1620 ret = compat_copy_entry_to_user(iter, &pos,
1621 &size, counters, i++);
1622 if (ret != 0)
1623 break;
1624 }
1625
1626 vfree(counters);
1627 return ret;
1628 }
1629
/* IP6T_SO_GET_ENTRIES (compat): dump the named table in 32-bit layout.
 * get.size must equal the table's current compat size; otherwise -EAGAIN
 * tells userspace to re-query IP6T_SO_GET_INFO and retry.
 */
static int
compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr,
		   int *len)
{
	int ret;
	struct compat_ip6t_get_entries get;
	struct xt_table *t;

	if (*len < sizeof(get))
		return -EINVAL;

	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;

	if (*len != sizeof(struct compat_ip6t_get_entries) + get.size)
		return -EINVAL;

	get.name[sizeof(get.name) - 1] = '\0';

	/* The compat offset table is rebuilt while dumping; serialize. */
	xt_compat_lock(AF_INET6);
	t = xt_find_table_lock(net, AF_INET6, get.name);
	if (!IS_ERR_OR_NULL(t)) {
		const struct xt_table_info *private = t->private;
		struct xt_table_info info;
		ret = compat_table_info(private, &info);
		if (!ret && get.size == info.size)
			ret = compat_copy_entries_to_user(private->size,
							  t, uptr->entrytable);
		else if (!ret)
			ret = -EAGAIN;

		xt_compat_flush_offsets(AF_INET6);
		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;

	xt_compat_unlock(AF_INET6);
	return ret;
}
1670
1671 static int do_ip6t_get_ctl(struct sock *, int, void __user *, int *);
1672
1673 static int
1674 compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1675 {
1676 int ret;
1677
1678 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1679 return -EPERM;
1680
1681 switch (cmd) {
1682 case IP6T_SO_GET_INFO:
1683 ret = get_info(sock_net(sk), user, len, 1);
1684 break;
1685 case IP6T_SO_GET_ENTRIES:
1686 ret = compat_get_entries(sock_net(sk), user, len);
1687 break;
1688 default:
1689 ret = do_ip6t_get_ctl(sk, cmd, user, len);
1690 }
1691 return ret;
1692 }
1693 #endif
1694
1695 static int
1696 do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
1697 {
1698 int ret;
1699
1700 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1701 return -EPERM;
1702
1703 switch (cmd) {
1704 case IP6T_SO_SET_REPLACE:
1705 ret = do_replace(sock_net(sk), user, len);
1706 break;
1707
1708 case IP6T_SO_SET_ADD_COUNTERS:
1709 ret = do_add_counters(sock_net(sk), user, len, 0);
1710 break;
1711
1712 default:
1713 ret = -EINVAL;
1714 }
1715
1716 return ret;
1717 }
1718
1719 static int
1720 do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1721 {
1722 int ret;
1723
1724 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1725 return -EPERM;
1726
1727 switch (cmd) {
1728 case IP6T_SO_GET_INFO:
1729 ret = get_info(sock_net(sk), user, len, 0);
1730 break;
1731
1732 case IP6T_SO_GET_ENTRIES:
1733 ret = get_entries(sock_net(sk), user, len);
1734 break;
1735
1736 case IP6T_SO_GET_REVISION_MATCH:
1737 case IP6T_SO_GET_REVISION_TARGET: {
1738 struct xt_get_revision rev;
1739 int target;
1740
1741 if (*len != sizeof(rev)) {
1742 ret = -EINVAL;
1743 break;
1744 }
1745 if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
1746 ret = -EFAULT;
1747 break;
1748 }
1749 rev.name[sizeof(rev.name)-1] = 0;
1750
1751 if (cmd == IP6T_SO_GET_REVISION_TARGET)
1752 target = 1;
1753 else
1754 target = 0;
1755
1756 try_then_request_module(xt_find_revision(AF_INET6, rev.name,
1757 rev.revision,
1758 target, &ret),
1759 "ip6t_%s", rev.name);
1760 break;
1761 }
1762
1763 default:
1764 ret = -EINVAL;
1765 }
1766
1767 return ret;
1768 }
1769
1770 static void __ip6t_unregister_table(struct net *net, struct xt_table *table)
1771 {
1772 struct xt_table_info *private;
1773 void *loc_cpu_entry;
1774 struct module *table_owner = table->me;
1775 struct ip6t_entry *iter;
1776
1777 private = xt_unregister_table(table);
1778
1779 /* Decrease module usage counts and free resources */
1780 loc_cpu_entry = private->entries;
1781 xt_entry_foreach(iter, loc_cpu_entry, private->size)
1782 cleanup_entry(iter, net);
1783 if (private->number > private->initial_entries)
1784 module_put(table_owner);
1785 xt_free_table_info(private);
1786 }
1787
/* Register a new ip6tables table together with its netfilter hooks.
 *
 * *res is published (WRITE_ONCE) before the hooks are registered so the
 * hook callbacks, which dereference it, see a valid table as soon as
 * packets can arrive; on hook-registration failure it is cleared again.
 *
 * Returns 0 on success or a negative errno.
 */
int ip6t_register_table(struct net *net, const struct xt_table *table,
			const struct ip6t_replace *repl,
			const struct nf_hook_ops *ops,
			struct xt_table **res)
{
	int ret;
	struct xt_table_info *newinfo;
	struct xt_table_info bootstrap = {0};
	void *loc_cpu_entry;
	struct xt_table *new_table;

	newinfo = xt_alloc_table_info(repl->size);
	if (!newinfo)
		return -ENOMEM;

	loc_cpu_entry = newinfo->entries;
	memcpy(loc_cpu_entry, repl->entries, repl->size);

	/* Validate the initial ruleset and pin its extensions. */
	ret = translate_table(net, newinfo, loc_cpu_entry, repl);
	if (ret != 0)
		goto out_free;

	new_table = xt_register_table(net, table, &bootstrap, newinfo);
	if (IS_ERR(new_table)) {
		ret = PTR_ERR(new_table);
		goto out_free;
	}

	/* set res now, will see skbs right after nf_register_net_hooks */
	WRITE_ONCE(*res, new_table);

	ret = nf_register_net_hooks(net, ops, hweight32(table->valid_hooks));
	if (ret != 0) {
		__ip6t_unregister_table(net, new_table);
		*res = NULL;
	}

	return ret;

out_free:
	xt_free_table_info(newinfo);
	return ret;
}
1831
/* Public teardown: unhook the table first so no new packets can enter
 * it, then release its rules and memory.
 */
void ip6t_unregister_table(struct net *net, struct xt_table *table,
			   const struct nf_hook_ops *ops)
{
	nf_unregister_net_hooks(net, ops, hweight32(table->valid_hooks));
	__ip6t_unregister_table(net, table);
}
1838
/* Returns true when (type, code) matches the configured type and
 * inclusive code range, with the result optionally inverted.
 */
static inline bool
icmp6_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
		      u_int8_t type, u_int8_t code,
		      bool invert)
{
	bool hit = (type == test_type) &&
		   (code >= min_code) &&
		   (code <= max_code);

	return hit ^ invert;
}
1848
1849 static bool
1850 icmp6_match(const struct sk_buff *skb, struct xt_action_param *par)
1851 {
1852 const struct icmp6hdr *ic;
1853 struct icmp6hdr _icmph;
1854 const struct ip6t_icmp *icmpinfo = par->matchinfo;
1855
1856 /* Must not be a fragment. */
1857 if (par->fragoff != 0)
1858 return false;
1859
1860 ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
1861 if (ic == NULL) {
1862 /* We've been asked to examine this packet, and we
1863 * can't. Hence, no choice but to drop.
1864 */
1865 par->hotdrop = true;
1866 return false;
1867 }
1868
1869 return icmp6_type_code_match(icmpinfo->type,
1870 icmpinfo->code[0],
1871 icmpinfo->code[1],
1872 ic->icmp6_type, ic->icmp6_code,
1873 !!(icmpinfo->invflags&IP6T_ICMP_INV));
1874 }
1875
1876 /* Called when user tries to insert an entry of this type. */
1877 static int icmp6_checkentry(const struct xt_mtchk_param *par)
1878 {
1879 const struct ip6t_icmp *icmpinfo = par->matchinfo;
1880
1881 /* Must specify no unknown invflags */
1882 return (icmpinfo->invflags & ~IP6T_ICMP_INV) ? -EINVAL : 0;
1883 }
1884
/* The built-in targets: standard (NULL) and error.  The standard target
 * carries a plain int verdict; the error target carries a chain name.
 */
static struct xt_target ip6t_builtin_tg[] __read_mostly = {
	{
		.name = XT_STANDARD_TARGET,
		.targetsize = sizeof(int),
		.family = NFPROTO_IPV6,
#ifdef CONFIG_COMPAT
		/* int is a different width for compat userspace. */
		.compatsize = sizeof(compat_int_t),
		.compat_from_user = compat_standard_from_user,
		.compat_to_user = compat_standard_to_user,
#endif
	},
	{
		.name = XT_ERROR_TARGET,
		.target = ip6t_error,
		.targetsize = XT_FUNCTION_MAXNAMELEN,
		.family = NFPROTO_IPV6,
	},
};
1904
/* set/getsockopt interface for the ip6tables userspace tools, with
 * compat handlers for 32-bit binaries on 64-bit kernels.
 */
static struct nf_sockopt_ops ip6t_sockopts = {
	.pf = PF_INET6,
	.set_optmin = IP6T_BASE_CTL,
	.set_optmax = IP6T_SO_SET_MAX+1,
	.set = do_ip6t_set_ctl,
#ifdef CONFIG_COMPAT
	.compat_set = compat_do_ip6t_set_ctl,
#endif
	.get_optmin = IP6T_BASE_CTL,
	.get_optmax = IP6T_SO_GET_MAX+1,
	.get = do_ip6t_get_ctl,
#ifdef CONFIG_COMPAT
	.compat_get = compat_do_ip6t_get_ctl,
#endif
	.owner = THIS_MODULE,
};
1921
/* The single built-in match: ICMPv6 type/code matching. */
static struct xt_match ip6t_builtin_mt[] __read_mostly = {
	{
		.name = "icmp6",
		.match = icmp6_match,
		.matchsize = sizeof(struct ip6t_icmp),
		.checkentry = icmp6_checkentry,
		.proto = IPPROTO_ICMPV6,
		.family = NFPROTO_IPV6,
	},
};
1932
/* Per-netns setup: initialize the IPv6 xtables state for @net. */
static int __net_init ip6_tables_net_init(struct net *net)
{
	return xt_proto_init(net, NFPROTO_IPV6);
}
1937
/* Per-netns teardown counterpart of ip6_tables_net_init(). */
static void __net_exit ip6_tables_net_exit(struct net *net)
{
	xt_proto_fini(net, NFPROTO_IPV6);
}
1942
/* Hooks the per-netns init/exit pair into the netns lifecycle. */
static struct pernet_operations ip6_tables_net_ops = {
	.init = ip6_tables_net_init,
	.exit = ip6_tables_net_exit,
};
1947
1948 static int __init ip6_tables_init(void)
1949 {
1950 int ret;
1951
1952 ret = register_pernet_subsys(&ip6_tables_net_ops);
1953 if (ret < 0)
1954 goto err1;
1955
1956 /* No one else will be downing sem now, so we won't sleep */
1957 ret = xt_register_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
1958 if (ret < 0)
1959 goto err2;
1960 ret = xt_register_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt));
1961 if (ret < 0)
1962 goto err4;
1963
1964 /* Register setsockopt */
1965 ret = nf_register_sockopt(&ip6t_sockopts);
1966 if (ret < 0)
1967 goto err5;
1968
1969 pr_info("(C) 2000-2006 Netfilter Core Team\n");
1970 return 0;
1971
1972 err5:
1973 xt_unregister_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt));
1974 err4:
1975 xt_unregister_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
1976 err2:
1977 unregister_pernet_subsys(&ip6_tables_net_ops);
1978 err1:
1979 return ret;
1980 }
1981
/* Module exit: tear everything down in reverse registration order. */
static void __exit ip6_tables_fini(void)
{
	nf_unregister_sockopt(&ip6t_sockopts);

	xt_unregister_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt));
	xt_unregister_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
	unregister_pernet_subsys(&ip6_tables_net_ops);
}
1990
/* Exported for the per-table modules (ip6table_filter, ip6table_nat, ...). */
EXPORT_SYMBOL(ip6t_register_table);
EXPORT_SYMBOL(ip6t_unregister_table);
EXPORT_SYMBOL(ip6t_do_table);

module_init(ip6_tables_init);
module_exit(ip6_tables_fini);
This page took 0.072596 seconds and 6 git commands to generate.