/*
 * x_tables core - Backend for {ip,ip6,arp}_tables
 *
 * Copyright (C) 2006 Harald Welte <laforge@netfilter.org>
 *
 * Based on existing ip_tables code which is
 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/net.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/mutex.h>
#include <linux/mm.h>
#include <net/net_namespace.h>

#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_arp.h>


MODULE_LICENSE("GPL");
MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
MODULE_DESCRIPTION("{ip,ip6,arp,eb}_tables backend module");

#define SMP_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1))

struct compat_delta {
	struct compat_delta *next;
	unsigned int offset;
	short delta;
};

struct xt_af {
	struct mutex mutex;
	struct list_head match;
	struct list_head target;
#ifdef CONFIG_COMPAT
	struct mutex compat_mutex;
	struct compat_delta *compat_offsets;
#endif
};

static struct xt_af *xt;

#ifdef DEBUG_IP_FIREWALL_USER
#define duprintf(format, args...) printk(format, ## args)
#else
#define duprintf(format, args...)
#endif

static const char *const xt_prefix[NFPROTO_NUMPROTO] = {
	[NFPROTO_UNSPEC] = "x",
	[NFPROTO_IPV4]   = "ip",
	[NFPROTO_ARP]    = "arp",
	[NFPROTO_BRIDGE] = "eb",
	[NFPROTO_IPV6]   = "ip6",
};

/* Registration hooks for targets. */
int
xt_register_target(struct xt_target *target)
{
	u_int8_t af = target->family;
	int ret;

	ret = mutex_lock_interruptible(&xt[af].mutex);
	if (ret != 0)
		return ret;
	list_add(&target->list, &xt[af].target);
	mutex_unlock(&xt[af].mutex);
	return ret;
}
EXPORT_SYMBOL(xt_register_target);

void
xt_unregister_target(struct xt_target *target)
{
	u_int8_t af = target->family;

	mutex_lock(&xt[af].mutex);
	list_del(&target->list);
	mutex_unlock(&xt[af].mutex);
}
EXPORT_SYMBOL(xt_unregister_target);

int
xt_register_targets(struct xt_target *target, unsigned int n)
{
	unsigned int i;
	int err = 0;

	for (i = 0; i < n; i++) {
		err = xt_register_target(&target[i]);
		if (err)
			goto err;
	}
	return err;

err:
	if (i > 0)
		xt_unregister_targets(target, i);
	return err;
}
EXPORT_SYMBOL(xt_register_targets);

void
xt_unregister_targets(struct xt_target *target, unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++)
		xt_unregister_target(&target[i]);
}
EXPORT_SYMBOL(xt_unregister_targets);

int
xt_register_match(struct xt_match *match)
{
	u_int8_t af = match->family;
	int ret;

	ret = mutex_lock_interruptible(&xt[af].mutex);
	if (ret != 0)
		return ret;

	list_add(&match->list, &xt[af].match);
	mutex_unlock(&xt[af].mutex);

	return ret;
}
EXPORT_SYMBOL(xt_register_match);

void
xt_unregister_match(struct xt_match *match)
{
	u_int8_t af = match->family;

	mutex_lock(&xt[af].mutex);
	list_del(&match->list);
	mutex_unlock(&xt[af].mutex);
}
EXPORT_SYMBOL(xt_unregister_match);

int
xt_register_matches(struct xt_match *match, unsigned int n)
{
	unsigned int i;
	int err = 0;

	for (i = 0; i < n; i++) {
		err = xt_register_match(&match[i]);
		if (err)
			goto err;
	}
	return err;

err:
	if (i > 0)
		xt_unregister_matches(match, i);
	return err;
}
EXPORT_SYMBOL(xt_register_matches);

void
xt_unregister_matches(struct xt_match *match, unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++)
		xt_unregister_match(&match[i]);
}
EXPORT_SYMBOL(xt_unregister_matches);

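/*
 * Usage sketch (hypothetical module code, not part of this file): an
 * extension module would typically register an array of matches and/or
 * targets from its module_init hook and unregister them on exit.
 * "foo", foo_mt and struct xt_foo_info below are invented names used
 * purely for illustration:
 *
 *	static struct xt_match foo_matches[] __read_mostly = {
 *		{
 *			.name      = "foo",
 *			.family    = NFPROTO_IPV4,
 *			.match     = foo_mt,
 *			.matchsize = sizeof(struct xt_foo_info),
 *			.me        = THIS_MODULE,
 *		},
 *	};
 *
 *	static int __init foo_mt_init(void)
 *	{
 *		return xt_register_matches(foo_matches,
 *					   ARRAY_SIZE(foo_matches));
 *	}
 *
 *	static void __exit foo_mt_exit(void)
 *	{
 *		xt_unregister_matches(foo_matches, ARRAY_SIZE(foo_matches));
 *	}
 */
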
/*
 * These are weird, but module loading must not be done with mutex
 * held (since they will register), and we have to have a single
 * function to use try_then_request_module().
 */

/* Find match, grabs ref. Returns ERR_PTR() on error. */
struct xt_match *xt_find_match(u8 af, const char *name, u8 revision)
{
	struct xt_match *m;
	int err = 0;

	if (mutex_lock_interruptible(&xt[af].mutex) != 0)
		return ERR_PTR(-EINTR);

	list_for_each_entry(m, &xt[af].match, list) {
		if (strcmp(m->name, name) == 0) {
			if (m->revision == revision) {
				if (try_module_get(m->me)) {
					mutex_unlock(&xt[af].mutex);
					return m;
				}
			} else
				err = -EPROTOTYPE; /* Found something. */
		}
	}
	mutex_unlock(&xt[af].mutex);

	if (af != NFPROTO_UNSPEC)
		/* Try searching again in the family-independent list */
		return xt_find_match(NFPROTO_UNSPEC, name, revision);

	return ERR_PTR(err);
}
EXPORT_SYMBOL(xt_find_match);

/* Find target, grabs ref. Returns ERR_PTR() on error. */
struct xt_target *xt_find_target(u8 af, const char *name, u8 revision)
{
	struct xt_target *t;
	int err = 0;

	if (mutex_lock_interruptible(&xt[af].mutex) != 0)
		return ERR_PTR(-EINTR);

	list_for_each_entry(t, &xt[af].target, list) {
		if (strcmp(t->name, name) == 0) {
			if (t->revision == revision) {
				if (try_module_get(t->me)) {
					mutex_unlock(&xt[af].mutex);
					return t;
				}
			} else
				err = -EPROTOTYPE; /* Found something. */
		}
	}
	mutex_unlock(&xt[af].mutex);

	if (af != NFPROTO_UNSPEC)
		/* Try searching again in the family-independent list */
		return xt_find_target(NFPROTO_UNSPEC, name, revision);

	return ERR_PTR(err);
}
EXPORT_SYMBOL(xt_find_target);

struct xt_target *xt_request_find_target(u8 af, const char *name, u8 revision)
{
	struct xt_target *target;

	target = try_then_request_module(xt_find_target(af, name, revision),
					 "%st_%s", xt_prefix[af], name);
	if (IS_ERR(target) || !target)
		return NULL;
	return target;
}
EXPORT_SYMBOL_GPL(xt_request_find_target);

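/*
 * Note: the "%st_%s" format above turns the family prefix and the
 * extension name into a module name for request_module().  For example,
 * looking up the "REJECT" target with af == NFPROTO_IPV4 tries to load
 * module "ipt_REJECT"; with NFPROTO_IPV6 it tries "ip6t_REJECT".
 */
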
static int match_revfn(u8 af, const char *name, u8 revision, int *bestp)
{
	const struct xt_match *m;
	int have_rev = 0;

	list_for_each_entry(m, &xt[af].match, list) {
		if (strcmp(m->name, name) == 0) {
			if (m->revision > *bestp)
				*bestp = m->revision;
			if (m->revision == revision)
				have_rev = 1;
		}
	}

	if (af != NFPROTO_UNSPEC && !have_rev)
		return match_revfn(NFPROTO_UNSPEC, name, revision, bestp);

	return have_rev;
}

static int target_revfn(u8 af, const char *name, u8 revision, int *bestp)
{
	const struct xt_target *t;
	int have_rev = 0;

	list_for_each_entry(t, &xt[af].target, list) {
		if (strcmp(t->name, name) == 0) {
			if (t->revision > *bestp)
				*bestp = t->revision;
			if (t->revision == revision)
				have_rev = 1;
		}
	}

	if (af != NFPROTO_UNSPEC && !have_rev)
		return target_revfn(NFPROTO_UNSPEC, name, revision, bestp);

	return have_rev;
}

/* Returns true or false (if no such extension at all) */
int xt_find_revision(u8 af, const char *name, u8 revision, int target,
		     int *err)
{
	int have_rev, best = -1;

	if (mutex_lock_interruptible(&xt[af].mutex) != 0) {
		*err = -EINTR;
		return 1;
	}
	if (target == 1)
		have_rev = target_revfn(af, name, revision, &best);
	else
		have_rev = match_revfn(af, name, revision, &best);
	mutex_unlock(&xt[af].mutex);

	/* Nothing at all? Return 0 to try loading module. */
	if (best == -1) {
		*err = -ENOENT;
		return 0;
	}

	*err = best;
	if (!have_rev)
		*err = -EPROTONOSUPPORT;
	return 1;
}
EXPORT_SYMBOL_GPL(xt_find_revision);

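/*
 * Usage note: revision discovery is driven from userspace, e.g. iptables
 * issuing a get-revision getsockopt per extension.  On success *err holds
 * the best revision available; -EPROTONOSUPPORT means the extension
 * exists but not in the requested revision, and a return of 0 with
 * *err == -ENOENT tells the caller to try loading the module first.
 */
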
int xt_check_match(struct xt_mtchk_param *par,
		   unsigned int size, u_int8_t proto, bool inv_proto)
{
	if (XT_ALIGN(par->match->matchsize) != size &&
	    par->match->matchsize != -1) {
		/*
		 * ebt_among is exempt from centralized matchsize checking
		 * because it uses a dynamic-size data set.
		 */
		printk("%s_tables: %s match: invalid size %Zu != %u\n",
		       xt_prefix[par->family], par->match->name,
		       XT_ALIGN(par->match->matchsize), size);
		return -EINVAL;
	}
	if (par->match->table != NULL &&
	    strcmp(par->match->table, par->table) != 0) {
		printk("%s_tables: %s match: only valid in %s table, not %s\n",
		       xt_prefix[par->family], par->match->name,
		       par->match->table, par->table);
		return -EINVAL;
	}
	if (par->match->hooks && (par->hook_mask & ~par->match->hooks) != 0) {
		printk("%s_tables: %s match: bad hook_mask %#x/%#x\n",
		       xt_prefix[par->family], par->match->name,
		       par->hook_mask, par->match->hooks);
		return -EINVAL;
	}
	if (par->match->proto && (par->match->proto != proto || inv_proto)) {
		printk("%s_tables: %s match: only valid for protocol %u\n",
		       xt_prefix[par->family], par->match->name,
		       par->match->proto);
		return -EINVAL;
	}
	if (par->match->checkentry != NULL && !par->match->checkentry(par))
		return -EINVAL;
	return 0;
}
EXPORT_SYMBOL_GPL(xt_check_match);

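/*
 * Design note: the table parsers (ip_tables, ip6_tables, arp_tables,
 * ebtables) call xt_check_match() for every match in a replaced table,
 * so size, table, hook and protocol constraints are enforced in one
 * place rather than in each extension's own checkentry hook.
 */
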
#ifdef CONFIG_COMPAT
int xt_compat_add_offset(u_int8_t af, unsigned int offset, short delta)
{
	struct compat_delta *tmp;

	tmp = kmalloc(sizeof(struct compat_delta), GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	tmp->offset = offset;
	tmp->delta = delta;

	if (xt[af].compat_offsets) {
		tmp->next = xt[af].compat_offsets->next;
		xt[af].compat_offsets->next = tmp;
	} else {
		xt[af].compat_offsets = tmp;
		tmp->next = NULL;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_add_offset);

void xt_compat_flush_offsets(u_int8_t af)
{
	struct compat_delta *tmp, *next;

	if (xt[af].compat_offsets) {
		for (tmp = xt[af].compat_offsets; tmp; tmp = next) {
			next = tmp->next;
			kfree(tmp);
		}
		xt[af].compat_offsets = NULL;
	}
}
EXPORT_SYMBOL_GPL(xt_compat_flush_offsets);

short xt_compat_calc_jump(u_int8_t af, unsigned int offset)
{
	struct compat_delta *tmp;
	short delta;

	for (tmp = xt[af].compat_offsets, delta = 0; tmp; tmp = tmp->next)
		if (tmp->offset < offset)
			delta += tmp->delta;
	return delta;
}
EXPORT_SYMBOL_GPL(xt_compat_calc_jump);

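/*
 * Worked example: if xt_compat_add_offset(af, 100, 8) and
 * xt_compat_add_offset(af, 200, 8) were recorded (two entries whose
 * native layout is 8 bytes larger than their compat layout), then
 * xt_compat_calc_jump(af, 300) returns 16: both recorded offsets lie
 * below 300, so a jump crossing them must be shifted by the sum of
 * their deltas.
 */
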
int xt_compat_match_offset(const struct xt_match *match)
{
	u_int16_t csize = match->compatsize ? : match->matchsize;
	return XT_ALIGN(match->matchsize) - COMPAT_XT_ALIGN(csize);
}
EXPORT_SYMBOL_GPL(xt_compat_match_offset);

int xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
			      unsigned int *size)
{
	const struct xt_match *match = m->u.kernel.match;
	struct compat_xt_entry_match *cm = (struct compat_xt_entry_match *)m;
	int pad, off = xt_compat_match_offset(match);
	u_int16_t msize = cm->u.user.match_size;

	m = *dstptr;
	memcpy(m, cm, sizeof(*cm));
	if (match->compat_from_user)
		match->compat_from_user(m->data, cm->data);
	else
		memcpy(m->data, cm->data, msize - sizeof(*cm));
	pad = XT_ALIGN(match->matchsize) - match->matchsize;
	if (pad > 0)
		memset(m->data + match->matchsize, 0, pad);

	msize += off;
	m->u.user.match_size = msize;

	*size += off;
	*dstptr += msize;
	return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_match_from_user);

int xt_compat_match_to_user(struct xt_entry_match *m, void __user **dstptr,
			    unsigned int *size)
{
	const struct xt_match *match = m->u.kernel.match;
	struct compat_xt_entry_match __user *cm = *dstptr;
	int off = xt_compat_match_offset(match);
	u_int16_t msize = m->u.user.match_size - off;

	if (copy_to_user(cm, m, sizeof(*cm)) ||
	    put_user(msize, &cm->u.user.match_size) ||
	    copy_to_user(cm->u.user.name, m->u.kernel.match->name,
			 strlen(m->u.kernel.match->name) + 1))
		return -EFAULT;

	if (match->compat_to_user) {
		if (match->compat_to_user((void __user *)cm->data, m->data))
			return -EFAULT;
	} else {
		if (copy_to_user(cm->data, m->data, msize - sizeof(*cm)))
			return -EFAULT;
	}

	*size -= off;
	*dstptr += msize;
	return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_match_to_user);
#endif /* CONFIG_COMPAT */

int xt_check_target(struct xt_tgchk_param *par,
		    unsigned int size, u_int8_t proto, bool inv_proto)
{
	if (XT_ALIGN(par->target->targetsize) != size) {
		printk("%s_tables: %s target: invalid size %Zu != %u\n",
		       xt_prefix[par->family], par->target->name,
		       XT_ALIGN(par->target->targetsize), size);
		return -EINVAL;
	}
	if (par->target->table != NULL &&
	    strcmp(par->target->table, par->table) != 0) {
		printk("%s_tables: %s target: only valid in %s table, not %s\n",
		       xt_prefix[par->family], par->target->name,
		       par->target->table, par->table);
		return -EINVAL;
	}
	if (par->target->hooks && (par->hook_mask & ~par->target->hooks) != 0) {
		printk("%s_tables: %s target: bad hook_mask %#x/%#x\n",
		       xt_prefix[par->family], par->target->name,
		       par->hook_mask, par->target->hooks);
		return -EINVAL;
	}
	if (par->target->proto && (par->target->proto != proto || inv_proto)) {
		printk("%s_tables: %s target: only valid for protocol %u\n",
		       xt_prefix[par->family], par->target->name,
		       par->target->proto);
		return -EINVAL;
	}
	if (par->target->checkentry != NULL && !par->target->checkentry(par))
		return -EINVAL;
	return 0;
}
EXPORT_SYMBOL_GPL(xt_check_target);

#ifdef CONFIG_COMPAT
int xt_compat_target_offset(const struct xt_target *target)
{
	u_int16_t csize = target->compatsize ? : target->targetsize;
	return XT_ALIGN(target->targetsize) - COMPAT_XT_ALIGN(csize);
}
EXPORT_SYMBOL_GPL(xt_compat_target_offset);

void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
				unsigned int *size)
{
	const struct xt_target *target = t->u.kernel.target;
	struct compat_xt_entry_target *ct = (struct compat_xt_entry_target *)t;
	int pad, off = xt_compat_target_offset(target);
	u_int16_t tsize = ct->u.user.target_size;

	t = *dstptr;
	memcpy(t, ct, sizeof(*ct));
	if (target->compat_from_user)
		target->compat_from_user(t->data, ct->data);
	else
		memcpy(t->data, ct->data, tsize - sizeof(*ct));
	pad = XT_ALIGN(target->targetsize) - target->targetsize;
	if (pad > 0)
		memset(t->data + target->targetsize, 0, pad);

	tsize += off;
	t->u.user.target_size = tsize;

	*size += off;
	*dstptr += tsize;
}
EXPORT_SYMBOL_GPL(xt_compat_target_from_user);

int xt_compat_target_to_user(struct xt_entry_target *t, void __user **dstptr,
			     unsigned int *size)
{
	const struct xt_target *target = t->u.kernel.target;
	struct compat_xt_entry_target __user *ct = *dstptr;
	int off = xt_compat_target_offset(target);
	u_int16_t tsize = t->u.user.target_size - off;

	if (copy_to_user(ct, t, sizeof(*ct)) ||
	    put_user(tsize, &ct->u.user.target_size) ||
	    copy_to_user(ct->u.user.name, t->u.kernel.target->name,
			 strlen(t->u.kernel.target->name) + 1))
		return -EFAULT;

	if (target->compat_to_user) {
		if (target->compat_to_user((void __user *)ct->data, t->data))
			return -EFAULT;
	} else {
		if (copy_to_user(ct->data, t->data, tsize - sizeof(*ct)))
			return -EFAULT;
	}

	*size -= off;
	*dstptr += tsize;
	return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_target_to_user);
#endif

struct xt_table_info *xt_alloc_table_info(unsigned int size)
{
	struct xt_table_info *newinfo;
	int cpu;

	/* Pedantry: prevent them from hitting BUG() in vmalloc.c --RR */
	if ((SMP_ALIGN(size) >> PAGE_SHIFT) + 2 > num_physpages)
		return NULL;

	newinfo = kzalloc(XT_TABLE_INFO_SZ, GFP_KERNEL);
	if (!newinfo)
		return NULL;

	newinfo->size = size;

	for_each_possible_cpu(cpu) {
		if (size <= PAGE_SIZE)
			newinfo->entries[cpu] = kmalloc_node(size,
							     GFP_KERNEL,
							     cpu_to_node(cpu));
		else
			newinfo->entries[cpu] = vmalloc_node(size,
							     cpu_to_node(cpu));

		if (newinfo->entries[cpu] == NULL) {
			xt_free_table_info(newinfo);
			return NULL;
		}
	}

	return newinfo;
}
EXPORT_SYMBOL(xt_alloc_table_info);

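/*
 * Design note: the rule blob is duplicated once per possible CPU so that
 * each CPU can update the counters embedded in its own copy without
 * bouncing cache lines between CPUs.  Small blobs come from the slab via
 * kmalloc_node(), larger ones fall back to vmalloc_node(), which is why
 * xt_free_table_info() below must mirror the same size check.
 */
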
void xt_free_table_info(struct xt_table_info *info)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		if (info->size <= PAGE_SIZE)
			kfree(info->entries[cpu]);
		else
			vfree(info->entries[cpu]);
	}
	kfree(info);
}
EXPORT_SYMBOL(xt_free_table_info);

/* Find table by name, grabs mutex & ref. Returns ERR_PTR() on error. */
struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af,
				    const char *name)
{
	struct xt_table *t;

	if (mutex_lock_interruptible(&xt[af].mutex) != 0)
		return ERR_PTR(-EINTR);

	list_for_each_entry(t, &net->xt.tables[af], list)
		if (strcmp(t->name, name) == 0 && try_module_get(t->me))
			return t;
	mutex_unlock(&xt[af].mutex);
	return NULL;
}
EXPORT_SYMBOL_GPL(xt_find_table_lock);

void xt_table_unlock(struct xt_table *table)
{
	mutex_unlock(&xt[table->af].mutex);
}
EXPORT_SYMBOL_GPL(xt_table_unlock);

#ifdef CONFIG_COMPAT
void xt_compat_lock(u_int8_t af)
{
	mutex_lock(&xt[af].compat_mutex);
}
EXPORT_SYMBOL_GPL(xt_compat_lock);

void xt_compat_unlock(u_int8_t af)
{
	mutex_unlock(&xt[af].compat_mutex);
}
EXPORT_SYMBOL_GPL(xt_compat_unlock);
#endif

struct xt_table_info *
xt_replace_table(struct xt_table *table,
		 unsigned int num_counters,
		 struct xt_table_info *newinfo,
		 int *error)
{
	struct xt_table_info *oldinfo, *private;

	/* Do the substitution. */
	write_lock_bh(&table->lock);
	private = table->private;
	/* Check inside lock: is the old number correct? */
	if (num_counters != private->number) {
		duprintf("num_counters != table->private->number (%u/%u)\n",
			 num_counters, private->number);
		write_unlock_bh(&table->lock);
		*error = -EAGAIN;
		return NULL;
	}
	oldinfo = private;
	table->private = newinfo;
	newinfo->initial_entries = oldinfo->initial_entries;
	write_unlock_bh(&table->lock);

	return oldinfo;
}
EXPORT_SYMBOL_GPL(xt_replace_table);

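/*
 * Usage sketch: a typical table-replace path swaps in the new blob,
 * harvests the counters from the returned old info and then releases it,
 * roughly:
 *
 *	oldinfo = xt_replace_table(table, num_counters, newinfo, &ret);
 *	if (!oldinfo)
 *		goto err;
 *	... copy oldinfo's counters out to userspace ...
 *	xt_free_table_info(oldinfo);
 *
 * (Sketch only; the real callers in ip_tables.c and friends add entry
 * cleanup and user-copy handling around this.)
 */
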
struct xt_table *xt_register_table(struct net *net, struct xt_table *table,
				   struct xt_table_info *bootstrap,
				   struct xt_table_info *newinfo)
{
	int ret;
	struct xt_table_info *private;
	struct xt_table *t;

	/* Don't add one object to multiple lists. */
	table = kmemdup(table, sizeof(struct xt_table), GFP_KERNEL);
	if (!table) {
		ret = -ENOMEM;
		goto out;
	}

	ret = mutex_lock_interruptible(&xt[table->af].mutex);
	if (ret != 0)
		goto out_free;

	/* Don't autoload: we'd eat our tail... */
	list_for_each_entry(t, &net->xt.tables[table->af], list) {
		if (strcmp(t->name, table->name) == 0) {
			ret = -EEXIST;
			goto unlock;
		}
	}

	/* Simplifies replace_table code. */
	table->private = bootstrap;
	rwlock_init(&table->lock);
	if (!xt_replace_table(table, 0, newinfo, &ret))
		goto unlock;

	private = table->private;
	duprintf("table->private->number = %u\n", private->number);

	/* save number of initial entries */
	private->initial_entries = private->number;

	list_add(&table->list, &net->xt.tables[table->af]);
	mutex_unlock(&xt[table->af].mutex);
	return table;

unlock:
	mutex_unlock(&xt[table->af].mutex);
out_free:
	kfree(table);
out:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(xt_register_table);

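/*
 * Usage sketch (hypothetical module code): a table module passes a
 * template and a bootstrap info, then keeps the returned duplicate.
 * "foo" and foo_table are invented names; see iptable_filter.c for a
 * real caller:
 *
 *	static struct xt_table foo_table = {
 *		.name = "foo",
 *		.af   = NFPROTO_IPV4,
 *		.me   = THIS_MODULE,
 *		...
 *	};
 *
 *	table = xt_register_table(net, &foo_table, &bootstrap, newinfo);
 *	if (IS_ERR(table))
 *		return PTR_ERR(table);
 */
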
void *xt_unregister_table(struct xt_table *table)
{
	struct xt_table_info *private;

	mutex_lock(&xt[table->af].mutex);
	private = table->private;
	list_del(&table->list);
	mutex_unlock(&xt[table->af].mutex);
	kfree(table);

	return private;
}
EXPORT_SYMBOL_GPL(xt_unregister_table);

#ifdef CONFIG_PROC_FS
struct xt_names_priv {
	struct seq_net_private p;
	u_int8_t af;
};
static void *xt_table_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct xt_names_priv *priv = seq->private;
	struct net *net = seq_file_net(seq);
	u_int8_t af = priv->af;

	mutex_lock(&xt[af].mutex);
	return seq_list_start(&net->xt.tables[af], *pos);
}

static void *xt_table_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct xt_names_priv *priv = seq->private;
	struct net *net = seq_file_net(seq);
	u_int8_t af = priv->af;

	return seq_list_next(v, &net->xt.tables[af], pos);
}

static void xt_table_seq_stop(struct seq_file *seq, void *v)
{
	struct xt_names_priv *priv = seq->private;
	u_int8_t af = priv->af;

	mutex_unlock(&xt[af].mutex);
}

static int xt_table_seq_show(struct seq_file *seq, void *v)
{
	struct xt_table *table = list_entry(v, struct xt_table, list);

	if (strlen(table->name))
		return seq_printf(seq, "%s\n", table->name);
	else
		return 0;
}

static const struct seq_operations xt_table_seq_ops = {
	.start = xt_table_seq_start,
	.next  = xt_table_seq_next,
	.stop  = xt_table_seq_stop,
	.show  = xt_table_seq_show,
};

static int xt_table_open(struct inode *inode, struct file *file)
{
	int ret;
	struct xt_names_priv *priv;

	ret = seq_open_net(inode, file, &xt_table_seq_ops,
			   sizeof(struct xt_names_priv));
	if (!ret) {
		priv = ((struct seq_file *)file->private_data)->private;
		priv->af = (unsigned long)PDE(inode)->data;
	}
	return ret;
}

static const struct file_operations xt_table_ops = {
	.owner   = THIS_MODULE,
	.open    = xt_table_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net,
};

/*
 * Traverse state for ip{,6}_{tables,matches} for helping crossing
 * the multi-AF mutexes.
 */
struct nf_mttg_trav {
	struct list_head *head, *curr;
	uint8_t class, nfproto;
};

enum {
	MTTG_TRAV_INIT,
	MTTG_TRAV_NFP_UNSPEC,
	MTTG_TRAV_NFP_SPEC,
	MTTG_TRAV_DONE,
};

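/*
 * The traversal below is a small state machine: INIT takes the
 * NFPROTO_UNSPEC mutex and walks that family-independent list first;
 * when it wraps around, NFP_UNSPEC drops that mutex, takes the
 * per-family one and switches to the family-specific list; NFP_SPEC
 * then advances until that list wraps too, after which _stop releases
 * whichever mutex is still held.
 */
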
static void *xt_mttg_seq_next(struct seq_file *seq, void *v, loff_t *ppos,
			      bool is_target)
{
	static const uint8_t next_class[] = {
		[MTTG_TRAV_NFP_UNSPEC] = MTTG_TRAV_NFP_SPEC,
		[MTTG_TRAV_NFP_SPEC]   = MTTG_TRAV_DONE,
	};
	struct nf_mttg_trav *trav = seq->private;

	switch (trav->class) {
	case MTTG_TRAV_INIT:
		trav->class = MTTG_TRAV_NFP_UNSPEC;
		mutex_lock(&xt[NFPROTO_UNSPEC].mutex);
		trav->head = trav->curr = is_target ?
			&xt[NFPROTO_UNSPEC].target : &xt[NFPROTO_UNSPEC].match;
		break;
	case MTTG_TRAV_NFP_UNSPEC:
		trav->curr = trav->curr->next;
		if (trav->curr != trav->head)
			break;
		mutex_unlock(&xt[NFPROTO_UNSPEC].mutex);
		mutex_lock(&xt[trav->nfproto].mutex);
		trav->head = trav->curr = is_target ?
			&xt[trav->nfproto].target : &xt[trav->nfproto].match;
		trav->class = next_class[trav->class];
		break;
	case MTTG_TRAV_NFP_SPEC:
		trav->curr = trav->curr->next;
		if (trav->curr != trav->head)
			break;
		/* fallthru, _stop will unlock */
	default:
		return NULL;
	}

	if (ppos != NULL)
		++*ppos;
	return trav;
}

static void *xt_mttg_seq_start(struct seq_file *seq, loff_t *pos,
			       bool is_target)
{
	struct nf_mttg_trav *trav = seq->private;
	unsigned int j;

	trav->class = MTTG_TRAV_INIT;
	for (j = 0; j < *pos; ++j)
		if (xt_mttg_seq_next(seq, NULL, NULL, is_target) == NULL)
			return NULL;
	return trav;
}

static void xt_mttg_seq_stop(struct seq_file *seq, void *v)
{
	struct nf_mttg_trav *trav = seq->private;

	switch (trav->class) {
	case MTTG_TRAV_NFP_UNSPEC:
		mutex_unlock(&xt[NFPROTO_UNSPEC].mutex);
		break;
	case MTTG_TRAV_NFP_SPEC:
		mutex_unlock(&xt[trav->nfproto].mutex);
		break;
	}
}

static void *xt_match_seq_start(struct seq_file *seq, loff_t *pos)
{
	return xt_mttg_seq_start(seq, pos, false);
}

static void *xt_match_seq_next(struct seq_file *seq, void *v, loff_t *ppos)
{
	return xt_mttg_seq_next(seq, v, ppos, false);
}

static int xt_match_seq_show(struct seq_file *seq, void *v)
{
	const struct nf_mttg_trav *trav = seq->private;
	const struct xt_match *match;

	switch (trav->class) {
	case MTTG_TRAV_NFP_UNSPEC:
	case MTTG_TRAV_NFP_SPEC:
		if (trav->curr == trav->head)
			return 0;
		match = list_entry(trav->curr, struct xt_match, list);
		return (*match->name == '\0') ? 0 :
		       seq_printf(seq, "%s\n", match->name);
	}
	return 0;
}

static const struct seq_operations xt_match_seq_ops = {
	.start = xt_match_seq_start,
	.next  = xt_match_seq_next,
	.stop  = xt_mttg_seq_stop,
	.show  = xt_match_seq_show,
};

static int xt_match_open(struct inode *inode, struct file *file)
{
	struct seq_file *seq;
	struct nf_mttg_trav *trav;
	int ret;

	trav = kmalloc(sizeof(*trav), GFP_KERNEL);
	if (trav == NULL)
		return -ENOMEM;

	ret = seq_open(file, &xt_match_seq_ops);
	if (ret < 0) {
		kfree(trav);
		return ret;
	}

	seq = file->private_data;
	seq->private = trav;
	trav->nfproto = (unsigned long)PDE(inode)->data;
	return 0;
}

static const struct file_operations xt_match_ops = {
	.owner   = THIS_MODULE,
	.open    = xt_match_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_private,
};

static void *xt_target_seq_start(struct seq_file *seq, loff_t *pos)
{
	return xt_mttg_seq_start(seq, pos, true);
}

static void *xt_target_seq_next(struct seq_file *seq, void *v, loff_t *ppos)
{
	return xt_mttg_seq_next(seq, v, ppos, true);
}

static int xt_target_seq_show(struct seq_file *seq, void *v)
{
	const struct nf_mttg_trav *trav = seq->private;
	const struct xt_target *target;

	switch (trav->class) {
	case MTTG_TRAV_NFP_UNSPEC:
	case MTTG_TRAV_NFP_SPEC:
		if (trav->curr == trav->head)
			return 0;
		target = list_entry(trav->curr, struct xt_target, list);
		return (*target->name == '\0') ? 0 :
		       seq_printf(seq, "%s\n", target->name);
	}
	return 0;
}

static const struct seq_operations xt_target_seq_ops = {
	.start = xt_target_seq_start,
	.next  = xt_target_seq_next,
	.stop  = xt_mttg_seq_stop,
	.show  = xt_target_seq_show,
};

static int xt_target_open(struct inode *inode, struct file *file)
{
	struct seq_file *seq;
	struct nf_mttg_trav *trav;
	int ret;

	trav = kmalloc(sizeof(*trav), GFP_KERNEL);
	if (trav == NULL)
		return -ENOMEM;

	ret = seq_open(file, &xt_target_seq_ops);
	if (ret < 0) {
		kfree(trav);
		return ret;
	}

	seq = file->private_data;
	seq->private = trav;
	trav->nfproto = (unsigned long)PDE(inode)->data;
	return 0;
}

static const struct file_operations xt_target_ops = {
	.owner   = THIS_MODULE,
	.open    = xt_target_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_private,
};

#define FORMAT_TABLES "_tables_names"
#define FORMAT_MATCHES "_tables_matches"
#define FORMAT_TARGETS "_tables_targets"

#endif /* CONFIG_PROC_FS */

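/*
 * For example, with af == NFPROTO_IPV4 the three files created below are
 * /proc/net/ip_tables_names, /proc/net/ip_tables_matches and
 * /proc/net/ip_tables_targets, listing the tables, matches and targets
 * registered for that family.
 */
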
int xt_proto_init(struct net *net, u_int8_t af)
{
#ifdef CONFIG_PROC_FS
	char buf[XT_FUNCTION_MAXNAMELEN];
	struct proc_dir_entry *proc;
#endif

	if (af >= ARRAY_SIZE(xt_prefix))
		return -EINVAL;

#ifdef CONFIG_PROC_FS
	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TABLES, sizeof(buf));
	proc = proc_create_data(buf, 0440, net->proc_net, &xt_table_ops,
				(void *)(unsigned long)af);
	if (!proc)
		goto out;

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_MATCHES, sizeof(buf));
	proc = proc_create_data(buf, 0440, net->proc_net, &xt_match_ops,
				(void *)(unsigned long)af);
	if (!proc)
		goto out_remove_tables;

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TARGETS, sizeof(buf));
	proc = proc_create_data(buf, 0440, net->proc_net, &xt_target_ops,
				(void *)(unsigned long)af);
	if (!proc)
		goto out_remove_matches;
#endif

	return 0;

#ifdef CONFIG_PROC_FS
out_remove_matches:
	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_MATCHES, sizeof(buf));
	proc_net_remove(net, buf);

out_remove_tables:
	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TABLES, sizeof(buf));
	proc_net_remove(net, buf);
out:
	return -1;
#endif
}
EXPORT_SYMBOL_GPL(xt_proto_init);

void xt_proto_fini(struct net *net, u_int8_t af)
{
#ifdef CONFIG_PROC_FS
	char buf[XT_FUNCTION_MAXNAMELEN];

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TABLES, sizeof(buf));
	proc_net_remove(net, buf);

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TARGETS, sizeof(buf));
	proc_net_remove(net, buf);

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_MATCHES, sizeof(buf));
	proc_net_remove(net, buf);
#endif /* CONFIG_PROC_FS */
}
EXPORT_SYMBOL_GPL(xt_proto_fini);

static int __net_init xt_net_init(struct net *net)
{
	int i;

	for (i = 0; i < NFPROTO_NUMPROTO; i++)
		INIT_LIST_HEAD(&net->xt.tables[i]);
	return 0;
}

static struct pernet_operations xt_net_ops = {
	.init = xt_net_init,
};

static int __init xt_init(void)
{
	int i, rv;

	xt = kmalloc(sizeof(struct xt_af) * NFPROTO_NUMPROTO, GFP_KERNEL);
	if (!xt)
		return -ENOMEM;

	for (i = 0; i < NFPROTO_NUMPROTO; i++) {
		mutex_init(&xt[i].mutex);
#ifdef CONFIG_COMPAT
		mutex_init(&xt[i].compat_mutex);
		xt[i].compat_offsets = NULL;
#endif
		INIT_LIST_HEAD(&xt[i].target);
		INIT_LIST_HEAD(&xt[i].match);
	}
	rv = register_pernet_subsys(&xt_net_ops);
	if (rv < 0)
		kfree(xt);
	return rv;
}

static void __exit xt_fini(void)
{
	unregister_pernet_subsys(&xt_net_ops);
	kfree(xt);
}

module_init(xt_init);
module_exit(xt_fini);