/* Expectation handling for nf_conntrack. */

/* (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/types.h>
#include <linux/netfilter.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/percpu.h>
#include <linux/kernel.h>
#include <linux/jhash.h>

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_tuple.h>

struct hlist_head *nf_ct_expect_hash __read_mostly;
EXPORT_SYMBOL_GPL(nf_ct_expect_hash);

unsigned int nf_ct_expect_hsize __read_mostly;
EXPORT_SYMBOL_GPL(nf_ct_expect_hsize);

static unsigned int nf_ct_expect_hash_rnd __read_mostly;
static unsigned int nf_ct_expect_count;
static int nf_ct_expect_hash_rnd_initted __read_mostly;
static int nf_ct_expect_vmalloc;

static struct kmem_cache *nf_ct_expect_cachep __read_mostly;
static unsigned int nf_ct_expect_next_id;

/* nf_conntrack_expect helper functions */

/* Unlink the expectation from the global hash and from its master's list.
 * The caller must hold nf_conntrack_lock for writing. */
void nf_ct_unlink_expect(struct nf_conntrack_expect *exp)
{
	struct nf_conn_help *master_help = nfct_help(exp->master);

	NF_CT_ASSERT(master_help);
	NF_CT_ASSERT(!timer_pending(&exp->timeout));

	hlist_del(&exp->hnode);
	nf_ct_expect_count--;

	hlist_del(&exp->lnode);
	master_help->expecting--;
	nf_ct_expect_put(exp);

	NF_CT_STAT_INC(expect_delete);
}
EXPORT_SYMBOL_GPL(nf_ct_unlink_expect);

static void nf_ct_expectation_timed_out(unsigned long ul_expect)
{
	struct nf_conntrack_expect *exp = (void *)ul_expect;

	write_lock_bh(&nf_conntrack_lock);
	nf_ct_unlink_expect(exp);
	write_unlock_bh(&nf_conntrack_lock);
	nf_ct_expect_put(exp);
}

static unsigned int nf_ct_expect_dst_hash(const struct nf_conntrack_tuple *tuple)
{
	if (unlikely(!nf_ct_expect_hash_rnd_initted)) {
		get_random_bytes(&nf_ct_expect_hash_rnd, 4);
		nf_ct_expect_hash_rnd_initted = 1;
	}

	return jhash2(tuple->dst.u3.all, ARRAY_SIZE(tuple->dst.u3.all),
		      (((tuple->dst.protonum ^ tuple->src.l3num) << 16) |
		       tuple->dst.u.all) ^ nf_ct_expect_hash_rnd) %
	       nf_ct_expect_hsize;
}
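
/*
 * Note: the hash above keys only on the destination side of the tuple
 * (address, layer-3 family, layer-4 protocol and port), seeded with a
 * random value generated on first use. The source side is deliberately
 * left out, since expectation masks may wildcard it and a lookup must
 * land in the same bucket regardless of the packet's source.
 */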

/* Find the first expectation matching @tuple; the caller must hold
 * nf_conntrack_lock (a read lock is sufficient). */
struct nf_conntrack_expect *
__nf_ct_expect_find(const struct nf_conntrack_tuple *tuple)
{
	struct nf_conntrack_expect *i;
	struct hlist_node *n;
	unsigned int h;

	if (!nf_ct_expect_count)
		return NULL;

	h = nf_ct_expect_dst_hash(tuple);
	hlist_for_each_entry(i, n, &nf_ct_expect_hash[h], hnode) {
		if (nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask))
			return i;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(__nf_ct_expect_find);

/* Just find an expectation corresponding to a tuple, taking a reference. */
struct nf_conntrack_expect *
nf_ct_expect_find_get(const struct nf_conntrack_tuple *tuple)
{
	struct nf_conntrack_expect *i;

	read_lock_bh(&nf_conntrack_lock);
	i = __nf_ct_expect_find(tuple);
	if (i)
		atomic_inc(&i->use);
	read_unlock_bh(&nf_conntrack_lock);

	return i;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_find_get);

/* If an expectation for this connection is found, it is deleted from the
 * global list and then returned with a reference held for the caller. */
struct nf_conntrack_expect *
nf_ct_find_expectation(const struct nf_conntrack_tuple *tuple)
{
	struct nf_conntrack_expect *exp;

	exp = __nf_ct_expect_find(tuple);
	if (!exp)
		return NULL;

	/* If master is not in hash table yet (ie. packet hasn't left
	   this machine yet), how can other end know about expected?
	   Hence these are not the droids you are looking for (if
	   master ct never got confirmed, we'd hold a reference to it
	   and weird things would happen to future packets). */
	if (!nf_ct_is_confirmed(exp->master))
		return NULL;

	if (exp->flags & NF_CT_EXPECT_PERMANENT) {
		atomic_inc(&exp->use);
		return exp;
	} else if (del_timer(&exp->timeout)) {
		nf_ct_unlink_expect(exp);
		return exp;
	}

	return NULL;
}

/* delete all expectations for this conntrack */
void nf_ct_remove_expectations(struct nf_conn *ct)
{
	struct nf_conn_help *help = nfct_help(ct);
	struct nf_conntrack_expect *exp;
	struct hlist_node *n, *next;

	/* Optimization: most connections never expect any others. */
	if (!help || help->expecting == 0)
		return;

	hlist_for_each_entry_safe(exp, n, next, &help->expectations, lnode) {
		if (del_timer(&exp->timeout)) {
			nf_ct_unlink_expect(exp);
			nf_ct_expect_put(exp);
		}
	}
}
EXPORT_SYMBOL_GPL(nf_ct_remove_expectations);

/* Would two expected things clash? */
static inline int expect_clash(const struct nf_conntrack_expect *a,
			       const struct nf_conntrack_expect *b)
{
	/* The expectations clash if their tuples are equal in the part
	   covered by the intersection of both masks. */
	struct nf_conntrack_tuple_mask intersect_mask;
	int count;

	intersect_mask.src.u.all = a->mask.src.u.all & b->mask.src.u.all;

	for (count = 0; count < NF_CT_TUPLE_L3SIZE; count++) {
		intersect_mask.src.u3.all[count] =
			a->mask.src.u3.all[count] & b->mask.src.u3.all[count];
	}

	return nf_ct_tuple_mask_cmp(&a->tuple, &b->tuple, &intersect_mask);
}
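
/*
 * For example (illustrative only): two expectations toward the same
 * destination address and port, both with masks that wildcard the
 * source port, clash -- their tuples agree everywhere the intersected
 * mask still covers, so either could match the same future packet.
 */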

static inline int expect_matches(const struct nf_conntrack_expect *a,
				 const struct nf_conntrack_expect *b)
{
	return a->master == b->master
	       && nf_ct_tuple_equal(&a->tuple, &b->tuple)
	       && nf_ct_tuple_mask_equal(&a->mask, &b->mask);
}

/* Generally a bad idea to call this: could have matched already. */
void nf_ct_unexpect_related(struct nf_conntrack_expect *exp)
{
	write_lock_bh(&nf_conntrack_lock);
	if (del_timer(&exp->timeout)) {
		nf_ct_unlink_expect(exp);
		nf_ct_expect_put(exp);
	}
	write_unlock_bh(&nf_conntrack_lock);
}
EXPORT_SYMBOL_GPL(nf_ct_unexpect_related);

/* We don't increase the master conntrack refcount for unfulfilled
 * expectations. During conntrack destruction, the expectations are
 * always killed before the conntrack itself. */
struct nf_conntrack_expect *nf_ct_expect_alloc(struct nf_conn *me)
{
	struct nf_conntrack_expect *new;

	new = kmem_cache_alloc(nf_ct_expect_cachep, GFP_ATOMIC);
	if (!new)
		return NULL;

	new->master = me;
	atomic_set(&new->use, 1);
	return new;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_alloc);

void nf_ct_expect_init(struct nf_conntrack_expect *exp, int family,
		       union nf_conntrack_address *saddr,
		       union nf_conntrack_address *daddr,
		       u_int8_t proto, __be16 *src, __be16 *dst)
{
	int len;

	if (family == AF_INET)
		len = 4;
	else
		len = 16;

	exp->flags = 0;
	exp->expectfn = NULL;
	exp->helper = NULL;
	exp->tuple.src.l3num = family;
	exp->tuple.dst.protonum = proto;

	if (saddr) {
		memcpy(&exp->tuple.src.u3, saddr, len);
		if (sizeof(exp->tuple.src.u3) > len)
			/* address needs to be cleared for nf_ct_tuple_equal */
			memset((void *)&exp->tuple.src.u3 + len, 0x00,
			       sizeof(exp->tuple.src.u3) - len);
		memset(&exp->mask.src.u3, 0xFF, len);
		if (sizeof(exp->mask.src.u3) > len)
			memset((void *)&exp->mask.src.u3 + len, 0x00,
			       sizeof(exp->mask.src.u3) - len);
	} else {
		memset(&exp->tuple.src.u3, 0x00, sizeof(exp->tuple.src.u3));
		memset(&exp->mask.src.u3, 0x00, sizeof(exp->mask.src.u3));
	}

	if (src) {
		exp->tuple.src.u.all = (__force u16)*src;
		exp->mask.src.u.all = 0xFFFF;
	} else {
		exp->tuple.src.u.all = 0;
		exp->mask.src.u.all = 0;
	}

	memcpy(&exp->tuple.dst.u3, daddr, len);
	if (sizeof(exp->tuple.dst.u3) > len)
		/* address needs to be cleared for nf_ct_tuple_equal */
		memset((void *)&exp->tuple.dst.u3 + len, 0x00,
		       sizeof(exp->tuple.dst.u3) - len);

	exp->tuple.dst.u.all = (__force u16)*dst;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_init);

void nf_ct_expect_put(struct nf_conntrack_expect *exp)
{
	if (atomic_dec_and_test(&exp->use))
		kmem_cache_free(nf_ct_expect_cachep, exp);
}
EXPORT_SYMBOL_GPL(nf_ct_expect_put);

static void nf_ct_expect_insert(struct nf_conntrack_expect *exp)
{
	struct nf_conn_help *master_help = nfct_help(exp->master);
	unsigned int h = nf_ct_expect_dst_hash(&exp->tuple);

	/* One reference for the expectation table and lists... */
	atomic_inc(&exp->use);

	hlist_add_head(&exp->lnode, &master_help->expectations);
	master_help->expecting++;

	hlist_add_head(&exp->hnode, &nf_ct_expect_hash[h]);
	nf_ct_expect_count++;

	setup_timer(&exp->timeout, nf_ct_expectation_timed_out,
		    (unsigned long)exp);
	exp->timeout.expires = jiffies + master_help->helper->timeout * HZ;
	add_timer(&exp->timeout);

	exp->id = ++nf_ct_expect_next_id;
	/* ...and one for the timeout timer. */
	atomic_inc(&exp->use);
	NF_CT_STAT_INC(expect_create);
}

/* Race with expectations being used means we could have none to find; OK. */
static void evict_oldest_expect(struct nf_conn *master)
{
	struct nf_conn_help *master_help = nfct_help(master);
	struct nf_conntrack_expect *exp = NULL;
	struct hlist_node *n;

	/* New expectations are added at the head of the list, so walking
	 * to the end leaves exp pointing at the oldest one. */
	hlist_for_each_entry(exp, n, &master_help->expectations, lnode)
		; /* nothing */

	if (exp && del_timer(&exp->timeout)) {
		nf_ct_unlink_expect(exp);
		nf_ct_expect_put(exp);
	}
}

static inline int refresh_timer(struct nf_conntrack_expect *i)
{
	struct nf_conn_help *master_help = nfct_help(i->master);

	if (!del_timer(&i->timeout))
		return 0;

	i->timeout.expires = jiffies + master_help->helper->timeout * HZ;
	add_timer(&i->timeout);
	return 1;
}

int nf_ct_expect_related(struct nf_conntrack_expect *expect)
{
	struct nf_conntrack_expect *i;
	struct nf_conn *master = expect->master;
	struct nf_conn_help *master_help = nfct_help(master);
	struct hlist_node *n;
	unsigned int h;
	int ret;

	NF_CT_ASSERT(master_help);

	write_lock_bh(&nf_conntrack_lock);
	if (!master_help->helper) {
		ret = -ESHUTDOWN;
		goto out;
	}
	h = nf_ct_expect_dst_hash(&expect->tuple);
	hlist_for_each_entry(i, n, &nf_ct_expect_hash[h], hnode) {
		if (expect_matches(i, expect)) {
			/* Refresh timer: if it's dying, ignore. */
			if (refresh_timer(i)) {
				ret = 0;
				goto out;
			}
		} else if (expect_clash(i, expect)) {
			ret = -EBUSY;
			goto out;
		}
	}
	/* Would this put us over the limit? If so, evict the oldest. */
	if (master_help->helper->max_expected &&
	    master_help->expecting >= master_help->helper->max_expected)
		evict_oldest_expect(master);

	nf_ct_expect_insert(expect);
	nf_ct_expect_event(IPEXP_NEW, expect);
	ret = 0;
out:
	write_unlock_bh(&nf_conntrack_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_related);
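
/*
 * A minimal sketch of how a connection-tracking helper typically drives
 * this API (illustrative only: "daddr" and "port" stand in for values the
 * helper would parse out of the packet payload, and error handling is
 * abbreviated):
 *
 *	struct nf_conntrack_expect *exp;
 *
 *	exp = nf_ct_expect_alloc(ct);
 *	if (exp == NULL)
 *		return NF_DROP;
 *	nf_ct_expect_init(exp, AF_INET, NULL, &daddr,
 *			  IPPROTO_TCP, NULL, &port);
 *	if (nf_ct_expect_related(exp) != 0)
 *		ret = NF_DROP;
 *	nf_ct_expect_put(exp);
 *
 * nf_ct_expect_insert() takes its own references for the table and the
 * timer, so the caller always drops the allocation reference with
 * nf_ct_expect_put() afterwards, whether registration succeeded or not.
 */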

#ifdef CONFIG_PROC_FS
struct ct_expect_iter_state {
	unsigned int bucket;
};

static struct hlist_node *ct_expect_get_first(struct seq_file *seq)
{
	struct ct_expect_iter_state *st = seq->private;

	for (st->bucket = 0; st->bucket < nf_ct_expect_hsize; st->bucket++) {
		if (!hlist_empty(&nf_ct_expect_hash[st->bucket]))
			return nf_ct_expect_hash[st->bucket].first;
	}
	return NULL;
}

static struct hlist_node *ct_expect_get_next(struct seq_file *seq,
					     struct hlist_node *head)
{
	struct ct_expect_iter_state *st = seq->private;

	head = head->next;
	while (head == NULL) {
		if (++st->bucket >= nf_ct_expect_hsize)
			return NULL;
		head = nf_ct_expect_hash[st->bucket].first;
	}
	return head;
}

static struct hlist_node *ct_expect_get_idx(struct seq_file *seq, loff_t pos)
{
	struct hlist_node *head = ct_expect_get_first(seq);

	if (head)
		while (pos && (head = ct_expect_get_next(seq, head)))
			pos--;
	return pos ? NULL : head;
}

static void *exp_seq_start(struct seq_file *seq, loff_t *pos)
{
	read_lock_bh(&nf_conntrack_lock);
	return ct_expect_get_idx(seq, *pos);
}

static void *exp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	(*pos)++;
	return ct_expect_get_next(seq, v);
}

static void exp_seq_stop(struct seq_file *seq, void *v)
{
	read_unlock_bh(&nf_conntrack_lock);
}

static int exp_seq_show(struct seq_file *s, void *v)
{
	struct nf_conntrack_expect *expect;
	struct hlist_node *n = v;

	expect = hlist_entry(n, struct nf_conntrack_expect, hnode);

	if (expect->timeout.function)
		seq_printf(s, "%ld ", timer_pending(&expect->timeout)
			   ? (long)(expect->timeout.expires - jiffies)/HZ : 0);
	else
		seq_printf(s, "- ");
	seq_printf(s, "l3proto = %u proto=%u ",
		   expect->tuple.src.l3num,
		   expect->tuple.dst.protonum);
	print_tuple(s, &expect->tuple,
		    __nf_ct_l3proto_find(expect->tuple.src.l3num),
		    __nf_ct_l4proto_find(expect->tuple.src.l3num,
					 expect->tuple.dst.protonum));
	return seq_putc(s, '\n');
}
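
/*
 * Each line of /proc/net/nf_conntrack_expect thus combines the remaining
 * timeout in seconds (or "-" when no timer is set up), the protocol
 * numbers, and the expected tuple as rendered by print_tuple(). An
 * illustrative line (the exact tuple format depends on the l3/l4
 * protocol modules) might look like:
 *
 *	296 l3proto = 2 proto=6 src=10.0.0.1 dst=10.0.0.2 sport=0 dport=40000
 */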

static struct seq_operations exp_seq_ops = {
	.start = exp_seq_start,
	.next = exp_seq_next,
	.stop = exp_seq_stop,
	.show = exp_seq_show
};

static int exp_open(struct inode *inode, struct file *file)
{
	struct seq_file *seq;
	struct ct_expect_iter_state *st;
	int ret;

	st = kmalloc(sizeof(struct ct_expect_iter_state), GFP_KERNEL);
	if (st == NULL)
		return -ENOMEM;
	ret = seq_open(file, &exp_seq_ops);
	if (ret)
		goto out_free;
	seq = file->private_data;
	seq->private = st;
	memset(st, 0, sizeof(struct ct_expect_iter_state));
	return ret;
out_free:
	kfree(st);
	return ret;
}

static const struct file_operations exp_file_ops = {
	.owner = THIS_MODULE,
	.open = exp_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_private,
};
#endif /* CONFIG_PROC_FS */

static int __init exp_proc_init(void)
{
#ifdef CONFIG_PROC_FS
	struct proc_dir_entry *proc;

	proc = proc_net_fops_create("nf_conntrack_expect", 0440, &exp_file_ops);
	if (!proc)
		return -ENOMEM;
#endif /* CONFIG_PROC_FS */
	return 0;
}

static void exp_proc_remove(void)
{
#ifdef CONFIG_PROC_FS
	proc_net_remove("nf_conntrack_expect");
#endif /* CONFIG_PROC_FS */
}

module_param_named(expect_hashsize, nf_ct_expect_hsize, uint, 0600);
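
/*
 * With nf_conntrack built as a module, the expectation hash size can be
 * overridden at load time, e.g. "modprobe nf_conntrack expect_hashsize=1024";
 * built in, the same setting is available on the kernel command line as
 * "nf_conntrack.expect_hashsize=1024". Note that the 0600 mode merely
 * exposes the value under /sys/module/nf_conntrack/parameters/; writing
 * it after boot does not resize the already-allocated table.
 */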

int __init nf_conntrack_expect_init(void)
{
	int err = -ENOMEM;

	if (!nf_ct_expect_hsize) {
		nf_ct_expect_hsize = nf_conntrack_htable_size / 256;
		if (!nf_ct_expect_hsize)
			nf_ct_expect_hsize = 1;
	}

	nf_ct_expect_hash = nf_ct_alloc_hashtable(&nf_ct_expect_hsize,
						  &nf_ct_expect_vmalloc);
	if (nf_ct_expect_hash == NULL)
		goto err1;

	nf_ct_expect_cachep = kmem_cache_create("nf_conntrack_expect",
					sizeof(struct nf_conntrack_expect),
					0, 0, NULL, NULL);
	if (!nf_ct_expect_cachep)
		goto err2;

	err = exp_proc_init();
	if (err < 0)
		goto err3;

	return 0;

err3:
	kmem_cache_destroy(nf_ct_expect_cachep);
err2:
	nf_ct_free_hashtable(nf_ct_expect_hash, nf_ct_expect_vmalloc,
			     nf_ct_expect_hsize);
err1:
	return err;
}

void nf_conntrack_expect_fini(void)
{
	exp_proc_remove();
	kmem_cache_destroy(nf_ct_expect_cachep);
	nf_ct_free_hashtable(nf_ct_expect_hash, nf_ct_expect_vmalloc,
			     nf_ct_expect_hsize);
}