[NETFILTER]: conntrack: add fixed timeout flag in connection tracking
net/ipv4/netfilter/ip_conntrack_core.c
1 /* Connection state tracking for netfilter. This is separated from,
2 but required by, the NAT layer; it can also be used by an iptables
3 extension. */
4
5 /* (C) 1999-2001 Paul `Rusty' Russell
6 * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 * 23 Apr 2001: Harald Welte <laforge@gnumonks.org>
13 * - new API and handling of conntrack/nat helpers
14 * - now capable of multiple expectations for one master
15 * 16 Jul 2002: Harald Welte <laforge@gnumonks.org>
16 * - add usage/reference counts to ip_conntrack_expect
17 * - export ip_conntrack[_expect]_{find_get,put} functions
18 * */
19
20 #include <linux/config.h>
21 #include <linux/types.h>
22 #include <linux/icmp.h>
23 #include <linux/ip.h>
24 #include <linux/netfilter.h>
25 #include <linux/netfilter_ipv4.h>
26 #include <linux/module.h>
27 #include <linux/skbuff.h>
28 #include <linux/proc_fs.h>
29 #include <linux/vmalloc.h>
30 #include <net/checksum.h>
31 #include <net/ip.h>
32 #include <linux/stddef.h>
33 #include <linux/sysctl.h>
34 #include <linux/slab.h>
35 #include <linux/random.h>
36 #include <linux/jhash.h>
37 #include <linux/err.h>
38 #include <linux/percpu.h>
39 #include <linux/moduleparam.h>
40 #include <linux/notifier.h>
41
42 /* ip_conntrack_lock protects the main hash table, protocol/helper/expected
43 registrations, conntrack timers */
44 #define ASSERT_READ_LOCK(x)
45 #define ASSERT_WRITE_LOCK(x)
46
47 #include <linux/netfilter_ipv4/ip_conntrack.h>
48 #include <linux/netfilter_ipv4/ip_conntrack_protocol.h>
49 #include <linux/netfilter_ipv4/ip_conntrack_helper.h>
50 #include <linux/netfilter_ipv4/ip_conntrack_core.h>
51 #include <linux/netfilter_ipv4/listhelp.h>
52
53 #define IP_CONNTRACK_VERSION "2.4"
54
55 #if 0
56 #define DEBUGP printk
57 #else
58 #define DEBUGP(format, args...)
59 #endif
60
61 DEFINE_RWLOCK(ip_conntrack_lock);
62
63 /* ip_conntrack_standalone needs this */
64 atomic_t ip_conntrack_count = ATOMIC_INIT(0);
65
66 void (*ip_conntrack_destroyed)(struct ip_conntrack *conntrack) = NULL;
67 LIST_HEAD(ip_conntrack_expect_list);
68 struct ip_conntrack_protocol *ip_ct_protos[MAX_IP_CT_PROTO];
69 static LIST_HEAD(helpers);
70 unsigned int ip_conntrack_htable_size = 0;
71 int ip_conntrack_max;
72 struct list_head *ip_conntrack_hash;
73 static kmem_cache_t *ip_conntrack_cachep __read_mostly;
74 static kmem_cache_t *ip_conntrack_expect_cachep __read_mostly;
75 struct ip_conntrack ip_conntrack_untracked;
76 unsigned int ip_ct_log_invalid;
77 static LIST_HEAD(unconfirmed);
78 static int ip_conntrack_vmalloc;
79
80 static unsigned int ip_conntrack_next_id;
81 static unsigned int ip_conntrack_expect_next_id;
82 #ifdef CONFIG_IP_NF_CONNTRACK_EVENTS
83 ATOMIC_NOTIFIER_HEAD(ip_conntrack_chain);
84 ATOMIC_NOTIFIER_HEAD(ip_conntrack_expect_chain);
85
86 DEFINE_PER_CPU(struct ip_conntrack_ecache, ip_conntrack_ecache);
87
88 /* deliver cached events and clear cache entry - must be called with locally
89 * disabled softirqs */
90 static inline void
91 __ip_ct_deliver_cached_events(struct ip_conntrack_ecache *ecache)
92 {
93 DEBUGP("ecache: delivering events for %p\n", ecache->ct);
94 if (is_confirmed(ecache->ct) && !is_dying(ecache->ct) && ecache->events)
95 atomic_notifier_call_chain(&ip_conntrack_chain, ecache->events,
96 ecache->ct);
97 ecache->events = 0;
98 ip_conntrack_put(ecache->ct);
99 ecache->ct = NULL;
100 }
101
102 /* Deliver all cached events for a particular conntrack. This is called
103 * by code prior to async packet handling or freeing the skb */
104 void ip_ct_deliver_cached_events(const struct ip_conntrack *ct)
105 {
106 struct ip_conntrack_ecache *ecache;
107
108 local_bh_disable();
109 ecache = &__get_cpu_var(ip_conntrack_ecache);
110 if (ecache->ct == ct)
111 __ip_ct_deliver_cached_events(ecache);
112 local_bh_enable();
113 }
114
115 void __ip_ct_event_cache_init(struct ip_conntrack *ct)
116 {
117 struct ip_conntrack_ecache *ecache;
118
119 /* take care of delivering potentially old events */
120 ecache = &__get_cpu_var(ip_conntrack_ecache);
121 BUG_ON(ecache->ct == ct);
122 if (ecache->ct)
123 __ip_ct_deliver_cached_events(ecache);
124 /* initialize for this conntrack/packet */
125 ecache->ct = ct;
126 nf_conntrack_get(&ct->ct_general);
127 }
128
129 /* flush the event cache - touches other CPUs' data and must not be called while
130 * packets are still passing through the code */
131 static void ip_ct_event_cache_flush(void)
132 {
133 struct ip_conntrack_ecache *ecache;
134 int cpu;
135
136 for_each_possible_cpu(cpu) {
137 ecache = &per_cpu(ip_conntrack_ecache, cpu);
138 if (ecache->ct)
139 ip_conntrack_put(ecache->ct);
140 }
141 }
142 #else
143 static inline void ip_ct_event_cache_flush(void) {}
144 #endif /* CONFIG_IP_NF_CONNTRACK_EVENTS */
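/* Illustrative only, never compiled: a sketch of how the per-CPU event
 * cache above is meant to be used from the packet path.  Events raised
 * while a packet traverses the hooks are cached against its conntrack
 * and delivered in a single notifier call once processing of that
 * packet ends.  The function name below is hypothetical. */
#if 0
static void example_event_cache_usage(struct sk_buff *skb)
{
	enum ip_conntrack_info ctinfo;
	struct ip_conntrack *ct = ip_conntrack_get(skb, &ctinfo);

	if (!ct)
		return;

	/* somewhere in the packet path: queue an event for this conntrack */
	ip_conntrack_event_cache(IPCT_STATUS, skb);

	/* at the end of the packet path: flush whatever was cached */
	ip_ct_deliver_cached_events(ct);
}
#endif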
145
146 DEFINE_PER_CPU(struct ip_conntrack_stat, ip_conntrack_stat);
147
148 static int ip_conntrack_hash_rnd_initted;
149 static unsigned int ip_conntrack_hash_rnd;
150
151 static u_int32_t __hash_conntrack(const struct ip_conntrack_tuple *tuple,
152 unsigned int size, unsigned int rnd)
153 {
154 return (jhash_3words(tuple->src.ip,
155 (tuple->dst.ip ^ tuple->dst.protonum),
156 (tuple->src.u.all | (tuple->dst.u.all << 16)),
157 rnd) % size);
158 }
159
160 static u_int32_t
161 hash_conntrack(const struct ip_conntrack_tuple *tuple)
162 {
163 return __hash_conntrack(tuple, ip_conntrack_htable_size,
164 ip_conntrack_hash_rnd);
165 }
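/* Illustrative only, never compiled: a minimal sketch of computing the
 * bucket index for a hypothetical TCP tuple via the helpers above.  All
 * field values are made up; only the call pattern matters. */
#if 0
static unsigned int example_bucket_for_tuple(void)
{
	struct ip_conntrack_tuple t;

	memset(&t, 0, sizeof(t));
	t.src.ip = htonl(0xc0a80001);		/* 192.168.0.1, example */
	t.dst.ip = htonl(0xc0a80002);		/* 192.168.0.2, example */
	t.src.u.tcp.port = htons(12345);
	t.dst.u.tcp.port = htons(80);
	t.dst.protonum = IPPROTO_TCP;

	/* same hash the insert/lookup paths use */
	return hash_conntrack(&t);
}
#endif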
166
167 int
168 ip_ct_get_tuple(const struct iphdr *iph,
169 const struct sk_buff *skb,
170 unsigned int dataoff,
171 struct ip_conntrack_tuple *tuple,
172 const struct ip_conntrack_protocol *protocol)
173 {
174 /* Should never happen */
175 if (iph->frag_off & htons(IP_OFFSET)) {
176 printk("ip_conntrack_core: Frag of proto %u.\n",
177 iph->protocol);
178 return 0;
179 }
180
181 tuple->src.ip = iph->saddr;
182 tuple->dst.ip = iph->daddr;
183 tuple->dst.protonum = iph->protocol;
184 tuple->dst.dir = IP_CT_DIR_ORIGINAL;
185
186 return protocol->pkt_to_tuple(skb, dataoff, tuple);
187 }
188
189 int
190 ip_ct_invert_tuple(struct ip_conntrack_tuple *inverse,
191 const struct ip_conntrack_tuple *orig,
192 const struct ip_conntrack_protocol *protocol)
193 {
194 inverse->src.ip = orig->dst.ip;
195 inverse->dst.ip = orig->src.ip;
196 inverse->dst.protonum = orig->dst.protonum;
197 inverse->dst.dir = !orig->dst.dir;
198
199 return protocol->invert_tuple(inverse, orig);
200 }
201
202
203 /* ip_conntrack_expect helper functions */
204 void ip_ct_unlink_expect(struct ip_conntrack_expect *exp)
205 {
206 ASSERT_WRITE_LOCK(&ip_conntrack_lock);
207 IP_NF_ASSERT(!timer_pending(&exp->timeout));
208 list_del(&exp->list);
209 CONNTRACK_STAT_INC(expect_delete);
210 exp->master->expecting--;
211 ip_conntrack_expect_put(exp);
212 }
213
214 static void expectation_timed_out(unsigned long ul_expect)
215 {
216 struct ip_conntrack_expect *exp = (void *)ul_expect;
217
218 write_lock_bh(&ip_conntrack_lock);
219 ip_ct_unlink_expect(exp);
220 write_unlock_bh(&ip_conntrack_lock);
221 ip_conntrack_expect_put(exp);
222 }
223
224 struct ip_conntrack_expect *
225 __ip_conntrack_expect_find(const struct ip_conntrack_tuple *tuple)
226 {
227 struct ip_conntrack_expect *i;
228
229 list_for_each_entry(i, &ip_conntrack_expect_list, list) {
230 if (ip_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask)) {
231 atomic_inc(&i->use);
232 return i;
233 }
234 }
235 return NULL;
236 }
237
238 /* Just find an expectation corresponding to a tuple. */
239 struct ip_conntrack_expect *
240 ip_conntrack_expect_find(const struct ip_conntrack_tuple *tuple)
241 {
242 struct ip_conntrack_expect *i;
243
244 read_lock_bh(&ip_conntrack_lock);
245 i = __ip_conntrack_expect_find(tuple);
246 read_unlock_bh(&ip_conntrack_lock);
247
248 return i;
249 }
250
251 /* If an expectation for this connection is found, it is deleted from the
252 * global list and then returned. */
253 static struct ip_conntrack_expect *
254 find_expectation(const struct ip_conntrack_tuple *tuple)
255 {
256 struct ip_conntrack_expect *i;
257
258 list_for_each_entry(i, &ip_conntrack_expect_list, list) {
259 /* If master is not in hash table yet (ie. packet hasn't left
260 this machine yet), how can other end know about expected?
261 Hence these are not the droids you are looking for (if
262 master ct never got confirmed, we'd hold a reference to it
263 and weird things would happen to future packets). */
264 if (ip_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask)
265 && is_confirmed(i->master)) {
266 if (i->flags & IP_CT_EXPECT_PERMANENT) {
267 atomic_inc(&i->use);
268 return i;
269 } else if (del_timer(&i->timeout)) {
270 ip_ct_unlink_expect(i);
271 return i;
272 }
273 }
274 }
275 return NULL;
276 }
277
278 /* delete all expectations for this conntrack */
279 void ip_ct_remove_expectations(struct ip_conntrack *ct)
280 {
281 struct ip_conntrack_expect *i, *tmp;
282
283 /* Optimization: most connections never expect any others. */
284 if (ct->expecting == 0)
285 return;
286
287 list_for_each_entry_safe(i, tmp, &ip_conntrack_expect_list, list) {
288 if (i->master == ct && del_timer(&i->timeout)) {
289 ip_ct_unlink_expect(i);
290 ip_conntrack_expect_put(i);
291 }
292 }
293 }
294
295 static void
296 clean_from_lists(struct ip_conntrack *ct)
297 {
298 unsigned int ho, hr;
299
300 DEBUGP("clean_from_lists(%p)\n", ct);
301 ASSERT_WRITE_LOCK(&ip_conntrack_lock);
302
303 ho = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
304 hr = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);
305 LIST_DELETE(&ip_conntrack_hash[ho], &ct->tuplehash[IP_CT_DIR_ORIGINAL]);
306 LIST_DELETE(&ip_conntrack_hash[hr], &ct->tuplehash[IP_CT_DIR_REPLY]);
307
308 /* Destroy all pending expectations */
309 ip_ct_remove_expectations(ct);
310 }
311
312 static void
313 destroy_conntrack(struct nf_conntrack *nfct)
314 {
315 struct ip_conntrack *ct = (struct ip_conntrack *)nfct;
316 struct ip_conntrack_protocol *proto;
317
318 DEBUGP("destroy_conntrack(%p)\n", ct);
319 IP_NF_ASSERT(atomic_read(&nfct->use) == 0);
320 IP_NF_ASSERT(!timer_pending(&ct->timeout));
321
322 ip_conntrack_event(IPCT_DESTROY, ct);
323 set_bit(IPS_DYING_BIT, &ct->status);
324
325 /* To make sure we don't get any weird locking issues here:
326 * destroy_conntrack() MUST NOT be called with a write lock
327 * to ip_conntrack_lock!!! -HW */
328 proto = __ip_conntrack_proto_find(ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.protonum);
329 if (proto && proto->destroy)
330 proto->destroy(ct);
331
332 if (ip_conntrack_destroyed)
333 ip_conntrack_destroyed(ct);
334
335 write_lock_bh(&ip_conntrack_lock);
336 /* Expectations will have been removed in clean_from_lists,
337 * except TFTP can create an expectation on the first packet,
338 * before connection is in the list, so we need to clean here,
339 * too. */
340 ip_ct_remove_expectations(ct);
341
342 /* We overload first tuple to link into unconfirmed list. */
343 if (!is_confirmed(ct)) {
344 BUG_ON(list_empty(&ct->tuplehash[IP_CT_DIR_ORIGINAL].list));
345 list_del(&ct->tuplehash[IP_CT_DIR_ORIGINAL].list);
346 }
347
348 CONNTRACK_STAT_INC(delete);
349 write_unlock_bh(&ip_conntrack_lock);
350
351 if (ct->master)
352 ip_conntrack_put(ct->master);
353
354 DEBUGP("destroy_conntrack: returning ct=%p to slab\n", ct);
355 ip_conntrack_free(ct);
356 }
357
358 static void death_by_timeout(unsigned long ul_conntrack)
359 {
360 struct ip_conntrack *ct = (void *)ul_conntrack;
361
362 write_lock_bh(&ip_conntrack_lock);
363 /* Inside lock so preempt is disabled on module removal path.
364 * Otherwise we can get spurious warnings. */
365 CONNTRACK_STAT_INC(delete_list);
366 clean_from_lists(ct);
367 write_unlock_bh(&ip_conntrack_lock);
368 ip_conntrack_put(ct);
369 }
370
371 static inline int
372 conntrack_tuple_cmp(const struct ip_conntrack_tuple_hash *i,
373 const struct ip_conntrack_tuple *tuple,
374 const struct ip_conntrack *ignored_conntrack)
375 {
376 ASSERT_READ_LOCK(&ip_conntrack_lock);
377 return tuplehash_to_ctrack(i) != ignored_conntrack
378 && ip_ct_tuple_equal(tuple, &i->tuple);
379 }
380
381 struct ip_conntrack_tuple_hash *
382 __ip_conntrack_find(const struct ip_conntrack_tuple *tuple,
383 const struct ip_conntrack *ignored_conntrack)
384 {
385 struct ip_conntrack_tuple_hash *h;
386 unsigned int hash = hash_conntrack(tuple);
387
388 ASSERT_READ_LOCK(&ip_conntrack_lock);
389 list_for_each_entry(h, &ip_conntrack_hash[hash], list) {
390 if (conntrack_tuple_cmp(h, tuple, ignored_conntrack)) {
391 CONNTRACK_STAT_INC(found);
392 return h;
393 }
394 CONNTRACK_STAT_INC(searched);
395 }
396
397 return NULL;
398 }
399
400 /* Find a connection corresponding to a tuple. */
401 struct ip_conntrack_tuple_hash *
402 ip_conntrack_find_get(const struct ip_conntrack_tuple *tuple,
403 const struct ip_conntrack *ignored_conntrack)
404 {
405 struct ip_conntrack_tuple_hash *h;
406
407 read_lock_bh(&ip_conntrack_lock);
408 h = __ip_conntrack_find(tuple, ignored_conntrack);
409 if (h)
410 atomic_inc(&tuplehash_to_ctrack(h)->ct_general.use);
411 read_unlock_bh(&ip_conntrack_lock);
412
413 return h;
414 }
415
416 static void __ip_conntrack_hash_insert(struct ip_conntrack *ct,
417 unsigned int hash,
418 unsigned int repl_hash)
419 {
420 ct->id = ++ip_conntrack_next_id;
421 list_prepend(&ip_conntrack_hash[hash],
422 &ct->tuplehash[IP_CT_DIR_ORIGINAL].list);
423 list_prepend(&ip_conntrack_hash[repl_hash],
424 &ct->tuplehash[IP_CT_DIR_REPLY].list);
425 }
426
427 void ip_conntrack_hash_insert(struct ip_conntrack *ct)
428 {
429 unsigned int hash, repl_hash;
430
431 hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
432 repl_hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);
433
434 write_lock_bh(&ip_conntrack_lock);
435 __ip_conntrack_hash_insert(ct, hash, repl_hash);
436 write_unlock_bh(&ip_conntrack_lock);
437 }
438
439 /* Confirm a connection given skb; places it in hash table */
440 int
441 __ip_conntrack_confirm(struct sk_buff **pskb)
442 {
443 unsigned int hash, repl_hash;
444 struct ip_conntrack *ct;
445 enum ip_conntrack_info ctinfo;
446
447 ct = ip_conntrack_get(*pskb, &ctinfo);
448
449 /* ipt_REJECT uses ip_conntrack_attach to attach related
450 ICMP/TCP RST packets in the other direction. The packet
451 which created the connection will be IP_CT_NEW, or for an
452 expected connection, IP_CT_RELATED. */
453 if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
454 return NF_ACCEPT;
455
456 hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
457 repl_hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);
458
459 /* We're not in hash table, and we refuse to set up related
460 connections for unconfirmed conns. But packet copies and
461 REJECT will give spurious warnings here. */
462 /* IP_NF_ASSERT(atomic_read(&ct->ct_general.use) == 1); */
463
464 /* No external references means no one else could have
465 confirmed us. */
466 IP_NF_ASSERT(!is_confirmed(ct));
467 DEBUGP("Confirming conntrack %p\n", ct);
468
469 write_lock_bh(&ip_conntrack_lock);
470
471 /* See if there's one in the list already, including reverse:
472 NAT could have grabbed it without realizing, since we're
473 not in the hash. If there is, we lost the race. */
474 if (!LIST_FIND(&ip_conntrack_hash[hash],
475 conntrack_tuple_cmp,
476 struct ip_conntrack_tuple_hash *,
477 &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple, NULL)
478 && !LIST_FIND(&ip_conntrack_hash[repl_hash],
479 conntrack_tuple_cmp,
480 struct ip_conntrack_tuple_hash *,
481 &ct->tuplehash[IP_CT_DIR_REPLY].tuple, NULL)) {
482 /* Remove from unconfirmed list */
483 list_del(&ct->tuplehash[IP_CT_DIR_ORIGINAL].list);
484
485 __ip_conntrack_hash_insert(ct, hash, repl_hash);
486 /* Timer relative to confirmation time, not original
487 setting time, otherwise we'd get timer wrap in
488 weird delay cases. */
489 ct->timeout.expires += jiffies;
490 add_timer(&ct->timeout);
491 atomic_inc(&ct->ct_general.use);
492 set_bit(IPS_CONFIRMED_BIT, &ct->status);
493 CONNTRACK_STAT_INC(insert);
494 write_unlock_bh(&ip_conntrack_lock);
495 if (ct->helper)
496 ip_conntrack_event_cache(IPCT_HELPER, *pskb);
497 #ifdef CONFIG_IP_NF_NAT_NEEDED
498 if (test_bit(IPS_SRC_NAT_DONE_BIT, &ct->status) ||
499 test_bit(IPS_DST_NAT_DONE_BIT, &ct->status))
500 ip_conntrack_event_cache(IPCT_NATINFO, *pskb);
501 #endif
502 ip_conntrack_event_cache(master_ct(ct) ?
503 IPCT_RELATED : IPCT_NEW, *pskb);
504
505 return NF_ACCEPT;
506 }
507
508 CONNTRACK_STAT_INC(insert_failed);
509 write_unlock_bh(&ip_conntrack_lock);
510
511 return NF_DROP;
512 }
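/* Illustrative only, never compiled: confirmation is normally driven by a
 * late netfilter hook (registered in ip_conntrack_standalone.c at
 * LOCAL_IN/POST_ROUTING) calling the ip_conntrack_confirm() wrapper,
 * which in turn calls __ip_conntrack_confirm() above for unconfirmed
 * conntracks.  A hypothetical minimal hook would look like: */
#if 0
static unsigned int example_confirm_hook(unsigned int hooknum,
					 struct sk_buff **pskb,
					 const struct net_device *in,
					 const struct net_device *out,
					 int (*okfn)(struct sk_buff *))
{
	/* place the conntrack in the hash table once the first packet
	 * is about to leave the stack */
	return ip_conntrack_confirm(pskb);
}
#endif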
513
514 /* Returns true if a connection corresponds to the tuple (required
515 for NAT). */
516 int
517 ip_conntrack_tuple_taken(const struct ip_conntrack_tuple *tuple,
518 const struct ip_conntrack *ignored_conntrack)
519 {
520 struct ip_conntrack_tuple_hash *h;
521
522 read_lock_bh(&ip_conntrack_lock);
523 h = __ip_conntrack_find(tuple, ignored_conntrack);
524 read_unlock_bh(&ip_conntrack_lock);
525
526 return h != NULL;
527 }
528
529 /* There's a small race here where we may free a just-assured
530 connection. Too bad: we're in trouble anyway. */
531 static inline int unreplied(const struct ip_conntrack_tuple_hash *i)
532 {
533 return !(test_bit(IPS_ASSURED_BIT, &tuplehash_to_ctrack(i)->status));
534 }
535
536 static int early_drop(struct list_head *chain)
537 {
538 /* Traverse backwards: gives us oldest, which is roughly LRU */
539 struct ip_conntrack_tuple_hash *h;
540 struct ip_conntrack *ct = NULL;
541 int dropped = 0;
542
543 read_lock_bh(&ip_conntrack_lock);
544 h = LIST_FIND_B(chain, unreplied, struct ip_conntrack_tuple_hash *);
545 if (h) {
546 ct = tuplehash_to_ctrack(h);
547 atomic_inc(&ct->ct_general.use);
548 }
549 read_unlock_bh(&ip_conntrack_lock);
550
551 if (!ct)
552 return dropped;
553
554 if (del_timer(&ct->timeout)) {
555 death_by_timeout((unsigned long)ct);
556 dropped = 1;
557 CONNTRACK_STAT_INC(early_drop);
558 }
559 ip_conntrack_put(ct);
560 return dropped;
561 }
562
563 static inline int helper_cmp(const struct ip_conntrack_helper *i,
564 const struct ip_conntrack_tuple *rtuple)
565 {
566 return ip_ct_tuple_mask_cmp(rtuple, &i->tuple, &i->mask);
567 }
568
569 static struct ip_conntrack_helper *
570 __ip_conntrack_helper_find( const struct ip_conntrack_tuple *tuple)
571 {
572 return LIST_FIND(&helpers, helper_cmp,
573 struct ip_conntrack_helper *,
574 tuple);
575 }
576
577 struct ip_conntrack_helper *
578 ip_conntrack_helper_find_get( const struct ip_conntrack_tuple *tuple)
579 {
580 struct ip_conntrack_helper *helper;
581
582 /* need ip_conntrack_lock to assure that helper exists until
583 * try_module_get() is called */
584 read_lock_bh(&ip_conntrack_lock);
585
586 helper = __ip_conntrack_helper_find(tuple);
587 if (helper) {
588 /* need to increase module usage count to assure helper will
589 * not go away while the caller is e.g. busy putting a
590 * conntrack in the hash that uses the helper */
591 if (!try_module_get(helper->me))
592 helper = NULL;
593 }
594
595 read_unlock_bh(&ip_conntrack_lock);
596
597 return helper;
598 }
599
600 void ip_conntrack_helper_put(struct ip_conntrack_helper *helper)
601 {
602 module_put(helper->me);
603 }
604
605 struct ip_conntrack_protocol *
606 __ip_conntrack_proto_find(u_int8_t protocol)
607 {
608 return ip_ct_protos[protocol];
609 }
610
611 /* this is guaranteed to always return a valid protocol helper, since
612 * it falls back to generic_protocol */
613 struct ip_conntrack_protocol *
614 ip_conntrack_proto_find_get(u_int8_t protocol)
615 {
616 struct ip_conntrack_protocol *p;
617
618 preempt_disable();
619 p = __ip_conntrack_proto_find(protocol);
620 if (p) {
621 if (!try_module_get(p->me))
622 p = &ip_conntrack_generic_protocol;
623 }
624 preempt_enable();
625
626 return p;
627 }
628
629 void ip_conntrack_proto_put(struct ip_conntrack_protocol *p)
630 {
631 module_put(p->me);
632 }
633
634 struct ip_conntrack *ip_conntrack_alloc(struct ip_conntrack_tuple *orig,
635 struct ip_conntrack_tuple *repl)
636 {
637 struct ip_conntrack *conntrack;
638
639 if (!ip_conntrack_hash_rnd_initted) {
640 get_random_bytes(&ip_conntrack_hash_rnd, 4);
641 ip_conntrack_hash_rnd_initted = 1;
642 }
643
644 if (ip_conntrack_max
645 && atomic_read(&ip_conntrack_count) >= ip_conntrack_max) {
646 unsigned int hash = hash_conntrack(orig);
647 /* Try dropping from this hash chain. */
648 if (!early_drop(&ip_conntrack_hash[hash])) {
649 if (net_ratelimit())
650 printk(KERN_WARNING
651 "ip_conntrack: table full, dropping"
652 " packet.\n");
653 return ERR_PTR(-ENOMEM);
654 }
655 }
656
657 conntrack = kmem_cache_alloc(ip_conntrack_cachep, GFP_ATOMIC);
658 if (!conntrack) {
659 DEBUGP("Can't allocate conntrack.\n");
660 return ERR_PTR(-ENOMEM);
661 }
662
663 memset(conntrack, 0, sizeof(*conntrack));
664 atomic_set(&conntrack->ct_general.use, 1);
665 conntrack->ct_general.destroy = destroy_conntrack;
666 conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig;
667 conntrack->tuplehash[IP_CT_DIR_REPLY].tuple = *repl;
668 /* Don't set timer yet: wait for confirmation */
669 init_timer(&conntrack->timeout);
670 conntrack->timeout.data = (unsigned long)conntrack;
671 conntrack->timeout.function = death_by_timeout;
672
673 atomic_inc(&ip_conntrack_count);
674
675 return conntrack;
676 }
677
678 void
679 ip_conntrack_free(struct ip_conntrack *conntrack)
680 {
681 atomic_dec(&ip_conntrack_count);
682 kmem_cache_free(ip_conntrack_cachep, conntrack);
683 }
684
685 /* Allocate a new conntrack: we return -ENOMEM if classification
686 * failed due to stress. Otherwise it really is unclassifiable */
687 static struct ip_conntrack_tuple_hash *
688 init_conntrack(struct ip_conntrack_tuple *tuple,
689 struct ip_conntrack_protocol *protocol,
690 struct sk_buff *skb)
691 {
692 struct ip_conntrack *conntrack;
693 struct ip_conntrack_tuple repl_tuple;
694 struct ip_conntrack_expect *exp;
695
696 if (!ip_ct_invert_tuple(&repl_tuple, tuple, protocol)) {
697 DEBUGP("Can't invert tuple.\n");
698 return NULL;
699 }
700
701 conntrack = ip_conntrack_alloc(tuple, &repl_tuple);
702 if (conntrack == NULL || IS_ERR(conntrack))
703 return (struct ip_conntrack_tuple_hash *)conntrack;
704
705 if (!protocol->new(conntrack, skb)) {
706 ip_conntrack_free(conntrack);
707 return NULL;
708 }
709
710 write_lock_bh(&ip_conntrack_lock);
711 exp = find_expectation(tuple);
712
713 if (exp) {
714 DEBUGP("conntrack: expectation arrives ct=%p exp=%p\n",
715 conntrack, exp);
716 /* Welcome, Mr. Bond. We've been expecting you... */
717 __set_bit(IPS_EXPECTED_BIT, &conntrack->status);
718 conntrack->master = exp->master;
719 #ifdef CONFIG_IP_NF_CONNTRACK_MARK
720 conntrack->mark = exp->master->mark;
721 #endif
722 #if defined(CONFIG_IP_NF_TARGET_MASQUERADE) || \
723 defined(CONFIG_IP_NF_TARGET_MASQUERADE_MODULE)
724 /* this is ugly, but there is no other place to put it */
725 conntrack->nat.masq_index = exp->master->nat.masq_index;
726 #endif
727 nf_conntrack_get(&conntrack->master->ct_general);
728 CONNTRACK_STAT_INC(expect_new);
729 } else {
730 conntrack->helper = __ip_conntrack_helper_find(&repl_tuple);
731
732 CONNTRACK_STAT_INC(new);
733 }
734
735 /* Overload tuple linked list to put us in unconfirmed list. */
736 list_add(&conntrack->tuplehash[IP_CT_DIR_ORIGINAL].list, &unconfirmed);
737
738 write_unlock_bh(&ip_conntrack_lock);
739
740 if (exp) {
741 if (exp->expectfn)
742 exp->expectfn(conntrack, exp);
743 ip_conntrack_expect_put(exp);
744 }
745
746 return &conntrack->tuplehash[IP_CT_DIR_ORIGINAL];
747 }
748
749 /* On success, returns conntrack ptr, sets skb->nfct and ctinfo */
750 static inline struct ip_conntrack *
751 resolve_normal_ct(struct sk_buff *skb,
752 struct ip_conntrack_protocol *proto,
753 int *set_reply,
754 unsigned int hooknum,
755 enum ip_conntrack_info *ctinfo)
756 {
757 struct ip_conntrack_tuple tuple;
758 struct ip_conntrack_tuple_hash *h;
759 struct ip_conntrack *ct;
760
761 IP_NF_ASSERT((skb->nh.iph->frag_off & htons(IP_OFFSET)) == 0);
762
763 if (!ip_ct_get_tuple(skb->nh.iph, skb, skb->nh.iph->ihl*4,
764 &tuple,proto))
765 return NULL;
766
767 /* look for tuple match */
768 h = ip_conntrack_find_get(&tuple, NULL);
769 if (!h) {
770 h = init_conntrack(&tuple, proto, skb);
771 if (!h)
772 return NULL;
773 if (IS_ERR(h))
774 return (void *)h;
775 }
776 ct = tuplehash_to_ctrack(h);
777
778 /* It exists; we have (non-exclusive) reference. */
779 if (DIRECTION(h) == IP_CT_DIR_REPLY) {
780 *ctinfo = IP_CT_ESTABLISHED + IP_CT_IS_REPLY;
781 /* Please set reply bit if this packet OK */
782 *set_reply = 1;
783 } else {
784 /* Once we've had two way comms, always ESTABLISHED. */
785 if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
786 DEBUGP("ip_conntrack_in: normal packet for %p\n",
787 ct);
788 *ctinfo = IP_CT_ESTABLISHED;
789 } else if (test_bit(IPS_EXPECTED_BIT, &ct->status)) {
790 DEBUGP("ip_conntrack_in: related packet for %p\n",
791 ct);
792 *ctinfo = IP_CT_RELATED;
793 } else {
794 DEBUGP("ip_conntrack_in: new packet for %p\n",
795 ct);
796 *ctinfo = IP_CT_NEW;
797 }
798 *set_reply = 0;
799 }
800 skb->nfct = &ct->ct_general;
801 skb->nfctinfo = *ctinfo;
802 return ct;
803 }
804
805 /* Netfilter hook itself. */
806 unsigned int ip_conntrack_in(unsigned int hooknum,
807 struct sk_buff **pskb,
808 const struct net_device *in,
809 const struct net_device *out,
810 int (*okfn)(struct sk_buff *))
811 {
812 struct ip_conntrack *ct;
813 enum ip_conntrack_info ctinfo;
814 struct ip_conntrack_protocol *proto;
815 int set_reply = 0;
816 int ret;
817
818 /* Previously seen (loopback or untracked)? Ignore. */
819 if ((*pskb)->nfct) {
820 CONNTRACK_STAT_INC(ignore);
821 return NF_ACCEPT;
822 }
823
824 /* Should never happen */
825 if ((*pskb)->nh.iph->frag_off & htons(IP_OFFSET)) {
826 if (net_ratelimit()) {
827 printk(KERN_ERR "ip_conntrack_in: Frag of proto %u (hook=%u)\n",
828 (*pskb)->nh.iph->protocol, hooknum);
829 }
830 return NF_DROP;
831 }
832
833 /* Doesn't cover locally-generated broadcast, so not worth it. */
834 #if 0
835 /* Ignore broadcast: no `connection'. */
836 if ((*pskb)->pkt_type == PACKET_BROADCAST) {
837 printk("Broadcast packet!\n");
838 return NF_ACCEPT;
839 } else if (((*pskb)->nh.iph->daddr & htonl(0x000000FF))
840 == htonl(0x000000FF)) {
841 printk("Should bcast: %u.%u.%u.%u->%u.%u.%u.%u (sk=%p, ptype=%u)\n",
842 NIPQUAD((*pskb)->nh.iph->saddr),
843 NIPQUAD((*pskb)->nh.iph->daddr),
844 (*pskb)->sk, (*pskb)->pkt_type);
845 }
846 #endif
847
848 proto = __ip_conntrack_proto_find((*pskb)->nh.iph->protocol);
849
850 /* It may be a special packet, error, unclean...
851 * the inverse of the return code tells the netfilter
852 * core what to do with the packet. */
853 if (proto->error != NULL
854 && (ret = proto->error(*pskb, &ctinfo, hooknum)) <= 0) {
855 CONNTRACK_STAT_INC(error);
856 CONNTRACK_STAT_INC(invalid);
857 return -ret;
858 }
859
860 if (!(ct = resolve_normal_ct(*pskb, proto,&set_reply,hooknum,&ctinfo))) {
861 /* Not valid part of a connection */
862 CONNTRACK_STAT_INC(invalid);
863 return NF_ACCEPT;
864 }
865
866 if (IS_ERR(ct)) {
867 /* Too stressed to deal. */
868 CONNTRACK_STAT_INC(drop);
869 return NF_DROP;
870 }
871
872 IP_NF_ASSERT((*pskb)->nfct);
873
874 ret = proto->packet(ct, *pskb, ctinfo);
875 if (ret < 0) {
876 /* Invalid: inverse of the return code tells
877 * the netfilter core what to do */
878 nf_conntrack_put((*pskb)->nfct);
879 (*pskb)->nfct = NULL;
880 CONNTRACK_STAT_INC(invalid);
881 return -ret;
882 }
883
884 if (set_reply && !test_and_set_bit(IPS_SEEN_REPLY_BIT, &ct->status))
885 ip_conntrack_event_cache(IPCT_STATUS, *pskb);
886
887 return ret;
888 }
889
890 int invert_tuplepr(struct ip_conntrack_tuple *inverse,
891 const struct ip_conntrack_tuple *orig)
892 {
893 return ip_ct_invert_tuple(inverse, orig,
894 __ip_conntrack_proto_find(orig->dst.protonum));
895 }
896
897 /* Would two expected things clash? */
898 static inline int expect_clash(const struct ip_conntrack_expect *a,
899 const struct ip_conntrack_expect *b)
900 {
901 /* Part covered by intersection of masks must be unequal,
902 otherwise they clash */
903 struct ip_conntrack_tuple intersect_mask
904 = { { a->mask.src.ip & b->mask.src.ip,
905 { a->mask.src.u.all & b->mask.src.u.all } },
906 { a->mask.dst.ip & b->mask.dst.ip,
907 { a->mask.dst.u.all & b->mask.dst.u.all },
908 a->mask.dst.protonum & b->mask.dst.protonum } };
909
910 return ip_ct_tuple_mask_cmp(&a->tuple, &b->tuple, &intersect_mask);
911 }
912
913 static inline int expect_matches(const struct ip_conntrack_expect *a,
914 const struct ip_conntrack_expect *b)
915 {
916 return a->master == b->master
917 && ip_ct_tuple_equal(&a->tuple, &b->tuple)
918 && ip_ct_tuple_equal(&a->mask, &b->mask);
919 }
920
921 /* Generally a bad idea to call this: could have matched already. */
922 void ip_conntrack_unexpect_related(struct ip_conntrack_expect *exp)
923 {
924 struct ip_conntrack_expect *i;
925
926 write_lock_bh(&ip_conntrack_lock);
927 /* choose the oldest expectation to evict */
928 list_for_each_entry_reverse(i, &ip_conntrack_expect_list, list) {
929 if (expect_matches(i, exp) && del_timer(&i->timeout)) {
930 ip_ct_unlink_expect(i);
931 write_unlock_bh(&ip_conntrack_lock);
932 ip_conntrack_expect_put(i);
933 return;
934 }
935 }
936 write_unlock_bh(&ip_conntrack_lock);
937 }
938
939 /* We don't increase the master conntrack refcount for non-fulfilled
940 * conntracks. During the conntrack destruction, the expectations are
941 * always killed before the conntrack itself */
942 struct ip_conntrack_expect *ip_conntrack_expect_alloc(struct ip_conntrack *me)
943 {
944 struct ip_conntrack_expect *new;
945
946 new = kmem_cache_alloc(ip_conntrack_expect_cachep, GFP_ATOMIC);
947 if (!new) {
948 DEBUGP("expect_related: OOM allocating expect\n");
949 return NULL;
950 }
951 new->master = me;
952 atomic_set(&new->use, 1);
953 return new;
954 }
955
956 void ip_conntrack_expect_put(struct ip_conntrack_expect *exp)
957 {
958 if (atomic_dec_and_test(&exp->use))
959 kmem_cache_free(ip_conntrack_expect_cachep, exp);
960 }
961
962 static void ip_conntrack_expect_insert(struct ip_conntrack_expect *exp)
963 {
964 atomic_inc(&exp->use);
965 exp->master->expecting++;
966 list_add(&exp->list, &ip_conntrack_expect_list);
967
968 init_timer(&exp->timeout);
969 exp->timeout.data = (unsigned long)exp;
970 exp->timeout.function = expectation_timed_out;
971 exp->timeout.expires = jiffies + exp->master->helper->timeout * HZ;
972 add_timer(&exp->timeout);
973
974 exp->id = ++ip_conntrack_expect_next_id;
975 atomic_inc(&exp->use);
976 CONNTRACK_STAT_INC(expect_create);
977 }
978
979 /* Race with expectations being used means we could have none to find; OK. */
980 static void evict_oldest_expect(struct ip_conntrack *master)
981 {
982 struct ip_conntrack_expect *i;
983
984 list_for_each_entry_reverse(i, &ip_conntrack_expect_list, list) {
985 if (i->master == master) {
986 if (del_timer(&i->timeout)) {
987 ip_ct_unlink_expect(i);
988 ip_conntrack_expect_put(i);
989 }
990 break;
991 }
992 }
993 }
994
995 static inline int refresh_timer(struct ip_conntrack_expect *i)
996 {
997 if (!del_timer(&i->timeout))
998 return 0;
999
1000 i->timeout.expires = jiffies + i->master->helper->timeout*HZ;
1001 add_timer(&i->timeout);
1002 return 1;
1003 }
1004
1005 int ip_conntrack_expect_related(struct ip_conntrack_expect *expect)
1006 {
1007 struct ip_conntrack_expect *i;
1008 int ret;
1009
1010 DEBUGP("ip_conntrack_expect_related %p\n", expect);
1011 DEBUGP("tuple: "); DUMP_TUPLE(&expect->tuple);
1012 DEBUGP("mask: "); DUMP_TUPLE(&expect->mask);
1013
1014 write_lock_bh(&ip_conntrack_lock);
1015 list_for_each_entry(i, &ip_conntrack_expect_list, list) {
1016 if (expect_matches(i, expect)) {
1017 /* Refresh timer: if it's dying, ignore. */
1018 if (refresh_timer(i)) {
1019 ret = 0;
1020 goto out;
1021 }
1022 } else if (expect_clash(i, expect)) {
1023 ret = -EBUSY;
1024 goto out;
1025 }
1026 }
1027
1028 /* Will we be over the limit? */
1029 if (expect->master->helper->max_expected &&
1030 expect->master->expecting >= expect->master->helper->max_expected)
1031 evict_oldest_expect(expect->master);
1032
1033 ip_conntrack_expect_insert(expect);
1034 ip_conntrack_expect_event(IPEXP_NEW, expect);
1035 ret = 0;
1036 out:
1037 write_unlock_bh(&ip_conntrack_lock);
1038 return ret;
1039 }
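/* Illustrative only, never compiled: a rough sketch of how a helper
 * (ftp, irc, ...) builds and registers an expectation for a secondary
 * connection.  The function name, addresses and ports are invented for
 * the example. */
#if 0
static int example_expect_data_channel(struct ip_conntrack *ct,
				       u_int32_t server_ip,
				       u_int16_t server_port)
{
	struct ip_conntrack_expect *exp;
	int ret;

	exp = ip_conntrack_expect_alloc(ct);
	if (!exp)
		return -ENOMEM;

	/* expect client -> server_ip:server_port, any source port */
	exp->tuple.src.ip = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.ip;
	exp->tuple.src.u.tcp.port = 0;
	exp->tuple.dst.ip = server_ip;
	exp->tuple.dst.u.tcp.port = htons(server_port);
	exp->tuple.dst.protonum = IPPROTO_TCP;

	exp->mask.src.ip = 0xFFFFFFFF;
	exp->mask.src.u.tcp.port = 0;
	exp->mask.dst.ip = 0xFFFFFFFF;
	exp->mask.dst.u.tcp.port = 0xFFFF;
	exp->mask.dst.protonum = 0xFF;

	exp->expectfn = NULL;
	exp->flags = 0;

	ret = ip_conntrack_expect_related(exp);
	ip_conntrack_expect_put(exp);
	return ret;
}
#endif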
1040
1041 /* Alter reply tuple (maybe alter helper). This is for NAT, and is
1042 implicitly racy: see __ip_conntrack_confirm */
1043 void ip_conntrack_alter_reply(struct ip_conntrack *conntrack,
1044 const struct ip_conntrack_tuple *newreply)
1045 {
1046 write_lock_bh(&ip_conntrack_lock);
1047 /* Should be unconfirmed, so not in hash table yet */
1048 IP_NF_ASSERT(!is_confirmed(conntrack));
1049
1050 DEBUGP("Altering reply tuple of %p to ", conntrack);
1051 DUMP_TUPLE(newreply);
1052
1053 conntrack->tuplehash[IP_CT_DIR_REPLY].tuple = *newreply;
1054 if (!conntrack->master && conntrack->expecting == 0)
1055 conntrack->helper = __ip_conntrack_helper_find(newreply);
1056 write_unlock_bh(&ip_conntrack_lock);
1057 }
1058
1059 int ip_conntrack_helper_register(struct ip_conntrack_helper *me)
1060 {
1061 BUG_ON(me->timeout == 0);
1062 write_lock_bh(&ip_conntrack_lock);
1063 list_prepend(&helpers, me);
1064 write_unlock_bh(&ip_conntrack_lock);
1065
1066 return 0;
1067 }
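/* Illustrative only, never compiled: the shape of a helper registration
 * as a conntrack helper module would perform it at init time, matching
 * on the reply tuple's source port as the existing helpers do.  The
 * name, port and (omitted) help callback are hypothetical. */
#if 0
static struct ip_conntrack_helper example_helper;

static int __init example_helper_init(void)
{
	example_helper.tuple.src.u.tcp.port = htons(12345);	/* made-up port */
	example_helper.tuple.dst.protonum = IPPROTO_TCP;
	example_helper.mask.src.u.tcp.port = 0xFFFF;
	example_helper.mask.dst.protonum = 0xFF;
	example_helper.max_expected = 1;
	example_helper.timeout = 5 * 60;	/* expectation timeout, seconds */
	example_helper.me = THIS_MODULE;
	example_helper.name = "example";
	example_helper.help = NULL;		/* a real helper parses payload here */

	return ip_conntrack_helper_register(&example_helper);
}
#endif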
1068
1069 struct ip_conntrack_helper *
1070 __ip_conntrack_helper_find_byname(const char *name)
1071 {
1072 struct ip_conntrack_helper *h;
1073
1074 list_for_each_entry(h, &helpers, list) {
1075 if (!strcmp(h->name, name))
1076 return h;
1077 }
1078
1079 return NULL;
1080 }
1081
1082 static inline int unhelp(struct ip_conntrack_tuple_hash *i,
1083 const struct ip_conntrack_helper *me)
1084 {
1085 if (tuplehash_to_ctrack(i)->helper == me) {
1086 ip_conntrack_event(IPCT_HELPER, tuplehash_to_ctrack(i));
1087 tuplehash_to_ctrack(i)->helper = NULL;
1088 }
1089 return 0;
1090 }
1091
1092 void ip_conntrack_helper_unregister(struct ip_conntrack_helper *me)
1093 {
1094 unsigned int i;
1095 struct ip_conntrack_expect *exp, *tmp;
1096
1097 /* Need write lock here, to delete helper. */
1098 write_lock_bh(&ip_conntrack_lock);
1099 LIST_DELETE(&helpers, me);
1100
1101 /* Get rid of expectations */
1102 list_for_each_entry_safe(exp, tmp, &ip_conntrack_expect_list, list) {
1103 if (exp->master->helper == me && del_timer(&exp->timeout)) {
1104 ip_ct_unlink_expect(exp);
1105 ip_conntrack_expect_put(exp);
1106 }
1107 }
1108 /* Get rid of expecteds, set helpers to NULL. */
1109 LIST_FIND_W(&unconfirmed, unhelp, struct ip_conntrack_tuple_hash*, me);
1110 for (i = 0; i < ip_conntrack_htable_size; i++)
1111 LIST_FIND_W(&ip_conntrack_hash[i], unhelp,
1112 struct ip_conntrack_tuple_hash *, me);
1113 write_unlock_bh(&ip_conntrack_lock);
1114
1115 /* Someone could still be looking at the helper in a bh. */
1116 synchronize_net();
1117 }
1118
1119 /* Refresh conntrack for this many jiffies and do accounting if do_acct is 1 */
1120 void __ip_ct_refresh_acct(struct ip_conntrack *ct,
1121 enum ip_conntrack_info ctinfo,
1122 const struct sk_buff *skb,
1123 unsigned long extra_jiffies,
1124 int do_acct)
1125 {
1126 int event = 0;
1127
1128 IP_NF_ASSERT(ct->timeout.data == (unsigned long)ct);
1129 IP_NF_ASSERT(skb);
1130
1131 write_lock_bh(&ip_conntrack_lock);
1132
1133 /* Only update if this is not a fixed timeout */
1134 if (test_bit(IPS_FIXED_TIMEOUT_BIT, &ct->status)) {
1135 write_unlock_bh(&ip_conntrack_lock);
1136 return;
1137 }
1138
1139 /* If not in hash table, timer will not be active yet */
1140 if (!is_confirmed(ct)) {
1141 ct->timeout.expires = extra_jiffies;
1142 event = IPCT_REFRESH;
1143 } else {
1144 /* Need del_timer for race avoidance (may already be dying). */
1145 if (del_timer(&ct->timeout)) {
1146 ct->timeout.expires = jiffies + extra_jiffies;
1147 add_timer(&ct->timeout);
1148 event = IPCT_REFRESH;
1149 }
1150 }
1151
1152 #ifdef CONFIG_IP_NF_CT_ACCT
1153 if (do_acct) {
1154 ct->counters[CTINFO2DIR(ctinfo)].packets++;
1155 ct->counters[CTINFO2DIR(ctinfo)].bytes +=
1156 ntohs(skb->nh.iph->tot_len);
1157 if ((ct->counters[CTINFO2DIR(ctinfo)].packets & 0x80000000)
1158 || (ct->counters[CTINFO2DIR(ctinfo)].bytes & 0x80000000))
1159 event |= IPCT_COUNTER_FILLING;
1160 }
1161 #endif
1162
1163 write_unlock_bh(&ip_conntrack_lock);
1164
1165 /* must be unlocked when calling event cache */
1166 if (event)
1167 ip_conntrack_event_cache(event, skb);
1168 }
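/* Illustrative only, never compiled: the IPS_FIXED_TIMEOUT_BIT test added
 * above lets a helper or protocol pin the timeout it has programmed.
 * Once the bit is set, further refreshes via ip_ct_refresh()/
 * ip_ct_refresh_acct() return early and no longer push the timer
 * forward.  A hypothetical caller: */
#if 0
static void example_pin_timeout(struct ip_conntrack *ct)
{
	/* freeze the currently programmed timeout for this conntrack */
	set_bit(IPS_FIXED_TIMEOUT_BIT, &ct->status);
}
#endif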
1169
1170 #if defined(CONFIG_IP_NF_CONNTRACK_NETLINK) || \
1171 defined(CONFIG_IP_NF_CONNTRACK_NETLINK_MODULE)
1172 /* Generic function for tcp/udp/sctp/dccp and the like. This needs to be
1173 * in ip_conntrack_core, since we don't want the protocols to autoload
1174 * or depend on ctnetlink */
1175 int ip_ct_port_tuple_to_nfattr(struct sk_buff *skb,
1176 const struct ip_conntrack_tuple *tuple)
1177 {
1178 NFA_PUT(skb, CTA_PROTO_SRC_PORT, sizeof(u_int16_t),
1179 &tuple->src.u.tcp.port);
1180 NFA_PUT(skb, CTA_PROTO_DST_PORT, sizeof(u_int16_t),
1181 &tuple->dst.u.tcp.port);
1182 return 0;
1183
1184 nfattr_failure:
1185 return -1;
1186 }
1187
1188 int ip_ct_port_nfattr_to_tuple(struct nfattr *tb[],
1189 struct ip_conntrack_tuple *t)
1190 {
1191 if (!tb[CTA_PROTO_SRC_PORT-1] || !tb[CTA_PROTO_DST_PORT-1])
1192 return -EINVAL;
1193
1194 t->src.u.tcp.port =
1195 *(u_int16_t *)NFA_DATA(tb[CTA_PROTO_SRC_PORT-1]);
1196 t->dst.u.tcp.port =
1197 *(u_int16_t *)NFA_DATA(tb[CTA_PROTO_DST_PORT-1]);
1198
1199 return 0;
1200 }
1201 #endif
1202
1203 /* Returns new sk_buff, or NULL */
1204 struct sk_buff *
1205 ip_ct_gather_frags(struct sk_buff *skb, u_int32_t user)
1206 {
1207 skb_orphan(skb);
1208
1209 local_bh_disable();
1210 skb = ip_defrag(skb, user);
1211 local_bh_enable();
1212
1213 if (skb)
1214 ip_send_check(skb->nh.iph);
1215 return skb;
1216 }
1217
1218 /* Used by ipt_REJECT. */
1219 static void ip_conntrack_attach(struct sk_buff *nskb, struct sk_buff *skb)
1220 {
1221 struct ip_conntrack *ct;
1222 enum ip_conntrack_info ctinfo;
1223
1224 /* This ICMP is in reverse direction to the packet which caused it */
1225 ct = ip_conntrack_get(skb, &ctinfo);
1226
1227 if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL)
1228 ctinfo = IP_CT_RELATED + IP_CT_IS_REPLY;
1229 else
1230 ctinfo = IP_CT_RELATED;
1231
1232 /* Attach to new skbuff, and increment count */
1233 nskb->nfct = &ct->ct_general;
1234 nskb->nfctinfo = ctinfo;
1235 nf_conntrack_get(nskb->nfct);
1236 }
1237
1238 static inline int
1239 do_iter(const struct ip_conntrack_tuple_hash *i,
1240 int (*iter)(struct ip_conntrack *i, void *data),
1241 void *data)
1242 {
1243 return iter(tuplehash_to_ctrack(i), data);
1244 }
1245
1246 /* Bring out ya dead! */
1247 static struct ip_conntrack_tuple_hash *
1248 get_next_corpse(int (*iter)(struct ip_conntrack *i, void *data),
1249 void *data, unsigned int *bucket)
1250 {
1251 struct ip_conntrack_tuple_hash *h = NULL;
1252
1253 write_lock_bh(&ip_conntrack_lock);
1254 for (; *bucket < ip_conntrack_htable_size; (*bucket)++) {
1255 h = LIST_FIND_W(&ip_conntrack_hash[*bucket], do_iter,
1256 struct ip_conntrack_tuple_hash *, iter, data);
1257 if (h)
1258 break;
1259 }
1260 if (!h)
1261 h = LIST_FIND_W(&unconfirmed, do_iter,
1262 struct ip_conntrack_tuple_hash *, iter, data);
1263 if (h)
1264 atomic_inc(&tuplehash_to_ctrack(h)->ct_general.use);
1265 write_unlock_bh(&ip_conntrack_lock);
1266
1267 return h;
1268 }
1269
1270 void
1271 ip_ct_iterate_cleanup(int (*iter)(struct ip_conntrack *i, void *), void *data)
1272 {
1273 struct ip_conntrack_tuple_hash *h;
1274 unsigned int bucket = 0;
1275
1276 while ((h = get_next_corpse(iter, data, &bucket)) != NULL) {
1277 struct ip_conntrack *ct = tuplehash_to_ctrack(h);
1278 /* Time to push up daisies... */
1279 if (del_timer(&ct->timeout))
1280 death_by_timeout((unsigned long)ct);
1281 /* ... else the timer will get him soon. */
1282
1283 ip_conntrack_put(ct);
1284 }
1285 }
1286
1287 /* Fast function for those who don't want to parse /proc (and I don't
1288 blame them). */
1289 /* Reversing the socket's dst/src point of view gives us the reply
1290 mapping. */
1291 static int
1292 getorigdst(struct sock *sk, int optval, void __user *user, int *len)
1293 {
1294 struct inet_sock *inet = inet_sk(sk);
1295 struct ip_conntrack_tuple_hash *h;
1296 struct ip_conntrack_tuple tuple;
1297
1298 IP_CT_TUPLE_U_BLANK(&tuple);
1299 tuple.src.ip = inet->rcv_saddr;
1300 tuple.src.u.tcp.port = inet->sport;
1301 tuple.dst.ip = inet->daddr;
1302 tuple.dst.u.tcp.port = inet->dport;
1303 tuple.dst.protonum = IPPROTO_TCP;
1304
1305 /* We only do TCP at the moment: is there a better way? */
1306 if (strcmp(sk->sk_prot->name, "TCP")) {
1307 DEBUGP("SO_ORIGINAL_DST: Not a TCP socket\n");
1308 return -ENOPROTOOPT;
1309 }
1310
1311 if ((unsigned int) *len < sizeof(struct sockaddr_in)) {
1312 DEBUGP("SO_ORIGINAL_DST: len %u not %u\n",
1313 *len, sizeof(struct sockaddr_in));
1314 return -EINVAL;
1315 }
1316
1317 h = ip_conntrack_find_get(&tuple, NULL);
1318 if (h) {
1319 struct sockaddr_in sin;
1320 struct ip_conntrack *ct = tuplehash_to_ctrack(h);
1321
1322 sin.sin_family = AF_INET;
1323 sin.sin_port = ct->tuplehash[IP_CT_DIR_ORIGINAL]
1324 .tuple.dst.u.tcp.port;
1325 sin.sin_addr.s_addr = ct->tuplehash[IP_CT_DIR_ORIGINAL]
1326 .tuple.dst.ip;
1327 memset(sin.sin_zero, 0, sizeof(sin.sin_zero));
1328
1329 DEBUGP("SO_ORIGINAL_DST: %u.%u.%u.%u %u\n",
1330 NIPQUAD(sin.sin_addr.s_addr), ntohs(sin.sin_port));
1331 ip_conntrack_put(ct);
1332 if (copy_to_user(user, &sin, sizeof(sin)) != 0)
1333 return -EFAULT;
1334 else
1335 return 0;
1336 }
1337 DEBUGP("SO_ORIGINAL_DST: Can't find %u.%u.%u.%u/%u-%u.%u.%u.%u/%u.\n",
1338 NIPQUAD(tuple.src.ip), ntohs(tuple.src.u.tcp.port),
1339 NIPQUAD(tuple.dst.ip), ntohs(tuple.dst.u.tcp.port));
1340 return -ENOENT;
1341 }
1342
1343 static struct nf_sockopt_ops so_getorigdst = {
1344 .pf = PF_INET,
1345 .get_optmin = SO_ORIGINAL_DST,
1346 .get_optmax = SO_ORIGINAL_DST+1,
1347 .get = &getorigdst,
1348 };
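/* Illustrative only, not part of the kernel build: how a userspace
 * transparent proxy typically queries the original destination of a
 * REDIRECTed TCP connection through the sockopt registered above. */
#if 0
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/netfilter_ipv4.h>	/* SO_ORIGINAL_DST */

static int example_get_original_dst(int fd, struct sockaddr_in *orig)
{
	socklen_t len = sizeof(*orig);

	/* answered by getorigdst() above for established TCP sockets */
	return getsockopt(fd, SOL_IP, SO_ORIGINAL_DST, orig, &len);
}
#endif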
1349
1350 static int kill_all(struct ip_conntrack *i, void *data)
1351 {
1352 return 1;
1353 }
1354
1355 void ip_conntrack_flush(void)
1356 {
1357 ip_ct_iterate_cleanup(kill_all, NULL);
1358 }
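/* Illustrative only, never compiled: ip_ct_iterate_cleanup() takes an
 * arbitrary predicate, so callers can flush selectively; kill_all() and
 * ip_conntrack_flush() above are the trivial case.  A hypothetical
 * filter killing every conntrack that involves a given address: */
#if 0
static int example_kill_by_addr(struct ip_conntrack *ct, void *data)
{
	u_int32_t addr = *(u_int32_t *)data;

	return ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.ip == addr ||
	       ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.ip == addr;
}

static void example_flush_by_addr(u_int32_t addr)
{
	ip_ct_iterate_cleanup(example_kill_by_addr, &addr);
}
#endif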
1359
1360 static void free_conntrack_hash(struct list_head *hash, int vmalloced,int size)
1361 {
1362 if (vmalloced)
1363 vfree(hash);
1364 else
1365 free_pages((unsigned long)hash,
1366 get_order(sizeof(struct list_head) * size));
1367 }
1368
1369 /* Mishearing the voices in his head, our hero wonders how he's
1370 supposed to kill the mall. */
1371 void ip_conntrack_cleanup(void)
1372 {
1373 ip_ct_attach = NULL;
1374
1375 /* This makes sure all current packets have passed through
1376 netfilter framework. Roll on, two-stage module
1377 delete... */
1378 synchronize_net();
1379
1380 ip_ct_event_cache_flush();
1381 i_see_dead_people:
1382 ip_conntrack_flush();
1383 if (atomic_read(&ip_conntrack_count) != 0) {
1384 schedule();
1385 goto i_see_dead_people;
1386 }
1387 /* wait until all references to ip_conntrack_untracked are dropped */
1388 while (atomic_read(&ip_conntrack_untracked.ct_general.use) > 1)
1389 schedule();
1390
1391 kmem_cache_destroy(ip_conntrack_cachep);
1392 kmem_cache_destroy(ip_conntrack_expect_cachep);
1393 free_conntrack_hash(ip_conntrack_hash, ip_conntrack_vmalloc,
1394 ip_conntrack_htable_size);
1395 nf_unregister_sockopt(&so_getorigdst);
1396 }
1397
1398 static struct list_head *alloc_hashtable(int size, int *vmalloced)
1399 {
1400 struct list_head *hash;
1401 unsigned int i;
1402
1403 *vmalloced = 0;
1404 hash = (void*)__get_free_pages(GFP_KERNEL,
1405 get_order(sizeof(struct list_head)
1406 * size));
1407 if (!hash) {
1408 *vmalloced = 1;
1409 printk(KERN_WARNING"ip_conntrack: falling back to vmalloc.\n");
1410 hash = vmalloc(sizeof(struct list_head) * size);
1411 }
1412
1413 if (hash)
1414 for (i = 0; i < size; i++)
1415 INIT_LIST_HEAD(&hash[i]);
1416
1417 return hash;
1418 }
1419
1420 static int set_hashsize(const char *val, struct kernel_param *kp)
1421 {
1422 int i, bucket, hashsize, vmalloced;
1423 int old_vmalloced, old_size;
1424 int rnd;
1425 struct list_head *hash, *old_hash;
1426 struct ip_conntrack_tuple_hash *h;
1427
1428 /* On boot, we can set this without any fancy locking. */
1429 if (!ip_conntrack_htable_size)
1430 return param_set_int(val, kp);
1431
1432 hashsize = simple_strtol(val, NULL, 0);
1433 if (!hashsize)
1434 return -EINVAL;
1435
1436 hash = alloc_hashtable(hashsize, &vmalloced);
1437 if (!hash)
1438 return -ENOMEM;
1439
1440 /* We have to rehash for the new table anyway, so we can also
1441 * use a new random seed */
1442 get_random_bytes(&rnd, 4);
1443
1444 write_lock_bh(&ip_conntrack_lock);
1445 for (i = 0; i < ip_conntrack_htable_size; i++) {
1446 while (!list_empty(&ip_conntrack_hash[i])) {
1447 h = list_entry(ip_conntrack_hash[i].next,
1448 struct ip_conntrack_tuple_hash, list);
1449 list_del(&h->list);
1450 bucket = __hash_conntrack(&h->tuple, hashsize, rnd);
1451 list_add_tail(&h->list, &hash[bucket]);
1452 }
1453 }
1454 old_size = ip_conntrack_htable_size;
1455 old_vmalloced = ip_conntrack_vmalloc;
1456 old_hash = ip_conntrack_hash;
1457
1458 ip_conntrack_htable_size = hashsize;
1459 ip_conntrack_vmalloc = vmalloced;
1460 ip_conntrack_hash = hash;
1461 ip_conntrack_hash_rnd = rnd;
1462 write_unlock_bh(&ip_conntrack_lock);
1463
1464 free_conntrack_hash(old_hash, old_vmalloced, old_size);
1465 return 0;
1466 }
1467
1468 module_param_call(hashsize, set_hashsize, param_get_uint,
1469 &ip_conntrack_htable_size, 0600);
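/* Note: because the parameter above is registered writable (0600), the
 * hash table can also be resized at runtime, typically via something
 * like "echo 16384 > /sys/module/ip_conntrack/parameters/hashsize"
 * (exact path assumed; it depends on how the module is named/built). */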
1470
1471 int __init ip_conntrack_init(void)
1472 {
1473 unsigned int i;
1474 int ret;
1475
1476 /* Idea from tcp.c: use 1/16384 of memory. On i386: 32MB
1477 * machine has 256 buckets. >= 1GB machines have 8192 buckets. */
1478 if (!ip_conntrack_htable_size) {
1479 ip_conntrack_htable_size
1480 = (((num_physpages << PAGE_SHIFT) / 16384)
1481 / sizeof(struct list_head));
1482 if (num_physpages > (1024 * 1024 * 1024 / PAGE_SIZE))
1483 ip_conntrack_htable_size = 8192;
1484 if (ip_conntrack_htable_size < 16)
1485 ip_conntrack_htable_size = 16;
1486 }
1487 ip_conntrack_max = 8 * ip_conntrack_htable_size;
1488
1489 printk("ip_conntrack version %s (%u buckets, %d max)"
1490 " - %Zd bytes per conntrack\n", IP_CONNTRACK_VERSION,
1491 ip_conntrack_htable_size, ip_conntrack_max,
1492 sizeof(struct ip_conntrack));
1493
1494 ret = nf_register_sockopt(&so_getorigdst);
1495 if (ret != 0) {
1496 printk(KERN_ERR "Unable to register netfilter socket option\n");
1497 return ret;
1498 }
1499
1500 ip_conntrack_hash = alloc_hashtable(ip_conntrack_htable_size,
1501 &ip_conntrack_vmalloc);
1502 if (!ip_conntrack_hash) {
1503 printk(KERN_ERR "Unable to create ip_conntrack_hash\n");
1504 goto err_unreg_sockopt;
1505 }
1506
1507 ip_conntrack_cachep = kmem_cache_create("ip_conntrack",
1508 sizeof(struct ip_conntrack), 0,
1509 0, NULL, NULL);
1510 if (!ip_conntrack_cachep) {
1511 printk(KERN_ERR "Unable to create ip_conntrack slab cache\n");
1512 goto err_free_hash;
1513 }
1514
1515 ip_conntrack_expect_cachep = kmem_cache_create("ip_conntrack_expect",
1516 sizeof(struct ip_conntrack_expect),
1517 0, 0, NULL, NULL);
1518 if (!ip_conntrack_expect_cachep) {
1519 printk(KERN_ERR "Unable to create ip_expect slab cache\n");
1520 goto err_free_conntrack_slab;
1521 }
1522
1523 /* Don't NEED lock here, but good form anyway. */
1524 write_lock_bh(&ip_conntrack_lock);
1525 for (i = 0; i < MAX_IP_CT_PROTO; i++)
1526 ip_ct_protos[i] = &ip_conntrack_generic_protocol;
1527 /* Sew in builtin protocols. */
1528 ip_ct_protos[IPPROTO_TCP] = &ip_conntrack_protocol_tcp;
1529 ip_ct_protos[IPPROTO_UDP] = &ip_conntrack_protocol_udp;
1530 ip_ct_protos[IPPROTO_ICMP] = &ip_conntrack_protocol_icmp;
1531 write_unlock_bh(&ip_conntrack_lock);
1532
1533 /* For use by ipt_REJECT */
1534 ip_ct_attach = ip_conntrack_attach;
1535
1536 /* Set up fake conntrack:
1537 - to never be deleted, not in any hashes */
1538 atomic_set(&ip_conntrack_untracked.ct_general.use, 1);
1539 /* - and make it look like a confirmed connection */
1540 set_bit(IPS_CONFIRMED_BIT, &ip_conntrack_untracked.status);
1541
1542 return ret;
1543
1544 err_free_conntrack_slab:
1545 kmem_cache_destroy(ip_conntrack_cachep);
1546 err_free_hash:
1547 free_conntrack_hash(ip_conntrack_hash, ip_conntrack_vmalloc,
1548 ip_conntrack_htable_size);
1549 err_unreg_sockopt:
1550 nf_unregister_sockopt(&so_getorigdst);
1551
1552 return -ENOMEM;
1553 }