net/ipv4/inetpeer.c
/*
 * INETPEER - A storage for permanent information about peers
 *
 * This source is covered by the GNU GPL, the same as all kernel sources.
 *
 * Authors: Andrey V. Savochkin <saw@msu.ru>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/random.h>
#include <linux/timer.h>
#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/net.h>
#include <linux/workqueue.h>
#include <net/ip.h>
#include <net/inetpeer.h>
#include <net/secure_seq.h>

/*
 *  Theory of operations.
 *  We keep one entry for each peer IP address.  Each node contains long-living
 *  information about the peer which doesn't depend on routes.
 *
 *  Nodes are removed only when the reference counter goes to 0.
 *  When that happens, the node is removed only after a sufficient amount of
 *  time has passed since its last use.  A less-recently-used entry can also
 *  be removed if the pool is overloaded, i.e. if the total number of entries
 *  is greater than or equal to the threshold.
 *
 *  Node pool is organised as an AVL tree.
 *  Such an implementation has been chosen not just for fun.  It's a way to
 *  prevent easy and efficient DoS attacks by creating hash collisions.  A huge
 *  amount of long living nodes in a single hash slot would significantly delay
 *  lookups performed with disabled BHs.
 *
 *  Serialisation issues.
 *  1.  Nodes may appear in the tree only with the pool lock held.
 *  2.  Nodes may disappear from the tree only with the pool lock held
 *      AND the reference count being 0.
 *  3.  The per-base total counter is modified under the pool lock.
 *  4.  struct inet_peer fields modification:
 *      avl_left, avl_right, avl_parent, avl_height: pool lock
 *      refcnt: atomically against modifications on other CPUs;
 *              usually under some other lock to prevent node disappearing
 *      daddr: unchangeable
 */

static struct kmem_cache *peer_cachep __read_mostly;

static LIST_HEAD(gc_list);
static const int gc_delay = 60 * HZ;
static struct delayed_work gc_work;
static DEFINE_SPINLOCK(gc_lock);

#define node_height(x) x->avl_height

#define peer_avl_empty ((struct inet_peer *)&peer_fake_node)
#define peer_avl_empty_rcu ((struct inet_peer __rcu __force *)&peer_fake_node)
static const struct inet_peer peer_fake_node = {
        .avl_left       = peer_avl_empty_rcu,
        .avl_right      = peer_avl_empty_rcu,
        .avl_height     = 0
};

void inet_peer_base_init(struct inet_peer_base *bp)
{
        bp->root = peer_avl_empty_rcu;
        seqlock_init(&bp->lock);
        bp->flush_seq = ~0U;
        bp->total = 0;
}
EXPORT_SYMBOL_GPL(inet_peer_base_init);

static atomic_t v4_seq = ATOMIC_INIT(0);
static atomic_t v6_seq = ATOMIC_INIT(0);

static atomic_t *inetpeer_seq_ptr(int family)
{
        return (family == AF_INET ? &v4_seq : &v6_seq);
}

static inline void flush_check(struct inet_peer_base *base, int family)
{
        atomic_t *fp = inetpeer_seq_ptr(family);

        if (unlikely(base->flush_seq != atomic_read(fp))) {
                inetpeer_invalidate_tree(base);
                base->flush_seq = atomic_read(fp);
        }
}

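/* The flush machinery above is driven entirely by the two per-family
 * sequence counters: readers never flush on their own, they only notice in
 * flush_check() that somebody asked for a flush.  A minimal sketch of how
 * such a request could be issued is shown below; the helper name is
 * illustrative only and is not part of this file:
 *
 *      void inetpeer_invalidate_family(int family)
 *      {
 *              atomic_t *fp = inetpeer_seq_ptr(family);
 *
 *              atomic_inc(fp);
 *      }
 *
 * After the increment, the next inet_getpeer() call on every base of that
 * family sees a stale flush_seq and drops its whole tree.
 */
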
#define PEER_MAXDEPTH 40 /* sufficient for about 2^27 nodes */

/* Exported for sysctl_net_ipv4.  */
int inet_peer_threshold __read_mostly = 65536 + 128;   /* start to throw entries more
                                                         * aggressively at this stage */
int inet_peer_minttl __read_mostly = 120 * HZ;          /* TTL under high load: 120 sec */
int inet_peer_maxttl __read_mostly = 10 * 60 * HZ;      /* usual time to live: 10 min */

static void inetpeer_gc_worker(struct work_struct *work)
{
        struct inet_peer *p, *n, *c;
        struct list_head list;

        spin_lock_bh(&gc_lock);
        list_replace_init(&gc_list, &list);
        spin_unlock_bh(&gc_lock);

        if (list_empty(&list))
                return;

        list_for_each_entry_safe(p, n, &list, gc_list) {

                if (need_resched())
                        cond_resched();

                c = rcu_dereference_protected(p->avl_left, 1);
                if (c != peer_avl_empty) {
                        list_add_tail(&c->gc_list, &list);
                        p->avl_left = peer_avl_empty_rcu;
                }

                c = rcu_dereference_protected(p->avl_right, 1);
                if (c != peer_avl_empty) {
                        list_add_tail(&c->gc_list, &list);
                        p->avl_right = peer_avl_empty_rcu;
                }

                n = list_entry(p->gc_list.next, struct inet_peer, gc_list);

                if (!atomic_read(&p->refcnt)) {
                        list_del(&p->gc_list);
                        kmem_cache_free(peer_cachep, p);
                }
        }

        if (list_empty(&list))
                return;

        spin_lock_bh(&gc_lock);
        list_splice(&list, &gc_list);
        spin_unlock_bh(&gc_lock);

        schedule_delayed_work(&gc_work, gc_delay);
}

/* Called from ip_output.c:ip_init  */
void __init inet_initpeers(void)
{
        struct sysinfo si;

        /* Use the straight interface to information about memory. */
        si_meminfo(&si);
        /* The values below were suggested by Alexey Kuznetsov
         * <kuznet@ms2.inr.ac.ru>.  I don't have any opinion about the values
         * myself.  --SAW
         */
        if (si.totalram <= (32768*1024)/PAGE_SIZE)
                inet_peer_threshold >>= 1; /* max pool size about 1MB on IA32 */
        if (si.totalram <= (16384*1024)/PAGE_SIZE)
                inet_peer_threshold >>= 1; /* about 512KB */
        if (si.totalram <= (8192*1024)/PAGE_SIZE)
                inet_peer_threshold >>= 2; /* about 128KB */
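        /* The three checks above are cumulative: e.g. with the default
         * threshold of 65536 + 128 = 65664, a box with 8 MB of RAM or less
         * takes all three hits and ends up with 65664 >> 4 = 4104 entries.
         */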

        peer_cachep = kmem_cache_create("inet_peer_cache",
                        sizeof(struct inet_peer),
                        0, SLAB_HWCACHE_ALIGN | SLAB_PANIC,
                        NULL);

        INIT_DEFERRABLE_WORK(&gc_work, inetpeer_gc_worker);
}

static int addr_compare(const struct inetpeer_addr *a,
                        const struct inetpeer_addr *b)
{
        int i, n = (a->family == AF_INET ? 1 : 4);

        for (i = 0; i < n; i++) {
                if (a->addr.a6[i] == b->addr.a6[i])
                        continue;
                if ((__force u32)a->addr.a6[i] < (__force u32)b->addr.a6[i])
                        return -1;
                return 1;
        }

        return 0;
}

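/* addr_compare() above imposes a total order on peer addresses: IPv4 keys
 * compare on the single 32-bit word stored in addr.a6[0] (which aliases
 * addr.a4), IPv6 keys compare lexicographically on all four words.  The raw
 * words are compared as-is, so the resulting order depends on endianness;
 * all the AVL tree needs is that it is consistent.  For example, on a
 * big-endian box:
 *
 *      a = 10.0.0.1  ->  a6[0] = 0x0a000001
 *      b = 10.0.0.2  ->  a6[0] = 0x0a000002
 *      addr_compare(&a, &b) == -1, so "a" goes into the left subtree of "b".
 */
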
#define rcu_deref_locked(X, BASE)                               \
        rcu_dereference_protected(X, lockdep_is_held(&(BASE)->lock.lock))

/*
 * Called with local BH disabled and the pool lock held.
 */
#define lookup(_daddr, _stack, _base)                           \
({                                                              \
        struct inet_peer *u;                                    \
        struct inet_peer __rcu **v;                             \
                                                                \
        stackptr = _stack;                                      \
        *stackptr++ = &_base->root;                             \
        for (u = rcu_deref_locked(_base->root, _base);          \
             u != peer_avl_empty;) {                            \
                int cmp = addr_compare(_daddr, &u->daddr);      \
                if (cmp == 0)                                   \
                        break;                                  \
                if (cmp == -1)                                  \
                        v = &u->avl_left;                       \
                else                                            \
                        v = &u->avl_right;                      \
                *stackptr++ = v;                                \
                u = rcu_deref_locked(*v, _base);                \
        }                                                       \
        u;                                                      \
})

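/* The lookup() macro above does double duty: besides returning the matching
 * node (or peer_avl_empty), it records in "stack" the address of every child
 * pointer it followed, leaving "stackptr" one past the last entry.  That
 * recorded path is what peer_avl_rebalance() and unlink_from_pool() later
 * walk bottom-up to rewrite pointers and restore the AVL height invariant.
 */
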
/*
 * Called with rcu_read_lock()
 * Because we hold no lock against a writer, it's quite possible we fall
 * into an endless loop.
 * But every pointer we follow is guaranteed to be valid thanks to RCU.
 * We exit from this function if the number of links exceeds PEER_MAXDEPTH.
 */
static struct inet_peer *lookup_rcu(const struct inetpeer_addr *daddr,
                                    struct inet_peer_base *base)
{
        struct inet_peer *u = rcu_dereference(base->root);
        int count = 0;

        while (u != peer_avl_empty) {
                int cmp = addr_compare(daddr, &u->daddr);
                if (cmp == 0) {
                        /* Before taking a reference, check if this entry was
                         * deleted (refcnt=-1)
                         */
                        if (!atomic_add_unless(&u->refcnt, 1, -1))
                                u = NULL;
                        return u;
                }
                if (cmp == -1)
                        u = rcu_dereference(u->avl_left);
                else
                        u = rcu_dereference(u->avl_right);
                if (unlikely(++count == PEER_MAXDEPTH))
                        break;
        }
        return NULL;
}

/* Called with local BH disabled and the pool lock held. */
#define lookup_rightempty(start, base)                          \
({                                                              \
        struct inet_peer *u;                                    \
        struct inet_peer __rcu **v;                             \
        *stackptr++ = &start->avl_left;                         \
        v = &start->avl_left;                                   \
        for (u = rcu_deref_locked(*v, base);                    \
             u->avl_right != peer_avl_empty_rcu;) {             \
                v = &u->avl_right;                              \
                *stackptr++ = v;                                \
                u = rcu_deref_locked(*v, base);                 \
        }                                                       \
        u;                                                      \
})

/* Called with local BH disabled and the pool lock held.
 * Variable names are the proof of operation correctness.
 * Look into mm/map_avl.c for a more detailed description of the ideas.
 */
static void peer_avl_rebalance(struct inet_peer __rcu **stack[],
                               struct inet_peer __rcu ***stackend,
                               struct inet_peer_base *base)
{
        struct inet_peer __rcu **nodep;
        struct inet_peer *node, *l, *r;
        int lh, rh;

        while (stackend > stack) {
                nodep = *--stackend;
                node = rcu_deref_locked(*nodep, base);
                l = rcu_deref_locked(node->avl_left, base);
                r = rcu_deref_locked(node->avl_right, base);
                lh = node_height(l);
                rh = node_height(r);
                if (lh > rh + 1) { /* l: RH+2 */
                        struct inet_peer *ll, *lr, *lrl, *lrr;
                        int lrh;
                        ll = rcu_deref_locked(l->avl_left, base);
                        lr = rcu_deref_locked(l->avl_right, base);
                        lrh = node_height(lr);
                        if (lrh <= node_height(ll)) {           /* ll: RH+1 */
                                RCU_INIT_POINTER(node->avl_left, lr);   /* lr: RH or RH+1 */
                                RCU_INIT_POINTER(node->avl_right, r);   /* r: RH */
                                node->avl_height = lrh + 1;     /* RH+1 or RH+2 */
                                RCU_INIT_POINTER(l->avl_left, ll);      /* ll: RH+1 */
                                RCU_INIT_POINTER(l->avl_right, node);   /* node: RH+1 or RH+2 */
                                l->avl_height = node->avl_height + 1;
                                RCU_INIT_POINTER(*nodep, l);
                        } else { /* ll: RH, lr: RH+1 */
                                lrl = rcu_deref_locked(lr->avl_left, base);     /* lrl: RH or RH-1 */
                                lrr = rcu_deref_locked(lr->avl_right, base);    /* lrr: RH or RH-1 */
                                RCU_INIT_POINTER(node->avl_left, lrr);  /* lrr: RH or RH-1 */
                                RCU_INIT_POINTER(node->avl_right, r);   /* r: RH */
                                node->avl_height = rh + 1;      /* node: RH+1 */
                                RCU_INIT_POINTER(l->avl_left, ll);      /* ll: RH */
                                RCU_INIT_POINTER(l->avl_right, lrl);    /* lrl: RH or RH-1 */
                                l->avl_height = rh + 1;         /* l: RH+1 */
                                RCU_INIT_POINTER(lr->avl_left, l);      /* l: RH+1 */
                                RCU_INIT_POINTER(lr->avl_right, node);  /* node: RH+1 */
                                lr->avl_height = rh + 2;
                                RCU_INIT_POINTER(*nodep, lr);
                        }
                } else if (rh > lh + 1) { /* r: LH+2 */
                        struct inet_peer *rr, *rl, *rlr, *rll;
                        int rlh;
                        rr = rcu_deref_locked(r->avl_right, base);
                        rl = rcu_deref_locked(r->avl_left, base);
                        rlh = node_height(rl);
                        if (rlh <= node_height(rr)) {           /* rr: LH+1 */
                                RCU_INIT_POINTER(node->avl_right, rl);  /* rl: LH or LH+1 */
                                RCU_INIT_POINTER(node->avl_left, l);    /* l: LH */
                                node->avl_height = rlh + 1;     /* LH+1 or LH+2 */
                                RCU_INIT_POINTER(r->avl_right, rr);     /* rr: LH+1 */
                                RCU_INIT_POINTER(r->avl_left, node);    /* node: LH+1 or LH+2 */
                                r->avl_height = node->avl_height + 1;
                                RCU_INIT_POINTER(*nodep, r);
                        } else { /* rr: LH, rl: LH+1 */
                                rlr = rcu_deref_locked(rl->avl_right, base);    /* rlr: LH or LH-1 */
                                rll = rcu_deref_locked(rl->avl_left, base);     /* rll: LH or LH-1 */
                                RCU_INIT_POINTER(node->avl_right, rll); /* rll: LH or LH-1 */
                                RCU_INIT_POINTER(node->avl_left, l);    /* l: LH */
                                node->avl_height = lh + 1;      /* node: LH+1 */
                                RCU_INIT_POINTER(r->avl_right, rr);     /* rr: LH */
                                RCU_INIT_POINTER(r->avl_left, rlr);     /* rlr: LH or LH-1 */
                                r->avl_height = lh + 1;         /* r: LH+1 */
                                RCU_INIT_POINTER(rl->avl_right, r);     /* r: LH+1 */
                                RCU_INIT_POINTER(rl->avl_left, node);   /* node: LH+1 */
                                rl->avl_height = lh + 2;
                                RCU_INIT_POINTER(*nodep, rl);
                        }
                } else {
                        node->avl_height = (lh > rh ? lh : rh) + 1;
                }
        }
}

/* Called with local BH disabled and the pool lock held. */
#define link_to_pool(n, base)                                   \
do {                                                            \
        n->avl_height = 1;                                      \
        n->avl_left = peer_avl_empty_rcu;                       \
        n->avl_right = peer_avl_empty_rcu;                      \
        /* lockless readers can catch us now */                 \
        rcu_assign_pointer(**--stackptr, n);                    \
        peer_avl_rebalance(stack, stackptr, base);              \
} while (0)

static void inetpeer_free_rcu(struct rcu_head *head)
{
        kmem_cache_free(peer_cachep, container_of(head, struct inet_peer, rcu));
}

static void unlink_from_pool(struct inet_peer *p, struct inet_peer_base *base,
                             struct inet_peer __rcu **stack[PEER_MAXDEPTH])
{
        struct inet_peer __rcu ***stackptr, ***delp;

        if (lookup(&p->daddr, stack, base) != p)
                BUG();
        delp = stackptr - 1; /* *delp[0] == p */
        if (p->avl_left == peer_avl_empty_rcu) {
                *delp[0] = p->avl_right;
                --stackptr;
        } else {
                /* look for a node to insert instead of p */
                struct inet_peer *t;
                t = lookup_rightempty(p, base);
                BUG_ON(rcu_deref_locked(*stackptr[-1], base) != t);
                **--stackptr = t->avl_left;
                /* t is removed, t->daddr > x->daddr for any
                 * x in p->avl_left subtree.
                 * Put t in the old place of p. */
                RCU_INIT_POINTER(*delp[0], t);
                t->avl_left = p->avl_left;
                t->avl_right = p->avl_right;
                t->avl_height = p->avl_height;
                BUG_ON(delp[1] != &p->avl_left);
                delp[1] = &t->avl_left; /* was &p->avl_left */
        }
        peer_avl_rebalance(stack, stackptr, base);
        base->total--;
        call_rcu(&p->rcu, inetpeer_free_rcu);
}

/* perform garbage collect on all items stacked during a lookup */
static int inet_peer_gc(struct inet_peer_base *base,
                        struct inet_peer __rcu **stack[PEER_MAXDEPTH],
                        struct inet_peer __rcu ***stackptr)
{
        struct inet_peer *p, *gchead = NULL;
        __u32 delta, ttl;
        int cnt = 0;

        if (base->total >= inet_peer_threshold)
                ttl = 0; /* be aggressive */
        else
                ttl = inet_peer_maxttl
                                - (inet_peer_maxttl - inet_peer_minttl) / HZ *
                                        base->total / inet_peer_threshold * HZ;
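        /* ttl falls linearly from inet_peer_maxttl when the pool is empty
         * down towards inet_peer_minttl as base->total approaches
         * inet_peer_threshold.  For example, with the defaults above
         * (maxttl = 600 sec, minttl = 120 sec), a pool that is half full
         * gives ttl = 600 - (600 - 120) / 2 = 360 seconds (the code works
         * in jiffies).
         */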
        stackptr--; /* last stack slot is peer_avl_empty */
        while (stackptr > stack) {
                stackptr--;
                p = rcu_deref_locked(**stackptr, base);
                if (atomic_read(&p->refcnt) == 0) {
                        smp_rmb();
                        delta = (__u32)jiffies - p->dtime;
                        if (delta >= ttl &&
                            atomic_cmpxchg(&p->refcnt, 0, -1) == 0) {
                                p->gc_next = gchead;
                                gchead = p;
                        }
                }
        }
        while ((p = gchead) != NULL) {
                gchead = p->gc_next;
                cnt++;
                unlink_from_pool(p, base, stack);
        }
        return cnt;
}

struct inet_peer *inet_getpeer(struct inet_peer_base *base,
                               const struct inetpeer_addr *daddr,
                               int create)
{
        struct inet_peer __rcu **stack[PEER_MAXDEPTH], ***stackptr;
        struct inet_peer *p;
        unsigned int sequence;
        int invalidated, gccnt = 0;

        flush_check(base, daddr->family);

        /* Attempt a lockless lookup first.
         * Because of a concurrent writer, we might not find an existing entry.
         */
        rcu_read_lock();
        sequence = read_seqbegin(&base->lock);
        p = lookup_rcu(daddr, base);
        invalidated = read_seqretry(&base->lock, sequence);
        rcu_read_unlock();

        if (p)
                return p;

        /* If no writer did a change during our lookup, we can return early. */
        if (!create && !invalidated)
                return NULL;

        /* retry an exact lookup, this time taking the lock.
         * At least the nodes should be hot in our cache.
         */
        write_seqlock_bh(&base->lock);
relookup:
        p = lookup(daddr, stack, base);
        if (p != peer_avl_empty) {
                atomic_inc(&p->refcnt);
                write_sequnlock_bh(&base->lock);
                return p;
        }
        if (!gccnt) {
                gccnt = inet_peer_gc(base, stack, stackptr);
                if (gccnt && create)
                        goto relookup;
        }
        p = create ? kmem_cache_alloc(peer_cachep, GFP_ATOMIC) : NULL;
        if (p) {
                p->daddr = *daddr;
                atomic_set(&p->refcnt, 1);
                atomic_set(&p->rid, 0);
                p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
                p->rate_tokens = 0;
                /* 60*HZ is arbitrary, but chosen high enough so that the
                 * first calculation of tokens is at its maximum.
                 */
                p->rate_last = jiffies - 60*HZ;
                INIT_LIST_HEAD(&p->gc_list);

                /* Link the node. */
                link_to_pool(p, base);
                base->total++;
        }
        write_sequnlock_bh(&base->lock);

        return p;
}
EXPORT_SYMBOL_GPL(inet_getpeer);

void inet_putpeer(struct inet_peer *p)
{
        p->dtime = (__u32)jiffies;
        smp_mb__before_atomic();
        atomic_dec(&p->refcnt);
}
EXPORT_SYMBOL_GPL(inet_putpeer);

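/* A minimal sketch of the usual caller pattern, for illustration only; the
 * wrapper below is hypothetical and not part of this file.  The caller looks
 * the peer up, uses it while holding the reference, then drops the reference
 * with inet_putpeer() so that dtime gets stamped for later garbage collection:
 *
 *      static void example_touch_peer(struct inet_peer_base *base,
 *                                     const struct inetpeer_addr *daddr)
 *      {
 *              struct inet_peer *peer;
 *
 *              peer = inet_getpeer(base, daddr, 1);    (create if missing)
 *              if (!peer)
 *                      return;
 *              (use peer->rate_tokens, peer->metrics[], ... here)
 *              inet_putpeer(peer);
 *      }
 *
 * Passing create == 0 turns this into a pure lookup that may return NULL
 * instead of allocating a new node.
 */
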
/*
 *      Check transmit rate limitation for a given message.
 *      The rate information is held in the inet_peer entries now.
 *      This function is generic and could be used for other purposes
 *      too. It uses a token bucket filter as suggested by Alexey Kuznetsov.
 *
 *      Note that the same inet_peer fields are modified by functions in
 *      route.c too, but these work for packet destinations while xrlim_allow
 *      works for icmp destinations. This means the rate limiting information
 *      for one "ip object" is shared - and these ICMPs are twice limited:
 *      by source and by destination.
 *
 *      RFC 1812: 4.3.2.8 SHOULD be able to limit error message rate
 *                        SHOULD allow setting of rate limits
 *
 *      Shared between ICMPv4 and ICMPv6.
 */
#define XRLIM_BURST_FACTOR 6
bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout)
{
        unsigned long now, token;
        bool rc = false;

        if (!peer)
                return true;

        token = peer->rate_tokens;
        now = jiffies;
        token += now - peer->rate_last;
        peer->rate_last = now;
        if (token > XRLIM_BURST_FACTOR * timeout)
                token = XRLIM_BURST_FACTOR * timeout;
        if (token >= timeout) {
                token -= timeout;
                rc = true;
        }
        peer->rate_tokens = token;
        return rc;
}
EXPORT_SYMBOL(inet_peer_xrlim_allow);

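/* Worked example for inet_peer_xrlim_allow() above, assuming timeout == HZ
 * (one token per second) purely for illustration:
 *
 *      - an idle peer accumulates up to XRLIM_BURST_FACTOR * timeout
 *        == 6 * HZ worth of tokens, so the first 6 messages in a burst
 *        are all allowed;
 *      - each allowed message costs "timeout" tokens, so once the burst is
 *        spent, further messages are allowed at most once per second;
 *      - a NULL peer always returns true, i.e. unknown destinations are
 *        never rate limited here.
 */
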
static void inetpeer_inval_rcu(struct rcu_head *head)
{
        struct inet_peer *p = container_of(head, struct inet_peer, gc_rcu);

        spin_lock_bh(&gc_lock);
        list_add_tail(&p->gc_list, &gc_list);
        spin_unlock_bh(&gc_lock);

        schedule_delayed_work(&gc_work, gc_delay);
}

void inetpeer_invalidate_tree(struct inet_peer_base *base)
{
        struct inet_peer *root;

        write_seqlock_bh(&base->lock);

        root = rcu_deref_locked(base->root, base);
        if (root != peer_avl_empty) {
                base->root = peer_avl_empty_rcu;
                base->total = 0;
                call_rcu(&root->gc_rcu, inetpeer_inval_rcu);
        }

        write_sequnlock_bh(&base->lock);
}
EXPORT_SYMBOL(inetpeer_invalidate_tree);
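
/* Note on invalidation: inetpeer_invalidate_tree() frees nothing
 * synchronously.  It detaches the whole AVL tree in one pointer swap under
 * the seqlock, then hands the old root to inetpeer_inval_rcu() after a grace
 * period, which queues it on gc_list and kicks the deferred worker.
 * inetpeer_gc_worker() later walks the detached tree, re-queueing children
 * and freeing only nodes whose refcnt has dropped to zero, rescheduling
 * itself until the list is empty.
 */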