1 /*
2 * This program is free software; you can redistribute it and/or
3 * modify it under the terms of the GNU General Public License
4 * as published by the Free Software Foundation; either version
5 * 2 of the License, or (at your option) any later version.
6 *
7 * Robert Olsson <robert.olsson@its.uu.se> Uppsala Universitet
8 * & Swedish University of Agricultural Sciences.
9 *
10 * Jens Laas <jens.laas@data.slu.se> Swedish University of
11 * Agricultural Sciences.
12 *
13 * Hans Liss <hans.liss@its.uu.se> Uppsala Universitet
14 *
15 * This work is based on the LPC-trie which is originally described in:
16 *
17 * An experimental study of compression methods for dynamic tries
18 * Stefan Nilsson and Matti Tikkanen. Algorithmica, 33(1):19-33, 2002.
19 * http://www.nada.kth.se/~snilsson/public/papers/dyntrie2/
20 *
21 *
22 * IP-address lookup using LC-tries. Stefan Nilsson and Gunnar Karlsson
23 * IEEE Journal on Selected Areas in Communications, 17(6):1083-1092, June 1999
24 *
25 * Version: $Id: fib_trie.c,v 1.3 2005/06/08 14:20:01 robert Exp $
26 *
27 *
28 * Code from fib_hash has been reused which includes the following header:
29 *
30 *
31 * INET An implementation of the TCP/IP protocol suite for the LINUX
32 * operating system. INET is implemented using the BSD Socket
33 * interface as the means of communication with the user level.
34 *
35 * IPv4 FIB: lookup engine and maintenance routines.
36 *
37 *
38 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
39 *
40 * This program is free software; you can redistribute it and/or
41 * modify it under the terms of the GNU General Public License
42 * as published by the Free Software Foundation; either version
43 * 2 of the License, or (at your option) any later version.
44 *
45 * Substantial contributions to this work come from:
46 *
47 * David S. Miller, <davem@davemloft.net>
48 * Stephen Hemminger <shemminger@osdl.org>
49 * Paul E. McKenney <paulmck@us.ibm.com>
50 * Patrick McHardy <kaber@trash.net>
51 */
52
53 #define VERSION "0.408"
54
55 #include <asm/uaccess.h>
56 #include <asm/system.h>
57 #include <linux/bitops.h>
58 #include <linux/types.h>
59 #include <linux/kernel.h>
60 #include <linux/mm.h>
61 #include <linux/string.h>
62 #include <linux/socket.h>
63 #include <linux/sockios.h>
64 #include <linux/errno.h>
65 #include <linux/in.h>
66 #include <linux/inet.h>
67 #include <linux/inetdevice.h>
68 #include <linux/netdevice.h>
69 #include <linux/if_arp.h>
70 #include <linux/proc_fs.h>
71 #include <linux/rcupdate.h>
72 #include <linux/skbuff.h>
73 #include <linux/netlink.h>
74 #include <linux/init.h>
75 #include <linux/list.h>
76 #include <net/net_namespace.h>
77 #include <net/ip.h>
78 #include <net/protocol.h>
79 #include <net/route.h>
80 #include <net/tcp.h>
81 #include <net/sock.h>
82 #include <net/ip_fib.h>
83 #include "fib_lookup.h"
84
85 #undef CONFIG_IP_FIB_TRIE_STATS
86 #define MAX_STAT_DEPTH 32
87
88 #define KEYLENGTH (8*sizeof(t_key))
89
90 typedef unsigned int t_key;
91
92 #define T_TNODE 0
93 #define T_LEAF 1
94 #define NODE_TYPE_MASK 0x1UL
95 #define NODE_TYPE(node) ((node)->parent & NODE_TYPE_MASK)
96
97 #define IS_TNODE(n) (!(n->parent & T_LEAF))
98 #define IS_LEAF(n) (n->parent & T_LEAF)
99
100 struct node {
101 t_key key;
102 unsigned long parent;
103 };
104
105 struct leaf {
106 t_key key;
107 unsigned long parent;
108 struct hlist_head list;
109 struct rcu_head rcu;
110 };
111
112 struct leaf_info {
113 struct hlist_node hlist;
114 struct rcu_head rcu;
115 int plen;
116 struct list_head falh;
117 };
118
119 struct tnode {
120 t_key key;
121 unsigned long parent;
122 unsigned short pos:5; /* log2(KEYLENGTH) bits needed */
123 unsigned short bits:5; /* log2(KEYLENGTH) bits needed */
124 unsigned short full_children; /* KEYLENGTH bits needed */
125 unsigned short empty_children; /* KEYLENGTH bits needed */
126 struct rcu_head rcu;
127 struct node *child[0];
128 };
129
130 #ifdef CONFIG_IP_FIB_TRIE_STATS
131 struct trie_use_stats {
132 unsigned int gets;
133 unsigned int backtrack;
134 unsigned int semantic_match_passed;
135 unsigned int semantic_match_miss;
136 unsigned int null_node_hit;
137 unsigned int resize_node_skipped;
138 };
139 #endif
140
141 struct trie_stat {
142 unsigned int totdepth;
143 unsigned int maxdepth;
144 unsigned int tnodes;
145 unsigned int leaves;
146 unsigned int nullpointers;
147 unsigned int nodesizes[MAX_STAT_DEPTH];
148 };
149
150 struct trie {
151 struct node *trie;
152 #ifdef CONFIG_IP_FIB_TRIE_STATS
153 struct trie_use_stats stats;
154 #endif
155 int size;
156 unsigned int revision;
157 };
158
159 static void put_child(struct trie *t, struct tnode *tn, int i, struct node *n);
160 static void tnode_put_child_reorg(struct tnode *tn, int i, struct node *n, int wasfull);
161 static struct node *resize(struct trie *t, struct tnode *tn);
162 static struct tnode *inflate(struct trie *t, struct tnode *tn);
163 static struct tnode *halve(struct trie *t, struct tnode *tn);
164 static void tnode_free(struct tnode *tn);
165
166 static struct kmem_cache *fn_alias_kmem __read_mostly;
167
168 static inline struct tnode *node_parent(struct node *node)
169 {
170 struct tnode *ret;
171
172 ret = (struct tnode *)(node->parent & ~NODE_TYPE_MASK);
173 return rcu_dereference(ret);
174 }
175
176 static inline void node_set_parent(struct node *node, struct tnode *ptr)
177 {
178 rcu_assign_pointer(node->parent,
179 (unsigned long)ptr | NODE_TYPE(node));
180 }
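/*
 * A minimal sketch (not built) of how the node type tag and the parent
 * pointer share the ->parent word.  The variables 'l' (a detached leaf)
 * and 'tn' (its future parent tnode) are hypothetical.
 */
#if 0
	struct node *n = (struct node *) l;

	l->parent = T_LEAF;		/* leaf, no parent yet */
	node_set_parent(n, tn);		/* ->parent = (unsigned long) tn | T_LEAF */

	/* the type tag survives, and node_parent() masks it back off */
	BUG_ON(!IS_LEAF(n));
	BUG_ON(node_parent(n) != tn);
#endif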
181
182 /* rcu_read_lock needs to be held by the caller on the read side */
183
184 static inline struct node *tnode_get_child(struct tnode *tn, int i)
185 {
186 BUG_ON(i >= 1 << tn->bits);
187
188 return rcu_dereference(tn->child[i]);
189 }
190
191 static inline int tnode_child_length(const struct tnode *tn)
192 {
193 return 1 << tn->bits;
194 }
195
196 static inline t_key mask_pfx(t_key k, unsigned short l)
197 {
198 return (l == 0) ? 0 : k >> (KEYLENGTH-l) << (KEYLENGTH-l);
199 }
200
201 static inline t_key tkey_extract_bits(t_key a, int offset, int bits)
202 {
203 if (offset < KEYLENGTH)
204 return ((t_key)(a << offset)) >> (KEYLENGTH - bits);
205 else
206 return 0;
207 }
208
209 static inline int tkey_equals(t_key a, t_key b)
210 {
211 return a == b;
212 }
213
214 static inline int tkey_sub_equals(t_key a, int offset, int bits, t_key b)
215 {
216 if (bits == 0 || offset >= KEYLENGTH)
217 return 1;
218 bits = bits > KEYLENGTH ? KEYLENGTH : bits;
219 return ((a ^ b) << offset) >> (KEYLENGTH - bits) == 0;
220 }
221
222 static inline int tkey_mismatch(t_key a, int offset, t_key b)
223 {
224 t_key diff = a ^ b;
225 int i = offset;
226
227 if (!diff)
228 return 0;
229 while ((diff << i) >> (KEYLENGTH-1) == 0)
230 i++;
231 return i;
232 }
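/*
 * Worked example (illustrative only, made-up addresses; keys are in host
 * order and bit 0 is the most significant bit):
 *
 *	tkey_extract_bits(0xC0A80100, 0, 8)	= 0xC0	(first octet, 192)
 *	tkey_extract_bits(0xC0A80100, 8, 8)	= 0xA8	(second octet, 168)
 *	mask_pfx(0xC0A801FF, 24)		= 0xC0A80100 (keep top 24 bits)
 *	tkey_mismatch(0xC0A80000, 0, 0xC0A80100) = 23
 *		(192.168.0.0 and 192.168.1.0 first differ in bit 23,
 *		 i.e. they share a 23-bit prefix)
 */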
233
234 /*
235 To understand this stuff, an understanding of keys and all their bits is
236 necessary. Every node in the trie has a key associated with it, but not
237 all of the bits in that key are significant.
238
239 Consider a node 'n' and its parent 'tp'.
240
241 If n is a leaf, every bit in its key is significant. Its presence is
242 necessitated by path compression, since during a tree traversal (when
243 searching for a leaf - unless we are doing an insertion) we will completely
244 ignore all skipped bits we encounter. Thus we need to verify, at the end of
245 a potentially successful search, that we have indeed been walking the
246 correct key path.
247
248 Note that if the correct key is present in the trie, we can never miss it
249 by following the wrong path. Path compression ensures that segments of the key
250 that are the same for all keys with a given prefix are skipped, but the
251 skipped part *is* identical for each node in the subtrie below the skipped
252 bit! fib_insert_node() in this implementation takes care of that - note the
253 call to tkey_sub_equals() in fib_insert_node().
254
255 If n is an internal node - a 'tnode' here - the various parts of its key
256 have many different meanings.
257
258 Example:
259 _________________________________________________________________
260 | i | i | i | i | i | i | i | N | N | N | S | S | S | S | S | C |
261 -----------------------------------------------------------------
262 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
263
264 _________________________________________________________________
265 | C | C | C | u | u | u | u | u | u | u | u | u | u | u | u | u |
266 -----------------------------------------------------------------
267 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
268
269 tp->pos = 7
270 tp->bits = 3
271 n->pos = 15
272 n->bits = 4
273
274 First, let's just ignore the bits that come before the parent tp, that is
275 the bits from 0 to (tp->pos-1). They are *known* but at this point we do
276 not use them for anything.
277
278 The bits from (tp->pos) to (tp->pos + tp->bits - 1) - "N", above - are the
279 index into the parent's child array. That is, they will be used to find
280 'n' among tp's children.
281
282 The bits from (tp->pos + tp->bits) to (n->pos - 1) - "S" - are skipped bits
283 for the node n.
284
285 All the bits we have seen so far are significant to the node n. The rest
286 of the bits are really not needed or indeed known in n->key.
287
288 The bits from (n->pos) to (n->pos + n->bits - 1) - "C" - are the index into
289 n's child array, and will of course be different for each child.
290
291
292 The rest of the bits, from (n->pos + n->bits) onward, are completely unknown
293 at this point.
294
295 */
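/*
 * Continuing the example above (a sketch, not built; 'tp', 'n', 'key' and
 * 'cindex' are the nodes, search key and child index from the discussion):
 */
#if 0
	/* "N" bits: n's slot in tp's child array (bits 7..9 of the key) */
	cindex = tkey_extract_bits(n->key, tp->pos, tp->bits);

	/*
	 * The "S" bits (10..14) are skipped on the way down and must be
	 * re-checked against the key once a leaf is reached.
	 */

	/* "C" bits: which child of n to descend into (bits 15..18) */
	cindex = tkey_extract_bits(key, n->pos, n->bits);
#endif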
296
297 static inline void check_tnode(const struct tnode *tn)
298 {
299 WARN_ON(tn && tn->pos+tn->bits > 32);
300 }
301
302 static int halve_threshold = 25;
303 static int inflate_threshold = 50;
304 static int halve_threshold_root = 8;
305 static int inflate_threshold_root = 15;
306
307
308 static void __alias_free_mem(struct rcu_head *head)
309 {
310 struct fib_alias *fa = container_of(head, struct fib_alias, rcu);
311 kmem_cache_free(fn_alias_kmem, fa);
312 }
313
314 static inline void alias_free_mem_rcu(struct fib_alias *fa)
315 {
316 call_rcu(&fa->rcu, __alias_free_mem);
317 }
318
319 static void __leaf_free_rcu(struct rcu_head *head)
320 {
321 kfree(container_of(head, struct leaf, rcu));
322 }
323
324 static void __leaf_info_free_rcu(struct rcu_head *head)
325 {
326 kfree(container_of(head, struct leaf_info, rcu));
327 }
328
329 static inline void free_leaf_info(struct leaf_info *leaf)
330 {
331 call_rcu(&leaf->rcu, __leaf_info_free_rcu);
332 }
333
334 static struct tnode *tnode_alloc(unsigned int size)
335 {
336 struct page *pages;
337
338 if (size <= PAGE_SIZE)
339 return kcalloc(size, 1, GFP_KERNEL);
340
341 pages = alloc_pages(GFP_KERNEL|__GFP_ZERO, get_order(size));
342 if (!pages)
343 return NULL;
344
345 return page_address(pages);
346 }
347
348 static void __tnode_free_rcu(struct rcu_head *head)
349 {
350 struct tnode *tn = container_of(head, struct tnode, rcu);
351 unsigned int size = sizeof(struct tnode) +
352 (1 << tn->bits) * sizeof(struct node *);
353
354 if (size <= PAGE_SIZE)
355 kfree(tn);
356 else
357 free_pages((unsigned long)tn, get_order(size));
358 }
359
360 static inline void tnode_free(struct tnode *tn)
361 {
362 if (IS_LEAF(tn)) {
363 struct leaf *l = (struct leaf *) tn;
364 call_rcu_bh(&l->rcu, __leaf_free_rcu);
365 } else
366 call_rcu(&tn->rcu, __tnode_free_rcu);
367 }
368
369 static struct leaf *leaf_new(void)
370 {
371 struct leaf *l = kmalloc(sizeof(struct leaf), GFP_KERNEL);
372 if (l) {
373 l->parent = T_LEAF;
374 INIT_HLIST_HEAD(&l->list);
375 }
376 return l;
377 }
378
379 static struct leaf_info *leaf_info_new(int plen)
380 {
381 struct leaf_info *li = kmalloc(sizeof(struct leaf_info), GFP_KERNEL);
382 if (li) {
383 li->plen = plen;
384 INIT_LIST_HEAD(&li->falh);
385 }
386 return li;
387 }
388
389 static struct tnode* tnode_new(t_key key, int pos, int bits)
390 {
391 int nchildren = 1<<bits;
392 int sz = sizeof(struct tnode) + nchildren * sizeof(struct node *);
393 struct tnode *tn = tnode_alloc(sz);
394
395 if (tn) {
396 memset(tn, 0, sz);
397 tn->parent = T_TNODE;
398 tn->pos = pos;
399 tn->bits = bits;
400 tn->key = key;
401 tn->full_children = 0;
402 tn->empty_children = 1<<bits;
403 }
404
405 pr_debug("AT %p s=%u %u\n", tn, (unsigned int) sizeof(struct tnode),
406 (unsigned int) (sizeof(struct node) * 1<<bits));
407 return tn;
408 }
409
410 /*
411 * Check whether a tnode 'n' is "full", i.e. it is an internal node
412 * and no bits are skipped. See discussion in dyntree paper p. 6
413 */
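/*
 * In the layout example near the top of this file a child of tp is "full"
 * iff it is a tnode whose pos equals tp->pos + tp->bits (= 10 there, i.e.
 * no "S" bits are skipped); the node n in that example has pos == 15, so
 * it is not full.
 */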
414
415 static inline int tnode_full(const struct tnode *tn, const struct node *n)
416 {
417 if (n == NULL || IS_LEAF(n))
418 return 0;
419
420 return ((struct tnode *) n)->pos == tn->pos + tn->bits;
421 }
422
423 static inline void put_child(struct trie *t, struct tnode *tn, int i, struct node *n)
424 {
425 tnode_put_child_reorg(tn, i, n, -1);
426 }
427
428 /*
429 * Add a child at position i overwriting the old value.
430 * Update the value of full_children and empty_children.
431 */
432
433 static void tnode_put_child_reorg(struct tnode *tn, int i, struct node *n, int wasfull)
434 {
435 struct node *chi = tn->child[i];
436 int isfull;
437
438 BUG_ON(i >= 1<<tn->bits);
439
440
441 /* update emptyChildren */
442 if (n == NULL && chi != NULL)
443 tn->empty_children++;
444 else if (n != NULL && chi == NULL)
445 tn->empty_children--;
446
447 /* update fullChildren */
448 if (wasfull == -1)
449 wasfull = tnode_full(tn, chi);
450
451 isfull = tnode_full(tn, n);
452 if (wasfull && !isfull)
453 tn->full_children--;
454 else if (!wasfull && isfull)
455 tn->full_children++;
456
457 if (n)
458 node_set_parent(n, tn);
459
460 rcu_assign_pointer(tn->child[i], n);
461 }
462
463 static struct node *resize(struct trie *t, struct tnode *tn)
464 {
465 int i;
466 int err = 0;
467 struct tnode *old_tn;
468 int inflate_threshold_use;
469 int halve_threshold_use;
470 int max_resize;
471
472 if (!tn)
473 return NULL;
474
475 pr_debug("In tnode_resize %p inflate_threshold=%d threshold=%d\n",
476 tn, inflate_threshold, halve_threshold);
477
478 /* No children */
479 if (tn->empty_children == tnode_child_length(tn)) {
480 tnode_free(tn);
481 return NULL;
482 }
483 /* One child */
484 if (tn->empty_children == tnode_child_length(tn) - 1)
485 for (i = 0; i < tnode_child_length(tn); i++) {
486 struct node *n;
487
488 n = tn->child[i];
489 if (!n)
490 continue;
491
492 /* compress one level */
493 node_set_parent(n, NULL);
494 tnode_free(tn);
495 return n;
496 }
497 /*
498 * Double as long as the resulting node has a number of
499 * nonempty nodes that are above the threshold.
500 */
501
502 /*
503 * From "Implementing a dynamic compressed trie" by Stefan Nilsson of
504 * the Helsinki University of Technology and Matti Tikkanen of Nokia
505 * Telecommunications, page 6:
506 * "A node is doubled if the ratio of non-empty children to all
507 * children in the *doubled* node is at least 'high'."
508 *
509 * 'high' in this instance is the variable 'inflate_threshold'. It
510 * is expressed as a percentage, so we multiply it with
511 * tnode_child_length() and instead of multiplying by 2 (since the
512 * child array will be doubled by inflate()) and multiplying
513 * the left-hand side by 100 (to handle the percentage thing) we
514 * multiply the left-hand side by 50.
515 *
516 * The left-hand side may look a bit weird: tnode_child_length(tn)
517 * - tn->empty_children is of course the number of non-null children
518 * in the current node. tn->full_children is the number of "full"
519 * children, that is non-null tnodes with a skip value of 0.
520 * All of those will be doubled in the resulting inflated tnode, so
521 * we just count them one extra time here.
522 *
523 * A clearer way to write this would be:
524 *
525 * to_be_doubled = tn->full_children;
526 * not_to_be_doubled = tnode_child_length(tn) - tn->empty_children -
527 * tn->full_children;
528 *
529 * new_child_length = tnode_child_length(tn) * 2;
530 *
531 * new_fill_factor = 100 * (not_to_be_doubled + 2*to_be_doubled) /
532 * new_child_length;
533 * if (new_fill_factor >= inflate_threshold)
534 *
535 * ...and so on, though it would mess up the while () loop.
536 *
537 * anyway,
538 * 100 * (not_to_be_doubled + 2*to_be_doubled) / new_child_length >=
539 * inflate_threshold
540 *
541 * avoid a division:
542 * 100 * (not_to_be_doubled + 2*to_be_doubled) >=
543 * inflate_threshold * new_child_length
544 *
545 * expand not_to_be_doubled and to_be_doubled, and shorten:
546 * 100 * (tnode_child_length(tn) - tn->empty_children +
547 * tn->full_children) >= inflate_threshold * new_child_length
548 *
549 * expand new_child_length:
550 * 100 * (tnode_child_length(tn) - tn->empty_children +
551 * tn->full_children) >=
552 * inflate_threshold * tnode_child_length(tn) * 2
553 *
554 * shorten again:
555 * 50 * (tn->full_children + tnode_child_length(tn) -
556 * tn->empty_children) >= inflate_threshold *
557 * tnode_child_length(tn)
558 *
559 */
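	/*
	 * A sketch (not built) of the readable form of the inflate test
	 * used in the loop below, for the current 'tn':
	 */
#if 0
	{
		int to_be_doubled = tn->full_children;
		int not_to_be_doubled = tnode_child_length(tn) -
				tn->empty_children - tn->full_children;
		int new_child_length = 2 * tnode_child_length(tn);
		int new_fill_factor = 100 * (not_to_be_doubled +
					     2 * to_be_doubled) / new_child_length;

		/* inflate while new_fill_factor >= inflate_threshold_use */
	}
#endif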
560
561 check_tnode(tn);
562
563 /* Keep root node larger */
564
565 if (!tn->parent)
566 inflate_threshold_use = inflate_threshold_root;
567 else
568 inflate_threshold_use = inflate_threshold;
569
570 err = 0;
571 max_resize = 10;
572 while ((tn->full_children > 0 && max_resize-- &&
573 50 * (tn->full_children + tnode_child_length(tn) - tn->empty_children) >=
574 inflate_threshold_use * tnode_child_length(tn))) {
575
576 old_tn = tn;
577 tn = inflate(t, tn);
578 if (IS_ERR(tn)) {
579 tn = old_tn;
580 #ifdef CONFIG_IP_FIB_TRIE_STATS
581 t->stats.resize_node_skipped++;
582 #endif
583 break;
584 }
585 }
586
587 if (max_resize < 0) {
588 if (!tn->parent)
589 printk(KERN_WARNING "Fix inflate_threshold_root. Now=%d size=%d bits\n",
590 inflate_threshold_root, tn->bits);
591 else
592 printk(KERN_WARNING "Fix inflate_threshold. Now=%d size=%d bits\n",
593 inflate_threshold, tn->bits);
594 }
595
596 check_tnode(tn);
597
598 /*
599 * Halve as long as the number of empty children in this
600 * node is above threshold.
601 */
602
603
604 /* Keep root node larger */
605
606 if (!tn->parent)
607 halve_threshold_use = halve_threshold_root;
608 else
609 halve_threshold_use = halve_threshold;
610
611 err = 0;
612 max_resize = 10;
613 while (tn->bits > 1 && max_resize-- &&
614 100 * (tnode_child_length(tn) - tn->empty_children) <
615 halve_threshold_use * tnode_child_length(tn)) {
616
617 old_tn = tn;
618 tn = halve(t, tn);
619 if (IS_ERR(tn)) {
620 tn = old_tn;
621 #ifdef CONFIG_IP_FIB_TRIE_STATS
622 t->stats.resize_node_skipped++;
623 #endif
624 break;
625 }
626 }
627
628 if (max_resize < 0) {
629 if (!tn->parent)
630 printk(KERN_WARNING "Fix halve_threshold_root. Now=%d size=%d bits\n",
631 halve_threshold_root, tn->bits);
632 else
633 printk(KERN_WARNING "Fix halve_threshold. Now=%d size=%d bits\n",
634 halve_threshold, tn->bits);
635 }
636
637 /* Only one child remains */
638 if (tn->empty_children == tnode_child_length(tn) - 1)
639 for (i = 0; i < tnode_child_length(tn); i++) {
640 struct node *n;
641
642 n = tn->child[i];
643 if (!n)
644 continue;
645
646 /* compress one level */
647
648 node_set_parent(n, NULL);
649 tnode_free(tn);
650 return n;
651 }
652
653 return (struct node *) tn;
654 }
655
656 static struct tnode *inflate(struct trie *t, struct tnode *tn)
657 {
658 struct tnode *inode;
659 struct tnode *oldtnode = tn;
660 int olen = tnode_child_length(tn);
661 int i;
662
663 pr_debug("In inflate\n");
664
665 tn = tnode_new(oldtnode->key, oldtnode->pos, oldtnode->bits + 1);
666
667 if (!tn)
668 return ERR_PTR(-ENOMEM);
669
670 /*
671 * Preallocate and store tnodes before the actual work so we
672 * don't get into an inconsistent state if memory allocation
673 * fails. In case of failure we return the oldnode and inflate
674 * of tnode is ignored.
675 */
676
677 for (i = 0; i < olen; i++) {
678 struct tnode *inode = (struct tnode *) tnode_get_child(oldtnode, i);
679
680 if (inode &&
681 IS_TNODE(inode) &&
682 inode->pos == oldtnode->pos + oldtnode->bits &&
683 inode->bits > 1) {
684 struct tnode *left, *right;
685 t_key m = ~0U << (KEYLENGTH - 1) >> inode->pos;
686
687 left = tnode_new(inode->key&(~m), inode->pos + 1,
688 inode->bits - 1);
689 if (!left)
690 goto nomem;
691
692 right = tnode_new(inode->key|m, inode->pos + 1,
693 inode->bits - 1);
694
695 if (!right) {
696 tnode_free(left);
697 goto nomem;
698 }
699
700 put_child(t, tn, 2*i, (struct node *) left);
701 put_child(t, tn, 2*i+1, (struct node *) right);
702 }
703 }
704
705 for (i = 0; i < olen; i++) {
706 struct node *node = tnode_get_child(oldtnode, i);
707 struct tnode *left, *right;
708 int size, j;
709
710 /* An empty child */
711 if (node == NULL)
712 continue;
713
714 /* A leaf or an internal node with skipped bits */
715
716 if (IS_LEAF(node) || ((struct tnode *) node)->pos >
717 tn->pos + tn->bits - 1) {
718 if (tkey_extract_bits(node->key, oldtnode->pos + oldtnode->bits,
719 1) == 0)
720 put_child(t, tn, 2*i, node);
721 else
722 put_child(t, tn, 2*i+1, node);
723 continue;
724 }
725
726 /* An internal node with two children */
727 inode = (struct tnode *) node;
728
729 if (inode->bits == 1) {
730 put_child(t, tn, 2*i, inode->child[0]);
731 put_child(t, tn, 2*i+1, inode->child[1]);
732
733 tnode_free(inode);
734 continue;
735 }
736
737 /* An internal node with more than two children */
738
739 /* We will replace this node 'inode' with two new
740 * ones, 'left' and 'right', each with half of the
741 * original children. The two new nodes will have
742 * a position one bit further down the key and this
743 * means that the "significant" part of their keys
744 * (see the discussion near the top of this file)
745 * will differ by one bit, which will be "0" in
746 * left's key and "1" in right's key. Since we are
747 * moving the key position by one step, the bit that
748 * we are moving away from - the bit at position
749 * (inode->pos) - is the one that will differ between
750 * left and right. So... we synthesize that bit in the
751 * two new keys.
752 * The mask 'm' below will be a single "one" bit at
753 * the position (inode->pos)
754 */
755
756 /* Use the old key, but set the new significant
757 * bit to zero.
758 */
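			/*
			 * Illustration (made-up numbers): if inode->pos == 23
			 * and inode->key == 0xC0A80000, the first loop above
			 * used m == 0x00000100, so
			 *
			 *	left->key  == 0xC0A80000  (bit 23 forced to 0)
			 *	right->key == 0xC0A80100  (bit 23 forced to 1)
			 *
			 * and both got pos == 24, bits == inode->bits - 1.
			 */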
759
760 left = (struct tnode *) tnode_get_child(tn, 2*i);
761 put_child(t, tn, 2*i, NULL);
762
763 BUG_ON(!left);
764
765 right = (struct tnode *) tnode_get_child(tn, 2*i+1);
766 put_child(t, tn, 2*i+1, NULL);
767
768 BUG_ON(!right);
769
770 size = tnode_child_length(left);
771 for (j = 0; j < size; j++) {
772 put_child(t, left, j, inode->child[j]);
773 put_child(t, right, j, inode->child[j + size]);
774 }
775 put_child(t, tn, 2*i, resize(t, left));
776 put_child(t, tn, 2*i+1, resize(t, right));
777
778 tnode_free(inode);
779 }
780 tnode_free(oldtnode);
781 return tn;
782 nomem:
783 {
784 int size = tnode_child_length(tn);
785 int j;
786
787 for (j = 0; j < size; j++)
788 if (tn->child[j])
789 tnode_free((struct tnode *)tn->child[j]);
790
791 tnode_free(tn);
792
793 return ERR_PTR(-ENOMEM);
794 }
795 }
796
797 static struct tnode *halve(struct trie *t, struct tnode *tn)
798 {
799 struct tnode *oldtnode = tn;
800 struct node *left, *right;
801 int i;
802 int olen = tnode_child_length(tn);
803
804 pr_debug("In halve\n");
805
806 tn = tnode_new(oldtnode->key, oldtnode->pos, oldtnode->bits - 1);
807
808 if (!tn)
809 return ERR_PTR(-ENOMEM);
810
811 /*
812 * Preallocate and store tnodes before the actual work so we
813 * don't get into an inconsistent state if memory allocation
814 * fails. In case of failure we return the oldnode and halve
815 * of tnode is ignored.
816 */
817
818 for (i = 0; i < olen; i += 2) {
819 left = tnode_get_child(oldtnode, i);
820 right = tnode_get_child(oldtnode, i+1);
821
822 /* Two nonempty children */
823 if (left && right) {
824 struct tnode *newn;
825
826 newn = tnode_new(left->key, tn->pos + tn->bits, 1);
827
828 if (!newn)
829 goto nomem;
830
831 put_child(t, tn, i/2, (struct node *)newn);
832 }
833
834 }
835
836 for (i = 0; i < olen; i += 2) {
837 struct tnode *newBinNode;
838
839 left = tnode_get_child(oldtnode, i);
840 right = tnode_get_child(oldtnode, i+1);
841
842 /* At least one of the children is empty */
843 if (left == NULL) {
844 if (right == NULL) /* Both are empty */
845 continue;
846 put_child(t, tn, i/2, right);
847 continue;
848 }
849
850 if (right == NULL) {
851 put_child(t, tn, i/2, left);
852 continue;
853 }
854
855 /* Two nonempty children */
856 newBinNode = (struct tnode *) tnode_get_child(tn, i/2);
857 put_child(t, tn, i/2, NULL);
858 put_child(t, newBinNode, 0, left);
859 put_child(t, newBinNode, 1, right);
860 put_child(t, tn, i/2, resize(t, newBinNode));
861 }
862 tnode_free(oldtnode);
863 return tn;
864 nomem:
865 {
866 int size = tnode_child_length(tn);
867 int j;
868
869 for (j = 0; j < size; j++)
870 if (tn->child[j])
871 tnode_free((struct tnode *)tn->child[j]);
872
873 tnode_free(tn);
874
875 return ERR_PTR(-ENOMEM);
876 }
877 }
878
879 static void trie_init(struct trie *t)
880 {
881 if (!t)
882 return;
883
884 t->size = 0;
885 rcu_assign_pointer(t->trie, NULL);
886 t->revision = 0;
887 #ifdef CONFIG_IP_FIB_TRIE_STATS
888 memset(&t->stats, 0, sizeof(struct trie_use_stats));
889 #endif
890 }
891
892 /* Readers must hold rcu_read_lock; currently the only readers are
893 the dump routines, via get_fa_head and dump */
894
895 static struct leaf_info *find_leaf_info(struct leaf *l, int plen)
896 {
897 struct hlist_head *head = &l->list;
898 struct hlist_node *node;
899 struct leaf_info *li;
900
901 hlist_for_each_entry_rcu(li, node, head, hlist)
902 if (li->plen == plen)
903 return li;
904
905 return NULL;
906 }
907
908 static inline struct list_head * get_fa_head(struct leaf *l, int plen)
909 {
910 struct leaf_info *li = find_leaf_info(l, plen);
911
912 if (!li)
913 return NULL;
914
915 return &li->falh;
916 }
917
918 static void insert_leaf_info(struct hlist_head *head, struct leaf_info *new)
919 {
920 struct leaf_info *li = NULL, *last = NULL;
921 struct hlist_node *node;
922
923 if (hlist_empty(head)) {
924 hlist_add_head_rcu(&new->hlist, head);
925 } else {
926 hlist_for_each_entry(li, node, head, hlist) {
927 if (new->plen > li->plen)
928 break;
929
930 last = li;
931 }
932 if (last)
933 hlist_add_after_rcu(&last->hlist, &new->hlist);
934 else
935 hlist_add_before_rcu(&new->hlist, &li->hlist);
936 }
937 }
938
939 /* rcu_read_lock needs to be held by the caller on the read side */
940
941 static struct leaf *
942 fib_find_node(struct trie *t, u32 key)
943 {
944 int pos;
945 struct tnode *tn;
946 struct node *n;
947
948 pos = 0;
949 n = rcu_dereference(t->trie);
950
951 while (n != NULL && NODE_TYPE(n) == T_TNODE) {
952 tn = (struct tnode *) n;
953
954 check_tnode(tn);
955
956 if (tkey_sub_equals(tn->key, pos, tn->pos-pos, key)) {
957 pos = tn->pos + tn->bits;
958 n = tnode_get_child(tn, tkey_extract_bits(key, tn->pos, tn->bits));
959 } else
960 break;
961 }
962 /* Case we have found a leaf. Compare prefixes */
963
964 if (n != NULL && IS_LEAF(n) && tkey_equals(key, n->key))
965 return (struct leaf *)n;
966
967 return NULL;
968 }
969
970 static struct node *trie_rebalance(struct trie *t, struct tnode *tn)
971 {
972 int wasfull;
973 t_key cindex, key = tn->key;
974 struct tnode *tp;
975
976 while (tn != NULL && (tp = node_parent((struct node *)tn)) != NULL) {
977 cindex = tkey_extract_bits(key, tp->pos, tp->bits);
978 wasfull = tnode_full(tp, tnode_get_child(tp, cindex));
979 tn = (struct tnode *) resize (t, (struct tnode *)tn);
980 tnode_put_child_reorg((struct tnode *)tp, cindex,(struct node*)tn, wasfull);
981
982 tp = node_parent((struct node *) tn);
983 if (!tp)
984 break;
985 tn = tp;
986 }
987
988 /* Handle last (top) tnode */
989 if (IS_TNODE(tn))
990 tn = (struct tnode*) resize(t, (struct tnode *)tn);
991
992 return (struct node*) tn;
993 }
994
995 /* only used from updater-side */
996
997 static struct list_head *
998 fib_insert_node(struct trie *t, int *err, u32 key, int plen)
999 {
1000 int pos, newpos;
1001 struct tnode *tp = NULL, *tn = NULL;
1002 struct node *n;
1003 struct leaf *l;
1004 int missbit;
1005 struct list_head *fa_head = NULL;
1006 struct leaf_info *li;
1007 t_key cindex;
1008
1009 pos = 0;
1010 n = t->trie;
1011
1012 /* If we point to NULL, stop. Either the tree is empty and we should
1013 * just put a new leaf in it, or we have reached an empty child slot,
1014 * and we should just put our new leaf in that.
1015 * If we point to a T_TNODE, check if it matches our key. Note that
1016 * a T_TNODE might be skipping any number of bits - its 'pos' need
1017 * not be the parent's 'pos'+'bits'!
1018 *
1019 * If it does match the current key, get pos/bits from it, extract
1020 * the index from our key, push the T_TNODE and walk the tree.
1021 *
1022 * If it doesn't, we have to replace it with a new T_TNODE.
1023 *
1024 * If we point to a T_LEAF, it might or might not have the same key
1025 * as we do. If it does, we simply update the T_LEAF's
1026 * value and return it.
1027 * If it doesn't, we need to replace it with a T_TNODE.
1028 */
1029
1030 while (n != NULL && NODE_TYPE(n) == T_TNODE) {
1031 tn = (struct tnode *) n;
1032
1033 check_tnode(tn);
1034
1035 if (tkey_sub_equals(tn->key, pos, tn->pos-pos, key)) {
1036 tp = tn;
1037 pos = tn->pos + tn->bits;
1038 n = tnode_get_child(tn, tkey_extract_bits(key, tn->pos, tn->bits));
1039
1040 BUG_ON(n && node_parent(n) != tn);
1041 } else
1042 break;
1043 }
1044
1045 /*
1046 * n ----> NULL, LEAF or TNODE
1047 *
1048 * tp is n's (parent) ----> NULL or TNODE
1049 */
1050
1051 BUG_ON(tp && IS_LEAF(tp));
1052
1053 /* Case 1: n is a leaf. Compare prefixes */
1054
1055 if (n != NULL && IS_LEAF(n) && tkey_equals(key, n->key)) {
1056 struct leaf *l = (struct leaf *) n;
1057
1058 li = leaf_info_new(plen);
1059
1060 if (!li) {
1061 *err = -ENOMEM;
1062 goto err;
1063 }
1064
1065 fa_head = &li->falh;
1066 insert_leaf_info(&l->list, li);
1067 goto done;
1068 }
1069 t->size++;
1070 l = leaf_new();
1071
1072 if (!l) {
1073 *err = -ENOMEM;
1074 goto err;
1075 }
1076
1077 l->key = key;
1078 li = leaf_info_new(plen);
1079
1080 if (!li) {
1081 tnode_free((struct tnode *) l);
1082 *err = -ENOMEM;
1083 goto err;
1084 }
1085
1086 fa_head = &li->falh;
1087 insert_leaf_info(&l->list, li);
1088
1089 if (t->trie && n == NULL) {
1090 /* Case 2: n is NULL, and we will just insert a new leaf */
1091
1092 node_set_parent((struct node *)l, tp);
1093
1094 cindex = tkey_extract_bits(key, tp->pos, tp->bits);
1095 put_child(t, (struct tnode *)tp, cindex, (struct node *)l);
1096 } else {
1097 /* Case 3: n is a LEAF or a TNODE and the key doesn't match. */
1098 /*
1099 * Add a new tnode here
1100 * the first tnode needs some special handling
1101 */
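		/*
		 * Hypothetical example: the trie holds a single leaf for
		 * 192.168.0.0 (n->key == 0xC0A80000, tp == NULL) and we are
		 * inserting key == 0xC0A80100 (192.168.1.0).  Then pos == 0,
		 * newpos = tkey_mismatch(key, 0, n->key) == 23, so a 1-bit
		 * tnode is created at pos 23; missbit ==
		 * tkey_extract_bits(key, 23, 1) == 1, so the new leaf goes
		 * into child[1] and the old leaf 'n' into child[0].
		 */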
1102
1103 if (tp)
1104 pos = tp->pos+tp->bits;
1105 else
1106 pos = 0;
1107
1108 if (n) {
1109 newpos = tkey_mismatch(key, pos, n->key);
1110 tn = tnode_new(n->key, newpos, 1);
1111 } else {
1112 newpos = 0;
1113 tn = tnode_new(key, newpos, 1); /* First tnode */
1114 }
1115
1116 if (!tn) {
1117 free_leaf_info(li);
1118 tnode_free((struct tnode *) l);
1119 *err = -ENOMEM;
1120 goto err;
1121 }
1122
1123 node_set_parent((struct node *)tn, tp);
1124
1125 missbit = tkey_extract_bits(key, newpos, 1);
1126 put_child(t, tn, missbit, (struct node *)l);
1127 put_child(t, tn, 1-missbit, n);
1128
1129 if (tp) {
1130 cindex = tkey_extract_bits(key, tp->pos, tp->bits);
1131 put_child(t, (struct tnode *)tp, cindex, (struct node *)tn);
1132 } else {
1133 rcu_assign_pointer(t->trie, (struct node *)tn); /* First tnode */
1134 tp = tn;
1135 }
1136 }
1137
1138 if (tp && tp->pos + tp->bits > 32)
1139 printk(KERN_WARNING "fib_trie tp=%p pos=%d, bits=%d, key=%0x plen=%d\n",
1140 tp, tp->pos, tp->bits, key, plen);
1141
1142 /* Rebalance the trie */
1143
1144 rcu_assign_pointer(t->trie, trie_rebalance(t, tp));
1145 done:
1146 t->revision++;
1147 err:
1148 return fa_head;
1149 }
1150
1151 /*
1152 * Caller must hold RTNL.
1153 */
1154 static int fn_trie_insert(struct fib_table *tb, struct fib_config *cfg)
1155 {
1156 struct trie *t = (struct trie *) tb->tb_data;
1157 struct fib_alias *fa, *new_fa;
1158 struct list_head *fa_head = NULL;
1159 struct fib_info *fi;
1160 int plen = cfg->fc_dst_len;
1161 u8 tos = cfg->fc_tos;
1162 u32 key, mask;
1163 int err;
1164 struct leaf *l;
1165
1166 if (plen > 32)
1167 return -EINVAL;
1168
1169 key = ntohl(cfg->fc_dst);
1170
1171 pr_debug("Insert table=%u %08x/%d\n", tb->tb_id, key, plen);
1172
1173 mask = ntohl(inet_make_mask(plen));
1174
1175 if (key & ~mask)
1176 return -EINVAL;
1177
1178 key = key & mask;
1179
1180 fi = fib_create_info(cfg);
1181 if (IS_ERR(fi)) {
1182 err = PTR_ERR(fi);
1183 goto err;
1184 }
1185
1186 l = fib_find_node(t, key);
1187 fa = NULL;
1188
1189 if (l) {
1190 fa_head = get_fa_head(l, plen);
1191 fa = fib_find_alias(fa_head, tos, fi->fib_priority);
1192 }
1193
1194 /* Now fa, if non-NULL, points to the first fib alias
1195 * with the same keys [prefix,tos,priority], if such a key already
1196 * exists, or to the node before which we will insert the new one.
1197 *
1198 * If fa is NULL, we will need to allocate a new one and
1199 * insert to the head of f.
1200 *
1201 * If f is NULL, no fib node matched the destination key
1202 * and we need to allocate a new one of those as well.
1203 */
1204
1205 if (fa && fa->fa_info->fib_priority == fi->fib_priority) {
1206 struct fib_alias *fa_orig;
1207
1208 err = -EEXIST;
1209 if (cfg->fc_nlflags & NLM_F_EXCL)
1210 goto out;
1211
1212 if (cfg->fc_nlflags & NLM_F_REPLACE) {
1213 struct fib_info *fi_drop;
1214 u8 state;
1215
1216 if (fi->fib_treeref > 1)
1217 goto out;
1218
1219 err = -ENOBUFS;
1220 new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL);
1221 if (new_fa == NULL)
1222 goto out;
1223
1224 fi_drop = fa->fa_info;
1225 new_fa->fa_tos = fa->fa_tos;
1226 new_fa->fa_info = fi;
1227 new_fa->fa_type = cfg->fc_type;
1228 new_fa->fa_scope = cfg->fc_scope;
1229 state = fa->fa_state;
1230 new_fa->fa_state &= ~FA_S_ACCESSED;
1231
1232 list_replace_rcu(&fa->fa_list, &new_fa->fa_list);
1233 alias_free_mem_rcu(fa);
1234
1235 fib_release_info(fi_drop);
1236 if (state & FA_S_ACCESSED)
1237 rt_cache_flush(-1);
1238 rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen,
1239 tb->tb_id, &cfg->fc_nlinfo, NLM_F_REPLACE);
1240
1241 goto succeeded;
1242 }
1243 /* Error if we find a perfect match which
1244 * uses the same scope, type, and nexthop
1245 * information.
1246 */
1247 fa_orig = fa;
1248 list_for_each_entry(fa, fa_orig->fa_list.prev, fa_list) {
1249 if (fa->fa_tos != tos)
1250 break;
1251 if (fa->fa_info->fib_priority != fi->fib_priority)
1252 break;
1253 if (fa->fa_type == cfg->fc_type &&
1254 fa->fa_scope == cfg->fc_scope &&
1255 fa->fa_info == fi) {
1256 goto out;
1257 }
1258 }
1259 if (!(cfg->fc_nlflags & NLM_F_APPEND))
1260 fa = fa_orig;
1261 }
1262 err = -ENOENT;
1263 if (!(cfg->fc_nlflags & NLM_F_CREATE))
1264 goto out;
1265
1266 err = -ENOBUFS;
1267 new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL);
1268 if (new_fa == NULL)
1269 goto out;
1270
1271 new_fa->fa_info = fi;
1272 new_fa->fa_tos = tos;
1273 new_fa->fa_type = cfg->fc_type;
1274 new_fa->fa_scope = cfg->fc_scope;
1275 new_fa->fa_state = 0;
1276 /*
1277 * Insert new entry to the list.
1278 */
1279
1280 if (!fa_head) {
1281 err = 0;
1282 fa_head = fib_insert_node(t, &err, key, plen);
1283 if (err)
1284 goto out_free_new_fa;
1285 }
1286
1287 list_add_tail_rcu(&new_fa->fa_list,
1288 (fa ? &fa->fa_list : fa_head));
1289
1290 rt_cache_flush(-1);
1291 rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen, tb->tb_id,
1292 &cfg->fc_nlinfo, 0);
1293 succeeded:
1294 return 0;
1295
1296 out_free_new_fa:
1297 kmem_cache_free(fn_alias_kmem, new_fa);
1298 out:
1299 fib_release_info(fi);
1300 err:
1301 return err;
1302 }
1303
1304
1305 /* should be called with rcu_read_lock */
1306 static inline int check_leaf(struct trie *t, struct leaf *l,
1307 t_key key, int *plen, const struct flowi *flp,
1308 struct fib_result *res)
1309 {
1310 int err, i;
1311 __be32 mask;
1312 struct leaf_info *li;
1313 struct hlist_head *hhead = &l->list;
1314 struct hlist_node *node;
1315
1316 hlist_for_each_entry_rcu(li, node, hhead, hlist) {
1317 i = li->plen;
1318 mask = inet_make_mask(i);
1319 if (l->key != (key & ntohl(mask)))
1320 continue;
1321
1322 if ((err = fib_semantic_match(&li->falh, flp, res, htonl(l->key), mask, i)) <= 0) {
1323 *plen = i;
1324 #ifdef CONFIG_IP_FIB_TRIE_STATS
1325 t->stats.semantic_match_passed++;
1326 #endif
1327 return err;
1328 }
1329 #ifdef CONFIG_IP_FIB_TRIE_STATS
1330 t->stats.semantic_match_miss++;
1331 #endif
1332 }
1333 return 1;
1334 }
1335
1336 static int
1337 fn_trie_lookup(struct fib_table *tb, const struct flowi *flp, struct fib_result *res)
1338 {
1339 struct trie *t = (struct trie *) tb->tb_data;
1340 int plen, ret = 0;
1341 struct node *n;
1342 struct tnode *pn;
1343 int pos, bits;
1344 t_key key = ntohl(flp->fl4_dst);
1345 int chopped_off;
1346 t_key cindex = 0;
1347 int current_prefix_length = KEYLENGTH;
1348 struct tnode *cn;
1349 t_key node_prefix, key_prefix, pref_mismatch;
1350 int mp;
1351
1352 rcu_read_lock();
1353
1354 n = rcu_dereference(t->trie);
1355 if (!n)
1356 goto failed;
1357
1358 #ifdef CONFIG_IP_FIB_TRIE_STATS
1359 t->stats.gets++;
1360 #endif
1361
1362 /* Just a leaf? */
1363 if (IS_LEAF(n)) {
1364 if ((ret = check_leaf(t, (struct leaf *)n, key, &plen, flp, res)) <= 0)
1365 goto found;
1366 goto failed;
1367 }
1368 pn = (struct tnode *) n;
1369 chopped_off = 0;
1370
1371 while (pn) {
1372 pos = pn->pos;
1373 bits = pn->bits;
1374
1375 if (!chopped_off)
1376 cindex = tkey_extract_bits(mask_pfx(key, current_prefix_length),
1377 pos, bits);
1378
1379 n = tnode_get_child(pn, cindex);
1380
1381 if (n == NULL) {
1382 #ifdef CONFIG_IP_FIB_TRIE_STATS
1383 t->stats.null_node_hit++;
1384 #endif
1385 goto backtrace;
1386 }
1387
1388 if (IS_LEAF(n)) {
1389 if ((ret = check_leaf(t, (struct leaf *)n, key, &plen, flp, res)) <= 0)
1390 goto found;
1391 else
1392 goto backtrace;
1393 }
1394
1395 #define HL_OPTIMIZE
1396 #ifdef HL_OPTIMIZE
1397 cn = (struct tnode *)n;
1398
1399 /*
1400 * It's a tnode, and we can do some extra checks here if we
1401 * like, to avoid descending into a dead-end branch.
1402 * This tnode is in the parent's child array at index
1403 * key[p_pos..p_pos+p_bits] but potentially with some bits
1404 * chopped off, so in reality the index may be just a
1405 * subprefix, padded with zero at the end.
1406 * We can also take a look at any skipped bits in this
1407 * tnode - everything up to p_pos is supposed to be ok,
1408 * and the non-chopped bits of the index (see the previous
1409 * paragraph) are also guaranteed ok, but the rest is
1410 * considered unknown.
1411 *
1412 * The skipped bits are key[pos+bits..cn->pos].
1413 */
1414
1415 /* If current_prefix_length < pos+bits, we are already doing
1416 * actual prefix matching, which means everything from
1417 * pos+(bits-chopped_off) onward must be zero along some
1418 * branch of this subtree - otherwise there is *no* valid
1419 * prefix present. Here we can only check the skipped
1420 * bits. Remember, since we have already indexed into the
1421 * parent's child array, we know that the bits we chopped off
1422 * *are* zero.
1423 */
1424
1425 /* NOTA BENE: CHECKING ONLY SKIPPED BITS FOR THE NEW NODE HERE */
1426
1427 if (current_prefix_length < pos+bits) {
1428 if (tkey_extract_bits(cn->key, current_prefix_length,
1429 cn->pos - current_prefix_length) != 0 ||
1430 !(cn->child[0]))
1431 goto backtrace;
1432 }
1433
1434 /*
1435 * If chopped_off=0, the index is fully validated and we
1436 * only need to look at the skipped bits for this, the new,
1437 * tnode. What we actually want to do is to find out if
1438 * these skipped bits match our key perfectly, or if we will
1439 * have to count on finding a matching prefix further down,
1440 * because if we do, we would like to have some way of
1441 * verifying the existence of such a prefix at this point.
1442 */
1443
1444 /* The only thing we can do at this point is to verify that
1445 * any such matching prefix can indeed be a prefix to our
1446 * key, and if the bits in the node we are inspecting that
1447 * do not match our key are not ZERO, this cannot be true.
1448 * Thus, find out where there is a mismatch (before cn->pos)
1449 * and verify that all the mismatching bits are zero in the
1450 * new tnode's key.
1451 */
1452
1453 /* Note: We aren't very concerned about the piece of the key
1454 * that precedes pn->pos+pn->bits, since these bits have already been
1455 * checked. The bits after cn->pos aren't checked since these are
1456 * by definition "unknown" at this point. Thus, what we want to
1457 * see is if we are about to enter the "prefix matching" state,
1458 * and in that case verify that the skipped bits that will prevail
1459 * throughout this subtree are zero, as they have to be if we are
1460 * to find a matching prefix.
1461 */
1462
1463 node_prefix = mask_pfx(cn->key, cn->pos);
1464 key_prefix = mask_pfx(key, cn->pos);
1465 pref_mismatch = key_prefix^node_prefix;
1466 mp = 0;
1467
1468 /* In short: If skipped bits in this node do not match the search
1469 * key, enter the "prefix matching" state directly.
1470 */
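		/*
		 * Hypothetical example: cn->pos == 24, cn->key == 0xC0A80100
		 * and the masked search key is 0xC0A80200.  Then
		 * pref_mismatch == 0x00000300, the scan below finds the first
		 * mismatching bit at mp == 22, and
		 * tkey_extract_bits(cn->key, 22, 2) == 1 (bit 23 of cn->key
		 * is set), so no prefix of our key can exist in this subtree
		 * and we backtrack.
		 */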
1471 if (pref_mismatch) {
1472 while (!(pref_mismatch & (1<<(KEYLENGTH-1)))) {
1473 mp++;
1474 pref_mismatch = pref_mismatch <<1;
1475 }
1476 key_prefix = tkey_extract_bits(cn->key, mp, cn->pos-mp);
1477
1478 if (key_prefix != 0)
1479 goto backtrace;
1480
1481 if (current_prefix_length >= cn->pos)
1482 current_prefix_length = mp;
1483 }
1484 #endif
1485 pn = (struct tnode *)n; /* Descend */
1486 chopped_off = 0;
1487 continue;
1488
1489 backtrace:
1490 chopped_off++;
1491
1492 /* A zero bit doesn't change the child key (cindex), so skip over zeroes */
1493 while ((chopped_off <= pn->bits) && !(cindex & (1<<(chopped_off-1))))
1494 chopped_off++;
1495
1496 /* Decrease current_... with bits chopped off */
1497 if (current_prefix_length > pn->pos + pn->bits - chopped_off)
1498 current_prefix_length = pn->pos + pn->bits - chopped_off;
1499
1500 /*
1501 * Either we do the actual chopping off, or, if we have chopped
1502 * off all the bits in this tnode, we walk up to our parent.
1503 */
1504
1505 if (chopped_off <= pn->bits) {
1506 cindex &= ~(1 << (chopped_off-1));
1507 } else {
1508 struct tnode *parent = node_parent((struct node *) pn);
1509 if (!parent)
1510 goto failed;
1511
1512 /* Get Child's index */
1513 cindex = tkey_extract_bits(pn->key, parent->pos, parent->bits);
1514 pn = parent;
1515 chopped_off = 0;
1516
1517 #ifdef CONFIG_IP_FIB_TRIE_STATS
1518 t->stats.backtrack++;
1519 #endif
1520 goto backtrace;
1521 }
1522 }
1523 failed:
1524 ret = 1;
1525 found:
1526 rcu_read_unlock();
1527 return ret;
1528 }
1529
1530 /* only called from updater side */
1531 static int trie_leaf_remove(struct trie *t, t_key key)
1532 {
1533 t_key cindex;
1534 struct tnode *tp = NULL;
1535 struct node *n = t->trie;
1536 struct leaf *l;
1537
1538 pr_debug("entering trie_leaf_remove(%p)\n", n);
1539
1540 /* Note that in the case of skipped bits, those bits are *not* checked!
1541 * When we finish this, we will have NULL or a T_LEAF, and the
1542 * T_LEAF may or may not match our key.
1543 */
1544
1545 while (n != NULL && IS_TNODE(n)) {
1546 struct tnode *tn = (struct tnode *) n;
1547 check_tnode(tn);
1548 n = tnode_get_child(tn, tkey_extract_bits(key, tn->pos, tn->bits));
1549
1550 BUG_ON(n && node_parent(n) != tn);
1551 }
1552 l = (struct leaf *) n;
1553
1554 if (!n || !tkey_equals(l->key, key))
1555 return 0;
1556
1557 /*
1558 * Key found.
1559 * Remove the leaf and rebalance the tree
1560 */
1561
1562 t->revision++;
1563 t->size--;
1564
1565 tp = node_parent(n);
1566 tnode_free((struct tnode *) n);
1567
1568 if (tp) {
1569 cindex = tkey_extract_bits(key, tp->pos, tp->bits);
1570 put_child(t, (struct tnode *)tp, cindex, NULL);
1571 rcu_assign_pointer(t->trie, trie_rebalance(t, tp));
1572 } else
1573 rcu_assign_pointer(t->trie, NULL);
1574
1575 return 1;
1576 }
1577
1578 /*
1579 * Caller must hold RTNL.
1580 */
1581 static int fn_trie_delete(struct fib_table *tb, struct fib_config *cfg)
1582 {
1583 struct trie *t = (struct trie *) tb->tb_data;
1584 u32 key, mask;
1585 int plen = cfg->fc_dst_len;
1586 u8 tos = cfg->fc_tos;
1587 struct fib_alias *fa, *fa_to_delete;
1588 struct list_head *fa_head;
1589 struct leaf *l;
1590 struct leaf_info *li;
1591
1592 if (plen > 32)
1593 return -EINVAL;
1594
1595 key = ntohl(cfg->fc_dst);
1596 mask = ntohl(inet_make_mask(plen));
1597
1598 if (key & ~mask)
1599 return -EINVAL;
1600
1601 key = key & mask;
1602 l = fib_find_node(t, key);
1603
1604 if (!l)
1605 return -ESRCH;
1606
1607 fa_head = get_fa_head(l, plen);
1608 fa = fib_find_alias(fa_head, tos, 0);
1609
1610 if (!fa)
1611 return -ESRCH;
1612
1613 pr_debug("Deleting %08x/%d tos=%d t=%p\n", key, plen, tos, t);
1614
1615 fa_to_delete = NULL;
1616 fa_head = fa->fa_list.prev;
1617
1618 list_for_each_entry(fa, fa_head, fa_list) {
1619 struct fib_info *fi = fa->fa_info;
1620
1621 if (fa->fa_tos != tos)
1622 break;
1623
1624 if ((!cfg->fc_type || fa->fa_type == cfg->fc_type) &&
1625 (cfg->fc_scope == RT_SCOPE_NOWHERE ||
1626 fa->fa_scope == cfg->fc_scope) &&
1627 (!cfg->fc_protocol ||
1628 fi->fib_protocol == cfg->fc_protocol) &&
1629 fib_nh_match(cfg, fi) == 0) {
1630 fa_to_delete = fa;
1631 break;
1632 }
1633 }
1634
1635 if (!fa_to_delete)
1636 return -ESRCH;
1637
1638 fa = fa_to_delete;
1639 rtmsg_fib(RTM_DELROUTE, htonl(key), fa, plen, tb->tb_id,
1640 &cfg->fc_nlinfo, 0);
1641
1642 l = fib_find_node(t, key);
1643 li = find_leaf_info(l, plen);
1644
1645 list_del_rcu(&fa->fa_list);
1646
1647 if (list_empty(fa_head)) {
1648 hlist_del_rcu(&li->hlist);
1649 free_leaf_info(li);
1650 }
1651
1652 if (hlist_empty(&l->list))
1653 trie_leaf_remove(t, key);
1654
1655 if (fa->fa_state & FA_S_ACCESSED)
1656 rt_cache_flush(-1);
1657
1658 fib_release_info(fa->fa_info);
1659 alias_free_mem_rcu(fa);
1660 return 0;
1661 }
1662
1663 static int trie_flush_list(struct trie *t, struct list_head *head)
1664 {
1665 struct fib_alias *fa, *fa_node;
1666 int found = 0;
1667
1668 list_for_each_entry_safe(fa, fa_node, head, fa_list) {
1669 struct fib_info *fi = fa->fa_info;
1670
1671 if (fi && (fi->fib_flags & RTNH_F_DEAD)) {
1672 list_del_rcu(&fa->fa_list);
1673 fib_release_info(fa->fa_info);
1674 alias_free_mem_rcu(fa);
1675 found++;
1676 }
1677 }
1678 return found;
1679 }
1680
1681 static int trie_flush_leaf(struct trie *t, struct leaf *l)
1682 {
1683 int found = 0;
1684 struct hlist_head *lih = &l->list;
1685 struct hlist_node *node, *tmp;
1686 struct leaf_info *li = NULL;
1687
1688 hlist_for_each_entry_safe(li, node, tmp, lih, hlist) {
1689 found += trie_flush_list(t, &li->falh);
1690
1691 if (list_empty(&li->falh)) {
1692 hlist_del_rcu(&li->hlist);
1693 free_leaf_info(li);
1694 }
1695 }
1696 return found;
1697 }
1698
1699 /* rcu_read_lock needs to be held by the caller on the read side */
1700
1701 static struct leaf *nextleaf(struct trie *t, struct leaf *thisleaf)
1702 {
1703 struct node *c = (struct node *) thisleaf;
1704 struct tnode *p;
1705 int idx;
1706 struct node *trie = rcu_dereference(t->trie);
1707
1708 if (c == NULL) {
1709 if (trie == NULL)
1710 return NULL;
1711
1712 if (IS_LEAF(trie)) /* trie w. just a leaf */
1713 return (struct leaf *) trie;
1714
1715 p = (struct tnode*) trie; /* Start */
1716 } else
1717 p = node_parent(c);
1718
1719 while (p) {
1720 int pos, last;
1721
1722 /* Find the next child of the parent */
1723 if (c)
1724 pos = 1 + tkey_extract_bits(c->key, p->pos, p->bits);
1725 else
1726 pos = 0;
1727
1728 last = 1 << p->bits;
1729 for (idx = pos; idx < last ; idx++) {
1730 c = rcu_dereference(p->child[idx]);
1731
1732 if (!c)
1733 continue;
1734
1735 /* Descend if tnode */
1736 while (IS_TNODE(c)) {
1737 p = (struct tnode *) c;
1738 idx = 0;
1739
1740 /* Rightmost non-NULL branch */
1741 if (p && IS_TNODE(p))
1742 while (idx < (1<<p->bits) &&
1743 !(c = rcu_dereference(p->child[idx]))) idx++;
1744
1745 /* Done with this tnode? */
1746 if (idx >= (1 << p->bits) || !c)
1747 goto up;
1748 }
1749 return (struct leaf *) c;
1750 }
1751 up:
1752 /* No more children, go up one step */
1753 c = (struct node *) p;
1754 p = node_parent(c);
1755 }
1756 return NULL; /* Ready. Root of trie */
1757 }
1758
1759 /*
1760 * Caller must hold RTNL.
1761 */
1762 static int fn_trie_flush(struct fib_table *tb)
1763 {
1764 struct trie *t = (struct trie *) tb->tb_data;
1765 struct leaf *ll = NULL, *l = NULL;
1766 int found = 0, h;
1767
1768 t->revision++;
1769
1770 for (h = 0; (l = nextleaf(t, l)) != NULL; h++) {
1771 found += trie_flush_leaf(t, l);
1772
1773 if (ll && hlist_empty(&ll->list))
1774 trie_leaf_remove(t, ll->key);
1775 ll = l;
1776 }
1777
1778 if (ll && hlist_empty(&ll->list))
1779 trie_leaf_remove(t, ll->key);
1780
1781 pr_debug("trie_flush found=%d\n", found);
1782 return found;
1783 }
1784
1785 static int trie_last_dflt = -1;
1786
1787 static void
1788 fn_trie_select_default(struct fib_table *tb, const struct flowi *flp, struct fib_result *res)
1789 {
1790 struct trie *t = (struct trie *) tb->tb_data;
1791 int order, last_idx;
1792 struct fib_info *fi = NULL;
1793 struct fib_info *last_resort;
1794 struct fib_alias *fa = NULL;
1795 struct list_head *fa_head;
1796 struct leaf *l;
1797
1798 last_idx = -1;
1799 last_resort = NULL;
1800 order = -1;
1801
1802 rcu_read_lock();
1803
1804 l = fib_find_node(t, 0);
1805 if (!l)
1806 goto out;
1807
1808 fa_head = get_fa_head(l, 0);
1809 if (!fa_head)
1810 goto out;
1811
1812 if (list_empty(fa_head))
1813 goto out;
1814
1815 list_for_each_entry_rcu(fa, fa_head, fa_list) {
1816 struct fib_info *next_fi = fa->fa_info;
1817
1818 if (fa->fa_scope != res->scope ||
1819 fa->fa_type != RTN_UNICAST)
1820 continue;
1821
1822 if (next_fi->fib_priority > res->fi->fib_priority)
1823 break;
1824 if (!next_fi->fib_nh[0].nh_gw ||
1825 next_fi->fib_nh[0].nh_scope != RT_SCOPE_LINK)
1826 continue;
1827 fa->fa_state |= FA_S_ACCESSED;
1828
1829 if (fi == NULL) {
1830 if (next_fi != res->fi)
1831 break;
1832 } else if (!fib_detect_death(fi, order, &last_resort,
1833 &last_idx, &trie_last_dflt)) {
1834 if (res->fi)
1835 fib_info_put(res->fi);
1836 res->fi = fi;
1837 atomic_inc(&fi->fib_clntref);
1838 trie_last_dflt = order;
1839 goto out;
1840 }
1841 fi = next_fi;
1842 order++;
1843 }
1844 if (order <= 0 || fi == NULL) {
1845 trie_last_dflt = -1;
1846 goto out;
1847 }
1848
1849 if (!fib_detect_death(fi, order, &last_resort, &last_idx, &trie_last_dflt)) {
1850 if (res->fi)
1851 fib_info_put(res->fi);
1852 res->fi = fi;
1853 atomic_inc(&fi->fib_clntref);
1854 trie_last_dflt = order;
1855 goto out;
1856 }
1857 if (last_idx >= 0) {
1858 if (res->fi)
1859 fib_info_put(res->fi);
1860 res->fi = last_resort;
1861 if (last_resort)
1862 atomic_inc(&last_resort->fib_clntref);
1863 }
1864 trie_last_dflt = last_idx;
1865 out:;
1866 rcu_read_unlock();
1867 }
1868
1869 static int fn_trie_dump_fa(t_key key, int plen, struct list_head *fah, struct fib_table *tb,
1870 struct sk_buff *skb, struct netlink_callback *cb)
1871 {
1872 int i, s_i;
1873 struct fib_alias *fa;
1874
1875 __be32 xkey = htonl(key);
1876
1877 s_i = cb->args[4];
1878 i = 0;
1879
1880 /* rcu_read_lock is held by caller */
1881
1882 list_for_each_entry_rcu(fa, fah, fa_list) {
1883 if (i < s_i) {
1884 i++;
1885 continue;
1886 }
1887 BUG_ON(!fa->fa_info);
1888
1889 if (fib_dump_info(skb, NETLINK_CB(cb->skb).pid,
1890 cb->nlh->nlmsg_seq,
1891 RTM_NEWROUTE,
1892 tb->tb_id,
1893 fa->fa_type,
1894 fa->fa_scope,
1895 xkey,
1896 plen,
1897 fa->fa_tos,
1898 fa->fa_info, 0) < 0) {
1899 cb->args[4] = i;
1900 return -1;
1901 }
1902 i++;
1903 }
1904 cb->args[4] = i;
1905 return skb->len;
1906 }
1907
1908 static int fn_trie_dump_plen(struct trie *t, int plen, struct fib_table *tb, struct sk_buff *skb,
1909 struct netlink_callback *cb)
1910 {
1911 int h, s_h;
1912 struct list_head *fa_head;
1913 struct leaf *l = NULL;
1914
1915 s_h = cb->args[3];
1916
1917 for (h = 0; (l = nextleaf(t, l)) != NULL; h++) {
1918 if (h < s_h)
1919 continue;
1920 if (h > s_h)
1921 memset(&cb->args[4], 0,
1922 sizeof(cb->args) - 4*sizeof(cb->args[0]));
1923
1924 fa_head = get_fa_head(l, plen);
1925
1926 if (!fa_head)
1927 continue;
1928
1929 if (list_empty(fa_head))
1930 continue;
1931
1932 if (fn_trie_dump_fa(l->key, plen, fa_head, tb, skb, cb)<0) {
1933 cb->args[3] = h;
1934 return -1;
1935 }
1936 }
1937 cb->args[3] = h;
1938 return skb->len;
1939 }
1940
1941 static int fn_trie_dump(struct fib_table *tb, struct sk_buff *skb, struct netlink_callback *cb)
1942 {
1943 int m, s_m;
1944 struct trie *t = (struct trie *) tb->tb_data;
1945
1946 s_m = cb->args[2];
1947
1948 rcu_read_lock();
1949 for (m = 0; m <= 32; m++) {
1950 if (m < s_m)
1951 continue;
1952 if (m > s_m)
1953 memset(&cb->args[3], 0,
1954 sizeof(cb->args) - 3*sizeof(cb->args[0]));
1955
1956 if (fn_trie_dump_plen(t, 32-m, tb, skb, cb)<0) {
1957 cb->args[2] = m;
1958 goto out;
1959 }
1960 }
1961 rcu_read_unlock();
1962 cb->args[2] = m;
1963 return skb->len;
1964 out:
1965 rcu_read_unlock();
1966 return -1;
1967 }
1968
1969 /* Fix more generic FIB names for init later */
1970
1971 #ifdef CONFIG_IP_MULTIPLE_TABLES
1972 struct fib_table * fib_hash_init(u32 id)
1973 #else
1974 struct fib_table * __init fib_hash_init(u32 id)
1975 #endif
1976 {
1977 struct fib_table *tb;
1978 struct trie *t;
1979
1980 if (fn_alias_kmem == NULL)
1981 fn_alias_kmem = kmem_cache_create("ip_fib_alias",
1982 sizeof(struct fib_alias),
1983 0, SLAB_HWCACHE_ALIGN,
1984 NULL);
1985
1986 tb = kmalloc(sizeof(struct fib_table) + sizeof(struct trie),
1987 GFP_KERNEL);
1988 if (tb == NULL)
1989 return NULL;
1990
1991 tb->tb_id = id;
1992 tb->tb_lookup = fn_trie_lookup;
1993 tb->tb_insert = fn_trie_insert;
1994 tb->tb_delete = fn_trie_delete;
1995 tb->tb_flush = fn_trie_flush;
1996 tb->tb_select_default = fn_trie_select_default;
1997 tb->tb_dump = fn_trie_dump;
1998 memset(tb->tb_data, 0, sizeof(struct trie));
1999
2000 t = (struct trie *) tb->tb_data;
2001
2002 trie_init(t);
2003
2004 if (id == RT_TABLE_LOCAL)
2005 printk(KERN_INFO "IPv4 FIB: Using LC-trie version %s\n", VERSION);
2006
2007 return tb;
2008 }
2009
2010 #ifdef CONFIG_PROC_FS
2011 /* Depth first Trie walk iterator */
2012 struct fib_trie_iter {
2013 struct trie *trie_local, *trie_main;
2014 struct tnode *tnode;
2015 struct trie *trie;
2016 unsigned index;
2017 unsigned depth;
2018 };
2019
2020 static struct node *fib_trie_get_next(struct fib_trie_iter *iter)
2021 {
2022 struct tnode *tn = iter->tnode;
2023 unsigned cindex = iter->index;
2024 struct tnode *p;
2025
2026 /* A single entry routing table */
2027 if (!tn)
2028 return NULL;
2029
2030 pr_debug("get_next iter={node=%p index=%d depth=%d}\n",
2031 iter->tnode, iter->index, iter->depth);
2032 rescan:
2033 while (cindex < (1<<tn->bits)) {
2034 struct node *n = tnode_get_child(tn, cindex);
2035
2036 if (n) {
2037 if (IS_LEAF(n)) {
2038 iter->tnode = tn;
2039 iter->index = cindex + 1;
2040 } else {
2041 /* push down one level */
2042 iter->tnode = (struct tnode *) n;
2043 iter->index = 0;
2044 ++iter->depth;
2045 }
2046 return n;
2047 }
2048
2049 ++cindex;
2050 }
2051
2052 /* Current node exhausted, pop back up */
2053 p = node_parent((struct node *)tn);
2054 if (p) {
2055 cindex = tkey_extract_bits(tn->key, p->pos, p->bits)+1;
2056 tn = p;
2057 --iter->depth;
2058 goto rescan;
2059 }
2060
2061 /* got root? */
2062 return NULL;
2063 }
2064
2065 static struct node *fib_trie_get_first(struct fib_trie_iter *iter,
2066 struct trie *t)
2067 {
2068 struct node *n;
2069
2070 if (!t)
2071 return NULL;
2072
2073 n = rcu_dereference(t->trie);
2074
2075 if (!iter)
2076 return NULL;
2077
2078 if (n) {
2079 if (IS_TNODE(n)) {
2080 iter->tnode = (struct tnode *) n;
2081 iter->trie = t;
2082 iter->index = 0;
2083 iter->depth = 1;
2084 } else {
2085 iter->tnode = NULL;
2086 iter->trie = t;
2087 iter->index = 0;
2088 iter->depth = 0;
2089 }
2090 return n;
2091 }
2092 return NULL;
2093 }
2094
2095 static void trie_collect_stats(struct trie *t, struct trie_stat *s)
2096 {
2097 struct node *n;
2098 struct fib_trie_iter iter;
2099
2100 memset(s, 0, sizeof(*s));
2101
2102 rcu_read_lock();
2103 for (n = fib_trie_get_first(&iter, t); n;
2104 n = fib_trie_get_next(&iter)) {
2105 if (IS_LEAF(n)) {
2106 s->leaves++;
2107 s->totdepth += iter.depth;
2108 if (iter.depth > s->maxdepth)
2109 s->maxdepth = iter.depth;
2110 } else {
2111 const struct tnode *tn = (const struct tnode *) n;
2112 int i;
2113
2114 s->tnodes++;
2115 if (tn->bits < MAX_STAT_DEPTH)
2116 s->nodesizes[tn->bits]++;
2117
2118 for (i = 0; i < (1<<tn->bits); i++)
2119 if (!tn->child[i])
2120 s->nullpointers++;
2121 }
2122 }
2123 rcu_read_unlock();
2124 }
2125
2126 /*
2127 * This outputs /proc/net/fib_triestats
2128 */
2129 static void trie_show_stats(struct seq_file *seq, struct trie *t, struct trie_stat *stat)
2130 {
2131 unsigned i, max, pointers, bytes, avdepth;
2132
2133 if (stat->leaves)
2134 avdepth = stat->totdepth*100 / stat->leaves;
2135 else
2136 avdepth = 0;
2137
2138 seq_printf(seq, "\tAver depth: %d.%02d\n", avdepth / 100, avdepth % 100 );
2139 seq_printf(seq, "\tMax depth: %u\n", stat->maxdepth);
2140
2141 seq_printf(seq, "\tLeaves: %u\n", stat->leaves);
2142
2143 bytes = sizeof(struct leaf) * stat->leaves;
2144 seq_printf(seq, "\tInternal nodes: %d\n\t", stat->tnodes);
2145 bytes += sizeof(struct tnode) * stat->tnodes;
2146
2147 max = MAX_STAT_DEPTH;
2148 while (max > 0 && stat->nodesizes[max-1] == 0)
2149 max--;
2150
2151 pointers = 0;
2152 for (i = 1; i < max; i++)
2153 if (stat->nodesizes[i] != 0) {
2154 seq_printf(seq, " %d: %d", i, stat->nodesizes[i]);
2155 pointers += (1<<i) * stat->nodesizes[i];
2156 }
2157 seq_putc(seq, '\n');
2158 seq_printf(seq, "\tPointers: %d\n", pointers);
2159
2160 bytes += sizeof(struct node *) * pointers;
2161 seq_printf(seq, "Null ptrs: %d\n", stat->nullpointers);
2162 seq_printf(seq, "Total size: %d kB\n", (bytes + 1023) / 1024);
2163
2164 #ifdef CONFIG_IP_FIB_TRIE_STATS
2165 seq_printf(seq, "Counters:\n---------\n");
2166 seq_printf(seq,"gets = %d\n", t->stats.gets);
2167 seq_printf(seq,"backtracks = %d\n", t->stats.backtrack);
2168 seq_printf(seq,"semantic match passed = %d\n", t->stats.semantic_match_passed);
2169 seq_printf(seq,"semantic match miss = %d\n", t->stats.semantic_match_miss);
2170 seq_printf(seq,"null node hit= %d\n", t->stats.null_node_hit);
2171 seq_printf(seq,"skipped node resize = %d\n", t->stats.resize_node_skipped);
2172 #ifdef CLEAR_STATS
2173 memset(&(t->stats), 0, sizeof(t->stats));
2174 #endif
2175 #endif /* CONFIG_IP_FIB_TRIE_STATS */
2176 }
2177
2178 static int fib_triestat_seq_show(struct seq_file *seq, void *v)
2179 {
2180 struct trie *trie_local, *trie_main;
2181 struct trie_stat *stat;
2182 struct fib_table *tb;
2183
2184 trie_local = NULL;
2185 tb = fib_get_table(RT_TABLE_LOCAL);
2186 if (tb)
2187 trie_local = (struct trie *) tb->tb_data;
2188
2189 trie_main = NULL;
2190 tb = fib_get_table(RT_TABLE_MAIN);
2191 if (tb)
2192 trie_main = (struct trie *) tb->tb_data;
2193
2194
2195 stat = kmalloc(sizeof(*stat), GFP_KERNEL);
2196 if (!stat)
2197 return -ENOMEM;
2198
2199 seq_printf(seq, "Basic info: size of leaf: %Zd bytes, size of tnode: %Zd bytes.\n",
2200 sizeof(struct leaf), sizeof(struct tnode));
2201
2202 if (trie_local) {
2203 seq_printf(seq, "Local:\n");
2204 trie_collect_stats(trie_local, stat);
2205 trie_show_stats(seq, trie_local, stat);
2206 }
2207
2208 if (trie_main) {
2209 seq_printf(seq, "Main:\n");
2210 trie_collect_stats(trie_main, stat);
2211 trie_show_stats(seq, trie_main, stat);
2212 }
2213 kfree(stat);
2214
2215 return 0;
2216 }
2217
2218 static int fib_triestat_seq_open(struct inode *inode, struct file *file)
2219 {
2220 return single_open(file, fib_triestat_seq_show, NULL);
2221 }
2222
2223 static const struct file_operations fib_triestat_fops = {
2224 .owner = THIS_MODULE,
2225 .open = fib_triestat_seq_open,
2226 .read = seq_read,
2227 .llseek = seq_lseek,
2228 .release = single_release,
2229 };
2230
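/*
 * Return the node at linear position @pos in a depth-first walk of the
 * local trie followed by the main trie, or NULL when @pos is past the
 * last node.  Used to (re)position the seq_file iterator.
 */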
2231 static struct node *fib_trie_get_idx(struct fib_trie_iter *iter,
2232 loff_t pos)
2233 {
2234 loff_t idx = 0;
2235 struct node *n;
2236
2237 for (n = fib_trie_get_first(iter, iter->trie_local);
2238 n; ++idx, n = fib_trie_get_next(iter)) {
2239 if (pos == idx)
2240 return n;
2241 }
2242
2243 for (n = fib_trie_get_first(iter, iter->trie_main);
2244 n; ++idx, n = fib_trie_get_next(iter)) {
2245 if (pos == idx)
2246 return n;
2247 }
2248 return NULL;
2249 }
2250
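/*
 * seq_file start callback: bind the local and main tries on first use,
 * take rcu_read_lock() for the duration of the walk (released in
 * fib_trie_seq_stop()) and return SEQ_START_TOKEN for position 0 so a
 * show callback can emit a header (used by the /proc/net/route variant).
 */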
2251 static void *fib_trie_seq_start(struct seq_file *seq, loff_t *pos)
2252 {
2253 struct fib_trie_iter *iter = seq->private;
2254 struct fib_table *tb;
2255
2256 if (!iter->trie_local) {
2257 tb = fib_get_table(RT_TABLE_LOCAL);
2258 if (tb)
2259 iter->trie_local = (struct trie *) tb->tb_data;
2260 }
2261 if (!iter->trie_main) {
2262 tb = fib_get_table(RT_TABLE_MAIN);
2263 if (tb)
2264 iter->trie_main = (struct trie *) tb->tb_data;
2265 }
2266 rcu_read_lock();
2267 if (*pos == 0)
2268 return SEQ_START_TOKEN;
2269 return fib_trie_get_idx(iter, *pos - 1);
2270 }
2271
2272 static void *fib_trie_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2273 {
2274 struct fib_trie_iter *iter = seq->private;
2275 void *l = v;
2276
2277 ++*pos;
2278 if (v == SEQ_START_TOKEN)
2279 return fib_trie_get_idx(iter, 0);
2280
2281 v = fib_trie_get_next(iter);
2282 BUG_ON(v == l);
2283 if (v)
2284 return v;
2285
2286 /* continue scan in next trie */
2287 if (iter->trie == iter->trie_local)
2288 return fib_trie_get_first(iter, iter->trie_main);
2289
2290 return NULL;
2291 }
2292
2293 static void fib_trie_seq_stop(struct seq_file *seq, void *v)
2294 {
2295 rcu_read_unlock();
2296 }
2297
2298 static void seq_indent(struct seq_file *seq, int n)
2299 {
2300 while (n-- > 0) seq_puts(seq, " ");
2301 }
2302
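/*
 * Translate a route scope into a printable name.  Unknown scopes are
 * formatted into a small static buffer, so the returned pointer is only
 * valid until the next call (rtn_type() below does the same for types).
 */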
2303 static inline const char *rtn_scope(enum rt_scope_t s)
2304 {
2305 static char buf[32];
2306
2307 switch (s) {
2308 case RT_SCOPE_UNIVERSE: return "universe";
2309 case RT_SCOPE_SITE: return "site";
2310 case RT_SCOPE_LINK: return "link";
2311 case RT_SCOPE_HOST: return "host";
2312 case RT_SCOPE_NOWHERE: return "nowhere";
2313 default:
2314 snprintf(buf, sizeof(buf), "scope=%d", s);
2315 return buf;
2316 }
2317 }
2318
2319 static const char *rtn_type_names[__RTN_MAX] = {
2320 [RTN_UNSPEC] = "UNSPEC",
2321 [RTN_UNICAST] = "UNICAST",
2322 [RTN_LOCAL] = "LOCAL",
2323 [RTN_BROADCAST] = "BROADCAST",
2324 [RTN_ANYCAST] = "ANYCAST",
2325 [RTN_MULTICAST] = "MULTICAST",
2326 [RTN_BLACKHOLE] = "BLACKHOLE",
2327 [RTN_UNREACHABLE] = "UNREACHABLE",
2328 [RTN_PROHIBIT] = "PROHIBIT",
2329 [RTN_THROW] = "THROW",
2330 [RTN_NAT] = "NAT",
2331 [RTN_XRESOLVE] = "XRESOLVE",
2332 };
2333
2334 static inline const char *rtn_type(unsigned t)
2335 {
2336 static char buf[32];
2337
2338 if (t < __RTN_MAX && rtn_type_names[t])
2339 return rtn_type_names[t];
2340 snprintf(buf, sizeof(buf), "type %d", t);
2341 return buf;
2342 }
2343
2344 /* Pretty print the trie */
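/*
 * Illustrative sketch of the resulting /proc/net/fib_trie layout (the
 * addresses, counters and indentation below are invented, not taken from
 * a real table):
 *
 *	<main>:
 *	 +-- 0.0.0.0/0 3 0 6
 *	    |-- 10.0.0.0
 *	       /8 universe UNICAST
 *	    |-- 192.168.1.0
 *	       /24 link UNICAST
 *
 * Internal nodes are printed as "+-- key/pos bits full_children
 * empty_children", leaves as "|-- address" followed by one
 * "/plen scope type" line per route attached to the leaf.
 */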
2345 static int fib_trie_seq_show(struct seq_file *seq, void *v)
2346 {
2347 const struct fib_trie_iter *iter = seq->private;
2348 struct node *n = v;
2349
2350 if (v == SEQ_START_TOKEN)
2351 return 0;
2352
2353 if (!node_parent(n)) {
2354 if (iter->trie == iter->trie_local)
2355 seq_puts(seq, "<local>:\n");
2356 else
2357 seq_puts(seq, "<main>:\n");
2358 }
2359
2360 if (IS_TNODE(n)) {
2361 struct tnode *tn = (struct tnode *) n;
2362 __be32 prf = htonl(mask_pfx(tn->key, tn->pos));
2363
2364 seq_indent(seq, iter->depth-1);
2365 seq_printf(seq, " +-- %d.%d.%d.%d/%d %d %d %d\n",
2366 NIPQUAD(prf), tn->pos, tn->bits, tn->full_children,
2367 tn->empty_children);
2368
2369 } else {
2370 struct leaf *l = (struct leaf *) n;
2371 int i;
2372 __be32 val = htonl(l->key);
2373
2374 seq_indent(seq, iter->depth);
2375 seq_printf(seq, " |-- %d.%d.%d.%d\n", NIPQUAD(val));
2376 for (i = 32; i >= 0; i--) {
2377 struct leaf_info *li = find_leaf_info(l, i);
2378 if (li) {
2379 struct fib_alias *fa;
2380 list_for_each_entry_rcu(fa, &li->falh, fa_list) {
2381 seq_indent(seq, iter->depth+1);
2382 seq_printf(seq, " /%d %s %s", i,
2383 rtn_scope(fa->fa_scope),
2384 rtn_type(fa->fa_type));
2385 if (fa->fa_tos)
2386 seq_printf(seq, "tos =%d\n",
2387 fa->fa_tos);
2388 seq_putc(seq, '\n');
2389 }
2390 }
2391 }
2392 }
2393
2394 return 0;
2395 }
2396
2397 static const struct seq_operations fib_trie_seq_ops = {
2398 .start = fib_trie_seq_start,
2399 .next = fib_trie_seq_next,
2400 .stop = fib_trie_seq_stop,
2401 .show = fib_trie_seq_show,
2402 };
2403
2404 static int fib_trie_seq_open(struct inode *inode, struct file *file)
2405 {
2406 return seq_open_private(file, &fib_trie_seq_ops,
2407 sizeof(struct fib_trie_iter));
2408 }
2409
2410 static const struct file_operations fib_trie_fops = {
2411 .owner = THIS_MODULE,
2412 .open = fib_trie_seq_open,
2413 .read = seq_read,
2414 .llseek = seq_lseek,
2415 .release = seq_release_private,
2416 };
2417
2418 static unsigned fib_flag_trans(int type, __be32 mask, const struct fib_info *fi)
2419 {
2420 static unsigned type2flags[RTN_MAX + 1] = {
2421 [RTN_UNREACHABLE] = RTF_REJECT, [RTN_PROHIBIT] = RTF_REJECT,
2422 };
2423 unsigned flags = type2flags[type];
2424
2425 if (fi && fi->fib_nh->nh_gw)
2426 flags |= RTF_GATEWAY;
2427 if (mask == htonl(0xFFFFFFFF))
2428 flags |= RTF_HOST;
2429 flags |= RTF_UP;
2430 return flags;
2431 }
2432
2433 /*
2434 * This outputs /proc/net/route.
2435 * The format of the file is not supposed to be changed
2436 * and needs to be the same as the fib_hash output to avoid breaking
2437 * legacy utilities
2438 */
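/*
 * Illustrative sample line on a little-endian host (the route itself is
 * invented): 192.168.1.0/24 on eth0, no gateway, flags RTF_UP:
 *
 *	eth0	0001A8C0	00000000	0001	0	0	0	00FFFFFF	0	0	0
 *
 * Addresses and masks are printed with %08X from the network-byte-order
 * word, so their textual byte order depends on the host endianness.
 */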
2439 static int fib_route_seq_show(struct seq_file *seq, void *v)
2440 {
2441 const struct fib_trie_iter *iter = seq->private;
2442 struct leaf *l = v;
2443 int i;
2444 char bf[128];
2445
2446 if (v == SEQ_START_TOKEN) {
2447 seq_printf(seq, "%-127s\n", "Iface\tDestination\tGateway "
2448 "\tFlags\tRefCnt\tUse\tMetric\tMask\t\tMTU"
2449 "\tWindow\tIRTT");
2450 return 0;
2451 }
2452
2453 if (iter->trie == iter->trie_local)
2454 return 0;
2455 if (IS_TNODE(l))
2456 return 0;
2457
2458 for (i = 32; i >= 0; i--) {
2459 struct leaf_info *li = find_leaf_info(l, i);
2460 struct fib_alias *fa;
2461 __be32 mask, prefix;
2462
2463 if (!li)
2464 continue;
2465
2466 mask = inet_make_mask(li->plen);
2467 prefix = htonl(l->key);
2468
2469 list_for_each_entry_rcu(fa, &li->falh, fa_list) {
2470 const struct fib_info *fi = fa->fa_info;
2471 unsigned flags = fib_flag_trans(fa->fa_type, mask, fi);
2472
2473 if (fa->fa_type == RTN_BROADCAST
2474 || fa->fa_type == RTN_MULTICAST)
2475 continue;
2476
2477 if (fi)
2478 snprintf(bf, sizeof(bf),
2479 "%s\t%08X\t%08X\t%04X\t%d\t%u\t%d\t%08X\t%d\t%u\t%u",
2480 fi->fib_dev ? fi->fib_dev->name : "*",
2481 prefix,
2482 fi->fib_nh->nh_gw, flags, 0, 0,
2483 fi->fib_priority,
2484 mask,
2485 (fi->fib_advmss ? fi->fib_advmss + 40 : 0),
2486 fi->fib_window,
2487 fi->fib_rtt >> 3);
2488 else
2489 snprintf(bf, sizeof(bf),
2490 "*\t%08X\t%08X\t%04X\t%d\t%u\t%d\t%08X\t%d\t%u\t%u",
2491 prefix, 0, flags, 0, 0, 0,
2492 mask, 0, 0, 0);
2493
2494 seq_printf(seq, "%-127s\n", bf);
2495 }
2496 }
2497
2498 return 0;
2499 }
2500
2501 static const struct seq_operations fib_route_seq_ops = {
2502 .start = fib_trie_seq_start,
2503 .next = fib_trie_seq_next,
2504 .stop = fib_trie_seq_stop,
2505 .show = fib_route_seq_show,
2506 };
2507
2508 static int fib_route_seq_open(struct inode *inode, struct file *file)
2509 {
2510 return seq_open_private(file, &fib_route_seq_ops,
2511 sizeof(struct fib_trie_iter));
2512 }
2513
2514 static const struct file_operations fib_route_fops = {
2515 .owner = THIS_MODULE,
2516 .open = fib_route_seq_open,
2517 .read = seq_read,
2518 .llseek = seq_lseek,
2519 .release = seq_release_private,
2520 };
2521
2522 int __init fib_proc_init(void)
2523 {
2524 if (!proc_net_fops_create(&init_net, "fib_trie", S_IRUGO, &fib_trie_fops))
2525 goto out1;
2526
2527 if (!proc_net_fops_create(&init_net, "fib_triestat", S_IRUGO, &fib_triestat_fops))
2528 goto out2;
2529
2530 if (!proc_net_fops_create(&init_net, "route", S_IRUGO, &fib_route_fops))
2531 goto out3;
2532
2533 return 0;
2534
2535 out3:
2536 proc_net_remove(&init_net, "fib_triestat");
2537 out2:
2538 proc_net_remove(&init_net, "fib_trie");
2539 out1:
2540 return -ENOMEM;
2541 }
2542
2543 void __init fib_proc_exit(void)
2544 {
2545 proc_net_remove(&init_net, "fib_trie");
2546 proc_net_remove(&init_net, "fib_triestat");
2547 proc_net_remove(&init_net, "route");
2548 }
2549
2550 #endif /* CONFIG_PROC_FS */