1 /*
2 * This program is free software; you can redistribute it and/or
3 * modify it under the terms of the GNU General Public License
4 * as published by the Free Software Foundation; either version
5 * 2 of the License, or (at your option) any later version.
6 *
7 * Robert Olsson <robert.olsson@its.uu.se> Uppsala Universitet
8 * & Swedish University of Agricultural Sciences.
9 *
10 * Jens Laas <jens.laas@data.slu.se> Swedish University of
11 * Agricultural Sciences.
12 *
13 * Hans Liss <hans.liss@its.uu.se> Uppsala Universitet
14 *
15 * This work is based on the LPC-trie which is originally described in:
16 *
17 * An experimental study of compression methods for dynamic tries
18 * Stefan Nilsson and Matti Tikkanen. Algorithmica, 33(1):19-33, 2002.
19 * http://www.nada.kth.se/~snilsson/public/papers/dyntrie2/
20 *
21 *
22 * IP-address lookup using LC-tries. Stefan Nilsson and Gunnar Karlsson
23 * IEEE Journal on Selected Areas in Communications, 17(6):1083-1092, June 1999
24 *
25 * Version: $Id: fib_trie.c,v 1.3 2005/06/08 14:20:01 robert Exp $
26 *
27 *
28 * Code from fib_hash has been reused which includes the following header:
29 *
30 *
31 * INET An implementation of the TCP/IP protocol suite for the LINUX
32 * operating system. INET is implemented using the BSD Socket
33 * interface as the means of communication with the user level.
34 *
35 * IPv4 FIB: lookup engine and maintenance routines.
36 *
37 *
38 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
39 *
40 * This program is free software; you can redistribute it and/or
41 * modify it under the terms of the GNU General Public License
42 * as published by the Free Software Foundation; either version
43 * 2 of the License, or (at your option) any later version.
44 *
45 * Substantial contributions to this work come from:
46 *
47 * David S. Miller, <davem@davemloft.net>
48 * Stephen Hemminger <shemminger@osdl.org>
49 * Paul E. McKenney <paulmck@us.ibm.com>
50 * Patrick McHardy <kaber@trash.net>
51 */
52
53 #define VERSION "0.408"
54
55 #include <asm/uaccess.h>
56 #include <asm/system.h>
57 #include <linux/bitops.h>
58 #include <linux/types.h>
59 #include <linux/kernel.h>
60 #include <linux/mm.h>
61 #include <linux/string.h>
62 #include <linux/socket.h>
63 #include <linux/sockios.h>
64 #include <linux/errno.h>
65 #include <linux/in.h>
66 #include <linux/inet.h>
67 #include <linux/inetdevice.h>
68 #include <linux/netdevice.h>
69 #include <linux/if_arp.h>
70 #include <linux/proc_fs.h>
71 #include <linux/rcupdate.h>
72 #include <linux/skbuff.h>
73 #include <linux/netlink.h>
74 #include <linux/init.h>
75 #include <linux/list.h>
76 #include <net/net_namespace.h>
77 #include <net/ip.h>
78 #include <net/protocol.h>
79 #include <net/route.h>
80 #include <net/tcp.h>
81 #include <net/sock.h>
82 #include <net/ip_fib.h>
83 #include "fib_lookup.h"
84
85 #undef CONFIG_IP_FIB_TRIE_STATS
86 #define MAX_STAT_DEPTH 32
87
88 #define KEYLENGTH (8*sizeof(t_key))
89
90 typedef unsigned int t_key;
91
92 #define T_TNODE 0
93 #define T_LEAF 1
94 #define NODE_TYPE_MASK 0x1UL
95 #define NODE_TYPE(node) ((node)->parent & NODE_TYPE_MASK)
96
97 #define IS_TNODE(n) (!(n->parent & T_LEAF))
98 #define IS_LEAF(n) (n->parent & T_LEAF)
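/*
 * Example of the type tagging (a sketch; the address is hypothetical):
 * tnodes and leaves are at least pointer-aligned, so the low bit of the
 * parent word is free to carry the node type. A leaf whose parent tnode
 * sits at 0xffff880012345670 stores
 *
 *	parent = 0xffff880012345670 | T_LEAF = 0xffff880012345671
 *
 * NODE_TYPE() reads the tag back and node_parent() masks it off with
 * ~NODE_TYPE_MASK to recover the pointer.
 */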
99
100 struct node {
101 t_key key;
102 unsigned long parent;
103 };
104
105 struct leaf {
106 t_key key;
107 unsigned long parent;
108 struct hlist_head list;
109 struct rcu_head rcu;
110 };
111
112 struct leaf_info {
113 struct hlist_node hlist;
114 struct rcu_head rcu;
115 int plen;
116 struct list_head falh;
117 };
118
119 struct tnode {
120 t_key key;
121 unsigned long parent;
122 unsigned short pos:5; /* 2log(KEYLENGTH) bits needed */
123 unsigned short bits:5; /* 2log(KEYLENGTH) bits needed */
124 unsigned short full_children; /* KEYLENGTH bits needed */
125 unsigned short empty_children; /* KEYLENGTH bits needed */
126 struct rcu_head rcu;
127 struct node *child[0];
128 };
129
130 #ifdef CONFIG_IP_FIB_TRIE_STATS
131 struct trie_use_stats {
132 unsigned int gets;
133 unsigned int backtrack;
134 unsigned int semantic_match_passed;
135 unsigned int semantic_match_miss;
136 unsigned int null_node_hit;
137 unsigned int resize_node_skipped;
138 };
139 #endif
140
141 struct trie_stat {
142 unsigned int totdepth;
143 unsigned int maxdepth;
144 unsigned int tnodes;
145 unsigned int leaves;
146 unsigned int nullpointers;
147 unsigned int nodesizes[MAX_STAT_DEPTH];
148 };
149
150 struct trie {
151 struct node *trie;
152 #ifdef CONFIG_IP_FIB_TRIE_STATS
153 struct trie_use_stats stats;
154 #endif
155 int size;
156 unsigned int revision;
157 };
158
159 static void put_child(struct trie *t, struct tnode *tn, int i, struct node *n);
160 static void tnode_put_child_reorg(struct tnode *tn, int i, struct node *n, int wasfull);
161 static struct node *resize(struct trie *t, struct tnode *tn);
162 static struct tnode *inflate(struct trie *t, struct tnode *tn);
163 static struct tnode *halve(struct trie *t, struct tnode *tn);
164 static void tnode_free(struct tnode *tn);
165
166 static struct kmem_cache *fn_alias_kmem __read_mostly;
167
168 static inline struct tnode *node_parent(struct node *node)
169 {
170 struct tnode *ret;
171
172 ret = (struct tnode *)(node->parent & ~NODE_TYPE_MASK);
173 return rcu_dereference(ret);
174 }
175
176 static inline void node_set_parent(struct node *node, struct tnode *ptr)
177 {
178 rcu_assign_pointer(node->parent,
179 (unsigned long)ptr | NODE_TYPE(node));
180 }
181
182 /* rcu_read_lock needs to be held by the caller on the read side */
183
184 static inline struct node *tnode_get_child(struct tnode *tn, int i)
185 {
186 BUG_ON(i >= 1 << tn->bits);
187
188 return rcu_dereference(tn->child[i]);
189 }
190
191 static inline int tnode_child_length(const struct tnode *tn)
192 {
193 return 1 << tn->bits;
194 }
195
196 static inline t_key mask_pfx(t_key k, unsigned short l)
197 {
198 return (l == 0) ? 0 : k >> (KEYLENGTH-l) << (KEYLENGTH-l);
199 }
200
201 static inline t_key tkey_extract_bits(t_key a, int offset, int bits)
202 {
203 if (offset < KEYLENGTH)
204 return ((t_key)(a << offset)) >> (KEYLENGTH - bits);
205 else
206 return 0;
207 }
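/*
 * Worked example (illustrative key, not taken from a live table): for
 * a = 0xC0A80100 (192.168.1.0 in host byte order),
 *
 *	tkey_extract_bits(a, 0, 8) == (a << 0) >> 24 == 0xC0 (192)
 *	tkey_extract_bits(a, 8, 8) == (a << 8) >> 24 == 0xA8 (168)
 *
 * i.e. 'offset' counts bits from the most significant end and 'bits'
 * selects how many of them to return, right-aligned.
 */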
208
209 static inline int tkey_equals(t_key a, t_key b)
210 {
211 return a == b;
212 }
213
214 static inline int tkey_sub_equals(t_key a, int offset, int bits, t_key b)
215 {
216 if (bits == 0 || offset >= KEYLENGTH)
217 return 1;
218 bits = bits > KEYLENGTH ? KEYLENGTH : bits;
219 return ((a ^ b) << offset) >> (KEYLENGTH - bits) == 0;
220 }
221
222 static inline int tkey_mismatch(t_key a, int offset, t_key b)
223 {
224 t_key diff = a ^ b;
225 int i = offset;
226
227 if (!diff)
228 return 0;
229 while ((diff << i) >> (KEYLENGTH-1) == 0)
230 i++;
231 return i;
232 }
233
234 /*
235 To understand this stuff, an understanding of keys and all their bits is
236 necessary. Every node in the trie has a key associated with it, but not
237 all of the bits in that key are significant.
238
239 Consider a node 'n' and its parent 'tp'.
240
241 If n is a leaf, every bit in its key is significant. Its presence is
242 necessitated by path compression, since during a tree traversal (when
243 searching for a leaf - unless we are doing an insertion) we will completely
244 ignore all skipped bits we encounter. Thus we need to verify, at the end of
245 a potentially successful search, that we have indeed been walking the
246 correct key path.
247
248 Note that we can never "miss" the correct key in the tree if present by
249 following the wrong path. Path compression ensures that segments of the key
250 that are the same for all keys with a given prefix are skipped, but the
251 skipped part *is* identical for each node in the subtrie below the skipped
252 bit! trie_insert() in this implementation takes care of that - note the
253 call to tkey_sub_equals() in trie_insert().
254
255 If n is an internal node - a 'tnode' here - the various parts of its key
256 have many different meanings.
257
258 Example:
259 _________________________________________________________________
260 | i | i | i | i | i | i | i | N | N | N | S | S | S | S | S | C |
261 -----------------------------------------------------------------
262 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
263
264 _________________________________________________________________
265 | C | C | C | u | u | u | u | u | u | u | u | u | u | u | u | u |
266 -----------------------------------------------------------------
267 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
268
269 tp->pos = 7
270 tp->bits = 3
271 n->pos = 15
272 n->bits = 4
273
274 First, let's just ignore the bits that come before the parent tp, that is
275 the bits from 0 to (tp->pos-1). They are *known* but at this point we do
276 not use them for anything.
277
278 The bits from (tp->pos) to (tp->pos + tp->bits - 1) - "N", above - are the
279 index into the parent's child array. That is, they will be used to find
280 'n' among tp's children.
281
282 The bits from (tp->pos + tp->bits) to (n->pos - 1) - "S" - are skipped bits
283 for the node n.
284
285 All the bits we have seen so far are significant to the node n. The rest
286 of the bits are really not needed or indeed known in n->key.
287
288 The bits from (n->pos) to (n->pos + n->bits - 1) - "C" - are the index into
289 n's child array, and will of course be different for each child.
290
291
292 The rest of the bits, from (n->pos + n->bits) onward, are completely unknown
293 at this point.
294
295 */
296
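/*
 * Worked example for the layout above, using the same pos/bits values:
 * with tp->pos = 7 and tp->bits = 3, the index of n in tp's child array
 * is tkey_extract_bits(n->key, 7, 3) - the three "N" bits. With
 * n->pos = 15 and n->bits = 4, the index of one of n's children is
 * tkey_extract_bits(key, 15, 4) - the four "C" bits. The "S" bits
 * (positions 10..14) are the bits skipped by path compression below tp.
 */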
297 static inline void check_tnode(const struct tnode *tn)
298 {
299 WARN_ON(tn && tn->pos+tn->bits > 32);
300 }
301
302 static int halve_threshold = 25;
303 static int inflate_threshold = 50;
304 static int halve_threshold_root = 8;
305 static int inflate_threshold_root = 15;
306
307
308 static void __alias_free_mem(struct rcu_head *head)
309 {
310 struct fib_alias *fa = container_of(head, struct fib_alias, rcu);
311 kmem_cache_free(fn_alias_kmem, fa);
312 }
313
314 static inline void alias_free_mem_rcu(struct fib_alias *fa)
315 {
316 call_rcu(&fa->rcu, __alias_free_mem);
317 }
318
319 static void __leaf_free_rcu(struct rcu_head *head)
320 {
321 kfree(container_of(head, struct leaf, rcu));
322 }
323
324 static void __leaf_info_free_rcu(struct rcu_head *head)
325 {
326 kfree(container_of(head, struct leaf_info, rcu));
327 }
328
329 static inline void free_leaf_info(struct leaf_info *leaf)
330 {
331 call_rcu(&leaf->rcu, __leaf_info_free_rcu);
332 }
333
334 static struct tnode *tnode_alloc(unsigned int size)
335 {
336 struct page *pages;
337
338 if (size <= PAGE_SIZE)
339 return kcalloc(size, 1, GFP_KERNEL);
340
341 pages = alloc_pages(GFP_KERNEL|__GFP_ZERO, get_order(size));
342 if (!pages)
343 return NULL;
344
345 return page_address(pages);
346 }
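/*
 * Size example (a sketch assuming a 64-bit build and 4 KiB pages): a tnode
 * with bits = 5 needs sizeof(struct tnode) + 32 * sizeof(struct node *),
 * roughly 300 bytes, and comes from kcalloc(). A tnode with bits = 10
 * needs 1024 child pointers (about 8 KiB), which exceeds PAGE_SIZE and is
 * therefore served by alloc_pages().
 */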
347
348 static void __tnode_free_rcu(struct rcu_head *head)
349 {
350 struct tnode *tn = container_of(head, struct tnode, rcu);
351 unsigned int size = sizeof(struct tnode) +
352 (1 << tn->bits) * sizeof(struct node *);
353
354 if (size <= PAGE_SIZE)
355 kfree(tn);
356 else
357 free_pages((unsigned long)tn, get_order(size));
358 }
359
360 static inline void tnode_free(struct tnode *tn)
361 {
362 if (IS_LEAF(tn)) {
363 struct leaf *l = (struct leaf *) tn;
364 call_rcu_bh(&l->rcu, __leaf_free_rcu);
365 } else
366 call_rcu(&tn->rcu, __tnode_free_rcu);
367 }
368
369 static struct leaf *leaf_new(void)
370 {
371 struct leaf *l = kmalloc(sizeof(struct leaf), GFP_KERNEL);
372 if (l) {
373 l->parent = T_LEAF;
374 INIT_HLIST_HEAD(&l->list);
375 }
376 return l;
377 }
378
379 static struct leaf_info *leaf_info_new(int plen)
380 {
381 struct leaf_info *li = kmalloc(sizeof(struct leaf_info), GFP_KERNEL);
382 if (li) {
383 li->plen = plen;
384 INIT_LIST_HEAD(&li->falh);
385 }
386 return li;
387 }
388
389 static struct tnode* tnode_new(t_key key, int pos, int bits)
390 {
391 int nchildren = 1<<bits;
392 int sz = sizeof(struct tnode) + nchildren * sizeof(struct node *);
393 struct tnode *tn = tnode_alloc(sz);
394
395 if (tn) {
396 memset(tn, 0, sz);
397 tn->parent = T_TNODE;
398 tn->pos = pos;
399 tn->bits = bits;
400 tn->key = key;
401 tn->full_children = 0;
402 tn->empty_children = 1<<bits;
403 }
404
405 pr_debug("AT %p s=%u %u\n", tn, (unsigned int) sizeof(struct tnode),
406 (unsigned int) (sizeof(struct node) * 1<<bits));
407 return tn;
408 }
409
410 /*
411 * Check whether a tnode 'n' is "full", i.e. it is an internal node
412 * and no bits are skipped. See discussion in dyntree paper p. 6
413 */
414
415 static inline int tnode_full(const struct tnode *tn, const struct node *n)
416 {
417 if (n == NULL || IS_LEAF(n))
418 return 0;
419
420 return ((struct tnode *) n)->pos == tn->pos + tn->bits;
421 }
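/*
 * Example: if tn->pos = 7 and tn->bits = 3, a child tnode with pos = 10
 * is "full" (it starts right where tn's index bits end, no skipped bits),
 * while a child tnode with pos = 12 skips two bits and is not full.
 */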
422
423 static inline void put_child(struct trie *t, struct tnode *tn, int i, struct node *n)
424 {
425 tnode_put_child_reorg(tn, i, n, -1);
426 }
427
428 /*
429 * Add a child at position i overwriting the old value.
430 * Update the value of full_children and empty_children.
431 */
432
433 static void tnode_put_child_reorg(struct tnode *tn, int i, struct node *n, int wasfull)
434 {
435 struct node *chi = tn->child[i];
436 int isfull;
437
438 BUG_ON(i >= 1<<tn->bits);
439
440
441 /* update emptyChildren */
442 if (n == NULL && chi != NULL)
443 tn->empty_children++;
444 else if (n != NULL && chi == NULL)
445 tn->empty_children--;
446
447 /* update fullChildren */
448 if (wasfull == -1)
449 wasfull = tnode_full(tn, chi);
450
451 isfull = tnode_full(tn, n);
452 if (wasfull && !isfull)
453 tn->full_children--;
454 else if (!wasfull && isfull)
455 tn->full_children++;
456
457 if (n)
458 node_set_parent(n, tn);
459
460 rcu_assign_pointer(tn->child[i], n);
461 }
462
463 static struct node *resize(struct trie *t, struct tnode *tn)
464 {
465 int i;
466 int err = 0;
467 struct tnode *old_tn;
468 int inflate_threshold_use;
469 int halve_threshold_use;
470 int max_resize;
471
472 if (!tn)
473 return NULL;
474
475 pr_debug("In tnode_resize %p inflate_threshold=%d threshold=%d\n",
476 tn, inflate_threshold, halve_threshold);
477
478 /* No children */
479 if (tn->empty_children == tnode_child_length(tn)) {
480 tnode_free(tn);
481 return NULL;
482 }
483 /* One child */
484 if (tn->empty_children == tnode_child_length(tn) - 1)
485 for (i = 0; i < tnode_child_length(tn); i++) {
486 struct node *n;
487
488 n = tn->child[i];
489 if (!n)
490 continue;
491
492 /* compress one level */
493 node_set_parent(n, NULL);
494 tnode_free(tn);
495 return n;
496 }
497 /*
498 * Double as long as the resulting node has a number of
499 * nonempty nodes that are above the threshold.
500 */
501
502 /*
503 * From "Implementing a dynamic compressed trie" by Stefan Nilsson of
504 * the Helsinki University of Technology and Matti Tikkanen of Nokia
505 * Telecommunications, page 6:
506 * "A node is doubled if the ratio of non-empty children to all
507 * children in the *doubled* node is at least 'high'."
508 *
509 * 'high' in this instance is the variable 'inflate_threshold'. It
510 * is expressed as a percentage, so we multiply it with
511 * tnode_child_length() and instead of multiplying by 2 (since the
512 * child array will be doubled by inflate()) and multiplying
513 * the left-hand side by 100 (to handle the percentage thing) we
514 * multiply the left-hand side by 50.
515 *
516 * The left-hand side may look a bit weird: tnode_child_length(tn)
517 * - tn->empty_children is of course the number of non-null children
518 * in the current node. tn->full_children is the number of "full"
519 * children, that is non-null tnodes with a skip value of 0.
520 * All of those will be doubled in the resulting inflated tnode, so
521 * we just count them one extra time here.
522 *
523 * A clearer way to write this would be:
524 *
525 * to_be_doubled = tn->full_children;
526 * not_to_be_doubled = tnode_child_length(tn) - tn->empty_children -
527 * tn->full_children;
528 *
529 * new_child_length = tnode_child_length(tn) * 2;
530 *
531 * new_fill_factor = 100 * (not_to_be_doubled + 2*to_be_doubled) /
532 * new_child_length;
533 * if (new_fill_factor >= inflate_threshold)
534 *
535 * ...and so on, though it would mess up the while () loop.
536 *
537 * anyway,
538 * 100 * (not_to_be_doubled + 2*to_be_doubled) / new_child_length >=
539 * inflate_threshold
540 *
541 * avoid a division:
542 * 100 * (not_to_be_doubled + 2*to_be_doubled) >=
543 * inflate_threshold * new_child_length
544 *
545 * expand not_to_be_doubled and to_be_doubled, and shorten:
546 * 100 * (tnode_child_length(tn) - tn->empty_children +
547 * tn->full_children) >= inflate_threshold * new_child_length
548 *
549 * expand new_child_length:
550 * 100 * (tnode_child_length(tn) - tn->empty_children +
551 * tn->full_children) >=
552 * inflate_threshold * tnode_child_length(tn) * 2
553 *
554 * shorten again:
555 * 50 * (tn->full_children + tnode_child_length(tn) -
556 * tn->empty_children) >= inflate_threshold *
557 * tnode_child_length(tn)
558 *
559 */
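/*
 * Numeric example (illustrative numbers only): for a tnode with
 * tnode_child_length(tn) = 16, empty_children = 6 and full_children = 8,
 * the doubled node would hold (16 - 6 - 8) + 2*8 = 18 non-empty slots out
 * of 32, a fill factor of about 56%. With inflate_threshold = 50 the
 * simplified test reads 50 * (8 + 16 - 6) >= 50 * 16, i.e. 900 >= 800,
 * so this node would be inflated.
 */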
560
561 check_tnode(tn);
562
563 /* Keep root node larger */
564
565 if (!tn->parent)
566 inflate_threshold_use = inflate_threshold_root;
567 else
568 inflate_threshold_use = inflate_threshold;
569
570 err = 0;
571 max_resize = 10;
572 while ((tn->full_children > 0 && max_resize-- &&
573 50 * (tn->full_children + tnode_child_length(tn) - tn->empty_children) >=
574 inflate_threshold_use * tnode_child_length(tn))) {
575
576 old_tn = tn;
577 tn = inflate(t, tn);
578 if (IS_ERR(tn)) {
579 tn = old_tn;
580 #ifdef CONFIG_IP_FIB_TRIE_STATS
581 t->stats.resize_node_skipped++;
582 #endif
583 break;
584 }
585 }
586
587 if (max_resize < 0) {
588 if (!tn->parent)
589 printk(KERN_WARNING "Fix inflate_threshold_root. Now=%d size=%d bits\n",
590 inflate_threshold_root, tn->bits);
591 else
592 printk(KERN_WARNING "Fix inflate_threshold. Now=%d size=%d bits\n",
593 inflate_threshold, tn->bits);
594 }
595
596 check_tnode(tn);
597
598 /*
599 * Halve as long as the number of empty children in this
600 * node is above threshold.
601 */
602
603
604 /* Keep root node larger */
605
606 if (!tn->parent)
607 halve_threshold_use = halve_threshold_root;
608 else
609 halve_threshold_use = halve_threshold;
610
611 err = 0;
612 max_resize = 10;
613 while (tn->bits > 1 && max_resize-- &&
614 100 * (tnode_child_length(tn) - tn->empty_children) <
615 halve_threshold_use * tnode_child_length(tn)) {
616
617 old_tn = tn;
618 tn = halve(t, tn);
619 if (IS_ERR(tn)) {
620 tn = old_tn;
621 #ifdef CONFIG_IP_FIB_TRIE_STATS
622 t->stats.resize_node_skipped++;
623 #endif
624 break;
625 }
626 }
627
628 if (max_resize < 0) {
629 if (!tn->parent)
630 printk(KERN_WARNING "Fix halve_threshold_root. Now=%d size=%d bits\n",
631 halve_threshold_root, tn->bits);
632 else
633 printk(KERN_WARNING "Fix halve_threshold. Now=%d size=%d bits\n",
634 halve_threshold, tn->bits);
635 }
636
637 /* Only one child remains */
638 if (tn->empty_children == tnode_child_length(tn) - 1)
639 for (i = 0; i < tnode_child_length(tn); i++) {
640 struct node *n;
641
642 n = tn->child[i];
643 if (!n)
644 continue;
645
646 /* compress one level */
647
648 node_set_parent(n, NULL);
649 tnode_free(tn);
650 return n;
651 }
652
653 return (struct node *) tn;
654 }
655
656 static struct tnode *inflate(struct trie *t, struct tnode *tn)
657 {
658 struct tnode *inode;
659 struct tnode *oldtnode = tn;
660 int olen = tnode_child_length(tn);
661 int i;
662
663 pr_debug("In inflate\n");
664
665 tn = tnode_new(oldtnode->key, oldtnode->pos, oldtnode->bits + 1);
666
667 if (!tn)
668 return ERR_PTR(-ENOMEM);
669
670 /*
671 * Preallocate and store tnodes before the actual work so we
672 * don't get into an inconsistent state if memory allocation
673 * fails. In case of failure we return the oldnode and inflate
674 * of tnode is ignored.
675 */
676
677 for (i = 0; i < olen; i++) {
678 struct tnode *inode = (struct tnode *) tnode_get_child(oldtnode, i);
679
680 if (inode &&
681 IS_TNODE(inode) &&
682 inode->pos == oldtnode->pos + oldtnode->bits &&
683 inode->bits > 1) {
684 struct tnode *left, *right;
685 t_key m = ~0U << (KEYLENGTH - 1) >> inode->pos;
686
687 left = tnode_new(inode->key&(~m), inode->pos + 1,
688 inode->bits - 1);
689 if (!left)
690 goto nomem;
691
692 right = tnode_new(inode->key|m, inode->pos + 1,
693 inode->bits - 1);
694
695 if (!right) {
696 tnode_free(left);
697 goto nomem;
698 }
699
700 put_child(t, tn, 2*i, (struct node *) left);
701 put_child(t, tn, 2*i+1, (struct node *) right);
702 }
703 }
704
705 for (i = 0; i < olen; i++) {
706 struct node *node = tnode_get_child(oldtnode, i);
707 struct tnode *left, *right;
708 int size, j;
709
710 /* An empty child */
711 if (node == NULL)
712 continue;
713
714 /* A leaf or an internal node with skipped bits */
715
716 if (IS_LEAF(node) || ((struct tnode *) node)->pos >
717 tn->pos + tn->bits - 1) {
718 if (tkey_extract_bits(node->key, oldtnode->pos + oldtnode->bits,
719 1) == 0)
720 put_child(t, tn, 2*i, node);
721 else
722 put_child(t, tn, 2*i+1, node);
723 continue;
724 }
725
726 /* An internal node with two children */
727 inode = (struct tnode *) node;
728
729 if (inode->bits == 1) {
730 put_child(t, tn, 2*i, inode->child[0]);
731 put_child(t, tn, 2*i+1, inode->child[1]);
732
733 tnode_free(inode);
734 continue;
735 }
736
737 /* An internal node with more than two children */
738
739 /* We will replace this node 'inode' with two new
740 * ones, 'left' and 'right', each with half of the
741 * original children. The two new nodes will have
742 * a position one bit further down the key and this
743 * means that the "significant" part of their keys
744 * (see the discussion near the top of this file)
745 * will differ by one bit, which will be "0" in
746 * left's key and "1" in right's key. Since we are
747 * moving the key position by one step, the bit that
748 * we are moving away from - the bit at position
749 * (inode->pos) - is the one that will differ between
750 * left and right. So... we synthesize that bit in the
751 * two new keys.
752 * The mask 'm' below will be a single "one" bit at
753 * the position (inode->pos)
754 */
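/* Worked example (illustrative pos value): for inode->pos = 5,
 * m = ~0U << (KEYLENGTH - 1) >> 5 = 0x04000000 - a single one bit at
 * position 5 counting from the most significant bit - so inode->key & ~m
 * clears that bit for 'left' and inode->key | m sets it for 'right'.
 */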
755
756 /* Use the old key, but set the new significant
757 * bit to zero.
758 */
759
760 left = (struct tnode *) tnode_get_child(tn, 2*i);
761 put_child(t, tn, 2*i, NULL);
762
763 BUG_ON(!left);
764
765 right = (struct tnode *) tnode_get_child(tn, 2*i+1);
766 put_child(t, tn, 2*i+1, NULL);
767
768 BUG_ON(!right);
769
770 size = tnode_child_length(left);
771 for (j = 0; j < size; j++) {
772 put_child(t, left, j, inode->child[j]);
773 put_child(t, right, j, inode->child[j + size]);
774 }
775 put_child(t, tn, 2*i, resize(t, left));
776 put_child(t, tn, 2*i+1, resize(t, right));
777
778 tnode_free(inode);
779 }
780 tnode_free(oldtnode);
781 return tn;
782 nomem:
783 {
784 int size = tnode_child_length(tn);
785 int j;
786
787 for (j = 0; j < size; j++)
788 if (tn->child[j])
789 tnode_free((struct tnode *)tn->child[j]);
790
791 tnode_free(tn);
792
793 return ERR_PTR(-ENOMEM);
794 }
795 }
796
797 static struct tnode *halve(struct trie *t, struct tnode *tn)
798 {
799 struct tnode *oldtnode = tn;
800 struct node *left, *right;
801 int i;
802 int olen = tnode_child_length(tn);
803
804 pr_debug("In halve\n");
805
806 tn = tnode_new(oldtnode->key, oldtnode->pos, oldtnode->bits - 1);
807
808 if (!tn)
809 return ERR_PTR(-ENOMEM);
810
811 /*
812 * Preallocate and store tnodes before the actual work so we
813 * don't get into an inconsistent state if memory allocation
814 * fails. In case of failure we return the oldnode and halve
815 * of tnode is ignored.
816 */
817
818 for (i = 0; i < olen; i += 2) {
819 left = tnode_get_child(oldtnode, i);
820 right = tnode_get_child(oldtnode, i+1);
821
822 /* Two nonempty children */
823 if (left && right) {
824 struct tnode *newn;
825
826 newn = tnode_new(left->key, tn->pos + tn->bits, 1);
827
828 if (!newn)
829 goto nomem;
830
831 put_child(t, tn, i/2, (struct node *)newn);
832 }
833
834 }
835
836 for (i = 0; i < olen; i += 2) {
837 struct tnode *newBinNode;
838
839 left = tnode_get_child(oldtnode, i);
840 right = tnode_get_child(oldtnode, i+1);
841
842 /* At least one of the children is empty */
843 if (left == NULL) {
844 if (right == NULL) /* Both are empty */
845 continue;
846 put_child(t, tn, i/2, right);
847 continue;
848 }
849
850 if (right == NULL) {
851 put_child(t, tn, i/2, left);
852 continue;
853 }
854
855 /* Two nonempty children */
856 newBinNode = (struct tnode *) tnode_get_child(tn, i/2);
857 put_child(t, tn, i/2, NULL);
858 put_child(t, newBinNode, 0, left);
859 put_child(t, newBinNode, 1, right);
860 put_child(t, tn, i/2, resize(t, newBinNode));
861 }
862 tnode_free(oldtnode);
863 return tn;
864 nomem:
865 {
866 int size = tnode_child_length(tn);
867 int j;
868
869 for (j = 0; j < size; j++)
870 if (tn->child[j])
871 tnode_free((struct tnode *)tn->child[j]);
872
873 tnode_free(tn);
874
875 return ERR_PTR(-ENOMEM);
876 }
877 }
878
879 static void trie_init(struct trie *t)
880 {
881 if (!t)
882 return;
883
884 t->size = 0;
885 rcu_assign_pointer(t->trie, NULL);
886 t->revision = 0;
887 #ifdef CONFIG_IP_FIB_TRIE_STATS
888 memset(&t->stats, 0, sizeof(struct trie_use_stats));
889 #endif
890 }
891
892 /* Read side must use rcu_read_lock; currently the dump routines
893 call this via get_fa_head and dump */
894
895 static struct leaf_info *find_leaf_info(struct leaf *l, int plen)
896 {
897 struct hlist_head *head = &l->list;
898 struct hlist_node *node;
899 struct leaf_info *li;
900
901 hlist_for_each_entry_rcu(li, node, head, hlist)
902 if (li->plen == plen)
903 return li;
904
905 return NULL;
906 }
907
908 static inline struct list_head * get_fa_head(struct leaf *l, int plen)
909 {
910 struct leaf_info *li = find_leaf_info(l, plen);
911
912 if (!li)
913 return NULL;
914
915 return &li->falh;
916 }
917
918 static void insert_leaf_info(struct hlist_head *head, struct leaf_info *new)
919 {
920 struct leaf_info *li = NULL, *last = NULL;
921 struct hlist_node *node;
922
923 if (hlist_empty(head)) {
924 hlist_add_head_rcu(&new->hlist, head);
925 } else {
926 hlist_for_each_entry(li, node, head, hlist) {
927 if (new->plen > li->plen)
928 break;
929
930 last = li;
931 }
932 if (last)
933 hlist_add_after_rcu(&last->hlist, &new->hlist);
934 else
935 hlist_add_before_rcu(&new->hlist, &li->hlist);
936 }
937 }
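/*
 * Example: the hlist is kept sorted by decreasing prefix length, so
 * inserting a /24 leaf_info into a list holding /32, /16 and /8 entries
 * places it between /32 and /16, and lookups via check_leaf() try the
 * longest prefixes first.
 */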
938
939 /* rcu_read_lock needs to be held by the caller on the read side */
940
941 static struct leaf *
942 fib_find_node(struct trie *t, u32 key)
943 {
944 int pos;
945 struct tnode *tn;
946 struct node *n;
947
948 pos = 0;
949 n = rcu_dereference(t->trie);
950
951 while (n != NULL && NODE_TYPE(n) == T_TNODE) {
952 tn = (struct tnode *) n;
953
954 check_tnode(tn);
955
956 if (tkey_sub_equals(tn->key, pos, tn->pos-pos, key)) {
957 pos = tn->pos + tn->bits;
958 n = tnode_get_child(tn, tkey_extract_bits(key, tn->pos, tn->bits));
959 } else
960 break;
961 }
962 /* Case we have found a leaf. Compare prefixes */
963
964 if (n != NULL && IS_LEAF(n) && tkey_equals(key, n->key))
965 return (struct leaf *)n;
966
967 return NULL;
968 }
969
970 static struct node *trie_rebalance(struct trie *t, struct tnode *tn)
971 {
972 int wasfull;
973 t_key cindex, key = tn->key;
974 struct tnode *tp;
975
976 while (tn != NULL && (tp = node_parent((struct node *)tn)) != NULL) {
977 cindex = tkey_extract_bits(key, tp->pos, tp->bits);
978 wasfull = tnode_full(tp, tnode_get_child(tp, cindex));
979 tn = (struct tnode *) resize (t, (struct tnode *)tn);
980 tnode_put_child_reorg((struct tnode *)tp, cindex,(struct node*)tn, wasfull);
981
982 tp = node_parent((struct node *) tn);
983 if (!tp)
984 break;
985 tn = tp;
986 }
987
988 /* Handle last (top) tnode */
989 if (IS_TNODE(tn))
990 tn = (struct tnode*) resize(t, (struct tnode *)tn);
991
992 return (struct node*) tn;
993 }
994
995 /* only used from updater-side */
996
997 static struct list_head *
998 fib_insert_node(struct trie *t, int *err, u32 key, int plen)
999 {
1000 int pos, newpos;
1001 struct tnode *tp = NULL, *tn = NULL;
1002 struct node *n;
1003 struct leaf *l;
1004 int missbit;
1005 struct list_head *fa_head = NULL;
1006 struct leaf_info *li;
1007 t_key cindex;
1008
1009 pos = 0;
1010 n = t->trie;
1011
1012 /* If we point to NULL, stop. Either the tree is empty and we should
1013 * just put a new leaf in it, or we have reached an empty child slot,
1014 * and we should just put our new leaf in that.
1015 * If we point to a T_TNODE, check if it matches our key. Note that
1016 * a T_TNODE might be skipping any number of bits - its 'pos' need
1017 * not be the parent's 'pos'+'bits'!
1018 *
1019 * If it does match the current key, get pos/bits from it, extract
1020 * the index from our key, push the T_TNODE and walk the tree.
1021 *
1022 * If it doesn't, we have to replace it with a new T_TNODE.
1023 *
1024 * If we point to a T_LEAF, it might or might not have the same key
1025 * as we do. If it does, just change the value, update the T_LEAF's
1026 * value, and return it.
1027 * If it doesn't, we need to replace it with a T_TNODE.
1028 */
1029
1030 while (n != NULL && NODE_TYPE(n) == T_TNODE) {
1031 tn = (struct tnode *) n;
1032
1033 check_tnode(tn);
1034
1035 if (tkey_sub_equals(tn->key, pos, tn->pos-pos, key)) {
1036 tp = tn;
1037 pos = tn->pos + tn->bits;
1038 n = tnode_get_child(tn, tkey_extract_bits(key, tn->pos, tn->bits));
1039
1040 BUG_ON(n && node_parent(n) != tn);
1041 } else
1042 break;
1043 }
1044
1045 /*
1046 * n ----> NULL, LEAF or TNODE
1047 *
1048 * tp is n's (parent) ----> NULL or TNODE
1049 */
1050
1051 BUG_ON(tp && IS_LEAF(tp));
1052
1053 /* Case 1: n is a leaf. Compare prefixes */
1054
1055 if (n != NULL && IS_LEAF(n) && tkey_equals(key, n->key)) {
1056 struct leaf *l = (struct leaf *) n;
1057
1058 li = leaf_info_new(plen);
1059
1060 if (!li) {
1061 *err = -ENOMEM;
1062 goto err;
1063 }
1064
1065 fa_head = &li->falh;
1066 insert_leaf_info(&l->list, li);
1067 goto done;
1068 }
1069 t->size++;
1070 l = leaf_new();
1071
1072 if (!l) {
1073 *err = -ENOMEM;
1074 goto err;
1075 }
1076
1077 l->key = key;
1078 li = leaf_info_new(plen);
1079
1080 if (!li) {
1081 tnode_free((struct tnode *) l);
1082 *err = -ENOMEM;
1083 goto err;
1084 }
1085
1086 fa_head = &li->falh;
1087 insert_leaf_info(&l->list, li);
1088
1089 if (t->trie && n == NULL) {
1090 /* Case 2: n is NULL, and will just insert a new leaf */
1091
1092 node_set_parent((struct node *)l, tp);
1093
1094 cindex = tkey_extract_bits(key, tp->pos, tp->bits);
1095 put_child(t, (struct tnode *)tp, cindex, (struct node *)l);
1096 } else {
1097 /* Case 3: n is a LEAF or a TNODE and the key doesn't match. */
1098 /*
1099 * Add a new tnode here
1100 * first tnode need some special handling
1101 */
1102
1103 if (tp)
1104 pos = tp->pos+tp->bits;
1105 else
1106 pos = 0;
1107
1108 if (n) {
1109 newpos = tkey_mismatch(key, pos, n->key);
1110 tn = tnode_new(n->key, newpos, 1);
1111 } else {
1112 newpos = 0;
1113 tn = tnode_new(key, newpos, 1); /* First tnode */
1114 }
1115
1116 if (!tn) {
1117 free_leaf_info(li);
1118 tnode_free((struct tnode *) l);
1119 *err = -ENOMEM;
1120 goto err;
1121 }
1122
1123 node_set_parent((struct node *)tn, tp);
1124
1125 missbit = tkey_extract_bits(key, newpos, 1);
1126 put_child(t, tn, missbit, (struct node *)l);
1127 put_child(t, tn, 1-missbit, n);
1128
1129 if (tp) {
1130 cindex = tkey_extract_bits(key, tp->pos, tp->bits);
1131 put_child(t, (struct tnode *)tp, cindex, (struct node *)tn);
1132 } else {
1133 rcu_assign_pointer(t->trie, (struct node *)tn); /* First tnode */
1134 tp = tn;
1135 }
1136 }
1137
1138 if (tp && tp->pos + tp->bits > 32)
1139 printk(KERN_WARNING "fib_trie tp=%p pos=%d, bits=%d, key=%0x plen=%d\n",
1140 tp, tp->pos, tp->bits, key, plen);
1141
1142 /* Rebalance the trie */
1143
1144 rcu_assign_pointer(t->trie, trie_rebalance(t, tp));
1145 done:
1146 t->revision++;
1147 err:
1148 return fa_head;
1149 }
1150
1151 /*
1152 * Caller must hold RTNL.
1153 */
1154 static int fn_trie_insert(struct fib_table *tb, struct fib_config *cfg)
1155 {
1156 struct trie *t = (struct trie *) tb->tb_data;
1157 struct fib_alias *fa, *new_fa;
1158 struct list_head *fa_head = NULL;
1159 struct fib_info *fi;
1160 int plen = cfg->fc_dst_len;
1161 u8 tos = cfg->fc_tos;
1162 u32 key, mask;
1163 int err;
1164 struct leaf *l;
1165
1166 if (plen > 32)
1167 return -EINVAL;
1168
1169 key = ntohl(cfg->fc_dst);
1170
1171 pr_debug("Insert table=%u %08x/%d\n", tb->tb_id, key, plen);
1172
1173 mask = ntohl(inet_make_mask(plen));
1174
1175 if (key & ~mask)
1176 return -EINVAL;
1177
1178 key = key & mask;
1179
1180 fi = fib_create_info(cfg);
1181 if (IS_ERR(fi)) {
1182 err = PTR_ERR(fi);
1183 goto err;
1184 }
1185
1186 l = fib_find_node(t, key);
1187 fa = NULL;
1188
1189 if (l) {
1190 fa_head = get_fa_head(l, plen);
1191 fa = fib_find_alias(fa_head, tos, fi->fib_priority);
1192 }
1193
1194 /* Now fa, if non-NULL, points to the first fib alias
1195 * with the same keys [prefix,tos,priority], if such key already
1196 * exists or to the node before which we will insert new one.
1197 *
1198 * If fa is NULL, we will need to allocate a new one and
1199 * insert it at the head of the list.
1200 *
1201 * If fa_head is NULL, no fib node matched the destination key
1202 * and we need to allocate a new one of those as well.
1203 */
1204
1205 if (fa && fa->fa_info->fib_priority == fi->fib_priority) {
1206 struct fib_alias *fa_orig;
1207
1208 err = -EEXIST;
1209 if (cfg->fc_nlflags & NLM_F_EXCL)
1210 goto out;
1211
1212 if (cfg->fc_nlflags & NLM_F_REPLACE) {
1213 struct fib_info *fi_drop;
1214 u8 state;
1215
1216 if (fi->fib_treeref > 1)
1217 goto out;
1218
1219 err = -ENOBUFS;
1220 new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL);
1221 if (new_fa == NULL)
1222 goto out;
1223
1224 fi_drop = fa->fa_info;
1225 new_fa->fa_tos = fa->fa_tos;
1226 new_fa->fa_info = fi;
1227 new_fa->fa_type = cfg->fc_type;
1228 new_fa->fa_scope = cfg->fc_scope;
1229 state = fa->fa_state;
1230 new_fa->fa_state = state & ~FA_S_ACCESSED;
1231
1232 list_replace_rcu(&fa->fa_list, &new_fa->fa_list);
1233 alias_free_mem_rcu(fa);
1234
1235 fib_release_info(fi_drop);
1236 if (state & FA_S_ACCESSED)
1237 rt_cache_flush(-1);
1238 rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen,
1239 tb->tb_id, &cfg->fc_nlinfo, NLM_F_REPLACE);
1240
1241 goto succeeded;
1242 }
1243 /* Error if we find a perfect match which
1244 * uses the same scope, type, and nexthop
1245 * information.
1246 */
1247 fa_orig = fa;
1248 list_for_each_entry(fa, fa_orig->fa_list.prev, fa_list) {
1249 if (fa->fa_tos != tos)
1250 break;
1251 if (fa->fa_info->fib_priority != fi->fib_priority)
1252 break;
1253 if (fa->fa_type == cfg->fc_type &&
1254 fa->fa_scope == cfg->fc_scope &&
1255 fa->fa_info == fi) {
1256 goto out;
1257 }
1258 }
1259 if (!(cfg->fc_nlflags & NLM_F_APPEND))
1260 fa = fa_orig;
1261 }
1262 err = -ENOENT;
1263 if (!(cfg->fc_nlflags & NLM_F_CREATE))
1264 goto out;
1265
1266 err = -ENOBUFS;
1267 new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL);
1268 if (new_fa == NULL)
1269 goto out;
1270
1271 new_fa->fa_info = fi;
1272 new_fa->fa_tos = tos;
1273 new_fa->fa_type = cfg->fc_type;
1274 new_fa->fa_scope = cfg->fc_scope;
1275 new_fa->fa_state = 0;
1276 /*
1277 * Insert new entry to the list.
1278 */
1279
1280 if (!fa_head) {
1281 err = 0;
1282 fa_head = fib_insert_node(t, &err, key, plen);
1283 if (err)
1284 goto out_free_new_fa;
1285 }
1286
1287 list_add_tail_rcu(&new_fa->fa_list,
1288 (fa ? &fa->fa_list : fa_head));
1289
1290 rt_cache_flush(-1);
1291 rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen, tb->tb_id,
1292 &cfg->fc_nlinfo, 0);
1293 succeeded:
1294 return 0;
1295
1296 out_free_new_fa:
1297 kmem_cache_free(fn_alias_kmem, new_fa);
1298 out:
1299 fib_release_info(fi);
1300 err:
1301 return err;
1302 }
1303
1304
1305 /* should be called with rcu_read_lock */
1306 static inline int check_leaf(struct trie *t, struct leaf *l,
1307 t_key key, int *plen, const struct flowi *flp,
1308 struct fib_result *res)
1309 {
1310 int err, i;
1311 __be32 mask;
1312 struct leaf_info *li;
1313 struct hlist_head *hhead = &l->list;
1314 struct hlist_node *node;
1315
1316 hlist_for_each_entry_rcu(li, node, hhead, hlist) {
1317 i = li->plen;
1318 mask = inet_make_mask(i);
1319 if (l->key != (key & ntohl(mask)))
1320 continue;
1321
1322 if ((err = fib_semantic_match(&li->falh, flp, res, htonl(l->key), mask, i)) <= 0) {
1323 *plen = i;
1324 #ifdef CONFIG_IP_FIB_TRIE_STATS
1325 t->stats.semantic_match_passed++;
1326 #endif
1327 return err;
1328 }
1329 #ifdef CONFIG_IP_FIB_TRIE_STATS
1330 t->stats.semantic_match_miss++;
1331 #endif
1332 }
1333 return 1;
1334 }
1335
1336 static int
1337 fn_trie_lookup(struct fib_table *tb, const struct flowi *flp, struct fib_result *res)
1338 {
1339 struct trie *t = (struct trie *) tb->tb_data;
1340 int plen, ret = 0;
1341 struct node *n;
1342 struct tnode *pn;
1343 int pos, bits;
1344 t_key key = ntohl(flp->fl4_dst);
1345 int chopped_off;
1346 t_key cindex = 0;
1347 int current_prefix_length = KEYLENGTH;
1348 struct tnode *cn;
1349 t_key node_prefix, key_prefix, pref_mismatch;
1350 int mp;
1351
1352 rcu_read_lock();
1353
1354 n = rcu_dereference(t->trie);
1355 if (!n)
1356 goto failed;
1357
1358 #ifdef CONFIG_IP_FIB_TRIE_STATS
1359 t->stats.gets++;
1360 #endif
1361
1362 /* Just a leaf? */
1363 if (IS_LEAF(n)) {
1364 if ((ret = check_leaf(t, (struct leaf *)n, key, &plen, flp, res)) <= 0)
1365 goto found;
1366 goto failed;
1367 }
1368 pn = (struct tnode *) n;
1369 chopped_off = 0;
1370
1371 while (pn) {
1372 pos = pn->pos;
1373 bits = pn->bits;
1374
1375 if (!chopped_off)
1376 cindex = tkey_extract_bits(mask_pfx(key, current_prefix_length),
1377 pos, bits);
1378
1379 n = tnode_get_child(pn, cindex);
1380
1381 if (n == NULL) {
1382 #ifdef CONFIG_IP_FIB_TRIE_STATS
1383 t->stats.null_node_hit++;
1384 #endif
1385 goto backtrace;
1386 }
1387
1388 if (IS_LEAF(n)) {
1389 if ((ret = check_leaf(t, (struct leaf *)n, key, &plen, flp, res)) <= 0)
1390 goto found;
1391 else
1392 goto backtrace;
1393 }
1394
1395 #define HL_OPTIMIZE
1396 #ifdef HL_OPTIMIZE
1397 cn = (struct tnode *)n;
1398
1399 /*
1400 * It's a tnode, and we can do some extra checks here if we
1401 * like, to avoid descending into a dead-end branch.
1402 * This tnode is in the parent's child array at index
1403 * key[p_pos..p_pos+p_bits] but potentially with some bits
1404 * chopped off, so in reality the index may be just a
1405 * subprefix, padded with zero at the end.
1406 * We can also take a look at any skipped bits in this
1407 * tnode - everything up to p_pos is supposed to be ok,
1408 * and the non-chopped bits of the index (see the previous
1409 * paragraph) are also guaranteed ok, but the rest is
1410 * considered unknown.
1411 *
1412 * The skipped bits are key[pos+bits..cn->pos].
1413 */
1414
1415 /* If current_prefix_length < pos+bits, we are already doing
1416 * actual prefix matching, which means everything from
1417 * pos+(bits-chopped_off) onward must be zero along some
1418 * branch of this subtree - otherwise there is *no* valid
1419 * prefix present. Here we can only check the skipped
1420 * bits. Remember, since we have already indexed into the
1421 * parent's child array, we know that the bits we chopped off
1422 * *are* zero.
1423 */
1424
1425 /* NOTA BENE: CHECKING ONLY SKIPPED BITS FOR THE NEW NODE HERE */
1426
1427 if (current_prefix_length < pos+bits) {
1428 if (tkey_extract_bits(cn->key, current_prefix_length,
1429 cn->pos - current_prefix_length) != 0 ||
1430 !(cn->child[0]))
1431 goto backtrace;
1432 }
1433
1434 /*
1435 * If chopped_off=0, the index is fully validated and we
1436 * only need to look at the skipped bits for this, the new,
1437 * tnode. What we actually want to do is to find out if
1438 * these skipped bits match our key perfectly, or if we will
1439 * have to count on finding a matching prefix further down,
1440 * because if we do, we would like to have some way of
1441 * verifying the existence of such a prefix at this point.
1442 */
1443
1444 /* The only thing we can do at this point is to verify that
1445 * any such matching prefix can indeed be a prefix to our
1446 * key, and if the bits in the node we are inspecting that
1447 * do not match our key are not ZERO, this cannot be true.
1448 * Thus, find out where there is a mismatch (before cn->pos)
1449 * and verify that all the mismatching bits are zero in the
1450 * new tnode's key.
1451 */
1452
1453 /* Note: We aren't very concerned about the piece of the key
1454 * that precedes pn->pos+pn->bits, since these have already been
1455 * checked. The bits after cn->pos aren't checked since these are
1456 * by definition "unknown" at this point. Thus, what we want to
1457 * see is if we are about to enter the "prefix matching" state,
1458 * and in that case verify that the skipped bits that will prevail
1459 * throughout this subtree are zero, as they have to be if we are
1460 * to find a matching prefix.
1461 */
1462
1463 node_prefix = mask_pfx(cn->key, cn->pos);
1464 key_prefix = mask_pfx(key, cn->pos);
1465 pref_mismatch = key_prefix^node_prefix;
1466 mp = 0;
1467
1468 /* In short: If skipped bits in this node do not match the search
1469 * key, enter the "prefix matching" state directly.
1470 */
1471 if (pref_mismatch) {
1472 while (!(pref_mismatch & (1<<(KEYLENGTH-1)))) {
1473 mp++;
1474 pref_mismatch = pref_mismatch <<1;
1475 }
1476 key_prefix = tkey_extract_bits(cn->key, mp, cn->pos-mp);
1477
1478 if (key_prefix != 0)
1479 goto backtrace;
1480
1481 if (current_prefix_length >= cn->pos)
1482 current_prefix_length = mp;
1483 }
1484 #endif
1485 pn = (struct tnode *)n; /* Descend */
1486 chopped_off = 0;
1487 continue;
1488
1489 backtrace:
1490 chopped_off++;
1491
1492 /* Chopping off zero bits does not change the child index (cindex) */
1493 while ((chopped_off <= pn->bits) && !(cindex & (1<<(chopped_off-1))))
1494 chopped_off++;
1495
1496 /* Decrease current_... with bits chopped off */
1497 if (current_prefix_length > pn->pos + pn->bits - chopped_off)
1498 current_prefix_length = pn->pos + pn->bits - chopped_off;
1499
1500 /*
1501 * Either we do the actual chop off, or, if we have
1502 * chopped off all bits in this tnode, we walk up to our parent.
1503 */
1504
1505 if (chopped_off <= pn->bits) {
1506 cindex &= ~(1 << (chopped_off-1));
1507 } else {
1508 struct tnode *parent = node_parent((struct node *) pn);
1509 if (!parent)
1510 goto failed;
1511
1512 /* Get Child's index */
1513 cindex = tkey_extract_bits(pn->key, parent->pos, parent->bits);
1514 pn = parent;
1515 chopped_off = 0;
1516
1517 #ifdef CONFIG_IP_FIB_TRIE_STATS
1518 t->stats.backtrack++;
1519 #endif
1520 goto backtrace;
1521 }
1522 }
1523 failed:
1524 ret = 1;
1525 found:
1526 rcu_read_unlock();
1527 return ret;
1528 }
1529
1530 /* only called from updater side */
1531 static int trie_leaf_remove(struct trie *t, t_key key)
1532 {
1533 t_key cindex;
1534 struct tnode *tp = NULL;
1535 struct node *n = t->trie;
1536 struct leaf *l;
1537
1538 pr_debug("entering trie_leaf_remove(%p)\n", n);
1539
1540 /* Note that in the case of skipped bits, those bits are *not* checked!
1541 * When we finish this, we will have NULL or a T_LEAF, and the
1542 * T_LEAF may or may not match our key.
1543 */
1544
1545 while (n != NULL && IS_TNODE(n)) {
1546 struct tnode *tn = (struct tnode *) n;
1547 check_tnode(tn);
1548 n = tnode_get_child(tn, tkey_extract_bits(key, tn->pos, tn->bits));
1549
1550 BUG_ON(n && node_parent(n) != tn);
1551 }
1552 l = (struct leaf *) n;
1553
1554 if (!n || !tkey_equals(l->key, key))
1555 return 0;
1556
1557 /*
1558 * Key found.
1559 * Remove the leaf and rebalance the tree
1560 */
1561
1562 t->revision++;
1563 t->size--;
1564
1565 tp = node_parent(n);
1566 tnode_free((struct tnode *) n);
1567
1568 if (tp) {
1569 cindex = tkey_extract_bits(key, tp->pos, tp->bits);
1570 put_child(t, (struct tnode *)tp, cindex, NULL);
1571 rcu_assign_pointer(t->trie, trie_rebalance(t, tp));
1572 } else
1573 rcu_assign_pointer(t->trie, NULL);
1574
1575 return 1;
1576 }
1577
1578 /*
1579 * Caller must hold RTNL.
1580 */
1581 static int fn_trie_delete(struct fib_table *tb, struct fib_config *cfg)
1582 {
1583 struct trie *t = (struct trie *) tb->tb_data;
1584 u32 key, mask;
1585 int plen = cfg->fc_dst_len;
1586 u8 tos = cfg->fc_tos;
1587 struct fib_alias *fa, *fa_to_delete;
1588 struct list_head *fa_head;
1589 struct leaf *l;
1590 struct leaf_info *li;
1591
1592 if (plen > 32)
1593 return -EINVAL;
1594
1595 key = ntohl(cfg->fc_dst);
1596 mask = ntohl(inet_make_mask(plen));
1597
1598 if (key & ~mask)
1599 return -EINVAL;
1600
1601 key = key & mask;
1602 l = fib_find_node(t, key);
1603
1604 if (!l)
1605 return -ESRCH;
1606
1607 fa_head = get_fa_head(l, plen);
1608 fa = fib_find_alias(fa_head, tos, 0);
1609
1610 if (!fa)
1611 return -ESRCH;
1612
1613 pr_debug("Deleting %08x/%d tos=%d t=%p\n", key, plen, tos, t);
1614
1615 fa_to_delete = NULL;
1616 fa_head = fa->fa_list.prev;
1617
1618 list_for_each_entry(fa, fa_head, fa_list) {
1619 struct fib_info *fi = fa->fa_info;
1620
1621 if (fa->fa_tos != tos)
1622 break;
1623
1624 if ((!cfg->fc_type || fa->fa_type == cfg->fc_type) &&
1625 (cfg->fc_scope == RT_SCOPE_NOWHERE ||
1626 fa->fa_scope == cfg->fc_scope) &&
1627 (!cfg->fc_protocol ||
1628 fi->fib_protocol == cfg->fc_protocol) &&
1629 fib_nh_match(cfg, fi) == 0) {
1630 fa_to_delete = fa;
1631 break;
1632 }
1633 }
1634
1635 if (!fa_to_delete)
1636 return -ESRCH;
1637
1638 fa = fa_to_delete;
1639 rtmsg_fib(RTM_DELROUTE, htonl(key), fa, plen, tb->tb_id,
1640 &cfg->fc_nlinfo, 0);
1641
1642 l = fib_find_node(t, key);
1643 li = find_leaf_info(l, plen);
1644
1645 list_del_rcu(&fa->fa_list);
1646
1647 if (list_empty(fa_head)) {
1648 hlist_del_rcu(&li->hlist);
1649 free_leaf_info(li);
1650 }
1651
1652 if (hlist_empty(&l->list))
1653 trie_leaf_remove(t, key);
1654
1655 if (fa->fa_state & FA_S_ACCESSED)
1656 rt_cache_flush(-1);
1657
1658 fib_release_info(fa->fa_info);
1659 alias_free_mem_rcu(fa);
1660 return 0;
1661 }
1662
1663 static int trie_flush_list(struct trie *t, struct list_head *head)
1664 {
1665 struct fib_alias *fa, *fa_node;
1666 int found = 0;
1667
1668 list_for_each_entry_safe(fa, fa_node, head, fa_list) {
1669 struct fib_info *fi = fa->fa_info;
1670
1671 if (fi && (fi->fib_flags & RTNH_F_DEAD)) {
1672 list_del_rcu(&fa->fa_list);
1673 fib_release_info(fa->fa_info);
1674 alias_free_mem_rcu(fa);
1675 found++;
1676 }
1677 }
1678 return found;
1679 }
1680
1681 static int trie_flush_leaf(struct trie *t, struct leaf *l)
1682 {
1683 int found = 0;
1684 struct hlist_head *lih = &l->list;
1685 struct hlist_node *node, *tmp;
1686 struct leaf_info *li = NULL;
1687
1688 hlist_for_each_entry_safe(li, node, tmp, lih, hlist) {
1689 found += trie_flush_list(t, &li->falh);
1690
1691 if (list_empty(&li->falh)) {
1692 hlist_del_rcu(&li->hlist);
1693 free_leaf_info(li);
1694 }
1695 }
1696 return found;
1697 }
1698
1699 /* rcu_read_lock needs to be held by the caller on the read side */
1700
1701 static struct leaf *nextleaf(struct trie *t, struct leaf *thisleaf)
1702 {
1703 struct node *c = (struct node *) thisleaf;
1704 struct tnode *p;
1705 int idx;
1706 struct node *trie = rcu_dereference(t->trie);
1707
1708 if (c == NULL) {
1709 if (trie == NULL)
1710 return NULL;
1711
1712 if (IS_LEAF(trie)) /* trie w. just a leaf */
1713 return (struct leaf *) trie;
1714
1715 p = (struct tnode*) trie; /* Start */
1716 } else
1717 p = node_parent(c);
1718
1719 while (p) {
1720 int pos, last;
1721
1722 /* Find the next child of the parent */
1723 if (c)
1724 pos = 1 + tkey_extract_bits(c->key, p->pos, p->bits);
1725 else
1726 pos = 0;
1727
1728 last = 1 << p->bits;
1729 for (idx = pos; idx < last ; idx++) {
1730 c = rcu_dereference(p->child[idx]);
1731
1732 if (!c)
1733 continue;
1734
1735 /* Descend if tnode */
1736 while (IS_TNODE(c)) {
1737 p = (struct tnode *) c;
1738 idx = 0;
1739
1740 /* Leftmost non-NULL branch */
1741 if (p && IS_TNODE(p))
1742 while (idx < (1<<p->bits) &&
1743 !(c = rcu_dereference(p->child[idx]))) idx++;
1744
1745 /* Done with this tnode? */
1746 if (idx >= (1 << p->bits) || !c)
1747 goto up;
1748 }
1749 return (struct leaf *) c;
1750 }
1751 up:
1752 /* No more children, go up one step */
1753 c = (struct node *) p;
1754 p = node_parent(c);
1755 }
1756 return NULL; /* Done - back at the root of the trie */
1757 }
1758
1759 /*
1760 * Caller must hold RTNL.
1761 */
1762 static int fn_trie_flush(struct fib_table *tb)
1763 {
1764 struct trie *t = (struct trie *) tb->tb_data;
1765 struct leaf *ll = NULL, *l = NULL;
1766 int found = 0, h;
1767
1768 t->revision++;
1769
1770 for (h = 0; (l = nextleaf(t, l)) != NULL; h++) {
1771 found += trie_flush_leaf(t, l);
1772
1773 if (ll && hlist_empty(&ll->list))
1774 trie_leaf_remove(t, ll->key);
1775 ll = l;
1776 }
1777
1778 if (ll && hlist_empty(&ll->list))
1779 trie_leaf_remove(t, ll->key);
1780
1781 pr_debug("trie_flush found=%d\n", found);
1782 return found;
1783 }
1784
1785 static void
1786 fn_trie_select_default(struct fib_table *tb, const struct flowi *flp, struct fib_result *res)
1787 {
1788 struct trie *t = (struct trie *) tb->tb_data;
1789 int order, last_idx;
1790 struct fib_info *fi = NULL;
1791 struct fib_info *last_resort;
1792 struct fib_alias *fa = NULL;
1793 struct list_head *fa_head;
1794 struct leaf *l;
1795
1796 last_idx = -1;
1797 last_resort = NULL;
1798 order = -1;
1799
1800 rcu_read_lock();
1801
1802 l = fib_find_node(t, 0);
1803 if (!l)
1804 goto out;
1805
1806 fa_head = get_fa_head(l, 0);
1807 if (!fa_head)
1808 goto out;
1809
1810 if (list_empty(fa_head))
1811 goto out;
1812
1813 list_for_each_entry_rcu(fa, fa_head, fa_list) {
1814 struct fib_info *next_fi = fa->fa_info;
1815
1816 if (fa->fa_scope != res->scope ||
1817 fa->fa_type != RTN_UNICAST)
1818 continue;
1819
1820 if (next_fi->fib_priority > res->fi->fib_priority)
1821 break;
1822 if (!next_fi->fib_nh[0].nh_gw ||
1823 next_fi->fib_nh[0].nh_scope != RT_SCOPE_LINK)
1824 continue;
1825 fa->fa_state |= FA_S_ACCESSED;
1826
1827 if (fi == NULL) {
1828 if (next_fi != res->fi)
1829 break;
1830 } else if (!fib_detect_death(fi, order, &last_resort,
1831 &last_idx, tb->tb_default)) {
1832 fib_result_assign(res, fi);
1833 tb->tb_default = order;
1834 goto out;
1835 }
1836 fi = next_fi;
1837 order++;
1838 }
1839 if (order <= 0 || fi == NULL) {
1840 tb->tb_default = -1;
1841 goto out;
1842 }
1843
1844 if (!fib_detect_death(fi, order, &last_resort, &last_idx,
1845 tb->tb_default)) {
1846 fib_result_assign(res, fi);
1847 tb->tb_default = order;
1848 goto out;
1849 }
1850 if (last_idx >= 0)
1851 fib_result_assign(res, last_resort);
1852 tb->tb_default = last_idx;
1853 out:
1854 rcu_read_unlock();
1855 }
1856
1857 static int fn_trie_dump_fa(t_key key, int plen, struct list_head *fah, struct fib_table *tb,
1858 struct sk_buff *skb, struct netlink_callback *cb)
1859 {
1860 int i, s_i;
1861 struct fib_alias *fa;
1862
1863 __be32 xkey = htonl(key);
1864
1865 s_i = cb->args[4];
1866 i = 0;
1867
1868 /* rcu_read_lock is held by caller */
1869
1870 list_for_each_entry_rcu(fa, fah, fa_list) {
1871 if (i < s_i) {
1872 i++;
1873 continue;
1874 }
1875 BUG_ON(!fa->fa_info);
1876
1877 if (fib_dump_info(skb, NETLINK_CB(cb->skb).pid,
1878 cb->nlh->nlmsg_seq,
1879 RTM_NEWROUTE,
1880 tb->tb_id,
1881 fa->fa_type,
1882 fa->fa_scope,
1883 xkey,
1884 plen,
1885 fa->fa_tos,
1886 fa->fa_info, 0) < 0) {
1887 cb->args[4] = i;
1888 return -1;
1889 }
1890 i++;
1891 }
1892 cb->args[4] = i;
1893 return skb->len;
1894 }
1895
1896 static int fn_trie_dump_plen(struct trie *t, int plen, struct fib_table *tb, struct sk_buff *skb,
1897 struct netlink_callback *cb)
1898 {
1899 int h, s_h;
1900 struct list_head *fa_head;
1901 struct leaf *l = NULL;
1902
1903 s_h = cb->args[3];
1904
1905 for (h = 0; (l = nextleaf(t, l)) != NULL; h++) {
1906 if (h < s_h)
1907 continue;
1908 if (h > s_h)
1909 memset(&cb->args[4], 0,
1910 sizeof(cb->args) - 4*sizeof(cb->args[0]));
1911
1912 fa_head = get_fa_head(l, plen);
1913
1914 if (!fa_head)
1915 continue;
1916
1917 if (list_empty(fa_head))
1918 continue;
1919
1920 if (fn_trie_dump_fa(l->key, plen, fa_head, tb, skb, cb)<0) {
1921 cb->args[3] = h;
1922 return -1;
1923 }
1924 }
1925 cb->args[3] = h;
1926 return skb->len;
1927 }
1928
1929 static int fn_trie_dump(struct fib_table *tb, struct sk_buff *skb, struct netlink_callback *cb)
1930 {
1931 int m, s_m;
1932 struct trie *t = (struct trie *) tb->tb_data;
1933
1934 s_m = cb->args[2];
1935
1936 rcu_read_lock();
1937 for (m = 0; m <= 32; m++) {
1938 if (m < s_m)
1939 continue;
1940 if (m > s_m)
1941 memset(&cb->args[3], 0,
1942 sizeof(cb->args) - 3*sizeof(cb->args[0]));
1943
1944 if (fn_trie_dump_plen(t, 32-m, tb, skb, cb)<0) {
1945 cb->args[2] = m;
1946 goto out;
1947 }
1948 }
1949 rcu_read_unlock();
1950 cb->args[2] = m;
1951 return skb->len;
1952 out:
1953 rcu_read_unlock();
1954 return -1;
1955 }
1956
1957 /* Fix more generic FIB names for init later */
1958
1959 #ifdef CONFIG_IP_MULTIPLE_TABLES
1960 struct fib_table * fib_hash_init(u32 id)
1961 #else
1962 struct fib_table * __init fib_hash_init(u32 id)
1963 #endif
1964 {
1965 struct fib_table *tb;
1966 struct trie *t;
1967
1968 if (fn_alias_kmem == NULL)
1969 fn_alias_kmem = kmem_cache_create("ip_fib_alias",
1970 sizeof(struct fib_alias),
1971 0, SLAB_HWCACHE_ALIGN,
1972 NULL);
1973
1974 tb = kmalloc(sizeof(struct fib_table) + sizeof(struct trie),
1975 GFP_KERNEL);
1976 if (tb == NULL)
1977 return NULL;
1978
1979 tb->tb_id = id;
1980 tb->tb_default = -1;
1981 tb->tb_lookup = fn_trie_lookup;
1982 tb->tb_insert = fn_trie_insert;
1983 tb->tb_delete = fn_trie_delete;
1984 tb->tb_flush = fn_trie_flush;
1985 tb->tb_select_default = fn_trie_select_default;
1986 tb->tb_dump = fn_trie_dump;
1987 memset(tb->tb_data, 0, sizeof(struct trie));
1988
1989 t = (struct trie *) tb->tb_data;
1990
1991 trie_init(t);
1992
1993 if (id == RT_TABLE_LOCAL)
1994 printk(KERN_INFO "IPv4 FIB: Using LC-trie version %s\n", VERSION);
1995
1996 return tb;
1997 }
1998
1999 #ifdef CONFIG_PROC_FS
2000 /* Depth first Trie walk iterator */
2001 struct fib_trie_iter {
2002 struct trie *trie_local, *trie_main;
2003 struct tnode *tnode;
2004 struct trie *trie;
2005 unsigned index;
2006 unsigned depth;
2007 };
2008
2009 static struct node *fib_trie_get_next(struct fib_trie_iter *iter)
2010 {
2011 struct tnode *tn = iter->tnode;
2012 unsigned cindex = iter->index;
2013 struct tnode *p;
2014
2015 /* A single entry routing table */
2016 if (!tn)
2017 return NULL;
2018
2019 pr_debug("get_next iter={node=%p index=%d depth=%d}\n",
2020 iter->tnode, iter->index, iter->depth);
2021 rescan:
2022 while (cindex < (1<<tn->bits)) {
2023 struct node *n = tnode_get_child(tn, cindex);
2024
2025 if (n) {
2026 if (IS_LEAF(n)) {
2027 iter->tnode = tn;
2028 iter->index = cindex + 1;
2029 } else {
2030 /* push down one level */
2031 iter->tnode = (struct tnode *) n;
2032 iter->index = 0;
2033 ++iter->depth;
2034 }
2035 return n;
2036 }
2037
2038 ++cindex;
2039 }
2040
2041 /* Current node exhausted, pop back up */
2042 p = node_parent((struct node *)tn);
2043 if (p) {
2044 cindex = tkey_extract_bits(tn->key, p->pos, p->bits)+1;
2045 tn = p;
2046 --iter->depth;
2047 goto rescan;
2048 }
2049
2050 /* got root? */
2051 return NULL;
2052 }
2053
2054 static struct node *fib_trie_get_first(struct fib_trie_iter *iter,
2055 struct trie *t)
2056 {
2057 struct node *n;
2058
2059 if (!t)
2060 return NULL;
2061
2062 n = rcu_dereference(t->trie);
2063
2064 if (!iter)
2065 return NULL;
2066
2067 if (n) {
2068 if (IS_TNODE(n)) {
2069 iter->tnode = (struct tnode *) n;
2070 iter->trie = t;
2071 iter->index = 0;
2072 iter->depth = 1;
2073 } else {
2074 iter->tnode = NULL;
2075 iter->trie = t;
2076 iter->index = 0;
2077 iter->depth = 0;
2078 }
2079 return n;
2080 }
2081 return NULL;
2082 }
2083
2084 static void trie_collect_stats(struct trie *t, struct trie_stat *s)
2085 {
2086 struct node *n;
2087 struct fib_trie_iter iter;
2088
2089 memset(s, 0, sizeof(*s));
2090
2091 rcu_read_lock();
2092 for (n = fib_trie_get_first(&iter, t); n;
2093 n = fib_trie_get_next(&iter)) {
2094 if (IS_LEAF(n)) {
2095 s->leaves++;
2096 s->totdepth += iter.depth;
2097 if (iter.depth > s->maxdepth)
2098 s->maxdepth = iter.depth;
2099 } else {
2100 const struct tnode *tn = (const struct tnode *) n;
2101 int i;
2102
2103 s->tnodes++;
2104 if (tn->bits < MAX_STAT_DEPTH)
2105 s->nodesizes[tn->bits]++;
2106
2107 for (i = 0; i < (1<<tn->bits); i++)
2108 if (!tn->child[i])
2109 s->nullpointers++;
2110 }
2111 }
2112 rcu_read_unlock();
2113 }
2114
2115 /*
2116 * This outputs /proc/net/fib_triestats
2117 */
2118 static void trie_show_stats(struct seq_file *seq, struct trie_stat *stat)
2119 {
2120 unsigned i, max, pointers, bytes, avdepth;
2121
2122 if (stat->leaves)
2123 avdepth = stat->totdepth*100 / stat->leaves;
2124 else
2125 avdepth = 0;
2126
2127 seq_printf(seq, "\tAver depth: %d.%02d\n", avdepth / 100, avdepth % 100 );
2128 seq_printf(seq, "\tMax depth: %u\n", stat->maxdepth);
2129
2130 seq_printf(seq, "\tLeaves: %u\n", stat->leaves);
2131
2132 bytes = sizeof(struct leaf) * stat->leaves;
2133 seq_printf(seq, "\tInternal nodes: %d\n\t", stat->tnodes);
2134 bytes += sizeof(struct tnode) * stat->tnodes;
2135
2136 max = MAX_STAT_DEPTH;
2137 while (max > 0 && stat->nodesizes[max-1] == 0)
2138 max--;
2139
2140 pointers = 0;
2141 	for (i = 1; i < max; i++)
2142 		if (stat->nodesizes[i] != 0) {
2143 			seq_printf(seq, "  %d: %d", i, stat->nodesizes[i]);
2144 			pointers += (1<<i) * stat->nodesizes[i];
2145 		}
2146 seq_putc(seq, '\n');
2147 seq_printf(seq, "\tPointers: %d\n", pointers);
2148
2149 bytes += sizeof(struct node *) * pointers;
2150 	seq_printf(seq, "\tNull ptrs: %d\n", stat->nullpointers);
2151 	seq_printf(seq, "\tTotal size: %d kB\n", (bytes + 1023) / 1024);
2152
2153 #ifdef CONFIG_IP_FIB_TRIE_STATS
2154 seq_printf(seq, "Counters:\n---------\n");
2155 	seq_printf(seq, "gets = %d\n", t->stats.gets);
2156 	seq_printf(seq, "backtracks = %d\n", t->stats.backtrack);
2157 	seq_printf(seq, "semantic match passed = %d\n", t->stats.semantic_match_passed);
2158 	seq_printf(seq, "semantic match miss = %d\n", t->stats.semantic_match_miss);
2159 	seq_printf(seq, "null node hit = %d\n", t->stats.null_node_hit);
2160 	seq_printf(seq, "skipped node resize = %d\n", t->stats.resize_node_skipped);
2161 #ifdef CLEAR_STATS
2162 memset(&(t->stats), 0, sizeof(t->stats));
2163 #endif
2164 #endif /* CONFIG_IP_FIB_TRIE_STATS */
2165 }
2166
2167 static int fib_triestat_seq_show(struct seq_file *seq, void *v)
2168 {
2169 struct trie *trie_local, *trie_main;
2170 struct trie_stat *stat;
2171 struct fib_table *tb;
2172
2173 trie_local = NULL;
2174 tb = fib_get_table(RT_TABLE_LOCAL);
2175 if (tb)
2176 trie_local = (struct trie *) tb->tb_data;
2177
2178 trie_main = NULL;
2179 tb = fib_get_table(RT_TABLE_MAIN);
2180 if (tb)
2181 trie_main = (struct trie *) tb->tb_data;
2182
2183
2184 stat = kmalloc(sizeof(*stat), GFP_KERNEL);
2185 if (!stat)
2186 return -ENOMEM;
2187
2188 seq_printf(seq, "Basic info: size of leaf: %Zd bytes, size of tnode: %Zd bytes.\n",
2189 sizeof(struct leaf), sizeof(struct tnode));
2190
2191 if (trie_local) {
2192 seq_printf(seq, "Local:\n");
2193 trie_collect_stats(trie_local, stat);
2194 		trie_show_stats(seq, trie_local, stat);
2195 }
2196
2197 if (trie_main) {
2198 seq_printf(seq, "Main:\n");
2199 trie_collect_stats(trie_main, stat);
2200 		trie_show_stats(seq, trie_main, stat);
2201 }
2202 kfree(stat);
2203
2204 return 0;
2205 }
2206
2207 static int fib_triestat_seq_open(struct inode *inode, struct file *file)
2208 {
2209 return single_open(file, fib_triestat_seq_show, NULL);
2210 }
2211
2212 static const struct file_operations fib_triestat_fops = {
2213 .owner = THIS_MODULE,
2214 .open = fib_triestat_seq_open,
2215 .read = seq_read,
2216 .llseek = seq_lseek,
2217 .release = single_release,
2218 };
2219
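/*
 * Linearise both tables for seq_file: position 0 onwards indexes the
 * nodes of the LOCAL trie, continuing into the MAIN trie, so a single
 * loff_t can address any node in either table.
 */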
2220 static struct node *fib_trie_get_idx(struct fib_trie_iter *iter,
2221 loff_t pos)
2222 {
2223 loff_t idx = 0;
2224 struct node *n;
2225
2226 for (n = fib_trie_get_first(iter, iter->trie_local);
2227 n; ++idx, n = fib_trie_get_next(iter)) {
2228 if (pos == idx)
2229 return n;
2230 }
2231
2232 for (n = fib_trie_get_first(iter, iter->trie_main);
2233 n; ++idx, n = fib_trie_get_next(iter)) {
2234 if (pos == idx)
2235 return n;
2236 }
2237 return NULL;
2238 }
2239
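/*
 * seq_file iteration callbacks.  start() takes the RCU read lock and
 * stop() drops it, so nodes visited during a dump stay valid even if
 * the trie is being resized concurrently.
 */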
2240 static void *fib_trie_seq_start(struct seq_file *seq, loff_t *pos)
2241 {
2242 struct fib_trie_iter *iter = seq->private;
2243 struct fib_table *tb;
2244
2245 if (!iter->trie_local) {
2246 tb = fib_get_table(RT_TABLE_LOCAL);
2247 if (tb)
2248 iter->trie_local = (struct trie *) tb->tb_data;
2249 }
2250 if (!iter->trie_main) {
2251 tb = fib_get_table(RT_TABLE_MAIN);
2252 if (tb)
2253 iter->trie_main = (struct trie *) tb->tb_data;
2254 }
2255 rcu_read_lock();
2256 if (*pos == 0)
2257 return SEQ_START_TOKEN;
2258 return fib_trie_get_idx(iter, *pos - 1);
2259 }
2260
2261 static void *fib_trie_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2262 {
2263 struct fib_trie_iter *iter = seq->private;
2264 void *l = v;
2265
2266 ++*pos;
2267 if (v == SEQ_START_TOKEN)
2268 return fib_trie_get_idx(iter, 0);
2269
2270 v = fib_trie_get_next(iter);
2271 BUG_ON(v == l);
2272 if (v)
2273 return v;
2274
2275 /* continue scan in next trie */
2276 if (iter->trie == iter->trie_local)
2277 return fib_trie_get_first(iter, iter->trie_main);
2278
2279 return NULL;
2280 }
2281
2282 static void fib_trie_seq_stop(struct seq_file *seq, void *v)
2283 {
2284 rcu_read_unlock();
2285 }
2286
2287 static void seq_indent(struct seq_file *seq, int n)
2288 {
2289 while (n-- > 0) seq_puts(seq, " ");
2290 }
2291
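/*
 * Pretty-printers for scope and route type.  Unknown values are
 * formatted into a static buffer, so these helpers are not reentrant
 * and are only meant for the show callbacks below.
 */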
2292 static inline const char *rtn_scope(enum rt_scope_t s)
2293 {
2294 static char buf[32];
2295
2296 switch (s) {
2297 case RT_SCOPE_UNIVERSE: return "universe";
2298 case RT_SCOPE_SITE: return "site";
2299 case RT_SCOPE_LINK: return "link";
2300 case RT_SCOPE_HOST: return "host";
2301 case RT_SCOPE_NOWHERE: return "nowhere";
2302 default:
2303 snprintf(buf, sizeof(buf), "scope=%d", s);
2304 return buf;
2305 }
2306 }
2307
2308 static const char *rtn_type_names[__RTN_MAX] = {
2309 [RTN_UNSPEC] = "UNSPEC",
2310 [RTN_UNICAST] = "UNICAST",
2311 [RTN_LOCAL] = "LOCAL",
2312 [RTN_BROADCAST] = "BROADCAST",
2313 [RTN_ANYCAST] = "ANYCAST",
2314 [RTN_MULTICAST] = "MULTICAST",
2315 [RTN_BLACKHOLE] = "BLACKHOLE",
2316 [RTN_UNREACHABLE] = "UNREACHABLE",
2317 [RTN_PROHIBIT] = "PROHIBIT",
2318 [RTN_THROW] = "THROW",
2319 [RTN_NAT] = "NAT",
2320 [RTN_XRESOLVE] = "XRESOLVE",
2321 };
2322
2323 static inline const char *rtn_type(unsigned t)
2324 {
2325 static char buf[32];
2326
2327 if (t < __RTN_MAX && rtn_type_names[t])
2328 return rtn_type_names[t];
2329 snprintf(buf, sizeof(buf), "type %d", t);
2330 return buf;
2331 }
2332
2333 /* Pretty print the trie */
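/*
 * Internal nodes come out as "+--" lines carrying the masked key, pos,
 * bits and the full/empty child counts; leaves come out as "|--" lines
 * followed by one "/plen scope type" line per fib_alias hanging off the
 * leaf.
 */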
2334 static int fib_trie_seq_show(struct seq_file *seq, void *v)
2335 {
2336 const struct fib_trie_iter *iter = seq->private;
2337 struct node *n = v;
2338
2339 if (v == SEQ_START_TOKEN)
2340 return 0;
2341
2342 if (!node_parent(n)) {
2343 if (iter->trie == iter->trie_local)
2344 seq_puts(seq, "<local>:\n");
2345 else
2346 seq_puts(seq, "<main>:\n");
2347 }
2348
2349 if (IS_TNODE(n)) {
2350 struct tnode *tn = (struct tnode *) n;
2351 __be32 prf = htonl(mask_pfx(tn->key, tn->pos));
2352
2353 seq_indent(seq, iter->depth-1);
2354 seq_printf(seq, " +-- %d.%d.%d.%d/%d %d %d %d\n",
2355 NIPQUAD(prf), tn->pos, tn->bits, tn->full_children,
2356 tn->empty_children);
2357
2358 } else {
2359 struct leaf *l = (struct leaf *) n;
2360 int i;
2361 __be32 val = htonl(l->key);
2362
2363 seq_indent(seq, iter->depth);
2364 seq_printf(seq, " |-- %d.%d.%d.%d\n", NIPQUAD(val));
2365 for (i = 32; i >= 0; i--) {
2366 struct leaf_info *li = find_leaf_info(l, i);
2367 if (li) {
2368 struct fib_alias *fa;
2369 list_for_each_entry_rcu(fa, &li->falh, fa_list) {
2370 seq_indent(seq, iter->depth+1);
2371 seq_printf(seq, " /%d %s %s", i,
2372 rtn_scope(fa->fa_scope),
2373 rtn_type(fa->fa_type));
2374 if (fa->fa_tos)
2375 					seq_printf(seq, " tos=%d",
2376 fa->fa_tos);
2377 seq_putc(seq, '\n');
2378 }
2379 }
2380 }
2381 }
2382
2383 return 0;
2384 }
2385
2386 static const struct seq_operations fib_trie_seq_ops = {
2387 .start = fib_trie_seq_start,
2388 .next = fib_trie_seq_next,
2389 .stop = fib_trie_seq_stop,
2390 .show = fib_trie_seq_show,
2391 };
2392
2393 static int fib_trie_seq_open(struct inode *inode, struct file *file)
2394 {
2395 return seq_open_private(file, &fib_trie_seq_ops,
2396 sizeof(struct fib_trie_iter));
2397 }
2398
2399 static const struct file_operations fib_trie_fops = {
2400 .owner = THIS_MODULE,
2401 .open = fib_trie_seq_open,
2402 .read = seq_read,
2403 .llseek = seq_lseek,
2404 .release = seq_release_private,
2405 };
2406
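/*
 * Derive the RTF_* flag word shown in /proc/net/route: reject-type
 * routes (unreachable/prohibit) map to RTF_REJECT, a nexthop gateway
 * adds RTF_GATEWAY, a host mask adds RTF_HOST, and every entry printed
 * is RTF_UP.
 */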
2407 static unsigned fib_flag_trans(int type, __be32 mask, const struct fib_info *fi)
2408 {
2409 static unsigned type2flags[RTN_MAX + 1] = {
2410 		[RTN_UNREACHABLE] = RTF_REJECT, [RTN_PROHIBIT] = RTF_REJECT,
2411 };
2412 unsigned flags = type2flags[type];
2413
2414 if (fi && fi->fib_nh->nh_gw)
2415 flags |= RTF_GATEWAY;
2416 if (mask == htonl(0xFFFFFFFF))
2417 flags |= RTF_HOST;
2418 flags |= RTF_UP;
2419 return flags;
2420 }
2421
2422 /*
2423 * This outputs /proc/net/route.
2424 * The format of the file is not supposed to be changed
2425  * and needs to be the same as the fib_hash output to avoid
2426  * breaking legacy utilities.
2427 */
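/*
 * Columns, in order: Iface, Destination, Gateway, Flags, RefCnt, Use,
 * Metric, Mask, MTU, Window and IRTT.  Destination, gateway and mask
 * are raw 32-bit values printed in hex; flags are the RTF_* bits from
 * fib_flag_trans().
 */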
2428 static int fib_route_seq_show(struct seq_file *seq, void *v)
2429 {
2430 const struct fib_trie_iter *iter = seq->private;
2431 struct leaf *l = v;
2432 int i;
2433 char bf[128];
2434
2435 if (v == SEQ_START_TOKEN) {
2436 seq_printf(seq, "%-127s\n", "Iface\tDestination\tGateway "
2437 "\tFlags\tRefCnt\tUse\tMetric\tMask\t\tMTU"
2438 "\tWindow\tIRTT");
2439 return 0;
2440 }
2441
2442 if (iter->trie == iter->trie_local)
2443 return 0;
2444 if (IS_TNODE(l))
2445 return 0;
2446
2447 	for (i = 32; i >= 0; i--) {
2448 struct leaf_info *li = find_leaf_info(l, i);
2449 struct fib_alias *fa;
2450 __be32 mask, prefix;
2451
2452 if (!li)
2453 continue;
2454
2455 mask = inet_make_mask(li->plen);
2456 prefix = htonl(l->key);
2457
2458 list_for_each_entry_rcu(fa, &li->falh, fa_list) {
2459 const struct fib_info *fi = fa->fa_info;
2460 unsigned flags = fib_flag_trans(fa->fa_type, mask, fi);
2461
2462 if (fa->fa_type == RTN_BROADCAST
2463 || fa->fa_type == RTN_MULTICAST)
2464 continue;
2465
2466 if (fi)
2467 snprintf(bf, sizeof(bf),
2468 "%s\t%08X\t%08X\t%04X\t%d\t%u\t%d\t%08X\t%d\t%u\t%u",
2469 fi->fib_dev ? fi->fib_dev->name : "*",
2470 prefix,
2471 fi->fib_nh->nh_gw, flags, 0, 0,
2472 fi->fib_priority,
2473 mask,
2474 (fi->fib_advmss ? fi->fib_advmss + 40 : 0),
2475 fi->fib_window,
2476 fi->fib_rtt >> 3);
2477 else
2478 snprintf(bf, sizeof(bf),
2479 "*\t%08X\t%08X\t%04X\t%d\t%u\t%d\t%08X\t%d\t%u\t%u",
2480 prefix, 0, flags, 0, 0, 0,
2481 mask, 0, 0, 0);
2482
2483 seq_printf(seq, "%-127s\n", bf);
2484 }
2485 }
2486
2487 return 0;
2488 }
2489
2490 static const struct seq_operations fib_route_seq_ops = {
2491 .start = fib_trie_seq_start,
2492 .next = fib_trie_seq_next,
2493 .stop = fib_trie_seq_stop,
2494 .show = fib_route_seq_show,
2495 };
2496
2497 static int fib_route_seq_open(struct inode *inode, struct file *file)
2498 {
2499 return seq_open_private(file, &fib_route_seq_ops,
2500 sizeof(struct fib_trie_iter));
2501 }
2502
2503 static const struct file_operations fib_route_fops = {
2504 .owner = THIS_MODULE,
2505 .open = fib_route_seq_open,
2506 .read = seq_read,
2507 .llseek = seq_lseek,
2508 .release = seq_release_private,
2509 };
2510
2511 int __init fib_proc_init(void)
2512 {
2513 if (!proc_net_fops_create(&init_net, "fib_trie", S_IRUGO, &fib_trie_fops))
2514 goto out1;
2515
2516 if (!proc_net_fops_create(&init_net, "fib_triestat", S_IRUGO, &fib_triestat_fops))
2517 goto out2;
2518
2519 if (!proc_net_fops_create(&init_net, "route", S_IRUGO, &fib_route_fops))
2520 goto out3;
2521
2522 return 0;
2523
2524 out3:
2525 proc_net_remove(&init_net, "fib_triestat");
2526 out2:
2527 proc_net_remove(&init_net, "fib_trie");
2528 out1:
2529 return -ENOMEM;
2530 }
2531
2532 void __init fib_proc_exit(void)
2533 {
2534 proc_net_remove(&init_net, "fib_trie");
2535 proc_net_remove(&init_net, "fib_triestat");
2536 proc_net_remove(&init_net, "route");
2537 }
2538
2539 #endif /* CONFIG_PROC_FS */