[IPV4] fib_trie: Get rid of trie_init().
[deliverable/linux.git] net/ipv4/fib_trie.c
1 /*
2 * This program is free software; you can redistribute it and/or
3 * modify it under the terms of the GNU General Public License
4 * as published by the Free Software Foundation; either version
5 * 2 of the License, or (at your option) any later version.
6 *
7 * Robert Olsson <robert.olsson@its.uu.se> Uppsala Universitet
8 * & Swedish University of Agricultural Sciences.
9 *
10 * Jens Laas <jens.laas@data.slu.se> Swedish University of
11 * Agricultural Sciences.
12 *
13 * Hans Liss <hans.liss@its.uu.se> Uppsala Universitet
14 *
15 * This work is based on the LPC-trie which is originally described in:
16 *
17 * An experimental study of compression methods for dynamic tries
18 * Stefan Nilsson and Matti Tikkanen. Algorithmica, 33(1):19-33, 2002.
19 * http://www.nada.kth.se/~snilsson/public/papers/dyntrie2/
20 *
21 *
22 * IP-address lookup using LC-tries. Stefan Nilsson and Gunnar Karlsson
23 * IEEE Journal on Selected Areas in Communications, 17(6):1083-1092, June 1999
24 *
25 * Version: $Id: fib_trie.c,v 1.3 2005/06/08 14:20:01 robert Exp $
26 *
27 *
28 * Code from fib_hash has been reused which includes the following header:
29 *
30 *
31 * INET An implementation of the TCP/IP protocol suite for the LINUX
32 * operating system. INET is implemented using the BSD Socket
33 * interface as the means of communication with the user level.
34 *
35 * IPv4 FIB: lookup engine and maintenance routines.
36 *
37 *
38 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
39 *
40 * This program is free software; you can redistribute it and/or
41 * modify it under the terms of the GNU General Public License
42 * as published by the Free Software Foundation; either version
43 * 2 of the License, or (at your option) any later version.
44 *
45 * Substantial contributions to this work come from:
46 *
47 * David S. Miller, <davem@davemloft.net>
48 * Stephen Hemminger <shemminger@osdl.org>
49 * Paul E. McKenney <paulmck@us.ibm.com>
50 * Patrick McHardy <kaber@trash.net>
51 */
52
53 #define VERSION "0.408"
54
55 #include <asm/uaccess.h>
56 #include <asm/system.h>
57 #include <linux/bitops.h>
58 #include <linux/types.h>
59 #include <linux/kernel.h>
60 #include <linux/mm.h>
61 #include <linux/string.h>
62 #include <linux/socket.h>
63 #include <linux/sockios.h>
64 #include <linux/errno.h>
65 #include <linux/in.h>
66 #include <linux/inet.h>
67 #include <linux/inetdevice.h>
68 #include <linux/netdevice.h>
69 #include <linux/if_arp.h>
70 #include <linux/proc_fs.h>
71 #include <linux/rcupdate.h>
72 #include <linux/skbuff.h>
73 #include <linux/netlink.h>
74 #include <linux/init.h>
75 #include <linux/list.h>
76 #include <net/net_namespace.h>
77 #include <net/ip.h>
78 #include <net/protocol.h>
79 #include <net/route.h>
80 #include <net/tcp.h>
81 #include <net/sock.h>
82 #include <net/ip_fib.h>
83 #include "fib_lookup.h"
84
85 #undef CONFIG_IP_FIB_TRIE_STATS
86 #define MAX_STAT_DEPTH 32
87
88 #define KEYLENGTH (8*sizeof(t_key))
89
90 typedef unsigned int t_key;
91
92 #define T_TNODE 0
93 #define T_LEAF 1
94 #define NODE_TYPE_MASK 0x1UL
95 #define NODE_TYPE(node) ((node)->parent & NODE_TYPE_MASK)
96
97 #define IS_TNODE(n) (!(n->parent & T_LEAF))
98 #define IS_LEAF(n) (n->parent & T_LEAF)
99
100 struct node {
101 t_key key;
102 unsigned long parent;
103 };
104
105 struct leaf {
106 t_key key;
107 unsigned long parent;
108 struct hlist_head list;
109 struct rcu_head rcu;
110 };
111
112 struct leaf_info {
113 struct hlist_node hlist;
114 struct rcu_head rcu;
115 int plen;
116 struct list_head falh;
117 };
118
119 struct tnode {
120 t_key key;
121 unsigned long parent;
122 unsigned short pos:5; /* 2log(KEYLENGTH) bits needed */
123 unsigned short bits:5; /* 2log(KEYLENGTH) bits needed */
124 unsigned short full_children; /* KEYLENGTH bits needed */
125 unsigned short empty_children; /* KEYLENGTH bits needed */
126 struct rcu_head rcu;
127 struct node *child[0];
128 };
129
130 #ifdef CONFIG_IP_FIB_TRIE_STATS
131 struct trie_use_stats {
132 unsigned int gets;
133 unsigned int backtrack;
134 unsigned int semantic_match_passed;
135 unsigned int semantic_match_miss;
136 unsigned int null_node_hit;
137 unsigned int resize_node_skipped;
138 };
139 #endif
140
141 struct trie_stat {
142 unsigned int totdepth;
143 unsigned int maxdepth;
144 unsigned int tnodes;
145 unsigned int leaves;
146 unsigned int nullpointers;
147 unsigned int nodesizes[MAX_STAT_DEPTH];
148 };
149
150 struct trie {
151 struct node *trie;
152 #ifdef CONFIG_IP_FIB_TRIE_STATS
153 struct trie_use_stats stats;
154 #endif
155 int size;
156 unsigned int revision;
157 };
158
159 static void put_child(struct trie *t, struct tnode *tn, int i, struct node *n);
160 static void tnode_put_child_reorg(struct tnode *tn, int i, struct node *n, int wasfull);
161 static struct node *resize(struct trie *t, struct tnode *tn);
162 static struct tnode *inflate(struct trie *t, struct tnode *tn);
163 static struct tnode *halve(struct trie *t, struct tnode *tn);
164 static void tnode_free(struct tnode *tn);
165
166 static struct kmem_cache *fn_alias_kmem __read_mostly;
167
168 static inline struct tnode *node_parent(struct node *node)
169 {
170 struct tnode *ret;
171
172 ret = (struct tnode *)(node->parent & ~NODE_TYPE_MASK);
173 return rcu_dereference(ret);
174 }
175
176 static inline void node_set_parent(struct node *node, struct tnode *ptr)
177 {
178 rcu_assign_pointer(node->parent,
179 (unsigned long)ptr | NODE_TYPE(node));
180 }
181
182 /* rcu_read_lock must be held by the caller on the read side */
183
184 static inline struct node *tnode_get_child(struct tnode *tn, int i)
185 {
186 BUG_ON(i >= 1 << tn->bits);
187
188 return rcu_dereference(tn->child[i]);
189 }
190
191 static inline int tnode_child_length(const struct tnode *tn)
192 {
193 return 1 << tn->bits;
194 }
195
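/*
 * The key helpers below number bits from the most significant end of the
 * key: bit 0 is the top bit of the 32-bit key. mask_pfx(k, l) keeps only
 * the l most significant bits of k and clears the rest, and
 * tkey_extract_bits(a, offset, bits) returns the 'bits' bits of 'a' that
 * start at position 'offset', again counted from the most significant bit.
 */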
196 static inline t_key mask_pfx(t_key k, unsigned short l)
197 {
198 return (l == 0) ? 0 : k >> (KEYLENGTH-l) << (KEYLENGTH-l);
199 }
200
201 static inline t_key tkey_extract_bits(t_key a, int offset, int bits)
202 {
203 if (offset < KEYLENGTH)
204 return ((t_key)(a << offset)) >> (KEYLENGTH - bits);
205 else
206 return 0;
207 }
208
209 static inline int tkey_equals(t_key a, t_key b)
210 {
211 return a == b;
212 }
213
214 static inline int tkey_sub_equals(t_key a, int offset, int bits, t_key b)
215 {
216 if (bits == 0 || offset >= KEYLENGTH)
217 return 1;
218 bits = bits > KEYLENGTH ? KEYLENGTH : bits;
219 return ((a ^ b) << offset) >> (KEYLENGTH - bits) == 0;
220 }
221
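/*
 * tkey_mismatch() returns the position (counted from the most significant
 * bit) of the first bit at or after 'offset' where a and b differ, or 0
 * when the two keys are identical.
 */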
222 static inline int tkey_mismatch(t_key a, int offset, t_key b)
223 {
224 t_key diff = a ^ b;
225 int i = offset;
226
227 if (!diff)
228 return 0;
229 while ((diff << i) >> (KEYLENGTH-1) == 0)
230 i++;
231 return i;
232 }
233
234 /*
235 To understand this stuff, an understanding of keys and all their bits is
236 necessary. Every node in the trie has a key associated with it, but not
237 all of the bits in that key are significant.
238
239 Consider a node 'n' and its parent 'tp'.
240
241 If n is a leaf, every bit in its key is significant. Its presence is
242 necessitated by path compression, since during a tree traversal (when
243 searching for a leaf - unless we are doing an insertion) we will completely
244 ignore all skipped bits we encounter. Thus we need to verify, at the end of
245 a potentially successful search, that we have indeed been walking the
246 correct key path.
247
248 Note that we can never "miss" the correct key in the tree if present by
249 following the wrong path. Path compression ensures that segments of the key
250 that are the same for all keys with a given prefix are skipped, but the
251 skipped part *is* identical for each node in the subtrie below the skipped
252 * bit! fib_insert_node() in this implementation takes care of that - note the
253 * call to tkey_sub_equals() in fib_insert_node().
254
255 If n is an internal node - a 'tnode' here - the various parts of its key
256 have many different meanings.
257
258 Example:
259 _________________________________________________________________
260 | i | i | i | i | i | i | i | N | N | N | S | S | S | S | S | C |
261 -----------------------------------------------------------------
262 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
263
264 _________________________________________________________________
265 | C | C | C | u | u | u | u | u | u | u | u | u | u | u | u | u |
266 -----------------------------------------------------------------
267 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
268
269 tp->pos = 7
270 tp->bits = 3
271 n->pos = 15
272 n->bits = 4
273
274 First, let's just ignore the bits that come before the parent tp, that is
275 the bits from 0 to (tp->pos-1). They are *known* but at this point we do
276 not use them for anything.
277
278 The bits from (tp->pos) to (tp->pos + tp->bits - 1) - "N", above - are the
279 index into the parent's child array. That is, they will be used to find
280 'n' among tp's children.
281
282 The bits from (tp->pos + tp->bits) to (n->pos - 1) - "S" - are skipped bits
283 for the node n.
284
285 All the bits we have seen so far are significant to the node n. The rest
286 of the bits are really not needed or indeed known in n->key.
287
288 The bits from (n->pos) to (n->pos + n->bits - 1) - "C" - are the index into
289 n's child array, and will of course be different for each child.
290
291
292 The rest of the bits, from (n->pos + n->bits) onward, are completely unknown
293 at this point.
294
295 */
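/*
 * A worked example for the layout above: with KEYLENGTH == 32, tp->pos == 7
 * and tp->bits == 3, the index of n in tp's child array is
 * tkey_extract_bits(n->key, 7, 3), i.e. ((n->key << 7) >> 29) - key bits
 * 7..9 counted from the most significant bit. Likewise, with n->pos == 15
 * and n->bits == 4, n indexes its own children with
 * tkey_extract_bits(key, 15, 4), using key bits 15..18.
 */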
296
297 static inline void check_tnode(const struct tnode *tn)
298 {
299 WARN_ON(tn && tn->pos+tn->bits > 32);
300 }
301
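/*
 * Resize thresholds, used by resize() below and expressed as percentages
 * of the child array size: a tnode is inflated (its child array doubled)
 * while the doubled node would still have at least inflate_threshold
 * percent non-empty children, and halved while fewer than halve_threshold
 * percent of its children are non-empty. The root node gets its own, more
 * permissive thresholds so that it stays large.
 */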
302 static const int halve_threshold = 25;
303 static const int inflate_threshold = 50;
304 static const int halve_threshold_root = 8;
305 static const int inflate_threshold_root = 15;
306
307
308 static void __alias_free_mem(struct rcu_head *head)
309 {
310 struct fib_alias *fa = container_of(head, struct fib_alias, rcu);
311 kmem_cache_free(fn_alias_kmem, fa);
312 }
313
314 static inline void alias_free_mem_rcu(struct fib_alias *fa)
315 {
316 call_rcu(&fa->rcu, __alias_free_mem);
317 }
318
319 static void __leaf_free_rcu(struct rcu_head *head)
320 {
321 kfree(container_of(head, struct leaf, rcu));
322 }
323
324 static void __leaf_info_free_rcu(struct rcu_head *head)
325 {
326 kfree(container_of(head, struct leaf_info, rcu));
327 }
328
329 static inline void free_leaf_info(struct leaf_info *leaf)
330 {
331 call_rcu(&leaf->rcu, __leaf_info_free_rcu);
332 }
333
334 static struct tnode *tnode_alloc(unsigned int size)
335 {
336 struct page *pages;
337
338 if (size <= PAGE_SIZE)
339 return kcalloc(size, 1, GFP_KERNEL);
340
341 pages = alloc_pages(GFP_KERNEL|__GFP_ZERO, get_order(size));
342 if (!pages)
343 return NULL;
344
345 return page_address(pages);
346 }
347
348 static void __tnode_free_rcu(struct rcu_head *head)
349 {
350 struct tnode *tn = container_of(head, struct tnode, rcu);
351 unsigned int size = sizeof(struct tnode) +
352 (1 << tn->bits) * sizeof(struct node *);
353
354 if (size <= PAGE_SIZE)
355 kfree(tn);
356 else
357 free_pages((unsigned long)tn, get_order(size));
358 }
359
360 static inline void tnode_free(struct tnode *tn)
361 {
362 if (IS_LEAF(tn)) {
363 struct leaf *l = (struct leaf *) tn;
364 call_rcu_bh(&l->rcu, __leaf_free_rcu);
365 } else
366 call_rcu(&tn->rcu, __tnode_free_rcu);
367 }
368
369 static struct leaf *leaf_new(void)
370 {
371 struct leaf *l = kmalloc(sizeof(struct leaf), GFP_KERNEL);
372 if (l) {
373 l->parent = T_LEAF;
374 INIT_HLIST_HEAD(&l->list);
375 }
376 return l;
377 }
378
379 static struct leaf_info *leaf_info_new(int plen)
380 {
381 struct leaf_info *li = kmalloc(sizeof(struct leaf_info), GFP_KERNEL);
382 if (li) {
383 li->plen = plen;
384 INIT_LIST_HEAD(&li->falh);
385 }
386 return li;
387 }
388
389 static struct tnode* tnode_new(t_key key, int pos, int bits)
390 {
391 int nchildren = 1<<bits;
392 int sz = sizeof(struct tnode) + nchildren * sizeof(struct node *);
393 struct tnode *tn = tnode_alloc(sz);
394
395 if (tn) {
396 memset(tn, 0, sz);
397 tn->parent = T_TNODE;
398 tn->pos = pos;
399 tn->bits = bits;
400 tn->key = key;
401 tn->full_children = 0;
402 tn->empty_children = 1<<bits;
403 }
404
405 pr_debug("AT %p s=%u %u\n", tn, (unsigned int) sizeof(struct tnode),
406 (unsigned int) (sizeof(struct node) * 1<<bits));
407 return tn;
408 }
409
410 /*
411 * Check whether a tnode 'n' is "full", i.e. it is an internal node
412 * and no bits are skipped. See discussion in dyntree paper p. 6
413 */
414
415 static inline int tnode_full(const struct tnode *tn, const struct node *n)
416 {
417 if (n == NULL || IS_LEAF(n))
418 return 0;
419
420 return ((struct tnode *) n)->pos == tn->pos + tn->bits;
421 }
422
423 static inline void put_child(struct trie *t, struct tnode *tn, int i, struct node *n)
424 {
425 tnode_put_child_reorg(tn, i, n, -1);
426 }
427
428 /*
429 * Add a child at position i overwriting the old value.
430 * Update the value of full_children and empty_children.
431 */
432
433 static void tnode_put_child_reorg(struct tnode *tn, int i, struct node *n, int wasfull)
434 {
435 struct node *chi = tn->child[i];
436 int isfull;
437
438 BUG_ON(i >= 1<<tn->bits);
439
440
441 /* update emptyChildren */
442 if (n == NULL && chi != NULL)
443 tn->empty_children++;
444 else if (n != NULL && chi == NULL)
445 tn->empty_children--;
446
447 /* update fullChildren */
448 if (wasfull == -1)
449 wasfull = tnode_full(tn, chi);
450
451 isfull = tnode_full(tn, n);
452 if (wasfull && !isfull)
453 tn->full_children--;
454 else if (!wasfull && isfull)
455 tn->full_children++;
456
457 if (n)
458 node_set_parent(n, tn);
459
460 rcu_assign_pointer(tn->child[i], n);
461 }
462
463 static struct node *resize(struct trie *t, struct tnode *tn)
464 {
465 int i;
466 int err = 0;
467 struct tnode *old_tn;
468 int inflate_threshold_use;
469 int halve_threshold_use;
470 int max_resize;
471
472 if (!tn)
473 return NULL;
474
475 pr_debug("In tnode_resize %p inflate_threshold=%d threshold=%d\n",
476 tn, inflate_threshold, halve_threshold);
477
478 /* No children */
479 if (tn->empty_children == tnode_child_length(tn)) {
480 tnode_free(tn);
481 return NULL;
482 }
483 /* One child */
484 if (tn->empty_children == tnode_child_length(tn) - 1)
485 for (i = 0; i < tnode_child_length(tn); i++) {
486 struct node *n;
487
488 n = tn->child[i];
489 if (!n)
490 continue;
491
492 /* compress one level */
493 node_set_parent(n, NULL);
494 tnode_free(tn);
495 return n;
496 }
497 /*
498 * Double the node as long as the resulting node's share of
499 * non-empty children stays above the threshold.
500 */
501
502 /*
503 * From "Implementing a dynamic compressed trie" by Stefan Nilsson of
504 * the Helsinki University of Technology and Matti Tikkanen of Nokia
505 * Telecommunications, page 6:
506 * "A node is doubled if the ratio of non-empty children to all
507 * children in the *doubled* node is at least 'high'."
508 *
509 * 'high' in this instance is the variable 'inflate_threshold'. It
510 * is expressed as a percentage, so we multiply it with
511 * tnode_child_length() and instead of multiplying by 2 (since the
512 * child array will be doubled by inflate()) and multiplying
513 * the left-hand side by 100 (to handle the percentage thing) we
514 * multiply the left-hand side by 50.
515 *
516 * The left-hand side may look a bit weird: tnode_child_length(tn)
517 * - tn->empty_children is of course the number of non-null children
518 * in the current node. tn->full_children is the number of "full"
519 * children, that is non-null tnodes with a skip value of 0.
520 * All of those will be doubled in the resulting inflated tnode, so
521 * we just count them one extra time here.
522 *
523 * A clearer way to write this would be:
524 *
525 * to_be_doubled = tn->full_children;
526 * not_to_be_doubled = tnode_child_length(tn) - tn->empty_children -
527 * tn->full_children;
528 *
529 * new_child_length = tnode_child_length(tn) * 2;
530 *
531 * new_fill_factor = 100 * (not_to_be_doubled + 2*to_be_doubled) /
532 * new_child_length;
533 * if (new_fill_factor >= inflate_threshold)
534 *
535 * ...and so on, though it would mess up the while () loop.
536 *
537 * anyway,
538 * 100 * (not_to_be_doubled + 2*to_be_doubled) / new_child_length >=
539 * inflate_threshold
540 *
541 * avoid a division:
542 * 100 * (not_to_be_doubled + 2*to_be_doubled) >=
543 * inflate_threshold * new_child_length
544 *
545 * expand not_to_be_doubled and to_be_doubled, and shorten:
546 * 100 * (tnode_child_length(tn) - tn->empty_children +
547 * tn->full_children) >= inflate_threshold * new_child_length
548 *
549 * expand new_child_length:
550 * 100 * (tnode_child_length(tn) - tn->empty_children +
551 * tn->full_children) >=
552 * inflate_threshold * tnode_child_length(tn) * 2
553 *
554 * shorten again:
555 * 50 * (tn->full_children + tnode_child_length(tn) -
556 * tn->empty_children) >= inflate_threshold *
557 * tnode_child_length(tn)
558 *
559 */
560
561 check_tnode(tn);
562
563 /* Keep root node larger */
564
565 if (!tn->parent)
566 inflate_threshold_use = inflate_threshold_root;
567 else
568 inflate_threshold_use = inflate_threshold;
569
570 err = 0;
571 max_resize = 10;
572 while ((tn->full_children > 0 && max_resize-- &&
573 50 * (tn->full_children + tnode_child_length(tn) - tn->empty_children) >=
574 inflate_threshold_use * tnode_child_length(tn))) {
575
576 old_tn = tn;
577 tn = inflate(t, tn);
578 if (IS_ERR(tn)) {
579 tn = old_tn;
580 #ifdef CONFIG_IP_FIB_TRIE_STATS
581 t->stats.resize_node_skipped++;
582 #endif
583 break;
584 }
585 }
586
587 if (max_resize < 0) {
588 if (!tn->parent)
589 printk(KERN_WARNING "Fix inflate_threshold_root. Now=%d size=%d bits\n",
590 inflate_threshold_root, tn->bits);
591 else
592 printk(KERN_WARNING "Fix inflate_threshold. Now=%d size=%d bits\n",
593 inflate_threshold, tn->bits);
594 }
595
596 check_tnode(tn);
597
598 /*
599 * Halve as long as the number of empty children in this
600 * node is above threshold.
601 */
602
603
604 /* Keep root node larger */
605
606 if (!tn->parent)
607 halve_threshold_use = halve_threshold_root;
608 else
609 halve_threshold_use = halve_threshold;
610
611 err = 0;
612 max_resize = 10;
613 while (tn->bits > 1 && max_resize-- &&
614 100 * (tnode_child_length(tn) - tn->empty_children) <
615 halve_threshold_use * tnode_child_length(tn)) {
616
617 old_tn = tn;
618 tn = halve(t, tn);
619 if (IS_ERR(tn)) {
620 tn = old_tn;
621 #ifdef CONFIG_IP_FIB_TRIE_STATS
622 t->stats.resize_node_skipped++;
623 #endif
624 break;
625 }
626 }
627
628 if (max_resize < 0) {
629 if (!tn->parent)
630 printk(KERN_WARNING "Fix halve_threshold_root. Now=%d size=%d bits\n",
631 halve_threshold_root, tn->bits);
632 else
633 printk(KERN_WARNING "Fix halve_threshold. Now=%d size=%d bits\n",
634 halve_threshold, tn->bits);
635 }
636
637 /* Only one child remains */
638 if (tn->empty_children == tnode_child_length(tn) - 1)
639 for (i = 0; i < tnode_child_length(tn); i++) {
640 struct node *n;
641
642 n = tn->child[i];
643 if (!n)
644 continue;
645
646 /* compress one level */
647
648 node_set_parent(n, NULL);
649 tnode_free(tn);
650 return n;
651 }
652
653 return (struct node *) tn;
654 }
655
656 static struct tnode *inflate(struct trie *t, struct tnode *tn)
657 {
658 struct tnode *inode;
659 struct tnode *oldtnode = tn;
660 int olen = tnode_child_length(tn);
661 int i;
662
663 pr_debug("In inflate\n");
664
665 tn = tnode_new(oldtnode->key, oldtnode->pos, oldtnode->bits + 1);
666
667 if (!tn)
668 return ERR_PTR(-ENOMEM);
669
670 /*
671 * Preallocate and store tnodes before the actual work so we
672 * don't get into an inconsistent state if memory allocation
673 * fails. In case of failure we return the old node and the inflate
674 * of the tnode is abandoned.
675 */
676
677 for (i = 0; i < olen; i++) {
678 struct tnode *inode = (struct tnode *) tnode_get_child(oldtnode, i);
679
680 if (inode &&
681 IS_TNODE(inode) &&
682 inode->pos == oldtnode->pos + oldtnode->bits &&
683 inode->bits > 1) {
684 struct tnode *left, *right;
685 t_key m = ~0U << (KEYLENGTH - 1) >> inode->pos;
686
687 left = tnode_new(inode->key&(~m), inode->pos + 1,
688 inode->bits - 1);
689 if (!left)
690 goto nomem;
691
692 right = tnode_new(inode->key|m, inode->pos + 1,
693 inode->bits - 1);
694
695 if (!right) {
696 tnode_free(left);
697 goto nomem;
698 }
699
700 put_child(t, tn, 2*i, (struct node *) left);
701 put_child(t, tn, 2*i+1, (struct node *) right);
702 }
703 }
704
705 for (i = 0; i < olen; i++) {
706 struct node *node = tnode_get_child(oldtnode, i);
707 struct tnode *left, *right;
708 int size, j;
709
710 /* An empty child */
711 if (node == NULL)
712 continue;
713
714 /* A leaf or an internal node with skipped bits */
715
716 if (IS_LEAF(node) || ((struct tnode *) node)->pos >
717 tn->pos + tn->bits - 1) {
718 if (tkey_extract_bits(node->key, oldtnode->pos + oldtnode->bits,
719 1) == 0)
720 put_child(t, tn, 2*i, node);
721 else
722 put_child(t, tn, 2*i+1, node);
723 continue;
724 }
725
726 /* An internal node with two children */
727 inode = (struct tnode *) node;
728
729 if (inode->bits == 1) {
730 put_child(t, tn, 2*i, inode->child[0]);
731 put_child(t, tn, 2*i+1, inode->child[1]);
732
733 tnode_free(inode);
734 continue;
735 }
736
737 /* An internal node with more than two children */
738
739 /* We will replace this node 'inode' with two new
740 * ones, 'left' and 'right', each with half of the
741 * original children. The two new nodes will have
742 * a position one bit further down the key and this
743 * means that the "significant" part of their keys
744 * (see the discussion near the top of this file)
745 * will differ by one bit, which will be "0" in
746 * left's key and "1" in right's key. Since we are
747 * moving the key position by one step, the bit that
748 * we are moving away from - the bit at position
749 * (inode->pos) - is the one that will differ between
750 * left and right. So... we synthesize that bit in the
751 * two new keys.
752 * The mask 'm' below will be a single "one" bit at
753 * the position (inode->pos)
754 */
755
756 /* Use the old key, but set the new significant
757 * bit to zero.
758 */
759
760 left = (struct tnode *) tnode_get_child(tn, 2*i);
761 put_child(t, tn, 2*i, NULL);
762
763 BUG_ON(!left);
764
765 right = (struct tnode *) tnode_get_child(tn, 2*i+1);
766 put_child(t, tn, 2*i+1, NULL);
767
768 BUG_ON(!right);
769
770 size = tnode_child_length(left);
771 for (j = 0; j < size; j++) {
772 put_child(t, left, j, inode->child[j]);
773 put_child(t, right, j, inode->child[j + size]);
774 }
775 put_child(t, tn, 2*i, resize(t, left));
776 put_child(t, tn, 2*i+1, resize(t, right));
777
778 tnode_free(inode);
779 }
780 tnode_free(oldtnode);
781 return tn;
782 nomem:
783 {
784 int size = tnode_child_length(tn);
785 int j;
786
787 for (j = 0; j < size; j++)
788 if (tn->child[j])
789 tnode_free((struct tnode *)tn->child[j]);
790
791 tnode_free(tn);
792
793 return ERR_PTR(-ENOMEM);
794 }
795 }
796
797 static struct tnode *halve(struct trie *t, struct tnode *tn)
798 {
799 struct tnode *oldtnode = tn;
800 struct node *left, *right;
801 int i;
802 int olen = tnode_child_length(tn);
803
804 pr_debug("In halve\n");
805
806 tn = tnode_new(oldtnode->key, oldtnode->pos, oldtnode->bits - 1);
807
808 if (!tn)
809 return ERR_PTR(-ENOMEM);
810
811 /*
812 * Preallocate and store tnodes before the actual work so we
813 * don't get into an inconsistent state if memory allocation
814 * fails. In case of failure we return the old node and the halve
815 * of the tnode is abandoned.
816 */
817
818 for (i = 0; i < olen; i += 2) {
819 left = tnode_get_child(oldtnode, i);
820 right = tnode_get_child(oldtnode, i+1);
821
822 /* Two nonempty children */
823 if (left && right) {
824 struct tnode *newn;
825
826 newn = tnode_new(left->key, tn->pos + tn->bits, 1);
827
828 if (!newn)
829 goto nomem;
830
831 put_child(t, tn, i/2, (struct node *)newn);
832 }
833
834 }
835
836 for (i = 0; i < olen; i += 2) {
837 struct tnode *newBinNode;
838
839 left = tnode_get_child(oldtnode, i);
840 right = tnode_get_child(oldtnode, i+1);
841
842 /* At least one of the children is empty */
843 if (left == NULL) {
844 if (right == NULL) /* Both are empty */
845 continue;
846 put_child(t, tn, i/2, right);
847 continue;
848 }
849
850 if (right == NULL) {
851 put_child(t, tn, i/2, left);
852 continue;
853 }
854
855 /* Two nonempty children */
856 newBinNode = (struct tnode *) tnode_get_child(tn, i/2);
857 put_child(t, tn, i/2, NULL);
858 put_child(t, newBinNode, 0, left);
859 put_child(t, newBinNode, 1, right);
860 put_child(t, tn, i/2, resize(t, newBinNode));
861 }
862 tnode_free(oldtnode);
863 return tn;
864 nomem:
865 {
866 int size = tnode_child_length(tn);
867 int j;
868
869 for (j = 0; j < size; j++)
870 if (tn->child[j])
871 tnode_free((struct tnode *)tn->child[j]);
872
873 tnode_free(tn);
874
875 return ERR_PTR(-ENOMEM);
876 }
877 }
878
879 /* Readers must hold rcu_read_lock; currently the read-side users are
880 the dump routines, via get_fa_head and dump */
881
882 static struct leaf_info *find_leaf_info(struct leaf *l, int plen)
883 {
884 struct hlist_head *head = &l->list;
885 struct hlist_node *node;
886 struct leaf_info *li;
887
888 hlist_for_each_entry_rcu(li, node, head, hlist)
889 if (li->plen == plen)
890 return li;
891
892 return NULL;
893 }
894
895 static inline struct list_head * get_fa_head(struct leaf *l, int plen)
896 {
897 struct leaf_info *li = find_leaf_info(l, plen);
898
899 if (!li)
900 return NULL;
901
902 return &li->falh;
903 }
904
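/* Insert a new leaf_info, keeping the list sorted by prefix length, longest prefix first. */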
905 static void insert_leaf_info(struct hlist_head *head, struct leaf_info *new)
906 {
907 struct leaf_info *li = NULL, *last = NULL;
908 struct hlist_node *node;
909
910 if (hlist_empty(head)) {
911 hlist_add_head_rcu(&new->hlist, head);
912 } else {
913 hlist_for_each_entry(li, node, head, hlist) {
914 if (new->plen > li->plen)
915 break;
916
917 last = li;
918 }
919 if (last)
920 hlist_add_after_rcu(&last->hlist, &new->hlist);
921 else
922 hlist_add_before_rcu(&new->hlist, &li->hlist);
923 }
924 }
925
926 /* rcu_read_lock must be held by the caller on the read side */
927
928 static struct leaf *
929 fib_find_node(struct trie *t, u32 key)
930 {
931 int pos;
932 struct tnode *tn;
933 struct node *n;
934
935 pos = 0;
936 n = rcu_dereference(t->trie);
937
938 while (n != NULL && NODE_TYPE(n) == T_TNODE) {
939 tn = (struct tnode *) n;
940
941 check_tnode(tn);
942
943 if (tkey_sub_equals(tn->key, pos, tn->pos-pos, key)) {
944 pos = tn->pos + tn->bits;
945 n = tnode_get_child(tn, tkey_extract_bits(key, tn->pos, tn->bits));
946 } else
947 break;
948 }
949 /* If we have found a leaf, compare prefixes */
950
951 if (n != NULL && IS_LEAF(n) && tkey_equals(key, n->key))
952 return (struct leaf *)n;
953
954 return NULL;
955 }
956
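/*
 * Walk from tn back up towards the root, resizing each tnode on the way
 * and re-linking the (possibly replaced) node into its parent's child
 * array. Returns the new root of the trie.
 */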
957 static struct node *trie_rebalance(struct trie *t, struct tnode *tn)
958 {
959 int wasfull;
960 t_key cindex, key = tn->key;
961 struct tnode *tp;
962
963 while (tn != NULL && (tp = node_parent((struct node *)tn)) != NULL) {
964 cindex = tkey_extract_bits(key, tp->pos, tp->bits);
965 wasfull = tnode_full(tp, tnode_get_child(tp, cindex));
966 tn = (struct tnode *) resize (t, (struct tnode *)tn);
967 tnode_put_child_reorg((struct tnode *)tp, cindex,(struct node*)tn, wasfull);
968
969 tp = node_parent((struct node *) tn);
970 if (!tp)
971 break;
972 tn = tp;
973 }
974
975 /* Handle last (top) tnode */
976 if (IS_TNODE(tn))
977 tn = (struct tnode*) resize(t, (struct tnode *)tn);
978
979 return (struct node*) tn;
980 }
981
982 /* only used from updater-side */
983
984 static struct list_head *
985 fib_insert_node(struct trie *t, int *err, u32 key, int plen)
986 {
987 int pos, newpos;
988 struct tnode *tp = NULL, *tn = NULL;
989 struct node *n;
990 struct leaf *l;
991 int missbit;
992 struct list_head *fa_head = NULL;
993 struct leaf_info *li;
994 t_key cindex;
995
996 pos = 0;
997 n = t->trie;
998
999 /* If we point to NULL, stop. Either the tree is empty and we should
1000 * just put a new leaf in it, or we have reached an empty child slot,
1001 * and we should just put our new leaf in that.
1002 * If we point to a T_TNODE, check if it matches our key. Note that
1003 * a T_TNODE might be skipping any number of bits - its 'pos' need
1004 * not be the parent's 'pos'+'bits'!
1005 *
1006 * If it does match the current key, get pos/bits from it, extract
1007 * the index from our key, push the T_TNODE and walk the tree.
1008 *
1009 * If it doesn't, we have to replace it with a new T_TNODE.
1010 *
1011 * If we point to a T_LEAF, it might or might not have the same key
1012 * as we do. If it does, we simply update the T_LEAF's value
1013 * and return it.
1014 * If it doesn't, we need to replace it with a T_TNODE.
1015 */
1016
1017 while (n != NULL && NODE_TYPE(n) == T_TNODE) {
1018 tn = (struct tnode *) n;
1019
1020 check_tnode(tn);
1021
1022 if (tkey_sub_equals(tn->key, pos, tn->pos-pos, key)) {
1023 tp = tn;
1024 pos = tn->pos + tn->bits;
1025 n = tnode_get_child(tn, tkey_extract_bits(key, tn->pos, tn->bits));
1026
1027 BUG_ON(n && node_parent(n) != tn);
1028 } else
1029 break;
1030 }
1031
1032 /*
1033 * n ----> NULL, LEAF or TNODE
1034 *
1035 * tp is n's (parent) ----> NULL or TNODE
1036 */
1037
1038 BUG_ON(tp && IS_LEAF(tp));
1039
1040 /* Case 1: n is a leaf. Compare prefixes */
1041
1042 if (n != NULL && IS_LEAF(n) && tkey_equals(key, n->key)) {
1043 struct leaf *l = (struct leaf *) n;
1044
1045 li = leaf_info_new(plen);
1046
1047 if (!li) {
1048 *err = -ENOMEM;
1049 goto err;
1050 }
1051
1052 fa_head = &li->falh;
1053 insert_leaf_info(&l->list, li);
1054 goto done;
1055 }
1056 t->size++;
1057 l = leaf_new();
1058
1059 if (!l) {
1060 *err = -ENOMEM;
1061 goto err;
1062 }
1063
1064 l->key = key;
1065 li = leaf_info_new(plen);
1066
1067 if (!li) {
1068 tnode_free((struct tnode *) l);
1069 *err = -ENOMEM;
1070 goto err;
1071 }
1072
1073 fa_head = &li->falh;
1074 insert_leaf_info(&l->list, li);
1075
1076 if (t->trie && n == NULL) {
1077 /* Case 2: n is NULL, and will just insert a new leaf */
1078
1079 node_set_parent((struct node *)l, tp);
1080
1081 cindex = tkey_extract_bits(key, tp->pos, tp->bits);
1082 put_child(t, (struct tnode *)tp, cindex, (struct node *)l);
1083 } else {
1084 /* Case 3: n is a LEAF or a TNODE and the key doesn't match. */
1085 /*
1086 * Add a new tnode here
1087 * the first tnode needs some special handling
1088 */
1089
1090 if (tp)
1091 pos = tp->pos+tp->bits;
1092 else
1093 pos = 0;
1094
1095 if (n) {
1096 newpos = tkey_mismatch(key, pos, n->key);
1097 tn = tnode_new(n->key, newpos, 1);
1098 } else {
1099 newpos = 0;
1100 tn = tnode_new(key, newpos, 1); /* First tnode */
1101 }
1102
1103 if (!tn) {
1104 free_leaf_info(li);
1105 tnode_free((struct tnode *) l);
1106 *err = -ENOMEM;
1107 goto err;
1108 }
1109
1110 node_set_parent((struct node *)tn, tp);
1111
1112 missbit = tkey_extract_bits(key, newpos, 1);
1113 put_child(t, tn, missbit, (struct node *)l);
1114 put_child(t, tn, 1-missbit, n);
1115
1116 if (tp) {
1117 cindex = tkey_extract_bits(key, tp->pos, tp->bits);
1118 put_child(t, (struct tnode *)tp, cindex, (struct node *)tn);
1119 } else {
1120 rcu_assign_pointer(t->trie, (struct node *)tn); /* First tnode */
1121 tp = tn;
1122 }
1123 }
1124
1125 if (tp && tp->pos + tp->bits > 32)
1126 printk(KERN_WARNING "fib_trie tp=%p pos=%d, bits=%d, key=%0x plen=%d\n",
1127 tp, tp->pos, tp->bits, key, plen);
1128
1129 /* Rebalance the trie */
1130
1131 rcu_assign_pointer(t->trie, trie_rebalance(t, tp));
1132 done:
1133 t->revision++;
1134 err:
1135 return fa_head;
1136 }
1137
1138 /*
1139 * Caller must hold RTNL.
1140 */
1141 static int fn_trie_insert(struct fib_table *tb, struct fib_config *cfg)
1142 {
1143 struct trie *t = (struct trie *) tb->tb_data;
1144 struct fib_alias *fa, *new_fa;
1145 struct list_head *fa_head = NULL;
1146 struct fib_info *fi;
1147 int plen = cfg->fc_dst_len;
1148 u8 tos = cfg->fc_tos;
1149 u32 key, mask;
1150 int err;
1151 struct leaf *l;
1152
1153 if (plen > 32)
1154 return -EINVAL;
1155
1156 key = ntohl(cfg->fc_dst);
1157
1158 pr_debug("Insert table=%u %08x/%d\n", tb->tb_id, key, plen);
1159
1160 mask = ntohl(inet_make_mask(plen));
1161
1162 if (key & ~mask)
1163 return -EINVAL;
1164
1165 key = key & mask;
1166
1167 fi = fib_create_info(cfg);
1168 if (IS_ERR(fi)) {
1169 err = PTR_ERR(fi);
1170 goto err;
1171 }
1172
1173 l = fib_find_node(t, key);
1174 fa = NULL;
1175
1176 if (l) {
1177 fa_head = get_fa_head(l, plen);
1178 fa = fib_find_alias(fa_head, tos, fi->fib_priority);
1179 }
1180
1181 /* Now fa, if non-NULL, points to the first fib alias
1182 * with the same keys [prefix,tos,priority], if such a key already
1183 * exists, or to the node before which we will insert the new one.
1184 *
1185 * If fa is NULL, we will need to allocate a new one and
1186 * insert it at the head of fa_head.
1187 *
1188 * If fa_head is NULL, no fib node matched the destination key
1189 * and we need to allocate a new one of those as well.
1190 */
1191
1192 if (fa && fa->fa_info->fib_priority == fi->fib_priority) {
1193 struct fib_alias *fa_orig;
1194
1195 err = -EEXIST;
1196 if (cfg->fc_nlflags & NLM_F_EXCL)
1197 goto out;
1198
1199 if (cfg->fc_nlflags & NLM_F_REPLACE) {
1200 struct fib_info *fi_drop;
1201 u8 state;
1202
1203 if (fi->fib_treeref > 1)
1204 goto out;
1205
1206 err = -ENOBUFS;
1207 new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL);
1208 if (new_fa == NULL)
1209 goto out;
1210
1211 fi_drop = fa->fa_info;
1212 new_fa->fa_tos = fa->fa_tos;
1213 new_fa->fa_info = fi;
1214 new_fa->fa_type = cfg->fc_type;
1215 new_fa->fa_scope = cfg->fc_scope;
1216 state = fa->fa_state;
1217 new_fa->fa_state &= ~FA_S_ACCESSED;
1218
1219 list_replace_rcu(&fa->fa_list, &new_fa->fa_list);
1220 alias_free_mem_rcu(fa);
1221
1222 fib_release_info(fi_drop);
1223 if (state & FA_S_ACCESSED)
1224 rt_cache_flush(-1);
1225 rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen,
1226 tb->tb_id, &cfg->fc_nlinfo, NLM_F_REPLACE);
1227
1228 goto succeeded;
1229 }
1230 /* Error if we find a perfect match which
1231 * uses the same scope, type, and nexthop
1232 * information.
1233 */
1234 fa_orig = fa;
1235 list_for_each_entry(fa, fa_orig->fa_list.prev, fa_list) {
1236 if (fa->fa_tos != tos)
1237 break;
1238 if (fa->fa_info->fib_priority != fi->fib_priority)
1239 break;
1240 if (fa->fa_type == cfg->fc_type &&
1241 fa->fa_scope == cfg->fc_scope &&
1242 fa->fa_info == fi) {
1243 goto out;
1244 }
1245 }
1246 if (!(cfg->fc_nlflags & NLM_F_APPEND))
1247 fa = fa_orig;
1248 }
1249 err = -ENOENT;
1250 if (!(cfg->fc_nlflags & NLM_F_CREATE))
1251 goto out;
1252
1253 err = -ENOBUFS;
1254 new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL);
1255 if (new_fa == NULL)
1256 goto out;
1257
1258 new_fa->fa_info = fi;
1259 new_fa->fa_tos = tos;
1260 new_fa->fa_type = cfg->fc_type;
1261 new_fa->fa_scope = cfg->fc_scope;
1262 new_fa->fa_state = 0;
1263 /*
1264 * Insert new entry to the list.
1265 */
1266
1267 if (!fa_head) {
1268 err = 0;
1269 fa_head = fib_insert_node(t, &err, key, plen);
1270 if (err)
1271 goto out_free_new_fa;
1272 }
1273
1274 list_add_tail_rcu(&new_fa->fa_list,
1275 (fa ? &fa->fa_list : fa_head));
1276
1277 rt_cache_flush(-1);
1278 rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen, tb->tb_id,
1279 &cfg->fc_nlinfo, 0);
1280 succeeded:
1281 return 0;
1282
1283 out_free_new_fa:
1284 kmem_cache_free(fn_alias_kmem, new_fa);
1285 out:
1286 fib_release_info(fi);
1287 err:
1288 return err;
1289 }
1290
1291
1292 /* should be called with rcu_read_lock */
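/* Returns <= 0 (0 on a successful semantic match, or a negative error) once
 * one of the prefixes stored in leaf 'l' resolves the lookup - *plen and res
 * are then filled in - and 1 when the caller should keep searching. */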
1293 static inline int check_leaf(struct trie *t, struct leaf *l,
1294 t_key key, int *plen, const struct flowi *flp,
1295 struct fib_result *res)
1296 {
1297 int err, i;
1298 __be32 mask;
1299 struct leaf_info *li;
1300 struct hlist_head *hhead = &l->list;
1301 struct hlist_node *node;
1302
1303 hlist_for_each_entry_rcu(li, node, hhead, hlist) {
1304 i = li->plen;
1305 mask = inet_make_mask(i);
1306 if (l->key != (key & ntohl(mask)))
1307 continue;
1308
1309 if ((err = fib_semantic_match(&li->falh, flp, res, htonl(l->key), mask, i)) <= 0) {
1310 *plen = i;
1311 #ifdef CONFIG_IP_FIB_TRIE_STATS
1312 t->stats.semantic_match_passed++;
1313 #endif
1314 return err;
1315 }
1316 #ifdef CONFIG_IP_FIB_TRIE_STATS
1317 t->stats.semantic_match_miss++;
1318 #endif
1319 }
1320 return 1;
1321 }
1322
1323 static int
1324 fn_trie_lookup(struct fib_table *tb, const struct flowi *flp, struct fib_result *res)
1325 {
1326 struct trie *t = (struct trie *) tb->tb_data;
1327 int plen, ret = 0;
1328 struct node *n;
1329 struct tnode *pn;
1330 int pos, bits;
1331 t_key key = ntohl(flp->fl4_dst);
1332 int chopped_off;
1333 t_key cindex = 0;
1334 int current_prefix_length = KEYLENGTH;
1335 struct tnode *cn;
1336 t_key node_prefix, key_prefix, pref_mismatch;
1337 int mp;
1338
1339 rcu_read_lock();
1340
1341 n = rcu_dereference(t->trie);
1342 if (!n)
1343 goto failed;
1344
1345 #ifdef CONFIG_IP_FIB_TRIE_STATS
1346 t->stats.gets++;
1347 #endif
1348
1349 /* Just a leaf? */
1350 if (IS_LEAF(n)) {
1351 if ((ret = check_leaf(t, (struct leaf *)n, key, &plen, flp, res)) <= 0)
1352 goto found;
1353 goto failed;
1354 }
1355 pn = (struct tnode *) n;
1356 chopped_off = 0;
1357
1358 while (pn) {
1359 pos = pn->pos;
1360 bits = pn->bits;
1361
1362 if (!chopped_off)
1363 cindex = tkey_extract_bits(mask_pfx(key, current_prefix_length),
1364 pos, bits);
1365
1366 n = tnode_get_child(pn, cindex);
1367
1368 if (n == NULL) {
1369 #ifdef CONFIG_IP_FIB_TRIE_STATS
1370 t->stats.null_node_hit++;
1371 #endif
1372 goto backtrace;
1373 }
1374
1375 if (IS_LEAF(n)) {
1376 if ((ret = check_leaf(t, (struct leaf *)n, key, &plen, flp, res)) <= 0)
1377 goto found;
1378 else
1379 goto backtrace;
1380 }
1381
1382 #define HL_OPTIMIZE
1383 #ifdef HL_OPTIMIZE
1384 cn = (struct tnode *)n;
1385
1386 /*
1387 * It's a tnode, and we can do some extra checks here if we
1388 * like, to avoid descending into a dead-end branch.
1389 * This tnode is in the parent's child array at index
1390 * key[p_pos..p_pos+p_bits] but potentially with some bits
1391 * chopped off, so in reality the index may be just a
1392 * subprefix, padded with zero at the end.
1393 * We can also take a look at any skipped bits in this
1394 * tnode - everything up to p_pos is supposed to be ok,
1395 * and the non-chopped bits of the index (see the previous
1396 * paragraph) are also guaranteed ok, but the rest is
1397 * considered unknown.
1398 *
1399 * The skipped bits are key[pos+bits..cn->pos].
1400 */
1401
1402 /* If current_prefix_length < pos+bits, we are already doing
1403 * actual prefix matching, which means everything from
1404 * pos+(bits-chopped_off) onward must be zero along some
1405 * branch of this subtree - otherwise there is *no* valid
1406 * prefix present. Here we can only check the skipped
1407 * bits. Remember, since we have already indexed into the
1408 * parent's child array, we know that the bits we chopped off
1409 * *are* zero.
1410 */
1411
1412 /* NOTA BENE: CHECKING ONLY SKIPPED BITS FOR THE NEW NODE HERE */
1413
1414 if (current_prefix_length < pos+bits) {
1415 if (tkey_extract_bits(cn->key, current_prefix_length,
1416 cn->pos - current_prefix_length) != 0 ||
1417 !(cn->child[0]))
1418 goto backtrace;
1419 }
1420
1421 /*
1422 * If chopped_off=0, the index is fully validated and we
1423 * only need to look at the skipped bits for this, the new,
1424 * tnode. What we actually want to do is to find out if
1425 * these skipped bits match our key perfectly, or if we will
1426 * have to count on finding a matching prefix further down,
1427 * because if we do, we would like to have some way of
1428 * verifying the existence of such a prefix at this point.
1429 */
1430
1431 /* The only thing we can do at this point is to verify that
1432 * any such matching prefix can indeed be a prefix to our
1433 * key, and if the bits in the node we are inspecting that
1434 * do not match our key are not ZERO, this cannot be true.
1435 * Thus, find out where there is a mismatch (before cn->pos)
1436 * and verify that all the mismatching bits are zero in the
1437 * new tnode's key.
1438 */
1439
1440 /* Note: We aren't very concerned about the piece of the key
1441 * that precedes pn->pos+pn->bits, since it has already been
1442 * checked. The bits after cn->pos aren't checked since these are
1443 * by definition "unknown" at this point. Thus, what we want to
1444 * see is if we are about to enter the "prefix matching" state,
1445 * and in that case verify that the skipped bits that will prevail
1446 * throughout this subtree are zero, as they have to be if we are
1447 * to find a matching prefix.
1448 */
1449
1450 node_prefix = mask_pfx(cn->key, cn->pos);
1451 key_prefix = mask_pfx(key, cn->pos);
1452 pref_mismatch = key_prefix^node_prefix;
1453 mp = 0;
1454
1455 /* In short: If skipped bits in this node do not match the search
1456 * key, enter the "prefix matching" state directly.
1457 */
1458 if (pref_mismatch) {
1459 while (!(pref_mismatch & (1<<(KEYLENGTH-1)))) {
1460 mp++;
1461 pref_mismatch = pref_mismatch <<1;
1462 }
1463 key_prefix = tkey_extract_bits(cn->key, mp, cn->pos-mp);
1464
1465 if (key_prefix != 0)
1466 goto backtrace;
1467
1468 if (current_prefix_length >= cn->pos)
1469 current_prefix_length = mp;
1470 }
1471 #endif
1472 pn = (struct tnode *)n; /* Descend */
1473 chopped_off = 0;
1474 continue;
1475
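/*
 * The branch we just tried cannot yield a matching prefix. Backtrack by
 * clearing the lowest set bit of the child index - equivalent to retrying
 * with a shorter prefix - and, once all bits of this tnode's index have
 * been cleared, climb to the parent tnode and continue backtracking there.
 */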
1476 backtrace:
1477 chopped_off++;
1478
1479 /* Bits that are zero do not change the child key (cindex), so skip them */
1480 while ((chopped_off <= pn->bits) && !(cindex & (1<<(chopped_off-1))))
1481 chopped_off++;
1482
1483 /* Decrease current_... with bits chopped off */
1484 if (current_prefix_length > pn->pos + pn->bits - chopped_off)
1485 current_prefix_length = pn->pos + pn->bits - chopped_off;
1486
1487 /*
1488 * Either do the actual chop-off here, or, if we have
1489 * chopped off all bits in this tnode, walk up to our parent.
1490 */
1491
1492 if (chopped_off <= pn->bits) {
1493 cindex &= ~(1 << (chopped_off-1));
1494 } else {
1495 struct tnode *parent = node_parent((struct node *) pn);
1496 if (!parent)
1497 goto failed;
1498
1499 /* Get Child's index */
1500 cindex = tkey_extract_bits(pn->key, parent->pos, parent->bits);
1501 pn = parent;
1502 chopped_off = 0;
1503
1504 #ifdef CONFIG_IP_FIB_TRIE_STATS
1505 t->stats.backtrack++;
1506 #endif
1507 goto backtrace;
1508 }
1509 }
1510 failed:
1511 ret = 1;
1512 found:
1513 rcu_read_unlock();
1514 return ret;
1515 }
1516
1517 /* only called from updater side */
1518 static int trie_leaf_remove(struct trie *t, t_key key)
1519 {
1520 t_key cindex;
1521 struct tnode *tp = NULL;
1522 struct node *n = t->trie;
1523 struct leaf *l;
1524
1525 pr_debug("entering trie_leaf_remove(%p)\n", n);
1526
1527 /* Note that in the case of skipped bits, those bits are *not* checked!
1528 * When we finish this, we will have NULL or a T_LEAF, and the
1529 * T_LEAF may or may not match our key.
1530 */
1531
1532 while (n != NULL && IS_TNODE(n)) {
1533 struct tnode *tn = (struct tnode *) n;
1534 check_tnode(tn);
1535 n = tnode_get_child(tn, tkey_extract_bits(key, tn->pos, tn->bits));
1536
1537 BUG_ON(n && node_parent(n) != tn);
1538 }
1539 l = (struct leaf *) n;
1540
1541 if (!n || !tkey_equals(l->key, key))
1542 return 0;
1543
1544 /*
1545 * Key found.
1546 * Remove the leaf and rebalance the tree
1547 */
1548
1549 t->revision++;
1550 t->size--;
1551
1552 tp = node_parent(n);
1553 tnode_free((struct tnode *) n);
1554
1555 if (tp) {
1556 cindex = tkey_extract_bits(key, tp->pos, tp->bits);
1557 put_child(t, (struct tnode *)tp, cindex, NULL);
1558 rcu_assign_pointer(t->trie, trie_rebalance(t, tp));
1559 } else
1560 rcu_assign_pointer(t->trie, NULL);
1561
1562 return 1;
1563 }
1564
1565 /*
1566 * Caller must hold RTNL.
1567 */
1568 static int fn_trie_delete(struct fib_table *tb, struct fib_config *cfg)
1569 {
1570 struct trie *t = (struct trie *) tb->tb_data;
1571 u32 key, mask;
1572 int plen = cfg->fc_dst_len;
1573 u8 tos = cfg->fc_tos;
1574 struct fib_alias *fa, *fa_to_delete;
1575 struct list_head *fa_head;
1576 struct leaf *l;
1577 struct leaf_info *li;
1578
1579 if (plen > 32)
1580 return -EINVAL;
1581
1582 key = ntohl(cfg->fc_dst);
1583 mask = ntohl(inet_make_mask(plen));
1584
1585 if (key & ~mask)
1586 return -EINVAL;
1587
1588 key = key & mask;
1589 l = fib_find_node(t, key);
1590
1591 if (!l)
1592 return -ESRCH;
1593
1594 fa_head = get_fa_head(l, plen);
1595 fa = fib_find_alias(fa_head, tos, 0);
1596
1597 if (!fa)
1598 return -ESRCH;
1599
1600 pr_debug("Deleting %08x/%d tos=%d t=%p\n", key, plen, tos, t);
1601
1602 fa_to_delete = NULL;
1603 fa_head = fa->fa_list.prev;
1604
1605 list_for_each_entry(fa, fa_head, fa_list) {
1606 struct fib_info *fi = fa->fa_info;
1607
1608 if (fa->fa_tos != tos)
1609 break;
1610
1611 if ((!cfg->fc_type || fa->fa_type == cfg->fc_type) &&
1612 (cfg->fc_scope == RT_SCOPE_NOWHERE ||
1613 fa->fa_scope == cfg->fc_scope) &&
1614 (!cfg->fc_protocol ||
1615 fi->fib_protocol == cfg->fc_protocol) &&
1616 fib_nh_match(cfg, fi) == 0) {
1617 fa_to_delete = fa;
1618 break;
1619 }
1620 }
1621
1622 if (!fa_to_delete)
1623 return -ESRCH;
1624
1625 fa = fa_to_delete;
1626 rtmsg_fib(RTM_DELROUTE, htonl(key), fa, plen, tb->tb_id,
1627 &cfg->fc_nlinfo, 0);
1628
1629 l = fib_find_node(t, key);
1630 li = find_leaf_info(l, plen);
1631
1632 list_del_rcu(&fa->fa_list);
1633
1634 if (list_empty(fa_head)) {
1635 hlist_del_rcu(&li->hlist);
1636 free_leaf_info(li);
1637 }
1638
1639 if (hlist_empty(&l->list))
1640 trie_leaf_remove(t, key);
1641
1642 if (fa->fa_state & FA_S_ACCESSED)
1643 rt_cache_flush(-1);
1644
1645 fib_release_info(fa->fa_info);
1646 alias_free_mem_rcu(fa);
1647 return 0;
1648 }
1649
1650 static int trie_flush_list(struct trie *t, struct list_head *head)
1651 {
1652 struct fib_alias *fa, *fa_node;
1653 int found = 0;
1654
1655 list_for_each_entry_safe(fa, fa_node, head, fa_list) {
1656 struct fib_info *fi = fa->fa_info;
1657
1658 if (fi && (fi->fib_flags & RTNH_F_DEAD)) {
1659 list_del_rcu(&fa->fa_list);
1660 fib_release_info(fa->fa_info);
1661 alias_free_mem_rcu(fa);
1662 found++;
1663 }
1664 }
1665 return found;
1666 }
1667
1668 static int trie_flush_leaf(struct trie *t, struct leaf *l)
1669 {
1670 int found = 0;
1671 struct hlist_head *lih = &l->list;
1672 struct hlist_node *node, *tmp;
1673 struct leaf_info *li = NULL;
1674
1675 hlist_for_each_entry_safe(li, node, tmp, lih, hlist) {
1676 found += trie_flush_list(t, &li->falh);
1677
1678 if (list_empty(&li->falh)) {
1679 hlist_del_rcu(&li->hlist);
1680 free_leaf_info(li);
1681 }
1682 }
1683 return found;
1684 }
1685
1686 /* rcu_read_lock must be held by the caller on the read side */
1687
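/*
 * Return the leaf that follows 'thisleaf' in a depth-first walk of the
 * trie, or the first leaf when 'thisleaf' is NULL. Returns NULL once
 * every leaf has been visited.
 */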
1688 static struct leaf *nextleaf(struct trie *t, struct leaf *thisleaf)
1689 {
1690 struct node *c = (struct node *) thisleaf;
1691 struct tnode *p;
1692 int idx;
1693 struct node *trie = rcu_dereference(t->trie);
1694
1695 if (c == NULL) {
1696 if (trie == NULL)
1697 return NULL;
1698
1699 if (IS_LEAF(trie)) /* trie w. just a leaf */
1700 return (struct leaf *) trie;
1701
1702 p = (struct tnode*) trie; /* Start */
1703 } else
1704 p = node_parent(c);
1705
1706 while (p) {
1707 int pos, last;
1708
1709 /* Find the next child of the parent */
1710 if (c)
1711 pos = 1 + tkey_extract_bits(c->key, p->pos, p->bits);
1712 else
1713 pos = 0;
1714
1715 last = 1 << p->bits;
1716 for (idx = pos; idx < last ; idx++) {
1717 c = rcu_dereference(p->child[idx]);
1718
1719 if (!c)
1720 continue;
1721
1722 /* Descend if tnode */
1723 while (IS_TNODE(c)) {
1724 p = (struct tnode *) c;
1725 idx = 0;
1726
1727 /* Find the first (leftmost) non-NULL child */
1728 if (p && IS_TNODE(p))
1729 while (!(c = rcu_dereference(p->child[idx]))
1730 && idx < (1<<p->bits)) idx++;
1731
1732 /* Done with this tnode? */
1733 if (idx >= (1 << p->bits) || !c)
1734 goto up;
1735 }
1736 return (struct leaf *) c;
1737 }
1738 up:
1739 /* No more children; go up one step */
1740 c = (struct node *) p;
1741 p = node_parent(c);
1742 }
1743 return NULL; /* Ready. Root of trie */
1744 }
1745
1746 /*
1747 * Caller must hold RTNL.
1748 */
1749 static int fn_trie_flush(struct fib_table *tb)
1750 {
1751 struct trie *t = (struct trie *) tb->tb_data;
1752 struct leaf *ll = NULL, *l = NULL;
1753 int found = 0, h;
1754
1755 t->revision++;
1756
1757 for (h = 0; (l = nextleaf(t, l)) != NULL; h++) {
1758 found += trie_flush_leaf(t, l);
1759
1760 if (ll && hlist_empty(&ll->list))
1761 trie_leaf_remove(t, ll->key);
1762 ll = l;
1763 }
1764
1765 if (ll && hlist_empty(&ll->list))
1766 trie_leaf_remove(t, ll->key);
1767
1768 pr_debug("trie_flush found=%d\n", found);
1769 return found;
1770 }
1771
1772 static void
1773 fn_trie_select_default(struct fib_table *tb, const struct flowi *flp, struct fib_result *res)
1774 {
1775 struct trie *t = (struct trie *) tb->tb_data;
1776 int order, last_idx;
1777 struct fib_info *fi = NULL;
1778 struct fib_info *last_resort;
1779 struct fib_alias *fa = NULL;
1780 struct list_head *fa_head;
1781 struct leaf *l;
1782
1783 last_idx = -1;
1784 last_resort = NULL;
1785 order = -1;
1786
1787 rcu_read_lock();
1788
1789 l = fib_find_node(t, 0);
1790 if (!l)
1791 goto out;
1792
1793 fa_head = get_fa_head(l, 0);
1794 if (!fa_head)
1795 goto out;
1796
1797 if (list_empty(fa_head))
1798 goto out;
1799
1800 list_for_each_entry_rcu(fa, fa_head, fa_list) {
1801 struct fib_info *next_fi = fa->fa_info;
1802
1803 if (fa->fa_scope != res->scope ||
1804 fa->fa_type != RTN_UNICAST)
1805 continue;
1806
1807 if (next_fi->fib_priority > res->fi->fib_priority)
1808 break;
1809 if (!next_fi->fib_nh[0].nh_gw ||
1810 next_fi->fib_nh[0].nh_scope != RT_SCOPE_LINK)
1811 continue;
1812 fa->fa_state |= FA_S_ACCESSED;
1813
1814 if (fi == NULL) {
1815 if (next_fi != res->fi)
1816 break;
1817 } else if (!fib_detect_death(fi, order, &last_resort,
1818 &last_idx, tb->tb_default)) {
1819 fib_result_assign(res, fi);
1820 tb->tb_default = order;
1821 goto out;
1822 }
1823 fi = next_fi;
1824 order++;
1825 }
1826 if (order <= 0 || fi == NULL) {
1827 tb->tb_default = -1;
1828 goto out;
1829 }
1830
1831 if (!fib_detect_death(fi, order, &last_resort, &last_idx,
1832 tb->tb_default)) {
1833 fib_result_assign(res, fi);
1834 tb->tb_default = order;
1835 goto out;
1836 }
1837 if (last_idx >= 0)
1838 fib_result_assign(res, last_resort);
1839 tb->tb_default = last_idx;
1840 out:
1841 rcu_read_unlock();
1842 }
1843
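/*
 * Netlink dump state: cb->args[2] records the current prefix-length index
 * (m), cb->args[3] the current leaf index (h) and cb->args[4] the current
 * alias index (i), so that an interrupted dump can resume where it stopped.
 */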
1844 static int fn_trie_dump_fa(t_key key, int plen, struct list_head *fah, struct fib_table *tb,
1845 struct sk_buff *skb, struct netlink_callback *cb)
1846 {
1847 int i, s_i;
1848 struct fib_alias *fa;
1849
1850 __be32 xkey = htonl(key);
1851
1852 s_i = cb->args[4];
1853 i = 0;
1854
1855 /* rcu_read_lock is held by the caller */
1856
1857 list_for_each_entry_rcu(fa, fah, fa_list) {
1858 if (i < s_i) {
1859 i++;
1860 continue;
1861 }
1862 BUG_ON(!fa->fa_info);
1863
1864 if (fib_dump_info(skb, NETLINK_CB(cb->skb).pid,
1865 cb->nlh->nlmsg_seq,
1866 RTM_NEWROUTE,
1867 tb->tb_id,
1868 fa->fa_type,
1869 fa->fa_scope,
1870 xkey,
1871 plen,
1872 fa->fa_tos,
1873 fa->fa_info, 0) < 0) {
1874 cb->args[4] = i;
1875 return -1;
1876 }
1877 i++;
1878 }
1879 cb->args[4] = i;
1880 return skb->len;
1881 }
1882
1883 static int fn_trie_dump_plen(struct trie *t, int plen, struct fib_table *tb, struct sk_buff *skb,
1884 struct netlink_callback *cb)
1885 {
1886 int h, s_h;
1887 struct list_head *fa_head;
1888 struct leaf *l = NULL;
1889
1890 s_h = cb->args[3];
1891
1892 for (h = 0; (l = nextleaf(t, l)) != NULL; h++) {
1893 if (h < s_h)
1894 continue;
1895 if (h > s_h)
1896 memset(&cb->args[4], 0,
1897 sizeof(cb->args) - 4*sizeof(cb->args[0]));
1898
1899 fa_head = get_fa_head(l, plen);
1900
1901 if (!fa_head)
1902 continue;
1903
1904 if (list_empty(fa_head))
1905 continue;
1906
1907 if (fn_trie_dump_fa(l->key, plen, fa_head, tb, skb, cb)<0) {
1908 cb->args[3] = h;
1909 return -1;
1910 }
1911 }
1912 cb->args[3] = h;
1913 return skb->len;
1914 }
1915
1916 static int fn_trie_dump(struct fib_table *tb, struct sk_buff *skb, struct netlink_callback *cb)
1917 {
1918 int m, s_m;
1919 struct trie *t = (struct trie *) tb->tb_data;
1920
1921 s_m = cb->args[2];
1922
1923 rcu_read_lock();
1924 for (m = 0; m <= 32; m++) {
1925 if (m < s_m)
1926 continue;
1927 if (m > s_m)
1928 memset(&cb->args[3], 0,
1929 sizeof(cb->args) - 3*sizeof(cb->args[0]));
1930
1931 if (fn_trie_dump_plen(t, 32-m, tb, skb, cb)<0) {
1932 cb->args[2] = m;
1933 goto out;
1934 }
1935 }
1936 rcu_read_unlock();
1937 cb->args[2] = m;
1938 return skb->len;
1939 out:
1940 rcu_read_unlock();
1941 return -1;
1942 }
1943
1944 /* Fix more generic FIB names for init later */
1945
1946 struct fib_table *fib_hash_init(u32 id)
1947 {
1948 struct fib_table *tb;
1949 struct trie *t;
1950
1951 if (fn_alias_kmem == NULL)
1952 fn_alias_kmem = kmem_cache_create("ip_fib_alias",
1953 sizeof(struct fib_alias),
1954 0, SLAB_HWCACHE_ALIGN,
1955 NULL);
1956
1957 tb = kmalloc(sizeof(struct fib_table) + sizeof(struct trie),
1958 GFP_KERNEL);
1959 if (tb == NULL)
1960 return NULL;
1961
1962 tb->tb_id = id;
1963 tb->tb_default = -1;
1964 tb->tb_lookup = fn_trie_lookup;
1965 tb->tb_insert = fn_trie_insert;
1966 tb->tb_delete = fn_trie_delete;
1967 tb->tb_flush = fn_trie_flush;
1968 tb->tb_select_default = fn_trie_select_default;
1969 tb->tb_dump = fn_trie_dump;
1970
1971 t = (struct trie *) tb->tb_data;
1972 memset(t, 0, sizeof(*t));
1973
1974 if (id == RT_TABLE_LOCAL)
1975 printk(KERN_INFO "IPv4 FIB: Using LC-trie version %s\n", VERSION);
1976
1977 return tb;
1978 }
1979
1980 #ifdef CONFIG_PROC_FS
1981 /* Depth first Trie walk iterator */
1982 struct fib_trie_iter {
1983 struct seq_net_private p;
1984 struct trie *trie_local, *trie_main;
1985 struct tnode *tnode;
1986 struct trie *trie;
1987 unsigned index;
1988 unsigned depth;
1989 };
1990
1991 static struct node *fib_trie_get_next(struct fib_trie_iter *iter)
1992 {
1993 struct tnode *tn = iter->tnode;
1994 unsigned cindex = iter->index;
1995 struct tnode *p;
1996
1997 /* A single entry routing table */
1998 if (!tn)
1999 return NULL;
2000
2001 pr_debug("get_next iter={node=%p index=%d depth=%d}\n",
2002 iter->tnode, iter->index, iter->depth);
2003 rescan:
2004 while (cindex < (1<<tn->bits)) {
2005 struct node *n = tnode_get_child(tn, cindex);
2006
2007 if (n) {
2008 if (IS_LEAF(n)) {
2009 iter->tnode = tn;
2010 iter->index = cindex + 1;
2011 } else {
2012 /* push down one level */
2013 iter->tnode = (struct tnode *) n;
2014 iter->index = 0;
2015 ++iter->depth;
2016 }
2017 return n;
2018 }
2019
2020 ++cindex;
2021 }
2022
2023 /* Current node exhausted, pop back up */
2024 p = node_parent((struct node *)tn);
2025 if (p) {
2026 cindex = tkey_extract_bits(tn->key, p->pos, p->bits)+1;
2027 tn = p;
2028 --iter->depth;
2029 goto rescan;
2030 }
2031
2032 /* got root? */
2033 return NULL;
2034 }
2035
2036 static struct node *fib_trie_get_first(struct fib_trie_iter *iter,
2037 struct trie *t)
2038 {
2039 struct node *n;
2040
2041 if (!t)
2042 return NULL;
2043
2044 n = rcu_dereference(t->trie);
2045
2046 if (!iter)
2047 return NULL;
2048
2049 if (n) {
2050 if (IS_TNODE(n)) {
2051 iter->tnode = (struct tnode *) n;
2052 iter->trie = t;
2053 iter->index = 0;
2054 iter->depth = 1;
2055 } else {
2056 iter->tnode = NULL;
2057 iter->trie = t;
2058 iter->index = 0;
2059 iter->depth = 0;
2060 }
2061 return n;
2062 }
2063 return NULL;
2064 }
2065
2066 static void trie_collect_stats(struct trie *t, struct trie_stat *s)
2067 {
2068 struct node *n;
2069 struct fib_trie_iter iter;
2070
2071 memset(s, 0, sizeof(*s));
2072
2073 rcu_read_lock();
2074 for (n = fib_trie_get_first(&iter, t); n;
2075 n = fib_trie_get_next(&iter)) {
2076 if (IS_LEAF(n)) {
2077 s->leaves++;
2078 s->totdepth += iter.depth;
2079 if (iter.depth > s->maxdepth)
2080 s->maxdepth = iter.depth;
2081 } else {
2082 const struct tnode *tn = (const struct tnode *) n;
2083 int i;
2084
2085 s->tnodes++;
2086 if (tn->bits < MAX_STAT_DEPTH)
2087 s->nodesizes[tn->bits]++;
2088
2089 for (i = 0; i < (1<<tn->bits); i++)
2090 if (!tn->child[i])
2091 s->nullpointers++;
2092 }
2093 }
2094 rcu_read_unlock();
2095 }
2096
2097 /*
2098 * This outputs /proc/net/fib_triestats
2099 */
2100 static void trie_show_stats(struct seq_file *seq, struct trie_stat *stat)
2101 {
2102 unsigned i, max, pointers, bytes, avdepth;
2103
2104 if (stat->leaves)
2105 avdepth = stat->totdepth*100 / stat->leaves;
2106 else
2107 avdepth = 0;
2108
2109 seq_printf(seq, "\tAver depth: %d.%02d\n", avdepth / 100, avdepth % 100 );
2110 seq_printf(seq, "\tMax depth: %u\n", stat->maxdepth);
2111
2112 seq_printf(seq, "\tLeaves: %u\n", stat->leaves);
2113
2114 bytes = sizeof(struct leaf) * stat->leaves;
2115 seq_printf(seq, "\tInternal nodes: %d\n\t", stat->tnodes);
2116 bytes += sizeof(struct tnode) * stat->tnodes;
2117
2118 max = MAX_STAT_DEPTH;
2119 while (max > 0 && stat->nodesizes[max-1] == 0)
2120 max--;
2121
2122 pointers = 0;
2123 for (i = 1; i <= max; i++)
2124 if (stat->nodesizes[i] != 0) {
2125 seq_printf(seq, " %d: %d", i, stat->nodesizes[i]);
2126 pointers += (1<<i) * stat->nodesizes[i];
2127 }
2128 seq_putc(seq, '\n');
2129 seq_printf(seq, "\tPointers: %d\n", pointers);
2130
2131 bytes += sizeof(struct node *) * pointers;
2132 seq_printf(seq, "Null ptrs: %d\n", stat->nullpointers);
2133 seq_printf(seq, "Total size: %d kB\n", (bytes + 1023) / 1024);
2134
2135 #ifdef CONFIG_IP_FIB_TRIE_STATS	/* FIXME: 't' is not in scope in this function */
2136 	seq_printf(seq, "Counters:\n---------\n");
2137 	seq_printf(seq, "gets = %d\n", t->stats.gets);
2138 	seq_printf(seq, "backtracks = %d\n", t->stats.backtrack);
2139 	seq_printf(seq, "semantic match passed = %d\n", t->stats.semantic_match_passed);
2140 	seq_printf(seq, "semantic match miss = %d\n", t->stats.semantic_match_miss);
2141 	seq_printf(seq, "null node hit = %d\n", t->stats.null_node_hit);
2142 	seq_printf(seq, "skipped node resize = %d\n", t->stats.resize_node_skipped);
2143 #ifdef CLEAR_STATS
2144 	memset(&t->stats, 0, sizeof(t->stats));
2145 #endif
2146 #endif /* CONFIG_IP_FIB_TRIE_STATS */
2147 }
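/*
 * Editorial note (not in the original source): the "Aver depth" value above
 * is fixed-point, scaled by 100 so it can be computed and printed with
 * integer arithmetic only.  Worked example with assumed numbers (1024
 * leaves, summed depth 2317):
 *
 *	avdepth = 2317 * 100 / 1024 = 226
 *	seq_printf(seq, "\tAver depth: %d.%02d\n", 226 / 100, 226 % 100);
 *		-> "Aver depth: 2.26"
 */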
2148
2149 static int fib_triestat_seq_show(struct seq_file *seq, void *v)
2150 {
2151 struct net *net = (struct net *)seq->private;
2152 struct trie *trie_local, *trie_main;
2153 struct trie_stat *stat;
2154 struct fib_table *tb;
2155
2156 trie_local = NULL;
2157 tb = fib_get_table(net, RT_TABLE_LOCAL);
2158 if (tb)
2159 trie_local = (struct trie *) tb->tb_data;
2160
2161 trie_main = NULL;
2162 tb = fib_get_table(net, RT_TABLE_MAIN);
2163 if (tb)
2164 trie_main = (struct trie *) tb->tb_data;
2165 
2167 stat = kmalloc(sizeof(*stat), GFP_KERNEL);
2168 if (!stat)
2169 return -ENOMEM;
2170
2171 	seq_printf(seq, "Basic info: size of leaf: %zd bytes, size of tnode: %zd bytes.\n",
2172 sizeof(struct leaf), sizeof(struct tnode));
2173
2174 if (trie_local) {
2175 seq_printf(seq, "Local:\n");
2176 trie_collect_stats(trie_local, stat);
2177 trie_show_stats(seq, stat);
2178 }
2179
2180 if (trie_main) {
2181 seq_printf(seq, "Main:\n");
2182 trie_collect_stats(trie_main, stat);
2183 trie_show_stats(seq, stat);
2184 }
2185 kfree(stat);
2186
2187 return 0;
2188 }
2189
2190 static int fib_triestat_seq_open(struct inode *inode, struct file *file)
2191 {
2192 int err;
2193 struct net *net;
2194
2195 net = get_proc_net(inode);
2196 if (net == NULL)
2197 return -ENXIO;
2198 err = single_open(file, fib_triestat_seq_show, net);
2199 if (err < 0) {
2200 put_net(net);
2201 return err;
2202 }
2203 return 0;
2204 }
2205
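/*
 * Editorial note (not in the original source): the put_net() in the release
 * handler below drops the reference taken by get_proc_net() in
 * fib_triestat_seq_open(), so every successful open is balanced on close.
 */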
2206 static int fib_triestat_seq_release(struct inode *ino, struct file *f)
2207 {
2208 struct seq_file *seq = f->private_data;
2209 put_net(seq->private);
2210 return single_release(ino, f);
2211 }
2212
2213 static const struct file_operations fib_triestat_fops = {
2214 .owner = THIS_MODULE,
2215 .open = fib_triestat_seq_open,
2216 .read = seq_read,
2217 .llseek = seq_lseek,
2218 .release = fib_triestat_seq_release,
2219 };
2220
2221 static struct node *fib_trie_get_idx(struct fib_trie_iter *iter,
2222 loff_t pos)
2223 {
2224 loff_t idx = 0;
2225 struct node *n;
2226
2227 for (n = fib_trie_get_first(iter, iter->trie_local);
2228 n; ++idx, n = fib_trie_get_next(iter)) {
2229 if (pos == idx)
2230 return n;
2231 }
2232
2233 for (n = fib_trie_get_first(iter, iter->trie_main);
2234 n; ++idx, n = fib_trie_get_next(iter)) {
2235 if (pos == idx)
2236 return n;
2237 }
2238 return NULL;
2239 }
2240
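/*
 * Editorial note (not in the original source): seq_file position mapping for
 * the ->start/->next callbacks below.  *pos == 0 yields SEQ_START_TOKEN
 * (used by fib_route_seq_show() to emit its header line); *pos == N + 1 maps
 * to the N-th node of the depth-first walk over the local trie followed by
 * the main trie, which is exactly what fib_trie_get_idx() above counts.
 */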
2241 static void *fib_trie_seq_start(struct seq_file *seq, loff_t *pos)
2242 {
2243 struct fib_trie_iter *iter = seq->private;
2244 struct fib_table *tb;
2245
2246 if (!iter->trie_local) {
2247 tb = fib_get_table(iter->p.net, RT_TABLE_LOCAL);
2248 if (tb)
2249 iter->trie_local = (struct trie *) tb->tb_data;
2250 }
2251 if (!iter->trie_main) {
2252 tb = fib_get_table(iter->p.net, RT_TABLE_MAIN);
2253 if (tb)
2254 iter->trie_main = (struct trie *) tb->tb_data;
2255 }
2256 rcu_read_lock();
2257 if (*pos == 0)
2258 return SEQ_START_TOKEN;
2259 return fib_trie_get_idx(iter, *pos - 1);
2260 }
2261
2262 static void *fib_trie_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2263 {
2264 struct fib_trie_iter *iter = seq->private;
2265 void *l = v;
2266
2267 ++*pos;
2268 if (v == SEQ_START_TOKEN)
2269 return fib_trie_get_idx(iter, 0);
2270
2271 v = fib_trie_get_next(iter);
2272 BUG_ON(v == l);
2273 if (v)
2274 return v;
2275
2276 /* continue scan in next trie */
2277 if (iter->trie == iter->trie_local)
2278 return fib_trie_get_first(iter, iter->trie_main);
2279
2280 return NULL;
2281 }
2282
2283 static void fib_trie_seq_stop(struct seq_file *seq, void *v)
2284 {
2285 rcu_read_unlock();
2286 }
2287
2288 static void seq_indent(struct seq_file *seq, int n)
2289 {
2290 while (n-- > 0) seq_puts(seq, " ");
2291 }
2292
2293 static inline const char *rtn_scope(enum rt_scope_t s)
2294 {
2295 static char buf[32];
2296
2297 switch (s) {
2298 case RT_SCOPE_UNIVERSE: return "universe";
2299 case RT_SCOPE_SITE: return "site";
2300 case RT_SCOPE_LINK: return "link";
2301 case RT_SCOPE_HOST: return "host";
2302 case RT_SCOPE_NOWHERE: return "nowhere";
2303 default:
2304 snprintf(buf, sizeof(buf), "scope=%d", s);
2305 return buf;
2306 }
2307 }
2308
2309 static const char *rtn_type_names[__RTN_MAX] = {
2310 [RTN_UNSPEC] = "UNSPEC",
2311 [RTN_UNICAST] = "UNICAST",
2312 [RTN_LOCAL] = "LOCAL",
2313 [RTN_BROADCAST] = "BROADCAST",
2314 [RTN_ANYCAST] = "ANYCAST",
2315 [RTN_MULTICAST] = "MULTICAST",
2316 [RTN_BLACKHOLE] = "BLACKHOLE",
2317 [RTN_UNREACHABLE] = "UNREACHABLE",
2318 [RTN_PROHIBIT] = "PROHIBIT",
2319 [RTN_THROW] = "THROW",
2320 [RTN_NAT] = "NAT",
2321 [RTN_XRESOLVE] = "XRESOLVE",
2322 };
2323
2324 static inline const char *rtn_type(unsigned t)
2325 {
2326 static char buf[32];
2327
2328 if (t < __RTN_MAX && rtn_type_names[t])
2329 return rtn_type_names[t];
2330 snprintf(buf, sizeof(buf), "type %d", t);
2331 return buf;
2332 }
2333
2334 /* Pretty print the trie */
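/*
 * Editorial note (not in the original source): illustrative output only,
 * derived from the format strings below; addresses, counters and exact
 * indentation are made up.  Internal nodes print "key/pos bits full empty",
 * leaves print their key plus one "/plen scope type" line per fib_alias:
 *
 *	<main>:
 *	 +-- 0.0.0.0/0 4 2 3
 *	    |-- 10.1.2.0
 *	       /24 link UNICAST
 *	    |-- 192.168.0.0
 *	       /16 universe UNICAST
 */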
2335 static int fib_trie_seq_show(struct seq_file *seq, void *v)
2336 {
2337 const struct fib_trie_iter *iter = seq->private;
2338 struct node *n = v;
2339
2340 if (v == SEQ_START_TOKEN)
2341 return 0;
2342
2343 if (!node_parent(n)) {
2344 if (iter->trie == iter->trie_local)
2345 seq_puts(seq, "<local>:\n");
2346 else
2347 seq_puts(seq, "<main>:\n");
2348 }
2349
2350 if (IS_TNODE(n)) {
2351 struct tnode *tn = (struct tnode *) n;
2352 __be32 prf = htonl(mask_pfx(tn->key, tn->pos));
2353
2354 seq_indent(seq, iter->depth-1);
2355 seq_printf(seq, " +-- %d.%d.%d.%d/%d %d %d %d\n",
2356 NIPQUAD(prf), tn->pos, tn->bits, tn->full_children,
2357 tn->empty_children);
2358
2359 } else {
2360 struct leaf *l = (struct leaf *) n;
2361 int i;
2362 __be32 val = htonl(l->key);
2363
2364 seq_indent(seq, iter->depth);
2365 seq_printf(seq, " |-- %d.%d.%d.%d\n", NIPQUAD(val));
2366 for (i = 32; i >= 0; i--) {
2367 struct leaf_info *li = find_leaf_info(l, i);
2368 if (li) {
2369 struct fib_alias *fa;
2370 list_for_each_entry_rcu(fa, &li->falh, fa_list) {
2371 seq_indent(seq, iter->depth+1);
2372 seq_printf(seq, " /%d %s %s", i,
2373 rtn_scope(fa->fa_scope),
2374 rtn_type(fa->fa_type));
2375 if (fa->fa_tos)
2376 					seq_printf(seq, " tos=%d",
2377 						   fa->fa_tos);
2378 seq_putc(seq, '\n');
2379 }
2380 }
2381 }
2382 }
2383
2384 return 0;
2385 }
2386
2387 static const struct seq_operations fib_trie_seq_ops = {
2388 .start = fib_trie_seq_start,
2389 .next = fib_trie_seq_next,
2390 .stop = fib_trie_seq_stop,
2391 .show = fib_trie_seq_show,
2392 };
2393
2394 static int fib_trie_seq_open(struct inode *inode, struct file *file)
2395 {
2396 return seq_open_net(inode, file, &fib_trie_seq_ops,
2397 sizeof(struct fib_trie_iter));
2398 }
2399
2400 static const struct file_operations fib_trie_fops = {
2401 .owner = THIS_MODULE,
2402 .open = fib_trie_seq_open,
2403 .read = seq_read,
2404 .llseek = seq_lseek,
2405 .release = seq_release_net,
2406 };
2407
2408 static unsigned fib_flag_trans(int type, __be32 mask, const struct fib_info *fi)
2409 {
2410 static unsigned type2flags[RTN_MAX + 1] = {
2411 		[RTN_UNREACHABLE] = RTF_REJECT, [RTN_PROHIBIT] = RTF_REJECT,
2412 };
2413 unsigned flags = type2flags[type];
2414
2415 if (fi && fi->fib_nh->nh_gw)
2416 flags |= RTF_GATEWAY;
2417 if (mask == htonl(0xFFFFFFFF))
2418 flags |= RTF_HOST;
2419 flags |= RTF_UP;
2420 return flags;
2421 }
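/*
 * Editorial note (not in the original source): worked example of the flag
 * translation above, using the RTF_* values from <linux/route.h>
 * (RTF_UP 0x0001, RTF_GATEWAY 0x0002, RTF_HOST 0x0004, RTF_REJECT 0x0200):
 *
 *	default route via a gateway:  RTF_UP | RTF_GATEWAY            = 0x0003
 *	/32 route via a gateway:      RTF_UP | RTF_GATEWAY | RTF_HOST = 0x0007
 *	RTN_UNREACHABLE route:        RTF_UP | RTF_REJECT             = 0x0201
 *
 * which is what appears in the %04X "Flags" column written by
 * fib_route_seq_show() below.
 */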
2422
2423 /*
2424 * This outputs /proc/net/route.
2425 * The format of the file is not supposed to be changed
2426 * and needs to be same as fib_hash output to avoid breaking
2427 * legacy utilities
2428 */
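/*
 * Editorial note (not in the original source): a plausible tab-separated
 * line for a default route via 192.168.1.1 on eth0, as emitted by the
 * snprintf() below.  Addresses are printed with %08X from the __be32 value,
 * so they appear byte-swapped on little-endian hosts:
 *
 *	eth0	00000000	0101A8C0	0003	0	0	0	00000000	0	0	0
 */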
2429 static int fib_route_seq_show(struct seq_file *seq, void *v)
2430 {
2431 const struct fib_trie_iter *iter = seq->private;
2432 struct leaf *l = v;
2433 int i;
2434 char bf[128];
2435
2436 if (v == SEQ_START_TOKEN) {
2437 seq_printf(seq, "%-127s\n", "Iface\tDestination\tGateway "
2438 "\tFlags\tRefCnt\tUse\tMetric\tMask\t\tMTU"
2439 "\tWindow\tIRTT");
2440 return 0;
2441 }
2442
2443 if (iter->trie == iter->trie_local)
2444 return 0;
2445 if (IS_TNODE(l))
2446 return 0;
2447
2448 	for (i = 32; i >= 0; i--) {
2449 struct leaf_info *li = find_leaf_info(l, i);
2450 struct fib_alias *fa;
2451 __be32 mask, prefix;
2452
2453 if (!li)
2454 continue;
2455
2456 mask = inet_make_mask(li->plen);
2457 prefix = htonl(l->key);
2458
2459 list_for_each_entry_rcu(fa, &li->falh, fa_list) {
2460 const struct fib_info *fi = fa->fa_info;
2461 unsigned flags = fib_flag_trans(fa->fa_type, mask, fi);
2462
2463 if (fa->fa_type == RTN_BROADCAST
2464 || fa->fa_type == RTN_MULTICAST)
2465 continue;
2466
2467 if (fi)
2468 snprintf(bf, sizeof(bf),
2469 "%s\t%08X\t%08X\t%04X\t%d\t%u\t%d\t%08X\t%d\t%u\t%u",
2470 fi->fib_dev ? fi->fib_dev->name : "*",
2471 prefix,
2472 fi->fib_nh->nh_gw, flags, 0, 0,
2473 fi->fib_priority,
2474 mask,
2475 (fi->fib_advmss ? fi->fib_advmss + 40 : 0),
2476 fi->fib_window,
2477 fi->fib_rtt >> 3);
2478 else
2479 snprintf(bf, sizeof(bf),
2480 "*\t%08X\t%08X\t%04X\t%d\t%u\t%d\t%08X\t%d\t%u\t%u",
2481 prefix, 0, flags, 0, 0, 0,
2482 mask, 0, 0, 0);
2483
2484 seq_printf(seq, "%-127s\n", bf);
2485 }
2486 }
2487
2488 return 0;
2489 }
2490
2491 static const struct seq_operations fib_route_seq_ops = {
2492 .start = fib_trie_seq_start,
2493 .next = fib_trie_seq_next,
2494 .stop = fib_trie_seq_stop,
2495 .show = fib_route_seq_show,
2496 };
2497
2498 static int fib_route_seq_open(struct inode *inode, struct file *file)
2499 {
2500 return seq_open_net(inode, file, &fib_route_seq_ops,
2501 sizeof(struct fib_trie_iter));
2502 }
2503
2504 static const struct file_operations fib_route_fops = {
2505 .owner = THIS_MODULE,
2506 .open = fib_route_seq_open,
2507 .read = seq_read,
2508 .llseek = seq_lseek,
2509 .release = seq_release_net,
2510 };
2511
2512 int __net_init fib_proc_init(struct net *net)
2513 {
2514 if (!proc_net_fops_create(net, "fib_trie", S_IRUGO, &fib_trie_fops))
2515 goto out1;
2516
2517 if (!proc_net_fops_create(net, "fib_triestat", S_IRUGO,
2518 &fib_triestat_fops))
2519 goto out2;
2520
2521 if (!proc_net_fops_create(net, "route", S_IRUGO, &fib_route_fops))
2522 goto out3;
2523
2524 return 0;
2525
2526 out3:
2527 proc_net_remove(net, "fib_triestat");
2528 out2:
2529 proc_net_remove(net, "fib_trie");
2530 out1:
2531 return -ENOMEM;
2532 }
2533
2534 void __net_exit fib_proc_exit(struct net *net)
2535 {
2536 proc_net_remove(net, "fib_trie");
2537 proc_net_remove(net, "fib_triestat");
2538 proc_net_remove(net, "route");
2539 }
2540
2541 #endif /* CONFIG_PROC_FS */