1/*
2 * This program is free software; you can redistribute it and/or
3 * modify it under the terms of the GNU General Public License
4 * as published by the Free Software Foundation; either version
5 * 2 of the License, or (at your option) any later version.
6 *
7 * Robert Olsson <robert.olsson@its.uu.se> Uppsala Universitet
8 * & Swedish University of Agricultural Sciences.
9 *
10 * Jens Laas <jens.laas@data.slu.se> Swedish University of
11 * Agricultural Sciences.
12 *
13 * Hans Liss <hans.liss@its.uu.se> Uppsala Universitet
14 *
15 * This work is based on the LPC-trie which was originally described in:
16 *
17 * An experimental study of compression methods for dynamic tries
18 * Stefan Nilsson and Matti Tikkanen. Algorithmica, 33(1):19-33, 2002.
19 * http://www.csc.kth.se/~snilsson/software/dyntrie2/
20 *
21 *
22 * IP-address lookup using LC-tries. Stefan Nilsson and Gunnar Karlsson
23 * IEEE Journal on Selected Areas in Communications, 17(6):1083-1092, June 1999
24 *
25 *
26 * Code from fib_hash has been reused which includes the following header:
27 *
28 *
29 * INET An implementation of the TCP/IP protocol suite for the LINUX
30 * operating system. INET is implemented using the BSD Socket
31 * interface as the means of communication with the user level.
32 *
33 * IPv4 FIB: lookup engine and maintenance routines.
34 *
35 *
36 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
37 *
38 * This program is free software; you can redistribute it and/or
39 * modify it under the terms of the GNU General Public License
40 * as published by the Free Software Foundation; either version
41 * 2 of the License, or (at your option) any later version.
42 *
43 * Substantial contributions to this work come from:
44 *
45 * David S. Miller, <davem@davemloft.net>
46 * Stephen Hemminger <shemminger@osdl.org>
47 * Paul E. McKenney <paulmck@us.ibm.com>
48 * Patrick McHardy <kaber@trash.net>
49 */
50
51#define VERSION "0.409"
52
53#include <asm/uaccess.h>
54#include <linux/bitops.h>
55#include <linux/types.h>
56#include <linux/kernel.h>
57#include <linux/mm.h>
58#include <linux/string.h>
59#include <linux/socket.h>
60#include <linux/sockios.h>
61#include <linux/errno.h>
62#include <linux/in.h>
63#include <linux/inet.h>
64#include <linux/inetdevice.h>
65#include <linux/netdevice.h>
66#include <linux/if_arp.h>
67#include <linux/proc_fs.h>
68#include <linux/rcupdate.h>
69#include <linux/skbuff.h>
70#include <linux/netlink.h>
71#include <linux/init.h>
72#include <linux/list.h>
73#include <linux/slab.h>
74#include <linux/export.h>
75#include <net/net_namespace.h>
76#include <net/ip.h>
77#include <net/protocol.h>
78#include <net/route.h>
79#include <net/tcp.h>
80#include <net/sock.h>
81#include <net/ip_fib.h>
82#include "fib_lookup.h"
83
84#define MAX_STAT_DEPTH 32
85
86#define KEYLENGTH (8*sizeof(t_key))
87
88typedef unsigned int t_key;
89
90#define IS_TNODE(n) ((n)->bits)
91#define IS_LEAF(n) (!(n)->bits)
92
93#define get_index(_key, _kv) (((_key) ^ (_kv)->key) >> (_kv)->pos)
94
95struct tnode {
96 t_key key;
97 unsigned char bits; /* 2log(KEYLENGTH) bits needed */
98 unsigned char pos; /* 2log(KEYLENGTH) bits needed */
99 unsigned char slen;
100 struct tnode __rcu *parent;
101 struct rcu_head rcu;
102 union {
103 /* The fields in this struct are valid if bits > 0 (TNODE) */
104 struct {
105 unsigned int full_children; /* KEYLENGTH bits needed */
106 unsigned int empty_children; /* KEYLENGTH bits needed */
107 struct tnode __rcu *child[0];
108 };
109 /* This list pointer is valid if bits == 0 (LEAF) */
110 struct hlist_head list;
111 };
112};
113
114struct leaf_info {
115 struct hlist_node hlist;
116 int plen;
117 u32 mask_plen; /* ntohl(inet_make_mask(plen)) */
118 struct list_head falh;
119 struct rcu_head rcu;
120};
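/* A brief worked example of how mask_plen is used by the lookup path
 * (the prefix length below is chosen purely for illustration): for
 * plen = 24, mask_plen = ntohl(inet_make_mask(24)) = 0xffffff00, so a
 * host-order key matches the /24 prefix stored in leaf 'l' exactly when
 * ((key ^ l->key) & li->mask_plen) == 0.
 */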
121
122#ifdef CONFIG_IP_FIB_TRIE_STATS
123struct trie_use_stats {
124 unsigned int gets;
125 unsigned int backtrack;
126 unsigned int semantic_match_passed;
127 unsigned int semantic_match_miss;
128 unsigned int null_node_hit;
129 unsigned int resize_node_skipped;
130};
131#endif
132
133struct trie_stat {
134 unsigned int totdepth;
135 unsigned int maxdepth;
136 unsigned int tnodes;
137 unsigned int leaves;
138 unsigned int nullpointers;
139 unsigned int prefixes;
140 unsigned int nodesizes[MAX_STAT_DEPTH];
141};
142
143struct trie {
144 struct tnode __rcu *trie;
145#ifdef CONFIG_IP_FIB_TRIE_STATS
146 struct trie_use_stats __percpu *stats;
147#endif
148};
149
150static void resize(struct trie *t, struct tnode *tn);
151static size_t tnode_free_size;
152
153/*
154 * synchronize_rcu after call_rcu for that many pages; it should be especially
155 * useful before resizing the root node with PREEMPT_NONE configs; the value was
156 * obtained experimentally, aiming to avoid visible slowdown.
157 */
158static const int sync_pages = 128;
159
160static struct kmem_cache *fn_alias_kmem __read_mostly;
161static struct kmem_cache *trie_leaf_kmem __read_mostly;
162
163/* caller must hold RTNL */
164#define node_parent(n) rtnl_dereference((n)->parent)
165
166/* caller must hold RCU read lock or RTNL */
167#define node_parent_rcu(n) rcu_dereference_rtnl((n)->parent)
168
169/* wrapper for rcu_assign_pointer */
170static inline void node_set_parent(struct tnode *n, struct tnode *tp)
171{
172 if (n)
173 rcu_assign_pointer(n->parent, tp);
174}
175
176#define NODE_INIT_PARENT(n, p) RCU_INIT_POINTER((n)->parent, p)
177
178/* This provides us with the number of children in this node; in the case of a
179 * leaf this will return 0, meaning none of the children are accessible.
180 */
181static inline unsigned long tnode_child_length(const struct tnode *tn)
182{
183 return (1ul << tn->bits) & ~(1ul);
184}
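/* A short worked example of the expression above (the sizes are chosen
 * only for illustration): a leaf has bits == 0, so (1ul << 0) & ~1ul == 0
 * and no children are reported; a tnode with bits == 4 yields
 * (1ul << 4) & ~1ul == 16 child slots.
 */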
185
186/* caller must hold RTNL */
187static inline struct tnode *tnode_get_child(const struct tnode *tn,
188 unsigned long i)
189{
190 return rtnl_dereference(tn->child[i]);
191}
192
193/* caller must hold RCU read lock or RTNL */
194static inline struct tnode *tnode_get_child_rcu(const struct tnode *tn,
195 unsigned long i)
196{
197 return rcu_dereference_rtnl(tn->child[i]);
198}
199
200/* To understand this stuff, an understanding of keys and all their bits is
201 * necessary. Every node in the trie has a key associated with it, but not
202 * all of the bits in that key are significant.
203 *
204 * Consider a node 'n' and its parent 'tp'.
205 *
206 * If n is a leaf, every bit in its key is significant. Its presence is
207 * necessitated by path compression, since during a tree traversal (when
208 * searching for a leaf - unless we are doing an insertion) we will completely
209 * ignore all skipped bits we encounter. Thus we need to verify, at the end of
210 * a potentially successful search, that we have indeed been walking the
211 * correct key path.
212 *
213 * Note that we can never "miss" the correct key in the tree if present by
214 * following the wrong path. Path compression ensures that segments of the key
215 * that are the same for all keys with a given prefix are skipped, but the
216 * skipped part *is* identical for each node in the subtrie below the skipped
217 * bit! trie_insert() in this implementation takes care of that.
218 *
219 * if n is an internal node - a 'tnode' here, the various parts of its key
220 * have many different meanings.
221 *
222 * Example:
223 * _________________________________________________________________
224 * | i | i | i | i | i | i | i | N | N | N | S | S | S | S | S | C |
225 * -----------------------------------------------------------------
226 * 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16
227 *
228 * _________________________________________________________________
229 * | C | C | C | u | u | u | u | u | u | u | u | u | u | u | u | u |
230 * -----------------------------------------------------------------
231 * 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0
232 *
233 * tp->pos = 22
234 * tp->bits = 3
235 * n->pos = 13
236 * n->bits = 4
237 *
238 * First, let's just ignore the bits that come before the parent tp, that is
239 * the bits from (tp->pos + tp->bits) to 31. They are *known* but at this
240 * point we do not use them for anything.
241 *
242 * The bits from (tp->pos) to (tp->pos + tp->bits - 1) - "N", above - are the
243 * index into the parent's child array. That is, they will be used to find
244 * 'n' among tp's children.
245 *
246 * The bits from (n->pos + n->bits) to (tp->pos - 1) - "S" - are skipped bits
247 * for the node n.
248 *
249 * All the bits we have seen so far are significant to the node n. The rest
250 * of the bits are really not needed or indeed known in n->key.
251 *
252 * The bits from (n->pos) to (n->pos + n->bits - 1) - "C" - are the index into
253 * n's child array, and will of course be different for each child.
254 *
255 * The rest of the bits, from 0 to (n->pos - 1), are completely unknown
256 * at this point.
257 */
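/* A small worked example of get_index() using the figures above (the
 * numbers are the ones from the example, not from live data): with
 * tp->pos = 22 and tp->bits = 3, the index of n in tp's child array is
 *
 *	index = (key ^ tp->key) >> tp->pos;
 *
 * For a key that really lies below tp only the "N" bits 24..22 can
 * differ, so index is a value in 0..7, i.e. one of tp's 1 << 3 = 8
 * children. If a higher bit differed, index >> tp->bits would be
 * non-zero, which is exactly the mismatch test the lookup and insert
 * paths below rely on.
 */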
258
259static const int halve_threshold = 25;
260static const int inflate_threshold = 50;
261static const int halve_threshold_root = 15;
262static const int inflate_threshold_root = 30;
263
264static void __alias_free_mem(struct rcu_head *head)
265{
266 struct fib_alias *fa = container_of(head, struct fib_alias, rcu);
267 kmem_cache_free(fn_alias_kmem, fa);
268}
269
270static inline void alias_free_mem_rcu(struct fib_alias *fa)
271{
272 call_rcu(&fa->rcu, __alias_free_mem);
273}
274
275#define TNODE_KMALLOC_MAX \
276 ilog2((PAGE_SIZE - sizeof(struct tnode)) / sizeof(struct tnode *))
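/* Rough illustration (assuming 4 KiB pages, 8-byte pointers and a
 * struct tnode of around 40 bytes; the exact figures vary by build):
 * (4096 - 40) / 8 = 507 child pointers, so TNODE_KMALLOC_MAX is
 * ilog2(507) = 8. Nodes with bits <= 8 are kmalloc/kfree'd, anything
 * larger falls back to vmalloc/vfree, which lines up with the
 * size <= PAGE_SIZE test in tnode_alloc() below.
 */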
277
278static void __node_free_rcu(struct rcu_head *head)
279{
280 struct tnode *n = container_of(head, struct tnode, rcu);
281
282 if (IS_LEAF(n))
283 kmem_cache_free(trie_leaf_kmem, n);
284 else if (n->bits <= TNODE_KMALLOC_MAX)
285 kfree(n);
286 else
287 vfree(n);
288}
289
290#define node_free(n) call_rcu(&n->rcu, __node_free_rcu)
291
292static inline void free_leaf_info(struct leaf_info *leaf)
293{
294 kfree_rcu(leaf, rcu);
295}
296
297static struct tnode *tnode_alloc(size_t size)
298{
299 if (size <= PAGE_SIZE)
300 return kzalloc(size, GFP_KERNEL);
301 else
302 return vzalloc(size);
303}
304
305static struct tnode *leaf_new(t_key key)
306{
307 struct tnode *l = kmem_cache_alloc(trie_leaf_kmem, GFP_KERNEL);
308 if (l) {
309 l->parent = NULL;
310 /* set key and pos to reflect the full key value;
311 * any trailing zeros in the key should be ignored
312 * as the nodes are searched
313 */
314 l->key = key;
315 l->slen = 0;
316 l->pos = 0;
317 /* set bits to 0 indicating we are not a tnode */
318 l->bits = 0;
319
320 INIT_HLIST_HEAD(&l->list);
321 }
322 return l;
323}
324
325static struct leaf_info *leaf_info_new(int plen)
326{
327 struct leaf_info *li = kmalloc(sizeof(struct leaf_info), GFP_KERNEL);
328 if (li) {
329 li->plen = plen;
330 li->mask_plen = ntohl(inet_make_mask(plen));
331 INIT_LIST_HEAD(&li->falh);
332 }
333 return li;
334}
335
336static struct tnode *tnode_new(t_key key, int pos, int bits)
337{
338 size_t sz = offsetof(struct tnode, child[1 << bits]);
339 struct tnode *tn = tnode_alloc(sz);
340 unsigned int shift = pos + bits;
341
342 /* verify that bits is non-zero and that pos + bits stays within the key */
343 BUG_ON(!bits || (shift > KEYLENGTH));
344
345 if (tn) {
346 tn->parent = NULL;
347 tn->slen = pos;
348 tn->pos = pos;
349 tn->bits = bits;
350 tn->key = (shift < KEYLENGTH) ? (key >> shift) << shift : 0;
351 tn->full_children = 0;
352 tn->empty_children = 1<<bits;
353 }
354
355 pr_debug("AT %p s=%zu %zu\n", tn, sizeof(struct tnode),
356 sizeof(struct tnode *) << bits);
357 return tn;
358}
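/* A short example of the key masking above (values chosen for
 * illustration): for key = 0xc0a80101, pos = 8 and bits = 4 the shift is
 * 12, so tn->key becomes (0xc0a80101 >> 12) << 12 = 0xc0a80000, i.e. all
 * bits below the node's child index field are cleared.
 */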
359
360/* Check whether a tnode 'n' is "full", i.e. it is an internal node
361 * and no bits are skipped. See discussion in dyntree paper p. 6
362 */
363static inline int tnode_full(const struct tnode *tn, const struct tnode *n)
364{
365 return n && ((n->pos + n->bits) == tn->pos) && IS_TNODE(n);
366}
367
ff181ed8
AD
368/* Add a child at position i overwriting the old value.
369 * Update the value of full_children and empty_children.
370 */
371static void put_child(struct tnode *tn, unsigned long i, struct tnode *n)
19baf839 372{
21d1f11d 373 struct tnode *chi = tnode_get_child(tn, i);
ff181ed8 374 int isfull, wasfull;
19baf839 375
98293e8d 376 BUG_ON(i >= tnode_child_length(tn));
0c7770c7 377
19baf839
RO
378 /* update emptyChildren */
379 if (n == NULL && chi != NULL)
380 tn->empty_children++;
381 else if (n != NULL && chi == NULL)
382 tn->empty_children--;
c877efb2 383
19baf839 384 /* update fullChildren */
ff181ed8 385 wasfull = tnode_full(tn, chi);
19baf839 386 isfull = tnode_full(tn, n);
ff181ed8 387
c877efb2 388 if (wasfull && !isfull)
19baf839 389 tn->full_children--;
c877efb2 390 else if (!wasfull && isfull)
19baf839 391 tn->full_children++;
91b9a277 392
5405afd1
AD
393 if (n && (tn->slen < n->slen))
394 tn->slen = n->slen;
395
cf778b00 396 rcu_assign_pointer(tn->child[i], n);
19baf839
RO
397}
398
836a0123
AD
399static void put_child_root(struct tnode *tp, struct trie *t,
400 t_key key, struct tnode *n)
401{
402 if (tp)
403 put_child(tp, get_index(key, tp), n);
404 else
405 rcu_assign_pointer(t->trie, n);
406}
407
fc86a93b 408static inline void tnode_free_init(struct tnode *tn)
0a5c0475 409{
fc86a93b
AD
410 tn->rcu.next = NULL;
411}
412
413static inline void tnode_free_append(struct tnode *tn, struct tnode *n)
414{
415 n->rcu.next = tn->rcu.next;
416 tn->rcu.next = &n->rcu;
417}
0a5c0475 418
fc86a93b
AD
419static void tnode_free(struct tnode *tn)
420{
421 struct callback_head *head = &tn->rcu;
422
423 while (head) {
424 head = head->next;
425 tnode_free_size += offsetof(struct tnode, child[1 << tn->bits]);
426 node_free(tn);
427
428 tn = container_of(head, struct tnode, rcu);
429 }
430
431 if (tnode_free_size >= PAGE_SIZE * sync_pages) {
432 tnode_free_size = 0;
433 synchronize_rcu();
0a5c0475 434 }
435}
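/* With sync_pages = 128 and a typical 4 KiB PAGE_SIZE (illustrative
 * figures only) the check above forces a synchronize_rcu() once roughly
 * 512 KiB of tnode memory is queued for freeing, bounding how much memory
 * can be held up by pending RCU callbacks.
 */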
436
ff181ed8 437static int inflate(struct trie *t, struct tnode *oldtnode)
19baf839 438{
12c081a5
AD
439 struct tnode *inode, *node0, *node1, *tn, *tp;
440 unsigned long i, j, k;
e9b44019 441 t_key m;
19baf839 442
0c7770c7 443 pr_debug("In inflate\n");
19baf839 444
e9b44019 445 tn = tnode_new(oldtnode->key, oldtnode->pos - 1, oldtnode->bits + 1);
0c7770c7 446 if (!tn)
ff181ed8 447 return -ENOMEM;
2f36895a 448
12c081a5
AD
449 /* Assemble all of the pointers in our cluster, in this case that
450 * represents all of the pointers out of our allocated nodes that
451 * point to existing tnodes and the links between our allocated
452 * nodes.
2f36895a 453 */
12c081a5
AD
454 for (i = tnode_child_length(oldtnode), m = 1u << tn->pos; i;) {
455 inode = tnode_get_child(oldtnode, --i);
c877efb2 456
19baf839 457 /* An empty child */
adaf9816 458 if (inode == NULL)
19baf839
RO
459 continue;
460
461 /* A leaf or an internal node with skipped bits */
adaf9816 462 if (!tnode_full(oldtnode, inode)) {
e9b44019 463 put_child(tn, get_index(inode->key, tn), inode);
19baf839
RO
464 continue;
465 }
466
467 /* An internal node with two children */
19baf839 468 if (inode->bits == 1) {
12c081a5
AD
469 put_child(tn, 2 * i + 1, tnode_get_child(inode, 1));
470 put_child(tn, 2 * i, tnode_get_child(inode, 0));
91b9a277 471 continue;
19baf839
RO
472 }
473
91b9a277 474 /* We will replace this node 'inode' with two new
12c081a5 475 * ones, 'node0' and 'node1', each with half of the
91b9a277
OJ
476 * original children. The two new nodes will have
477 * a position one bit further down the key and this
478 * means that the "significant" part of their keys
479 * (see the discussion near the top of this file)
480 * will differ by one bit, which will be "0" in
12c081a5 481 * node0's key and "1" in node1's key. Since we are
91b9a277
OJ
482 * moving the key position by one step, the bit that
483 * we are moving away from - the bit at position
12c081a5
AD
484 * (tn->pos) - is the one that will differ between
485 * node0 and node1. So... we synthesize that bit in the
486 * two new keys.
91b9a277 487 */
12c081a5
AD
488 node1 = tnode_new(inode->key | m, inode->pos, inode->bits - 1);
489 if (!node1)
490 goto nomem;
491 tnode_free_append(tn, node1);
492
493 node0 = tnode_new(inode->key & ~m, inode->pos, inode->bits - 1);
494 if (!node0)
495 goto nomem;
496 tnode_free_append(tn, node0);
497
498 /* populate child pointers in new nodes */
499 for (k = tnode_child_length(inode), j = k / 2; j;) {
500 put_child(node1, --j, tnode_get_child(inode, --k));
501 put_child(node0, j, tnode_get_child(inode, j));
502 put_child(node1, --j, tnode_get_child(inode, --k));
503 put_child(node0, j, tnode_get_child(inode, j));
504 }
19baf839 505
12c081a5
AD
506 /* link new nodes to parent */
507 NODE_INIT_PARENT(node1, tn);
508 NODE_INIT_PARENT(node0, tn);
2f36895a 509
12c081a5
AD
510 /* link parent to nodes */
511 put_child(tn, 2 * i + 1, node1);
512 put_child(tn, 2 * i, node0);
513 }
2f36895a 514
12c081a5
AD
515 /* setup the parent pointer into and out of this node */
516 tp = node_parent(oldtnode);
517 NODE_INIT_PARENT(tn, tp);
518 put_child_root(tp, t, tn->key, tn);
519
520 /* prepare oldtnode to be freed */
521 tnode_free_init(oldtnode);
522
523 /* update all child nodes parent pointers to route to us */
524 for (i = tnode_child_length(oldtnode); i;) {
525 inode = tnode_get_child(oldtnode, --i);
526
527 /* A leaf or an internal node with skipped bits */
528 if (!tnode_full(oldtnode, inode)) {
529 node_set_parent(inode, tn);
530 continue;
531 }
2f36895a 532
12c081a5
AD
533 /* drop the node in the old tnode free list */
534 tnode_free_append(oldtnode, inode);
19baf839 535
12c081a5
AD
536 /* fetch new nodes */
537 node1 = tnode_get_child(tn, 2 * i + 1);
538 node0 = tnode_get_child(tn, 2 * i);
19baf839 539
12c081a5
AD
540 /* bits == 1 then node0 and node1 represent inode's children */
541 if (inode->bits == 1) {
542 node_set_parent(node1, tn);
543 node_set_parent(node0, tn);
544 continue;
19baf839 545 }
ff181ed8 546
12c081a5
AD
547 /* update parent pointers in child node's children */
548 for (k = tnode_child_length(inode), j = k / 2; j;) {
549 node_set_parent(tnode_get_child(inode, --k), node1);
550 node_set_parent(tnode_get_child(inode, --j), node0);
551 node_set_parent(tnode_get_child(inode, --k), node1);
552 node_set_parent(tnode_get_child(inode, --j), node0);
553 }
91b9a277 554
fc86a93b 555 /* resize child nodes */
12c081a5
AD
556 resize(t, node1);
557 resize(t, node0);
19baf839 558 }
ff181ed8 559
fc86a93b
AD
560 /* we completed without error, prepare to free old node */
561 tnode_free(oldtnode);
ff181ed8 562 return 0;
2f80b3c8 563nomem:
fc86a93b
AD
564 /* all pointers should be clean so we are done */
565 tnode_free(tn);
ff181ed8 566 return -ENOMEM;
19baf839
RO
567}
568
ff181ed8 569static int halve(struct trie *t, struct tnode *oldtnode)
19baf839 570{
12c081a5
AD
571 struct tnode *tn, *tp, *inode, *node0, *node1;
572 unsigned long i;
19baf839 573
0c7770c7 574 pr_debug("In halve\n");
c877efb2 575
e9b44019 576 tn = tnode_new(oldtnode->key, oldtnode->pos + 1, oldtnode->bits - 1);
2f80b3c8 577 if (!tn)
ff181ed8 578 return -ENOMEM;
2f36895a 579
12c081a5
AD
580 /* Assemble all of the pointers in our cluster, in this case that
581 * represents all of the pointers out of our allocated nodes that
582 * point to existing tnodes and the links between our allocated
583 * nodes.
2f36895a 584 */
12c081a5
AD
585 for (i = tnode_child_length(oldtnode); i;) {
586 node1 = tnode_get_child(oldtnode, --i);
587 node0 = tnode_get_child(oldtnode, --i);
2f36895a 588
12c081a5
AD
589 /* At least one of the children is empty */
590 if (!node1 || !node0) {
591 put_child(tn, i / 2, node1 ? : node0);
592 continue;
593 }
c877efb2 594
2f36895a 595 /* Two nonempty children */
12c081a5
AD
596 inode = tnode_new(node0->key, oldtnode->pos, 1);
597 if (!inode) {
598 tnode_free(tn);
599 return -ENOMEM;
2f36895a 600 }
12c081a5 601 tnode_free_append(tn, inode);
2f36895a 602
12c081a5
AD
603 /* initialize pointers out of node */
604 put_child(inode, 1, node1);
605 put_child(inode, 0, node0);
606 NODE_INIT_PARENT(inode, tn);
607
608 /* link parent to node */
609 put_child(tn, i / 2, inode);
2f36895a 610 }
19baf839 611
12c081a5
AD
612 /* setup the parent pointer out of and back into this node */
613 tp = node_parent(oldtnode);
614 NODE_INIT_PARENT(tn, tp);
615 put_child_root(tp, t, tn->key, tn);
616
fc86a93b
AD
617 /* prepare oldtnode to be freed */
618 tnode_free_init(oldtnode);
619
12c081a5
AD
620 /* update all of the child parent pointers */
621 for (i = tnode_child_length(tn); i;) {
622 inode = tnode_get_child(tn, --i);
c877efb2 623
12c081a5
AD
624 /* only new tnodes will be considered "full" nodes */
625 if (!tnode_full(tn, inode)) {
626 node_set_parent(inode, tn);
91b9a277
OJ
627 continue;
628 }
c877efb2 629
19baf839 630 /* Two nonempty children */
12c081a5
AD
631 node_set_parent(tnode_get_child(inode, 1), inode);
632 node_set_parent(tnode_get_child(inode, 0), inode);
ff181ed8 633
fc86a93b 634 /* resize child node */
12c081a5 635 resize(t, inode);
19baf839 636 }
ff181ed8 637
fc86a93b
AD
638 /* all pointers should be clean so we are done */
639 tnode_free(oldtnode);
ff181ed8
AD
640
641 return 0;
19baf839
RO
642}
643
5405afd1
AD
644static unsigned char update_suffix(struct tnode *tn)
645{
646 unsigned char slen = tn->pos;
647 unsigned long stride, i;
648
649 /* search though the list of children looking for nodes that might
650 * have a suffix greater than the one we currently have. This is
651 * why we start with a stride of 2 since a stride of 1 would
652 * represent the nodes with suffix length equal to tn->pos
653 */
654 for (i = 0, stride = 0x2ul ; i < tnode_child_length(tn); i += stride) {
655 struct tnode *n = tnode_get_child(tn, i);
656
657 if (!n || (n->slen <= slen))
658 continue;
659
660 /* update stride and slen based on new value */
661 stride <<= (n->slen - slen);
662 slen = n->slen;
663 i &= ~(stride - 1);
664
665 /* if slen covers all but the last bit we can stop here
666 * there will be nothing longer than that since only node
667 * 0 and 1 << (bits - 1) could have that as their suffix
668 * length.
669 */
670 if ((slen + 1) >= (tn->pos + tn->bits))
671 break;
672 }
673
674 tn->slen = slen;
675
676 return slen;
677}
678
679/* From "Implementing a dynamic compressed trie" by Stefan Nilsson of
680 * the Helsinki University of Technology and Matti Tikkanen of Nokia
681 * Telecommunications, page 6:
682 * "A node is doubled if the ratio of non-empty children to all
683 * children in the *doubled* node is at least 'high'."
684 *
685 * 'high' in this instance is the variable 'inflate_threshold'. It
686 * is expressed as a percentage, so we multiply it with
687 * tnode_child_length() and instead of multiplying by 2 (since the
688 * child array will be doubled by inflate()) and multiplying
689 * the left-hand side by 100 (to handle the percentage thing) we
690 * multiply the left-hand side by 50.
691 *
692 * The left-hand side may look a bit weird: tnode_child_length(tn)
693 * - tn->empty_children is of course the number of non-null children
694 * in the current node. tn->full_children is the number of "full"
695 * children, that is non-null tnodes with a skip value of 0.
696 * All of those will be doubled in the resulting inflated tnode, so
697 * we just count them one extra time here.
698 *
699 * A clearer way to write this would be:
700 *
701 * to_be_doubled = tn->full_children;
702 * not_to_be_doubled = tnode_child_length(tn) - tn->empty_children -
703 * tn->full_children;
704 *
705 * new_child_length = tnode_child_length(tn) * 2;
706 *
707 * new_fill_factor = 100 * (not_to_be_doubled + 2*to_be_doubled) /
708 * new_child_length;
709 * if (new_fill_factor >= inflate_threshold)
710 *
711 * ...and so on, though it would mess up the while () loop.
712 *
713 * anyway,
714 * 100 * (not_to_be_doubled + 2*to_be_doubled) / new_child_length >=
715 * inflate_threshold
716 *
717 * avoid a division:
718 * 100 * (not_to_be_doubled + 2*to_be_doubled) >=
719 * inflate_threshold * new_child_length
720 *
721 * expand not_to_be_doubled and to_be_doubled, and shorten:
722 * 100 * (tnode_child_length(tn) - tn->empty_children +
723 * tn->full_children) >= inflate_threshold * new_child_length
724 *
725 * expand new_child_length:
726 * 100 * (tnode_child_length(tn) - tn->empty_children +
727 * tn->full_children) >=
728 * inflate_threshold * tnode_child_length(tn) * 2
729 *
730 * shorten again:
731 * 50 * (tn->full_children + tnode_child_length(tn) -
732 * tn->empty_children) >= inflate_threshold *
733 * tnode_child_length(tn)
734 *
735 */
736static bool should_inflate(const struct tnode *tp, const struct tnode *tn)
737{
738 unsigned long used = tnode_child_length(tn);
739 unsigned long threshold = used;
740
741 /* Keep root node larger */
742 threshold *= tp ? inflate_threshold : inflate_threshold_root;
743 used += tn->full_children;
744 used -= tn->empty_children;
745
746 return tn->pos && ((50 * used) >= threshold);
747}
748
749static bool should_halve(const struct tnode *tp, const struct tnode *tn)
750{
751 unsigned long used = tnode_child_length(tn);
752 unsigned long threshold = used;
753
754 /* Keep root node larger */
755 threshold *= tp ? halve_threshold : halve_threshold_root;
756 used -= tn->empty_children;
757
758 return (tn->bits > 1) && ((100 * used) < threshold);
759}
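/* A quick numeric check of the two predicates above (the node size is
 * picked only for illustration): for a non-root tnode with pos > 0 and
 * bits = 4, tnode_child_length() is 16 and the inflate threshold is
 * 16 * 50 = 800, so should_inflate() fires once
 * 50 * (16 - empty_children + full_children) >= 800, i.e. once
 * full_children >= empty_children. With halve_threshold = 25 the halve
 * test 100 * (16 - empty_children) < 16 * 25 only fires when more than
 * 12 of the 16 slots are empty.
 */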
760
cf3637bb 761#define MAX_WORK 10
ff181ed8 762static void resize(struct trie *t, struct tnode *tn)
cf3637bb 763{
ff181ed8
AD
764 struct tnode *tp = node_parent(tn), *n = NULL;
765 struct tnode __rcu **cptr;
cf3637bb
AD
766 int max_work;
767
cf3637bb
AD
768 pr_debug("In tnode_resize %p inflate_threshold=%d threshold=%d\n",
769 tn, inflate_threshold, halve_threshold);
770
ff181ed8
AD
771 /* track the tnode via the pointer from the parent instead of
772 * doing it ourselves. This way we can let RCU fully do its
773 * thing without us interfering
774 */
775 cptr = tp ? &tp->child[get_index(tn->key, tp)] : &t->trie;
776 BUG_ON(tn != rtnl_dereference(*cptr));
777
cf3637bb
AD
778 /* No children */
779 if (tn->empty_children > (tnode_child_length(tn) - 1))
780 goto no_children;
781
782 /* One child */
783 if (tn->empty_children == (tnode_child_length(tn) - 1))
784 goto one_child;
cf3637bb 785
f05a4819
AD
786 /* Double as long as the resulting node has a number of
787 * nonempty nodes that are above the threshold.
cf3637bb 788 */
cf3637bb 789 max_work = MAX_WORK;
ff181ed8
AD
790 while (should_inflate(tp, tn) && max_work--) {
791 if (inflate(t, tn)) {
cf3637bb
AD
792#ifdef CONFIG_IP_FIB_TRIE_STATS
793 this_cpu_inc(t->stats->resize_node_skipped);
794#endif
795 break;
796 }
ff181ed8
AD
797
798 tn = rtnl_dereference(*cptr);
cf3637bb
AD
799 }
800
801 /* Return if at least one inflate is run */
802 if (max_work != MAX_WORK)
ff181ed8 803 return;
cf3637bb 804
f05a4819 805 /* Halve as long as the number of empty children in this
cf3637bb
AD
806 * node is above threshold.
807 */
cf3637bb 808 max_work = MAX_WORK;
ff181ed8
AD
809 while (should_halve(tp, tn) && max_work--) {
810 if (halve(t, tn)) {
cf3637bb
AD
811#ifdef CONFIG_IP_FIB_TRIE_STATS
812 this_cpu_inc(t->stats->resize_node_skipped);
813#endif
814 break;
815 }
cf3637bb 816
ff181ed8
AD
817 tn = rtnl_dereference(*cptr);
818 }
cf3637bb
AD
819
820 /* Only one child remains */
821 if (tn->empty_children == (tnode_child_length(tn) - 1)) {
822 unsigned long i;
823one_child:
824 for (i = tnode_child_length(tn); !n && i;)
825 n = tnode_get_child(tn, --i);
826no_children:
827 /* compress one level */
ff181ed8
AD
828 put_child_root(tp, t, tn->key, n);
829 node_set_parent(n, tp);
830
831 /* drop dead node */
fc86a93b
AD
832 tnode_free_init(tn);
833 tnode_free(tn);
5405afd1
AD
834 return;
835 }
836
837 /* Return if at least one deflate was run */
838 if (max_work != MAX_WORK)
839 return;
840
841 /* push the suffix length to the parent node */
842 if (tn->slen > tn->pos) {
843 unsigned char slen = update_suffix(tn);
844
845 if (tp && (slen > tp->slen))
846 tp->slen = slen;
cf3637bb 847 }
cf3637bb
AD
848}
849
772cb712 850/* readside must use rcu_read_lock currently dump routines
2373ce1c
RO
851 via get_fa_head and dump */
852
adaf9816 853static struct leaf_info *find_leaf_info(struct tnode *l, int plen)
19baf839 854{
772cb712 855 struct hlist_head *head = &l->list;
19baf839
RO
856 struct leaf_info *li;
857
b67bfe0d 858 hlist_for_each_entry_rcu(li, head, hlist)
c877efb2 859 if (li->plen == plen)
19baf839 860 return li;
91b9a277 861
19baf839
RO
862 return NULL;
863}
864
adaf9816 865static inline struct list_head *get_fa_head(struct tnode *l, int plen)
19baf839 866{
772cb712 867 struct leaf_info *li = find_leaf_info(l, plen);
c877efb2 868
91b9a277
OJ
869 if (!li)
870 return NULL;
c877efb2 871
91b9a277 872 return &li->falh;
19baf839
RO
873}
874
5405afd1
AD
875static void leaf_pull_suffix(struct tnode *l)
876{
877 struct tnode *tp = node_parent(l);
878
879 while (tp && (tp->slen > tp->pos) && (tp->slen > l->slen)) {
880 if (update_suffix(tp) > l->slen)
881 break;
882 tp = node_parent(tp);
883 }
884}
885
886static void leaf_push_suffix(struct tnode *l)
19baf839 887{
5405afd1
AD
888 struct tnode *tn = node_parent(l);
889
890 /* if this is a new leaf then tn will be NULL and we can sort
891 * out parent suffix lengths as a part of trie_rebalance
892 */
893 while (tn && (tn->slen < l->slen)) {
894 tn->slen = l->slen;
895 tn = node_parent(tn);
896 }
897}
898
899static void remove_leaf_info(struct tnode *l, struct leaf_info *old)
900{
901 struct hlist_node *prev;
902
903 /* record the location of the pointer to this object */
904 prev = rtnl_dereference(hlist_pprev_rcu(&old->hlist));
905
906 /* remove the leaf info from the list */
907 hlist_del_rcu(&old->hlist);
908
909 /* if we emptied the list this leaf will be freed and we can sort
910 * out parent suffix lengths as a part of trie_rebalance
911 */
912 if (hlist_empty(&l->list))
913 return;
914
915 /* if we removed the tail then we need to update slen */
916 if (!rcu_access_pointer(hlist_next_rcu(prev))) {
917 struct leaf_info *li = hlist_entry(prev, typeof(*li), hlist);
918
919 l->slen = KEYLENGTH - li->plen;
920 leaf_pull_suffix(l);
921 }
922}
923
924static void insert_leaf_info(struct tnode *l, struct leaf_info *new)
925{
926 struct hlist_head *head = &l->list;
e905a9ed 927 struct leaf_info *li = NULL, *last = NULL;
e905a9ed
YH
928
929 if (hlist_empty(head)) {
930 hlist_add_head_rcu(&new->hlist, head);
931 } else {
b67bfe0d 932 hlist_for_each_entry(li, head, hlist) {
e905a9ed
YH
933 if (new->plen > li->plen)
934 break;
935
936 last = li;
937 }
938 if (last)
1d023284 939 hlist_add_behind_rcu(&new->hlist, &last->hlist);
e905a9ed
YH
940 else
941 hlist_add_before_rcu(&new->hlist, &li->hlist);
942 }
5405afd1
AD
943
944 /* if we added to the tail node then we need to update slen */
945 if (!rcu_access_pointer(hlist_next_rcu(&new->hlist))) {
946 l->slen = KEYLENGTH - new->plen;
947 leaf_push_suffix(l);
948 }
949}
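/* Example of the suffix-length bookkeeping above (prefix lengths chosen
 * for illustration): a leaf holding a /24 and a /16 entry keeps the /16
 * at the list tail, so slen = KEYLENGTH - 16 = 16, and leaf_push_suffix()
 * propagates that value to every parent whose slen is smaller. The
 * (n->slen > n->pos) tests in fib_table_lookup() use exactly this
 * information to decide where backtracking can still find a match.
 */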
950
2373ce1c 951/* rcu_read_lock needs to be hold by caller from readside */
adaf9816 952static struct tnode *fib_find_node(struct trie *t, u32 key)
19baf839 953{
adaf9816 954 struct tnode *n = rcu_dereference_rtnl(t->trie);
939afb06
AD
955
956 while (n) {
957 unsigned long index = get_index(key, n);
958
959 /* This bit of code is a bit tricky but it combines multiple
960 * checks into a single check. The prefix consists of the
961 * prefix plus zeros for the bits in the cindex. The index
962 * is the difference between the key and this value. From
963 * this we can actually derive several pieces of data.
964 * if !(index >> bits)
965 * we know the value is cindex
966 * else
967 * we have a mismatch in skip bits and failed
968 */
969 if (index >> n->bits)
970 return NULL;
971
972 /* we have found a leaf. Prefixes have already been compared */
973 if (IS_LEAF(n))
19baf839 974 break;
19baf839 975
21d1f11d 976 n = tnode_get_child_rcu(n, index);
939afb06 977 }
91b9a277 978
939afb06 979 return n;
980}
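/* A worked example of the index test above (addresses invented for
 * illustration): looking up key 0xc0a80101 (192.168.1.1) in a tnode with
 * key 0xc0a80000, pos 8 and bits 4 gives
 * index = (0xc0a80101 ^ 0xc0a80000) >> 8 = 0x1, and 0x1 >> 4 == 0, so we
 * descend into child 1; the differing low bits below pos are checked
 * further down or at the leaf. For key 0xc0a90101 the xor is 0x00010101,
 * index = 0x101, and 0x101 >> 4 != 0, so a skipped bit differs and the
 * search correctly reports no match.
 */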
981
7b85576d 982static void trie_rebalance(struct trie *t, struct tnode *tn)
19baf839 983{
06801916 984 struct tnode *tp;
19baf839 985
ff181ed8
AD
986 while ((tp = node_parent(tn)) != NULL) {
987 resize(t, tn);
06801916 988 tn = tp;
19baf839 989 }
06801916 990
19baf839 991 /* Handle last (top) tnode */
7b85576d 992 if (IS_TNODE(tn))
ff181ed8 993 resize(t, tn);
19baf839
RO
994}
995
2373ce1c
RO
996/* only used from updater-side */
997
fea86ad8 998static struct list_head *fib_insert_node(struct trie *t, u32 key, int plen)
19baf839 999{
c877efb2 1000 struct list_head *fa_head = NULL;
836a0123 1001 struct tnode *l, *n, *tp = NULL;
19baf839 1002 struct leaf_info *li;
19baf839 1003
836a0123
AD
1004 li = leaf_info_new(plen);
1005 if (!li)
1006 return NULL;
1007 fa_head = &li->falh;
1008
0a5c0475 1009 n = rtnl_dereference(t->trie);
19baf839 1010
c877efb2
SH
1011 /* If we point to NULL, stop. Either the tree is empty and we should
1012 * just put a new leaf in it, or we have reached an empty child slot,
19baf839 1013 * and we should just put our new leaf in that.
19baf839 1014 *
836a0123
AD
1015 * If we hit a node with a key that doesn't match then we should stop
1016 * and create a new tnode to replace that node and insert ourselves
1017 * and the other node into the new tnode.
19baf839 1018 */
836a0123
AD
1019 while (n) {
1020 unsigned long index = get_index(key, n);
19baf839 1021
836a0123
AD
1022 /* This bit of code is a bit tricky but it combines multiple
1023 * checks into a single check. The prefix consists of the
1024 * prefix plus zeros for the "bits" in the prefix. The index
1025 * is the difference between the key and this value. From
1026 * this we can actually derive several pieces of data.
1027 * if !(index >> bits)
1028 * we know the value is child index
1029 * else
1030 * we have a mismatch in skip bits and failed
1031 */
1032 if (index >> n->bits)
19baf839 1033 break;
19baf839 1034
836a0123
AD
1035 /* we have found a leaf. Prefixes have already been compared */
1036 if (IS_LEAF(n)) {
1037 /* Case 1: n is a leaf, and prefixes match*/
5405afd1 1038 insert_leaf_info(n, li);
836a0123
AD
1039 return fa_head;
1040 }
19baf839 1041
836a0123 1042 tp = n;
21d1f11d 1043 n = tnode_get_child_rcu(n, index);
19baf839 1044 }
19baf839 1045
836a0123
AD
1046 l = leaf_new(key);
1047 if (!l) {
1048 free_leaf_info(li);
fea86ad8 1049 return NULL;
f835e471 1050 }
19baf839 1051
5405afd1 1052 insert_leaf_info(l, li);
19baf839 1053
836a0123
AD
1054 /* Case 2: n is a LEAF or a TNODE and the key doesn't match.
1055 *
1056 * Add a new tnode here
1057 * first tnode need some special handling
1058 * leaves us in position for handling as case 3
1059 */
1060 if (n) {
1061 struct tnode *tn;
19baf839 1062
e9b44019 1063 tn = tnode_new(key, __fls(key ^ n->key), 1);
c877efb2 1064 if (!tn) {
f835e471 1065 free_leaf_info(li);
37fd30f2 1066 node_free(l);
fea86ad8 1067 return NULL;
91b9a277
OJ
1068 }
1069
836a0123
AD
1070 /* initialize routes out of node */
1071 NODE_INIT_PARENT(tn, tp);
1072 put_child(tn, get_index(key, tn) ^ 1, n);
19baf839 1073
836a0123
AD
1074 /* start adding routes into the node */
1075 put_child_root(tp, t, key, tn);
1076 node_set_parent(n, tn);
e962f302 1077
836a0123 1078 /* parent now has a NULL spot where the leaf can go */
e962f302 1079 tp = tn;
19baf839 1080 }
91b9a277 1081
836a0123
AD
1082 /* Case 3: n is NULL, and will just insert a new leaf */
1083 if (tp) {
1084 NODE_INIT_PARENT(l, tp);
1085 put_child(tp, get_index(key, tp), l);
1086 trie_rebalance(t, tp);
1087 } else {
1088 rcu_assign_pointer(t->trie, l);
1089 }
2373ce1c 1090
19baf839
RO
1091 return fa_head;
1092}
1093
d562f1f8
RO
1094/*
1095 * Caller must hold RTNL.
1096 */
16c6cf8b 1097int fib_table_insert(struct fib_table *tb, struct fib_config *cfg)
19baf839
RO
1098{
1099 struct trie *t = (struct trie *) tb->tb_data;
1100 struct fib_alias *fa, *new_fa;
c877efb2 1101 struct list_head *fa_head = NULL;
19baf839 1102 struct fib_info *fi;
4e902c57
TG
1103 int plen = cfg->fc_dst_len;
1104 u8 tos = cfg->fc_tos;
19baf839
RO
1105 u32 key, mask;
1106 int err;
adaf9816 1107 struct tnode *l;
19baf839
RO
1108
1109 if (plen > 32)
1110 return -EINVAL;
1111
4e902c57 1112 key = ntohl(cfg->fc_dst);
19baf839 1113
2dfe55b4 1114 pr_debug("Insert table=%u %08x/%d\n", tb->tb_id, key, plen);
19baf839 1115
91b9a277 1116 mask = ntohl(inet_make_mask(plen));
19baf839 1117
c877efb2 1118 if (key & ~mask)
19baf839
RO
1119 return -EINVAL;
1120
1121 key = key & mask;
1122
4e902c57
TG
1123 fi = fib_create_info(cfg);
1124 if (IS_ERR(fi)) {
1125 err = PTR_ERR(fi);
19baf839 1126 goto err;
4e902c57 1127 }
19baf839
RO
1128
1129 l = fib_find_node(t, key);
c877efb2 1130 fa = NULL;
19baf839 1131
c877efb2 1132 if (l) {
19baf839
RO
1133 fa_head = get_fa_head(l, plen);
1134 fa = fib_find_alias(fa_head, tos, fi->fib_priority);
1135 }
1136
1137 /* Now fa, if non-NULL, points to the first fib alias
1138 * with the same keys [prefix,tos,priority], if such key already
1139 * exists or to the node before which we will insert new one.
1140 *
1141 * If fa is NULL, we will need to allocate a new one and
1142 * insert to the head of f.
1143 *
1144 * If f is NULL, no fib node matched the destination key
1145 * and we need to allocate a new one of those as well.
1146 */
1147
936f6f8e
JA
1148 if (fa && fa->fa_tos == tos &&
1149 fa->fa_info->fib_priority == fi->fib_priority) {
1150 struct fib_alias *fa_first, *fa_match;
19baf839
RO
1151
1152 err = -EEXIST;
4e902c57 1153 if (cfg->fc_nlflags & NLM_F_EXCL)
19baf839
RO
1154 goto out;
1155
936f6f8e
JA
1156 /* We have 2 goals:
1157 * 1. Find exact match for type, scope, fib_info to avoid
1158 * duplicate routes
1159 * 2. Find next 'fa' (or head), NLM_F_APPEND inserts before it
1160 */
1161 fa_match = NULL;
1162 fa_first = fa;
1163 fa = list_entry(fa->fa_list.prev, struct fib_alias, fa_list);
1164 list_for_each_entry_continue(fa, fa_head, fa_list) {
1165 if (fa->fa_tos != tos)
1166 break;
1167 if (fa->fa_info->fib_priority != fi->fib_priority)
1168 break;
1169 if (fa->fa_type == cfg->fc_type &&
936f6f8e
JA
1170 fa->fa_info == fi) {
1171 fa_match = fa;
1172 break;
1173 }
1174 }
1175
4e902c57 1176 if (cfg->fc_nlflags & NLM_F_REPLACE) {
19baf839
RO
1177 struct fib_info *fi_drop;
1178 u8 state;
1179
936f6f8e
JA
1180 fa = fa_first;
1181 if (fa_match) {
1182 if (fa == fa_match)
1183 err = 0;
6725033f 1184 goto out;
936f6f8e 1185 }
2373ce1c 1186 err = -ENOBUFS;
e94b1766 1187 new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL);
2373ce1c
RO
1188 if (new_fa == NULL)
1189 goto out;
19baf839
RO
1190
1191 fi_drop = fa->fa_info;
2373ce1c
RO
1192 new_fa->fa_tos = fa->fa_tos;
1193 new_fa->fa_info = fi;
4e902c57 1194 new_fa->fa_type = cfg->fc_type;
19baf839 1195 state = fa->fa_state;
936f6f8e 1196 new_fa->fa_state = state & ~FA_S_ACCESSED;
19baf839 1197
2373ce1c
RO
1198 list_replace_rcu(&fa->fa_list, &new_fa->fa_list);
1199 alias_free_mem_rcu(fa);
19baf839
RO
1200
1201 fib_release_info(fi_drop);
1202 if (state & FA_S_ACCESSED)
4ccfe6d4 1203 rt_cache_flush(cfg->fc_nlinfo.nl_net);
b8f55831
MK
1204 rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen,
1205 tb->tb_id, &cfg->fc_nlinfo, NLM_F_REPLACE);
19baf839 1206
91b9a277 1207 goto succeeded;
19baf839
RO
1208 }
1209 /* Error if we find a perfect match which
1210 * uses the same scope, type, and nexthop
1211 * information.
1212 */
936f6f8e
JA
1213 if (fa_match)
1214 goto out;
a07f5f50 1215
4e902c57 1216 if (!(cfg->fc_nlflags & NLM_F_APPEND))
936f6f8e 1217 fa = fa_first;
19baf839
RO
1218 }
1219 err = -ENOENT;
4e902c57 1220 if (!(cfg->fc_nlflags & NLM_F_CREATE))
19baf839
RO
1221 goto out;
1222
1223 err = -ENOBUFS;
e94b1766 1224 new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL);
19baf839
RO
1225 if (new_fa == NULL)
1226 goto out;
1227
1228 new_fa->fa_info = fi;
1229 new_fa->fa_tos = tos;
4e902c57 1230 new_fa->fa_type = cfg->fc_type;
19baf839 1231 new_fa->fa_state = 0;
19baf839
RO
1232 /*
1233 * Insert new entry to the list.
1234 */
1235
c877efb2 1236 if (!fa_head) {
fea86ad8
SH
1237 fa_head = fib_insert_node(t, key, plen);
1238 if (unlikely(!fa_head)) {
1239 err = -ENOMEM;
f835e471 1240 goto out_free_new_fa;
fea86ad8 1241 }
f835e471 1242 }
19baf839 1243
21d8c49e
DM
1244 if (!plen)
1245 tb->tb_num_default++;
1246
2373ce1c
RO
1247 list_add_tail_rcu(&new_fa->fa_list,
1248 (fa ? &fa->fa_list : fa_head));
19baf839 1249
4ccfe6d4 1250 rt_cache_flush(cfg->fc_nlinfo.nl_net);
4e902c57 1251 rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen, tb->tb_id,
b8f55831 1252 &cfg->fc_nlinfo, 0);
19baf839
RO
1253succeeded:
1254 return 0;
f835e471
RO
1255
1256out_free_new_fa:
1257 kmem_cache_free(fn_alias_kmem, new_fa);
19baf839
RO
1258out:
1259 fib_release_info(fi);
91b9a277 1260err:
19baf839
RO
1261 return err;
1262}
1263
1264static inline t_key prefix_mismatch(t_key key, struct tnode *n)
1265{
1266 t_key prefix = n->key;
1267
1268 return (key ^ prefix) & (prefix | -prefix);
1269}
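/* A sketch of the bit trick above (the prefix value is illustrative
 * only): -prefix is the two's complement negation, so for a non-zero
 * prefix (prefix | -prefix) has every bit set from the lowest set bit of
 * prefix up to the MSB. For prefix = 0xc0a80000 that mask is 0xfff80000,
 * and the function returns the bits where key and prefix disagree inside
 * that region; the lookup path below backtracks when the result is
 * non-zero.
 */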
1270
345e9b54 1271/* should be called with rcu_read_lock */
22bd5b9b 1272int fib_table_lookup(struct fib_table *tb, const struct flowi4 *flp,
ebc0ffae 1273 struct fib_result *res, int fib_flags)
19baf839 1274{
9f9e636d 1275 struct trie *t = (struct trie *)tb->tb_data;
8274a97a
AD
1276#ifdef CONFIG_IP_FIB_TRIE_STATS
1277 struct trie_use_stats __percpu *stats = t->stats;
1278#endif
9f9e636d
AD
1279 const t_key key = ntohl(flp->daddr);
1280 struct tnode *n, *pn;
345e9b54 1281 struct leaf_info *li;
9f9e636d 1282 t_key cindex;
91b9a277 1283
2373ce1c 1284 n = rcu_dereference(t->trie);
c877efb2 1285 if (!n)
345e9b54 1286 return -EAGAIN;
19baf839
RO
1287
1288#ifdef CONFIG_IP_FIB_TRIE_STATS
8274a97a 1289 this_cpu_inc(stats->gets);
19baf839
RO
1290#endif
1291
adaf9816 1292 pn = n;
9f9e636d
AD
1293 cindex = 0;
1294
1295 /* Step 1: Travel to the longest prefix match in the trie */
1296 for (;;) {
1297 unsigned long index = get_index(key, n);
1298
1299 /* This bit of code is a bit tricky but it combines multiple
1300 * checks into a single check. The prefix consists of the
1301 * prefix plus zeros for the "bits" in the prefix. The index
1302 * is the difference between the key and this value. From
1303 * this we can actually derive several pieces of data.
1304 * if !(index >> bits)
1305 * we know the value is child index
1306 * else
1307 * we have a mismatch in skip bits and failed
1308 */
1309 if (index >> n->bits)
1310 break;
19baf839 1311
9f9e636d
AD
1312 /* we have found a leaf. Prefixes have already been compared */
1313 if (IS_LEAF(n))
a07f5f50 1314 goto found;
19baf839 1315
9f9e636d
AD
1316 /* only record pn and cindex if we are going to be chopping
1317 * bits later. Otherwise we are just wasting cycles.
91b9a277 1318 */
5405afd1 1319 if (n->slen > n->pos) {
9f9e636d
AD
1320 pn = n;
1321 cindex = index;
91b9a277 1322 }
19baf839 1323
21d1f11d 1324 n = tnode_get_child_rcu(n, index);
9f9e636d
AD
1325 if (unlikely(!n))
1326 goto backtrace;
1327 }
19baf839 1328
9f9e636d
AD
1329 /* Step 2: Sort out leaves and begin backtracing for longest prefix */
1330 for (;;) {
1331 /* record the pointer where our next node pointer is stored */
1332 struct tnode __rcu **cptr = n->child;
19baf839 1333
9f9e636d
AD
1334 /* This test verifies that none of the bits that differ
1335 * between the key and the prefix exist in the region of
1336 * the lsb and higher in the prefix.
91b9a277 1337 */
5405afd1 1338 if (unlikely(prefix_mismatch(key, n)) || (n->slen == n->pos))
9f9e636d 1339 goto backtrace;
91b9a277 1340
9f9e636d
AD
1341 /* exit out and process leaf */
1342 if (unlikely(IS_LEAF(n)))
1343 break;
91b9a277 1344
9f9e636d
AD
1345 /* Don't bother recording parent info. Since we are in
1346 * prefix match mode we will have to come back to wherever
1347 * we started this traversal anyway
91b9a277 1348 */
91b9a277 1349
9f9e636d 1350 while ((n = rcu_dereference(*cptr)) == NULL) {
19baf839 1351backtrace:
19baf839 1352#ifdef CONFIG_IP_FIB_TRIE_STATS
9f9e636d
AD
1353 if (!n)
1354 this_cpu_inc(stats->null_node_hit);
19baf839 1355#endif
9f9e636d
AD
1356 /* If we are at cindex 0 there are no more bits for
1357 * us to strip at this level so we must ascend back
1358 * up one level to see if there are any more bits to
1359 * be stripped there.
1360 */
1361 while (!cindex) {
1362 t_key pkey = pn->key;
1363
1364 pn = node_parent_rcu(pn);
1365 if (unlikely(!pn))
345e9b54 1366 return -EAGAIN;
9f9e636d
AD
1367#ifdef CONFIG_IP_FIB_TRIE_STATS
1368 this_cpu_inc(stats->backtrack);
1369#endif
1370 /* Get Child's index */
1371 cindex = get_index(pkey, pn);
1372 }
1373
1374 /* strip the least significant bit from the cindex */
1375 cindex &= cindex - 1;
1376
1377 /* grab pointer for next child node */
1378 cptr = &pn->child[cindex];
c877efb2 1379 }
19baf839 1380 }
9f9e636d 1381
19baf839 1382found:
9f9e636d 1383 /* Step 3: Process the leaf, if that fails fall back to backtracing */
345e9b54
AD
1384 hlist_for_each_entry_rcu(li, &n->list, hlist) {
1385 struct fib_alias *fa;
1386
1387 if ((key ^ n->key) & li->mask_plen)
1388 continue;
1389
1390 list_for_each_entry_rcu(fa, &li->falh, fa_list) {
1391 struct fib_info *fi = fa->fa_info;
1392 int nhsel, err;
1393
1394 if (fa->fa_tos && fa->fa_tos != flp->flowi4_tos)
1395 continue;
1396 if (fi->fib_dead)
1397 continue;
1398 if (fa->fa_info->fib_scope < flp->flowi4_scope)
1399 continue;
1400 fib_alias_accessed(fa);
1401 err = fib_props[fa->fa_type].error;
1402 if (unlikely(err < 0)) {
1403#ifdef CONFIG_IP_FIB_TRIE_STATS
1404 this_cpu_inc(stats->semantic_match_passed);
1405#endif
1406 return err;
1407 }
1408 if (fi->fib_flags & RTNH_F_DEAD)
1409 continue;
1410 for (nhsel = 0; nhsel < fi->fib_nhs; nhsel++) {
1411 const struct fib_nh *nh = &fi->fib_nh[nhsel];
1412
1413 if (nh->nh_flags & RTNH_F_DEAD)
1414 continue;
1415 if (flp->flowi4_oif && flp->flowi4_oif != nh->nh_oif)
1416 continue;
1417
1418 if (!(fib_flags & FIB_LOOKUP_NOREF))
1419 atomic_inc(&fi->fib_clntref);
1420
1421 res->prefixlen = li->plen;
1422 res->nh_sel = nhsel;
1423 res->type = fa->fa_type;
1424 res->scope = fi->fib_scope;
1425 res->fi = fi;
1426 res->table = tb;
1427 res->fa_head = &li->falh;
1428#ifdef CONFIG_IP_FIB_TRIE_STATS
1429 this_cpu_inc(stats->semantic_match_passed);
1430#endif
1431 return err;
1432 }
1433 }
1434
1435#ifdef CONFIG_IP_FIB_TRIE_STATS
1436 this_cpu_inc(stats->semantic_match_miss);
1437#endif
1438 }
1439 goto backtrace;
19baf839 1440}
6fc01438 1441EXPORT_SYMBOL_GPL(fib_table_lookup);
19baf839 1442
9195bef7
SH
1443/*
1444 * Remove the leaf and rebalance the trie from its parent.
1445 */
adaf9816 1446static void trie_leaf_remove(struct trie *t, struct tnode *l)
19baf839 1447{
64c9b6fb 1448 struct tnode *tp = node_parent(l);
c877efb2 1449
9195bef7 1450 pr_debug("entering trie_leaf_remove(%p)\n", l);
19baf839 1451
c877efb2 1452 if (tp) {
836a0123 1453 put_child(tp, get_index(l->key, tp), NULL);
7b85576d 1454 trie_rebalance(t, tp);
836a0123 1455 } else {
a9b3cd7f 1456 RCU_INIT_POINTER(t->trie, NULL);
836a0123 1457 }
19baf839 1458
37fd30f2 1459 node_free(l);
19baf839
RO
1460}
1461
d562f1f8
RO
1462/*
1463 * Caller must hold RTNL.
1464 */
16c6cf8b 1465int fib_table_delete(struct fib_table *tb, struct fib_config *cfg)
19baf839
RO
1466{
1467 struct trie *t = (struct trie *) tb->tb_data;
1468 u32 key, mask;
4e902c57
TG
1469 int plen = cfg->fc_dst_len;
1470 u8 tos = cfg->fc_tos;
19baf839
RO
1471 struct fib_alias *fa, *fa_to_delete;
1472 struct list_head *fa_head;
adaf9816 1473 struct tnode *l;
91b9a277
OJ
1474 struct leaf_info *li;
1475
c877efb2 1476 if (plen > 32)
19baf839
RO
1477 return -EINVAL;
1478
4e902c57 1479 key = ntohl(cfg->fc_dst);
91b9a277 1480 mask = ntohl(inet_make_mask(plen));
19baf839 1481
c877efb2 1482 if (key & ~mask)
19baf839
RO
1483 return -EINVAL;
1484
1485 key = key & mask;
1486 l = fib_find_node(t, key);
1487
c877efb2 1488 if (!l)
19baf839
RO
1489 return -ESRCH;
1490
ad5b3102
IM
1491 li = find_leaf_info(l, plen);
1492
1493 if (!li)
1494 return -ESRCH;
1495
1496 fa_head = &li->falh;
19baf839
RO
1497 fa = fib_find_alias(fa_head, tos, 0);
1498
1499 if (!fa)
1500 return -ESRCH;
1501
0c7770c7 1502 pr_debug("Deleting %08x/%d tos=%d t=%p\n", key, plen, tos, t);
19baf839
RO
1503
1504 fa_to_delete = NULL;
936f6f8e
JA
1505 fa = list_entry(fa->fa_list.prev, struct fib_alias, fa_list);
1506 list_for_each_entry_continue(fa, fa_head, fa_list) {
19baf839
RO
1507 struct fib_info *fi = fa->fa_info;
1508
1509 if (fa->fa_tos != tos)
1510 break;
1511
4e902c57
TG
1512 if ((!cfg->fc_type || fa->fa_type == cfg->fc_type) &&
1513 (cfg->fc_scope == RT_SCOPE_NOWHERE ||
37e826c5 1514 fa->fa_info->fib_scope == cfg->fc_scope) &&
74cb3c10
JA
1515 (!cfg->fc_prefsrc ||
1516 fi->fib_prefsrc == cfg->fc_prefsrc) &&
4e902c57
TG
1517 (!cfg->fc_protocol ||
1518 fi->fib_protocol == cfg->fc_protocol) &&
1519 fib_nh_match(cfg, fi) == 0) {
19baf839
RO
1520 fa_to_delete = fa;
1521 break;
1522 }
1523 }
1524
91b9a277
OJ
1525 if (!fa_to_delete)
1526 return -ESRCH;
19baf839 1527
91b9a277 1528 fa = fa_to_delete;
4e902c57 1529 rtmsg_fib(RTM_DELROUTE, htonl(key), fa, plen, tb->tb_id,
b8f55831 1530 &cfg->fc_nlinfo, 0);
91b9a277 1531
2373ce1c 1532 list_del_rcu(&fa->fa_list);
19baf839 1533
21d8c49e
DM
1534 if (!plen)
1535 tb->tb_num_default--;
1536
91b9a277 1537 if (list_empty(fa_head)) {
5405afd1 1538 remove_leaf_info(l, li);
91b9a277 1539 free_leaf_info(li);
2373ce1c 1540 }
19baf839 1541
91b9a277 1542 if (hlist_empty(&l->list))
9195bef7 1543 trie_leaf_remove(t, l);
19baf839 1544
91b9a277 1545 if (fa->fa_state & FA_S_ACCESSED)
4ccfe6d4 1546 rt_cache_flush(cfg->fc_nlinfo.nl_net);
19baf839 1547
2373ce1c
RO
1548 fib_release_info(fa->fa_info);
1549 alias_free_mem_rcu(fa);
91b9a277 1550 return 0;
19baf839
RO
1551}
1552
ef3660ce 1553static int trie_flush_list(struct list_head *head)
19baf839
RO
1554{
1555 struct fib_alias *fa, *fa_node;
1556 int found = 0;
1557
1558 list_for_each_entry_safe(fa, fa_node, head, fa_list) {
1559 struct fib_info *fi = fa->fa_info;
19baf839 1560
2373ce1c
RO
1561 if (fi && (fi->fib_flags & RTNH_F_DEAD)) {
1562 list_del_rcu(&fa->fa_list);
1563 fib_release_info(fa->fa_info);
1564 alias_free_mem_rcu(fa);
19baf839
RO
1565 found++;
1566 }
1567 }
1568 return found;
1569}
1570
adaf9816 1571static int trie_flush_leaf(struct tnode *l)
19baf839
RO
1572{
1573 int found = 0;
1574 struct hlist_head *lih = &l->list;
b67bfe0d 1575 struct hlist_node *tmp;
19baf839
RO
1576 struct leaf_info *li = NULL;
1577
b67bfe0d 1578 hlist_for_each_entry_safe(li, tmp, lih, hlist) {
ef3660ce 1579 found += trie_flush_list(&li->falh);
19baf839
RO
1580
1581 if (list_empty(&li->falh)) {
2373ce1c 1582 hlist_del_rcu(&li->hlist);
19baf839
RO
1583 free_leaf_info(li);
1584 }
1585 }
1586 return found;
1587}
1588
82cfbb00
SH
1589/*
1590 * Scan for the next right leaf starting at node p->child[idx]
1591 * Since we have back pointers, no recursion is necessary.
1592 */
adaf9816 1593static struct tnode *leaf_walk_rcu(struct tnode *p, struct tnode *c)
19baf839 1594{
82cfbb00 1595 do {
98293e8d 1596 unsigned long idx = c ? idx = get_index(c->key, p) + 1 : 0;
2373ce1c 1597
98293e8d 1598 while (idx < tnode_child_length(p)) {
82cfbb00 1599 c = tnode_get_child_rcu(p, idx++);
2373ce1c 1600 if (!c)
91b9a277
OJ
1601 continue;
1602
aab515d7 1603 if (IS_LEAF(c))
adaf9816 1604 return c;
82cfbb00
SH
1605
1606 /* Rescan start scanning in new node */
adaf9816 1607 p = c;
82cfbb00 1608 idx = 0;
19baf839 1609 }
82cfbb00
SH
1610
1611 /* Node empty, walk back up to parent */
adaf9816 1612 c = p;
a034ee3c 1613 } while ((p = node_parent_rcu(c)) != NULL);
82cfbb00
SH
1614
1615 return NULL; /* Root of trie */
1616}
1617
adaf9816 1618static struct tnode *trie_firstleaf(struct trie *t)
82cfbb00 1619{
adaf9816 1620 struct tnode *n = rcu_dereference_rtnl(t->trie);
82cfbb00
SH
1621
1622 if (!n)
1623 return NULL;
1624
1625 if (IS_LEAF(n)) /* trie is just a leaf */
adaf9816 1626 return n;
82cfbb00
SH
1627
1628 return leaf_walk_rcu(n, NULL);
1629}
1630
adaf9816 1631static struct tnode *trie_nextleaf(struct tnode *l)
82cfbb00 1632{
adaf9816 1633 struct tnode *p = node_parent_rcu(l);
82cfbb00
SH
1634
1635 if (!p)
1636 return NULL; /* trie with just one leaf */
1637
adaf9816 1638 return leaf_walk_rcu(p, l);
19baf839
RO
1639}
1640
adaf9816 1641static struct tnode *trie_leafindex(struct trie *t, int index)
71d67e66 1642{
adaf9816 1643 struct tnode *l = trie_firstleaf(t);
71d67e66 1644
ec28cf73 1645 while (l && index-- > 0)
71d67e66 1646 l = trie_nextleaf(l);
ec28cf73 1647
71d67e66
SH
1648 return l;
1649}
1650
1651
d562f1f8
RO
1652/*
1653 * Caller must hold RTNL.
1654 */
16c6cf8b 1655int fib_table_flush(struct fib_table *tb)
19baf839
RO
1656{
1657 struct trie *t = (struct trie *) tb->tb_data;
adaf9816 1658 struct tnode *l, *ll = NULL;
82cfbb00 1659 int found = 0;
19baf839 1660
82cfbb00 1661 for (l = trie_firstleaf(t); l; l = trie_nextleaf(l)) {
ef3660ce 1662 found += trie_flush_leaf(l);
19baf839
RO
1663
1664 if (ll && hlist_empty(&ll->list))
9195bef7 1665 trie_leaf_remove(t, ll);
19baf839
RO
1666 ll = l;
1667 }
1668
1669 if (ll && hlist_empty(&ll->list))
9195bef7 1670 trie_leaf_remove(t, ll);
19baf839 1671
0c7770c7 1672 pr_debug("trie_flush found=%d\n", found);
19baf839
RO
1673 return found;
1674}
1675
4aa2c466
PE
1676void fib_free_table(struct fib_table *tb)
1677{
8274a97a
AD
1678#ifdef CONFIG_IP_FIB_TRIE_STATS
1679 struct trie *t = (struct trie *)tb->tb_data;
1680
1681 free_percpu(t->stats);
1682#endif /* CONFIG_IP_FIB_TRIE_STATS */
4aa2c466
PE
1683 kfree(tb);
1684}
1685
a07f5f50
SH
1686static int fn_trie_dump_fa(t_key key, int plen, struct list_head *fah,
1687 struct fib_table *tb,
19baf839
RO
1688 struct sk_buff *skb, struct netlink_callback *cb)
1689{
1690 int i, s_i;
1691 struct fib_alias *fa;
32ab5f80 1692 __be32 xkey = htonl(key);
19baf839 1693
71d67e66 1694 s_i = cb->args[5];
19baf839
RO
1695 i = 0;
1696
2373ce1c
RO
1697 /* rcu_read_lock is held by the caller */
1698
1699 list_for_each_entry_rcu(fa, fah, fa_list) {
19baf839
RO
1700 if (i < s_i) {
1701 i++;
1702 continue;
1703 }
19baf839 1704
15e47304 1705 if (fib_dump_info(skb, NETLINK_CB(cb->skb).portid,
19baf839
RO
1706 cb->nlh->nlmsg_seq,
1707 RTM_NEWROUTE,
1708 tb->tb_id,
1709 fa->fa_type,
be403ea1 1710 xkey,
19baf839
RO
1711 plen,
1712 fa->fa_tos,
64347f78 1713 fa->fa_info, NLM_F_MULTI) < 0) {
71d67e66 1714 cb->args[5] = i;
19baf839 1715 return -1;
91b9a277 1716 }
19baf839
RO
1717 i++;
1718 }
71d67e66 1719 cb->args[5] = i;
19baf839
RO
1720 return skb->len;
1721}
1722
adaf9816 1723static int fn_trie_dump_leaf(struct tnode *l, struct fib_table *tb,
a88ee229 1724 struct sk_buff *skb, struct netlink_callback *cb)
19baf839 1725{
a88ee229 1726 struct leaf_info *li;
a88ee229 1727 int i, s_i;
19baf839 1728
71d67e66 1729 s_i = cb->args[4];
a88ee229 1730 i = 0;
19baf839 1731
1732 /* rcu_read_lock is held by the caller */
b67bfe0d 1733 hlist_for_each_entry_rcu(li, &l->list, hlist) {
a88ee229
SH
1734 if (i < s_i) {
1735 i++;
19baf839 1736 continue;
a88ee229 1737 }
91b9a277 1738
a88ee229 1739 if (i > s_i)
71d67e66 1740 cb->args[5] = 0;
19baf839 1741
a88ee229 1742 if (list_empty(&li->falh))
19baf839
RO
1743 continue;
1744
a88ee229 1745 if (fn_trie_dump_fa(l->key, li->plen, &li->falh, tb, skb, cb) < 0) {
71d67e66 1746 cb->args[4] = i;
19baf839
RO
1747 return -1;
1748 }
a88ee229 1749 i++;
19baf839 1750 }
a88ee229 1751
71d67e66 1752 cb->args[4] = i;
19baf839
RO
1753 return skb->len;
1754}
1755
16c6cf8b
SH
1756int fib_table_dump(struct fib_table *tb, struct sk_buff *skb,
1757 struct netlink_callback *cb)
19baf839 1758{
adaf9816 1759 struct tnode *l;
19baf839 1760 struct trie *t = (struct trie *) tb->tb_data;
d5ce8a0e 1761 t_key key = cb->args[2];
71d67e66 1762 int count = cb->args[3];
19baf839 1763
2373ce1c 1764 rcu_read_lock();
d5ce8a0e
SH
1765 /* Dump starting at last key.
1766 * Note: 0.0.0.0/0 (ie default) is first key.
1767 */
71d67e66 1768 if (count == 0)
d5ce8a0e
SH
1769 l = trie_firstleaf(t);
1770 else {
71d67e66
SH
1771 /* Normally, continue from last key, but if that is missing
1772 * fallback to using slow rescan
1773 */
d5ce8a0e 1774 l = fib_find_node(t, key);
71d67e66
SH
1775 if (!l)
1776 l = trie_leafindex(t, count);
d5ce8a0e 1777 }
a88ee229 1778
d5ce8a0e
SH
1779 while (l) {
1780 cb->args[2] = l->key;
a88ee229 1781 if (fn_trie_dump_leaf(l, tb, skb, cb) < 0) {
71d67e66 1782 cb->args[3] = count;
a88ee229 1783 rcu_read_unlock();
a88ee229 1784 return -1;
19baf839 1785 }
d5ce8a0e 1786
71d67e66 1787 ++count;
d5ce8a0e 1788 l = trie_nextleaf(l);
71d67e66
SH
1789 memset(&cb->args[4], 0,
1790 sizeof(cb->args) - 4*sizeof(cb->args[0]));
19baf839 1791 }
71d67e66 1792 cb->args[3] = count;
2373ce1c 1793 rcu_read_unlock();
a88ee229 1794
19baf839 1795 return skb->len;
19baf839
RO
1796}
1797
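/* Slab caches used by the insertion path: one for fib_alias entries and
 * one sized to the larger of struct tnode and struct leaf_info so that
 * either object can be carved from it.
 */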
void __init fib_trie_init(void)
{
	fn_alias_kmem = kmem_cache_create("ip_fib_alias",
					  sizeof(struct fib_alias),
					  0, SLAB_PANIC, NULL);

	trie_leaf_kmem = kmem_cache_create("ip_fib_trie",
					   max(sizeof(struct tnode),
					       sizeof(struct leaf_info)),
					   0, SLAB_PANIC, NULL);
}

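/* Allocate a table and its embedded trie in one block: struct trie
 * lives in the tb_data[] area of struct fib_table.  If the optional
 * per-cpu statistics cannot be allocated, the whole table is discarded
 * and NULL is returned.
 */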
struct fib_table *fib_trie_table(u32 id)
{
	struct fib_table *tb;
	struct trie *t;

	tb = kmalloc(sizeof(struct fib_table) + sizeof(struct trie),
		     GFP_KERNEL);
	if (tb == NULL)
		return NULL;

	tb->tb_id = id;
	tb->tb_default = -1;
	tb->tb_num_default = 0;

	t = (struct trie *) tb->tb_data;
	RCU_INIT_POINTER(t->trie, NULL);
#ifdef CONFIG_IP_FIB_TRIE_STATS
	t->stats = alloc_percpu(struct trie_use_stats);
	if (!t->stats) {
		kfree(tb);
		tb = NULL;
	}
#endif

	return tb;
}

#ifdef CONFIG_PROC_FS
/* Depth first Trie walk iterator */
struct fib_trie_iter {
	struct seq_net_private p;
	struct fib_table *tb;
	struct tnode *tnode;
	unsigned int index;
	unsigned int depth;
};

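/* Advance the depth-first walk by one node.  Children of the current
 * tnode are scanned starting at iter->index; descending into a child
 * tnode resets the index and bumps the depth.  When a tnode is
 * exhausted the walk pops to its parent and resumes at
 * get_index(tn->key, p) + 1, i.e. just past the slot the child occupied.
 */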
static struct tnode *fib_trie_get_next(struct fib_trie_iter *iter)
{
	unsigned long cindex = iter->index;
	struct tnode *tn = iter->tnode;
	struct tnode *p;

	/* A single entry routing table */
	if (!tn)
		return NULL;

	pr_debug("get_next iter={node=%p index=%d depth=%d}\n",
		 iter->tnode, iter->index, iter->depth);
rescan:
	while (cindex < tnode_child_length(tn)) {
		struct tnode *n = tnode_get_child_rcu(tn, cindex);

		if (n) {
			if (IS_LEAF(n)) {
				iter->tnode = tn;
				iter->index = cindex + 1;
			} else {
				/* push down one level */
				iter->tnode = n;
				iter->index = 0;
				++iter->depth;
			}
			return n;
		}

		++cindex;
	}

	/* Current node exhausted, pop back up */
	p = node_parent_rcu(tn);
	if (p) {
		cindex = get_index(tn->key, p) + 1;
		tn = p;
		--iter->depth;
		goto rescan;
	}

	/* got root? */
	return NULL;
}

static struct tnode *fib_trie_get_first(struct fib_trie_iter *iter,
					struct trie *t)
{
	struct tnode *n;

	if (!t)
		return NULL;

	n = rcu_dereference(t->trie);
	if (!n)
		return NULL;

	if (IS_TNODE(n)) {
		iter->tnode = n;
		iter->index = 0;
		iter->depth = 1;
	} else {
		iter->tnode = NULL;
		iter->index = 0;
		iter->depth = 0;
	}

	return n;
}

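/* Walk the whole trie under rcu_read_lock() and accumulate the numbers
 * reported by /proc/net/fib_triestat: leaf and tnode counts, prefix
 * count, depth statistics, a histogram of tnode sizes and the number of
 * NULL child pointers.
 */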
static void trie_collect_stats(struct trie *t, struct trie_stat *s)
{
	struct tnode *n;
	struct fib_trie_iter iter;

	memset(s, 0, sizeof(*s));

	rcu_read_lock();
	for (n = fib_trie_get_first(&iter, t); n; n = fib_trie_get_next(&iter)) {
		if (IS_LEAF(n)) {
			struct leaf_info *li;

			s->leaves++;
			s->totdepth += iter.depth;
			if (iter.depth > s->maxdepth)
				s->maxdepth = iter.depth;

			hlist_for_each_entry_rcu(li, &n->list, hlist)
				++s->prefixes;
		} else {
			unsigned long i;

			s->tnodes++;
			if (n->bits < MAX_STAT_DEPTH)
				s->nodesizes[n->bits]++;

			for (i = tnode_child_length(n); i--;) {
				if (!rcu_access_pointer(n->child[i]))
					s->nullpointers++;
			}
		}
	}
	rcu_read_unlock();
}

/*
 * This outputs /proc/net/fib_triestat
 */
static void trie_show_stats(struct seq_file *seq, struct trie_stat *stat)
{
	unsigned int i, max, pointers, bytes, avdepth;

	if (stat->leaves)
		avdepth = stat->totdepth*100 / stat->leaves;
	else
		avdepth = 0;

	seq_printf(seq, "\tAver depth: %u.%02d\n",
		   avdepth / 100, avdepth % 100);
	seq_printf(seq, "\tMax depth: %u\n", stat->maxdepth);

	seq_printf(seq, "\tLeaves: %u\n", stat->leaves);
	bytes = sizeof(struct tnode) * stat->leaves;

	seq_printf(seq, "\tPrefixes: %u\n", stat->prefixes);
	bytes += sizeof(struct leaf_info) * stat->prefixes;

	seq_printf(seq, "\tInternal nodes: %u\n\t", stat->tnodes);
	bytes += sizeof(struct tnode) * stat->tnodes;

	max = MAX_STAT_DEPTH;
	while (max > 0 && stat->nodesizes[max-1] == 0)
		max--;

	pointers = 0;
	for (i = 1; i < max; i++)
		if (stat->nodesizes[i] != 0) {
			seq_printf(seq, " %u: %u", i, stat->nodesizes[i]);
			pointers += (1<<i) * stat->nodesizes[i];
		}
	seq_putc(seq, '\n');
	seq_printf(seq, "\tPointers: %u\n", pointers);

	bytes += sizeof(struct tnode *) * pointers;
	seq_printf(seq, "Null ptrs: %u\n", stat->nullpointers);
	seq_printf(seq, "Total size: %u kB\n", (bytes + 1023) / 1024);
}

#ifdef CONFIG_IP_FIB_TRIE_STATS
static void trie_show_usage(struct seq_file *seq,
			    const struct trie_use_stats __percpu *stats)
{
	struct trie_use_stats s = { 0 };
	int cpu;

	/* loop through all of the CPUs and gather up the stats */
	for_each_possible_cpu(cpu) {
		const struct trie_use_stats *pcpu = per_cpu_ptr(stats, cpu);

		s.gets += pcpu->gets;
		s.backtrack += pcpu->backtrack;
		s.semantic_match_passed += pcpu->semantic_match_passed;
		s.semantic_match_miss += pcpu->semantic_match_miss;
		s.null_node_hit += pcpu->null_node_hit;
		s.resize_node_skipped += pcpu->resize_node_skipped;
	}

	seq_printf(seq, "\nCounters:\n---------\n");
	seq_printf(seq, "gets = %u\n", s.gets);
	seq_printf(seq, "backtracks = %u\n", s.backtrack);
	seq_printf(seq, "semantic match passed = %u\n",
		   s.semantic_match_passed);
	seq_printf(seq, "semantic match miss = %u\n", s.semantic_match_miss);
	seq_printf(seq, "null node hit= %u\n", s.null_node_hit);
	seq_printf(seq, "skipped node resize = %u\n\n", s.resize_node_skipped);
}
#endif /* CONFIG_IP_FIB_TRIE_STATS */

static void fib_table_print(struct seq_file *seq, struct fib_table *tb)
{
	if (tb->tb_id == RT_TABLE_LOCAL)
		seq_puts(seq, "Local:\n");
	else if (tb->tb_id == RT_TABLE_MAIN)
		seq_puts(seq, "Main:\n");
	else
		seq_printf(seq, "Id %d:\n", tb->tb_id);
}

static int fib_triestat_seq_show(struct seq_file *seq, void *v)
{
	struct net *net = (struct net *)seq->private;
	unsigned int h;

	seq_printf(seq,
		   "Basic info: size of leaf:"
		   " %Zd bytes, size of tnode: %Zd bytes.\n",
		   sizeof(struct tnode), sizeof(struct tnode));

	for (h = 0; h < FIB_TABLE_HASHSZ; h++) {
		struct hlist_head *head = &net->ipv4.fib_table_hash[h];
		struct fib_table *tb;

		hlist_for_each_entry_rcu(tb, head, tb_hlist) {
			struct trie *t = (struct trie *) tb->tb_data;
			struct trie_stat stat;

			if (!t)
				continue;

			fib_table_print(seq, tb);

			trie_collect_stats(t, &stat);
			trie_show_stats(seq, &stat);
#ifdef CONFIG_IP_FIB_TRIE_STATS
			trie_show_usage(seq, t->stats);
#endif
		}
	}

	return 0;
}

static int fib_triestat_seq_open(struct inode *inode, struct file *file)
{
	return single_open_net(inode, file, fib_triestat_seq_show);
}

static const struct file_operations fib_triestat_fops = {
	.owner	= THIS_MODULE,
	.open	= fib_triestat_seq_open,
	.read	= seq_read,
	.llseek	= seq_lseek,
	.release = single_release_net,
};

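/* Locate the pos'th node (leaf or tnode) for the /proc/net/fib_trie
 * seq_file, scanning every table in every hash chain of this netns in
 * order.  This is a linear walk, so seeking is proportional to the
 * total number of nodes.
 */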
static struct tnode *fib_trie_get_idx(struct seq_file *seq, loff_t pos)
{
	struct fib_trie_iter *iter = seq->private;
	struct net *net = seq_file_net(seq);
	loff_t idx = 0;
	unsigned int h;

	for (h = 0; h < FIB_TABLE_HASHSZ; h++) {
		struct hlist_head *head = &net->ipv4.fib_table_hash[h];
		struct fib_table *tb;

		hlist_for_each_entry_rcu(tb, head, tb_hlist) {
			struct tnode *n;

			for (n = fib_trie_get_first(iter,
						    (struct trie *) tb->tb_data);
			     n; n = fib_trie_get_next(iter))
				if (pos == idx++) {
					iter->tb = tb;
					return n;
				}
		}
	}

	return NULL;
}

static void *fib_trie_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	rcu_read_lock();
	return fib_trie_get_idx(seq, *pos);
}

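/* Step to the next node: first try the current table, then the rest of
 * the same fib_table_hash chain, and finally the remaining hash chains
 * of this netns.
 */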
static void *fib_trie_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct fib_trie_iter *iter = seq->private;
	struct net *net = seq_file_net(seq);
	struct fib_table *tb = iter->tb;
	struct hlist_node *tb_node;
	unsigned int h;
	struct tnode *n;

	++*pos;
	/* next node in same table */
	n = fib_trie_get_next(iter);
	if (n)
		return n;

	/* walk rest of this hash chain */
	h = tb->tb_id & (FIB_TABLE_HASHSZ - 1);
	while ((tb_node = rcu_dereference(hlist_next_rcu(&tb->tb_hlist)))) {
		tb = hlist_entry(tb_node, struct fib_table, tb_hlist);
		n = fib_trie_get_first(iter, (struct trie *) tb->tb_data);
		if (n)
			goto found;
	}

	/* new hash chain */
	while (++h < FIB_TABLE_HASHSZ) {
		struct hlist_head *head = &net->ipv4.fib_table_hash[h];

		hlist_for_each_entry_rcu(tb, head, tb_hlist) {
			n = fib_trie_get_first(iter, (struct trie *) tb->tb_data);
			if (n)
				goto found;
		}
	}
	return NULL;

found:
	iter->tb = tb;
	return n;
}

static void fib_trie_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock();
}

static void seq_indent(struct seq_file *seq, int n)
{
	while (n-- > 0)
		seq_puts(seq, " ");
}

static inline const char *rtn_scope(char *buf, size_t len, enum rt_scope_t s)
{
	switch (s) {
	case RT_SCOPE_UNIVERSE: return "universe";
	case RT_SCOPE_SITE:	return "site";
	case RT_SCOPE_LINK:	return "link";
	case RT_SCOPE_HOST:	return "host";
	case RT_SCOPE_NOWHERE:	return "nowhere";
	default:
		snprintf(buf, len, "scope=%d", s);
		return buf;
	}
}

static const char *const rtn_type_names[__RTN_MAX] = {
	[RTN_UNSPEC] = "UNSPEC",
	[RTN_UNICAST] = "UNICAST",
	[RTN_LOCAL] = "LOCAL",
	[RTN_BROADCAST] = "BROADCAST",
	[RTN_ANYCAST] = "ANYCAST",
	[RTN_MULTICAST] = "MULTICAST",
	[RTN_BLACKHOLE] = "BLACKHOLE",
	[RTN_UNREACHABLE] = "UNREACHABLE",
	[RTN_PROHIBIT] = "PROHIBIT",
	[RTN_THROW] = "THROW",
	[RTN_NAT] = "NAT",
	[RTN_XRESOLVE] = "XRESOLVE",
};

static inline const char *rtn_type(char *buf, size_t len, unsigned int t)
{
	if (t < __RTN_MAX && rtn_type_names[t])
		return rtn_type_names[t];
	snprintf(buf, len, "type %u", t);
	return buf;
}

/* Pretty print the trie */
static int fib_trie_seq_show(struct seq_file *seq, void *v)
{
	const struct fib_trie_iter *iter = seq->private;
	struct tnode *n = v;

	if (!node_parent_rcu(n))
		fib_table_print(seq, iter->tb);

	if (IS_TNODE(n)) {
		__be32 prf = htonl(n->key);

		seq_indent(seq, iter->depth-1);
		seq_printf(seq, " +-- %pI4/%zu %u %u %u\n",
			   &prf, KEYLENGTH - n->pos - n->bits, n->bits,
			   n->full_children, n->empty_children);
	} else {
		struct leaf_info *li;
		__be32 val = htonl(n->key);

		seq_indent(seq, iter->depth);
		seq_printf(seq, " |-- %pI4\n", &val);

		hlist_for_each_entry_rcu(li, &n->list, hlist) {
			struct fib_alias *fa;

			list_for_each_entry_rcu(fa, &li->falh, fa_list) {
				char buf1[32], buf2[32];

				seq_indent(seq, iter->depth+1);
				seq_printf(seq, " /%d %s %s", li->plen,
					   rtn_scope(buf1, sizeof(buf1),
						     fa->fa_info->fib_scope),
					   rtn_type(buf2, sizeof(buf2),
						    fa->fa_type));
				if (fa->fa_tos)
					seq_printf(seq, " tos=%d", fa->fa_tos);
				seq_putc(seq, '\n');
			}
		}
	}

	return 0;
}

static const struct seq_operations fib_trie_seq_ops = {
	.start  = fib_trie_seq_start,
	.next   = fib_trie_seq_next,
	.stop   = fib_trie_seq_stop,
	.show   = fib_trie_seq_show,
};

static int fib_trie_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &fib_trie_seq_ops,
			    sizeof(struct fib_trie_iter));
}

static const struct file_operations fib_trie_fops = {
	.owner  = THIS_MODULE,
	.open   = fib_trie_seq_open,
	.read   = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_net,
};

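/* Iterator state for /proc/net/route.  Because the seq_file interface
 * seeks by record number, the last visited position and its leaf key
 * are cached so that a subsequent read can resume with fib_find_node()
 * instead of rescanning the trie from the first leaf.
 */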
struct fib_route_iter {
	struct seq_net_private p;
	struct trie *main_trie;
	loff_t pos;
	t_key key;
};

static struct tnode *fib_route_get_idx(struct fib_route_iter *iter, loff_t pos)
{
	struct tnode *l = NULL;
	struct trie *t = iter->main_trie;

	/* use cached location of last found key */
	if (iter->pos > 0 && pos >= iter->pos && (l = fib_find_node(t, iter->key)))
		pos -= iter->pos;
	else {
		iter->pos = 0;
		l = trie_firstleaf(t);
	}

	while (l && pos-- > 0) {
		iter->pos++;
		l = trie_nextleaf(l);
	}

	if (l)
		iter->key = l->key;	/* remember the leaf key, not the offset */
	else
		iter->pos = 0;		/* forget it */

	return l;
}

static void *fib_route_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	struct fib_route_iter *iter = seq->private;
	struct fib_table *tb;

	rcu_read_lock();
	tb = fib_get_table(seq_file_net(seq), RT_TABLE_MAIN);
	if (!tb)
		return NULL;

	iter->main_trie = (struct trie *) tb->tb_data;
	if (*pos == 0)
		return SEQ_START_TOKEN;
	else
		return fib_route_get_idx(iter, *pos - 1);
}

static void *fib_route_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct fib_route_iter *iter = seq->private;
	struct tnode *l = v;

	++*pos;
	if (v == SEQ_START_TOKEN) {
		iter->pos = 0;
		l = trie_firstleaf(iter->main_trie);
	} else {
		iter->pos++;
		l = trie_nextleaf(l);
	}

	if (l)
		iter->key = l->key;
	else
		iter->pos = 0;
	return l;
}

static void fib_route_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock();
}

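/* Translate a fib route type into the legacy RTF_* flag bits that the
 * /proc/net/route format expects: reject routes, gatewayed routes and
 * host routes (a /32 mask) each get their corresponding flag, and every
 * entry is reported as RTF_UP.
 */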
static unsigned int fib_flag_trans(int type, __be32 mask, const struct fib_info *fi)
{
	unsigned int flags = 0;

	if (type == RTN_UNREACHABLE || type == RTN_PROHIBIT)
		flags = RTF_REJECT;
	if (fi && fi->fib_nh->nh_gw)
		flags |= RTF_GATEWAY;
	if (mask == htonl(0xFFFFFFFF))
		flags |= RTF_HOST;
	flags |= RTF_UP;
	return flags;
}

/*
 * This outputs /proc/net/route.
 * The format of the file is not supposed to change, and it needs to
 * match the old fib_hash output so that legacy utilities keep working.
 */
static int fib_route_seq_show(struct seq_file *seq, void *v)
{
	struct tnode *l = v;
	struct leaf_info *li;

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "%-127s\n", "Iface\tDestination\tGateway "
			   "\tFlags\tRefCnt\tUse\tMetric\tMask\t\tMTU"
			   "\tWindow\tIRTT");
		return 0;
	}

	hlist_for_each_entry_rcu(li, &l->list, hlist) {
		struct fib_alias *fa;
		__be32 mask, prefix;

		mask = inet_make_mask(li->plen);
		prefix = htonl(l->key);

		list_for_each_entry_rcu(fa, &li->falh, fa_list) {
			const struct fib_info *fi = fa->fa_info;
			unsigned int flags = fib_flag_trans(fa->fa_type, mask, fi);

			if (fa->fa_type == RTN_BROADCAST
			    || fa->fa_type == RTN_MULTICAST)
				continue;

			seq_setwidth(seq, 127);

			if (fi)
				seq_printf(seq,
					   "%s\t%08X\t%08X\t%04X\t%d\t%u\t"
					   "%d\t%08X\t%d\t%u\t%u",
					   fi->fib_dev ? fi->fib_dev->name : "*",
					   prefix,
					   fi->fib_nh->nh_gw, flags, 0, 0,
					   fi->fib_priority,
					   mask,
					   (fi->fib_advmss ?
					    fi->fib_advmss + 40 : 0),
					   fi->fib_window,
					   fi->fib_rtt >> 3);
			else
				seq_printf(seq,
					   "*\t%08X\t%08X\t%04X\t%d\t%u\t"
					   "%d\t%08X\t%d\t%u\t%u",
					   prefix, 0, flags, 0, 0, 0,
					   mask, 0, 0, 0);

			seq_pad(seq, '\n');
		}
	}

	return 0;
}

static const struct seq_operations fib_route_seq_ops = {
	.start  = fib_route_seq_start,
	.next   = fib_route_seq_next,
	.stop   = fib_route_seq_stop,
	.show   = fib_route_seq_show,
};

static int fib_route_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &fib_route_seq_ops,
			    sizeof(struct fib_route_iter));
}

static const struct file_operations fib_route_fops = {
	.owner  = THIS_MODULE,
	.open   = fib_route_seq_open,
	.read   = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_net,
};

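/* Register the three per-netns proc entries (/proc/net/fib_trie,
 * /proc/net/fib_triestat and /proc/net/route); on failure the entries
 * created so far are removed in reverse order before returning -ENOMEM.
 */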
int __net_init fib_proc_init(struct net *net)
{
	if (!proc_create("fib_trie", S_IRUGO, net->proc_net, &fib_trie_fops))
		goto out1;

	if (!proc_create("fib_triestat", S_IRUGO, net->proc_net,
			 &fib_triestat_fops))
		goto out2;

	if (!proc_create("route", S_IRUGO, net->proc_net, &fib_route_fops))
		goto out3;

	return 0;

out3:
	remove_proc_entry("fib_triestat", net->proc_net);
out2:
	remove_proc_entry("fib_trie", net->proc_net);
out1:
	return -ENOMEM;
}

void __net_exit fib_proc_exit(struct net *net)
{
	remove_proc_entry("fib_trie", net->proc_net);
	remove_proc_entry("fib_triestat", net->proc_net);
	remove_proc_entry("route", net->proc_net);
}

#endif /* CONFIG_PROC_FS */