net: change ip_default_ttl documentation
net/ipv4/route.c
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		ROUTE - implementation of the IP router.
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Linus Torvalds, <Linus.Torvalds@helsinki.fi>
 *		Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Fixes:
 *		Alan Cox	:	Verify area fixes.
 *		Alan Cox	:	cli() protects routing changes
 *		Rui Oliveira	:	ICMP routing table updates
 *		(rco@di.uminho.pt)	Routing table insertion and update
 *		Linus Torvalds	:	Rewrote bits to be sensible
 *		Alan Cox	:	Added BSD route gw semantics
 *		Alan Cox	:	Super /proc >4K
 *		Alan Cox	:	MTU in route table
 *		Alan Cox	:	MSS actually. Also added the window
 *					clamper.
 *		Sam Lantinga	:	Fixed route matching in rt_del()
 *		Alan Cox	:	Routing cache support.
 *		Alan Cox	:	Removed compatibility cruft.
 *		Alan Cox	:	RTF_REJECT support.
 *		Alan Cox	:	TCP irtt support.
 *		Jonathan Naylor	:	Added Metric support.
 *	Miquel van Smoorenburg	:	BSD API fixes.
 *	Miquel van Smoorenburg	:	Metrics.
 *		Alan Cox	:	Use __u32 properly
 *		Alan Cox	:	Aligned routing errors more closely with BSD
 *					our system is still very different.
 *		Alan Cox	:	Faster /proc handling
 *	Alexey Kuznetsov	:	Massive rework to support tree based routing,
 *					routing caches and better behaviour.
 *
 *		Olaf Erb	:	irtt wasn't being copied right.
 *		Bjorn Ekwall	:	Kerneld route support.
 *		Alan Cox	:	Multicast fixed (I hope)
 *		Pavel Krauz	:	Limited broadcast fixed
 *		Mike McLagan	:	Routing by source
 *	Alexey Kuznetsov	:	End of old history. Split to fib.c and
 *					route.c and rewritten from scratch.
 *		Andi Kleen	:	Load-limit warning messages.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year coma.
 *	Vitaly E. Lavrov	:	Race condition in ip_route_input_slow.
 *	Tobias Ringstrom	:	Uninitialized res.type in ip_route_output_slow.
 *	Vladimir V. Ivanov	:	IP rule info (flowid) is really useful.
 *		Marc Boucher	:	routing by fwmark
 *	Robert Olsson		:	Added rt_cache statistics
 *	Arnaldo C. Melo		:	Convert proc stuff to seq_file
 *	Eric Dumazet		:	hashed spinlocks and rt_check_expire() fixes.
 *	Ilia Sotnikov		:	Ignore TOS on PMTUD and Redirect
 *	Ilia Sotnikov		:	Removed TOS from hash calculations
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/pkt_sched.h>
#include <linux/mroute.h>
#include <linux/netfilter_ipv4.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/rcupdate.h>
#include <linux/times.h>
#include <linux/slab.h>
#include <net/dst.h>
#include <net/net_namespace.h>
#include <net/protocol.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/inetpeer.h>
#include <net/sock.h>
#include <net/ip_fib.h>
#include <net/arp.h>
#include <net/tcp.h>
#include <net/icmp.h>
#include <net/xfrm.h>
#include <net/netevent.h>
#include <net/rtnetlink.h>
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif

#define RT_FL_TOS(oldflp) \
	((u32)(oldflp->fl4_tos & (IPTOS_RT_MASK | RTO_ONLINK)))

#define IP_MAX_MTU	0xFFF0

#define RT_GC_TIMEOUT (300*HZ)

static int ip_rt_max_size;
static int ip_rt_gc_timeout __read_mostly	= RT_GC_TIMEOUT;
static int ip_rt_gc_interval __read_mostly	= 60 * HZ;
static int ip_rt_gc_min_interval __read_mostly	= HZ / 2;
static int ip_rt_redirect_number __read_mostly	= 9;
static int ip_rt_redirect_load __read_mostly	= HZ / 50;
static int ip_rt_redirect_silence __read_mostly	= ((HZ / 50) << (9 + 1));
static int ip_rt_error_cost __read_mostly	= HZ;
static int ip_rt_error_burst __read_mostly	= 5 * HZ;
static int ip_rt_gc_elasticity __read_mostly	= 8;
static int ip_rt_mtu_expires __read_mostly	= 10 * 60 * HZ;
static int ip_rt_min_pmtu __read_mostly		= 512 + 20 + 20;
static int ip_rt_min_advmss __read_mostly	= 256;
static int rt_chain_length_max __read_mostly	= 20;

static struct delayed_work expires_work;
static unsigned long expires_ljiffies;

/*
 * Interface to generic destination cache.
 */

static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie);
static void		 ipv4_dst_destroy(struct dst_entry *dst);
static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
static void		 ipv4_link_failure(struct sk_buff *skb);
static void		 ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu);
static int rt_garbage_collect(struct dst_ops *ops);

static void ipv4_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
			    int how)
{
}

static struct dst_ops ipv4_dst_ops = {
	.family =		AF_INET,
	.protocol =		cpu_to_be16(ETH_P_IP),
	.gc =			rt_garbage_collect,
	.check =		ipv4_dst_check,
	.destroy =		ipv4_dst_destroy,
	.ifdown =		ipv4_dst_ifdown,
	.negative_advice =	ipv4_negative_advice,
	.link_failure =		ipv4_link_failure,
	.update_pmtu =		ip_rt_update_pmtu,
	.local_out =		__ip_local_out,
};

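/*
 * Editor's note: this table maps the 4-bit IP TOS field to a queueing
 * priority; in this kernel generation it is indexed with IPTOS_TOS(tos) >> 1
 * by rt_tos2priority() in include/net/route.h.  The odd entries, built with
 * ECN_OR_COST(), correspond to TOS values with the low "minimize monetary
 * cost" bit set.
 */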
#define ECN_OR_COST(class)	TC_PRIO_##class

const __u8 ip_tos2prio[16] = {
	TC_PRIO_BESTEFFORT,
	ECN_OR_COST(FILLER),
	TC_PRIO_BESTEFFORT,
	ECN_OR_COST(BESTEFFORT),
	TC_PRIO_BULK,
	ECN_OR_COST(BULK),
	TC_PRIO_BULK,
	ECN_OR_COST(BULK),
	TC_PRIO_INTERACTIVE,
	ECN_OR_COST(INTERACTIVE),
	TC_PRIO_INTERACTIVE,
	ECN_OR_COST(INTERACTIVE),
	TC_PRIO_INTERACTIVE_BULK,
	ECN_OR_COST(INTERACTIVE_BULK),
	TC_PRIO_INTERACTIVE_BULK,
	ECN_OR_COST(INTERACTIVE_BULK)
};


/*
 * Route cache.
 */

/* The locking scheme is rather straightforward:
 *
 * 1) Read-Copy Update protects the buckets of the central route hash.
 * 2) Only writers remove entries, and they hold the lock
 *    as they look at rtable reference counts.
 * 3) Only readers acquire references to rtable entries,
 *    they do so with atomic increments and with the
 *    lock held.
 */

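/*
 * Reader side, for illustration -- this is the pattern the /proc iterators
 * below (e.g. rt_cache_get_first()) follow:
 *
 *	rcu_read_lock_bh();
 *	for (r = rcu_dereference_bh(rt_hash_table[h].chain); r;
 *	     r = rcu_dereference_bh(r->dst.rt_next))
 *		...
 *	rcu_read_unlock_bh();
 *
 * Writers instead take rt_hash_lock_addr(h) and may unlink entries.
 */
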
struct rt_hash_bucket {
	struct rtable __rcu	*chain;
};

#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \
	defined(CONFIG_PROVE_LOCKING)
/*
 * Instead of using one spinlock for each rt_hash_bucket, we use a table of
 * spinlocks.  The size of this table is a power of two and depends on the
 * number of CPUs.
 * (on lockdep we have a quite big spinlock_t, so keep the size down there)
 */
#ifdef CONFIG_LOCKDEP
# define RT_HASH_LOCK_SZ	256
#else
# if NR_CPUS >= 32
#  define RT_HASH_LOCK_SZ	4096
# elif NR_CPUS >= 16
#  define RT_HASH_LOCK_SZ	2048
# elif NR_CPUS >= 8
#  define RT_HASH_LOCK_SZ	1024
# elif NR_CPUS >= 4
#  define RT_HASH_LOCK_SZ	512
# else
#  define RT_HASH_LOCK_SZ	256
# endif
#endif

static spinlock_t	*rt_hash_locks;
# define rt_hash_lock_addr(slot) &rt_hash_locks[(slot) & (RT_HASH_LOCK_SZ - 1)]

static __init void rt_hash_lock_init(void)
{
	int i;

	rt_hash_locks = kmalloc(sizeof(spinlock_t) * RT_HASH_LOCK_SZ,
				GFP_KERNEL);
	if (!rt_hash_locks)
		panic("IP: failed to allocate rt_hash_locks\n");

	for (i = 0; i < RT_HASH_LOCK_SZ; i++)
		spin_lock_init(&rt_hash_locks[i]);
}
#else
# define rt_hash_lock_addr(slot) NULL

static inline void rt_hash_lock_init(void)
{
}
#endif
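
/*
 * Lock striping: a bucket's lock is its slot number masked down into the
 * power-of-two lock table, so e.g. with RT_HASH_LOCK_SZ == 256, buckets
 * 5, 261, 517, ... all share rt_hash_locks[5].  This bounds the memory
 * spent on locks while still spreading contention across CPUs.
 */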

static struct rt_hash_bucket 	*rt_hash_table __read_mostly;
static unsigned			rt_hash_mask __read_mostly;
static unsigned int		rt_hash_log  __read_mostly;

static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
#define RT_CACHE_STAT_INC(field) __this_cpu_inc(rt_cache_stat.field)

static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx,
				   int genid)
{
	return jhash_3words((__force u32)daddr, (__force u32)saddr,
			    idx, genid)
		& rt_hash_mask;
}

static inline int rt_genid(struct net *net)
{
	return atomic_read(&net->ipv4.rt_genid);
}

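/*
 * Every cached rtable records its netns generation id at creation time
 * (rt->rt_genid).  Because the genid is also mixed into rt_hash(), bumping
 * it both invalidates existing entries (see rt_is_expired() below) and
 * steers new lookups onto fresh hash chains.
 */
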
#ifdef CONFIG_PROC_FS
struct rt_cache_iter_state {
	struct seq_net_private p;
	int bucket;
	int genid;
};

static struct rtable *rt_cache_get_first(struct seq_file *seq)
{
	struct rt_cache_iter_state *st = seq->private;
	struct rtable *r = NULL;

	for (st->bucket = rt_hash_mask; st->bucket >= 0; --st->bucket) {
		if (!rcu_dereference_raw(rt_hash_table[st->bucket].chain))
			continue;
		rcu_read_lock_bh();
		r = rcu_dereference_bh(rt_hash_table[st->bucket].chain);
		while (r) {
			if (dev_net(r->dst.dev) == seq_file_net(seq) &&
			    r->rt_genid == st->genid)
				return r;
			r = rcu_dereference_bh(r->dst.rt_next);
		}
		rcu_read_unlock_bh();
	}
	return r;
}

static struct rtable *__rt_cache_get_next(struct seq_file *seq,
					  struct rtable *r)
{
	struct rt_cache_iter_state *st = seq->private;

	r = rcu_dereference_bh(r->dst.rt_next);
	while (!r) {
		rcu_read_unlock_bh();
		do {
			if (--st->bucket < 0)
				return NULL;
		} while (!rcu_dereference_raw(rt_hash_table[st->bucket].chain));
		rcu_read_lock_bh();
		r = rcu_dereference_bh(rt_hash_table[st->bucket].chain);
	}
	return r;
}

static struct rtable *rt_cache_get_next(struct seq_file *seq,
					struct rtable *r)
{
	struct rt_cache_iter_state *st = seq->private;
	while ((r = __rt_cache_get_next(seq, r)) != NULL) {
		if (dev_net(r->dst.dev) != seq_file_net(seq))
			continue;
		if (r->rt_genid == st->genid)
			break;
	}
	return r;
}

static struct rtable *rt_cache_get_idx(struct seq_file *seq, loff_t pos)
{
	struct rtable *r = rt_cache_get_first(seq);

	if (r)
		while (pos && (r = rt_cache_get_next(seq, r)))
			--pos;
	return pos ? NULL : r;
}

static void *rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct rt_cache_iter_state *st = seq->private;
	if (*pos)
		return rt_cache_get_idx(seq, *pos - 1);
	st->genid = rt_genid(seq_file_net(seq));
	return SEQ_START_TOKEN;
}

static void *rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct rtable *r;

	if (v == SEQ_START_TOKEN)
		r = rt_cache_get_first(seq);
	else
		r = rt_cache_get_next(seq, v);
	++*pos;
	return r;
}

static void rt_cache_seq_stop(struct seq_file *seq, void *v)
{
	if (v && v != SEQ_START_TOKEN)
		rcu_read_unlock_bh();
}

static int rt_cache_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_printf(seq, "%-127s\n",
			   "Iface\tDestination\tGateway \tFlags\t\tRefCnt\tUse\t"
			   "Metric\tSource\t\tMTU\tWindow\tIRTT\tTOS\tHHRef\t"
			   "HHUptod\tSpecDst");
	else {
		struct rtable *r = v;
		int len;

		seq_printf(seq, "%s\t%08X\t%08X\t%8X\t%d\t%u\t%d\t"
			   "%08X\t%d\t%u\t%u\t%02X\t%d\t%1d\t%08X%n",
			r->dst.dev ? r->dst.dev->name : "*",
			(__force u32)r->rt_dst,
			(__force u32)r->rt_gateway,
			r->rt_flags, atomic_read(&r->dst.__refcnt),
			r->dst.__use, 0, (__force u32)r->rt_src,
			(dst_metric(&r->dst, RTAX_ADVMSS) ?
			     (int)dst_metric(&r->dst, RTAX_ADVMSS) + 40 : 0),
			dst_metric(&r->dst, RTAX_WINDOW),
			(int)((dst_metric(&r->dst, RTAX_RTT) >> 3) +
			      dst_metric(&r->dst, RTAX_RTTVAR)),
			r->fl.fl4_tos,
			r->dst.hh ? atomic_read(&r->dst.hh->hh_refcnt) : -1,
			r->dst.hh ? (r->dst.hh->hh_output ==
				     dev_queue_xmit) : 0,
			r->rt_spec_dst, &len);

		seq_printf(seq, "%*s\n", 127 - len, "");
	}
	return 0;
}

static const struct seq_operations rt_cache_seq_ops = {
	.start  = rt_cache_seq_start,
	.next   = rt_cache_seq_next,
	.stop   = rt_cache_seq_stop,
	.show   = rt_cache_seq_show,
};

static int rt_cache_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &rt_cache_seq_ops,
			sizeof(struct rt_cache_iter_state));
}

static const struct file_operations rt_cache_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = rt_cache_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_net,
};


static void *rt_cpu_seq_start(struct seq_file *seq, loff_t *pos)
{
	int cpu;

	if (*pos == 0)
		return SEQ_START_TOKEN;

	for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;
		return &per_cpu(rt_cache_stat, cpu);
	}
	return NULL;
}

static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	int cpu;

	for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;
		return &per_cpu(rt_cache_stat, cpu);
	}
	return NULL;

}

static void rt_cpu_seq_stop(struct seq_file *seq, void *v)
{

}

static int rt_cpu_seq_show(struct seq_file *seq, void *v)
{
	struct rt_cache_stat *st = v;

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "entries in_hit in_slow_tot in_slow_mc in_no_route in_brd in_martian_dst in_martian_src out_hit out_slow_tot out_slow_mc gc_total gc_ignored gc_goal_miss gc_dst_overflow in_hlist_search out_hlist_search\n");
		return 0;
	}

	seq_printf(seq,"%08x %08x %08x %08x %08x %08x %08x %08x "
		   " %08x %08x %08x %08x %08x %08x %08x %08x %08x \n",
		   dst_entries_get_slow(&ipv4_dst_ops),
		   st->in_hit,
		   st->in_slow_tot,
		   st->in_slow_mc,
		   st->in_no_route,
		   st->in_brd,
		   st->in_martian_dst,
		   st->in_martian_src,

		   st->out_hit,
		   st->out_slow_tot,
		   st->out_slow_mc,

		   st->gc_total,
		   st->gc_ignored,
		   st->gc_goal_miss,
		   st->gc_dst_overflow,
		   st->in_hlist_search,
		   st->out_hlist_search
		);
	return 0;
}

static const struct seq_operations rt_cpu_seq_ops = {
	.start  = rt_cpu_seq_start,
	.next   = rt_cpu_seq_next,
	.stop   = rt_cpu_seq_stop,
	.show   = rt_cpu_seq_show,
};


static int rt_cpu_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &rt_cpu_seq_ops);
}

static const struct file_operations rt_cpu_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = rt_cpu_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release,
};

#ifdef CONFIG_NET_CLS_ROUTE
static int rt_acct_proc_show(struct seq_file *m, void *v)
{
	struct ip_rt_acct *dst, *src;
	unsigned int i, j;

	dst = kcalloc(256, sizeof(struct ip_rt_acct), GFP_KERNEL);
	if (!dst)
		return -ENOMEM;

	for_each_possible_cpu(i) {
		src = (struct ip_rt_acct *)per_cpu_ptr(ip_rt_acct, i);
		for (j = 0; j < 256; j++) {
			dst[j].o_bytes   += src[j].o_bytes;
			dst[j].o_packets += src[j].o_packets;
			dst[j].i_bytes   += src[j].i_bytes;
			dst[j].i_packets += src[j].i_packets;
		}
	}

	seq_write(m, dst, 256 * sizeof(struct ip_rt_acct));
	kfree(dst);
	return 0;
}

static int rt_acct_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, rt_acct_proc_show, NULL);
}

static const struct file_operations rt_acct_proc_fops = {
	.owner	 = THIS_MODULE,
	.open	 = rt_acct_proc_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = single_release,
};
#endif

static int __net_init ip_rt_do_proc_init(struct net *net)
{
	struct proc_dir_entry *pde;

	pde = proc_net_fops_create(net, "rt_cache", S_IRUGO,
			&rt_cache_seq_fops);
	if (!pde)
		goto err1;

	pde = proc_create("rt_cache", S_IRUGO,
			  net->proc_net_stat, &rt_cpu_seq_fops);
	if (!pde)
		goto err2;

#ifdef CONFIG_NET_CLS_ROUTE
	pde = proc_create("rt_acct", 0, net->proc_net, &rt_acct_proc_fops);
	if (!pde)
		goto err3;
#endif
	return 0;

#ifdef CONFIG_NET_CLS_ROUTE
err3:
	remove_proc_entry("rt_cache", net->proc_net_stat);
#endif
err2:
	remove_proc_entry("rt_cache", net->proc_net);
err1:
	return -ENOMEM;
}

static void __net_exit ip_rt_do_proc_exit(struct net *net)
{
	remove_proc_entry("rt_cache", net->proc_net_stat);
	remove_proc_entry("rt_cache", net->proc_net);
#ifdef CONFIG_NET_CLS_ROUTE
	remove_proc_entry("rt_acct", net->proc_net);
#endif
}

static struct pernet_operations ip_rt_proc_ops __net_initdata =	{
	.init = ip_rt_do_proc_init,
	.exit = ip_rt_do_proc_exit,
};

static int __init ip_rt_proc_init(void)
{
	return register_pernet_subsys(&ip_rt_proc_ops);
}

#else
static inline int ip_rt_proc_init(void)
{
	return 0;
}
#endif /* CONFIG_PROC_FS */

static inline void rt_free(struct rtable *rt)
{
	call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free);
}

static inline void rt_drop(struct rtable *rt)
{
	ip_rt_put(rt);
	call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free);
}

static inline int rt_fast_clean(struct rtable *rth)
{
	/* Kill broadcast/multicast entries very aggressively, if they
	   collide in hash table with more useful entries */
	return (rth->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) &&
		rt_is_input_route(rth) && rth->dst.rt_next;
}

static inline int rt_valuable(struct rtable *rth)
{
	return (rth->rt_flags & (RTCF_REDIRECTED | RTCF_NOTIFY)) ||
		rth->dst.expires;
}

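/*
 * Decide whether an unreferenced cache entry may be reclaimed.  tmo1 is the
 * age limit for entries rt_fast_clean() deems expendable, tmo2 the (longer)
 * limit for entries rt_valuable() wants to keep; both callers below halve
 * the timeout as they walk deeper into a chain, so entries at the front of
 * a long chain survive longer than those at the tail.
 */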
static int rt_may_expire(struct rtable *rth, unsigned long tmo1, unsigned long tmo2)
{
	unsigned long age;
	int ret = 0;

	if (atomic_read(&rth->dst.__refcnt))
		goto out;

	ret = 1;
	if (rth->dst.expires &&
	    time_after_eq(jiffies, rth->dst.expires))
		goto out;

	age = jiffies - rth->dst.lastuse;
	ret = 0;
	if ((age <= tmo1 && !rt_fast_clean(rth)) ||
	    (age <= tmo2 && rt_valuable(rth)))
		goto out;
	ret = 1;
out:	return ret;
}

/* Bits of score are:
 * 31: very valuable
 * 30: not quite useless
 * 29..0: usage counter
 */
static inline u32 rt_score(struct rtable *rt)
{
	u32 score = jiffies - rt->dst.lastuse;

	score = ~score & ~(3<<30);

	if (rt_valuable(rt))
		score |= (1<<31);

	if (rt_is_output_route(rt) ||
	    !(rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST|RTCF_LOCAL)))
		score |= (1<<30);

	return score;
}

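/*
 * Worked example: since the low bits hold the inverted age, an older entry
 * scores lower than a younger one, and a valuable entry (bit 31) always
 * outscores a non-valuable one.  rt_intern_hash() tracks the minimum score
 * along a chain and evicts that entry when the chain grows past
 * ip_rt_gc_elasticity, giving LRU-like behaviour among the unreferenced
 * entries.
 */
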
static inline bool rt_caching(const struct net *net)
{
	return net->ipv4.current_rt_cache_rebuild_count <=
		net->ipv4.sysctl_rt_cache_rebuild_count;
}

static inline bool compare_hash_inputs(const struct flowi *fl1,
				       const struct flowi *fl2)
{
	return ((((__force u32)fl1->fl4_dst ^ (__force u32)fl2->fl4_dst) |
		((__force u32)fl1->fl4_src ^ (__force u32)fl2->fl4_src) |
		(fl1->iif ^ fl2->iif)) == 0);
}

static inline int compare_keys(struct flowi *fl1, struct flowi *fl2)
{
	return (((__force u32)fl1->fl4_dst ^ (__force u32)fl2->fl4_dst) |
		((__force u32)fl1->fl4_src ^ (__force u32)fl2->fl4_src) |
		(fl1->mark ^ fl2->mark) |
		(*(u16 *)&fl1->fl4_tos ^ *(u16 *)&fl2->fl4_tos) |
		(fl1->oif ^ fl2->oif) |
		(fl1->iif ^ fl2->iif)) == 0;
}

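/*
 * Both helpers above use the branch-free "OR of XORs" idiom: a ^ b is zero
 * only when the fields match, so the OR of all the XORs is zero iff every
 * field matches.  This compiles to straight-line code with a single final
 * test instead of a chain of conditional jumps.
 */
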
static inline int compare_netns(struct rtable *rt1, struct rtable *rt2)
{
	return net_eq(dev_net(rt1->dst.dev), dev_net(rt2->dst.dev));
}

static inline int rt_is_expired(struct rtable *rth)
{
	return rth->rt_genid != rt_genid(dev_net(rth->dst.dev));
}

/*
 * Perform a full scan of hash table and free all entries.
 * Can be called by a softirq or a process.
 * In the latter case, we want to reschedule if necessary.
 */
static void rt_do_flush(int process_context)
{
	unsigned int i;
	struct rtable *rth, *next;
	struct rtable *tail;

	for (i = 0; i <= rt_hash_mask; i++) {
		if (process_context && need_resched())
			cond_resched();
		rth = rcu_dereference_raw(rt_hash_table[i].chain);
		if (!rth)
			continue;

		spin_lock_bh(rt_hash_lock_addr(i));
#ifdef CONFIG_NET_NS
		{
		struct rtable __rcu **prev;
		struct rtable *p;

		rth = rcu_dereference_protected(rt_hash_table[i].chain,
			lockdep_is_held(rt_hash_lock_addr(i)));

		/* defer releasing the head of the list after spin_unlock */
		for (tail = rth; tail;
		     tail = rcu_dereference_protected(tail->dst.rt_next,
				lockdep_is_held(rt_hash_lock_addr(i))))
			if (!rt_is_expired(tail))
				break;
		if (rth != tail)
			rt_hash_table[i].chain = tail;

		/* call rt_free on entries after the tail requiring flush */
		prev = &rt_hash_table[i].chain;
		for (p = rcu_dereference_protected(*prev,
				lockdep_is_held(rt_hash_lock_addr(i)));
		     p != NULL;
		     p = next) {
			next = rcu_dereference_protected(p->dst.rt_next,
				lockdep_is_held(rt_hash_lock_addr(i)));
			if (!rt_is_expired(p)) {
				prev = &p->dst.rt_next;
			} else {
				*prev = next;
				rt_free(p);
			}
		}
		}
#else
		rth = rcu_dereference_protected(rt_hash_table[i].chain,
			lockdep_is_held(rt_hash_lock_addr(i)));
		rcu_assign_pointer(rt_hash_table[i].chain, NULL);
		tail = NULL;
#endif
		spin_unlock_bh(rt_hash_lock_addr(i));

		for (; rth != tail; rth = next) {
			next = rcu_dereference_protected(rth->dst.rt_next, 1);
			rt_free(rth);
		}
	}
}

/*
 * While freeing expired entries, we compute average chain length
 * and standard deviation, using fixed-point arithmetic.
 * This is to have an estimation of rt_chain_length_max:
 * rt_chain_length_max = max(elasticity, AVG + 4*SD)
 * We use 3 bits for the fractional part, and 29 (or 61) for magnitude.
 */

#define FRACT_BITS 3
#define ONE (1UL << FRACT_BITS)

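/*
 * Example of the fixed-point convention: with FRACT_BITS == 3, chain
 * lengths are kept in units of 1/8.  has_noalias() returns ONE (8) per
 * counted entry, so an average chain length of 2.5 is stored as 20, and
 * rt_check_expire() shifts (avg + 4*sd) right by FRACT_BITS to get back
 * to whole entries.
 */
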
/*
 * Given a hash chain and an item in this hash chain,
 * find if a previous entry has the same hash_inputs
 * (but differs on tos, mark or oif).
 * Returns 0 if an alias is found.
 * Returns ONE if rth has no alias before itself.
 */
static int has_noalias(const struct rtable *head, const struct rtable *rth)
{
	const struct rtable *aux = head;

	while (aux != rth) {
		if (compare_hash_inputs(&aux->fl, &rth->fl))
			return 0;
		aux = rcu_dereference_protected(aux->dst.rt_next, 1);
	}
	return ONE;
}

static void rt_check_expire(void)
{
	static unsigned int rover;
	unsigned int i = rover, goal;
	struct rtable *rth;
	struct rtable __rcu **rthp;
	unsigned long samples = 0;
	unsigned long sum = 0, sum2 = 0;
	unsigned long delta;
	u64 mult;

	delta = jiffies - expires_ljiffies;
	expires_ljiffies = jiffies;
	mult = ((u64)delta) << rt_hash_log;
	if (ip_rt_gc_timeout > 1)
		do_div(mult, ip_rt_gc_timeout);
	goal = (unsigned int)mult;
	if (goal > rt_hash_mask)
		goal = rt_hash_mask + 1;
	for (; goal > 0; goal--) {
		unsigned long tmo = ip_rt_gc_timeout;
		unsigned long length;

		i = (i + 1) & rt_hash_mask;
		rthp = &rt_hash_table[i].chain;

		if (need_resched())
			cond_resched();

		samples++;

		if (rcu_dereference_raw(*rthp) == NULL)
			continue;
		length = 0;
		spin_lock_bh(rt_hash_lock_addr(i));
		while ((rth = rcu_dereference_protected(*rthp,
					lockdep_is_held(rt_hash_lock_addr(i)))) != NULL) {
			prefetch(rth->dst.rt_next);
			if (rt_is_expired(rth)) {
				*rthp = rth->dst.rt_next;
				rt_free(rth);
				continue;
			}
			if (rth->dst.expires) {
				/* Entry is expired even if it is in use */
				if (time_before_eq(jiffies, rth->dst.expires)) {
nofree:
					tmo >>= 1;
					rthp = &rth->dst.rt_next;
					/*
					 * We only count entries on
					 * a chain with equal hash inputs once
					 * so that entries for different QOS
					 * levels, and other non-hash input
					 * attributes don't unfairly skew
					 * the length computation
					 */
					length += has_noalias(rt_hash_table[i].chain, rth);
					continue;
				}
			} else if (!rt_may_expire(rth, tmo, ip_rt_gc_timeout))
				goto nofree;

			/* Cleanup aged off entries. */
			*rthp = rth->dst.rt_next;
			rt_free(rth);
		}
		spin_unlock_bh(rt_hash_lock_addr(i));
		sum += length;
		sum2 += length*length;
	}
	if (samples) {
		unsigned long avg = sum / samples;
		unsigned long sd = int_sqrt(sum2 / samples - avg*avg);
		rt_chain_length_max = max_t(unsigned long,
					    ip_rt_gc_elasticity,
					    (avg + 4*sd) >> FRACT_BITS);
	}
	rover = i;
}

/*
 * rt_worker_func() is run in process context.
 * We call rt_check_expire() to scan part of the hash table.
 */
static void rt_worker_func(struct work_struct *work)
{
	rt_check_expire();
	schedule_delayed_work(&expires_work, ip_rt_gc_interval);
}

/*
 * Perturbation of rt_genid by a small quantity [1..256].
 * Using 8 bits of shuffling ensures we can call rt_cache_invalidate()
 * many times (2^24) without giving recent rt_genid.
 * Jenkins hash is strong enough that little changes of rt_genid are OK.
 */
static void rt_cache_invalidate(struct net *net)
{
	unsigned char shuffle;

	get_random_bytes(&shuffle, sizeof(shuffle));
	atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
}

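/*
 * Why 2^24: the genid advances by 1..256 per invalidation, so after k
 * flushes it has moved at most 256*k.  With a 32-bit genid it therefore
 * takes on the order of 2^32 / 2^8 = 2^24 invalidations before a recently
 * used value can come around again and wrongly validate stale entries.
 */
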
/*
 * delay < 0	: invalidate cache (fast : entries will be deleted later)
 * delay >= 0	: invalidate & flush cache (can be long)
 */
void rt_cache_flush(struct net *net, int delay)
{
	rt_cache_invalidate(net);
	if (delay >= 0)
		rt_do_flush(!in_softirq());
}

/* Flush previously invalidated entries from the cache */
void rt_cache_flush_batch(void)
{
	rt_do_flush(!in_softirq());
}

static void rt_emergency_hash_rebuild(struct net *net)
{
	if (net_ratelimit())
		printk(KERN_WARNING "Route hash chain too long!\n");
	rt_cache_invalidate(net);
}

/*
   Short description of GC goals.

   We want to build an algorithm which will keep the routing cache
   at some equilibrium point, where the number of aged-off entries
   is kept approximately equal to newly generated ones.

   Current expiration strength is the variable "expire".
   We try to adjust it dynamically, so that if networking
   is idle, expire is large enough to keep enough warm entries,
   and when load increases it shrinks to limit the cache size.
 */

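/*
 * The feedback loop in practice: each time the goal is met, the work_done
 * path grows "expire" by ip_rt_gc_min_interval (capped at ip_rt_gc_timeout),
 * making future GC lazier; each time the goal is missed, "expire" is halved,
 * making GC progressively more aggressive until the cache shrinks back
 * toward the equilibrium point.
 */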
static int rt_garbage_collect(struct dst_ops *ops)
{
	static unsigned long expire = RT_GC_TIMEOUT;
	static unsigned long last_gc;
	static int rover;
	static int equilibrium;
	struct rtable *rth;
	struct rtable __rcu **rthp;
	unsigned long now = jiffies;
	int goal;
	int entries = dst_entries_get_fast(&ipv4_dst_ops);

	/*
	 * Garbage collection is pretty expensive,
	 * do not make it too frequently.
	 */

	RT_CACHE_STAT_INC(gc_total);

	if (now - last_gc < ip_rt_gc_min_interval &&
	    entries < ip_rt_max_size) {
		RT_CACHE_STAT_INC(gc_ignored);
		goto out;
	}

	entries = dst_entries_get_slow(&ipv4_dst_ops);
	/* Calculate number of entries, which we want to expire now. */
	goal = entries - (ip_rt_gc_elasticity << rt_hash_log);
	if (goal <= 0) {
		if (equilibrium < ipv4_dst_ops.gc_thresh)
			equilibrium = ipv4_dst_ops.gc_thresh;
		goal = entries - equilibrium;
		if (goal > 0) {
			equilibrium += min_t(unsigned int, goal >> 1, rt_hash_mask + 1);
			goal = entries - equilibrium;
		}
	} else {
		/* We are in dangerous area. Try to reduce cache really
		 * aggressively.
		 */
		goal = max_t(unsigned int, goal >> 1, rt_hash_mask + 1);
		equilibrium = entries - goal;
	}

	if (now - last_gc >= ip_rt_gc_min_interval)
		last_gc = now;

	if (goal <= 0) {
		equilibrium += goal;
		goto work_done;
	}

	do {
		int i, k;

		for (i = rt_hash_mask, k = rover; i >= 0; i--) {
			unsigned long tmo = expire;

			k = (k + 1) & rt_hash_mask;
			rthp = &rt_hash_table[k].chain;
			spin_lock_bh(rt_hash_lock_addr(k));
			while ((rth = rcu_dereference_protected(*rthp,
					lockdep_is_held(rt_hash_lock_addr(k)))) != NULL) {
				if (!rt_is_expired(rth) &&
				    !rt_may_expire(rth, tmo, expire)) {
					tmo >>= 1;
					rthp = &rth->dst.rt_next;
					continue;
				}
				*rthp = rth->dst.rt_next;
				rt_free(rth);
				goal--;
			}
			spin_unlock_bh(rt_hash_lock_addr(k));
			if (goal <= 0)
				break;
		}
		rover = k;

		if (goal <= 0)
			goto work_done;

		/* Goal is not achieved. We stop process if:

		   - if expire reduced to zero. Otherwise, expire is halved.
		   - if table is not full.
		   - if we are called from interrupt.
		   - jiffies check is just fallback/debug loop breaker.
		     We will not spin here for long time in any case.
		 */

		RT_CACHE_STAT_INC(gc_goal_miss);

		if (expire == 0)
			break;

		expire >>= 1;
#if RT_CACHE_DEBUG >= 2
		printk(KERN_DEBUG "expire>> %u %d %d %d\n", expire,
				dst_entries_get_fast(&ipv4_dst_ops), goal, i);
#endif

		if (dst_entries_get_fast(&ipv4_dst_ops) < ip_rt_max_size)
			goto out;
	} while (!in_softirq() && time_before_eq(jiffies, now));

	if (dst_entries_get_fast(&ipv4_dst_ops) < ip_rt_max_size)
		goto out;
	if (dst_entries_get_slow(&ipv4_dst_ops) < ip_rt_max_size)
		goto out;
	if (net_ratelimit())
		printk(KERN_WARNING "dst cache overflow\n");
	RT_CACHE_STAT_INC(gc_dst_overflow);
	return 1;

work_done:
	expire += ip_rt_gc_min_interval;
	if (expire > ip_rt_gc_timeout ||
	    dst_entries_get_fast(&ipv4_dst_ops) < ipv4_dst_ops.gc_thresh ||
	    dst_entries_get_slow(&ipv4_dst_ops) < ipv4_dst_ops.gc_thresh)
		expire = ip_rt_gc_timeout;
#if RT_CACHE_DEBUG >= 2
	printk(KERN_DEBUG "expire++ %u %d %d %d\n", expire,
			dst_entries_get_fast(&ipv4_dst_ops), goal, rover);
#endif
out:	return 0;
}

/*
 * Returns the number of entries in a hash chain that have different hash_inputs
 */
static int slow_chain_length(const struct rtable *head)
{
	int length = 0;
	const struct rtable *rth = head;

	while (rth) {
		length += has_noalias(head, rth);
		rth = rcu_dereference_protected(rth->dst.rt_next, 1);
	}
	return length >> FRACT_BITS;
}

static int rt_intern_hash(unsigned hash, struct rtable *rt,
			  struct rtable **rp, struct sk_buff *skb, int ifindex)
{
	struct rtable	*rth, *cand;
	struct rtable __rcu **rthp, **candp;
	unsigned long	now;
	u32 		min_score;
	int		chain_length;
	int attempts = !in_softirq();

restart:
	chain_length = 0;
	min_score = ~(u32)0;
	cand = NULL;
	candp = NULL;
	now = jiffies;

	if (!rt_caching(dev_net(rt->dst.dev))) {
		/*
		 * If we're not caching, just tell the caller we
		 * were successful and don't touch the route.  The
		 * caller holds the sole reference to the cache entry, and
		 * it will be released when the caller is done with it.
		 * If we drop it here, the callers have no way to resolve routes
		 * when we're not caching.  Instead, just point *rp at rt, so
		 * the caller gets a single use out of the route.
		 * Note that we do rt_free on this new route entry, so that
		 * once its refcount hits zero, we are still able to reap it
		 * (Thanks Alexey)
		 * Note: To avoid expensive rcu stuff for this uncached dst,
		 * we set DST_NOCACHE so that dst_release() can free dst without
		 * waiting a grace period.
		 */

		rt->dst.flags |= DST_NOCACHE;
		if (rt->rt_type == RTN_UNICAST || rt_is_output_route(rt)) {
			int err = arp_bind_neighbour(&rt->dst);
			if (err) {
				if (net_ratelimit())
					printk(KERN_WARNING
					    "Neighbour table failure & not caching routes.\n");
				ip_rt_put(rt);
				return err;
			}
		}

		goto skip_hashing;
	}

	rthp = &rt_hash_table[hash].chain;

	spin_lock_bh(rt_hash_lock_addr(hash));
	while ((rth = rcu_dereference_protected(*rthp,
			lockdep_is_held(rt_hash_lock_addr(hash)))) != NULL) {
		if (rt_is_expired(rth)) {
			*rthp = rth->dst.rt_next;
			rt_free(rth);
			continue;
		}
		if (compare_keys(&rth->fl, &rt->fl) && compare_netns(rth, rt)) {
			/* Put it first */
			*rthp = rth->dst.rt_next;
			/*
			 * Since lookup is lockfree, the deletion
			 * must be visible to another weakly ordered CPU before
			 * the insertion at the start of the hash chain.
			 */
			rcu_assign_pointer(rth->dst.rt_next,
					   rt_hash_table[hash].chain);
			/*
			 * Since lookup is lockfree, the update writes
			 * must be ordered for consistency on SMP.
			 */
			rcu_assign_pointer(rt_hash_table[hash].chain, rth);

			dst_use(&rth->dst, now);
			spin_unlock_bh(rt_hash_lock_addr(hash));

			rt_drop(rt);
			if (rp)
				*rp = rth;
			else
				skb_dst_set(skb, &rth->dst);
			return 0;
		}

		if (!atomic_read(&rth->dst.__refcnt)) {
			u32 score = rt_score(rth);

			if (score <= min_score) {
				cand = rth;
				candp = rthp;
				min_score = score;
			}
		}

		chain_length++;

		rthp = &rth->dst.rt_next;
	}

	if (cand) {
		/* ip_rt_gc_elasticity used to be average length of chain
		 * length, when exceeded gc becomes really aggressive.
		 *
		 * The second limit is less certain. At the moment it allows
		 * only 2 entries per bucket. We will see.
		 */
		if (chain_length > ip_rt_gc_elasticity) {
			*candp = cand->dst.rt_next;
			rt_free(cand);
		}
	} else {
		if (chain_length > rt_chain_length_max &&
		    slow_chain_length(rt_hash_table[hash].chain) > rt_chain_length_max) {
			struct net *net = dev_net(rt->dst.dev);
			int num = ++net->ipv4.current_rt_cache_rebuild_count;
			if (!rt_caching(net)) {
				printk(KERN_WARNING "%s: %d rebuilds is over limit, route caching disabled\n",
					rt->dst.dev->name, num);
			}
			rt_emergency_hash_rebuild(net);
			spin_unlock_bh(rt_hash_lock_addr(hash));

			hash = rt_hash(rt->fl.fl4_dst, rt->fl.fl4_src,
					ifindex, rt_genid(net));
			goto restart;
		}
	}

	/* Try to bind route to arp only if it is output
	   route or unicast forwarding path.
	 */
	if (rt->rt_type == RTN_UNICAST || rt_is_output_route(rt)) {
		int err = arp_bind_neighbour(&rt->dst);
		if (err) {
			spin_unlock_bh(rt_hash_lock_addr(hash));

			if (err != -ENOBUFS) {
				rt_drop(rt);
				return err;
			}

			/* Neighbour tables are full and nothing
			   can be released. Try to shrink route cache,
			   it is most likely it holds some neighbour records.
			 */
			if (attempts-- > 0) {
				int saved_elasticity = ip_rt_gc_elasticity;
				int saved_int = ip_rt_gc_min_interval;
				ip_rt_gc_elasticity	= 1;
				ip_rt_gc_min_interval	= 0;
				rt_garbage_collect(&ipv4_dst_ops);
				ip_rt_gc_min_interval	= saved_int;
				ip_rt_gc_elasticity	= saved_elasticity;
				goto restart;
			}

			if (net_ratelimit())
				printk(KERN_WARNING "ipv4: Neighbour table overflow.\n");
			rt_drop(rt);
			return -ENOBUFS;
		}
	}

	rt->dst.rt_next = rt_hash_table[hash].chain;

#if RT_CACHE_DEBUG >= 2
	if (rt->dst.rt_next) {
		struct rtable *trt;
		printk(KERN_DEBUG "rt_cache @%02x: %pI4",
		       hash, &rt->rt_dst);
		for (trt = rt->dst.rt_next; trt; trt = trt->dst.rt_next)
			printk(" . %pI4", &trt->rt_dst);
		printk("\n");
	}
#endif
	/*
	 * Since lookup is lockfree, we must make sure
	 * previous writes to rt are committed to memory
	 * before making rt visible to other CPUS.
	 */
	rcu_assign_pointer(rt_hash_table[hash].chain, rt);

	spin_unlock_bh(rt_hash_lock_addr(hash));

skip_hashing:
	if (rp)
		*rp = rt;
	else
		skb_dst_set(skb, &rt->dst);
	return 0;
}

void rt_bind_peer(struct rtable *rt, int create)
{
	struct inet_peer *peer;

	peer = inet_getpeer_v4(rt->rt_dst, create);

	if (peer && cmpxchg(&rt->peer, NULL, peer) != NULL)
		inet_putpeer(peer);
}

/*
 * Peer allocation may fail only in serious out-of-memory conditions.  However
 * we still can generate some output.
 * Random ID selection looks a bit dangerous because we have no chances to
 * select ID being unique in a reasonable period of time.
 * But broken packet identifier may be better than no packet at all.
 */
static void ip_select_fb_ident(struct iphdr *iph)
{
	static DEFINE_SPINLOCK(ip_fb_id_lock);
	static u32 ip_fallback_id;
	u32 salt;

	spin_lock_bh(&ip_fb_id_lock);
	salt = secure_ip_id((__force __be32)ip_fallback_id ^ iph->daddr);
	iph->id = htons(salt & 0xFFFF);
	ip_fallback_id = salt;
	spin_unlock_bh(&ip_fb_id_lock);
}

void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more)
{
	struct rtable *rt = (struct rtable *) dst;

	if (rt) {
		if (rt->peer == NULL)
			rt_bind_peer(rt, 1);

		/* If peer is attached to destination, it is never detached,
		   so that we need not grab a lock to dereference it.
		 */
		if (rt->peer) {
			iph->id = htons(inet_getid(rt->peer, more));
			return;
		}
	} else
		printk(KERN_DEBUG "rt_bind_peer(0) @%p\n",
		       __builtin_return_address(0));

	ip_select_fb_ident(iph);
}
EXPORT_SYMBOL(__ip_select_ident);

static void rt_del(unsigned hash, struct rtable *rt)
{
	struct rtable __rcu **rthp;
	struct rtable *aux;

	rthp = &rt_hash_table[hash].chain;
	spin_lock_bh(rt_hash_lock_addr(hash));
	ip_rt_put(rt);
	while ((aux = rcu_dereference_protected(*rthp,
			lockdep_is_held(rt_hash_lock_addr(hash)))) != NULL) {
		if (aux == rt || rt_is_expired(aux)) {
			*rthp = aux->dst.rt_next;
			rt_free(aux);
			continue;
		}
		rthp = &aux->dst.rt_next;
	}
	spin_unlock_bh(rt_hash_lock_addr(hash));
}

/* called in rcu_read_lock() section */
void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
		    __be32 saddr, struct net_device *dev)
{
	int i, k;
	struct in_device *in_dev = __in_dev_get_rcu(dev);
	struct rtable *rth;
	struct rtable __rcu **rthp;
	__be32  skeys[2] = { saddr, 0 };
	int  ikeys[2] = { dev->ifindex, 0 };
	struct netevent_redirect netevent;
	struct net *net;

	if (!in_dev)
		return;

	net = dev_net(dev);
	if (new_gw == old_gw || !IN_DEV_RX_REDIRECTS(in_dev) ||
	    ipv4_is_multicast(new_gw) || ipv4_is_lbcast(new_gw) ||
	    ipv4_is_zeronet(new_gw))
		goto reject_redirect;

	if (!rt_caching(net))
		goto reject_redirect;

	if (!IN_DEV_SHARED_MEDIA(in_dev)) {
		if (!inet_addr_onlink(in_dev, new_gw, old_gw))
			goto reject_redirect;
		if (IN_DEV_SEC_REDIRECTS(in_dev) && ip_fib_check_default(new_gw, dev))
			goto reject_redirect;
	} else {
		if (inet_addr_type(net, new_gw) != RTN_UNICAST)
			goto reject_redirect;
	}

	for (i = 0; i < 2; i++) {
		for (k = 0; k < 2; k++) {
			unsigned hash = rt_hash(daddr, skeys[i], ikeys[k],
						rt_genid(net));

			rthp = &rt_hash_table[hash].chain;

			while ((rth = rcu_dereference(*rthp)) != NULL) {
				struct rtable *rt;

				if (rth->fl.fl4_dst != daddr ||
				    rth->fl.fl4_src != skeys[i] ||
				    rth->fl.oif != ikeys[k] ||
				    rt_is_input_route(rth) ||
				    rt_is_expired(rth) ||
				    !net_eq(dev_net(rth->dst.dev), net)) {
					rthp = &rth->dst.rt_next;
					continue;
				}

				if (rth->rt_dst != daddr ||
				    rth->rt_src != saddr ||
				    rth->dst.error ||
				    rth->rt_gateway != old_gw ||
				    rth->dst.dev != dev)
					break;

				dst_hold(&rth->dst);

				rt = dst_alloc(&ipv4_dst_ops);
				if (rt == NULL) {
					ip_rt_put(rth);
					return;
				}

				/* Copy all the information. */
				*rt = *rth;
				rt->dst.__use		= 1;
				atomic_set(&rt->dst.__refcnt, 1);
				rt->dst.child		= NULL;
				if (rt->dst.dev)
					dev_hold(rt->dst.dev);
				rt->dst.obsolete	= -1;
				rt->dst.lastuse		= jiffies;
				rt->dst.path		= &rt->dst;
				rt->dst.neighbour	= NULL;
				rt->dst.hh		= NULL;
#ifdef CONFIG_XFRM
				rt->dst.xfrm		= NULL;
#endif
				rt->rt_genid		= rt_genid(net);
				rt->rt_flags		|= RTCF_REDIRECTED;

				/* Gateway is different ... */
				rt->rt_gateway		= new_gw;

				/* Redirect received -> path was valid */
				dst_confirm(&rth->dst);

				if (rt->peer)
					atomic_inc(&rt->peer->refcnt);

				if (arp_bind_neighbour(&rt->dst) ||
				    !(rt->dst.neighbour->nud_state &
					    NUD_VALID)) {
					if (rt->dst.neighbour)
						neigh_event_send(rt->dst.neighbour, NULL);
					ip_rt_put(rth);
					rt_drop(rt);
					goto do_next;
				}

				netevent.old = &rth->dst;
				netevent.new = &rt->dst;
				call_netevent_notifiers(NETEVENT_REDIRECT,
							&netevent);

				rt_del(hash, rth);
				if (!rt_intern_hash(hash, rt, &rt, NULL, rt->fl.oif))
					ip_rt_put(rt);
				goto do_next;
			}
		do_next:
			;
		}
	}
	return;

reject_redirect:
#ifdef CONFIG_IP_ROUTE_VERBOSE
	if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit())
		printk(KERN_INFO "Redirect from %pI4 on %s about %pI4 ignored.\n"
			"  Advised path = %pI4 -> %pI4\n",
		       &old_gw, dev->name, &new_gw,
		       &saddr, &daddr);
#endif
	;
}

static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
{
	struct rtable *rt = (struct rtable *)dst;
	struct dst_entry *ret = dst;

	if (rt) {
		if (dst->obsolete > 0) {
			ip_rt_put(rt);
			ret = NULL;
		} else if ((rt->rt_flags & RTCF_REDIRECTED) ||
			   (rt->dst.expires &&
			    time_after_eq(jiffies, rt->dst.expires))) {
			unsigned hash = rt_hash(rt->fl.fl4_dst, rt->fl.fl4_src,
						rt->fl.oif,
						rt_genid(dev_net(dst->dev)));
#if RT_CACHE_DEBUG >= 1
			printk(KERN_DEBUG "ipv4_negative_advice: redirect to %pI4/%02x dropped\n",
				&rt->rt_dst, rt->fl.fl4_tos);
#endif
			rt_del(hash, rt);
			ret = NULL;
		}
	}
	return ret;
}

/*
 * Algorithm:
 *	1. The first ip_rt_redirect_number redirects are sent
 *	   with exponential backoff, then we stop sending them at all,
 *	   assuming that the host ignores our redirects.
 *	2. If we did not see packets requiring redirects
 *	   during ip_rt_redirect_silence, we assume that the host
 *	   forgot the redirected route and start to send redirects again.
 *
 * This algorithm is much cheaper and more intelligent than dumb load limiting
 * in icmp.c.
 *
 * NOTE. Do not forget to inhibit load limiting for redirects (redundant)
 * and "frag. need" (breaks PMTU discovery) in icmp.c.
 */

1536
1537void ip_rt_send_redirect(struct sk_buff *skb)
1538{
511c3f92 1539 struct rtable *rt = skb_rtable(skb);
30038fc6
ED
1540 struct in_device *in_dev;
1541 int log_martians;
1da177e4 1542
30038fc6 1543 rcu_read_lock();
d8d1f30b 1544 in_dev = __in_dev_get_rcu(rt->dst.dev);
30038fc6
ED
1545 if (!in_dev || !IN_DEV_TX_REDIRECTS(in_dev)) {
1546 rcu_read_unlock();
1da177e4 1547 return;
30038fc6
ED
1548 }
1549 log_martians = IN_DEV_LOG_MARTIANS(in_dev);
1550 rcu_read_unlock();
1da177e4
LT
1551
1552 /* No redirected packets during ip_rt_redirect_silence;
1553 * reset the algorithm.
1554 */
d8d1f30b
CG
1555 if (time_after(jiffies, rt->dst.rate_last + ip_rt_redirect_silence))
1556 rt->dst.rate_tokens = 0;
1da177e4
LT
1557
1558 /* Too many ignored redirects; do not send anything
d8d1f30b 1559 * set dst.rate_last to the last seen redirected packet.
1da177e4 1560 */
d8d1f30b
CG
1561 if (rt->dst.rate_tokens >= ip_rt_redirect_number) {
1562 rt->dst.rate_last = jiffies;
30038fc6 1563 return;
1da177e4
LT
1564 }
1565
1566 /* Check for load limit; set rate_last to the latest sent
1567 * redirect.
1568 */
d8d1f30b 1569 if (rt->dst.rate_tokens == 0 ||
14fb8a76 1570 time_after(jiffies,
d8d1f30b
CG
1571 (rt->dst.rate_last +
1572 (ip_rt_redirect_load << rt->dst.rate_tokens)))) {
1da177e4 1573 icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, rt->rt_gateway);
d8d1f30b
CG
1574 rt->dst.rate_last = jiffies;
1575 ++rt->dst.rate_tokens;
1da177e4 1576#ifdef CONFIG_IP_ROUTE_VERBOSE
30038fc6 1577 if (log_martians &&
d8d1f30b 1578 rt->dst.rate_tokens == ip_rt_redirect_number &&
1da177e4 1579 net_ratelimit())
673d57e7
HH
1580 printk(KERN_WARNING "host %pI4/if%d ignores redirects for %pI4 to %pI4.\n",
1581 &rt->rt_src, rt->rt_iif,
1582 &rt->rt_dst, &rt->rt_gateway);
1da177e4
LT
1583#endif
1584 }
1da177e4
LT
1585}
1586
1587static int ip_error(struct sk_buff *skb)
1588{
511c3f92 1589 struct rtable *rt = skb_rtable(skb);
1da177e4
LT
1590 unsigned long now;
1591 int code;
1592
d8d1f30b 1593 switch (rt->dst.error) {
1da177e4
LT
1594 case EINVAL:
1595 default:
1596 goto out;
1597 case EHOSTUNREACH:
1598 code = ICMP_HOST_UNREACH;
1599 break;
1600 case ENETUNREACH:
1601 code = ICMP_NET_UNREACH;
d8d1f30b 1602 IP_INC_STATS_BH(dev_net(rt->dst.dev),
7c73a6fa 1603 IPSTATS_MIB_INNOROUTES);
1da177e4
LT
1604 break;
1605 case EACCES:
1606 code = ICMP_PKT_FILTERED;
1607 break;
1608 }
1609
1610 now = jiffies;
d8d1f30b
CG
1611 rt->dst.rate_tokens += now - rt->dst.rate_last;
1612 if (rt->dst.rate_tokens > ip_rt_error_burst)
1613 rt->dst.rate_tokens = ip_rt_error_burst;
1614 rt->dst.rate_last = now;
1615 if (rt->dst.rate_tokens >= ip_rt_error_cost) {
1616 rt->dst.rate_tokens -= ip_rt_error_cost;
1da177e4
LT
1617 icmp_send(skb, ICMP_DEST_UNREACH, code, 0);
1618 }
1619
1620out: kfree_skb(skb);
1621 return 0;
e905a9ed 1622}
1da177e4
LT
1623
1624/*
1625 * The last two values are not from the RFC but
1626 * are needed for AMPRnet AX.25 paths.
1627 */
1628
9b5b5cff 1629static const unsigned short mtu_plateau[] =
1da177e4
LT
1630{32000, 17914, 8166, 4352, 2002, 1492, 576, 296, 216, 128 };
1631
5969f71d 1632static inline unsigned short guess_mtu(unsigned short old_mtu)
1da177e4
LT
1633{
1634 int i;
e905a9ed 1635
1da177e4
LT
1636 for (i = 0; i < ARRAY_SIZE(mtu_plateau); i++)
1637 if (old_mtu > mtu_plateau[i])
1638 return mtu_plateau[i];
1639 return 68;
1640}
1641
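/*
 * Example: a Fragmentation Needed ICMP quoting a 1500-byte datagram but
 * carrying no next-hop MTU makes guess_mtu(1500) return 1492, the next
 * plateau below the old size (the RFC 1191 plateau search, extended with
 * the two AX.25 values per the comment above); anything at or below the
 * smallest plateau falls back to 68, the minimum IPv4 MTU.
 */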
b5921910 1642unsigned short ip_rt_frag_needed(struct net *net, struct iphdr *iph,
0010e465
TT
1643 unsigned short new_mtu,
1644 struct net_device *dev)
1da177e4 1645{
0010e465 1646 int i, k;
1da177e4
LT
1647 unsigned short old_mtu = ntohs(iph->tot_len);
1648 struct rtable *rth;
0010e465 1649 int ikeys[2] = { dev->ifindex, 0 };
e448515c
AV
1650 __be32 skeys[2] = { iph->saddr, 0, };
1651 __be32 daddr = iph->daddr;
1da177e4
LT
1652 unsigned short est_mtu = 0;
1653
0010e465
TT
1654 for (k = 0; k < 2; k++) {
1655 for (i = 0; i < 2; i++) {
b00180de 1656 unsigned hash = rt_hash(daddr, skeys[i], ikeys[k],
e84f84f2 1657 rt_genid(net));
0010e465
TT
1658
1659 rcu_read_lock();
1660 for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
d8d1f30b 1661 rth = rcu_dereference(rth->dst.rt_next)) {
1da177e4
LT
1662 unsigned short mtu = new_mtu;
1663
0010e465
TT
1664 if (rth->fl.fl4_dst != daddr ||
1665 rth->fl.fl4_src != skeys[i] ||
1666 rth->rt_dst != daddr ||
1667 rth->rt_src != iph->saddr ||
1668 rth->fl.oif != ikeys[k] ||
c7537967 1669 rt_is_input_route(rth) ||
d8d1f30b
CG
1670 dst_metric_locked(&rth->dst, RTAX_MTU) ||
1671 !net_eq(dev_net(rth->dst.dev), net) ||
6c3b8fc6 1672 rt_is_expired(rth))
0010e465
TT
1673 continue;
1674
1da177e4
LT
1675 if (new_mtu < 68 || new_mtu >= old_mtu) {
1676
1677 /* BSD 4.2 compatibility hack :-( */
1678 if (mtu == 0 &&
d8d1f30b 1679 old_mtu >= dst_mtu(&rth->dst) &&
1da177e4
LT
1680 old_mtu >= 68 + (iph->ihl << 2))
1681 old_mtu -= iph->ihl << 2;
1682
1683 mtu = guess_mtu(old_mtu);
1684 }
d8d1f30b
CG
1685 if (mtu <= dst_mtu(&rth->dst)) {
1686 if (mtu < dst_mtu(&rth->dst)) {
1687 dst_confirm(&rth->dst);
1da177e4 1688 if (mtu < ip_rt_min_pmtu) {
defb3519
DM
1689 u32 lock = dst_metric(&rth->dst,
1690 RTAX_LOCK);
1da177e4 1691 mtu = ip_rt_min_pmtu;
defb3519
DM
1692 lock |= (1 << RTAX_MTU);
1693 dst_metric_set(&rth->dst, RTAX_LOCK,
1694 lock);
1da177e4 1695 }
defb3519 1696 dst_metric_set(&rth->dst, RTAX_MTU, mtu);
d8d1f30b 1697 dst_set_expires(&rth->dst,
1da177e4
LT
1698 ip_rt_mtu_expires);
1699 }
1700 est_mtu = mtu;
1701 }
1702 }
0010e465 1703 rcu_read_unlock();
1da177e4 1704 }
1da177e4
LT
1705 }
1706 return est_mtu ? : new_mtu;
1707}
1708
1709static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
1710{
6d273f8d 1711 if (dst_mtu(dst) > mtu && mtu >= 68 &&
1da177e4
LT
1712 !(dst_metric_locked(dst, RTAX_MTU))) {
1713 if (mtu < ip_rt_min_pmtu) {
defb3519 1714 u32 lock = dst_metric(dst, RTAX_LOCK);
1da177e4 1715 mtu = ip_rt_min_pmtu;
defb3519 1716 dst_metric_set(dst, RTAX_LOCK, lock | (1 << RTAX_MTU));
1da177e4 1717 }
defb3519 1718 dst_metric_set(dst, RTAX_MTU, mtu);
1da177e4 1719 dst_set_expires(dst, ip_rt_mtu_expires);
8d71740c 1720 call_netevent_notifiers(NETEVENT_PMTU_UPDATE, dst);
1da177e4
LT
1721 }
1722}
1723
1724static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
1725{
d11a4dc1
TT
1726 if (rt_is_expired((struct rtable *)dst))
1727 return NULL;
1728 return dst;
1da177e4
LT
1729}
1730
1731static void ipv4_dst_destroy(struct dst_entry *dst)
1732{
1733 struct rtable *rt = (struct rtable *) dst;
1734 struct inet_peer *peer = rt->peer;
1da177e4
LT
1735
1736 if (peer) {
1737 rt->peer = NULL;
1738 inet_putpeer(peer);
1739 }
1da177e4
LT
1740}
1741
1da177e4
LT
1742
1743static void ipv4_link_failure(struct sk_buff *skb)
1744{
1745 struct rtable *rt;
1746
1747 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
1748
511c3f92 1749 rt = skb_rtable(skb);
1da177e4 1750 if (rt)
d8d1f30b 1751 dst_set_expires(&rt->dst, 0);
1da177e4
LT
1752}
1753
1754static int ip_rt_bug(struct sk_buff *skb)
1755{
673d57e7
HH
1756 printk(KERN_DEBUG "ip_rt_bug: %pI4 -> %pI4, %s\n",
1757 &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr,
1da177e4
LT
1758 skb->dev ? skb->dev->name : "?");
1759 kfree_skb(skb);
1760 return 0;
1761}
1762
1763/*
1764 We do not cache the source address of the outgoing interface,
1765 because it is used only by the IP RR, TS and SRR options,
1766 so it is out of the fast path.
1767
1768 BTW remember: "addr" is allowed to be unaligned
1769 in IP options!
1770 */
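/*
 * That is why ip_rt_get_source() below ends with a memcpy() rather
 * than a direct store: "*(__be32 *)addr = src" could fault on
 * strict-alignment architectures when addr points into the middle
 * of an IP options area.
 */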
1771
1772void ip_rt_get_source(u8 *addr, struct rtable *rt)
1773{
a61ced5d 1774 __be32 src;
1da177e4
LT
1775 struct fib_result res;
1776
c7537967 1777 if (rt_is_output_route(rt))
1da177e4 1778 src = rt->rt_src;
ebc0ffae
ED
1779 else {
1780 rcu_read_lock();
1781 if (fib_lookup(dev_net(rt->dst.dev), &rt->fl, &res) == 0)
1782 src = FIB_RES_PREFSRC(res);
1783 else
1784 src = inet_select_addr(rt->dst.dev, rt->rt_gateway,
1da177e4 1785 RT_SCOPE_UNIVERSE);
ebc0ffae
ED
1786 rcu_read_unlock();
1787 }
1da177e4
LT
1788 memcpy(addr, &src, 4);
1789}
1790
1791#ifdef CONFIG_NET_CLS_ROUTE
1792static void set_class_tag(struct rtable *rt, u32 tag)
1793{
d8d1f30b
CG
1794 if (!(rt->dst.tclassid & 0xFFFF))
1795 rt->dst.tclassid |= tag & 0xFFFF;
1796 if (!(rt->dst.tclassid & 0xFFFF0000))
1797 rt->dst.tclassid |= tag & 0xFFFF0000;
1da177e4
LT
1798}
1799#endif
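/*
 * Background note (an assumption about the encoding, matching the
 * usual ip-route "realms" convention): tclassid packs the destination
 * realm in the low 16 bits and the source realm in the high 16 bits,
 * which is why set_class_tag() only fills in halves that are still zero.
 */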
1800
1801static void rt_set_nexthop(struct rtable *rt, struct fib_result *res, u32 itag)
1802{
defb3519 1803 struct dst_entry *dst = &rt->dst;
1da177e4
LT
1804 struct fib_info *fi = res->fi;
1805
1806 if (fi) {
1807 if (FIB_RES_GW(*res) &&
1808 FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK)
1809 rt->rt_gateway = FIB_RES_GW(*res);
defb3519 1810 dst_import_metrics(dst, fi->fib_metrics);
1da177e4 1811 if (fi->fib_mtu == 0) {
defb3519
DM
1812 dst_metric_set(dst, RTAX_MTU, dst->dev->mtu);
1813 if (dst_metric_locked(dst, RTAX_MTU) &&
1da177e4 1814 rt->rt_gateway != rt->rt_dst &&
defb3519
DM
1815 dst->dev->mtu > 576)
1816 dst_metric_set(dst, RTAX_MTU, 576);
1da177e4
LT
1817 }
1818#ifdef CONFIG_NET_CLS_ROUTE
defb3519 1819 dst->tclassid = FIB_RES_NH(*res).nh_tclassid;
1da177e4
LT
1820#endif
1821 } else
defb3519
DM
1822 dst_metric_set(dst, RTAX_MTU, dst->dev->mtu);
1823
defb3519
DM
1824 if (dst_mtu(dst) > IP_MAX_MTU)
1825 dst_metric_set(dst, RTAX_MTU, IP_MAX_MTU);
1826 if (dst_metric(dst, RTAX_ADVMSS) == 0)
1827 dst_metric_set(dst, RTAX_ADVMSS,
1828 max_t(unsigned int, dst->dev->mtu - 40,
1829 ip_rt_min_advmss));
1830 if (dst_metric(dst, RTAX_ADVMSS) > 65535 - 40)
1831 dst_metric_set(dst, RTAX_ADVMSS, 65535 - 40);
1da177e4
LT
1832
1833#ifdef CONFIG_NET_CLS_ROUTE
1834#ifdef CONFIG_IP_MULTIPLE_TABLES
1835 set_class_tag(rt, fib_rules_tclass(res));
1836#endif
1837 set_class_tag(rt, itag);
1838#endif
e905a9ed 1839 rt->rt_type = res->type;
1da177e4
LT
1840}
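/*
 * Worked example of the metric fixups above (illustrative only):
 * with dev->mtu == 1500 and no ADVMSS supplied by the FIB entry,
 * RTAX_ADVMSS becomes max(1500 - 40, ip_rt_min_advmss), i.e. 1460
 * with the default minimum, the classic TCP MSS for an Ethernet path.
 */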
1841
96d36220 1842/* called in rcu_read_lock() section */
9e12bb22 1843static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1da177e4
LT
1844 u8 tos, struct net_device *dev, int our)
1845{
96d36220 1846 unsigned int hash;
1da177e4 1847 struct rtable *rth;
a61ced5d 1848 __be32 spec_dst;
96d36220 1849 struct in_device *in_dev = __in_dev_get_rcu(dev);
1da177e4 1850 u32 itag = 0;
b5f7e755 1851 int err;
1da177e4
LT
1852
1853 /* Primary sanity checks. */
1854
1855 if (in_dev == NULL)
1856 return -EINVAL;
1857
1e637c74 1858 if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
f97c1e0c 1859 ipv4_is_loopback(saddr) || skb->protocol != htons(ETH_P_IP))
1da177e4
LT
1860 goto e_inval;
1861
f97c1e0c
JP
1862 if (ipv4_is_zeronet(saddr)) {
1863 if (!ipv4_is_local_multicast(daddr))
1da177e4
LT
1864 goto e_inval;
1865 spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK);
b5f7e755
ED
1866 } else {
1867 err = fib_validate_source(saddr, 0, tos, 0, dev, &spec_dst,
1868 &itag, 0);
1869 if (err < 0)
1870 goto e_err;
1871 }
1da177e4
LT
1872 rth = dst_alloc(&ipv4_dst_ops);
1873 if (!rth)
1874 goto e_nobufs;
1875
d8d1f30b
CG
1876 rth->dst.output = ip_rt_bug;
1877 rth->dst.obsolete = -1;
1da177e4 1878
d8d1f30b
CG
1879 atomic_set(&rth->dst.__refcnt, 1);
1880 rth->dst.flags= DST_HOST;
42f811b8 1881 if (IN_DEV_CONF_GET(in_dev, NOPOLICY))
d8d1f30b 1882 rth->dst.flags |= DST_NOPOLICY;
1da177e4
LT
1883 rth->fl.fl4_dst = daddr;
1884 rth->rt_dst = daddr;
1885 rth->fl.fl4_tos = tos;
47dcf0cb 1886 rth->fl.mark = skb->mark;
1da177e4
LT
1887 rth->fl.fl4_src = saddr;
1888 rth->rt_src = saddr;
1889#ifdef CONFIG_NET_CLS_ROUTE
d8d1f30b 1890 rth->dst.tclassid = itag;
1da177e4
LT
1891#endif
1892 rth->rt_iif =
1893 rth->fl.iif = dev->ifindex;
d8d1f30b
CG
1894 rth->dst.dev = init_net.loopback_dev;
1895 dev_hold(rth->dst.dev);
1da177e4
LT
1896 rth->fl.oif = 0;
1897 rth->rt_gateway = daddr;
1898 rth->rt_spec_dst= spec_dst;
e84f84f2 1899 rth->rt_genid = rt_genid(dev_net(dev));
1da177e4 1900 rth->rt_flags = RTCF_MULTICAST;
29e75252 1901 rth->rt_type = RTN_MULTICAST;
1da177e4 1902 if (our) {
d8d1f30b 1903 rth->dst.input= ip_local_deliver;
1da177e4
LT
1904 rth->rt_flags |= RTCF_LOCAL;
1905 }
1906
1907#ifdef CONFIG_IP_MROUTE
f97c1e0c 1908 if (!ipv4_is_local_multicast(daddr) && IN_DEV_MFORWARD(in_dev))
d8d1f30b 1909 rth->dst.input = ip_mr_input;
1da177e4
LT
1910#endif
1911 RT_CACHE_STAT_INC(in_slow_mc);
1912
e84f84f2 1913 hash = rt_hash(daddr, saddr, dev->ifindex, rt_genid(dev_net(dev)));
6a2bad70 1914 return rt_intern_hash(hash, rth, NULL, skb, dev->ifindex);
1da177e4
LT
1915
1916e_nobufs:
1da177e4 1917 return -ENOBUFS;
1da177e4 1918e_inval:
96d36220 1919 return -EINVAL;
b5f7e755 1920e_err:
b5f7e755 1921 return err;
1da177e4
LT
1922}
1923
1924
1925static void ip_handle_martian_source(struct net_device *dev,
1926 struct in_device *in_dev,
1927 struct sk_buff *skb,
9e12bb22
AV
1928 __be32 daddr,
1929 __be32 saddr)
1da177e4
LT
1930{
1931 RT_CACHE_STAT_INC(in_martian_src);
1932#ifdef CONFIG_IP_ROUTE_VERBOSE
1933 if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit()) {
1934 /*
1935 * RFC1812 recommendation: if the source is martian,
1936 * the only hint is the MAC header.
1937 */
673d57e7
HH
1938 printk(KERN_WARNING "martian source %pI4 from %pI4, on dev %s\n",
1939 &daddr, &saddr, dev->name);
98e399f8 1940 if (dev->hard_header_len && skb_mac_header_was_set(skb)) {
1da177e4 1941 int i;
98e399f8 1942 const unsigned char *p = skb_mac_header(skb);
1da177e4
LT
1943 printk(KERN_WARNING "ll header: ");
1944 for (i = 0; i < dev->hard_header_len; i++, p++) {
1945 printk("%02x", *p);
1946 if (i < (dev->hard_header_len - 1))
1947 printk(":");
1948 }
1949 printk("\n");
1950 }
1951 }
1952#endif
1953}
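/*
 * Illustrative output of the function above (addresses invented):
 *
 *	martian source 10.1.2.3 from 127.0.0.1, on dev eth0
 *	ll header: 00:16:3e:12:34:56:00:16:3e:65:43:21:08:00
 */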
1954
47360228 1955/* called in rcu_read_lock() section */
5969f71d
SH
1956static int __mkroute_input(struct sk_buff *skb,
1957 struct fib_result *res,
1958 struct in_device *in_dev,
1959 __be32 daddr, __be32 saddr, u32 tos,
1960 struct rtable **result)
1da177e4 1961{
1da177e4
LT
1962 struct rtable *rth;
1963 int err;
1964 struct in_device *out_dev;
47360228 1965 unsigned int flags = 0;
d9c9df8c
AV
1966 __be32 spec_dst;
1967 u32 itag;
1da177e4
LT
1968
1969 /* get a working reference to the output device */
47360228 1970 out_dev = __in_dev_get_rcu(FIB_RES_DEV(*res));
1da177e4
LT
1971 if (out_dev == NULL) {
1972 if (net_ratelimit())
1973 printk(KERN_CRIT "Bug in ip_route_input" \
1974 "_slow(). Please, report\n");
1975 return -EINVAL;
1976 }
1977
1978
e905a9ed 1979 err = fib_validate_source(saddr, daddr, tos, FIB_RES_OIF(*res),
b0c110ca 1980 in_dev->dev, &spec_dst, &itag, skb->mark);
1da177e4 1981 if (err < 0) {
e905a9ed 1982 ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr,
1da177e4 1983 saddr);
e905a9ed 1984
1da177e4
LT
1985 goto cleanup;
1986 }
1987
1988 if (err)
1989 flags |= RTCF_DIRECTSRC;
1990
51b77cae 1991 if (out_dev == in_dev && err &&
1da177e4
LT
1992 (IN_DEV_SHARED_MEDIA(out_dev) ||
1993 inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res))))
1994 flags |= RTCF_DOREDIRECT;
1995
1996 if (skb->protocol != htons(ETH_P_IP)) {
1997 /* Not IP (i.e. ARP). Do not create a route if it is
1998 * invalid for proxy arp. DNAT routes are always valid.
65324144
JDB
1999 *
2000 * The proxy arp feature has been extended to allow ARP
2001 * replies back on the same interface, to support
2002 * Private VLAN switch technologies. See arp.c.
1da177e4 2003 */
65324144
JDB
2004 if (out_dev == in_dev &&
2005 IN_DEV_PROXY_ARP_PVLAN(in_dev) == 0) {
1da177e4
LT
2006 err = -EINVAL;
2007 goto cleanup;
2008 }
2009 }
2010
2011
2012 rth = dst_alloc(&ipv4_dst_ops);
2013 if (!rth) {
2014 err = -ENOBUFS;
2015 goto cleanup;
2016 }
2017
d8d1f30b
CG
2018 atomic_set(&rth->dst.__refcnt, 1);
2019 rth->dst.flags= DST_HOST;
42f811b8 2020 if (IN_DEV_CONF_GET(in_dev, NOPOLICY))
d8d1f30b 2021 rth->dst.flags |= DST_NOPOLICY;
42f811b8 2022 if (IN_DEV_CONF_GET(out_dev, NOXFRM))
d8d1f30b 2023 rth->dst.flags |= DST_NOXFRM;
1da177e4
LT
2024 rth->fl.fl4_dst = daddr;
2025 rth->rt_dst = daddr;
2026 rth->fl.fl4_tos = tos;
47dcf0cb 2027 rth->fl.mark = skb->mark;
1da177e4
LT
2028 rth->fl.fl4_src = saddr;
2029 rth->rt_src = saddr;
2030 rth->rt_gateway = daddr;
2031 rth->rt_iif =
2032 rth->fl.iif = in_dev->dev->ifindex;
d8d1f30b
CG
2033 rth->dst.dev = (out_dev)->dev;
2034 dev_hold(rth->dst.dev);
1da177e4
LT
2035 rth->fl.oif = 0;
2036 rth->rt_spec_dst= spec_dst;
2037
d8d1f30b
CG
2038 rth->dst.obsolete = -1;
2039 rth->dst.input = ip_forward;
2040 rth->dst.output = ip_output;
2041 rth->rt_genid = rt_genid(dev_net(rth->dst.dev));
1da177e4
LT
2042
2043 rt_set_nexthop(rth, res, itag);
2044
2045 rth->rt_flags = flags;
2046
2047 *result = rth;
2048 err = 0;
2049 cleanup:
1da177e4 2050 return err;
e905a9ed 2051}
1da177e4 2052
5969f71d
SH
2053static int ip_mkroute_input(struct sk_buff *skb,
2054 struct fib_result *res,
2055 const struct flowi *fl,
2056 struct in_device *in_dev,
2057 __be32 daddr, __be32 saddr, u32 tos)
1da177e4 2058{
7abaa27c 2059 struct rtable* rth = NULL;
1da177e4
LT
2060 int err;
2061 unsigned hash;
2062
2063#ifdef CONFIG_IP_ROUTE_MULTIPATH
2064 if (res->fi && res->fi->fib_nhs > 1 && fl->oif == 0)
2065 fib_select_multipath(fl, res);
2066#endif
2067
2068 /* create a routing cache entry */
2069 err = __mkroute_input(skb, res, in_dev, daddr, saddr, tos, &rth);
2070 if (err)
2071 return err;
1da177e4
LT
2072
2073 /* put it into the cache */
e84f84f2 2074 hash = rt_hash(daddr, saddr, fl->iif,
d8d1f30b 2075 rt_genid(dev_net(rth->dst.dev)));
6a2bad70 2076 return rt_intern_hash(hash, rth, NULL, skb, fl->iif);
1da177e4
LT
2077}
2078
1da177e4
LT
2079/*
2080 * NOTE. We drop all packets that have local source
2081 * addresses, because every properly looped-back packet
2082 * must already have the correct destination attached by the output routine.
2083 *
2084 * Such an approach solves two big problems:
2085 * 1. Non-simplex devices are handled properly.
2086 * 2. IP spoofing attempts are filtered with a 100% guarantee.
ebc0ffae 2087 * called with rcu_read_lock()
1da177e4
LT
2088 */
2089
9e12bb22 2090static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1da177e4
LT
2091 u8 tos, struct net_device *dev)
2092{
2093 struct fib_result res;
96d36220 2094 struct in_device *in_dev = __in_dev_get_rcu(dev);
5811662b
CG
2095 struct flowi fl = { .fl4_dst = daddr,
2096 .fl4_src = saddr,
2097 .fl4_tos = tos,
2098 .fl4_scope = RT_SCOPE_UNIVERSE,
47dcf0cb 2099 .mark = skb->mark,
1da177e4
LT
2100 .iif = dev->ifindex };
2101 unsigned flags = 0;
2102 u32 itag = 0;
2103 struct rtable * rth;
2104 unsigned hash;
9e12bb22 2105 __be32 spec_dst;
1da177e4 2106 int err = -EINVAL;
c346dca1 2107 struct net * net = dev_net(dev);
1da177e4
LT
2108
2109 /* IP on this device is disabled. */
2110
2111 if (!in_dev)
2112 goto out;
2113
2114 /* Check for the most weird martians, which cannot be detected
2115 by fib_lookup.
2116 */
2117
1e637c74 2118 if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
f97c1e0c 2119 ipv4_is_loopback(saddr))
1da177e4
LT
2120 goto martian_source;
2121
27a954bd 2122 if (ipv4_is_lbcast(daddr) || (saddr == 0 && daddr == 0))
1da177e4
LT
2123 goto brd_input;
2124
2125 /* Accept zero addresses only for the limited broadcast;
2126 * I do not even know whether to fix it or not. Waiting for complaints :-)
2127 */
f97c1e0c 2128 if (ipv4_is_zeronet(saddr))
1da177e4
LT
2129 goto martian_source;
2130
27a954bd 2131 if (ipv4_is_zeronet(daddr) || ipv4_is_loopback(daddr))
1da177e4
LT
2132 goto martian_destination;
2133
2134 /*
2135 * Now we are ready to route packet.
2136 */
ebc0ffae
ED
2137 err = fib_lookup(net, &fl, &res);
2138 if (err != 0) {
1da177e4 2139 if (!IN_DEV_FORWARD(in_dev))
2c2910a4 2140 goto e_hostunreach;
1da177e4
LT
2141 goto no_route;
2142 }
1da177e4
LT
2143
2144 RT_CACHE_STAT_INC(in_slow_tot);
2145
2146 if (res.type == RTN_BROADCAST)
2147 goto brd_input;
2148
2149 if (res.type == RTN_LOCAL) {
b5f7e755 2150 err = fib_validate_source(saddr, daddr, tos,
ebc0ffae
ED
2151 net->loopback_dev->ifindex,
2152 dev, &spec_dst, &itag, skb->mark);
b5f7e755
ED
2153 if (err < 0)
2154 goto martian_source_keep_err;
2155 if (err)
1da177e4
LT
2156 flags |= RTCF_DIRECTSRC;
2157 spec_dst = daddr;
2158 goto local_input;
2159 }
2160
2161 if (!IN_DEV_FORWARD(in_dev))
2c2910a4 2162 goto e_hostunreach;
1da177e4
LT
2163 if (res.type != RTN_UNICAST)
2164 goto martian_destination;
2165
2166 err = ip_mkroute_input(skb, &res, &fl, in_dev, daddr, saddr, tos);
1da177e4
LT
2167out: return err;
2168
2169brd_input:
2170 if (skb->protocol != htons(ETH_P_IP))
2171 goto e_inval;
2172
f97c1e0c 2173 if (ipv4_is_zeronet(saddr))
1da177e4
LT
2174 spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK);
2175 else {
2176 err = fib_validate_source(saddr, 0, tos, 0, dev, &spec_dst,
b0c110ca 2177 &itag, skb->mark);
1da177e4 2178 if (err < 0)
b5f7e755 2179 goto martian_source_keep_err;
1da177e4
LT
2180 if (err)
2181 flags |= RTCF_DIRECTSRC;
2182 }
2183 flags |= RTCF_BROADCAST;
2184 res.type = RTN_BROADCAST;
2185 RT_CACHE_STAT_INC(in_brd);
2186
2187local_input:
2188 rth = dst_alloc(&ipv4_dst_ops);
2189 if (!rth)
2190 goto e_nobufs;
2191
d8d1f30b
CG
2192 rth->dst.output= ip_rt_bug;
2193 rth->dst.obsolete = -1;
e84f84f2 2194 rth->rt_genid = rt_genid(net);
1da177e4 2195
d8d1f30b
CG
2196 atomic_set(&rth->dst.__refcnt, 1);
2197 rth->dst.flags= DST_HOST;
42f811b8 2198 if (IN_DEV_CONF_GET(in_dev, NOPOLICY))
d8d1f30b 2199 rth->dst.flags |= DST_NOPOLICY;
1da177e4
LT
2200 rth->fl.fl4_dst = daddr;
2201 rth->rt_dst = daddr;
2202 rth->fl.fl4_tos = tos;
47dcf0cb 2203 rth->fl.mark = skb->mark;
1da177e4
LT
2204 rth->fl.fl4_src = saddr;
2205 rth->rt_src = saddr;
2206#ifdef CONFIG_NET_CLS_ROUTE
d8d1f30b 2207 rth->dst.tclassid = itag;
1da177e4
LT
2208#endif
2209 rth->rt_iif =
2210 rth->fl.iif = dev->ifindex;
d8d1f30b
CG
2211 rth->dst.dev = net->loopback_dev;
2212 dev_hold(rth->dst.dev);
1da177e4
LT
2213 rth->rt_gateway = daddr;
2214 rth->rt_spec_dst= spec_dst;
d8d1f30b 2215 rth->dst.input= ip_local_deliver;
1da177e4
LT
2216 rth->rt_flags = flags|RTCF_LOCAL;
2217 if (res.type == RTN_UNREACHABLE) {
d8d1f30b
CG
2218 rth->dst.input= ip_error;
2219 rth->dst.error= -err;
1da177e4
LT
2220 rth->rt_flags &= ~RTCF_LOCAL;
2221 }
2222 rth->rt_type = res.type;
e84f84f2 2223 hash = rt_hash(daddr, saddr, fl.iif, rt_genid(net));
6a2bad70 2224 err = rt_intern_hash(hash, rth, NULL, skb, fl.iif);
ebc0ffae 2225 goto out;
1da177e4
LT
2226
2227no_route:
2228 RT_CACHE_STAT_INC(in_no_route);
2229 spec_dst = inet_select_addr(dev, 0, RT_SCOPE_UNIVERSE);
2230 res.type = RTN_UNREACHABLE;
7f53878d
MC
2231 if (err == -ESRCH)
2232 err = -ENETUNREACH;
1da177e4
LT
2233 goto local_input;
2234
2235 /*
2236 * Do not cache martian addresses: they should be logged (RFC1812)
2237 */
2238martian_destination:
2239 RT_CACHE_STAT_INC(in_martian_dst);
2240#ifdef CONFIG_IP_ROUTE_VERBOSE
2241 if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit())
673d57e7
HH
2242 printk(KERN_WARNING "martian destination %pI4 from %pI4, dev %s\n",
2243 &daddr, &saddr, dev->name);
1da177e4 2244#endif
2c2910a4
DE
2245
2246e_hostunreach:
e905a9ed 2247 err = -EHOSTUNREACH;
ebc0ffae 2248 goto out;
2c2910a4 2249
1da177e4
LT
2250e_inval:
2251 err = -EINVAL;
ebc0ffae 2252 goto out;
1da177e4
LT
2253
2254e_nobufs:
2255 err = -ENOBUFS;
ebc0ffae 2256 goto out;
1da177e4
LT
2257
2258martian_source:
b5f7e755
ED
2259 err = -EINVAL;
2260martian_source_keep_err:
1da177e4 2261 ip_handle_martian_source(dev, in_dev, skb, daddr, saddr);
ebc0ffae 2262 goto out;
1da177e4
LT
2263}
2264
407eadd9
ED
2265int ip_route_input_common(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2266 u8 tos, struct net_device *dev, bool noref)
1da177e4
LT
2267{
2268 struct rtable * rth;
2269 unsigned hash;
2270 int iif = dev->ifindex;
b5921910 2271 struct net *net;
96d36220 2272 int res;
1da177e4 2273
c346dca1 2274 net = dev_net(dev);
1080d709 2275
96d36220
ED
2276 rcu_read_lock();
2277
1080d709
NH
2278 if (!rt_caching(net))
2279 goto skip_cache;
2280
1da177e4 2281 tos &= IPTOS_RT_MASK;
e84f84f2 2282 hash = rt_hash(daddr, saddr, iif, rt_genid(net));
1da177e4 2283
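	/*
	 * The comparison below folds all key checks into a single branch:
	 * XOR-ing each cached field with the wanted value yields zero only
	 * on a match, so OR-ing the XORs (plus fl.oif, which must be zero
	 * for an input route) is zero iff every field matches.
	 */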
1da177e4 2284 for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
d8d1f30b 2285 rth = rcu_dereference(rth->dst.rt_next)) {
0eae88f3
ED
2286 if ((((__force u32)rth->fl.fl4_dst ^ (__force u32)daddr) |
2287 ((__force u32)rth->fl.fl4_src ^ (__force u32)saddr) |
c0b8c32b
SH
2288 (rth->fl.iif ^ iif) |
2289 rth->fl.oif |
2290 (rth->fl.fl4_tos ^ tos)) == 0 &&
47dcf0cb 2291 rth->fl.mark == skb->mark &&
d8d1f30b 2292 net_eq(dev_net(rth->dst.dev), net) &&
e84f84f2 2293 !rt_is_expired(rth)) {
407eadd9 2294 if (noref) {
d8d1f30b
CG
2295 dst_use_noref(&rth->dst, jiffies);
2296 skb_dst_set_noref(skb, &rth->dst);
407eadd9 2297 } else {
d8d1f30b
CG
2298 dst_use(&rth->dst, jiffies);
2299 skb_dst_set(skb, &rth->dst);
407eadd9 2300 }
1da177e4
LT
2301 RT_CACHE_STAT_INC(in_hit);
2302 rcu_read_unlock();
1da177e4
LT
2303 return 0;
2304 }
2305 RT_CACHE_STAT_INC(in_hlist_search);
2306 }
1da177e4 2307
1080d709 2308skip_cache:
1da177e4
LT
2309 /* Multicast recognition logic was moved from the route cache to here.
2310 The problem was that too many Ethernet cards have broken/missing
2311 hardware multicast filters :-( As a result, a host on a multicast
2312 network acquires a lot of useless route cache entries, e.g. from
2313 SDR messages from all over the world. Now we try to get rid of them.
2314 Really, provided the software IP multicast filter is organized
2315 reasonably (at least, hashed), it does not result in a slowdown
2316 compared with route cache reject entries.
2317 Note that multicast routers are not affected, because a
2318 route cache entry is created eventually.
2319 */
f97c1e0c 2320 if (ipv4_is_multicast(daddr)) {
96d36220 2321 struct in_device *in_dev = __in_dev_get_rcu(dev);
1da177e4 2322
96d36220 2323 if (in_dev) {
1da177e4 2324 int our = ip_check_mc(in_dev, daddr, saddr,
96d36220 2325 ip_hdr(skb)->protocol);
1da177e4
LT
2326 if (our
2327#ifdef CONFIG_IP_MROUTE
9d4fb27d
JP
2328 ||
2329 (!ipv4_is_local_multicast(daddr) &&
2330 IN_DEV_MFORWARD(in_dev))
1da177e4 2331#endif
9d4fb27d 2332 ) {
96d36220
ED
2333 int res = ip_route_input_mc(skb, daddr, saddr,
2334 tos, dev, our);
1da177e4 2335 rcu_read_unlock();
96d36220 2336 return res;
1da177e4
LT
2337 }
2338 }
2339 rcu_read_unlock();
2340 return -EINVAL;
2341 }
96d36220
ED
2342 res = ip_route_input_slow(skb, daddr, saddr, tos, dev);
2343 rcu_read_unlock();
2344 return res;
1da177e4 2345}
407eadd9 2346EXPORT_SYMBOL(ip_route_input_common);
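/*
 * Usage sketch (an assumption, mirroring the in-tree inline wrappers):
 * callers normally reach ip_route_input_common() through helpers such as
 *
 *	err = ip_route_input(skb, iph->daddr, iph->saddr,
 *			     iph->tos, skb->dev);
 *
 * i.e. noref == false, while the receive fast path uses the noref
 * variant to avoid dirtying the refcount on every packet.
 */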
1da177e4 2347
ebc0ffae 2348/* called with rcu_read_lock() */
5969f71d
SH
2349static int __mkroute_output(struct rtable **result,
2350 struct fib_result *res,
2351 const struct flowi *fl,
2352 const struct flowi *oldflp,
2353 struct net_device *dev_out,
2354 unsigned flags)
1da177e4
LT
2355{
2356 struct rtable *rth;
2357 struct in_device *in_dev;
2358 u32 tos = RT_FL_TOS(oldflp);
1da177e4 2359
dd28d1a0 2360 if (ipv4_is_loopback(fl->fl4_src) && !(dev_out->flags & IFF_LOOPBACK))
1da177e4
LT
2361 return -EINVAL;
2362
27a954bd 2363 if (ipv4_is_lbcast(fl->fl4_dst))
1da177e4 2364 res->type = RTN_BROADCAST;
f97c1e0c 2365 else if (ipv4_is_multicast(fl->fl4_dst))
1da177e4 2366 res->type = RTN_MULTICAST;
27a954bd 2367 else if (ipv4_is_zeronet(fl->fl4_dst))
1da177e4
LT
2368 return -EINVAL;
2369
2370 if (dev_out->flags & IFF_LOOPBACK)
2371 flags |= RTCF_LOCAL;
2372
dd28d1a0 2373 in_dev = __in_dev_get_rcu(dev_out);
ebc0ffae 2374 if (!in_dev)
1da177e4 2375 return -EINVAL;
ebc0ffae 2376
1da177e4
LT
2377 if (res->type == RTN_BROADCAST) {
2378 flags |= RTCF_BROADCAST | RTCF_LOCAL;
ebc0ffae 2379 res->fi = NULL;
1da177e4 2380 } else if (res->type == RTN_MULTICAST) {
dd28d1a0 2381 flags |= RTCF_MULTICAST | RTCF_LOCAL;
e905a9ed 2382 if (!ip_check_mc(in_dev, oldflp->fl4_dst, oldflp->fl4_src,
1da177e4
LT
2383 oldflp->proto))
2384 flags &= ~RTCF_LOCAL;
2385 /* If the multicast route does not exist, use
dd28d1a0
ED
2386 * the default one, but do not gateway in this case.
2387 * Yes, it is a hack.
1da177e4 2388 */
ebc0ffae 2389 if (res->fi && res->prefixlen < 4)
1da177e4 2390 res->fi = NULL;
1da177e4
LT
2391 }
2392
2393
2394 rth = dst_alloc(&ipv4_dst_ops);
8391d07b 2395 if (!rth)
dd28d1a0 2396 return -ENOBUFS;
8391d07b 2397
d8d1f30b
CG
2398 atomic_set(&rth->dst.__refcnt, 1);
2399 rth->dst.flags= DST_HOST;
42f811b8 2400 if (IN_DEV_CONF_GET(in_dev, NOXFRM))
d8d1f30b 2401 rth->dst.flags |= DST_NOXFRM;
42f811b8 2402 if (IN_DEV_CONF_GET(in_dev, NOPOLICY))
d8d1f30b 2403 rth->dst.flags |= DST_NOPOLICY;
1da177e4
LT
2404
2405 rth->fl.fl4_dst = oldflp->fl4_dst;
2406 rth->fl.fl4_tos = tos;
2407 rth->fl.fl4_src = oldflp->fl4_src;
2408 rth->fl.oif = oldflp->oif;
47dcf0cb 2409 rth->fl.mark = oldflp->mark;
1da177e4
LT
2410 rth->rt_dst = fl->fl4_dst;
2411 rth->rt_src = fl->fl4_src;
2412 rth->rt_iif = oldflp->oif ? : dev_out->ifindex;
e905a9ed 2413 /* get references to the devices that are to be held by the routing
1da177e4 2414 cache entry */
d8d1f30b 2415 rth->dst.dev = dev_out;
1da177e4 2416 dev_hold(dev_out);
1da177e4
LT
2417 rth->rt_gateway = fl->fl4_dst;
2418 rth->rt_spec_dst= fl->fl4_src;
2419
d8d1f30b
CG
2420 rth->dst.output=ip_output;
2421 rth->dst.obsolete = -1;
e84f84f2 2422 rth->rt_genid = rt_genid(dev_net(dev_out));
1da177e4
LT
2423
2424 RT_CACHE_STAT_INC(out_slow_tot);
2425
2426 if (flags & RTCF_LOCAL) {
d8d1f30b 2427 rth->dst.input = ip_local_deliver;
1da177e4
LT
2428 rth->rt_spec_dst = fl->fl4_dst;
2429 }
2430 if (flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
2431 rth->rt_spec_dst = fl->fl4_src;
e905a9ed 2432 if (flags & RTCF_LOCAL &&
1da177e4 2433 !(dev_out->flags & IFF_LOOPBACK)) {
d8d1f30b 2434 rth->dst.output = ip_mc_output;
1da177e4
LT
2435 RT_CACHE_STAT_INC(out_slow_mc);
2436 }
2437#ifdef CONFIG_IP_MROUTE
2438 if (res->type == RTN_MULTICAST) {
2439 if (IN_DEV_MFORWARD(in_dev) &&
f97c1e0c 2440 !ipv4_is_local_multicast(oldflp->fl4_dst)) {
d8d1f30b
CG
2441 rth->dst.input = ip_mr_input;
2442 rth->dst.output = ip_mc_output;
1da177e4
LT
2443 }
2444 }
2445#endif
2446 }
2447
2448 rt_set_nexthop(rth, res, 0);
2449
2450 rth->rt_flags = flags;
1da177e4 2451 *result = rth;
dd28d1a0 2452 return 0;
1da177e4
LT
2453}
2454
ebc0ffae 2455/* called with rcu_read_lock() */
5969f71d
SH
2456static int ip_mkroute_output(struct rtable **rp,
2457 struct fib_result *res,
2458 const struct flowi *fl,
2459 const struct flowi *oldflp,
2460 struct net_device *dev_out,
2461 unsigned flags)
1da177e4 2462{
7abaa27c 2463 struct rtable *rth = NULL;
1da177e4
LT
2464 int err = __mkroute_output(&rth, res, fl, oldflp, dev_out, flags);
2465 unsigned hash;
2466 if (err == 0) {
b00180de 2467 hash = rt_hash(oldflp->fl4_dst, oldflp->fl4_src, oldflp->oif,
e84f84f2 2468 rt_genid(dev_net(dev_out)));
6a2bad70 2469 err = rt_intern_hash(hash, rth, rp, NULL, oldflp->oif);
1da177e4 2470 }
e905a9ed 2471
1da177e4
LT
2472 return err;
2473}
2474
1da177e4
LT
2475/*
2476 * Major route resolver routine.
0197aa38 2477 * called with rcu_read_lock();
1da177e4
LT
2478 */
2479
b40afd0e
DL
2480static int ip_route_output_slow(struct net *net, struct rtable **rp,
2481 const struct flowi *oldflp)
1da177e4
LT
2482{
2483 u32 tos = RT_FL_TOS(oldflp);
5811662b
CG
2484 struct flowi fl = { .fl4_dst = oldflp->fl4_dst,
2485 .fl4_src = oldflp->fl4_src,
2486 .fl4_tos = tos & IPTOS_RT_MASK,
2487 .fl4_scope = ((tos & RTO_ONLINK) ?
2488 RT_SCOPE_LINK : RT_SCOPE_UNIVERSE),
47dcf0cb 2489 .mark = oldflp->mark,
b40afd0e 2490 .iif = net->loopback_dev->ifindex,
1da177e4
LT
2491 .oif = oldflp->oif };
2492 struct fib_result res;
0197aa38 2493 unsigned int flags = 0;
1da177e4 2494 struct net_device *dev_out = NULL;
1da177e4
LT
2495 int err;
2496
2497
2498 res.fi = NULL;
2499#ifdef CONFIG_IP_MULTIPLE_TABLES
2500 res.r = NULL;
2501#endif
2502
2503 if (oldflp->fl4_src) {
2504 err = -EINVAL;
f97c1e0c 2505 if (ipv4_is_multicast(oldflp->fl4_src) ||
1e637c74 2506 ipv4_is_lbcast(oldflp->fl4_src) ||
f97c1e0c 2507 ipv4_is_zeronet(oldflp->fl4_src))
1da177e4
LT
2508 goto out;
2509
1da177e4
LT
2510 /* I removed the check for oif == dev_out->oif here.
2511 It was wrong for two reasons:
1ab35276
DL
2512 1. ip_dev_find(net, saddr) can return the wrong iface if saddr
2513 is assigned to multiple interfaces.
1da177e4
LT
2514 2. Moreover, we are allowed to send packets with the saddr
2515 of another iface. --ANK
2516 */
2517
9d4fb27d
JP
2518 if (oldflp->oif == 0 &&
2519 (ipv4_is_multicast(oldflp->fl4_dst) ||
27a954bd 2520 ipv4_is_lbcast(oldflp->fl4_dst))) {
a210d01a 2521 /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
0197aa38 2522 dev_out = __ip_dev_find(net, oldflp->fl4_src, false);
a210d01a
JA
2523 if (dev_out == NULL)
2524 goto out;
2525
1da177e4
LT
2526 /* Special hack: the user can direct multicasts
2527 and limited broadcasts via the necessary interface
2528 without fiddling with IP_MULTICAST_IF or IP_PKTINFO.
2529 This hack is not just for fun, it allows
2530 vic, vat and friends to work.
2531 They bind a socket to the loopback device, set the ttl to zero
2532 and expect that it will work.
2533 From the viewpoint of the routing cache they are broken,
2534 because we are not allowed to build a multicast path
2535 with a loopback source addr (look, the routing cache
2536 cannot know that the ttl is zero, so that the packet
2537 will not leave this host and the route is valid).
2538 Luckily, this hack is a good workaround.
2539 */
2540
2541 fl.oif = dev_out->ifindex;
2542 goto make_route;
2543 }
a210d01a
JA
2544
2545 if (!(oldflp->flags & FLOWI_FLAG_ANYSRC)) {
2546 /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
0197aa38 2547 if (!__ip_dev_find(net, oldflp->fl4_src, false))
a210d01a 2548 goto out;
a210d01a 2549 }
1da177e4
LT
2550 }
2551
2552
2553 if (oldflp->oif) {
0197aa38 2554 dev_out = dev_get_by_index_rcu(net, oldflp->oif);
1da177e4
LT
2555 err = -ENODEV;
2556 if (dev_out == NULL)
2557 goto out;
e5ed6399
HX
2558
2559 /* RACE: Check return value of inet_select_addr instead. */
0197aa38 2560 if (rcu_dereference(dev_out->ip_ptr) == NULL)
1da177e4 2561 goto out; /* Wrong error code */
1da177e4 2562
f97c1e0c 2563 if (ipv4_is_local_multicast(oldflp->fl4_dst) ||
27a954bd 2564 ipv4_is_lbcast(oldflp->fl4_dst)) {
1da177e4
LT
2565 if (!fl.fl4_src)
2566 fl.fl4_src = inet_select_addr(dev_out, 0,
2567 RT_SCOPE_LINK);
2568 goto make_route;
2569 }
2570 if (!fl.fl4_src) {
f97c1e0c 2571 if (ipv4_is_multicast(oldflp->fl4_dst))
1da177e4
LT
2572 fl.fl4_src = inet_select_addr(dev_out, 0,
2573 fl.fl4_scope);
2574 else if (!oldflp->fl4_dst)
2575 fl.fl4_src = inet_select_addr(dev_out, 0,
2576 RT_SCOPE_HOST);
2577 }
2578 }
2579
2580 if (!fl.fl4_dst) {
2581 fl.fl4_dst = fl.fl4_src;
2582 if (!fl.fl4_dst)
2583 fl.fl4_dst = fl.fl4_src = htonl(INADDR_LOOPBACK);
b40afd0e 2584 dev_out = net->loopback_dev;
b40afd0e 2585 fl.oif = net->loopback_dev->ifindex;
1da177e4
LT
2586 res.type = RTN_LOCAL;
2587 flags |= RTCF_LOCAL;
2588 goto make_route;
2589 }
2590
b40afd0e 2591 if (fib_lookup(net, &fl, &res)) {
1da177e4
LT
2592 res.fi = NULL;
2593 if (oldflp->oif) {
2594 /* Apparently, the routing tables are wrong. Assume
2595 that the destination is on-link.
2596
2597 WHY? DW.
2598 Because we are allowed to send to an iface
2599 even if it has NO routes and NO assigned
2600 addresses. When oif is specified, the routing
2601 tables are looked up with only one purpose:
2602 to catch whether the destination is gatewayed rather than
2603 direct. Moreover, if MSG_DONTROUTE is set,
2604 we send the packet, ignoring both the routing tables
2605 and the ifaddr state. --ANK
2606
2607
2608 We could do it even when oif is unknown,
2609 as IPv6 likely does, but we do not.
2610 */
2611
2612 if (fl.fl4_src == 0)
2613 fl.fl4_src = inet_select_addr(dev_out, 0,
2614 RT_SCOPE_LINK);
2615 res.type = RTN_UNICAST;
2616 goto make_route;
2617 }
1da177e4
LT
2618 err = -ENETUNREACH;
2619 goto out;
2620 }
1da177e4
LT
2621
2622 if (res.type == RTN_LOCAL) {
2623 if (!fl.fl4_src)
2624 fl.fl4_src = fl.fl4_dst;
b40afd0e 2625 dev_out = net->loopback_dev;
1da177e4 2626 fl.oif = dev_out->ifindex;
1da177e4
LT
2627 res.fi = NULL;
2628 flags |= RTCF_LOCAL;
2629 goto make_route;
2630 }
2631
2632#ifdef CONFIG_IP_ROUTE_MULTIPATH
2633 if (res.fi->fib_nhs > 1 && fl.oif == 0)
2634 fib_select_multipath(&fl, &res);
2635 else
2636#endif
2637 if (!res.prefixlen && res.type == RTN_UNICAST && !fl.oif)
b40afd0e 2638 fib_select_default(net, &fl, &res);
1da177e4
LT
2639
2640 if (!fl.fl4_src)
2641 fl.fl4_src = FIB_RES_PREFSRC(res);
2642
1da177e4 2643 dev_out = FIB_RES_DEV(res);
1da177e4
LT
2644 fl.oif = dev_out->ifindex;
2645
2646
2647make_route:
2648 err = ip_mkroute_output(rp, &res, &fl, oldflp, dev_out, flags);
2649
1da177e4
LT
2650out: return err;
2651}
2652
611c183e
DL
2653int __ip_route_output_key(struct net *net, struct rtable **rp,
2654 const struct flowi *flp)
1da177e4 2655{
0197aa38
ED
2656 unsigned int hash;
2657 int res;
1da177e4
LT
2658 struct rtable *rth;
2659
1080d709
NH
2660 if (!rt_caching(net))
2661 goto slow_output;
2662
e84f84f2 2663 hash = rt_hash(flp->fl4_dst, flp->fl4_src, flp->oif, rt_genid(net));
1da177e4
LT
2664
2665 rcu_read_lock_bh();
a898def2 2666 for (rth = rcu_dereference_bh(rt_hash_table[hash].chain); rth;
d8d1f30b 2667 rth = rcu_dereference_bh(rth->dst.rt_next)) {
1da177e4
LT
2668 if (rth->fl.fl4_dst == flp->fl4_dst &&
2669 rth->fl.fl4_src == flp->fl4_src &&
c7537967 2670 rt_is_output_route(rth) &&
1da177e4 2671 rth->fl.oif == flp->oif &&
47dcf0cb 2672 rth->fl.mark == flp->mark &&
1da177e4 2673 !((rth->fl.fl4_tos ^ flp->fl4_tos) &
b5921910 2674 (IPTOS_RT_MASK | RTO_ONLINK)) &&
d8d1f30b 2675 net_eq(dev_net(rth->dst.dev), net) &&
e84f84f2 2676 !rt_is_expired(rth)) {
d8d1f30b 2677 dst_use(&rth->dst, jiffies);
1da177e4
LT
2678 RT_CACHE_STAT_INC(out_hit);
2679 rcu_read_unlock_bh();
2680 *rp = rth;
2681 return 0;
2682 }
2683 RT_CACHE_STAT_INC(out_hlist_search);
2684 }
2685 rcu_read_unlock_bh();
2686
1080d709 2687slow_output:
0197aa38
ED
2688 rcu_read_lock();
2689 res = ip_route_output_slow(net, rp, flp);
2690 rcu_read_unlock();
2691 return res;
1da177e4 2692}
d8c97a94
ACM
2693EXPORT_SYMBOL_GPL(__ip_route_output_key);
2694
ae2688d5
JW
2695static struct dst_entry *ipv4_blackhole_dst_check(struct dst_entry *dst, u32 cookie)
2696{
2697 return NULL;
2698}
2699
14e50e57
DM
2700static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu)
2701{
2702}
2703
2704static struct dst_ops ipv4_dst_blackhole_ops = {
2705 .family = AF_INET,
09640e63 2706 .protocol = cpu_to_be16(ETH_P_IP),
14e50e57 2707 .destroy = ipv4_dst_destroy,
ae2688d5 2708 .check = ipv4_blackhole_dst_check,
14e50e57 2709 .update_pmtu = ipv4_rt_blackhole_update_pmtu,
14e50e57
DM
2710};
2711
2712
e84f84f2 2713static int ipv4_dst_blackhole(struct net *net, struct rtable **rp, struct flowi *flp)
14e50e57
DM
2714{
2715 struct rtable *ort = *rp;
2716 struct rtable *rt = (struct rtable *)
2717 dst_alloc(&ipv4_dst_blackhole_ops);
2718
2719 if (rt) {
d8d1f30b 2720 struct dst_entry *new = &rt->dst;
14e50e57
DM
2721
2722 atomic_set(&new->__refcnt, 1);
2723 new->__use = 1;
352e512c
HX
2724 new->input = dst_discard;
2725 new->output = dst_discard;
defb3519 2726 dst_copy_metrics(new, &ort->dst);
14e50e57 2727
d8d1f30b 2728 new->dev = ort->dst.dev;
14e50e57
DM
2729 if (new->dev)
2730 dev_hold(new->dev);
2731
2732 rt->fl = ort->fl;
2733
e84f84f2 2734 rt->rt_genid = rt_genid(net);
14e50e57
DM
2735 rt->rt_flags = ort->rt_flags;
2736 rt->rt_type = ort->rt_type;
2737 rt->rt_dst = ort->rt_dst;
2738 rt->rt_src = ort->rt_src;
2739 rt->rt_iif = ort->rt_iif;
2740 rt->rt_gateway = ort->rt_gateway;
2741 rt->rt_spec_dst = ort->rt_spec_dst;
2742 rt->peer = ort->peer;
2743 if (rt->peer)
2744 atomic_inc(&rt->peer->refcnt);
2745
2746 dst_free(new);
2747 }
2748
d8d1f30b 2749 dst_release(&(*rp)->dst);
14e50e57 2750 *rp = rt;
a02cec21 2751 return rt ? 0 : -ENOMEM;
14e50e57
DM
2752}
2753
f1b050bf
DL
2754int ip_route_output_flow(struct net *net, struct rtable **rp, struct flowi *flp,
2755 struct sock *sk, int flags)
1da177e4
LT
2756{
2757 int err;
2758
f1b050bf 2759 if ((err = __ip_route_output_key(net, rp, flp)) != 0)
1da177e4
LT
2760 return err;
2761
2762 if (flp->proto) {
2763 if (!flp->fl4_src)
2764 flp->fl4_src = (*rp)->rt_src;
2765 if (!flp->fl4_dst)
2766 flp->fl4_dst = (*rp)->rt_dst;
52479b62 2767 err = __xfrm_lookup(net, (struct dst_entry **)rp, flp, sk,
bb72845e 2768 flags ? XFRM_LOOKUP_WAIT : 0);
14e50e57 2769 if (err == -EREMOTE)
e84f84f2 2770 err = ipv4_dst_blackhole(net, rp, flp);
14e50e57
DM
2771
2772 return err;
1da177e4
LT
2773 }
2774
2775 return 0;
2776}
d8c97a94
ACM
2777EXPORT_SYMBOL_GPL(ip_route_output_flow);
2778
f206351a 2779int ip_route_output_key(struct net *net, struct rtable **rp, struct flowi *flp)
1da177e4 2780{
f206351a 2781 return ip_route_output_flow(net, rp, flp, NULL, 0);
1da177e4 2782}
4bc2f18b 2783EXPORT_SYMBOL(ip_route_output_key);
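/*
 * Usage sketch (not part of the original source): a typical
 * output-route lookup through the helper exported above, with
 * error handling trimmed:
 *
 *	struct flowi fl = { .fl4_dst = daddr, .oif = 0 };
 *	struct rtable *rt;
 *
 *	if (ip_route_output_key(net, &rt, &fl) == 0) {
 *		... transmit via rt->dst ...
 *		ip_rt_put(rt);
 *	}
 */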
1da177e4 2784
4feb88e5
BT
2785static int rt_fill_info(struct net *net,
2786 struct sk_buff *skb, u32 pid, u32 seq, int event,
b6544c0b 2787 int nowait, unsigned int flags)
1da177e4 2788{
511c3f92 2789 struct rtable *rt = skb_rtable(skb);
1da177e4 2790 struct rtmsg *r;
be403ea1 2791 struct nlmsghdr *nlh;
e3703b3d
TG
2792 long expires;
2793 u32 id = 0, ts = 0, tsage = 0, error;
be403ea1
TG
2794
2795 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*r), flags);
2796 if (nlh == NULL)
26932566 2797 return -EMSGSIZE;
be403ea1
TG
2798
2799 r = nlmsg_data(nlh);
1da177e4
LT
2800 r->rtm_family = AF_INET;
2801 r->rtm_dst_len = 32;
2802 r->rtm_src_len = 0;
2803 r->rtm_tos = rt->fl.fl4_tos;
2804 r->rtm_table = RT_TABLE_MAIN;
be403ea1 2805 NLA_PUT_U32(skb, RTA_TABLE, RT_TABLE_MAIN);
1da177e4
LT
2806 r->rtm_type = rt->rt_type;
2807 r->rtm_scope = RT_SCOPE_UNIVERSE;
2808 r->rtm_protocol = RTPROT_UNSPEC;
2809 r->rtm_flags = (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
2810 if (rt->rt_flags & RTCF_NOTIFY)
2811 r->rtm_flags |= RTM_F_NOTIFY;
be403ea1 2812
17fb2c64 2813 NLA_PUT_BE32(skb, RTA_DST, rt->rt_dst);
be403ea1 2814
1da177e4
LT
2815 if (rt->fl.fl4_src) {
2816 r->rtm_src_len = 32;
17fb2c64 2817 NLA_PUT_BE32(skb, RTA_SRC, rt->fl.fl4_src);
1da177e4 2818 }
d8d1f30b
CG
2819 if (rt->dst.dev)
2820 NLA_PUT_U32(skb, RTA_OIF, rt->dst.dev->ifindex);
1da177e4 2821#ifdef CONFIG_NET_CLS_ROUTE
d8d1f30b
CG
2822 if (rt->dst.tclassid)
2823 NLA_PUT_U32(skb, RTA_FLOW, rt->dst.tclassid);
1da177e4 2824#endif
c7537967 2825 if (rt_is_input_route(rt))
17fb2c64 2826 NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_spec_dst);
1da177e4 2827 else if (rt->rt_src != rt->fl.fl4_src)
17fb2c64 2828 NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_src);
be403ea1 2829
1da177e4 2830 if (rt->rt_dst != rt->rt_gateway)
17fb2c64 2831 NLA_PUT_BE32(skb, RTA_GATEWAY, rt->rt_gateway);
be403ea1 2832
defb3519 2833 if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0)
be403ea1
TG
2834 goto nla_put_failure;
2835
963bfeee
ED
2836 if (rt->fl.mark)
2837 NLA_PUT_BE32(skb, RTA_MARK, rt->fl.mark);
2838
d8d1f30b
CG
2839 error = rt->dst.error;
2840 expires = rt->dst.expires ? rt->dst.expires - jiffies : 0;
1da177e4 2841 if (rt->peer) {
317fe0e6 2842 inet_peer_refcheck(rt->peer);
2c1409a0 2843 id = atomic_read(&rt->peer->ip_id_count) & 0xffff;
1da177e4 2844 if (rt->peer->tcp_ts_stamp) {
e3703b3d 2845 ts = rt->peer->tcp_ts;
9d729f72 2846 tsage = get_seconds() - rt->peer->tcp_ts_stamp;
1da177e4
LT
2847 }
2848 }
be403ea1 2849
c7537967 2850 if (rt_is_input_route(rt)) {
1da177e4 2851#ifdef CONFIG_IP_MROUTE
e448515c 2852 __be32 dst = rt->rt_dst;
1da177e4 2853
f97c1e0c 2854 if (ipv4_is_multicast(dst) && !ipv4_is_local_multicast(dst) &&
4feb88e5
BT
2855 IPV4_DEVCONF_ALL(net, MC_FORWARDING)) {
2856 int err = ipmr_get_route(net, skb, r, nowait);
1da177e4
LT
2857 if (err <= 0) {
2858 if (!nowait) {
2859 if (err == 0)
2860 return 0;
be403ea1 2861 goto nla_put_failure;
1da177e4
LT
2862 } else {
2863 if (err == -EMSGSIZE)
be403ea1 2864 goto nla_put_failure;
e3703b3d 2865 error = err;
1da177e4
LT
2866 }
2867 }
2868 } else
2869#endif
be403ea1 2870 NLA_PUT_U32(skb, RTA_IIF, rt->fl.iif);
1da177e4
LT
2871 }
2872
d8d1f30b 2873 if (rtnl_put_cacheinfo(skb, &rt->dst, id, ts, tsage,
e3703b3d
TG
2874 expires, error) < 0)
2875 goto nla_put_failure;
be403ea1
TG
2876
2877 return nlmsg_end(skb, nlh);
1da177e4 2878
be403ea1 2879nla_put_failure:
26932566
PM
2880 nlmsg_cancel(skb, nlh);
2881 return -EMSGSIZE;
1da177e4
LT
2882}
2883
63f3444f 2884static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void *arg)
1da177e4 2885{
3b1e0a65 2886 struct net *net = sock_net(in_skb->sk);
d889ce3b
TG
2887 struct rtmsg *rtm;
2888 struct nlattr *tb[RTA_MAX+1];
1da177e4 2889 struct rtable *rt = NULL;
9e12bb22
AV
2890 __be32 dst = 0;
2891 __be32 src = 0;
2892 u32 iif;
d889ce3b 2893 int err;
963bfeee 2894 int mark;
1da177e4
LT
2895 struct sk_buff *skb;
2896
d889ce3b
TG
2897 err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv4_policy);
2898 if (err < 0)
2899 goto errout;
2900
2901 rtm = nlmsg_data(nlh);
2902
1da177e4 2903 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
d889ce3b
TG
2904 if (skb == NULL) {
2905 err = -ENOBUFS;
2906 goto errout;
2907 }
1da177e4
LT
2908
2909 /* Reserve room for dummy headers; this skb can pass
2910 through a good chunk of the routing engine.
2911 */
459a98ed 2912 skb_reset_mac_header(skb);
c1d2bbe1 2913 skb_reset_network_header(skb);
d2c962b8
SH
2914
2915 /* Bugfix: need to give ip_route_input enough of an IP header to not gag. */
eddc9ec5 2916 ip_hdr(skb)->protocol = IPPROTO_ICMP;
1da177e4
LT
2917 skb_reserve(skb, MAX_HEADER + sizeof(struct iphdr));
2918
17fb2c64
AV
2919 src = tb[RTA_SRC] ? nla_get_be32(tb[RTA_SRC]) : 0;
2920 dst = tb[RTA_DST] ? nla_get_be32(tb[RTA_DST]) : 0;
d889ce3b 2921 iif = tb[RTA_IIF] ? nla_get_u32(tb[RTA_IIF]) : 0;
963bfeee 2922 mark = tb[RTA_MARK] ? nla_get_u32(tb[RTA_MARK]) : 0;
1da177e4
LT
2923
2924 if (iif) {
d889ce3b
TG
2925 struct net_device *dev;
2926
1937504d 2927 dev = __dev_get_by_index(net, iif);
d889ce3b
TG
2928 if (dev == NULL) {
2929 err = -ENODEV;
2930 goto errout_free;
2931 }
2932
1da177e4
LT
2933 skb->protocol = htons(ETH_P_IP);
2934 skb->dev = dev;
963bfeee 2935 skb->mark = mark;
1da177e4
LT
2936 local_bh_disable();
2937 err = ip_route_input(skb, dst, src, rtm->rtm_tos, dev);
2938 local_bh_enable();
d889ce3b 2939
511c3f92 2940 rt = skb_rtable(skb);
d8d1f30b
CG
2941 if (err == 0 && rt->dst.error)
2942 err = -rt->dst.error;
1da177e4 2943 } else {
d889ce3b 2944 struct flowi fl = {
5811662b
CG
2945 .fl4_dst = dst,
2946 .fl4_src = src,
2947 .fl4_tos = rtm->rtm_tos,
d889ce3b 2948 .oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0,
963bfeee 2949 .mark = mark,
d889ce3b 2950 };
1937504d 2951 err = ip_route_output_key(net, &rt, &fl);
1da177e4 2952 }
d889ce3b 2953
1da177e4 2954 if (err)
d889ce3b 2955 goto errout_free;
1da177e4 2956
d8d1f30b 2957 skb_dst_set(skb, &rt->dst);
1da177e4
LT
2958 if (rtm->rtm_flags & RTM_F_NOTIFY)
2959 rt->rt_flags |= RTCF_NOTIFY;
2960
4feb88e5 2961 err = rt_fill_info(net, skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq,
1937504d 2962 RTM_NEWROUTE, 0, 0);
d889ce3b
TG
2963 if (err <= 0)
2964 goto errout_free;
1da177e4 2965
1937504d 2966 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).pid);
d889ce3b 2967errout:
2942e900 2968 return err;
1da177e4 2969
d889ce3b 2970errout_free:
1da177e4 2971 kfree_skb(skb);
d889ce3b 2972 goto errout;
1da177e4
LT
2973}
2974
2975int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb)
2976{
2977 struct rtable *rt;
2978 int h, s_h;
2979 int idx, s_idx;
1937504d
DL
2980 struct net *net;
2981
3b1e0a65 2982 net = sock_net(skb->sk);
1da177e4
LT
2983
2984 s_h = cb->args[0];
d8c92830
ED
2985 if (s_h < 0)
2986 s_h = 0;
1da177e4 2987 s_idx = idx = cb->args[1];
a6272665
ED
2988 for (h = s_h; h <= rt_hash_mask; h++, s_idx = 0) {
2989 if (!rt_hash_table[h].chain)
2990 continue;
1da177e4 2991 rcu_read_lock_bh();
a898def2 2992 for (rt = rcu_dereference_bh(rt_hash_table[h].chain), idx = 0; rt;
d8d1f30b
CG
2993 rt = rcu_dereference_bh(rt->dst.rt_next), idx++) {
2994 if (!net_eq(dev_net(rt->dst.dev), net) || idx < s_idx)
1da177e4 2995 continue;
e84f84f2 2996 if (rt_is_expired(rt))
29e75252 2997 continue;
d8d1f30b 2998 skb_dst_set_noref(skb, &rt->dst);
4feb88e5 2999 if (rt_fill_info(net, skb, NETLINK_CB(cb->skb).pid,
e905a9ed 3000 cb->nlh->nlmsg_seq, RTM_NEWROUTE,
b6544c0b 3001 1, NLM_F_MULTI) <= 0) {
adf30907 3002 skb_dst_drop(skb);
1da177e4
LT
3003 rcu_read_unlock_bh();
3004 goto done;
3005 }
adf30907 3006 skb_dst_drop(skb);
1da177e4
LT
3007 }
3008 rcu_read_unlock_bh();
3009 }
3010
3011done:
3012 cb->args[0] = h;
3013 cb->args[1] = idx;
3014 return skb->len;
3015}
3016
3017void ip_rt_multicast_event(struct in_device *in_dev)
3018{
76e6ebfb 3019 rt_cache_flush(dev_net(in_dev->dev), 0);
1da177e4
LT
3020}
3021
3022#ifdef CONFIG_SYSCTL
81c684d1 3023static int ipv4_sysctl_rtcache_flush(ctl_table *__ctl, int write,
8d65af78 3024 void __user *buffer,
1da177e4
LT
3025 size_t *lenp, loff_t *ppos)
3026{
3027 if (write) {
639e104f 3028 int flush_delay;
81c684d1 3029 ctl_table ctl;
39a23e75 3030 struct net *net;
639e104f 3031
81c684d1
DL
3032 memcpy(&ctl, __ctl, sizeof(ctl));
3033 ctl.data = &flush_delay;
8d65af78 3034 proc_dointvec(&ctl, write, buffer, lenp, ppos);
639e104f 3035
81c684d1 3036 net = (struct net *)__ctl->extra1;
39a23e75 3037 rt_cache_flush(net, flush_delay);
1da177e4 3038 return 0;
e905a9ed 3039 }
1da177e4
LT
3040
3041 return -EINVAL;
3042}
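/*
 * Usage sketch: the handler above backs the write-only (mode 0200)
 * "flush" sysctl, so as root
 *
 *	echo -1 > /proc/sys/net/ipv4/route/flush
 *
 * parses the written value as the flush delay in seconds and hands it
 * to rt_cache_flush(); reads fall through to the -EINVAL return.
 */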
3043
eeb61f71 3044static ctl_table ipv4_route_table[] = {
1da177e4 3045 {
1da177e4
LT
3046 .procname = "gc_thresh",
3047 .data = &ipv4_dst_ops.gc_thresh,
3048 .maxlen = sizeof(int),
3049 .mode = 0644,
6d9f239a 3050 .proc_handler = proc_dointvec,
1da177e4
LT
3051 },
3052 {
1da177e4
LT
3053 .procname = "max_size",
3054 .data = &ip_rt_max_size,
3055 .maxlen = sizeof(int),
3056 .mode = 0644,
6d9f239a 3057 .proc_handler = proc_dointvec,
1da177e4
LT
3058 },
3059 {
3060 /* Deprecated. Use gc_min_interval_ms */
e905a9ed 3061
1da177e4
LT
3062 .procname = "gc_min_interval",
3063 .data = &ip_rt_gc_min_interval,
3064 .maxlen = sizeof(int),
3065 .mode = 0644,
6d9f239a 3066 .proc_handler = proc_dointvec_jiffies,
1da177e4
LT
3067 },
3068 {
1da177e4
LT
3069 .procname = "gc_min_interval_ms",
3070 .data = &ip_rt_gc_min_interval,
3071 .maxlen = sizeof(int),
3072 .mode = 0644,
6d9f239a 3073 .proc_handler = proc_dointvec_ms_jiffies,
1da177e4
LT
3074 },
3075 {
1da177e4
LT
3076 .procname = "gc_timeout",
3077 .data = &ip_rt_gc_timeout,
3078 .maxlen = sizeof(int),
3079 .mode = 0644,
6d9f239a 3080 .proc_handler = proc_dointvec_jiffies,
1da177e4
LT
3081 },
3082 {
1da177e4
LT
3083 .procname = "gc_interval",
3084 .data = &ip_rt_gc_interval,
3085 .maxlen = sizeof(int),
3086 .mode = 0644,
6d9f239a 3087 .proc_handler = proc_dointvec_jiffies,
1da177e4
LT
3088 },
3089 {
1da177e4
LT
3090 .procname = "redirect_load",
3091 .data = &ip_rt_redirect_load,
3092 .maxlen = sizeof(int),
3093 .mode = 0644,
6d9f239a 3094 .proc_handler = proc_dointvec,
1da177e4
LT
3095 },
3096 {
1da177e4
LT
3097 .procname = "redirect_number",
3098 .data = &ip_rt_redirect_number,
3099 .maxlen = sizeof(int),
3100 .mode = 0644,
6d9f239a 3101 .proc_handler = proc_dointvec,
1da177e4
LT
3102 },
3103 {
1da177e4
LT
3104 .procname = "redirect_silence",
3105 .data = &ip_rt_redirect_silence,
3106 .maxlen = sizeof(int),
3107 .mode = 0644,
6d9f239a 3108 .proc_handler = proc_dointvec,
1da177e4
LT
3109 },
3110 {
1da177e4
LT
3111 .procname = "error_cost",
3112 .data = &ip_rt_error_cost,
3113 .maxlen = sizeof(int),
3114 .mode = 0644,
6d9f239a 3115 .proc_handler = proc_dointvec,
1da177e4
LT
3116 },
3117 {
1da177e4
LT
3118 .procname = "error_burst",
3119 .data = &ip_rt_error_burst,
3120 .maxlen = sizeof(int),
3121 .mode = 0644,
6d9f239a 3122 .proc_handler = proc_dointvec,
1da177e4
LT
3123 },
3124 {
1da177e4
LT
3125 .procname = "gc_elasticity",
3126 .data = &ip_rt_gc_elasticity,
3127 .maxlen = sizeof(int),
3128 .mode = 0644,
6d9f239a 3129 .proc_handler = proc_dointvec,
1da177e4
LT
3130 },
3131 {
1da177e4
LT
3132 .procname = "mtu_expires",
3133 .data = &ip_rt_mtu_expires,
3134 .maxlen = sizeof(int),
3135 .mode = 0644,
6d9f239a 3136 .proc_handler = proc_dointvec_jiffies,
1da177e4
LT
3137 },
3138 {
1da177e4
LT
3139 .procname = "min_pmtu",
3140 .data = &ip_rt_min_pmtu,
3141 .maxlen = sizeof(int),
3142 .mode = 0644,
6d9f239a 3143 .proc_handler = proc_dointvec,
1da177e4
LT
3144 },
3145 {
1da177e4
LT
3146 .procname = "min_adv_mss",
3147 .data = &ip_rt_min_advmss,
3148 .maxlen = sizeof(int),
3149 .mode = 0644,
6d9f239a 3150 .proc_handler = proc_dointvec,
1da177e4 3151 },
f8572d8f 3152 { }
1da177e4 3153};
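/*
 * Each entry above is exposed as /proc/sys/net/ipv4/route/<procname>;
 * e.g. a sketch of adjusting the PMTU floor at runtime:
 *
 *	echo 296 > /proc/sys/net/ipv4/route/min_pmtu
 */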
39a23e75 3154
2f4520d3
AV
3155static struct ctl_table empty[1];
3156
3157static struct ctl_table ipv4_skeleton[] =
3158{
f8572d8f 3159 { .procname = "route",
d994af0d 3160 .mode = 0555, .child = ipv4_route_table},
f8572d8f 3161 { .procname = "neigh",
d994af0d 3162 .mode = 0555, .child = empty},
2f4520d3
AV
3163 { }
3164};
3165
3166static __net_initdata struct ctl_path ipv4_path[] = {
f8572d8f
EB
3167 { .procname = "net", },
3168 { .procname = "ipv4", },
39a23e75
DL
3169 { },
3170};
3171
39a23e75
DL
3172static struct ctl_table ipv4_route_flush_table[] = {
3173 {
39a23e75
DL
3174 .procname = "flush",
3175 .maxlen = sizeof(int),
3176 .mode = 0200,
6d9f239a 3177 .proc_handler = ipv4_sysctl_rtcache_flush,
39a23e75 3178 },
f8572d8f 3179 { },
39a23e75
DL
3180};
3181
2f4520d3 3182static __net_initdata struct ctl_path ipv4_route_path[] = {
f8572d8f
EB
3183 { .procname = "net", },
3184 { .procname = "ipv4", },
3185 { .procname = "route", },
2f4520d3
AV
3186 { },
3187};
3188
39a23e75
DL
3189static __net_init int sysctl_route_net_init(struct net *net)
3190{
3191 struct ctl_table *tbl;
3192
3193 tbl = ipv4_route_flush_table;
09ad9bc7 3194 if (!net_eq(net, &init_net)) {
39a23e75
DL
3195 tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
3196 if (tbl == NULL)
3197 goto err_dup;
3198 }
3199 tbl[0].extra1 = net;
3200
3201 net->ipv4.route_hdr =
3202 register_net_sysctl_table(net, ipv4_route_path, tbl);
3203 if (net->ipv4.route_hdr == NULL)
3204 goto err_reg;
3205 return 0;
3206
3207err_reg:
3208 if (tbl != ipv4_route_flush_table)
3209 kfree(tbl);
3210err_dup:
3211 return -ENOMEM;
3212}
3213
3214static __net_exit void sysctl_route_net_exit(struct net *net)
3215{
3216 struct ctl_table *tbl;
3217
3218 tbl = net->ipv4.route_hdr->ctl_table_arg;
3219 unregister_net_sysctl_table(net->ipv4.route_hdr);
3220 BUG_ON(tbl == ipv4_route_flush_table);
3221 kfree(tbl);
3222}
3223
3224static __net_initdata struct pernet_operations sysctl_route_ops = {
3225 .init = sysctl_route_net_init,
3226 .exit = sysctl_route_net_exit,
3227};
1da177e4
LT
3228#endif
3229
3ee94372 3230static __net_init int rt_genid_init(struct net *net)
9f5e97e5 3231{
3ee94372
NH
3232 get_random_bytes(&net->ipv4.rt_genid,
3233 sizeof(net->ipv4.rt_genid));
9f5e97e5
DL
3234 return 0;
3235}
3236
3ee94372
NH
3237static __net_initdata struct pernet_operations rt_genid_ops = {
3238 .init = rt_genid_init,
9f5e97e5
DL
3239};
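/*
 * rt_genid is the per-namespace generation counter consulted by
 * rt_is_expired(): every cached rtable records the generation it was
 * created under, so bumping the counter instantly invalidates all
 * older entries without walking the hash table.
 */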
3240
3241
1da177e4 3242#ifdef CONFIG_NET_CLS_ROUTE
7d720c3e 3243struct ip_rt_acct __percpu *ip_rt_acct __read_mostly;
1da177e4
LT
3244#endif /* CONFIG_NET_CLS_ROUTE */
3245
3246static __initdata unsigned long rhash_entries;
3247static int __init set_rhash_entries(char *str)
3248{
3249 if (!str)
3250 return 0;
3251 rhash_entries = simple_strtoul(str, &str, 0);
3252 return 1;
3253}
3254__setup("rhash_entries=", set_rhash_entries);
3255
3256int __init ip_rt_init(void)
3257{
424c4b70 3258 int rc = 0;
1da177e4 3259
1da177e4 3260#ifdef CONFIG_NET_CLS_ROUTE
0dcec8c2 3261 ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct), __alignof__(struct ip_rt_acct));
1da177e4
LT
3262 if (!ip_rt_acct)
3263 panic("IP: failed to allocate ip_rt_acct\n");
1da177e4
LT
3264#endif
3265
e5d679f3
AD
3266 ipv4_dst_ops.kmem_cachep =
3267 kmem_cache_create("ip_dst_cache", sizeof(struct rtable), 0,
20c2df83 3268 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
1da177e4 3269
14e50e57
DM
3270 ipv4_dst_blackhole_ops.kmem_cachep = ipv4_dst_ops.kmem_cachep;
3271
fc66f95c
ED
3272 if (dst_entries_init(&ipv4_dst_ops) < 0)
3273 panic("IP: failed to allocate ipv4_dst_ops counter\n");
3274
3275 if (dst_entries_init(&ipv4_dst_blackhole_ops) < 0)
3276 panic("IP: failed to allocate ipv4_dst_blackhole_ops counter\n");
3277
424c4b70
ED
3278 rt_hash_table = (struct rt_hash_bucket *)
3279 alloc_large_system_hash("IP route cache",
3280 sizeof(struct rt_hash_bucket),
3281 rhash_entries,
4481374c 3282 (totalram_pages >= 128 * 1024) ?
18955cfc 3283 15 : 17,
8d1502de 3284 0,
424c4b70
ED
3285 &rt_hash_log,
3286 &rt_hash_mask,
c9503e0f 3287 rhash_entries ? 0 : 512 * 1024);
22c047cc
ED
3288 memset(rt_hash_table, 0, (rt_hash_mask + 1) * sizeof(struct rt_hash_bucket));
3289 rt_hash_lock_init();
1da177e4
LT
3290
3291 ipv4_dst_ops.gc_thresh = (rt_hash_mask + 1);
3292 ip_rt_max_size = (rt_hash_mask + 1) * 16;
3293
1da177e4
LT
3294 devinet_init();
3295 ip_fib_init();
3296
1da177e4
LT
3297 /* All the timers started at system startup tend
3298 to synchronize. Perturb it a bit.
3299 */
125bb8f5
ED
3300 INIT_DELAYED_WORK_DEFERRABLE(&expires_work, rt_worker_func);
3301 expires_ljiffies = jiffies;
39c90ece
ED
3302 schedule_delayed_work(&expires_work,
3303 net_random() % ip_rt_gc_interval + ip_rt_gc_interval);
1da177e4 3304
73b38711 3305 if (ip_rt_proc_init())
107f1634 3306 printk(KERN_ERR "Unable to create route proc files\n");
1da177e4
LT
3307#ifdef CONFIG_XFRM
3308 xfrm_init();
a33bc5c1 3309 xfrm4_init(ip_rt_max_size);
1da177e4 3310#endif
63f3444f
TG
3311 rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL);
3312
39a23e75
DL
3313#ifdef CONFIG_SYSCTL
3314 register_pernet_subsys(&sysctl_route_ops);
3315#endif
3ee94372 3316 register_pernet_subsys(&rt_genid_ops);
1da177e4
LT
3317 return rc;
3318}
3319
a1bc6eb4 3320#ifdef CONFIG_SYSCTL
eeb61f71
AV
3321/*
3322 * We really need to sanitize the damn ipv4 init order, then all
3323 * this nonsense will go away.
3324 */
3325void __init ip_static_sysctl_init(void)
3326{
2f4520d3 3327 register_sysctl_paths(ipv4_path, ipv4_skeleton);
eeb61f71 3328}
a1bc6eb4 3329#endif