/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		ROUTE - implementation of the IP router.
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Linus Torvalds, <Linus.Torvalds@helsinki.fi>
 *		Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Fixes:
 *		Alan Cox	:	Verify area fixes.
 *		Alan Cox	:	cli() protects routing changes
 *		Rui Oliveira	:	ICMP routing table updates
 *		(rco@di.uminho.pt)	Routing table insertion and update
 *		Linus Torvalds	:	Rewrote bits to be sensible
 *		Alan Cox	:	Added BSD route gw semantics
 *		Alan Cox	:	Super /proc >4K
 *		Alan Cox	:	MTU in route table
 *		Alan Cox	:	MSS actually. Also added the window
 *					clamper.
 *		Sam Lantinga	:	Fixed route matching in rt_del()
 *		Alan Cox	:	Routing cache support.
 *		Alan Cox	:	Removed compatibility cruft.
 *		Alan Cox	:	RTF_REJECT support.
 *		Alan Cox	:	TCP irtt support.
 *		Jonathan Naylor	:	Added Metric support.
 *	Miquel van Smoorenburg	:	BSD API fixes.
 *	Miquel van Smoorenburg	:	Metrics.
 *		Alan Cox	:	Use __u32 properly
 *		Alan Cox	:	Aligned routing errors more closely with BSD
 *					our system is still very different.
 *		Alan Cox	:	Faster /proc handling
 *	Alexey Kuznetsov	:	Massive rework to support tree based routing,
 *					routing caches and better behaviour.
 *
 *		Olaf Erb	:	irtt wasn't being copied right.
 *		Bjorn Ekwall	:	Kerneld route support.
 *		Alan Cox	:	Multicast fixed (I hope)
 *		Pavel Krauz	:	Limited broadcast fixed
 *		Mike McLagan	:	Routing by source
 *	Alexey Kuznetsov	:	End of old history. Split to fib.c and
 *					route.c and rewritten from scratch.
 *		Andi Kleen	:	Load-limit warning messages.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year coma.
 *	Vitaly E. Lavrov	:	Race condition in ip_route_input_slow.
 *	Tobias Ringstrom	:	Uninitialized res.type in ip_route_output_slow.
 *	Vladimir V. Ivanov	:	IP rule info (flowid) is really useful.
 *		Marc Boucher	:	routing by fwmark
 *	Robert Olsson		:	Added rt_cache statistics
 *	Arnaldo C. Melo		:	Convert proc stuff to seq_file
 *	Eric Dumazet		:	hashed spinlocks and rt_check_expire() fixes.
 *	Ilia Sotnikov		:	Ignore TOS on PMTUD and Redirect
 *	Ilia Sotnikov		:	Removed TOS from hash calculations
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/pkt_sched.h>
#include <linux/mroute.h>
#include <linux/netfilter_ipv4.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/rcupdate.h>
#include <linux/times.h>
#include <linux/slab.h>
#include <net/dst.h>
#include <net/net_namespace.h>
#include <net/protocol.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/inetpeer.h>
#include <net/sock.h>
#include <net/ip_fib.h>
#include <net/arp.h>
#include <net/tcp.h>
#include <net/icmp.h>
#include <net/xfrm.h>
#include <net/netevent.h>
#include <net/rtnetlink.h>
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif

#define RT_FL_TOS(oldflp4) \
	((u32)(oldflp4->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK)))

#define IP_MAX_MTU	0xFFF0

#define RT_GC_TIMEOUT (300*HZ)

static int ip_rt_max_size;
static int ip_rt_gc_timeout __read_mostly	= RT_GC_TIMEOUT;
static int ip_rt_gc_interval __read_mostly	= 60 * HZ;
static int ip_rt_gc_min_interval __read_mostly	= HZ / 2;
static int ip_rt_redirect_number __read_mostly	= 9;
static int ip_rt_redirect_load __read_mostly	= HZ / 50;
static int ip_rt_redirect_silence __read_mostly	= ((HZ / 50) << (9 + 1));
static int ip_rt_error_cost __read_mostly	= HZ;
static int ip_rt_error_burst __read_mostly	= 5 * HZ;
static int ip_rt_gc_elasticity __read_mostly	= 8;
static int ip_rt_mtu_expires __read_mostly	= 10 * 60 * HZ;
static int ip_rt_min_pmtu __read_mostly		= 512 + 20 + 20;
static int ip_rt_min_advmss __read_mostly	= 256;
static int rt_chain_length_max __read_mostly	= 20;
/*
 *	Interface to generic destination cache.
 */

static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie);
static unsigned int	 ipv4_default_advmss(const struct dst_entry *dst);
static unsigned int	 ipv4_default_mtu(const struct dst_entry *dst);
static void		 ipv4_dst_destroy(struct dst_entry *dst);
static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
static void		 ipv4_link_failure(struct sk_buff *skb);
static void		 ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu);
static int rt_garbage_collect(struct dst_ops *ops);

static void ipv4_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
			    int how)
{
}

static u32 *ipv4_cow_metrics(struct dst_entry *dst, unsigned long old)
{
	struct rtable *rt = (struct rtable *) dst;
	struct inet_peer *peer;
	u32 *p = NULL;

	if (!rt->peer)
		rt_bind_peer(rt, 1);

	peer = rt->peer;
	if (peer) {
		u32 *old_p = __DST_METRICS_PTR(old);
		unsigned long prev, new;

		p = peer->metrics;
		if (inet_metrics_new(peer))
			memcpy(p, old_p, sizeof(u32) * RTAX_MAX);

		new = (unsigned long) p;
		prev = cmpxchg(&dst->_metrics, old, new);

		if (prev != old) {
			p = __DST_METRICS_PTR(prev);
			if (prev & DST_METRICS_READ_ONLY)
				p = NULL;
		} else {
			if (rt->fi) {
				fib_info_put(rt->fi);
				rt->fi = NULL;
			}
		}
	}
	return p;
}

static struct dst_ops ipv4_dst_ops = {
	.family =		AF_INET,
	.protocol =		cpu_to_be16(ETH_P_IP),
	.gc =			rt_garbage_collect,
	.check =		ipv4_dst_check,
	.default_advmss =	ipv4_default_advmss,
	.default_mtu =		ipv4_default_mtu,
	.cow_metrics =		ipv4_cow_metrics,
	.destroy =		ipv4_dst_destroy,
	.ifdown =		ipv4_dst_ifdown,
	.negative_advice =	ipv4_negative_advice,
	.link_failure =		ipv4_link_failure,
	.update_pmtu =		ip_rt_update_pmtu,
	.local_out =		__ip_local_out,
};

#define ECN_OR_COST(class)	TC_PRIO_##class

const __u8 ip_tos2prio[16] = {
	TC_PRIO_BESTEFFORT,
	ECN_OR_COST(BESTEFFORT),
	TC_PRIO_BESTEFFORT,
	ECN_OR_COST(BESTEFFORT),
	TC_PRIO_BULK,
	ECN_OR_COST(BULK),
	TC_PRIO_BULK,
	ECN_OR_COST(BULK),
	TC_PRIO_INTERACTIVE,
	ECN_OR_COST(INTERACTIVE),
	TC_PRIO_INTERACTIVE,
	ECN_OR_COST(INTERACTIVE),
	TC_PRIO_INTERACTIVE_BULK,
	ECN_OR_COST(INTERACTIVE_BULK),
	TC_PRIO_INTERACTIVE_BULK,
	ECN_OR_COST(INTERACTIVE_BULK)
};


/*
 * Route cache.
 */

/* The locking scheme is rather straightforward:
 *
 * 1) Read-Copy Update protects the buckets of the central route hash.
 * 2) Only writers remove entries, and they hold the lock
 *    as they look at rtable reference counts.
 * 3) Only readers acquire references to rtable entries,
 *    they do so with atomic increments and with the
 *    lock held.
 */

struct rt_hash_bucket {
	struct rtable __rcu	*chain;
};

#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \
	defined(CONFIG_PROVE_LOCKING)
/*
 * Instead of using one spinlock for each rt_hash_bucket, we use a table of spinlocks
 * The size of this table is a power of two and depends on the number of CPUS.
 * (on lockdep we have a quite big spinlock_t, so keep the size down there)
 */
#ifdef CONFIG_LOCKDEP
# define RT_HASH_LOCK_SZ	256
#else
# if NR_CPUS >= 32
#  define RT_HASH_LOCK_SZ	4096
# elif NR_CPUS >= 16
#  define RT_HASH_LOCK_SZ	2048
# elif NR_CPUS >= 8
#  define RT_HASH_LOCK_SZ	1024
# elif NR_CPUS >= 4
#  define RT_HASH_LOCK_SZ	512
# else
#  define RT_HASH_LOCK_SZ	256
# endif
#endif

static spinlock_t	*rt_hash_locks;
# define rt_hash_lock_addr(slot) &rt_hash_locks[(slot) & (RT_HASH_LOCK_SZ - 1)]

static __init void rt_hash_lock_init(void)
{
	int i;

	rt_hash_locks = kmalloc(sizeof(spinlock_t) * RT_HASH_LOCK_SZ,
			GFP_KERNEL);
	if (!rt_hash_locks)
		panic("IP: failed to allocate rt_hash_locks\n");

	for (i = 0; i < RT_HASH_LOCK_SZ; i++)
		spin_lock_init(&rt_hash_locks[i]);
}
#else
# define rt_hash_lock_addr(slot) NULL

static inline void rt_hash_lock_init(void)
{
}
#endif

static struct rt_hash_bucket 	*rt_hash_table __read_mostly;
static unsigned			rt_hash_mask __read_mostly;
static unsigned int		rt_hash_log  __read_mostly;

static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
#define RT_CACHE_STAT_INC(field) __this_cpu_inc(rt_cache_stat.field)

static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx,
				   int genid)
{
	return jhash_3words((__force u32)daddr, (__force u32)saddr,
			    idx, genid)
		& rt_hash_mask;
}

static inline int rt_genid(struct net *net)
{
	return atomic_read(&net->ipv4.rt_genid);
}
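
/*
 * Each cached rtable remembers the rt_genid of its namespace at creation
 * time; rt_cache_invalidate() further down bumps net->ipv4.rt_genid, so
 * rt_is_expired() can reject every older entry without walking the hash.
 */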
308
1da177e4
LT
309#ifdef CONFIG_PROC_FS
310struct rt_cache_iter_state {
a75e936f 311 struct seq_net_private p;
1da177e4 312 int bucket;
29e75252 313 int genid;
1da177e4
LT
314};
315
1218854a 316static struct rtable *rt_cache_get_first(struct seq_file *seq)
1da177e4 317{
1218854a 318 struct rt_cache_iter_state *st = seq->private;
1da177e4 319 struct rtable *r = NULL;
1da177e4
LT
320
321 for (st->bucket = rt_hash_mask; st->bucket >= 0; --st->bucket) {
1c31720a 322 if (!rcu_dereference_raw(rt_hash_table[st->bucket].chain))
a6272665 323 continue;
1da177e4 324 rcu_read_lock_bh();
a898def2 325 r = rcu_dereference_bh(rt_hash_table[st->bucket].chain);
29e75252 326 while (r) {
d8d1f30b 327 if (dev_net(r->dst.dev) == seq_file_net(seq) &&
a75e936f 328 r->rt_genid == st->genid)
29e75252 329 return r;
d8d1f30b 330 r = rcu_dereference_bh(r->dst.rt_next);
29e75252 331 }
1da177e4
LT
332 rcu_read_unlock_bh();
333 }
29e75252 334 return r;
1da177e4
LT
335}
336
1218854a 337static struct rtable *__rt_cache_get_next(struct seq_file *seq,
642d6318 338 struct rtable *r)
1da177e4 339{
1218854a 340 struct rt_cache_iter_state *st = seq->private;
a6272665 341
1c31720a 342 r = rcu_dereference_bh(r->dst.rt_next);
1da177e4
LT
343 while (!r) {
344 rcu_read_unlock_bh();
a6272665
ED
345 do {
346 if (--st->bucket < 0)
347 return NULL;
1c31720a 348 } while (!rcu_dereference_raw(rt_hash_table[st->bucket].chain));
1da177e4 349 rcu_read_lock_bh();
1c31720a 350 r = rcu_dereference_bh(rt_hash_table[st->bucket].chain);
1da177e4 351 }
1c31720a 352 return r;
1da177e4
LT
353}
354
1218854a 355static struct rtable *rt_cache_get_next(struct seq_file *seq,
642d6318
DL
356 struct rtable *r)
357{
1218854a
YH
358 struct rt_cache_iter_state *st = seq->private;
359 while ((r = __rt_cache_get_next(seq, r)) != NULL) {
d8d1f30b 360 if (dev_net(r->dst.dev) != seq_file_net(seq))
a75e936f 361 continue;
642d6318
DL
362 if (r->rt_genid == st->genid)
363 break;
364 }
365 return r;
366}
367
1218854a 368static struct rtable *rt_cache_get_idx(struct seq_file *seq, loff_t pos)
1da177e4 369{
1218854a 370 struct rtable *r = rt_cache_get_first(seq);
1da177e4
LT
371
372 if (r)
1218854a 373 while (pos && (r = rt_cache_get_next(seq, r)))
1da177e4
LT
374 --pos;
375 return pos ? NULL : r;
376}
377
378static void *rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
379{
29e75252 380 struct rt_cache_iter_state *st = seq->private;
29e75252 381 if (*pos)
1218854a 382 return rt_cache_get_idx(seq, *pos - 1);
e84f84f2 383 st->genid = rt_genid(seq_file_net(seq));
29e75252 384 return SEQ_START_TOKEN;
1da177e4
LT
385}
386
387static void *rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos)
388{
29e75252 389 struct rtable *r;
1da177e4
LT
390
391 if (v == SEQ_START_TOKEN)
1218854a 392 r = rt_cache_get_first(seq);
1da177e4 393 else
1218854a 394 r = rt_cache_get_next(seq, v);
1da177e4
LT
395 ++*pos;
396 return r;
397}
398
399static void rt_cache_seq_stop(struct seq_file *seq, void *v)
400{
401 if (v && v != SEQ_START_TOKEN)
402 rcu_read_unlock_bh();
403}
404
405static int rt_cache_seq_show(struct seq_file *seq, void *v)
406{
407 if (v == SEQ_START_TOKEN)
408 seq_printf(seq, "%-127s\n",
409 "Iface\tDestination\tGateway \tFlags\t\tRefCnt\tUse\t"
410 "Metric\tSource\t\tMTU\tWindow\tIRTT\tTOS\tHHRef\t"
411 "HHUptod\tSpecDst");
412 else {
413 struct rtable *r = v;
5e659e4c 414 int len;
1da177e4 415
0eae88f3
ED
416 seq_printf(seq, "%s\t%08X\t%08X\t%8X\t%d\t%u\t%d\t"
417 "%08X\t%d\t%u\t%u\t%02X\t%d\t%1d\t%08X%n",
d8d1f30b 418 r->dst.dev ? r->dst.dev->name : "*",
0eae88f3
ED
419 (__force u32)r->rt_dst,
420 (__force u32)r->rt_gateway,
d8d1f30b
CG
421 r->rt_flags, atomic_read(&r->dst.__refcnt),
422 r->dst.__use, 0, (__force u32)r->rt_src,
0dbaee3b 423 dst_metric_advmss(&r->dst) + 40,
d8d1f30b
CG
424 dst_metric(&r->dst, RTAX_WINDOW),
425 (int)((dst_metric(&r->dst, RTAX_RTT) >> 3) +
426 dst_metric(&r->dst, RTAX_RTTVAR)),
5e2b61f7 427 r->rt_tos,
d8d1f30b
CG
428 r->dst.hh ? atomic_read(&r->dst.hh->hh_refcnt) : -1,
429 r->dst.hh ? (r->dst.hh->hh_output ==
1da177e4 430 dev_queue_xmit) : 0,
5e659e4c
PE
431 r->rt_spec_dst, &len);
432
433 seq_printf(seq, "%*s\n", 127 - len, "");
e905a9ed
YH
434 }
435 return 0;
1da177e4
LT
436}
437
f690808e 438static const struct seq_operations rt_cache_seq_ops = {
1da177e4
LT
439 .start = rt_cache_seq_start,
440 .next = rt_cache_seq_next,
441 .stop = rt_cache_seq_stop,
442 .show = rt_cache_seq_show,
443};
444
445static int rt_cache_seq_open(struct inode *inode, struct file *file)
446{
a75e936f 447 return seq_open_net(inode, file, &rt_cache_seq_ops,
cf7732e4 448 sizeof(struct rt_cache_iter_state));
1da177e4
LT
449}
450
9a32144e 451static const struct file_operations rt_cache_seq_fops = {
1da177e4
LT
452 .owner = THIS_MODULE,
453 .open = rt_cache_seq_open,
454 .read = seq_read,
455 .llseek = seq_lseek,
a75e936f 456 .release = seq_release_net,
1da177e4
LT
457};
458
459
460static void *rt_cpu_seq_start(struct seq_file *seq, loff_t *pos)
461{
462 int cpu;
463
464 if (*pos == 0)
465 return SEQ_START_TOKEN;
466
0f23174a 467 for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
1da177e4
LT
468 if (!cpu_possible(cpu))
469 continue;
470 *pos = cpu+1;
2f970d83 471 return &per_cpu(rt_cache_stat, cpu);
1da177e4
LT
472 }
473 return NULL;
474}
475
476static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
477{
478 int cpu;
479
0f23174a 480 for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
1da177e4
LT
481 if (!cpu_possible(cpu))
482 continue;
483 *pos = cpu+1;
2f970d83 484 return &per_cpu(rt_cache_stat, cpu);
1da177e4
LT
485 }
486 return NULL;
e905a9ed 487
1da177e4
LT
488}
489
490static void rt_cpu_seq_stop(struct seq_file *seq, void *v)
491{
492
493}
494
495static int rt_cpu_seq_show(struct seq_file *seq, void *v)
496{
497 struct rt_cache_stat *st = v;
498
499 if (v == SEQ_START_TOKEN) {
5bec0039 500 seq_printf(seq, "entries in_hit in_slow_tot in_slow_mc in_no_route in_brd in_martian_dst in_martian_src out_hit out_slow_tot out_slow_mc gc_total gc_ignored gc_goal_miss gc_dst_overflow in_hlist_search out_hlist_search\n");
1da177e4
LT
501 return 0;
502 }
e905a9ed 503
1da177e4
LT
504 seq_printf(seq,"%08x %08x %08x %08x %08x %08x %08x %08x "
505 " %08x %08x %08x %08x %08x %08x %08x %08x %08x \n",
fc66f95c 506 dst_entries_get_slow(&ipv4_dst_ops),
1da177e4
LT
507 st->in_hit,
508 st->in_slow_tot,
509 st->in_slow_mc,
510 st->in_no_route,
511 st->in_brd,
512 st->in_martian_dst,
513 st->in_martian_src,
514
515 st->out_hit,
516 st->out_slow_tot,
e905a9ed 517 st->out_slow_mc,
1da177e4
LT
518
519 st->gc_total,
520 st->gc_ignored,
521 st->gc_goal_miss,
522 st->gc_dst_overflow,
523 st->in_hlist_search,
524 st->out_hlist_search
525 );
526 return 0;
527}
528
f690808e 529static const struct seq_operations rt_cpu_seq_ops = {
1da177e4
LT
530 .start = rt_cpu_seq_start,
531 .next = rt_cpu_seq_next,
532 .stop = rt_cpu_seq_stop,
533 .show = rt_cpu_seq_show,
534};
535
536
537static int rt_cpu_seq_open(struct inode *inode, struct file *file)
538{
539 return seq_open(file, &rt_cpu_seq_ops);
540}
541
9a32144e 542static const struct file_operations rt_cpu_seq_fops = {
1da177e4
LT
543 .owner = THIS_MODULE,
544 .open = rt_cpu_seq_open,
545 .read = seq_read,
546 .llseek = seq_lseek,
547 .release = seq_release,
548};
549
c7066f70 550#ifdef CONFIG_IP_ROUTE_CLASSID
a661c419 551static int rt_acct_proc_show(struct seq_file *m, void *v)
78c686e9 552{
a661c419
AD
553 struct ip_rt_acct *dst, *src;
554 unsigned int i, j;
555
556 dst = kcalloc(256, sizeof(struct ip_rt_acct), GFP_KERNEL);
557 if (!dst)
558 return -ENOMEM;
559
560 for_each_possible_cpu(i) {
561 src = (struct ip_rt_acct *)per_cpu_ptr(ip_rt_acct, i);
562 for (j = 0; j < 256; j++) {
563 dst[j].o_bytes += src[j].o_bytes;
564 dst[j].o_packets += src[j].o_packets;
565 dst[j].i_bytes += src[j].i_bytes;
566 dst[j].i_packets += src[j].i_packets;
567 }
78c686e9
PE
568 }
569
a661c419
AD
570 seq_write(m, dst, 256 * sizeof(struct ip_rt_acct));
571 kfree(dst);
572 return 0;
573}
78c686e9 574
a661c419
AD
575static int rt_acct_proc_open(struct inode *inode, struct file *file)
576{
577 return single_open(file, rt_acct_proc_show, NULL);
78c686e9 578}
a661c419
AD
579
580static const struct file_operations rt_acct_proc_fops = {
581 .owner = THIS_MODULE,
582 .open = rt_acct_proc_open,
583 .read = seq_read,
584 .llseek = seq_lseek,
585 .release = single_release,
586};
78c686e9 587#endif
107f1634 588
73b38711 589static int __net_init ip_rt_do_proc_init(struct net *net)
107f1634
PE
590{
591 struct proc_dir_entry *pde;
592
593 pde = proc_net_fops_create(net, "rt_cache", S_IRUGO,
594 &rt_cache_seq_fops);
595 if (!pde)
596 goto err1;
597
77020720
WC
598 pde = proc_create("rt_cache", S_IRUGO,
599 net->proc_net_stat, &rt_cpu_seq_fops);
107f1634
PE
600 if (!pde)
601 goto err2;
602
c7066f70 603#ifdef CONFIG_IP_ROUTE_CLASSID
a661c419 604 pde = proc_create("rt_acct", 0, net->proc_net, &rt_acct_proc_fops);
107f1634
PE
605 if (!pde)
606 goto err3;
607#endif
608 return 0;
609
c7066f70 610#ifdef CONFIG_IP_ROUTE_CLASSID
107f1634
PE
611err3:
612 remove_proc_entry("rt_cache", net->proc_net_stat);
613#endif
614err2:
615 remove_proc_entry("rt_cache", net->proc_net);
616err1:
617 return -ENOMEM;
618}
73b38711
DL
619
620static void __net_exit ip_rt_do_proc_exit(struct net *net)
621{
622 remove_proc_entry("rt_cache", net->proc_net_stat);
623 remove_proc_entry("rt_cache", net->proc_net);
c7066f70 624#ifdef CONFIG_IP_ROUTE_CLASSID
73b38711 625 remove_proc_entry("rt_acct", net->proc_net);
0a931acf 626#endif
73b38711
DL
627}
628
629static struct pernet_operations ip_rt_proc_ops __net_initdata = {
630 .init = ip_rt_do_proc_init,
631 .exit = ip_rt_do_proc_exit,
632};
633
634static int __init ip_rt_proc_init(void)
635{
636 return register_pernet_subsys(&ip_rt_proc_ops);
637}
638
107f1634 639#else
73b38711 640static inline int ip_rt_proc_init(void)
107f1634
PE
641{
642 return 0;
643}
1da177e4 644#endif /* CONFIG_PROC_FS */

static inline void rt_free(struct rtable *rt)
{
	call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free);
}

static inline void rt_drop(struct rtable *rt)
{
	ip_rt_put(rt);
	call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free);
}

static inline int rt_fast_clean(struct rtable *rth)
{
	/* Kill broadcast/multicast entries very aggressively, if they
	   collide in hash table with more useful entries */
	return (rth->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) &&
		rt_is_input_route(rth) && rth->dst.rt_next;
}
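
/*
 * rt_may_expire() below combines these helpers: entries flagged by
 * rt_fast_clean() get no grace from the short timeout, while entries
 * that rt_valuable() likes are kept up to the longer timeout.
 */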

static inline int rt_valuable(struct rtable *rth)
{
	return (rth->rt_flags & (RTCF_REDIRECTED | RTCF_NOTIFY)) ||
		(rth->peer && rth->peer->pmtu_expires);
}

static int rt_may_expire(struct rtable *rth, unsigned long tmo1, unsigned long tmo2)
{
	unsigned long age;
	int ret = 0;

	if (atomic_read(&rth->dst.__refcnt))
		goto out;

	age = jiffies - rth->dst.lastuse;
	if ((age <= tmo1 && !rt_fast_clean(rth)) ||
	    (age <= tmo2 && rt_valuable(rth)))
		goto out;
	ret = 1;
out:	return ret;
}

/* Bits of score are:
 * 31: very valuable
 * 30: not quite useless
 * 29..0: usage counter
 */
static inline u32 rt_score(struct rtable *rt)
{
	u32 score = jiffies - rt->dst.lastuse;

	score = ~score & ~(3<<30);

	if (rt_valuable(rt))
		score |= (1<<31);

	if (rt_is_output_route(rt) ||
	    !(rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST|RTCF_LOCAL)))
		score |= (1<<30);

	return score;
}
707
1080d709
NH
708static inline bool rt_caching(const struct net *net)
709{
710 return net->ipv4.current_rt_cache_rebuild_count <=
711 net->ipv4.sysctl_rt_cache_rebuild_count;
712}
713
5e2b61f7
DM
714static inline bool compare_hash_inputs(const struct rtable *rt1,
715 const struct rtable *rt2)
1080d709 716{
5e2b61f7
DM
717 return ((((__force u32)rt1->rt_key_dst ^ (__force u32)rt2->rt_key_dst) |
718 ((__force u32)rt1->rt_key_src ^ (__force u32)rt2->rt_key_src) |
719 (rt1->rt_iif ^ rt2->rt_iif)) == 0);
1080d709
NH
720}
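
/*
 * compare_hash_inputs() above only checks the fields that feed rt_hash()
 * (key daddr, key saddr, iif) and is used for chain-length accounting;
 * compare_keys() below is the full cache-lookup match, which also
 * compares mark, tos and oif.
 */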
721
5e2b61f7 722static inline int compare_keys(struct rtable *rt1, struct rtable *rt2)
1da177e4 723{
5e2b61f7
DM
724 return (((__force u32)rt1->rt_key_dst ^ (__force u32)rt2->rt_key_dst) |
725 ((__force u32)rt1->rt_key_src ^ (__force u32)rt2->rt_key_src) |
726 (rt1->rt_mark ^ rt2->rt_mark) |
727 (rt1->rt_tos ^ rt2->rt_tos) |
728 (rt1->rt_oif ^ rt2->rt_oif) |
729 (rt1->rt_iif ^ rt2->rt_iif)) == 0;
1da177e4
LT
730}
731
b5921910
DL
732static inline int compare_netns(struct rtable *rt1, struct rtable *rt2)
733{
d8d1f30b 734 return net_eq(dev_net(rt1->dst.dev), dev_net(rt2->dst.dev));
b5921910
DL
735}
736
e84f84f2
DL
737static inline int rt_is_expired(struct rtable *rth)
738{
d8d1f30b 739 return rth->rt_genid != rt_genid(dev_net(rth->dst.dev));
e84f84f2
DL
740}
741
beb659bd
ED
/*
 * Perform a full scan of the hash table and free all entries.
 * Can be called by a softirq or a process.
 * In the latter case, we want to reschedule if necessary.
 */
6561a3b1 747static void rt_do_flush(struct net *net, int process_context)
beb659bd
ED
748{
749 unsigned int i;
750 struct rtable *rth, *next;
751
752 for (i = 0; i <= rt_hash_mask; i++) {
6561a3b1
DM
753 struct rtable __rcu **pprev;
754 struct rtable *list;
755
beb659bd
ED
756 if (process_context && need_resched())
757 cond_resched();
1c31720a 758 rth = rcu_dereference_raw(rt_hash_table[i].chain);
beb659bd
ED
759 if (!rth)
760 continue;
761
762 spin_lock_bh(rt_hash_lock_addr(i));
32cb5b4e 763
6561a3b1
DM
764 list = NULL;
765 pprev = &rt_hash_table[i].chain;
766 rth = rcu_dereference_protected(*pprev,
1c31720a 767 lockdep_is_held(rt_hash_lock_addr(i)));
32cb5b4e 768
6561a3b1
DM
769 while (rth) {
770 next = rcu_dereference_protected(rth->dst.rt_next,
1c31720a 771 lockdep_is_held(rt_hash_lock_addr(i)));
6561a3b1
DM
772
773 if (!net ||
774 net_eq(dev_net(rth->dst.dev), net)) {
775 rcu_assign_pointer(*pprev, next);
776 rcu_assign_pointer(rth->dst.rt_next, list);
777 list = rth;
32cb5b4e 778 } else {
6561a3b1 779 pprev = &rth->dst.rt_next;
32cb5b4e 780 }
6561a3b1 781 rth = next;
32cb5b4e 782 }
6561a3b1 783
beb659bd
ED
784 spin_unlock_bh(rt_hash_lock_addr(i));
785
6561a3b1
DM
786 for (; list; list = next) {
787 next = rcu_dereference_protected(list->dst.rt_next, 1);
788 rt_free(list);
beb659bd
ED
789 }
790 }
791}
792
/*
 * While freeing expired entries, we compute average chain length
 * and standard deviation, using fixed-point arithmetic.
 * This gives us an estimate for rt_chain_length_max:
 *	rt_chain_length_max = max(elasticity, AVG + 4*SD)
 * We use 3 bits for the fractional part, and 29 (or 61) for the magnitude.
 */

#define FRACT_BITS 3
#define ONE (1UL << FRACT_BITS)

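/*
 * For instance, with FRACT_BITS == 3, ONE == 8: a fixed-point chain
 * length of 20 stands for 20/8 = 2.5 entries, and slow_chain_length()
 * drops the fractional bits again with ">> FRACT_BITS".
 */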
98376387
ED
804/*
805 * Given a hash chain and an item in this hash chain,
806 * find if a previous entry has the same hash_inputs
807 * (but differs on tos, mark or oif)
808 * Returns 0 if an alias is found.
809 * Returns ONE if rth has no alias before itself.
810 */
811static int has_noalias(const struct rtable *head, const struct rtable *rth)
812{
813 const struct rtable *aux = head;
814
815 while (aux != rth) {
5e2b61f7 816 if (compare_hash_inputs(aux, rth))
98376387 817 return 0;
1c31720a 818 aux = rcu_dereference_protected(aux->dst.rt_next, 1);
98376387
ED
819 }
820 return ONE;
821}
822
/*
 * Perturb rt_genid by a small quantity [1..256].
 * Using 8 bits of shuffling ensures we can call rt_cache_invalidate()
 * many times (2^24) without reusing a recent rt_genid.
 * The Jenkins hash is strong enough that little changes of rt_genid are OK.
 */
static void rt_cache_invalidate(struct net *net)
{
	unsigned char shuffle;

	get_random_bytes(&shuffle, sizeof(shuffle));
	atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
}
836
29e75252
ED
837/*
838 * delay < 0 : invalidate cache (fast : entries will be deleted later)
839 * delay >= 0 : invalidate & flush cache (can be long)
840 */
76e6ebfb 841void rt_cache_flush(struct net *net, int delay)
1da177e4 842{
86c657f6 843 rt_cache_invalidate(net);
29e75252 844 if (delay >= 0)
6561a3b1 845 rt_do_flush(net, !in_softirq());
1da177e4
LT
846}
847
a5ee1551 848/* Flush previous cache invalidated entries from the cache */
6561a3b1 849void rt_cache_flush_batch(struct net *net)
a5ee1551 850{
6561a3b1 851 rt_do_flush(net, !in_softirq());
a5ee1551
EB
852}
853
1080d709
NH
854static void rt_emergency_hash_rebuild(struct net *net)
855{
3ee94372 856 if (net_ratelimit())
1080d709 857 printk(KERN_WARNING "Route hash chain too long!\n");
3ee94372 858 rt_cache_invalidate(net);
1080d709
NH
859}
860
1da177e4
LT
/*
   Short description of GC goals.

   We want to build an algorithm which will keep the routing cache
   at some equilibrium point, where the number of aged-off entries
   is kept approximately equal to the number of newly generated ones.

   The current expiration strength is the variable "expire".
   We try to adjust it dynamically, so that if networking
   is idle, expire is large enough to keep enough warm entries,
   and when load increases it is reduced to limit the cache size.
 */
873
569d3645 874static int rt_garbage_collect(struct dst_ops *ops)
1da177e4
LT
875{
876 static unsigned long expire = RT_GC_TIMEOUT;
877 static unsigned long last_gc;
878 static int rover;
879 static int equilibrium;
1c31720a
ED
880 struct rtable *rth;
881 struct rtable __rcu **rthp;
1da177e4
LT
882 unsigned long now = jiffies;
883 int goal;
fc66f95c 884 int entries = dst_entries_get_fast(&ipv4_dst_ops);
1da177e4
LT
885
886 /*
887 * Garbage collection is pretty expensive,
888 * do not make it too frequently.
889 */
890
891 RT_CACHE_STAT_INC(gc_total);
892
893 if (now - last_gc < ip_rt_gc_min_interval &&
fc66f95c 894 entries < ip_rt_max_size) {
1da177e4
LT
895 RT_CACHE_STAT_INC(gc_ignored);
896 goto out;
897 }
898
fc66f95c 899 entries = dst_entries_get_slow(&ipv4_dst_ops);
1da177e4 900 /* Calculate number of entries, which we want to expire now. */
fc66f95c 901 goal = entries - (ip_rt_gc_elasticity << rt_hash_log);
1da177e4
LT
902 if (goal <= 0) {
903 if (equilibrium < ipv4_dst_ops.gc_thresh)
904 equilibrium = ipv4_dst_ops.gc_thresh;
fc66f95c 905 goal = entries - equilibrium;
1da177e4 906 if (goal > 0) {
b790cedd 907 equilibrium += min_t(unsigned int, goal >> 1, rt_hash_mask + 1);
fc66f95c 908 goal = entries - equilibrium;
1da177e4
LT
909 }
910 } else {
911 /* We are in dangerous area. Try to reduce cache really
912 * aggressively.
913 */
b790cedd 914 goal = max_t(unsigned int, goal >> 1, rt_hash_mask + 1);
fc66f95c 915 equilibrium = entries - goal;
1da177e4
LT
916 }
917
918 if (now - last_gc >= ip_rt_gc_min_interval)
919 last_gc = now;
920
921 if (goal <= 0) {
922 equilibrium += goal;
923 goto work_done;
924 }
925
926 do {
927 int i, k;
928
929 for (i = rt_hash_mask, k = rover; i >= 0; i--) {
930 unsigned long tmo = expire;
931
932 k = (k + 1) & rt_hash_mask;
933 rthp = &rt_hash_table[k].chain;
22c047cc 934 spin_lock_bh(rt_hash_lock_addr(k));
1c31720a
ED
935 while ((rth = rcu_dereference_protected(*rthp,
936 lockdep_is_held(rt_hash_lock_addr(k)))) != NULL) {
e84f84f2 937 if (!rt_is_expired(rth) &&
29e75252 938 !rt_may_expire(rth, tmo, expire)) {
1da177e4 939 tmo >>= 1;
d8d1f30b 940 rthp = &rth->dst.rt_next;
1da177e4
LT
941 continue;
942 }
d8d1f30b 943 *rthp = rth->dst.rt_next;
1da177e4
LT
944 rt_free(rth);
945 goal--;
1da177e4 946 }
22c047cc 947 spin_unlock_bh(rt_hash_lock_addr(k));
1da177e4
LT
948 if (goal <= 0)
949 break;
950 }
951 rover = k;
952
953 if (goal <= 0)
954 goto work_done;
955
		/* Goal is not achieved. We stop the process if:

		   - expire is reduced to zero. Otherwise, expire is halved.
		   - the table is not full.
		   - we are called from interrupt.
		   - the jiffies check is just a fallback/debug loop breaker.
		   We will not spin here for a long time in any case.
		 */
964
965 RT_CACHE_STAT_INC(gc_goal_miss);
966
967 if (expire == 0)
968 break;
969
970 expire >>= 1;
971#if RT_CACHE_DEBUG >= 2
972 printk(KERN_DEBUG "expire>> %u %d %d %d\n", expire,
fc66f95c 973 dst_entries_get_fast(&ipv4_dst_ops), goal, i);
1da177e4
LT
974#endif
975
fc66f95c 976 if (dst_entries_get_fast(&ipv4_dst_ops) < ip_rt_max_size)
1da177e4
LT
977 goto out;
978 } while (!in_softirq() && time_before_eq(jiffies, now));
979
fc66f95c
ED
980 if (dst_entries_get_fast(&ipv4_dst_ops) < ip_rt_max_size)
981 goto out;
982 if (dst_entries_get_slow(&ipv4_dst_ops) < ip_rt_max_size)
1da177e4
LT
983 goto out;
984 if (net_ratelimit())
985 printk(KERN_WARNING "dst cache overflow\n");
986 RT_CACHE_STAT_INC(gc_dst_overflow);
987 return 1;
988
989work_done:
990 expire += ip_rt_gc_min_interval;
991 if (expire > ip_rt_gc_timeout ||
fc66f95c
ED
992 dst_entries_get_fast(&ipv4_dst_ops) < ipv4_dst_ops.gc_thresh ||
993 dst_entries_get_slow(&ipv4_dst_ops) < ipv4_dst_ops.gc_thresh)
1da177e4
LT
994 expire = ip_rt_gc_timeout;
995#if RT_CACHE_DEBUG >= 2
996 printk(KERN_DEBUG "expire++ %u %d %d %d\n", expire,
fc66f95c 997 dst_entries_get_fast(&ipv4_dst_ops), goal, rover);
1da177e4
LT
998#endif
999out: return 0;
1000}
1001
98376387
ED
1002/*
1003 * Returns number of entries in a hash chain that have different hash_inputs
1004 */
1005static int slow_chain_length(const struct rtable *head)
1006{
1007 int length = 0;
1008 const struct rtable *rth = head;
1009
1010 while (rth) {
1011 length += has_noalias(head, rth);
1c31720a 1012 rth = rcu_dereference_protected(rth->dst.rt_next, 1);
98376387
ED
1013 }
1014 return length >> FRACT_BITS;
1015}
1016
b23dd4fe
DM
1017static struct rtable *rt_intern_hash(unsigned hash, struct rtable *rt,
1018 struct sk_buff *skb, int ifindex)
1da177e4 1019{
1c31720a
ED
1020 struct rtable *rth, *cand;
1021 struct rtable __rcu **rthp, **candp;
1da177e4 1022 unsigned long now;
1da177e4
LT
1023 u32 min_score;
1024 int chain_length;
1025 int attempts = !in_softirq();
1026
1027restart:
1028 chain_length = 0;
1029 min_score = ~(u32)0;
1030 cand = NULL;
1031 candp = NULL;
1032 now = jiffies;
1033
d8d1f30b 1034 if (!rt_caching(dev_net(rt->dst.dev))) {
73e42897
NH
1035 /*
1036 * If we're not caching, just tell the caller we
1037 * were successful and don't touch the route. The
1038 * caller hold the sole reference to the cache entry, and
1039 * it will be released when the caller is done with it.
1040 * If we drop it here, the callers have no way to resolve routes
1041 * when we're not caching. Instead, just point *rp at rt, so
1042 * the caller gets a single use out of the route
b6280b47
NH
1043 * Note that we do rt_free on this new route entry, so that
1044 * once its refcount hits zero, we are still able to reap it
1045 * (Thanks Alexey)
27b75c95
ED
1046 * Note: To avoid expensive rcu stuff for this uncached dst,
1047 * we set DST_NOCACHE so that dst_release() can free dst without
1048 * waiting a grace period.
73e42897 1049 */
b6280b47 1050
c7d4426a 1051 rt->dst.flags |= DST_NOCACHE;
c7537967 1052 if (rt->rt_type == RTN_UNICAST || rt_is_output_route(rt)) {
d8d1f30b 1053 int err = arp_bind_neighbour(&rt->dst);
b6280b47
NH
1054 if (err) {
1055 if (net_ratelimit())
1056 printk(KERN_WARNING
1057 "Neighbour table failure & not caching routes.\n");
27b75c95 1058 ip_rt_put(rt);
b23dd4fe 1059 return ERR_PTR(err);
b6280b47
NH
1060 }
1061 }
1062
b6280b47 1063 goto skip_hashing;
1080d709
NH
1064 }
1065
1da177e4
LT
1066 rthp = &rt_hash_table[hash].chain;
1067
22c047cc 1068 spin_lock_bh(rt_hash_lock_addr(hash));
1c31720a
ED
1069 while ((rth = rcu_dereference_protected(*rthp,
1070 lockdep_is_held(rt_hash_lock_addr(hash)))) != NULL) {
e84f84f2 1071 if (rt_is_expired(rth)) {
d8d1f30b 1072 *rthp = rth->dst.rt_next;
29e75252
ED
1073 rt_free(rth);
1074 continue;
1075 }
5e2b61f7 1076 if (compare_keys(rth, rt) && compare_netns(rth, rt)) {
1da177e4 1077 /* Put it first */
d8d1f30b 1078 *rthp = rth->dst.rt_next;
1da177e4
LT
1079 /*
1080 * Since lookup is lockfree, the deletion
1081 * must be visible to another weakly ordered CPU before
1082 * the insertion at the start of the hash chain.
1083 */
d8d1f30b 1084 rcu_assign_pointer(rth->dst.rt_next,
1da177e4
LT
1085 rt_hash_table[hash].chain);
1086 /*
1087 * Since lookup is lockfree, the update writes
1088 * must be ordered for consistency on SMP.
1089 */
1090 rcu_assign_pointer(rt_hash_table[hash].chain, rth);
1091
d8d1f30b 1092 dst_use(&rth->dst, now);
22c047cc 1093 spin_unlock_bh(rt_hash_lock_addr(hash));
1da177e4
LT
1094
1095 rt_drop(rt);
b23dd4fe 1096 if (skb)
d8d1f30b 1097 skb_dst_set(skb, &rth->dst);
b23dd4fe 1098 return rth;
1da177e4
LT
1099 }
1100
d8d1f30b 1101 if (!atomic_read(&rth->dst.__refcnt)) {
1da177e4
LT
1102 u32 score = rt_score(rth);
1103
1104 if (score <= min_score) {
1105 cand = rth;
1106 candp = rthp;
1107 min_score = score;
1108 }
1109 }
1110
1111 chain_length++;
1112
d8d1f30b 1113 rthp = &rth->dst.rt_next;
1da177e4
LT
1114 }
1115
1116 if (cand) {
1117 /* ip_rt_gc_elasticity used to be average length of chain
1118 * length, when exceeded gc becomes really aggressive.
1119 *
1120 * The second limit is less certain. At the moment it allows
1121 * only 2 entries per bucket. We will see.
1122 */
1123 if (chain_length > ip_rt_gc_elasticity) {
d8d1f30b 1124 *candp = cand->dst.rt_next;
1da177e4
LT
1125 rt_free(cand);
1126 }
1080d709 1127 } else {
98376387
ED
1128 if (chain_length > rt_chain_length_max &&
1129 slow_chain_length(rt_hash_table[hash].chain) > rt_chain_length_max) {
d8d1f30b 1130 struct net *net = dev_net(rt->dst.dev);
1080d709 1131 int num = ++net->ipv4.current_rt_cache_rebuild_count;
b35ecb5d 1132 if (!rt_caching(net)) {
1080d709 1133 printk(KERN_WARNING "%s: %d rebuilds is over limit, route caching disabled\n",
d8d1f30b 1134 rt->dst.dev->name, num);
1080d709 1135 }
b35ecb5d 1136 rt_emergency_hash_rebuild(net);
6a2bad70
PE
1137 spin_unlock_bh(rt_hash_lock_addr(hash));
1138
5e2b61f7 1139 hash = rt_hash(rt->rt_key_dst, rt->rt_key_src,
6a2bad70
PE
1140 ifindex, rt_genid(net));
1141 goto restart;
1080d709 1142 }
1da177e4
LT
1143 }
1144
1145 /* Try to bind route to arp only if it is output
1146 route or unicast forwarding path.
1147 */
c7537967 1148 if (rt->rt_type == RTN_UNICAST || rt_is_output_route(rt)) {
d8d1f30b 1149 int err = arp_bind_neighbour(&rt->dst);
1da177e4 1150 if (err) {
22c047cc 1151 spin_unlock_bh(rt_hash_lock_addr(hash));
1da177e4
LT
1152
1153 if (err != -ENOBUFS) {
1154 rt_drop(rt);
b23dd4fe 1155 return ERR_PTR(err);
1da177e4
LT
1156 }
1157
1158 /* Neighbour tables are full and nothing
1159 can be released. Try to shrink route cache,
1160 it is most likely it holds some neighbour records.
1161 */
1162 if (attempts-- > 0) {
1163 int saved_elasticity = ip_rt_gc_elasticity;
1164 int saved_int = ip_rt_gc_min_interval;
1165 ip_rt_gc_elasticity = 1;
1166 ip_rt_gc_min_interval = 0;
569d3645 1167 rt_garbage_collect(&ipv4_dst_ops);
1da177e4
LT
1168 ip_rt_gc_min_interval = saved_int;
1169 ip_rt_gc_elasticity = saved_elasticity;
1170 goto restart;
1171 }
1172
1173 if (net_ratelimit())
7e1b33e5 1174 printk(KERN_WARNING "ipv4: Neighbour table overflow.\n");
1da177e4 1175 rt_drop(rt);
b23dd4fe 1176 return ERR_PTR(-ENOBUFS);
1da177e4
LT
1177 }
1178 }
1179
d8d1f30b 1180 rt->dst.rt_next = rt_hash_table[hash].chain;
1080d709 1181
1da177e4 1182#if RT_CACHE_DEBUG >= 2
d8d1f30b 1183 if (rt->dst.rt_next) {
1da177e4 1184 struct rtable *trt;
b6280b47
NH
1185 printk(KERN_DEBUG "rt_cache @%02x: %pI4",
1186 hash, &rt->rt_dst);
d8d1f30b 1187 for (trt = rt->dst.rt_next; trt; trt = trt->dst.rt_next)
673d57e7 1188 printk(" . %pI4", &trt->rt_dst);
1da177e4
LT
1189 printk("\n");
1190 }
1191#endif
00269b54
ED
1192 /*
1193 * Since lookup is lockfree, we must make sure
25985edc 1194 * previous writes to rt are committed to memory
00269b54
ED
1195 * before making rt visible to other CPUS.
1196 */
1ddbcb00 1197 rcu_assign_pointer(rt_hash_table[hash].chain, rt);
1080d709 1198
22c047cc 1199 spin_unlock_bh(rt_hash_lock_addr(hash));
73e42897 1200
b6280b47 1201skip_hashing:
b23dd4fe 1202 if (skb)
d8d1f30b 1203 skb_dst_set(skb, &rt->dst);
b23dd4fe 1204 return rt;
1da177e4
LT
1205}
1206
6431cbc2
DM
1207static atomic_t __rt_peer_genid = ATOMIC_INIT(0);
1208
1209static u32 rt_peer_genid(void)
1210{
1211 return atomic_read(&__rt_peer_genid);
1212}
1213
1da177e4
LT
1214void rt_bind_peer(struct rtable *rt, int create)
1215{
1da177e4
LT
1216 struct inet_peer *peer;
1217
b534ecf1 1218 peer = inet_getpeer_v4(rt->rt_dst, create);
1da177e4 1219
49e8ab03 1220 if (peer && cmpxchg(&rt->peer, NULL, peer) != NULL)
1da177e4 1221 inet_putpeer(peer);
6431cbc2
DM
1222 else
1223 rt->rt_peer_genid = rt_peer_genid();
1da177e4
LT
1224}
1225
1226/*
1227 * Peer allocation may fail only in serious out-of-memory conditions. However
1228 * we still can generate some output.
1229 * Random ID selection looks a bit dangerous because we have no chances to
1230 * select ID being unique in a reasonable period of time.
1231 * But broken packet identifier may be better than no packet at all.
1232 */
1233static void ip_select_fb_ident(struct iphdr *iph)
1234{
1235 static DEFINE_SPINLOCK(ip_fb_id_lock);
1236 static u32 ip_fallback_id;
1237 u32 salt;
1238
1239 spin_lock_bh(&ip_fb_id_lock);
e448515c 1240 salt = secure_ip_id((__force __be32)ip_fallback_id ^ iph->daddr);
1da177e4
LT
1241 iph->id = htons(salt & 0xFFFF);
1242 ip_fallback_id = salt;
1243 spin_unlock_bh(&ip_fb_id_lock);
1244}
1245
1246void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more)
1247{
1248 struct rtable *rt = (struct rtable *) dst;
1249
1250 if (rt) {
1251 if (rt->peer == NULL)
1252 rt_bind_peer(rt, 1);
1253
1254 /* If peer is attached to destination, it is never detached,
1255 so that we need not to grab a lock to dereference it.
1256 */
1257 if (rt->peer) {
1258 iph->id = htons(inet_getid(rt->peer, more));
1259 return;
1260 }
1261 } else
e905a9ed 1262 printk(KERN_DEBUG "rt_bind_peer(0) @%p\n",
9c2b3328 1263 __builtin_return_address(0));
1da177e4
LT
1264
1265 ip_select_fb_ident(iph);
1266}
4bc2f18b 1267EXPORT_SYMBOL(__ip_select_ident);
1da177e4
LT
1268
1269static void rt_del(unsigned hash, struct rtable *rt)
1270{
1c31720a
ED
1271 struct rtable __rcu **rthp;
1272 struct rtable *aux;
1da177e4 1273
29e75252 1274 rthp = &rt_hash_table[hash].chain;
22c047cc 1275 spin_lock_bh(rt_hash_lock_addr(hash));
1da177e4 1276 ip_rt_put(rt);
1c31720a
ED
1277 while ((aux = rcu_dereference_protected(*rthp,
1278 lockdep_is_held(rt_hash_lock_addr(hash)))) != NULL) {
e84f84f2 1279 if (aux == rt || rt_is_expired(aux)) {
d8d1f30b 1280 *rthp = aux->dst.rt_next;
29e75252
ED
1281 rt_free(aux);
1282 continue;
1da177e4 1283 }
d8d1f30b 1284 rthp = &aux->dst.rt_next;
29e75252 1285 }
22c047cc 1286 spin_unlock_bh(rt_hash_lock_addr(hash));
1da177e4
LT
1287}
1288
ed7865a4 1289/* called in rcu_read_lock() section */
f7655229
AV
1290void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
1291 __be32 saddr, struct net_device *dev)
1da177e4 1292{
ed7865a4 1293 struct in_device *in_dev = __in_dev_get_rcu(dev);
f39925db 1294 struct inet_peer *peer;
317805b8 1295 struct net *net;
1da177e4 1296
1da177e4
LT
1297 if (!in_dev)
1298 return;
1299
c346dca1 1300 net = dev_net(dev);
9d4fb27d
JP
1301 if (new_gw == old_gw || !IN_DEV_RX_REDIRECTS(in_dev) ||
1302 ipv4_is_multicast(new_gw) || ipv4_is_lbcast(new_gw) ||
1303 ipv4_is_zeronet(new_gw))
1da177e4
LT
1304 goto reject_redirect;
1305
1306 if (!IN_DEV_SHARED_MEDIA(in_dev)) {
1307 if (!inet_addr_onlink(in_dev, new_gw, old_gw))
1308 goto reject_redirect;
1309 if (IN_DEV_SEC_REDIRECTS(in_dev) && ip_fib_check_default(new_gw, dev))
1310 goto reject_redirect;
1311 } else {
317805b8 1312 if (inet_addr_type(net, new_gw) != RTN_UNICAST)
1da177e4
LT
1313 goto reject_redirect;
1314 }
1315
f39925db
DM
1316 peer = inet_getpeer_v4(daddr, 1);
1317 if (peer) {
1318 peer->redirect_learned.a4 = new_gw;
e905a9ed 1319
f39925db 1320 inet_putpeer(peer);
1da177e4 1321
f39925db 1322 atomic_inc(&__rt_peer_genid);
1da177e4 1323 }
1da177e4
LT
1324 return;
1325
1326reject_redirect:
1327#ifdef CONFIG_IP_ROUTE_VERBOSE
1328 if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit())
673d57e7
HH
1329 printk(KERN_INFO "Redirect from %pI4 on %s about %pI4 ignored.\n"
1330 " Advised path = %pI4 -> %pI4\n",
1331 &old_gw, dev->name, &new_gw,
1332 &saddr, &daddr);
1da177e4 1333#endif
ed7865a4 1334 ;
1da177e4
LT
1335}
1336
1337static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
1338{
ee6b9673 1339 struct rtable *rt = (struct rtable *)dst;
1da177e4
LT
1340 struct dst_entry *ret = dst;
1341
1342 if (rt) {
d11a4dc1 1343 if (dst->obsolete > 0) {
1da177e4
LT
1344 ip_rt_put(rt);
1345 ret = NULL;
2c8cec5c 1346 } else if (rt->rt_flags & RTCF_REDIRECTED) {
5e2b61f7
DM
1347 unsigned hash = rt_hash(rt->rt_key_dst, rt->rt_key_src,
1348 rt->rt_oif,
e84f84f2 1349 rt_genid(dev_net(dst->dev)));
1da177e4 1350#if RT_CACHE_DEBUG >= 1
673d57e7 1351 printk(KERN_DEBUG "ipv4_negative_advice: redirect to %pI4/%02x dropped\n",
5e2b61f7 1352 &rt->rt_dst, rt->rt_tos);
1da177e4
LT
1353#endif
1354 rt_del(hash, rt);
1355 ret = NULL;
2c8cec5c
DM
1356 } else if (rt->peer &&
1357 rt->peer->pmtu_expires &&
1358 time_after_eq(jiffies, rt->peer->pmtu_expires)) {
1359 unsigned long orig = rt->peer->pmtu_expires;
1360
1361 if (cmpxchg(&rt->peer->pmtu_expires, orig, 0) == orig)
1362 dst_metric_set(dst, RTAX_MTU,
1363 rt->peer->pmtu_orig);
1da177e4
LT
1364 }
1365 }
1366 return ret;
1367}
1368
1369/*
1370 * Algorithm:
1371 * 1. The first ip_rt_redirect_number redirects are sent
1372 * with exponential backoff, then we stop sending them at all,
1373 * assuming that the host ignores our redirects.
1374 * 2. If we did not see packets requiring redirects
1375 * during ip_rt_redirect_silence, we assume that the host
1376 * forgot redirected route and start to send redirects again.
1377 *
1378 * This algorithm is much cheaper and more intelligent than dumb load limiting
1379 * in icmp.c.
1380 *
1381 * NOTE. Do not forget to inhibit load limiting for redirects (redundant)
1382 * and "frag. need" (breaks PMTU discovery) in icmp.c.
1383 */
1384
1385void ip_rt_send_redirect(struct sk_buff *skb)
1386{
511c3f92 1387 struct rtable *rt = skb_rtable(skb);
30038fc6 1388 struct in_device *in_dev;
92d86829 1389 struct inet_peer *peer;
30038fc6 1390 int log_martians;
1da177e4 1391
30038fc6 1392 rcu_read_lock();
d8d1f30b 1393 in_dev = __in_dev_get_rcu(rt->dst.dev);
30038fc6
ED
1394 if (!in_dev || !IN_DEV_TX_REDIRECTS(in_dev)) {
1395 rcu_read_unlock();
1da177e4 1396 return;
30038fc6
ED
1397 }
1398 log_martians = IN_DEV_LOG_MARTIANS(in_dev);
1399 rcu_read_unlock();
1da177e4 1400
92d86829
DM
1401 if (!rt->peer)
1402 rt_bind_peer(rt, 1);
1403 peer = rt->peer;
1404 if (!peer) {
1405 icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, rt->rt_gateway);
1406 return;
1407 }
1408
1da177e4
LT
1409 /* No redirected packets during ip_rt_redirect_silence;
1410 * reset the algorithm.
1411 */
92d86829
DM
1412 if (time_after(jiffies, peer->rate_last + ip_rt_redirect_silence))
1413 peer->rate_tokens = 0;
1da177e4
LT
1414
1415 /* Too many ignored redirects; do not send anything
d8d1f30b 1416 * set dst.rate_last to the last seen redirected packet.
1da177e4 1417 */
92d86829
DM
1418 if (peer->rate_tokens >= ip_rt_redirect_number) {
1419 peer->rate_last = jiffies;
30038fc6 1420 return;
1da177e4
LT
1421 }
1422
1423 /* Check for load limit; set rate_last to the latest sent
1424 * redirect.
1425 */
92d86829 1426 if (peer->rate_tokens == 0 ||
14fb8a76 1427 time_after(jiffies,
92d86829
DM
1428 (peer->rate_last +
1429 (ip_rt_redirect_load << peer->rate_tokens)))) {
1da177e4 1430 icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, rt->rt_gateway);
92d86829
DM
1431 peer->rate_last = jiffies;
1432 ++peer->rate_tokens;
1da177e4 1433#ifdef CONFIG_IP_ROUTE_VERBOSE
30038fc6 1434 if (log_martians &&
92d86829 1435 peer->rate_tokens == ip_rt_redirect_number &&
1da177e4 1436 net_ratelimit())
673d57e7
HH
1437 printk(KERN_WARNING "host %pI4/if%d ignores redirects for %pI4 to %pI4.\n",
1438 &rt->rt_src, rt->rt_iif,
1439 &rt->rt_dst, &rt->rt_gateway);
1da177e4
LT
1440#endif
1441 }
1da177e4
LT
1442}
1443
1444static int ip_error(struct sk_buff *skb)
1445{
511c3f92 1446 struct rtable *rt = skb_rtable(skb);
92d86829 1447 struct inet_peer *peer;
1da177e4 1448 unsigned long now;
92d86829 1449 bool send;
1da177e4
LT
1450 int code;
1451
d8d1f30b 1452 switch (rt->dst.error) {
1da177e4
LT
1453 case EINVAL:
1454 default:
1455 goto out;
1456 case EHOSTUNREACH:
1457 code = ICMP_HOST_UNREACH;
1458 break;
1459 case ENETUNREACH:
1460 code = ICMP_NET_UNREACH;
d8d1f30b 1461 IP_INC_STATS_BH(dev_net(rt->dst.dev),
7c73a6fa 1462 IPSTATS_MIB_INNOROUTES);
1da177e4
LT
1463 break;
1464 case EACCES:
1465 code = ICMP_PKT_FILTERED;
1466 break;
1467 }
1468
92d86829
DM
1469 if (!rt->peer)
1470 rt_bind_peer(rt, 1);
1471 peer = rt->peer;
1472
1473 send = true;
1474 if (peer) {
1475 now = jiffies;
1476 peer->rate_tokens += now - peer->rate_last;
1477 if (peer->rate_tokens > ip_rt_error_burst)
1478 peer->rate_tokens = ip_rt_error_burst;
1479 peer->rate_last = now;
1480 if (peer->rate_tokens >= ip_rt_error_cost)
1481 peer->rate_tokens -= ip_rt_error_cost;
1482 else
1483 send = false;
1da177e4 1484 }
92d86829
DM
1485 if (send)
1486 icmp_send(skb, ICMP_DEST_UNREACH, code, 0);
1da177e4
LT
1487
1488out: kfree_skb(skb);
1489 return 0;
e905a9ed 1490}
1da177e4
LT
1491
1492/*
1493 * The last two values are not from the RFC but
1494 * are needed for AMPRnet AX.25 paths.
1495 */
1496
9b5b5cff 1497static const unsigned short mtu_plateau[] =
1da177e4
LT
1498{32000, 17914, 8166, 4352, 2002, 1492, 576, 296, 216, 128 };
1499
5969f71d 1500static inline unsigned short guess_mtu(unsigned short old_mtu)
1da177e4
LT
1501{
1502 int i;
e905a9ed 1503
1da177e4
LT
1504 for (i = 0; i < ARRAY_SIZE(mtu_plateau); i++)
1505 if (old_mtu > mtu_plateau[i])
1506 return mtu_plateau[i];
1507 return 68;
1508}
1509
b5921910 1510unsigned short ip_rt_frag_needed(struct net *net, struct iphdr *iph,
0010e465
TT
1511 unsigned short new_mtu,
1512 struct net_device *dev)
1da177e4 1513{
1da177e4 1514 unsigned short old_mtu = ntohs(iph->tot_len);
1da177e4 1515 unsigned short est_mtu = 0;
2c8cec5c 1516 struct inet_peer *peer;
1da177e4 1517
2c8cec5c
DM
1518 peer = inet_getpeer_v4(iph->daddr, 1);
1519 if (peer) {
1520 unsigned short mtu = new_mtu;
1da177e4 1521
2c8cec5c
DM
1522 if (new_mtu < 68 || new_mtu >= old_mtu) {
1523 /* BSD 4.2 derived systems incorrectly adjust
1524 * tot_len by the IP header length, and report
1525 * a zero MTU in the ICMP message.
1526 */
1527 if (mtu == 0 &&
1528 old_mtu >= 68 + (iph->ihl << 2))
1529 old_mtu -= iph->ihl << 2;
1530 mtu = guess_mtu(old_mtu);
1531 }
0010e465 1532
2c8cec5c
DM
1533 if (mtu < ip_rt_min_pmtu)
1534 mtu = ip_rt_min_pmtu;
1535 if (!peer->pmtu_expires || mtu < peer->pmtu_learned) {
46af3180
HS
1536 unsigned long pmtu_expires;
1537
1538 pmtu_expires = jiffies + ip_rt_mtu_expires;
1539 if (!pmtu_expires)
1540 pmtu_expires = 1UL;
1541
2c8cec5c
DM
1542 est_mtu = mtu;
1543 peer->pmtu_learned = mtu;
46af3180 1544 peer->pmtu_expires = pmtu_expires;
2c8cec5c 1545 }
1da177e4 1546
2c8cec5c 1547 inet_putpeer(peer);
1da177e4 1548
2c8cec5c 1549 atomic_inc(&__rt_peer_genid);
1da177e4
LT
1550 }
1551 return est_mtu ? : new_mtu;
1552}
1553
2c8cec5c
DM
1554static void check_peer_pmtu(struct dst_entry *dst, struct inet_peer *peer)
1555{
1556 unsigned long expires = peer->pmtu_expires;
1557
46af3180 1558 if (time_before(jiffies, expires)) {
2c8cec5c
DM
1559 u32 orig_dst_mtu = dst_mtu(dst);
1560 if (peer->pmtu_learned < orig_dst_mtu) {
1561 if (!peer->pmtu_orig)
1562 peer->pmtu_orig = dst_metric_raw(dst, RTAX_MTU);
1563 dst_metric_set(dst, RTAX_MTU, peer->pmtu_learned);
1564 }
1565 } else if (cmpxchg(&peer->pmtu_expires, expires, 0) == expires)
1566 dst_metric_set(dst, RTAX_MTU, peer->pmtu_orig);
1567}
1568
1da177e4
LT
1569static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
1570{
2c8cec5c
DM
1571 struct rtable *rt = (struct rtable *) dst;
1572 struct inet_peer *peer;
1573
1574 dst_confirm(dst);
1575
1576 if (!rt->peer)
1577 rt_bind_peer(rt, 1);
1578 peer = rt->peer;
1579 if (peer) {
1580 if (mtu < ip_rt_min_pmtu)
1da177e4 1581 mtu = ip_rt_min_pmtu;
2c8cec5c 1582 if (!peer->pmtu_expires || mtu < peer->pmtu_learned) {
46af3180
HS
1583 unsigned long pmtu_expires;
1584
1585 pmtu_expires = jiffies + ip_rt_mtu_expires;
1586 if (!pmtu_expires)
1587 pmtu_expires = 1UL;
1588
2c8cec5c 1589 peer->pmtu_learned = mtu;
46af3180 1590 peer->pmtu_expires = pmtu_expires;
2c8cec5c
DM
1591
1592 atomic_inc(&__rt_peer_genid);
1593 rt->rt_peer_genid = rt_peer_genid();
1da177e4 1594 }
46af3180 1595 check_peer_pmtu(dst, peer);
1da177e4
LT
1596 }
1597}
1598
f39925db
DM
1599static int check_peer_redir(struct dst_entry *dst, struct inet_peer *peer)
1600{
1601 struct rtable *rt = (struct rtable *) dst;
1602 __be32 orig_gw = rt->rt_gateway;
1603
1604 dst_confirm(&rt->dst);
1605
1606 neigh_release(rt->dst.neighbour);
1607 rt->dst.neighbour = NULL;
1608
1609 rt->rt_gateway = peer->redirect_learned.a4;
1610 if (arp_bind_neighbour(&rt->dst) ||
1611 !(rt->dst.neighbour->nud_state & NUD_VALID)) {
1612 if (rt->dst.neighbour)
1613 neigh_event_send(rt->dst.neighbour, NULL);
1614 rt->rt_gateway = orig_gw;
1615 return -EAGAIN;
1616 } else {
1617 rt->rt_flags |= RTCF_REDIRECTED;
1618 call_netevent_notifiers(NETEVENT_NEIGH_UPDATE,
1619 rt->dst.neighbour);
1620 }
1621 return 0;
1622}
1623
1da177e4
LT
1624static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
1625{
6431cbc2
DM
1626 struct rtable *rt = (struct rtable *) dst;
1627
1628 if (rt_is_expired(rt))
d11a4dc1 1629 return NULL;
6431cbc2 1630 if (rt->rt_peer_genid != rt_peer_genid()) {
2c8cec5c
DM
1631 struct inet_peer *peer;
1632
6431cbc2
DM
1633 if (!rt->peer)
1634 rt_bind_peer(rt, 0);
1635
2c8cec5c
DM
1636 peer = rt->peer;
1637 if (peer && peer->pmtu_expires)
1638 check_peer_pmtu(dst, peer);
1639
f39925db
DM
1640 if (peer && peer->redirect_learned.a4 &&
1641 peer->redirect_learned.a4 != rt->rt_gateway) {
1642 if (check_peer_redir(dst, peer))
1643 return NULL;
1644 }
1645
6431cbc2
DM
1646 rt->rt_peer_genid = rt_peer_genid();
1647 }
d11a4dc1 1648 return dst;
1da177e4
LT
1649}
1650
1651static void ipv4_dst_destroy(struct dst_entry *dst)
1652{
1653 struct rtable *rt = (struct rtable *) dst;
1654 struct inet_peer *peer = rt->peer;
1da177e4 1655
62fa8a84
DM
1656 if (rt->fi) {
1657 fib_info_put(rt->fi);
1658 rt->fi = NULL;
1659 }
1da177e4
LT
1660 if (peer) {
1661 rt->peer = NULL;
1662 inet_putpeer(peer);
1663 }
1da177e4
LT
1664}
1665
1da177e4
LT
1666
1667static void ipv4_link_failure(struct sk_buff *skb)
1668{
1669 struct rtable *rt;
1670
1671 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
1672
511c3f92 1673 rt = skb_rtable(skb);
2c8cec5c
DM
1674 if (rt &&
1675 rt->peer &&
1676 rt->peer->pmtu_expires) {
1677 unsigned long orig = rt->peer->pmtu_expires;
1678
1679 if (cmpxchg(&rt->peer->pmtu_expires, orig, 0) == orig)
1680 dst_metric_set(&rt->dst, RTAX_MTU, rt->peer->pmtu_orig);
1681 }
1da177e4
LT
1682}
1683
1684static int ip_rt_bug(struct sk_buff *skb)
1685{
673d57e7
HH
1686 printk(KERN_DEBUG "ip_rt_bug: %pI4 -> %pI4, %s\n",
1687 &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr,
1da177e4
LT
1688 skb->dev ? skb->dev->name : "?");
1689 kfree_skb(skb);
1690 return 0;
1691}
1692
/*
   We do not cache the source address of the outgoing interface,
   because it is used only by IP RR, TS and SRR options,
   so it is out of the fast path.

   BTW remember: "addr" is allowed to be not aligned
   in IP options!
 */
1701
1702void ip_rt_get_source(u8 *addr, struct rtable *rt)
1703{
a61ced5d 1704 __be32 src;
1da177e4
LT
1705 struct fib_result res;
1706
c7537967 1707 if (rt_is_output_route(rt))
1da177e4 1708 src = rt->rt_src;
ebc0ffae 1709 else {
68a5e3dd
DM
1710 struct flowi4 fl4 = {
1711 .daddr = rt->rt_key_dst,
1712 .saddr = rt->rt_key_src,
1713 .flowi4_tos = rt->rt_tos,
1714 .flowi4_oif = rt->rt_oif,
1715 .flowi4_iif = rt->rt_iif,
1716 .flowi4_mark = rt->rt_mark,
5e2b61f7
DM
1717 };
1718
ebc0ffae 1719 rcu_read_lock();
68a5e3dd 1720 if (fib_lookup(dev_net(rt->dst.dev), &fl4, &res) == 0)
436c3b66 1721 src = FIB_RES_PREFSRC(dev_net(rt->dst.dev), res);
ebc0ffae
ED
1722 else
1723 src = inet_select_addr(rt->dst.dev, rt->rt_gateway,
1da177e4 1724 RT_SCOPE_UNIVERSE);
ebc0ffae
ED
1725 rcu_read_unlock();
1726 }
1da177e4
LT
1727 memcpy(addr, &src, 4);
1728}
1729
c7066f70 1730#ifdef CONFIG_IP_ROUTE_CLASSID
1da177e4
LT
1731static void set_class_tag(struct rtable *rt, u32 tag)
1732{
d8d1f30b
CG
1733 if (!(rt->dst.tclassid & 0xFFFF))
1734 rt->dst.tclassid |= tag & 0xFFFF;
1735 if (!(rt->dst.tclassid & 0xFFFF0000))
1736 rt->dst.tclassid |= tag & 0xFFFF0000;
1da177e4
LT
1737}
1738#endif
1739
0dbaee3b
DM
1740static unsigned int ipv4_default_advmss(const struct dst_entry *dst)
1741{
1742 unsigned int advmss = dst_metric_raw(dst, RTAX_ADVMSS);
1743
1744 if (advmss == 0) {
1745 advmss = max_t(unsigned int, dst->dev->mtu - 40,
1746 ip_rt_min_advmss);
1747 if (advmss > 65535 - 40)
1748 advmss = 65535 - 40;
1749 }
1750 return advmss;
1751}
1752
d33e4553
DM
1753static unsigned int ipv4_default_mtu(const struct dst_entry *dst)
1754{
1755 unsigned int mtu = dst->dev->mtu;
1756
1757 if (unlikely(dst_metric_locked(dst, RTAX_MTU))) {
1758 const struct rtable *rt = (const struct rtable *) dst;
1759
1760 if (rt->rt_gateway != rt->rt_dst && mtu > 576)
1761 mtu = 576;
1762 }
1763
1764 if (mtu > IP_MAX_MTU)
1765 mtu = IP_MAX_MTU;
1766
1767 return mtu;
1768}
1769
68a5e3dd 1770static void rt_init_metrics(struct rtable *rt, const struct flowi4 *oldflp4,
5e2b61f7 1771 struct fib_info *fi)
a4daad6b 1772{
0131ba45
DM
1773 struct inet_peer *peer;
1774 int create = 0;
a4daad6b 1775
0131ba45
DM
1776 /* If a peer entry exists for this destination, we must hook
1777 * it up in order to get at cached metrics.
1778 */
68a5e3dd 1779 if (oldflp4 && (oldflp4->flowi4_flags & FLOWI_FLAG_PRECOW_METRICS))
0131ba45
DM
1780 create = 1;
1781
3c0afdca 1782 rt->peer = peer = inet_getpeer_v4(rt->rt_dst, create);
0131ba45 1783 if (peer) {
3c0afdca 1784 rt->rt_peer_genid = rt_peer_genid();
a4daad6b
DM
1785 if (inet_metrics_new(peer))
1786 memcpy(peer->metrics, fi->fib_metrics,
1787 sizeof(u32) * RTAX_MAX);
1788 dst_init_metrics(&rt->dst, peer->metrics, false);
2c8cec5c
DM
1789
1790 if (peer->pmtu_expires)
1791 check_peer_pmtu(&rt->dst, peer);
f39925db
DM
1792 if (peer->redirect_learned.a4 &&
1793 peer->redirect_learned.a4 != rt->rt_gateway) {
1794 rt->rt_gateway = peer->redirect_learned.a4;
1795 rt->rt_flags |= RTCF_REDIRECTED;
1796 }
0131ba45
DM
1797 } else {
1798 if (fi->fib_metrics != (u32 *) dst_default_metrics) {
1799 rt->fi = fi;
1800 atomic_inc(&fi->fib_clntref);
1801 }
1802 dst_init_metrics(&rt->dst, fi->fib_metrics, true);
a4daad6b
DM
1803 }
1804}
1805
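/* Finish a route cache entry from the FIB result: record the gateway
 * for link-scope nexthops, initialise the metrics, clamp MTU and
 * advmss to sane bounds, copy the classid tags and set the route type.
 */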
68a5e3dd 1806static void rt_set_nexthop(struct rtable *rt, const struct flowi4 *oldflp4,
5e2b61f7 1807 const struct fib_result *res,
982721f3 1808 struct fib_info *fi, u16 type, u32 itag)
1da177e4 1809{
defb3519 1810 struct dst_entry *dst = &rt->dst;
1da177e4
LT
1811
1812 if (fi) {
1813 if (FIB_RES_GW(*res) &&
1814 FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK)
1815 rt->rt_gateway = FIB_RES_GW(*res);
68a5e3dd 1816 rt_init_metrics(rt, oldflp4, fi);
c7066f70 1817#ifdef CONFIG_IP_ROUTE_CLASSID
defb3519 1818 dst->tclassid = FIB_RES_NH(*res).nh_tclassid;
1da177e4 1819#endif
d33e4553 1820 }
defb3519 1821
defb3519
DM
1822 if (dst_mtu(dst) > IP_MAX_MTU)
1823 dst_metric_set(dst, RTAX_MTU, IP_MAX_MTU);
0dbaee3b 1824 if (dst_metric_raw(dst, RTAX_ADVMSS) > 65535 - 40)
defb3519 1825 dst_metric_set(dst, RTAX_ADVMSS, 65535 - 40);
1da177e4 1826
c7066f70 1827#ifdef CONFIG_IP_ROUTE_CLASSID
1da177e4
LT
1828#ifdef CONFIG_IP_MULTIPLE_TABLES
1829 set_class_tag(rt, fib_rules_tclass(res));
1830#endif
1831 set_class_tag(rt, itag);
1832#endif
982721f3 1833 rt->rt_type = type;
1da177e4
LT
1834}
1835
0c4dcd58
DM
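/* Common allocator for IPv4 route cache entries: wrap dst_alloc() on
 * ipv4_dst_ops, set dst.obsolete to -1 and preset the DST_HOST /
 * DST_NOPOLICY / DST_NOXFRM flags.
 */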
1836static struct rtable *rt_dst_alloc(bool nopolicy, bool noxfrm)
1837{
3c7bd1a1 1838 struct rtable *rt = dst_alloc(&ipv4_dst_ops, 1);
0c4dcd58
DM
1839 if (rt) {
1840 rt->dst.obsolete = -1;
1841
0c4dcd58
DM
1842 rt->dst.flags = DST_HOST |
1843 (nopolicy ? DST_NOPOLICY : 0) |
1844 (noxfrm ? DST_NOXFRM : 0);
1845 }
1846 return rt;
1847}
1848
96d36220 1849/* called in rcu_read_lock() section */
9e12bb22 1850static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1da177e4
LT
1851 u8 tos, struct net_device *dev, int our)
1852{
96d36220 1853 unsigned int hash;
1da177e4 1854 struct rtable *rth;
a61ced5d 1855 __be32 spec_dst;
96d36220 1856 struct in_device *in_dev = __in_dev_get_rcu(dev);
1da177e4 1857 u32 itag = 0;
b5f7e755 1858 int err;
1da177e4
LT
1859
1860 /* Primary sanity checks. */
1861
1862 if (in_dev == NULL)
1863 return -EINVAL;
1864
1e637c74 1865 if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
f97c1e0c 1866 ipv4_is_loopback(saddr) || skb->protocol != htons(ETH_P_IP))
1da177e4
LT
1867 goto e_inval;
1868
f97c1e0c
JP
1869 if (ipv4_is_zeronet(saddr)) {
1870 if (!ipv4_is_local_multicast(daddr))
1da177e4
LT
1871 goto e_inval;
1872 spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK);
b5f7e755 1873 } else {
5c04c819
MS
1874 err = fib_validate_source(skb, saddr, 0, tos, 0, dev, &spec_dst,
1875 &itag);
b5f7e755
ED
1876 if (err < 0)
1877 goto e_err;
1878 }
0c4dcd58 1879 rth = rt_dst_alloc(IN_DEV_CONF_GET(in_dev, NOPOLICY), false);
1da177e4
LT
1880 if (!rth)
1881 goto e_nobufs;
1882
d8d1f30b 1883 rth->dst.output = ip_rt_bug;
1da177e4 1884
5e2b61f7 1885 rth->rt_key_dst = daddr;
1da177e4 1886 rth->rt_dst = daddr;
5e2b61f7
DM
1887 rth->rt_tos = tos;
1888 rth->rt_mark = skb->mark;
1889 rth->rt_key_src = saddr;
1da177e4 1890 rth->rt_src = saddr;
c7066f70 1891#ifdef CONFIG_IP_ROUTE_CLASSID
d8d1f30b 1892 rth->dst.tclassid = itag;
1da177e4 1893#endif
1b86a58f 1894 rth->rt_route_iif = dev->ifindex;
5e2b61f7 1895 rth->rt_iif = dev->ifindex;
d8d1f30b
CG
1896 rth->dst.dev = init_net.loopback_dev;
1897 dev_hold(rth->dst.dev);
5e2b61f7 1898 rth->rt_oif = 0;
1da177e4
LT
1899 rth->rt_gateway = daddr;
1900 rth->rt_spec_dst= spec_dst;
e84f84f2 1901 rth->rt_genid = rt_genid(dev_net(dev));
1da177e4 1902 rth->rt_flags = RTCF_MULTICAST;
29e75252 1903 rth->rt_type = RTN_MULTICAST;
1da177e4 1904 if (our) {
d8d1f30b 1905 rth->dst.input= ip_local_deliver;
1da177e4
LT
1906 rth->rt_flags |= RTCF_LOCAL;
1907 }
1908
1909#ifdef CONFIG_IP_MROUTE
f97c1e0c 1910 if (!ipv4_is_local_multicast(daddr) && IN_DEV_MFORWARD(in_dev))
d8d1f30b 1911 rth->dst.input = ip_mr_input;
1da177e4
LT
1912#endif
1913 RT_CACHE_STAT_INC(in_slow_mc);
1914
e84f84f2 1915 hash = rt_hash(daddr, saddr, dev->ifindex, rt_genid(dev_net(dev)));
b23dd4fe
DM
1916 rth = rt_intern_hash(hash, rth, skb, dev->ifindex);
1917 err = 0;
1918 if (IS_ERR(rth))
1919 err = PTR_ERR(rth);
	return err; /* do not fall through to the error labels below */
1da177e4
LT
1920
1921e_nobufs:
1da177e4 1922 return -ENOBUFS;
1da177e4 1923e_inval:
96d36220 1924 return -EINVAL;
b5f7e755 1925e_err:
b5f7e755 1926 return err;
1da177e4
LT
1927}
1928
1929
1930static void ip_handle_martian_source(struct net_device *dev,
1931 struct in_device *in_dev,
1932 struct sk_buff *skb,
9e12bb22
AV
1933 __be32 daddr,
1934 __be32 saddr)
1da177e4
LT
1935{
1936 RT_CACHE_STAT_INC(in_martian_src);
1937#ifdef CONFIG_IP_ROUTE_VERBOSE
1938 if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit()) {
1939 /*
1940 * RFC1812 recommendation: if the source is martian,
1941 * the only hint is the MAC header.
1942 */
673d57e7
HH
1943 printk(KERN_WARNING "martian source %pI4 from %pI4, on dev %s\n",
1944 &daddr, &saddr, dev->name);
98e399f8 1945 if (dev->hard_header_len && skb_mac_header_was_set(skb)) {
1da177e4 1946 int i;
98e399f8 1947 const unsigned char *p = skb_mac_header(skb);
1da177e4
LT
1948 printk(KERN_WARNING "ll header: ");
1949 for (i = 0; i < dev->hard_header_len; i++, p++) {
1950 printk("%02x", *p);
1951 if (i < (dev->hard_header_len - 1))
1952 printk(":");
1953 }
1954 printk("\n");
1955 }
1956 }
1957#endif
1958}
1959
47360228 1960/* called in rcu_read_lock() section */
5969f71d 1961static int __mkroute_input(struct sk_buff *skb,
982721f3 1962 const struct fib_result *res,
5969f71d
SH
1963 struct in_device *in_dev,
1964 __be32 daddr, __be32 saddr, u32 tos,
1965 struct rtable **result)
1da177e4 1966{
1da177e4
LT
1967 struct rtable *rth;
1968 int err;
1969 struct in_device *out_dev;
47360228 1970 unsigned int flags = 0;
d9c9df8c
AV
1971 __be32 spec_dst;
1972 u32 itag;
1da177e4
LT
1973
1974 /* get a working reference to the output device */
47360228 1975 out_dev = __in_dev_get_rcu(FIB_RES_DEV(*res));
1da177e4
LT
1976 if (out_dev == NULL) {
1977 if (net_ratelimit())
1978 printk(KERN_CRIT "Bug in ip_route_input" \
1979 "_slow(). Please, report\n");
1980 return -EINVAL;
1981 }
1982
1983
5c04c819
MS
1984 err = fib_validate_source(skb, saddr, daddr, tos, FIB_RES_OIF(*res),
1985 in_dev->dev, &spec_dst, &itag);
1da177e4 1986 if (err < 0) {
e905a9ed 1987 ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr,
1da177e4 1988 saddr);
e905a9ed 1989
1da177e4
LT
1990 goto cleanup;
1991 }
1992
1993 if (err)
1994 flags |= RTCF_DIRECTSRC;
1995
51b77cae 1996 if (out_dev == in_dev && err &&
1da177e4
LT
1997 (IN_DEV_SHARED_MEDIA(out_dev) ||
1998 inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res))))
1999 flags |= RTCF_DOREDIRECT;
2000
2001 if (skb->protocol != htons(ETH_P_IP)) {
2002 /* Not IP (i.e. ARP). Do not create a route if it is
2003 * invalid for proxy arp. DNAT routes are always valid.
65324144
JDB
2004 *
2005 * The proxy arp feature has been extended to allow ARP
2006 * replies back on the same interface, to support
2007 * Private VLAN switch technologies. See arp.c.
1da177e4 2008 */
65324144
JDB
2009 if (out_dev == in_dev &&
2010 IN_DEV_PROXY_ARP_PVLAN(in_dev) == 0) {
1da177e4
LT
2011 err = -EINVAL;
2012 goto cleanup;
2013 }
2014 }
2015
0c4dcd58
DM
2016 rth = rt_dst_alloc(IN_DEV_CONF_GET(in_dev, NOPOLICY),
2017 IN_DEV_CONF_GET(out_dev, NOXFRM));
1da177e4
LT
2018 if (!rth) {
2019 err = -ENOBUFS;
2020 goto cleanup;
2021 }
2022
5e2b61f7 2023 rth->rt_key_dst = daddr;
1da177e4 2024 rth->rt_dst = daddr;
5e2b61f7
DM
2025 rth->rt_tos = tos;
2026 rth->rt_mark = skb->mark;
2027 rth->rt_key_src = saddr;
1da177e4
LT
2028 rth->rt_src = saddr;
2029 rth->rt_gateway = daddr;
1b86a58f 2030 rth->rt_route_iif = in_dev->dev->ifindex;
5e2b61f7 2031 rth->rt_iif = in_dev->dev->ifindex;
d8d1f30b
CG
2032 rth->dst.dev = (out_dev)->dev;
2033 dev_hold(rth->dst.dev);
5e2b61f7 2034 rth->rt_oif = 0;
1da177e4
LT
2035 rth->rt_spec_dst= spec_dst;
2036
d8d1f30b
CG
2037 rth->dst.input = ip_forward;
2038 rth->dst.output = ip_output;
2039 rth->rt_genid = rt_genid(dev_net(rth->dst.dev));
1da177e4 2040
5e2b61f7 2041 rt_set_nexthop(rth, NULL, res, res->fi, res->type, itag);
1da177e4
LT
2042
2043 rth->rt_flags = flags;
2044
2045 *result = rth;
2046 err = 0;
2047 cleanup:
1da177e4 2048 return err;
e905a9ed 2049}
1da177e4 2050
5969f71d
SH
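/* Build and insert an input (forwarding) route: pick a multipath
 * nexthop when the FIB entry has several, create the cache entry via
 * __mkroute_input() and intern it in the hash keyed by
 * (daddr, saddr, iif).
 */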
2051static int ip_mkroute_input(struct sk_buff *skb,
2052 struct fib_result *res,
68a5e3dd 2053 const struct flowi4 *fl4,
5969f71d
SH
2054 struct in_device *in_dev,
2055 __be32 daddr, __be32 saddr, u32 tos)
1da177e4 2056{
7abaa27c 2057 struct rtable* rth = NULL;
1da177e4
LT
2058 int err;
2059 unsigned hash;
2060
2061#ifdef CONFIG_IP_ROUTE_MULTIPATH
ff3fccb3 2062 if (res->fi && res->fi->fib_nhs > 1)
1b7fe593 2063 fib_select_multipath(res);
1da177e4
LT
2064#endif
2065
2066 /* create a routing cache entry */
2067 err = __mkroute_input(skb, res, in_dev, daddr, saddr, tos, &rth);
2068 if (err)
2069 return err;
1da177e4
LT
2070
2071 /* put it into the cache */
68a5e3dd 2072 hash = rt_hash(daddr, saddr, fl4->flowi4_iif,
d8d1f30b 2073 rt_genid(dev_net(rth->dst.dev)));
68a5e3dd 2074 rth = rt_intern_hash(hash, rth, skb, fl4->flowi4_iif);
b23dd4fe
DM
2075 if (IS_ERR(rth))
2076 return PTR_ERR(rth);
2077 return 0;
1da177e4
LT
2078}
2079
1da177e4
LT
2080/*
2081 * NOTE. We drop all packets that have local source
2082 * addresses, because every properly looped-back packet
2083 * must already have the correct destination attached by the output routine.
2084 *
2085 * This approach solves two big problems:
2086 * 1. Non-simplex devices are handled properly.
2087 * 2. IP spoofing attempts are filtered with a 100% guarantee.
ebc0ffae 2088 * called with rcu_read_lock()
1da177e4
LT
2089 */
2090
9e12bb22 2091static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1da177e4
LT
2092 u8 tos, struct net_device *dev)
2093{
2094 struct fib_result res;
96d36220 2095 struct in_device *in_dev = __in_dev_get_rcu(dev);
68a5e3dd 2096 struct flowi4 fl4;
1da177e4
LT
2097 unsigned flags = 0;
2098 u32 itag = 0;
2099 struct rtable * rth;
2100 unsigned hash;
9e12bb22 2101 __be32 spec_dst;
1da177e4 2102 int err = -EINVAL;
c346dca1 2103 struct net * net = dev_net(dev);
1da177e4
LT
2104
2105 /* IP on this device is disabled. */
2106
2107 if (!in_dev)
2108 goto out;
2109
2110 /* Check for the most weird martians, which cannot be detected
2111 by fib_lookup.
2112 */
2113
1e637c74 2114 if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
f97c1e0c 2115 ipv4_is_loopback(saddr))
1da177e4
LT
2116 goto martian_source;
2117
27a954bd 2118 if (ipv4_is_lbcast(daddr) || (saddr == 0 && daddr == 0))
1da177e4
LT
2119 goto brd_input;
2120
2121 /* Accept zero addresses only to limited broadcast;
2122 * I do not even know whether to fix it or not. Waiting for complaints :-)
2123 */
f97c1e0c 2124 if (ipv4_is_zeronet(saddr))
1da177e4
LT
2125 goto martian_source;
2126
27a954bd 2127 if (ipv4_is_zeronet(daddr) || ipv4_is_loopback(daddr))
1da177e4
LT
2128 goto martian_destination;
2129
2130 /*
2131 * Now we are ready to route packet.
2132 */
68a5e3dd
DM
2133 fl4.flowi4_oif = 0;
2134 fl4.flowi4_iif = dev->ifindex;
2135 fl4.flowi4_mark = skb->mark;
2136 fl4.flowi4_tos = tos;
2137 fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
2138 fl4.daddr = daddr;
2139 fl4.saddr = saddr;
2140 err = fib_lookup(net, &fl4, &res);
ebc0ffae 2141 if (err != 0) {
1da177e4 2142 if (!IN_DEV_FORWARD(in_dev))
2c2910a4 2143 goto e_hostunreach;
1da177e4
LT
2144 goto no_route;
2145 }
1da177e4
LT
2146
2147 RT_CACHE_STAT_INC(in_slow_tot);
2148
2149 if (res.type == RTN_BROADCAST)
2150 goto brd_input;
2151
2152 if (res.type == RTN_LOCAL) {
5c04c819 2153 err = fib_validate_source(skb, saddr, daddr, tos,
ebc0ffae 2154 net->loopback_dev->ifindex,
5c04c819 2155 dev, &spec_dst, &itag);
b5f7e755
ED
2156 if (err < 0)
2157 goto martian_source_keep_err;
2158 if (err)
1da177e4
LT
2159 flags |= RTCF_DIRECTSRC;
2160 spec_dst = daddr;
2161 goto local_input;
2162 }
2163
2164 if (!IN_DEV_FORWARD(in_dev))
2c2910a4 2165 goto e_hostunreach;
1da177e4
LT
2166 if (res.type != RTN_UNICAST)
2167 goto martian_destination;
2168
68a5e3dd 2169 err = ip_mkroute_input(skb, &res, &fl4, in_dev, daddr, saddr, tos);
1da177e4
LT
2170out: return err;
2171
2172brd_input:
2173 if (skb->protocol != htons(ETH_P_IP))
2174 goto e_inval;
2175
f97c1e0c 2176 if (ipv4_is_zeronet(saddr))
1da177e4
LT
2177 spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK);
2178 else {
5c04c819
MS
2179 err = fib_validate_source(skb, saddr, 0, tos, 0, dev, &spec_dst,
2180 &itag);
1da177e4 2181 if (err < 0)
b5f7e755 2182 goto martian_source_keep_err;
1da177e4
LT
2183 if (err)
2184 flags |= RTCF_DIRECTSRC;
2185 }
2186 flags |= RTCF_BROADCAST;
2187 res.type = RTN_BROADCAST;
2188 RT_CACHE_STAT_INC(in_brd);
2189
2190local_input:
0c4dcd58 2191 rth = rt_dst_alloc(IN_DEV_CONF_GET(in_dev, NOPOLICY), false);
1da177e4
LT
2192 if (!rth)
2193 goto e_nobufs;
2194
d8d1f30b 2195 rth->dst.output= ip_rt_bug;
e84f84f2 2196 rth->rt_genid = rt_genid(net);
1da177e4 2197
5e2b61f7 2198 rth->rt_key_dst = daddr;
1da177e4 2199 rth->rt_dst = daddr;
5e2b61f7
DM
2200 rth->rt_tos = tos;
2201 rth->rt_mark = skb->mark;
2202 rth->rt_key_src = saddr;
1da177e4 2203 rth->rt_src = saddr;
c7066f70 2204#ifdef CONFIG_IP_ROUTE_CLASSID
d8d1f30b 2205 rth->dst.tclassid = itag;
1da177e4 2206#endif
1b86a58f 2207 rth->rt_route_iif = dev->ifindex;
5e2b61f7 2208 rth->rt_iif = dev->ifindex;
d8d1f30b
CG
2209 rth->dst.dev = net->loopback_dev;
2210 dev_hold(rth->dst.dev);
1da177e4
LT
2211 rth->rt_gateway = daddr;
2212 rth->rt_spec_dst= spec_dst;
d8d1f30b 2213 rth->dst.input= ip_local_deliver;
1da177e4
LT
2214 rth->rt_flags = flags|RTCF_LOCAL;
2215 if (res.type == RTN_UNREACHABLE) {
d8d1f30b
CG
2216 rth->dst.input= ip_error;
2217 rth->dst.error= -err;
1da177e4
LT
2218 rth->rt_flags &= ~RTCF_LOCAL;
2219 }
2220 rth->rt_type = res.type;
68a5e3dd
DM
2221 hash = rt_hash(daddr, saddr, fl4.flowi4_iif, rt_genid(net));
2222 rth = rt_intern_hash(hash, rth, skb, fl4.flowi4_iif);
b23dd4fe
DM
2223 err = 0;
2224 if (IS_ERR(rth))
2225 err = PTR_ERR(rth);
ebc0ffae 2226 goto out;
1da177e4
LT
2227
2228no_route:
2229 RT_CACHE_STAT_INC(in_no_route);
2230 spec_dst = inet_select_addr(dev, 0, RT_SCOPE_UNIVERSE);
2231 res.type = RTN_UNREACHABLE;
7f53878d
MC
2232 if (err == -ESRCH)
2233 err = -ENETUNREACH;
1da177e4
LT
2234 goto local_input;
2235
2236 /*
2237 * Do not cache martian addresses: they should be logged (RFC1812)
2238 */
2239martian_destination:
2240 RT_CACHE_STAT_INC(in_martian_dst);
2241#ifdef CONFIG_IP_ROUTE_VERBOSE
2242 if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit())
673d57e7
HH
2243 printk(KERN_WARNING "martian destination %pI4 from %pI4, dev %s\n",
2244 &daddr, &saddr, dev->name);
1da177e4 2245#endif
2c2910a4
DE
2246
2247e_hostunreach:
e905a9ed 2248 err = -EHOSTUNREACH;
ebc0ffae 2249 goto out;
2c2910a4 2250
1da177e4
LT
2251e_inval:
2252 err = -EINVAL;
ebc0ffae 2253 goto out;
1da177e4
LT
2254
2255e_nobufs:
2256 err = -ENOBUFS;
ebc0ffae 2257 goto out;
1da177e4
LT
2258
2259martian_source:
b5f7e755
ED
2260 err = -EINVAL;
2261martian_source_keep_err:
1da177e4 2262 ip_handle_martian_source(dev, in_dev, skb, daddr, saddr);
ebc0ffae 2263 goto out;
1da177e4
LT
2264}
2265
407eadd9
ED
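/* Main entry point for routing a received packet.  Probe the route
 * cache under RCU and attach a hit to the skb (taking a reference or
 * not, depending on 'noref'); on a miss, multicast destinations are
 * checked against the device filters and go through ip_route_input_mc(),
 * everything else falls back to ip_route_input_slow().  The
 * ip_route_input()/ip_route_input_noref() helpers used elsewhere are,
 * presumably, thin wrappers around this function selecting 'noref'.
 */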
2266int ip_route_input_common(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2267 u8 tos, struct net_device *dev, bool noref)
1da177e4
LT
2268{
2269 struct rtable * rth;
2270 unsigned hash;
2271 int iif = dev->ifindex;
b5921910 2272 struct net *net;
96d36220 2273 int res;
1da177e4 2274
c346dca1 2275 net = dev_net(dev);
1080d709 2276
96d36220
ED
2277 rcu_read_lock();
2278
1080d709
NH
2279 if (!rt_caching(net))
2280 goto skip_cache;
2281
1da177e4 2282 tos &= IPTOS_RT_MASK;
e84f84f2 2283 hash = rt_hash(daddr, saddr, iif, rt_genid(net));
1da177e4 2284
1da177e4 2285 for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
d8d1f30b 2286 rth = rcu_dereference(rth->dst.rt_next)) {
5e2b61f7
DM
2287 if ((((__force u32)rth->rt_key_dst ^ (__force u32)daddr) |
2288 ((__force u32)rth->rt_key_src ^ (__force u32)saddr) |
2289 (rth->rt_iif ^ iif) |
2290 rth->rt_oif |
2291 (rth->rt_tos ^ tos)) == 0 &&
2292 rth->rt_mark == skb->mark &&
d8d1f30b 2293 net_eq(dev_net(rth->dst.dev), net) &&
e84f84f2 2294 !rt_is_expired(rth)) {
407eadd9 2295 if (noref) {
d8d1f30b
CG
2296 dst_use_noref(&rth->dst, jiffies);
2297 skb_dst_set_noref(skb, &rth->dst);
407eadd9 2298 } else {
d8d1f30b
CG
2299 dst_use(&rth->dst, jiffies);
2300 skb_dst_set(skb, &rth->dst);
407eadd9 2301 }
1da177e4
LT
2302 RT_CACHE_STAT_INC(in_hit);
2303 rcu_read_unlock();
1da177e4
LT
2304 return 0;
2305 }
2306 RT_CACHE_STAT_INC(in_hlist_search);
2307 }
1da177e4 2308
1080d709 2309skip_cache:
1da177e4
LT
2310 /* Multicast recognition logic is moved from the route cache to here.
2311 The problem was that too many Ethernet cards have broken/missing
2312 hardware multicast filters :-( As a result, a host on a multicast
2313 network acquires a lot of useless route cache entries, e.g. from
2314 SDR messages from all over the world. Now we try to get rid of them.
2315 Really, provided the software IP multicast filter is organized
2316 reasonably (at least, hashed), it does not result in a slowdown
2317 compared with route cache reject entries.
2318 Note that multicast routers are not affected, because a
2319 route cache entry is created eventually.
2320 */
f97c1e0c 2321 if (ipv4_is_multicast(daddr)) {
96d36220 2322 struct in_device *in_dev = __in_dev_get_rcu(dev);
1da177e4 2323
96d36220 2324 if (in_dev) {
dbdd9a52
DM
2325 int our = ip_check_mc_rcu(in_dev, daddr, saddr,
2326 ip_hdr(skb)->protocol);
1da177e4
LT
2327 if (our
2328#ifdef CONFIG_IP_MROUTE
9d4fb27d
JP
2329 ||
2330 (!ipv4_is_local_multicast(daddr) &&
2331 IN_DEV_MFORWARD(in_dev))
1da177e4 2332#endif
9d4fb27d 2333 ) {
96d36220
ED
2334 int res = ip_route_input_mc(skb, daddr, saddr,
2335 tos, dev, our);
1da177e4 2336 rcu_read_unlock();
96d36220 2337 return res;
1da177e4
LT
2338 }
2339 }
2340 rcu_read_unlock();
2341 return -EINVAL;
2342 }
96d36220
ED
2343 res = ip_route_input_slow(skb, daddr, saddr, tos, dev);
2344 rcu_read_unlock();
2345 return res;
1da177e4 2346}
407eadd9 2347EXPORT_SYMBOL(ip_route_input_common);
1da177e4 2348
ebc0ffae 2349/* called with rcu_read_lock() */
982721f3 2350static struct rtable *__mkroute_output(const struct fib_result *res,
68a5e3dd
DM
2351 const struct flowi4 *fl4,
2352 const struct flowi4 *oldflp4,
5ada5527
DM
2353 struct net_device *dev_out,
2354 unsigned int flags)
1da177e4 2355{
982721f3 2356 struct fib_info *fi = res->fi;
68a5e3dd 2357 u32 tos = RT_FL_TOS(oldflp4);
5ada5527 2358 struct in_device *in_dev;
982721f3 2359 u16 type = res->type;
5ada5527 2360 struct rtable *rth;
1da177e4 2361
68a5e3dd 2362 if (ipv4_is_loopback(fl4->saddr) && !(dev_out->flags & IFF_LOOPBACK))
5ada5527 2363 return ERR_PTR(-EINVAL);
1da177e4 2364
68a5e3dd 2365 if (ipv4_is_lbcast(fl4->daddr))
982721f3 2366 type = RTN_BROADCAST;
68a5e3dd 2367 else if (ipv4_is_multicast(fl4->daddr))
982721f3 2368 type = RTN_MULTICAST;
68a5e3dd 2369 else if (ipv4_is_zeronet(fl4->daddr))
5ada5527 2370 return ERR_PTR(-EINVAL);
1da177e4
LT
2371
2372 if (dev_out->flags & IFF_LOOPBACK)
2373 flags |= RTCF_LOCAL;
2374
dd28d1a0 2375 in_dev = __in_dev_get_rcu(dev_out);
ebc0ffae 2376 if (!in_dev)
5ada5527 2377 return ERR_PTR(-EINVAL);
ebc0ffae 2378
982721f3 2379 if (type == RTN_BROADCAST) {
1da177e4 2380 flags |= RTCF_BROADCAST | RTCF_LOCAL;
982721f3
DM
2381 fi = NULL;
2382 } else if (type == RTN_MULTICAST) {
dd28d1a0 2383 flags |= RTCF_MULTICAST | RTCF_LOCAL;
68a5e3dd
DM
2384 if (!ip_check_mc_rcu(in_dev, oldflp4->daddr, oldflp4->saddr,
2385 oldflp4->flowi4_proto))
1da177e4
LT
2386 flags &= ~RTCF_LOCAL;
2387 /* If a multicast route does not exist, use
dd28d1a0
ED
2388 * the default one, but do not gateway in this case.
2389 * Yes, it is a hack.
1da177e4 2390 */
982721f3
DM
2391 if (fi && res->prefixlen < 4)
2392 fi = NULL;
1da177e4
LT
2393 }
2394
0c4dcd58
DM
2395 rth = rt_dst_alloc(IN_DEV_CONF_GET(in_dev, NOPOLICY),
2396 IN_DEV_CONF_GET(in_dev, NOXFRM));
8391d07b 2397 if (!rth)
5ada5527 2398 return ERR_PTR(-ENOBUFS);
8391d07b 2399
68a5e3dd 2400 rth->rt_key_dst = oldflp4->daddr;
5e2b61f7 2401 rth->rt_tos = tos;
68a5e3dd
DM
2402 rth->rt_key_src = oldflp4->saddr;
2403 rth->rt_oif = oldflp4->flowi4_oif;
2404 rth->rt_mark = oldflp4->flowi4_mark;
2405 rth->rt_dst = fl4->daddr;
2406 rth->rt_src = fl4->saddr;
1b86a58f
OH
2407 rth->rt_route_iif = 0;
2408 rth->rt_iif = oldflp4->flowi4_oif ? : dev_out->ifindex;
e905a9ed 2409 /* get references to the devices that are to be held by the routing
1da177e4 2410 cache entry */
d8d1f30b 2411 rth->dst.dev = dev_out;
1da177e4 2412 dev_hold(dev_out);
68a5e3dd
DM
2413 rth->rt_gateway = fl4->daddr;
2414 rth->rt_spec_dst= fl4->saddr;
1da177e4 2415
d8d1f30b 2416 rth->dst.output=ip_output;
e84f84f2 2417 rth->rt_genid = rt_genid(dev_net(dev_out));
1da177e4
LT
2418
2419 RT_CACHE_STAT_INC(out_slow_tot);
2420
2421 if (flags & RTCF_LOCAL) {
d8d1f30b 2422 rth->dst.input = ip_local_deliver;
68a5e3dd 2423 rth->rt_spec_dst = fl4->daddr;
1da177e4
LT
2424 }
2425 if (flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
68a5e3dd 2426 rth->rt_spec_dst = fl4->saddr;
e905a9ed 2427 if (flags & RTCF_LOCAL &&
1da177e4 2428 !(dev_out->flags & IFF_LOOPBACK)) {
d8d1f30b 2429 rth->dst.output = ip_mc_output;
1da177e4
LT
2430 RT_CACHE_STAT_INC(out_slow_mc);
2431 }
2432#ifdef CONFIG_IP_MROUTE
982721f3 2433 if (type == RTN_MULTICAST) {
1da177e4 2434 if (IN_DEV_MFORWARD(in_dev) &&
68a5e3dd 2435 !ipv4_is_local_multicast(oldflp4->daddr)) {
d8d1f30b
CG
2436 rth->dst.input = ip_mr_input;
2437 rth->dst.output = ip_mc_output;
1da177e4
LT
2438 }
2439 }
2440#endif
2441 }
2442
68a5e3dd 2443 rt_set_nexthop(rth, oldflp4, res, fi, type, 0);
1da177e4
LT
2444
2445 rth->rt_flags = flags;
5ada5527 2446 return rth;
1da177e4
LT
2447}
2448
1da177e4
LT
2449/*
2450 * Major route resolver routine.
0197aa38 2451 * called with rcu_read_lock();
1da177e4
LT
2452 */
2453
b23dd4fe 2454static struct rtable *ip_route_output_slow(struct net *net,
68a5e3dd 2455 const struct flowi4 *oldflp4)
1da177e4 2456{
68a5e3dd
DM
2457 u32 tos = RT_FL_TOS(oldflp4);
2458 struct flowi4 fl4;
1da177e4 2459 struct fib_result res;
0197aa38 2460 unsigned int flags = 0;
1da177e4 2461 struct net_device *dev_out = NULL;
5ada5527 2462 struct rtable *rth;
1da177e4
LT
2463
2464 res.fi = NULL;
2465#ifdef CONFIG_IP_MULTIPLE_TABLES
2466 res.r = NULL;
2467#endif
2468
68a5e3dd
DM
2469 fl4.flowi4_oif = oldflp4->flowi4_oif;
2470 fl4.flowi4_iif = net->loopback_dev->ifindex;
2471 fl4.flowi4_mark = oldflp4->flowi4_mark;
2472 fl4.daddr = oldflp4->daddr;
2473 fl4.saddr = oldflp4->saddr;
2474 fl4.flowi4_tos = tos & IPTOS_RT_MASK;
2475 fl4.flowi4_scope = ((tos & RTO_ONLINK) ?
44713b67
DM
2476 RT_SCOPE_LINK : RT_SCOPE_UNIVERSE);
2477
010c2708 2478 rcu_read_lock();
68a5e3dd 2479 if (oldflp4->saddr) {
b23dd4fe 2480 rth = ERR_PTR(-EINVAL);
68a5e3dd
DM
2481 if (ipv4_is_multicast(oldflp4->saddr) ||
2482 ipv4_is_lbcast(oldflp4->saddr) ||
2483 ipv4_is_zeronet(oldflp4->saddr))
1da177e4
LT
2484 goto out;
2485
1da177e4
LT
2486 /* I removed the check for oif == dev_out->oif here.
2487 It was wrong for two reasons:
1ab35276
DL
2488 1. ip_dev_find(net, saddr) can return the wrong iface if saddr
2489 is assigned to multiple interfaces.
1da177e4
LT
2490 2. Moreover, we are allowed to send packets with the saddr
2491 of another iface. --ANK
2492 */
2493
68a5e3dd
DM
2494 if (oldflp4->flowi4_oif == 0 &&
2495 (ipv4_is_multicast(oldflp4->daddr) ||
2496 ipv4_is_lbcast(oldflp4->daddr))) {
a210d01a 2497 /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
68a5e3dd 2498 dev_out = __ip_dev_find(net, oldflp4->saddr, false);
a210d01a
JA
2499 if (dev_out == NULL)
2500 goto out;
2501
1da177e4
LT
2502 /* Special hack: the user can direct multicasts
2503 and limited broadcast via the necessary interface
2504 without fiddling with IP_MULTICAST_IF or IP_PKTINFO.
2505 This hack is not just for fun, it allows
2506 vic, vat and friends to work.
2507 They bind the socket to loopback, set the ttl to zero
2508 and expect that it will work.
2509 From the viewpoint of the routing cache they are broken,
2510 because we are not allowed to build a multicast path
2511 with a loopback source addr (look, the routing cache
2512 cannot know that the ttl is zero, so that the packet
2513 will not leave this host and the route is valid).
2514 Luckily, this hack is a good workaround.
2515 */
2516
68a5e3dd 2517 fl4.flowi4_oif = dev_out->ifindex;
1da177e4
LT
2518 goto make_route;
2519 }
a210d01a 2520
68a5e3dd 2521 if (!(oldflp4->flowi4_flags & FLOWI_FLAG_ANYSRC)) {
a210d01a 2522 /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
68a5e3dd 2523 if (!__ip_dev_find(net, oldflp4->saddr, false))
a210d01a 2524 goto out;
a210d01a 2525 }
1da177e4
LT
2526 }
2527
2528
68a5e3dd
DM
2529 if (oldflp4->flowi4_oif) {
2530 dev_out = dev_get_by_index_rcu(net, oldflp4->flowi4_oif);
b23dd4fe 2531 rth = ERR_PTR(-ENODEV);
1da177e4
LT
2532 if (dev_out == NULL)
2533 goto out;
e5ed6399
HX
2534
2535 /* RACE: Check return value of inet_select_addr instead. */
fc75fc83 2536 if (!(dev_out->flags & IFF_UP) || !__in_dev_get_rcu(dev_out)) {
b23dd4fe 2537 rth = ERR_PTR(-ENETUNREACH);
fc75fc83
ED
2538 goto out;
2539 }
68a5e3dd
DM
2540 if (ipv4_is_local_multicast(oldflp4->daddr) ||
2541 ipv4_is_lbcast(oldflp4->daddr)) {
2542 if (!fl4.saddr)
2543 fl4.saddr = inet_select_addr(dev_out, 0,
2544 RT_SCOPE_LINK);
1da177e4
LT
2545 goto make_route;
2546 }
68a5e3dd
DM
2547 if (!fl4.saddr) {
2548 if (ipv4_is_multicast(oldflp4->daddr))
2549 fl4.saddr = inet_select_addr(dev_out, 0,
2550 fl4.flowi4_scope);
2551 else if (!oldflp4->daddr)
2552 fl4.saddr = inet_select_addr(dev_out, 0,
2553 RT_SCOPE_HOST);
1da177e4
LT
2554 }
2555 }
2556
68a5e3dd
DM
2557 if (!fl4.daddr) {
2558 fl4.daddr = fl4.saddr;
2559 if (!fl4.daddr)
2560 fl4.daddr = fl4.saddr = htonl(INADDR_LOOPBACK);
b40afd0e 2561 dev_out = net->loopback_dev;
68a5e3dd 2562 fl4.flowi4_oif = net->loopback_dev->ifindex;
1da177e4
LT
2563 res.type = RTN_LOCAL;
2564 flags |= RTCF_LOCAL;
2565 goto make_route;
2566 }
2567
68a5e3dd 2568 if (fib_lookup(net, &fl4, &res)) {
1da177e4 2569 res.fi = NULL;
68a5e3dd 2570 if (oldflp4->flowi4_oif) {
1da177e4
LT
2571 /* Apparently, the routing tables are wrong. Assume
2572 that the destination is on link.
2573
2574 WHY? DW.
2575 Because we are allowed to send to an iface
2576 even if it has NO routes and NO assigned
2577 addresses. When oif is specified, the routing
2578 tables are looked up with only one purpose:
2579 to catch whether the destination is gatewayed rather than
2580 direct. Moreover, if MSG_DONTROUTE is set,
2581 we send the packet, ignoring both the routing tables
2582 and the ifaddr state. --ANK
2583
2584
2585 We could do it even if oif is unknown,
2586 likely as IPv6 does, but we do not.
2587 */
2588
68a5e3dd
DM
2589 if (fl4.saddr == 0)
2590 fl4.saddr = inet_select_addr(dev_out, 0,
2591 RT_SCOPE_LINK);
1da177e4
LT
2592 res.type = RTN_UNICAST;
2593 goto make_route;
2594 }
b23dd4fe 2595 rth = ERR_PTR(-ENETUNREACH);
1da177e4
LT
2596 goto out;
2597 }
1da177e4
LT
2598
2599 if (res.type == RTN_LOCAL) {
68a5e3dd 2600 if (!fl4.saddr) {
9fc3bbb4 2601 if (res.fi->fib_prefsrc)
68a5e3dd 2602 fl4.saddr = res.fi->fib_prefsrc;
9fc3bbb4 2603 else
68a5e3dd 2604 fl4.saddr = fl4.daddr;
9fc3bbb4 2605 }
b40afd0e 2606 dev_out = net->loopback_dev;
68a5e3dd 2607 fl4.flowi4_oif = dev_out->ifindex;
1da177e4
LT
2608 res.fi = NULL;
2609 flags |= RTCF_LOCAL;
2610 goto make_route;
2611 }
2612
2613#ifdef CONFIG_IP_ROUTE_MULTIPATH
68a5e3dd 2614 if (res.fi->fib_nhs > 1 && fl4.flowi4_oif == 0)
1b7fe593 2615 fib_select_multipath(&res);
1da177e4
LT
2616 else
2617#endif
21d8c49e
DM
2618 if (!res.prefixlen &&
2619 res.table->tb_num_default > 1 &&
2620 res.type == RTN_UNICAST && !fl4.flowi4_oif)
0c838ff1 2621 fib_select_default(&res);
1da177e4 2622
68a5e3dd 2623 if (!fl4.saddr)
436c3b66 2624 fl4.saddr = FIB_RES_PREFSRC(net, res);
1da177e4 2625
1da177e4 2626 dev_out = FIB_RES_DEV(res);
68a5e3dd 2627 fl4.flowi4_oif = dev_out->ifindex;
1da177e4
LT
2628
2629
2630make_route:
68a5e3dd 2631 rth = __mkroute_output(&res, &fl4, oldflp4, dev_out, flags);
b23dd4fe 2632 if (!IS_ERR(rth)) {
5ada5527
DM
2633 unsigned int hash;
2634
68a5e3dd 2635 hash = rt_hash(oldflp4->daddr, oldflp4->saddr, oldflp4->flowi4_oif,
5ada5527 2636 rt_genid(dev_net(dev_out)));
68a5e3dd 2637 rth = rt_intern_hash(hash, rth, NULL, oldflp4->flowi4_oif);
5ada5527 2638 }
1da177e4 2639
010c2708
DM
2640out:
2641 rcu_read_unlock();
b23dd4fe 2642 return rth;
1da177e4
LT
2643}
2644
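/* Output route lookup: scan the cached hash chain for an entry whose
 * key (daddr, saddr, oif, mark, TOS) matches, otherwise resolve via
 * ip_route_output_slow().  A rough caller sketch (illustrative only;
 * 'dip' is a made-up variable, error handling trimmed):
 *
 *	struct flowi4 fl4 = { .daddr = dip };
 *	struct rtable *rt = ip_route_output_key(net, &fl4);
 *	if (IS_ERR(rt))
 *		return PTR_ERR(rt);
 *	...use rt->dst for output, then ip_rt_put(rt);
 */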
9d6ec938 2645struct rtable *__ip_route_output_key(struct net *net, const struct flowi4 *flp4)
1da177e4 2646{
1da177e4 2647 struct rtable *rth;
010c2708 2648 unsigned int hash;
1da177e4 2649
1080d709
NH
2650 if (!rt_caching(net))
2651 goto slow_output;
2652
9d6ec938 2653 hash = rt_hash(flp4->daddr, flp4->saddr, flp4->flowi4_oif, rt_genid(net));
1da177e4
LT
2654
2655 rcu_read_lock_bh();
a898def2 2656 for (rth = rcu_dereference_bh(rt_hash_table[hash].chain); rth;
d8d1f30b 2657 rth = rcu_dereference_bh(rth->dst.rt_next)) {
9d6ec938
DM
2658 if (rth->rt_key_dst == flp4->daddr &&
2659 rth->rt_key_src == flp4->saddr &&
c7537967 2660 rt_is_output_route(rth) &&
9d6ec938
DM
2661 rth->rt_oif == flp4->flowi4_oif &&
2662 rth->rt_mark == flp4->flowi4_mark &&
2663 !((rth->rt_tos ^ flp4->flowi4_tos) &
b5921910 2664 (IPTOS_RT_MASK | RTO_ONLINK)) &&
d8d1f30b 2665 net_eq(dev_net(rth->dst.dev), net) &&
e84f84f2 2666 !rt_is_expired(rth)) {
d8d1f30b 2667 dst_use(&rth->dst, jiffies);
1da177e4
LT
2668 RT_CACHE_STAT_INC(out_hit);
2669 rcu_read_unlock_bh();
b23dd4fe 2670 return rth;
1da177e4
LT
2671 }
2672 RT_CACHE_STAT_INC(out_hlist_search);
2673 }
2674 rcu_read_unlock_bh();
2675
1080d709 2676slow_output:
9d6ec938 2677 return ip_route_output_slow(net, flp4);
1da177e4 2678}
d8c97a94
ACM
2679EXPORT_SYMBOL_GPL(__ip_route_output_key);
2680
ae2688d5
JW
2681static struct dst_entry *ipv4_blackhole_dst_check(struct dst_entry *dst, u32 cookie)
2682{
2683 return NULL;
2684}
2685
ec831ea7
RD
2686static unsigned int ipv4_blackhole_default_mtu(const struct dst_entry *dst)
2687{
2688 return 0;
2689}
2690
14e50e57
DM
2691static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu)
2692{
2693}
2694
2695static struct dst_ops ipv4_dst_blackhole_ops = {
2696 .family = AF_INET,
09640e63 2697 .protocol = cpu_to_be16(ETH_P_IP),
14e50e57 2698 .destroy = ipv4_dst_destroy,
ae2688d5 2699 .check = ipv4_blackhole_dst_check,
ec831ea7 2700 .default_mtu = ipv4_blackhole_default_mtu,
214f45c9 2701 .default_advmss = ipv4_default_advmss,
14e50e57 2702 .update_pmtu = ipv4_rt_blackhole_update_pmtu,
14e50e57
DM
2703};
2704
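/* Clone dst_orig into a "blackhole" route: the copy keeps the flow
 * keys, metrics, peer and fib references of the original, but its
 * input/output handlers simply discard packets.  The reference on
 * dst_orig is dropped.  Callers outside this file (the xfrm lookup
 * path, as far as can be told from here) use it when traffic must be
 * silently held back.
 */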
2774c131 2705struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_orig)
14e50e57 2706{
2774c131
DM
2707 struct rtable *rt = dst_alloc(&ipv4_dst_blackhole_ops, 1);
2708 struct rtable *ort = (struct rtable *) dst_orig;
14e50e57
DM
2709
2710 if (rt) {
d8d1f30b 2711 struct dst_entry *new = &rt->dst;
14e50e57 2712
14e50e57 2713 new->__use = 1;
352e512c
HX
2714 new->input = dst_discard;
2715 new->output = dst_discard;
defb3519 2716 dst_copy_metrics(new, &ort->dst);
14e50e57 2717
d8d1f30b 2718 new->dev = ort->dst.dev;
14e50e57
DM
2719 if (new->dev)
2720 dev_hold(new->dev);
2721
5e2b61f7
DM
2722 rt->rt_key_dst = ort->rt_key_dst;
2723 rt->rt_key_src = ort->rt_key_src;
2724 rt->rt_tos = ort->rt_tos;
1b86a58f 2725 rt->rt_route_iif = ort->rt_route_iif;
5e2b61f7
DM
2726 rt->rt_iif = ort->rt_iif;
2727 rt->rt_oif = ort->rt_oif;
2728 rt->rt_mark = ort->rt_mark;
14e50e57 2729
e84f84f2 2730 rt->rt_genid = rt_genid(net);
14e50e57
DM
2731 rt->rt_flags = ort->rt_flags;
2732 rt->rt_type = ort->rt_type;
2733 rt->rt_dst = ort->rt_dst;
2734 rt->rt_src = ort->rt_src;
14e50e57
DM
2735 rt->rt_gateway = ort->rt_gateway;
2736 rt->rt_spec_dst = ort->rt_spec_dst;
2737 rt->peer = ort->peer;
2738 if (rt->peer)
2739 atomic_inc(&rt->peer->refcnt);
62fa8a84
DM
2740 rt->fi = ort->fi;
2741 if (rt->fi)
2742 atomic_inc(&rt->fi->fib_clntref);
14e50e57
DM
2743
2744 dst_free(new);
2745 }
2746
2774c131
DM
2747 dst_release(dst_orig);
2748
2749 return rt ? &rt->dst : ERR_PTR(-ENOMEM);
14e50e57
DM
2750}
2751
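/* Public output-route helper: do the key lookup and, when a transport
 * protocol is given in the flow, fill in any still-missing addresses
 * and hand the dst to xfrm_lookup() so IPsec policy can transform or
 * replace it.  Typical (illustrative) use from a socket context:
 *
 *	rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
 *	if (IS_ERR(rt))
 *		return PTR_ERR(rt);
 */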
9d6ec938 2752struct rtable *ip_route_output_flow(struct net *net, struct flowi4 *flp4,
b23dd4fe 2753 struct sock *sk)
1da177e4 2754{
9d6ec938 2755 struct rtable *rt = __ip_route_output_key(net, flp4);
1da177e4 2756
b23dd4fe
DM
2757 if (IS_ERR(rt))
2758 return rt;
1da177e4 2759
9d6ec938
DM
2760 if (flp4->flowi4_proto) {
2761 if (!flp4->saddr)
2762 flp4->saddr = rt->rt_src;
2763 if (!flp4->daddr)
2764 flp4->daddr = rt->rt_dst;
2765 rt = (struct rtable *) xfrm_lookup(net, &rt->dst,
2766 flowi4_to_flowi(flp4),
2767 sk, 0);
1da177e4
LT
2768 }
2769
b23dd4fe 2770 return rt;
1da177e4 2771}
d8c97a94
ACM
2772EXPORT_SYMBOL_GPL(ip_route_output_flow);
2773
4feb88e5
BT
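/* Encode one route cache entry as an RTM_NEWROUTE netlink message:
 * rtmsg header plus RTA_DST/SRC/OIF/GATEWAY/PREFSRC/FLOW/MARK
 * attributes, metrics and cache info (id, timestamps, expiry), with
 * special handling of multicast destinations via ipmr_get_route().
 */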
2774static int rt_fill_info(struct net *net,
2775 struct sk_buff *skb, u32 pid, u32 seq, int event,
b6544c0b 2776 int nowait, unsigned int flags)
1da177e4 2777{
511c3f92 2778 struct rtable *rt = skb_rtable(skb);
1da177e4 2779 struct rtmsg *r;
be403ea1 2780 struct nlmsghdr *nlh;
e3703b3d
TG
2781 long expires;
2782 u32 id = 0, ts = 0, tsage = 0, error;
be403ea1
TG
2783
2784 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*r), flags);
2785 if (nlh == NULL)
26932566 2786 return -EMSGSIZE;
be403ea1
TG
2787
2788 r = nlmsg_data(nlh);
1da177e4
LT
2789 r->rtm_family = AF_INET;
2790 r->rtm_dst_len = 32;
2791 r->rtm_src_len = 0;
5e2b61f7 2792 r->rtm_tos = rt->rt_tos;
1da177e4 2793 r->rtm_table = RT_TABLE_MAIN;
be403ea1 2794 NLA_PUT_U32(skb, RTA_TABLE, RT_TABLE_MAIN);
1da177e4
LT
2795 r->rtm_type = rt->rt_type;
2796 r->rtm_scope = RT_SCOPE_UNIVERSE;
2797 r->rtm_protocol = RTPROT_UNSPEC;
2798 r->rtm_flags = (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
2799 if (rt->rt_flags & RTCF_NOTIFY)
2800 r->rtm_flags |= RTM_F_NOTIFY;
be403ea1 2801
17fb2c64 2802 NLA_PUT_BE32(skb, RTA_DST, rt->rt_dst);
be403ea1 2803
5e2b61f7 2804 if (rt->rt_key_src) {
1da177e4 2805 r->rtm_src_len = 32;
5e2b61f7 2806 NLA_PUT_BE32(skb, RTA_SRC, rt->rt_key_src);
1da177e4 2807 }
d8d1f30b
CG
2808 if (rt->dst.dev)
2809 NLA_PUT_U32(skb, RTA_OIF, rt->dst.dev->ifindex);
c7066f70 2810#ifdef CONFIG_IP_ROUTE_CLASSID
d8d1f30b
CG
2811 if (rt->dst.tclassid)
2812 NLA_PUT_U32(skb, RTA_FLOW, rt->dst.tclassid);
1da177e4 2813#endif
c7537967 2814 if (rt_is_input_route(rt))
17fb2c64 2815 NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_spec_dst);
5e2b61f7 2816 else if (rt->rt_src != rt->rt_key_src)
17fb2c64 2817 NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_src);
be403ea1 2818
1da177e4 2819 if (rt->rt_dst != rt->rt_gateway)
17fb2c64 2820 NLA_PUT_BE32(skb, RTA_GATEWAY, rt->rt_gateway);
be403ea1 2821
defb3519 2822 if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0)
be403ea1
TG
2823 goto nla_put_failure;
2824
5e2b61f7
DM
2825 if (rt->rt_mark)
2826 NLA_PUT_BE32(skb, RTA_MARK, rt->rt_mark);
963bfeee 2827
d8d1f30b 2828 error = rt->dst.error;
2c8cec5c
DM
2829 expires = (rt->peer && rt->peer->pmtu_expires) ?
2830 rt->peer->pmtu_expires - jiffies : 0;
1da177e4 2831 if (rt->peer) {
317fe0e6 2832 inet_peer_refcheck(rt->peer);
2c1409a0 2833 id = atomic_read(&rt->peer->ip_id_count) & 0xffff;
1da177e4 2834 if (rt->peer->tcp_ts_stamp) {
e3703b3d 2835 ts = rt->peer->tcp_ts;
9d729f72 2836 tsage = get_seconds() - rt->peer->tcp_ts_stamp;
1da177e4
LT
2837 }
2838 }
be403ea1 2839
c7537967 2840 if (rt_is_input_route(rt)) {
1da177e4 2841#ifdef CONFIG_IP_MROUTE
e448515c 2842 __be32 dst = rt->rt_dst;
1da177e4 2843
f97c1e0c 2844 if (ipv4_is_multicast(dst) && !ipv4_is_local_multicast(dst) &&
4feb88e5
BT
2845 IPV4_DEVCONF_ALL(net, MC_FORWARDING)) {
2846 int err = ipmr_get_route(net, skb, r, nowait);
1da177e4
LT
2847 if (err <= 0) {
2848 if (!nowait) {
2849 if (err == 0)
2850 return 0;
be403ea1 2851 goto nla_put_failure;
1da177e4
LT
2852 } else {
2853 if (err == -EMSGSIZE)
be403ea1 2854 goto nla_put_failure;
e3703b3d 2855 error = err;
1da177e4
LT
2856 }
2857 }
2858 } else
2859#endif
5e2b61f7 2860 NLA_PUT_U32(skb, RTA_IIF, rt->rt_iif);
1da177e4
LT
2861 }
2862
d8d1f30b 2863 if (rtnl_put_cacheinfo(skb, &rt->dst, id, ts, tsage,
e3703b3d
TG
2864 expires, error) < 0)
2865 goto nla_put_failure;
be403ea1
TG
2866
2867 return nlmsg_end(skb, nlh);
1da177e4 2868
be403ea1 2869nla_put_failure:
26932566
PM
2870 nlmsg_cancel(skb, nlh);
2871 return -EMSGSIZE;
1da177e4
LT
2872}
2873
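/* RTM_GETROUTE handler: parse the request attributes, fake up an skb
 * with a dummy ICMP header, then either replay an input lookup on the
 * given RTA_IIF device or perform an output lookup, and unicast the
 * resulting route back to the requester via rt_fill_info().
 */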
63f3444f 2874static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void *arg)
1da177e4 2875{
3b1e0a65 2876 struct net *net = sock_net(in_skb->sk);
d889ce3b
TG
2877 struct rtmsg *rtm;
2878 struct nlattr *tb[RTA_MAX+1];
1da177e4 2879 struct rtable *rt = NULL;
9e12bb22
AV
2880 __be32 dst = 0;
2881 __be32 src = 0;
2882 u32 iif;
d889ce3b 2883 int err;
963bfeee 2884 int mark;
1da177e4
LT
2885 struct sk_buff *skb;
2886
d889ce3b
TG
2887 err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv4_policy);
2888 if (err < 0)
2889 goto errout;
2890
2891 rtm = nlmsg_data(nlh);
2892
1da177e4 2893 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
d889ce3b
TG
2894 if (skb == NULL) {
2895 err = -ENOBUFS;
2896 goto errout;
2897 }
1da177e4
LT
2898
2899 /* Reserve room for dummy headers; this skb can pass
2900 through a good chunk of the routing engine.
2901 */
459a98ed 2902 skb_reset_mac_header(skb);
c1d2bbe1 2903 skb_reset_network_header(skb);
d2c962b8
SH
2904
2905 /* Bugfix: need to give ip_route_input enough of an IP header to not gag. */
eddc9ec5 2906 ip_hdr(skb)->protocol = IPPROTO_ICMP;
1da177e4
LT
2907 skb_reserve(skb, MAX_HEADER + sizeof(struct iphdr));
2908
17fb2c64
AV
2909 src = tb[RTA_SRC] ? nla_get_be32(tb[RTA_SRC]) : 0;
2910 dst = tb[RTA_DST] ? nla_get_be32(tb[RTA_DST]) : 0;
d889ce3b 2911 iif = tb[RTA_IIF] ? nla_get_u32(tb[RTA_IIF]) : 0;
963bfeee 2912 mark = tb[RTA_MARK] ? nla_get_u32(tb[RTA_MARK]) : 0;
1da177e4
LT
2913
2914 if (iif) {
d889ce3b
TG
2915 struct net_device *dev;
2916
1937504d 2917 dev = __dev_get_by_index(net, iif);
d889ce3b
TG
2918 if (dev == NULL) {
2919 err = -ENODEV;
2920 goto errout_free;
2921 }
2922
1da177e4
LT
2923 skb->protocol = htons(ETH_P_IP);
2924 skb->dev = dev;
963bfeee 2925 skb->mark = mark;
1da177e4
LT
2926 local_bh_disable();
2927 err = ip_route_input(skb, dst, src, rtm->rtm_tos, dev);
2928 local_bh_enable();
d889ce3b 2929
511c3f92 2930 rt = skb_rtable(skb);
d8d1f30b
CG
2931 if (err == 0 && rt->dst.error)
2932 err = -rt->dst.error;
1da177e4 2933 } else {
68a5e3dd
DM
2934 struct flowi4 fl4 = {
2935 .daddr = dst,
2936 .saddr = src,
2937 .flowi4_tos = rtm->rtm_tos,
2938 .flowi4_oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0,
2939 .flowi4_mark = mark,
d889ce3b 2940 };
9d6ec938 2941 rt = ip_route_output_key(net, &fl4);
b23dd4fe
DM
2942
2943 err = 0;
2944 if (IS_ERR(rt))
2945 err = PTR_ERR(rt);
1da177e4 2946 }
d889ce3b 2947
1da177e4 2948 if (err)
d889ce3b 2949 goto errout_free;
1da177e4 2950
d8d1f30b 2951 skb_dst_set(skb, &rt->dst);
1da177e4
LT
2952 if (rtm->rtm_flags & RTM_F_NOTIFY)
2953 rt->rt_flags |= RTCF_NOTIFY;
2954
4feb88e5 2955 err = rt_fill_info(net, skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq,
1937504d 2956 RTM_NEWROUTE, 0, 0);
d889ce3b
TG
2957 if (err <= 0)
2958 goto errout_free;
1da177e4 2959
1937504d 2960 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).pid);
d889ce3b 2961errout:
2942e900 2962 return err;
1da177e4 2963
d889ce3b 2964errout_free:
1da177e4 2965 kfree_skb(skb);
d889ce3b 2966 goto errout;
1da177e4
LT
2967}
2968
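/* Netlink dump callback: walk the route cache hash under
 * rcu_read_lock_bh(), emit every unexpired entry belonging to this
 * namespace with rt_fill_info() and remember the position in cb->args
 * so the dump can be resumed.
 */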
2969int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb)
2970{
2971 struct rtable *rt;
2972 int h, s_h;
2973 int idx, s_idx;
1937504d
DL
2974 struct net *net;
2975
3b1e0a65 2976 net = sock_net(skb->sk);
1da177e4
LT
2977
2978 s_h = cb->args[0];
d8c92830
ED
2979 if (s_h < 0)
2980 s_h = 0;
1da177e4 2981 s_idx = idx = cb->args[1];
a6272665
ED
2982 for (h = s_h; h <= rt_hash_mask; h++, s_idx = 0) {
2983 if (!rt_hash_table[h].chain)
2984 continue;
1da177e4 2985 rcu_read_lock_bh();
a898def2 2986 for (rt = rcu_dereference_bh(rt_hash_table[h].chain), idx = 0; rt;
d8d1f30b
CG
2987 rt = rcu_dereference_bh(rt->dst.rt_next), idx++) {
2988 if (!net_eq(dev_net(rt->dst.dev), net) || idx < s_idx)
1da177e4 2989 continue;
e84f84f2 2990 if (rt_is_expired(rt))
29e75252 2991 continue;
d8d1f30b 2992 skb_dst_set_noref(skb, &rt->dst);
4feb88e5 2993 if (rt_fill_info(net, skb, NETLINK_CB(cb->skb).pid,
e905a9ed 2994 cb->nlh->nlmsg_seq, RTM_NEWROUTE,
b6544c0b 2995 1, NLM_F_MULTI) <= 0) {
adf30907 2996 skb_dst_drop(skb);
1da177e4
LT
2997 rcu_read_unlock_bh();
2998 goto done;
2999 }
adf30907 3000 skb_dst_drop(skb);
1da177e4
LT
3001 }
3002 rcu_read_unlock_bh();
3003 }
3004
3005done:
3006 cb->args[0] = h;
3007 cb->args[1] = idx;
3008 return skb->len;
3009}
3010
3011void ip_rt_multicast_event(struct in_device *in_dev)
3012{
76e6ebfb 3013 rt_cache_flush(dev_net(in_dev->dev), 0);
1da177e4
LT
3014}
3015
3016#ifdef CONFIG_SYSCTL
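/* Handler for the write-only "flush" sysctl: the integer written is
 * used as the flush delay passed to rt_cache_flush() for the caller's
 * namespace (e.g. writing to /proc/sys/net/ipv4/route/flush, assuming
 * the usual procfs layout).
 */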
81c684d1 3017static int ipv4_sysctl_rtcache_flush(ctl_table *__ctl, int write,
8d65af78 3018 void __user *buffer,
1da177e4
LT
3019 size_t *lenp, loff_t *ppos)
3020{
3021 if (write) {
639e104f 3022 int flush_delay;
81c684d1 3023 ctl_table ctl;
39a23e75 3024 struct net *net;
639e104f 3025
81c684d1
DL
3026 memcpy(&ctl, __ctl, sizeof(ctl));
3027 ctl.data = &flush_delay;
8d65af78 3028 proc_dointvec(&ctl, write, buffer, lenp, ppos);
639e104f 3029
81c684d1 3030 net = (struct net *)__ctl->extra1;
39a23e75 3031 rt_cache_flush(net, flush_delay);
1da177e4 3032 return 0;
e905a9ed 3033 }
1da177e4
LT
3034
3035 return -EINVAL;
3036}
3037
eeb61f71 3038static ctl_table ipv4_route_table[] = {
1da177e4 3039 {
1da177e4
LT
3040 .procname = "gc_thresh",
3041 .data = &ipv4_dst_ops.gc_thresh,
3042 .maxlen = sizeof(int),
3043 .mode = 0644,
6d9f239a 3044 .proc_handler = proc_dointvec,
1da177e4
LT
3045 },
3046 {
1da177e4
LT
3047 .procname = "max_size",
3048 .data = &ip_rt_max_size,
3049 .maxlen = sizeof(int),
3050 .mode = 0644,
6d9f239a 3051 .proc_handler = proc_dointvec,
1da177e4
LT
3052 },
3053 {
3054 /* Deprecated. Use gc_min_interval_ms */
e905a9ed 3055
1da177e4
LT
3056 .procname = "gc_min_interval",
3057 .data = &ip_rt_gc_min_interval,
3058 .maxlen = sizeof(int),
3059 .mode = 0644,
6d9f239a 3060 .proc_handler = proc_dointvec_jiffies,
1da177e4
LT
3061 },
3062 {
1da177e4
LT
3063 .procname = "gc_min_interval_ms",
3064 .data = &ip_rt_gc_min_interval,
3065 .maxlen = sizeof(int),
3066 .mode = 0644,
6d9f239a 3067 .proc_handler = proc_dointvec_ms_jiffies,
1da177e4
LT
3068 },
3069 {
1da177e4
LT
3070 .procname = "gc_timeout",
3071 .data = &ip_rt_gc_timeout,
3072 .maxlen = sizeof(int),
3073 .mode = 0644,
6d9f239a 3074 .proc_handler = proc_dointvec_jiffies,
1da177e4
LT
3075 },
3076 {
1da177e4
LT
3077 .procname = "gc_interval",
3078 .data = &ip_rt_gc_interval,
3079 .maxlen = sizeof(int),
3080 .mode = 0644,
6d9f239a 3081 .proc_handler = proc_dointvec_jiffies,
1da177e4
LT
3082 },
3083 {
1da177e4
LT
3084 .procname = "redirect_load",
3085 .data = &ip_rt_redirect_load,
3086 .maxlen = sizeof(int),
3087 .mode = 0644,
6d9f239a 3088 .proc_handler = proc_dointvec,
1da177e4
LT
3089 },
3090 {
1da177e4
LT
3091 .procname = "redirect_number",
3092 .data = &ip_rt_redirect_number,
3093 .maxlen = sizeof(int),
3094 .mode = 0644,
6d9f239a 3095 .proc_handler = proc_dointvec,
1da177e4
LT
3096 },
3097 {
1da177e4
LT
3098 .procname = "redirect_silence",
3099 .data = &ip_rt_redirect_silence,
3100 .maxlen = sizeof(int),
3101 .mode = 0644,
6d9f239a 3102 .proc_handler = proc_dointvec,
1da177e4
LT
3103 },
3104 {
1da177e4
LT
3105 .procname = "error_cost",
3106 .data = &ip_rt_error_cost,
3107 .maxlen = sizeof(int),
3108 .mode = 0644,
6d9f239a 3109 .proc_handler = proc_dointvec,
1da177e4
LT
3110 },
3111 {
1da177e4
LT
3112 .procname = "error_burst",
3113 .data = &ip_rt_error_burst,
3114 .maxlen = sizeof(int),
3115 .mode = 0644,
6d9f239a 3116 .proc_handler = proc_dointvec,
1da177e4
LT
3117 },
3118 {
1da177e4
LT
3119 .procname = "gc_elasticity",
3120 .data = &ip_rt_gc_elasticity,
3121 .maxlen = sizeof(int),
3122 .mode = 0644,
6d9f239a 3123 .proc_handler = proc_dointvec,
1da177e4
LT
3124 },
3125 {
1da177e4
LT
3126 .procname = "mtu_expires",
3127 .data = &ip_rt_mtu_expires,
3128 .maxlen = sizeof(int),
3129 .mode = 0644,
6d9f239a 3130 .proc_handler = proc_dointvec_jiffies,
1da177e4
LT
3131 },
3132 {
1da177e4
LT
3133 .procname = "min_pmtu",
3134 .data = &ip_rt_min_pmtu,
3135 .maxlen = sizeof(int),
3136 .mode = 0644,
6d9f239a 3137 .proc_handler = proc_dointvec,
1da177e4
LT
3138 },
3139 {
1da177e4
LT
3140 .procname = "min_adv_mss",
3141 .data = &ip_rt_min_advmss,
3142 .maxlen = sizeof(int),
3143 .mode = 0644,
6d9f239a 3144 .proc_handler = proc_dointvec,
1da177e4 3145 },
f8572d8f 3146 { }
1da177e4 3147};
39a23e75 3148
2f4520d3
AV
3149static struct ctl_table empty[1];
3150
3151static struct ctl_table ipv4_skeleton[] =
3152{
f8572d8f 3153 { .procname = "route",
d994af0d 3154 .mode = 0555, .child = ipv4_route_table},
f8572d8f 3155 { .procname = "neigh",
d994af0d 3156 .mode = 0555, .child = empty},
2f4520d3
AV
3157 { }
3158};
3159
3160static __net_initdata struct ctl_path ipv4_path[] = {
f8572d8f
EB
3161 { .procname = "net", },
3162 { .procname = "ipv4", },
39a23e75
DL
3163 { },
3164};
3165
39a23e75
DL
3166static struct ctl_table ipv4_route_flush_table[] = {
3167 {
39a23e75
DL
3168 .procname = "flush",
3169 .maxlen = sizeof(int),
3170 .mode = 0200,
6d9f239a 3171 .proc_handler = ipv4_sysctl_rtcache_flush,
39a23e75 3172 },
f8572d8f 3173 { },
39a23e75
DL
3174};
3175
2f4520d3 3176static __net_initdata struct ctl_path ipv4_route_path[] = {
f8572d8f
EB
3177 { .procname = "net", },
3178 { .procname = "ipv4", },
3179 { .procname = "route", },
2f4520d3
AV
3180 { },
3181};
3182
39a23e75
DL
3183static __net_init int sysctl_route_net_init(struct net *net)
3184{
3185 struct ctl_table *tbl;
3186
3187 tbl = ipv4_route_flush_table;
09ad9bc7 3188 if (!net_eq(net, &init_net)) {
39a23e75
DL
3189 tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
3190 if (tbl == NULL)
3191 goto err_dup;
3192 }
3193 tbl[0].extra1 = net;
3194
3195 net->ipv4.route_hdr =
3196 register_net_sysctl_table(net, ipv4_route_path, tbl);
3197 if (net->ipv4.route_hdr == NULL)
3198 goto err_reg;
3199 return 0;
3200
3201err_reg:
3202 if (tbl != ipv4_route_flush_table)
3203 kfree(tbl);
3204err_dup:
3205 return -ENOMEM;
3206}
3207
3208static __net_exit void sysctl_route_net_exit(struct net *net)
3209{
3210 struct ctl_table *tbl;
3211
3212 tbl = net->ipv4.route_hdr->ctl_table_arg;
3213 unregister_net_sysctl_table(net->ipv4.route_hdr);
3214 BUG_ON(tbl == ipv4_route_flush_table);
3215 kfree(tbl);
3216}
3217
3218static __net_initdata struct pernet_operations sysctl_route_ops = {
3219 .init = sysctl_route_net_init,
3220 .exit = sysctl_route_net_exit,
3221};
1da177e4
LT
3222#endif
3223
3ee94372 3224static __net_init int rt_genid_init(struct net *net)
9f5e97e5 3225{
3ee94372
NH
3226 get_random_bytes(&net->ipv4.rt_genid,
3227 sizeof(net->ipv4.rt_genid));
436c3b66
DM
3228 get_random_bytes(&net->ipv4.dev_addr_genid,
3229 sizeof(net->ipv4.dev_addr_genid));
9f5e97e5
DL
3230 return 0;
3231}
3232
3ee94372
NH
3233static __net_initdata struct pernet_operations rt_genid_ops = {
3234 .init = rt_genid_init,
9f5e97e5
DL
3235};
3236
3237
c7066f70 3238#ifdef CONFIG_IP_ROUTE_CLASSID
7d720c3e 3239struct ip_rt_acct __percpu *ip_rt_acct __read_mostly;
c7066f70 3240#endif /* CONFIG_IP_ROUTE_CLASSID */
1da177e4
LT
3241
3242static __initdata unsigned long rhash_entries;
3243static int __init set_rhash_entries(char *str)
3244{
3245 if (!str)
3246 return 0;
3247 rhash_entries = simple_strtoul(str, &str, 0);
3248 return 1;
3249}
3250__setup("rhash_entries=", set_rhash_entries);
3251
3252int __init ip_rt_init(void)
3253{
424c4b70 3254 int rc = 0;
1da177e4 3255
c7066f70 3256#ifdef CONFIG_IP_ROUTE_CLASSID
0dcec8c2 3257 ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct), __alignof__(struct ip_rt_acct));
1da177e4
LT
3258 if (!ip_rt_acct)
3259 panic("IP: failed to allocate ip_rt_acct\n");
1da177e4
LT
3260#endif
3261
e5d679f3
AD
3262 ipv4_dst_ops.kmem_cachep =
3263 kmem_cache_create("ip_dst_cache", sizeof(struct rtable), 0,
20c2df83 3264 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
1da177e4 3265
14e50e57
DM
3266 ipv4_dst_blackhole_ops.kmem_cachep = ipv4_dst_ops.kmem_cachep;
3267
fc66f95c
ED
3268 if (dst_entries_init(&ipv4_dst_ops) < 0)
3269 panic("IP: failed to allocate ipv4_dst_ops counter\n");
3270
3271 if (dst_entries_init(&ipv4_dst_blackhole_ops) < 0)
3272 panic("IP: failed to allocate ipv4_dst_blackhole_ops counter\n");
3273
424c4b70
ED
3274 rt_hash_table = (struct rt_hash_bucket *)
3275 alloc_large_system_hash("IP route cache",
3276 sizeof(struct rt_hash_bucket),
3277 rhash_entries,
4481374c 3278 (totalram_pages >= 128 * 1024) ?
18955cfc 3279 15 : 17,
8d1502de 3280 0,
424c4b70
ED
3281 &rt_hash_log,
3282 &rt_hash_mask,
c9503e0f 3283 rhash_entries ? 0 : 512 * 1024);
22c047cc
ED
3284 memset(rt_hash_table, 0, (rt_hash_mask + 1) * sizeof(struct rt_hash_bucket));
3285 rt_hash_lock_init();
1da177e4
LT
3286
3287 ipv4_dst_ops.gc_thresh = (rt_hash_mask + 1);
3288 ip_rt_max_size = (rt_hash_mask + 1) * 16;
3289
1da177e4
LT
3290 devinet_init();
3291 ip_fib_init();
3292
73b38711 3293 if (ip_rt_proc_init())
107f1634 3294 printk(KERN_ERR "Unable to create route proc files\n");
1da177e4
LT
3295#ifdef CONFIG_XFRM
3296 xfrm_init();
a33bc5c1 3297 xfrm4_init(ip_rt_max_size);
1da177e4 3298#endif
63f3444f
TG
3299 rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL);
3300
39a23e75
DL
3301#ifdef CONFIG_SYSCTL
3302 register_pernet_subsys(&sysctl_route_ops);
3303#endif
3ee94372 3304 register_pernet_subsys(&rt_genid_ops);
1da177e4
LT
3305 return rc;
3306}
3307
a1bc6eb4 3308#ifdef CONFIG_SYSCTL
eeb61f71
AV
3309/*
3310 * We really need to sanitize the damn ipv4 init order, then all
3311 * this nonsense will go away.
3312 */
3313void __init ip_static_sysctl_init(void)
3314{
2f4520d3 3315 register_sysctl_paths(ipv4_path, ipv4_skeleton);
eeb61f71 3316}
a1bc6eb4 3317#endif