2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
6 * Definitions for the IP router.
8 * Version: @(#)route.h 1.0.4 05/27/93
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
13 * Alan Cox : Reformatted. Added ip_rt_local()
14 * Alan Cox : Support for TCP parameters.
15 * Alexey Kuznetsov: Major changes for new routing code.
16 * Mike McLagan : Routing by source
17 * Robert Olsson : Added rt_cache statistics
19 * This program is free software; you can redistribute it and/or
20 * modify it under the terms of the GNU General Public License
21 * as published by the Free Software Foundation; either version
22 * 2 of the License, or (at your option) any later version.
28 #include <net/inetpeer.h>
30 #include <net/inet_sock.h>
31 #include <linux/in_route.h>
32 #include <linux/rtnetlink.h>
33 #include <linux/route.h>
35 #include <linux/cache.h>
36 #include <linux/security.h>
38 #define RTO_ONLINK 0x01
40 #define RT_CONN_FLAGS(sk) (RT_TOS(inet_sk(sk)->tos) | sock_flag(sk, SOCK_LOCALROUTE))
	/* Cache-entry flags (RTCF_*). */
	unsigned int rt_flags;

	__be32 rt_dst;		/* Path destination */
	__be32 rt_src;		/* Path source */

	/* Info on neighbour */

	/* Miscellaneous cached information */
	__be32 rt_spec_dst;	/* RFC1122 specific destination */
	/* Tagged pointer: either an inet_peer or an inet_peer_base;
	 * access only through the rt_*peer* helpers below. */
	unsigned long _peer;	/* long-living peer info */
	struct fib_info *fi;	/* for client ref to shared metrics */
74 static inline struct inet_peer
*rt_peer_ptr(struct rtable
*rt
)
76 return inetpeer_ptr(rt
->_peer
);
79 static inline bool rt_has_peer(struct rtable
*rt
)
81 return inetpeer_ptr_is_peer(rt
->_peer
);
84 static inline void __rt_set_peer(struct rtable
*rt
, struct inet_peer
*peer
)
86 __inetpeer_ptr_set_peer(&rt
->_peer
, peer
);
89 static inline bool rt_set_peer(struct rtable
*rt
, struct inet_peer
*peer
)
91 return inetpeer_ptr_set_peer(&rt
->_peer
, peer
);
94 static inline void rt_init_peer(struct rtable
*rt
, struct inet_peer_base
*base
)
96 inetpeer_init_ptr(&rt
->_peer
, base
);
99 static inline void rt_transfer_peer(struct rtable
*rt
, struct rtable
*ort
)
101 rt
->_peer
= ort
->_peer
;
102 if (rt_has_peer(ort
)) {
103 struct inet_peer
*peer
= rt_peer_ptr(ort
);
104 atomic_inc(&peer
->refcnt
);
108 static inline bool rt_is_input_route(const struct rtable
*rt
)
110 return rt
->rt_route_iif
!= 0;
113 static inline bool rt_is_output_route(const struct rtable
*rt
)
115 return rt
->rt_route_iif
== 0;
/* Per-CPU routing-cache statistics counters.
 * NOTE(review): extraction gaps suggest one or two counters (before
 * in_slow_tot and in_martian_dst) may have been dropped — verify field
 * list and order against the original before relying on the layout. */
struct rt_cache_stat {
	unsigned int in_slow_tot;
	unsigned int in_slow_mc;
	unsigned int in_no_route;
	unsigned int in_martian_dst;
	unsigned int in_martian_src;
	unsigned int out_hit;
	unsigned int out_slow_tot;
	unsigned int out_slow_mc;
	unsigned int gc_total;
	unsigned int gc_ignored;
	unsigned int gc_goal_miss;
	unsigned int gc_dst_overflow;
	unsigned int in_hlist_search;
	unsigned int out_hlist_search;
};
144 extern struct ip_rt_acct __percpu
*ip_rt_acct
;
147 extern int ip_rt_init(void);
148 extern void ip_rt_redirect(__be32 old_gw
, __be32 dst
, __be32 new_gw
,
149 __be32 src
, struct net_device
*dev
);
150 extern void rt_cache_flush(struct net
*net
, int how
);
151 extern void rt_cache_flush_batch(struct net
*net
);
extern struct rtable *__ip_route_output_key(struct net *, struct flowi4 *flp);
/* Third parameter restored from the visible call sites, which pass a
 * struct sock * (or NULL) as the final argument. */
extern struct rtable *ip_route_output_flow(struct net *, struct flowi4 *flp,
					   struct sock *sk);
extern struct dst_entry *ipv4_blackhole_route(struct net *net,
					      struct dst_entry *dst_orig);
157 static inline struct rtable
*ip_route_output_key(struct net
*net
, struct flowi4
*flp
)
159 return ip_route_output_flow(net
, flp
, NULL
);
162 static inline struct rtable
*ip_route_output(struct net
*net
, __be32 daddr
,
163 __be32 saddr
, u8 tos
, int oif
)
165 struct flowi4 fl4
= {
171 return ip_route_output_key(net
, &fl4
);
174 static inline struct rtable
*ip_route_output_ports(struct net
*net
, struct flowi4
*fl4
,
176 __be32 daddr
, __be32 saddr
,
177 __be16 dport
, __be16 sport
,
178 __u8 proto
, __u8 tos
, int oif
)
180 flowi4_init_output(fl4
, oif
, sk
? sk
->sk_mark
: 0, tos
,
181 RT_SCOPE_UNIVERSE
, proto
,
182 sk
? inet_sk_flowi_flags(sk
) : 0,
183 daddr
, saddr
, dport
, sport
);
185 security_sk_classify_flow(sk
, flowi4_to_flowi(fl4
));
186 return ip_route_output_flow(net
, fl4
, sk
);
189 static inline struct rtable
*ip_route_output_gre(struct net
*net
, struct flowi4
*fl4
,
190 __be32 daddr
, __be32 saddr
,
191 __be32 gre_key
, __u8 tos
, int oif
)
193 memset(fl4
, 0, sizeof(*fl4
));
194 fl4
->flowi4_oif
= oif
;
197 fl4
->flowi4_tos
= tos
;
198 fl4
->flowi4_proto
= IPPROTO_GRE
;
199 fl4
->fl4_gre_key
= gre_key
;
200 return ip_route_output_key(net
, fl4
);
203 extern int ip_route_input_common(struct sk_buff
*skb
, __be32 dst
, __be32 src
,
204 u8 tos
, struct net_device
*devin
, bool noref
);
206 static inline int ip_route_input(struct sk_buff
*skb
, __be32 dst
, __be32 src
,
207 u8 tos
, struct net_device
*devin
)
209 return ip_route_input_common(skb
, dst
, src
, tos
, devin
, false);
212 static inline int ip_route_input_noref(struct sk_buff
*skb
, __be32 dst
, __be32 src
,
213 u8 tos
, struct net_device
*devin
)
215 return ip_route_input_common(skb
, dst
, src
, tos
, devin
, true);
218 extern void ip_rt_send_redirect(struct sk_buff
*skb
);
220 extern unsigned int inet_addr_type(struct net
*net
, __be32 addr
);
221 extern unsigned int inet_dev_addr_type(struct net
*net
, const struct net_device
*dev
, __be32 addr
);
222 extern void ip_rt_multicast_event(struct in_device
*);
223 extern int ip_rt_ioctl(struct net
*, unsigned int cmd
, void __user
*arg
);
224 extern void ip_rt_get_source(u8
*src
, struct sk_buff
*skb
, struct rtable
*rt
);
225 extern int ip_rt_dump(struct sk_buff
*skb
, struct netlink_callback
*cb
);
228 extern void fib_add_ifaddr(struct in_ifaddr
*);
229 extern void fib_del_ifaddr(struct in_ifaddr
*, struct in_ifaddr
*);
231 static inline void ip_rt_put(struct rtable
* rt
)
234 dst_release(&rt
->dst
);
237 #define IPTOS_RT_MASK (IPTOS_TOS_MASK & ~3)
239 extern const __u8 ip_tos2prio
[16];
241 static inline char rt_tos2priority(u8 tos
)
243 return ip_tos2prio
[IPTOS_TOS(tos
)>>1];
246 /* ip_route_connect() and ip_route_newports() work in tandem whilst
247 * binding a socket for a new outgoing connection.
249 * In order to use IPSEC properly, we must, in the end, have a
250 * route that was looked up using all available keys including source
251 * and destination ports.
253 * However, if a source port needs to be allocated (the user specified
254 * a wildcard source port) we need to obtain addressing information
255 * in order to perform that allocation.
257 * So ip_route_connect() looks up a route using wildcarded source and
258 * destination ports in the key, simply so that we can get a pair of
259 * addresses to use for port allocation.
261 * Later, once the ports are allocated, ip_route_newports() will make
262 * another route lookup if needed to make sure we catch any IPSEC
263 * rules keyed on the port information.
265 * The callers allocate the flow key on their stack, and must pass in
266 * the same flowi4 object to both the ip_route_connect() and the
267 * ip_route_newports() calls.
270 static inline void ip_route_connect_init(struct flowi4
*fl4
, __be32 dst
, __be32 src
,
271 u32 tos
, int oif
, u8 protocol
,
272 __be16 sport
, __be16 dport
,
273 struct sock
*sk
, bool can_sleep
)
277 if (inet_sk(sk
)->transparent
)
278 flow_flags
|= FLOWI_FLAG_ANYSRC
;
279 if (protocol
== IPPROTO_TCP
)
280 flow_flags
|= FLOWI_FLAG_PRECOW_METRICS
;
282 flow_flags
|= FLOWI_FLAG_CAN_SLEEP
;
284 flowi4_init_output(fl4
, oif
, sk
->sk_mark
, tos
, RT_SCOPE_UNIVERSE
,
285 protocol
, flow_flags
, dst
, src
, dport
, sport
);
288 static inline struct rtable
*ip_route_connect(struct flowi4
*fl4
,
289 __be32 dst
, __be32 src
, u32 tos
,
290 int oif
, u8 protocol
,
291 __be16 sport
, __be16 dport
,
292 struct sock
*sk
, bool can_sleep
)
294 struct net
*net
= sock_net(sk
);
297 ip_route_connect_init(fl4
, dst
, src
, tos
, oif
, protocol
,
298 sport
, dport
, sk
, can_sleep
);
301 rt
= __ip_route_output_key(net
, fl4
);
305 flowi4_update_output(fl4
, oif
, tos
, fl4
->daddr
, fl4
->saddr
);
307 security_sk_classify_flow(sk
, flowi4_to_flowi(fl4
));
308 return ip_route_output_flow(net
, fl4
, sk
);
311 static inline struct rtable
*ip_route_newports(struct flowi4
*fl4
, struct rtable
*rt
,
312 __be16 orig_sport
, __be16 orig_dport
,
313 __be16 sport
, __be16 dport
,
316 if (sport
!= orig_sport
|| dport
!= orig_dport
) {
317 fl4
->fl4_dport
= dport
;
318 fl4
->fl4_sport
= sport
;
320 flowi4_update_output(fl4
, sk
->sk_bound_dev_if
,
321 RT_CONN_FLAGS(sk
), fl4
->daddr
,
323 security_sk_classify_flow(sk
, flowi4_to_flowi(fl4
));
324 return ip_route_output_flow(sock_net(sk
), fl4
, sk
);
329 extern void rt_bind_peer(struct rtable
*rt
, __be32 daddr
, int create
);
331 static inline struct inet_peer
*__rt_get_peer(struct rtable
*rt
, __be32 daddr
, int create
)
334 return rt_peer_ptr(rt
);
336 rt_bind_peer(rt
, daddr
, create
);
337 return (rt_has_peer(rt
) ? rt_peer_ptr(rt
) : NULL
);
340 static inline struct inet_peer
*rt_get_peer(struct rtable
*rt
, __be32 daddr
)
342 return __rt_get_peer(rt
, daddr
, 0);
345 static inline struct inet_peer
*rt_get_peer_create(struct rtable
*rt
, __be32 daddr
)
347 return __rt_get_peer(rt
, daddr
, 1);
350 static inline int inet_iif(const struct sk_buff
*skb
)
352 return skb_rtable(skb
)->rt_iif
;
355 extern int sysctl_ip_default_ttl
;
357 static inline int ip4_dst_hoplimit(const struct dst_entry
*dst
)
359 int hoplimit
= dst_metric_raw(dst
, RTAX_HOPLIMIT
);
362 hoplimit
= sysctl_ip_default_ttl
;
366 #endif /* _ROUTE_H */