Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * net/dst.h Protocol independent destination cache definitions. | |
3 | * | |
4 | * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru> | |
5 | * | |
6 | */ | |
7 | ||
8 | #ifndef _NET_DST_H | |
9 | #define _NET_DST_H | |
10 | ||
86393e52 | 11 | #include <net/dst_ops.h> |
14c85021 | 12 | #include <linux/netdevice.h> |
1da177e4 LT |
13 | #include <linux/rtnetlink.h> |
14 | #include <linux/rcupdate.h> | |
15 | #include <linux/jiffies.h> | |
16 | #include <net/neighbour.h> | |
17 | #include <asm/processor.h> | |
18 | ||
/*
 * Routing-cache debug verbosity:
 * 0 - no debugging messages
 * 1 - rare events and bugs (default)
 * 2 - trace mode.
 */
#define RT_CACHE_DEBUG		0

/* Garbage-collection interval bounds for the dst cache, in jiffies. */
#define DST_GC_MIN	(HZ/10)		/* shortest interval between GC passes */
#define DST_GC_INC	(HZ/2)		/* backoff increment when entries remain */
#define DST_GC_MAX	(120*HZ)	/* cap on the GC interval */
29 | ||
30 | /* Each dst_entry has reference count and sits in some parent list(s). | |
31 | * When it is removed from parent list, it is "freed" (dst_free). | |
32 | * After this it enters dead state (dst->obsolete > 0) and if its refcnt | |
33 | * is zero, it can be destroyed immediately, otherwise it is added | |
34 | * to gc list and garbage collector periodically checks the refcnt. | |
35 | */ | |
36 | ||
37 | struct sk_buff; | |
38 | ||
/*
 * Protocol-independent destination cache entry.  Embedded at the start of
 * the per-protocol route structures (see the rt_next/rt6_next/dn_next
 * union below).  Field order is deliberate: __refcnt is padded onto its
 * own 64-byte-aligned slot, away from the read-mostly input/output/ops
 * fields (see comments below and the BUILD_BUG_ON in dst_hold()).
 */
struct dst_entry {
	struct rcu_head		rcu_head;	/* for RCU-deferred freeing */
	struct dst_entry	*child;		/* next dst in a stacked path (e.g. xfrm) */
	struct net_device	*dev;
	struct dst_ops		*ops;		/* per-protocol operations */
	unsigned long		_metrics;	/* RTAX_* array ptr | DST_METRICS_READ_ONLY bit */
	unsigned long		expires;	/* expiry time in jiffies; 0 == never */
	struct dst_entry	*path;		/* bottom of the dst stack */
	struct neighbour	*neighbour;
	struct hh_cache		*hh;		/* cached hardware header */
#ifdef CONFIG_XFRM
	struct xfrm_state	*xfrm;
#else
	void			*__pad1;	/* keep layout identical without XFRM */
#endif
	int			(*input)(struct sk_buff*);
	int			(*output)(struct sk_buff*);

	short			error;
	short			obsolete;	/* >0: dead, see dst_free()/dst_check() */
	unsigned short		header_len;	/* more space at head required */
	unsigned short		trailer_len;	/* space to reserve at tail */
#ifdef CONFIG_IP_ROUTE_CLASSID
	__u32			tclassid;
#else
	__u32			__pad2;		/* keep layout identical without classid */
#endif

	/*
	 * Align __refcnt to a 64 bytes alignment
	 * (L1_CACHE_SIZE would be too much)
	 */
#ifdef CONFIG_64BIT
	long			__pad_to_align_refcnt[1];
#endif
	/*
	 * __refcnt wants to be on a different cache line from
	 * input/output/ops or performance tanks badly
	 */
	atomic_t		__refcnt;	/* client references */
	int			__use;		/* use counter, see dst_use() */
	unsigned long		lastuse;	/* jiffies of last use */
	unsigned long		rate_last;	/* rate limiting for ICMP */
	unsigned int		rate_tokens;
	int			flags;
#define DST_HOST		0x0001
#define DST_NOXFRM		0x0002
#define DST_NOPOLICY		0x0004
#define DST_NOHASH		0x0008
#define DST_NOCACHE		0x0010
	/* hash-chain linkage, viewed through the owning protocol's type */
	union {
		struct dst_entry	*next;
		struct rtable __rcu	*rt_next;
		struct rt6_info		*rt6_next;
		struct dn_route __rcu	*dn_next;
	};
};
96 | ||
1da177e4 LT |
97 | #ifdef __KERNEL__ |
98 | ||
62fa8a84 DM |
extern u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old);

/*
 * dst->_metrics packs a pointer to the RTAX_* metrics array together with
 * a low-bit flag marking the array as shared/read-only (must be copied
 * before writing, see dst_metrics_write_ptr()).
 */
#define DST_METRICS_READ_ONLY	0x1UL
#define __DST_METRICS_PTR(Y)	\
	((u32 *)((Y) & ~DST_METRICS_READ_ONLY))
#define DST_METRICS_PTR(X)	__DST_METRICS_PTR((X)->_metrics)
105 | ||
106 | static inline bool dst_metrics_read_only(const struct dst_entry *dst) | |
107 | { | |
108 | return dst->_metrics & DST_METRICS_READ_ONLY; | |
109 | } | |
110 | ||
111 | extern void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old); | |
112 | ||
113 | static inline void dst_destroy_metrics_generic(struct dst_entry *dst) | |
114 | { | |
115 | unsigned long val = dst->_metrics; | |
116 | if (!(val & DST_METRICS_READ_ONLY)) | |
117 | __dst_destroy_metrics_generic(dst, val); | |
118 | } | |
119 | ||
120 | static inline u32 *dst_metrics_write_ptr(struct dst_entry *dst) | |
121 | { | |
122 | unsigned long p = dst->_metrics; | |
123 | ||
124 | if (p & DST_METRICS_READ_ONLY) | |
125 | return dst->ops->cow_metrics(dst, p); | |
126 | return __DST_METRICS_PTR(p); | |
127 | } | |
128 | ||
129 | /* This may only be invoked before the entry has reached global | |
130 | * visibility. | |
131 | */ | |
132 | static inline void dst_init_metrics(struct dst_entry *dst, | |
133 | const u32 *src_metrics, | |
134 | bool read_only) | |
135 | { | |
136 | dst->_metrics = ((unsigned long) src_metrics) | | |
137 | (read_only ? DST_METRICS_READ_ONLY : 0); | |
138 | } | |
139 | ||
140 | static inline void dst_copy_metrics(struct dst_entry *dest, const struct dst_entry *src) | |
141 | { | |
142 | u32 *dst_metrics = dst_metrics_write_ptr(dest); | |
143 | ||
144 | if (dst_metrics) { | |
145 | u32 *src_metrics = DST_METRICS_PTR(src); | |
146 | ||
147 | memcpy(dst_metrics, src_metrics, RTAX_MAX * sizeof(u32)); | |
148 | } | |
149 | } | |
150 | ||
151 | static inline u32 *dst_metrics_ptr(struct dst_entry *dst) | |
152 | { | |
153 | return DST_METRICS_PTR(dst); | |
154 | } | |
155 | ||
1da177e4 | 156 | static inline u32 |
5170ae82 | 157 | dst_metric_raw(const struct dst_entry *dst, const int metric) |
1da177e4 | 158 | { |
62fa8a84 DM |
159 | u32 *p = DST_METRICS_PTR(dst); |
160 | ||
161 | return p[metric-1]; | |
defb3519 DM |
162 | } |
163 | ||
5170ae82 DM |
/*
 * Checked metric read.  HOPLIMIT/ADVMSS/MTU have dst_ops fallbacks when
 * unset, so reading them here (raw) is almost certainly a bug -- use
 * dst_metric_advmss()/dst_mtu() etc. instead.
 */
static inline u32
dst_metric(const struct dst_entry *dst, const int metric)
{
	WARN_ON_ONCE(metric == RTAX_HOPLIMIT ||
		     metric == RTAX_ADVMSS ||
		     metric == RTAX_MTU);
	return dst_metric_raw(dst, metric);
}
172 | ||
0dbaee3b DM |
173 | static inline u32 |
174 | dst_metric_advmss(const struct dst_entry *dst) | |
175 | { | |
176 | u32 advmss = dst_metric_raw(dst, RTAX_ADVMSS); | |
177 | ||
178 | if (!advmss) | |
179 | advmss = dst->ops->default_advmss(dst); | |
180 | ||
181 | return advmss; | |
182 | } | |
183 | ||
defb3519 DM |
184 | static inline void dst_metric_set(struct dst_entry *dst, int metric, u32 val) |
185 | { | |
62fa8a84 | 186 | u32 *p = dst_metrics_write_ptr(dst); |
defb3519 | 187 | |
62fa8a84 DM |
188 | if (p) |
189 | p[metric-1] = val; | |
1da177e4 LT |
190 | } |
191 | ||
0c3adfb8 GBY |
192 | static inline u32 |
193 | dst_feature(const struct dst_entry *dst, u32 feature) | |
194 | { | |
bb5b7c11 | 195 | return dst_metric(dst, RTAX_FEATURES) & feature; |
0c3adfb8 GBY |
196 | } |
197 | ||
1da177e4 LT |
198 | static inline u32 dst_mtu(const struct dst_entry *dst) |
199 | { | |
d33e4553 DM |
200 | u32 mtu = dst_metric_raw(dst, RTAX_MTU); |
201 | ||
202 | if (!mtu) | |
203 | mtu = dst->ops->default_mtu(dst); | |
204 | ||
1da177e4 LT |
205 | return mtu; |
206 | } | |
207 | ||
c1e20f7c SH |
208 | /* RTT metrics are stored in milliseconds for user ABI, but used as jiffies */ |
209 | static inline unsigned long dst_metric_rtt(const struct dst_entry *dst, int metric) | |
210 | { | |
211 | return msecs_to_jiffies(dst_metric(dst, metric)); | |
212 | } | |
213 | ||
214 | static inline void set_dst_metric_rtt(struct dst_entry *dst, int metric, | |
215 | unsigned long rtt) | |
216 | { | |
defb3519 | 217 | dst_metric_set(dst, metric, jiffies_to_msecs(rtt)); |
c1e20f7c SH |
218 | } |
219 | ||
1da177e4 LT |
/* Nonzero when the ALLFRAG feature is set (fragment all output packets). */
static inline u32
dst_allfrag(const struct dst_entry *dst)
{
	int ret = dst_feature(dst, RTAX_FEATURE_ALLFRAG);
	/* Yes, _exactly_. This is paranoia. */
	barrier();
	return ret;
}
228 | ||
229 | static inline int | |
d33e4553 | 230 | dst_metric_locked(const struct dst_entry *dst, int metric) |
1da177e4 LT |
231 | { |
232 | return dst_metric(dst, RTAX_LOCK) & (1<<metric); | |
233 | } | |
234 | ||
/* Take a client reference on dst. */
static inline void dst_hold(struct dst_entry * dst)
{
	/*
	 * If your kernel compilation stops here, please check
	 * __pad_to_align_refcnt declaration in struct dst_entry
	 */
	BUILD_BUG_ON(offsetof(struct dst_entry, __refcnt) & 63);
	atomic_inc(&dst->__refcnt);
}
244 | ||
03f49f34 PE |
245 | static inline void dst_use(struct dst_entry *dst, unsigned long time) |
246 | { | |
247 | dst_hold(dst); | |
248 | dst->__use++; | |
249 | dst->lastuse = time; | |
250 | } | |
251 | ||
7fee226a ED |
252 | static inline void dst_use_noref(struct dst_entry *dst, unsigned long time) |
253 | { | |
254 | dst->__use++; | |
255 | dst->lastuse = time; | |
256 | } | |
257 | ||
1da177e4 LT |
258 | static inline |
259 | struct dst_entry * dst_clone(struct dst_entry * dst) | |
260 | { | |
261 | if (dst) | |
262 | atomic_inc(&dst->__refcnt); | |
263 | return dst; | |
264 | } | |
265 | ||
8d330868 | 266 | extern void dst_release(struct dst_entry *dst); |
7fee226a ED |
267 | |
268 | static inline void refdst_drop(unsigned long refdst) | |
269 | { | |
270 | if (!(refdst & SKB_DST_NOREF)) | |
271 | dst_release((struct dst_entry *)(refdst & SKB_DST_PTRMASK)); | |
272 | } | |
273 | ||
274 | /** | |
275 | * skb_dst_drop - drops skb dst | |
276 | * @skb: buffer | |
277 | * | |
278 | * Drops dst reference count if a reference was taken. | |
279 | */ | |
adf30907 ED |
280 | static inline void skb_dst_drop(struct sk_buff *skb) |
281 | { | |
7fee226a ED |
282 | if (skb->_skb_refdst) { |
283 | refdst_drop(skb->_skb_refdst); | |
284 | skb->_skb_refdst = 0UL; | |
285 | } | |
286 | } | |
287 | ||
288 | static inline void skb_dst_copy(struct sk_buff *nskb, const struct sk_buff *oskb) | |
289 | { | |
290 | nskb->_skb_refdst = oskb->_skb_refdst; | |
291 | if (!(nskb->_skb_refdst & SKB_DST_NOREF)) | |
292 | dst_clone(skb_dst(nskb)); | |
293 | } | |
294 | ||
/**
 * skb_dst_force - makes sure skb dst is refcounted
 * @skb: buffer
 *
 * If dst is not yet refcounted, let's do it
 */
static inline void skb_dst_force(struct sk_buff *skb)
{
	if (skb_dst_is_noref(skb)) {
		/* noref dsts are only valid under rcu_read_lock() */
		WARN_ON(!rcu_read_lock_held());
		/* clear the noref bit, then take a real reference */
		skb->_skb_refdst &= ~SKB_DST_NOREF;
		dst_clone(skb_dst(skb));
	}
}
1da177e4 | 309 | |
d19d56dd | 310 | |
290b895e ED |
/**
 * __skb_tunnel_rx - prepare skb for rx reinsert
 * @skb: buffer
 * @dev: tunnel device
 *
 * After decapsulation, packet is going to re-enter (netif_rx()) our stack,
 * so make some cleanups. (no accounting done)
 */
static inline void __skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev)
{
	skb->dev = dev;
	skb->rxhash = 0;		/* outer-header rx hash is stale now */
	skb_set_queue_mapping(skb, 0);
	skb_dst_drop(skb);		/* route again for the inner packet */
	nf_reset(skb);			/* forget the outer packet's nf state */
}
327 | ||
d19d56dd ED |
328 | /** |
329 | * skb_tunnel_rx - prepare skb for rx reinsert | |
330 | * @skb: buffer | |
331 | * @dev: tunnel device | |
332 | * | |
333 | * After decapsulation, packet is going to re-enter (netif_rx()) our stack, | |
334 | * so make some cleanups, and perform accounting. | |
290b895e | 335 | * Note: this accounting is not SMP safe. |
d19d56dd ED |
336 | */ |
337 | static inline void skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev) | |
338 | { | |
d19d56dd ED |
339 | /* TODO : stats should be SMP safe */ |
340 | dev->stats.rx_packets++; | |
341 | dev->stats.rx_bytes += skb->len; | |
290b895e | 342 | __skb_tunnel_rx(skb, dev); |
d19d56dd ED |
343 | } |
344 | ||
1da177e4 LT |
/* Children define the path of the packet through the
 * Linux networking.  Thus, destinations are stackable.
 */

/* Detach and return the next dst in the stack; drops skb's current dst.
 * Note: child must be read before skb_dst_drop() invalidates skb's dst.
 */
static inline struct dst_entry *skb_dst_pop(struct sk_buff *skb)
{
	struct dst_entry *child = skb_dst(skb)->child;

	skb_dst_drop(skb);
	return child;
}
356 | ||
/* Default handler that drops the packet; usable as input/output hook. */
extern int dst_discard(struct sk_buff *skb);
/* Allocate a new dst using the given per-protocol ops. */
extern void * dst_alloc(struct dst_ops * ops);
/* Mark dst dead and queue it for garbage collection (see dst_free()). */
extern void __dst_free(struct dst_entry * dst);
/* Tear down dst; returns its child for the caller to free in turn, if any. */
extern struct dst_entry *dst_destroy(struct dst_entry * dst);
361 | ||
/*
 * Free a dst removed from its parent list: destroy it immediately when no
 * client references remain, otherwise hand it to the garbage collector.
 */
static inline void dst_free(struct dst_entry * dst)
{
	/* obsolete > 1: already on the free path, nothing to do
	 * (presumably set by __dst_free -- its body is out of view here).
	 */
	if (dst->obsolete > 1)
		return;
	if (!atomic_read(&dst->__refcnt)) {
		/* dst_destroy() hands back the child to dispose of, if any */
		dst = dst_destroy(dst);
		if (!dst)
			return;
	}
	__dst_free(dst);
}
373 | ||
/* RCU callback: recover the dst embedding this rcu_head and free it. */
static inline void dst_rcu_free(struct rcu_head *head)
{
	struct dst_entry *dst = container_of(head, struct dst_entry, rcu_head);
	dst_free(dst);
}
379 | ||
380 | static inline void dst_confirm(struct dst_entry *dst) | |
381 | { | |
382 | if (dst) | |
383 | neigh_confirm(dst->neighbour); | |
384 | } | |
385 | ||
1da177e4 LT |
386 | static inline void dst_link_failure(struct sk_buff *skb) |
387 | { | |
adf30907 | 388 | struct dst_entry *dst = skb_dst(skb); |
1da177e4 LT |
389 | if (dst && dst->ops && dst->ops->link_failure) |
390 | dst->ops->link_failure(skb); | |
391 | } | |
392 | ||
393 | static inline void dst_set_expires(struct dst_entry *dst, int timeout) | |
394 | { | |
395 | unsigned long expires = jiffies + timeout; | |
396 | ||
397 | if (expires == 0) | |
398 | expires = 1; | |
399 | ||
400 | if (dst->expires == 0 || time_before(expires, dst->expires)) | |
401 | dst->expires = expires; | |
402 | } | |
403 | ||
404 | /* Output packet to network from transport. */ | |
405 | static inline int dst_output(struct sk_buff *skb) | |
406 | { | |
adf30907 | 407 | return skb_dst(skb)->output(skb); |
1da177e4 LT |
408 | } |
409 | ||
410 | /* Input packet from network to transport. */ | |
411 | static inline int dst_input(struct sk_buff *skb) | |
412 | { | |
adf30907 | 413 | return skb_dst(skb)->input(skb); |
1da177e4 LT |
414 | } |
415 | ||
416 | static inline struct dst_entry *dst_check(struct dst_entry *dst, u32 cookie) | |
417 | { | |
418 | if (dst->obsolete) | |
419 | dst = dst->ops->check(dst, cookie); | |
420 | return dst; | |
421 | } | |
422 | ||
423 | extern void dst_init(void); | |
424 | ||
815f4e57 HX |
/* Flags for xfrm_lookup flags argument. */
enum {
	XFRM_LOOKUP_WAIT = 1 << 0,	/* NOTE(review): presumably "may block on policy resolution" -- confirm in xfrm_lookup() */
	XFRM_LOOKUP_ICMP = 1 << 1,	/* NOTE(review): presumably "lookup for ICMP error handling" -- confirm in xfrm_lookup() */
};
430 | ||
1da177e4 LT |
struct flowi;
#ifndef CONFIG_XFRM
/* Without IPsec, the transform lookups are no-ops that report success. */
static inline int xfrm_lookup(struct net *net, struct dst_entry **dst_p,
			      struct flowi *fl, struct sock *sk, int flags)
{
	return 0;
}
static inline int __xfrm_lookup(struct net *net, struct dst_entry **dst_p,
				struct flowi *fl, struct sock *sk, int flags)
{
	return 0;
}
#else
/* Resolve *dst_p through IPsec policy/state for the given flow. */
extern int xfrm_lookup(struct net *net, struct dst_entry **dst_p,
		       struct flowi *fl, struct sock *sk, int flags);
extern int __xfrm_lookup(struct net *net, struct dst_entry **dst_p,
			 struct flowi *fl, struct sock *sk, int flags);
#endif
449 | #endif | |
450 | ||
451 | #endif /* _NET_DST_H */ |