/*
 *	net/dst.h	Protocol independent destination cache definitions.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 */

#ifndef _NET_DST_H
#define _NET_DST_H

#include <net/dst_ops.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/rcupdate.h>
#include <linux/jiffies.h>
#include <net/neighbour.h>
#include <asm/processor.h>

/*
 * 0 - no debugging messages
 * 1 - rare events and bugs (default)
 * 2 - trace mode.
 */
#define RT_CACHE_DEBUG		0

#define DST_GC_MIN	(HZ/10)
#define DST_GC_INC	(HZ/2)
#define DST_GC_MAX	(120*HZ)

/* Each dst_entry has a reference count and sits in some parent list(s).
 * When it is removed from a parent list, it is "freed" (dst_free).
 * After this it enters a dead state (dst->obsolete > 0); if its refcnt
 * is zero it can be destroyed immediately, otherwise it is added
 * to the gc list and the garbage collector periodically checks the refcnt.
 */
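
/* A minimal usage sketch (illustrative only; the calling context is
 * hypothetical, the helpers are the ones declared below):
 *
 *	dst_hold(dst);		// take a client reference
 *	// ... use dst for routing decisions ...
 *	dst_release(dst);	// drop it again
 *
 * Once the owner calls dst_free(), the entry goes dead but is only
 * destroyed after the last client reference has been released.
 */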

struct sk_buff;

struct dst_entry {
	struct rcu_head		rcu_head;
	struct dst_entry	*child;
	struct net_device	*dev;
	struct dst_ops		*ops;
	unsigned long		_metrics;
	unsigned long		expires;
	struct dst_entry	*path;
	struct neighbour	*neighbour;
	struct hh_cache		*hh;
#ifdef CONFIG_XFRM
	struct xfrm_state	*xfrm;
#else
	void			*__pad1;
#endif
	int			(*input)(struct sk_buff *);
	int			(*output)(struct sk_buff *);

	short			error;
	short			obsolete;
	unsigned short		header_len;	/* more space at head required */
	unsigned short		trailer_len;	/* space to reserve at tail */
#ifdef CONFIG_IP_ROUTE_CLASSID
	__u32			tclassid;
#else
	__u32			__pad2;
#endif

	/*
	 * Align __refcnt on a 64-byte boundary
	 * (L1_CACHE_SIZE would be too much)
	 */
#ifdef CONFIG_64BIT
	long			__pad_to_align_refcnt[1];
#endif
	/*
	 * __refcnt wants to be on a different cache line from
	 * input/output/ops or performance tanks badly
	 */
	atomic_t		__refcnt;	/* client references */
	int			__use;
	unsigned long		lastuse;
	int			flags;
#define DST_HOST		0x0001
#define DST_NOXFRM		0x0002
#define DST_NOPOLICY		0x0004
#define DST_NOHASH		0x0008
#define DST_NOCACHE		0x0010
	union {
		struct dst_entry	*next;
		struct rtable __rcu	*rt_next;
		struct rt6_info		*rt6_next;
		struct dn_route __rcu	*dn_next;
	};
};

extern u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old);
extern const u32 dst_default_metrics[RTAX_MAX];

#define DST_METRICS_READ_ONLY	0x1UL
#define __DST_METRICS_PTR(Y)	\
	((u32 *)((Y) & ~DST_METRICS_READ_ONLY))
#define DST_METRICS_PTR(X)	__DST_METRICS_PTR((X)->_metrics)

static inline bool dst_metrics_read_only(const struct dst_entry *dst)
{
	return dst->_metrics & DST_METRICS_READ_ONLY;
}

extern void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old);

static inline void dst_destroy_metrics_generic(struct dst_entry *dst)
{
	unsigned long val = dst->_metrics;
	if (!(val & DST_METRICS_READ_ONLY))
		__dst_destroy_metrics_generic(dst, val);
}

static inline u32 *dst_metrics_write_ptr(struct dst_entry *dst)
{
	unsigned long p = dst->_metrics;

	if (p & DST_METRICS_READ_ONLY)
		return dst->ops->cow_metrics(dst, p);
	return __DST_METRICS_PTR(p);
}
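
/* dst->_metrics holds a pointer to a u32[RTAX_MAX] array with the low
 * bit borrowed as the read-only tag (the array is word aligned, so bit
 * 0 is free).  A hedged sketch of a metric update (value made up):
 *
 *	u32 *p = dst_metrics_write_ptr(dst);	// COWs a shared array
 *	if (p)
 *		p[RTAX_MTU - 1] = 1400;		// what dst_metric_set() does
 */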

/* This may only be invoked before the entry has reached global
 * visibility.
 */
static inline void dst_init_metrics(struct dst_entry *dst,
				    const u32 *src_metrics,
				    bool read_only)
{
	dst->_metrics = ((unsigned long) src_metrics) |
		(read_only ? DST_METRICS_READ_ONLY : 0);
}
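
/* Typical initialization sketch for an entry that has no metrics of its
 * own (the caller context here is assumed, not taken from this file):
 *
 *	dst_init_metrics(dst, dst_default_metrics, true);
 *
 * The entry then shares the read-only default array until the first
 * write forces a private copy via ->cow_metrics().
 */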

static inline void dst_copy_metrics(struct dst_entry *dest, const struct dst_entry *src)
{
	u32 *dst_metrics = dst_metrics_write_ptr(dest);

	if (dst_metrics) {
		u32 *src_metrics = DST_METRICS_PTR(src);

		memcpy(dst_metrics, src_metrics, RTAX_MAX * sizeof(u32));
	}
}

static inline u32 *dst_metrics_ptr(struct dst_entry *dst)
{
	return DST_METRICS_PTR(dst);
}

static inline u32
dst_metric_raw(const struct dst_entry *dst, const int metric)
{
	u32 *p = DST_METRICS_PTR(dst);

	/* RTAX_* values are 1-based, hence the -1 */
	return p[metric-1];
}

static inline u32
dst_metric(const struct dst_entry *dst, const int metric)
{
	/* These metrics have dedicated accessors that fall back to
	 * per-ops defaults (e.g. dst_mtu(), dst_metric_advmss());
	 * use those instead.
	 */
	WARN_ON_ONCE(metric == RTAX_HOPLIMIT ||
		     metric == RTAX_ADVMSS ||
		     metric == RTAX_MTU);
	return dst_metric_raw(dst, metric);
}

static inline u32
dst_metric_advmss(const struct dst_entry *dst)
{
	u32 advmss = dst_metric_raw(dst, RTAX_ADVMSS);

	if (!advmss)
		advmss = dst->ops->default_advmss(dst);

	return advmss;
}

static inline void dst_metric_set(struct dst_entry *dst, int metric, u32 val)
{
	u32 *p = dst_metrics_write_ptr(dst);

	if (p)
		p[metric-1] = val;
}

static inline u32
dst_feature(const struct dst_entry *dst, u32 feature)
{
	return dst_metric(dst, RTAX_FEATURES) & feature;
}

static inline u32 dst_mtu(const struct dst_entry *dst)
{
	u32 mtu = dst_metric_raw(dst, RTAX_MTU);

	if (!mtu)
		mtu = dst->ops->default_mtu(dst);

	return mtu;
}

/* RTT metrics are stored in milliseconds for user ABI, but used as jiffies */
static inline unsigned long dst_metric_rtt(const struct dst_entry *dst, int metric)
{
	return msecs_to_jiffies(dst_metric(dst, metric));
}

static inline void set_dst_metric_rtt(struct dst_entry *dst, int metric,
				      unsigned long rtt)
{
	dst_metric_set(dst, metric, jiffies_to_msecs(rtt));
}
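
/* Units sketch (caller context assumed): user space sees milliseconds,
 * the stack computes in jiffies, and the conversion is hidden here:
 *
 *	set_dst_metric_rtt(dst, RTAX_RTT, srtt);	// srtt in jiffies
 *	rtt = dst_metric_rtt(dst, RTAX_RTT);		// back in jiffies
 */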

static inline u32
dst_allfrag(const struct dst_entry *dst)
{
	return dst_feature(dst, RTAX_FEATURE_ALLFRAG);
}

static inline int
dst_metric_locked(const struct dst_entry *dst, int metric)
{
	return dst_metric(dst, RTAX_LOCK) & (1<<metric);
}

static inline void dst_hold(struct dst_entry *dst)
{
	/*
	 * If your kernel compilation stops here, please check
	 * __pad_to_align_refcnt declaration in struct dst_entry
	 */
	BUILD_BUG_ON(offsetof(struct dst_entry, __refcnt) & 63);
	atomic_inc(&dst->__refcnt);
}

static inline void dst_use(struct dst_entry *dst, unsigned long time)
{
	dst_hold(dst);
	dst->__use++;
	dst->lastuse = time;
}

static inline void dst_use_noref(struct dst_entry *dst, unsigned long time)
{
	dst->__use++;
	dst->lastuse = time;
}

static inline struct dst_entry *dst_clone(struct dst_entry *dst)
{
	if (dst)
		atomic_inc(&dst->__refcnt);
	return dst;
}

extern void dst_release(struct dst_entry *dst);

static inline void refdst_drop(unsigned long refdst)
{
	if (!(refdst & SKB_DST_NOREF))
		dst_release((struct dst_entry *)(refdst & SKB_DST_PTRMASK));
}

/**
 * skb_dst_drop - drops skb dst
 * @skb: buffer
 *
 * Drops dst reference count if a reference was taken.
 */
static inline void skb_dst_drop(struct sk_buff *skb)
{
	if (skb->_skb_refdst) {
		refdst_drop(skb->_skb_refdst);
		skb->_skb_refdst = 0UL;
	}
}
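
/* skb->_skb_refdst packs the dst pointer together with SKB_DST_NOREF in
 * its low bits (the encoding lives in skbuff.h).  Decoding sketch, as
 * refdst_drop() above does it:
 *
 *	struct dst_entry *dst =
 *		(struct dst_entry *)(skb->_skb_refdst & SKB_DST_PTRMASK);
 */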

static inline void skb_dst_copy(struct sk_buff *nskb, const struct sk_buff *oskb)
{
	nskb->_skb_refdst = oskb->_skb_refdst;
	if (!(nskb->_skb_refdst & SKB_DST_NOREF))
		dst_clone(skb_dst(nskb));
}

/**
 * skb_dst_force - makes sure skb dst is refcounted
 * @skb: buffer
 *
 * If the dst is not yet refcounted, take a real reference now.
 */
static inline void skb_dst_force(struct sk_buff *skb)
{
	if (skb_dst_is_noref(skb)) {
		WARN_ON(!rcu_read_lock_held());
		skb->_skb_refdst &= ~SKB_DST_NOREF;
		dst_clone(skb_dst(skb));
	}
}
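
/* Sketch of the intended pattern (the surrounding caller is assumed): a
 * noref dst is only valid inside the rcu_read_lock() section that set
 * it, so a path that queues the skb for later must pin it first:
 *
 *	rcu_read_lock();
 *	// ... route lookup stored a noref dst in the skb ...
 *	skb_dst_force(skb);	// noref -> real reference
 *	rcu_read_unlock();
 *	// skb may now safely outlive the RCU section
 */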


/**
 * __skb_tunnel_rx - prepare skb for rx reinsert
 * @skb: buffer
 * @dev: tunnel device
 *
 * After decapsulation, the packet is going to re-enter (netif_rx()) our
 * stack, so perform some cleanups. (No accounting is done.)
 */
static inline void __skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev)
{
	skb->dev = dev;
	skb->rxhash = 0;
	skb_set_queue_mapping(skb, 0);
	skb_dst_drop(skb);
	nf_reset(skb);
}

/**
 * skb_tunnel_rx - prepare skb for rx reinsert
 * @skb: buffer
 * @dev: tunnel device
 *
 * After decapsulation, the packet is going to re-enter (netif_rx()) our
 * stack, so perform some cleanups and the rx accounting.
 * Note: this accounting is not SMP safe.
 */
static inline void skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev)
{
	/* TODO : stats should be SMP safe */
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += skb->len;
	__skb_tunnel_rx(skb, dev);
}
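
/* Receive-path sketch for a tunnel driver (illustrative; the function
 * name and decapsulation step are hypothetical):
 *
 *	static void my_tunnel_rcv(struct sk_buff *skb, struct net_device *tun_dev)
 *	{
 *		// outer header already stripped by the caller
 *		skb_tunnel_rx(skb, tun_dev);	// cleanups + rx accounting
 *		netif_rx(skb);			// re-enter the stack
 *	}
 */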

/* Children define the path of the packet through the Linux networking
 * stack. Thus, destinations are stackable.
 */

static inline struct dst_entry *skb_dst_pop(struct sk_buff *skb)
{
	struct dst_entry *child = dst_clone(skb_dst(skb)->child);

	skb_dst_drop(skb);
	return child;
}
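
/* Descending one level of a stacked dst (roughly what an xfrm-style
 * output path might do; this is a sketch, not a copy of kernel code):
 *
 *	struct dst_entry *child = skb_dst_pop(skb);	// ref now on child
 *	skb_dst_set(skb, child);			// descend one level
 */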

extern int dst_discard(struct sk_buff *skb);
extern void *dst_alloc(struct dst_ops *ops, int initial_ref);
extern void __dst_free(struct dst_entry *dst);
extern struct dst_entry *dst_destroy(struct dst_entry *dst);

static inline void dst_free(struct dst_entry *dst)
{
	if (dst->obsolete > 1)
		return;
	if (!atomic_read(&dst->__refcnt)) {
		dst = dst_destroy(dst);
		if (!dst)
			return;
	}
	__dst_free(dst);
}

static inline void dst_rcu_free(struct rcu_head *head)
{
	struct dst_entry *dst = container_of(head, struct dst_entry, rcu_head);
	dst_free(dst);
}

static inline void dst_confirm(struct dst_entry *dst)
{
	if (dst)
		neigh_confirm(dst->neighbour);
}

static inline void dst_link_failure(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	if (dst && dst->ops && dst->ops->link_failure)
		dst->ops->link_failure(skb);
}

static inline void dst_set_expires(struct dst_entry *dst, int timeout)
{
	unsigned long expires = jiffies + timeout;

	/* 0 is reserved to mean "no expiry set", so never store it */
	if (expires == 0)
		expires = 1;

	if (dst->expires == 0 || time_before(expires, dst->expires))
		dst->expires = expires;
}

/* Output packet to network from transport. */
static inline int dst_output(struct sk_buff *skb)
{
	return skb_dst(skb)->output(skb);
}

/* Input packet from network to transport. */
static inline int dst_input(struct sk_buff *skb)
{
	return skb_dst(skb)->input(skb);
}
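
/* Delivery sketch (caller context assumed): once routing has attached a
 * dst to the skb, forwarding is just an indirect call:
 *
 *	skb_dst_set(skb, dst);		// e.g. result of a route lookup
 *	err = dst_output(skb);		// transmit side
 *	// or, on receive: err = dst_input(skb);
 */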

static inline struct dst_entry *dst_check(struct dst_entry *dst, u32 cookie)
{
	if (dst->obsolete)
		dst = dst->ops->check(dst, cookie);
	return dst;
}

extern void dst_init(void);

/* Flags for xfrm_lookup flags argument. */
enum {
	XFRM_LOOKUP_ICMP = 1 << 0,
};

struct flowi;
#ifndef CONFIG_XFRM
static inline struct dst_entry *xfrm_lookup(struct net *net,
					    struct dst_entry *dst_orig,
					    const struct flowi *fl, struct sock *sk,
					    int flags)
{
	return dst_orig;
}
#else
extern struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
				     const struct flowi *fl, struct sock *sk,
				     int flags);
#endif
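
/* Caller sketch (hypothetical, error handling elided): transform a plain
 * routing result through IPsec policy, assuming the ERR_PTR return
 * convention of this xfrm_lookup() signature:
 *
 *	dst = xfrm_lookup(net, dst, &fl, sk, 0);
 *	if (IS_ERR(dst))
 *		return PTR_ERR(dst);
 *	skb_dst_set(skb, dst);
 */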

#endif /* _NET_DST_H */