1 #ifndef _NET_NEIGHBOUR_H
2 #define _NET_NEIGHBOUR_H
4 #include <linux/neighbour.h>
/*
 *	Generic neighbour manipulation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
 *
 *	Changes:
 *
 *	Harald Welte:		<laforge@gnumonks.org>
 *		- Add neighbour cache statistics like rtstat
 */
19 #include <linux/atomic.h>
20 #include <linux/netdevice.h>
21 #include <linux/skbuff.h>
22 #include <linux/rcupdate.h>
23 #include <linux/seq_file.h>
25 #include <linux/err.h>
26 #include <linux/sysctl.h>
27 #include <linux/workqueue.h>
28 #include <net/rtnetlink.h>
/*
 * NUD stands for "neighbor unreachability detection"
 */
34 #define NUD_IN_TIMER (NUD_INCOMPLETE|NUD_REACHABLE|NUD_DELAY|NUD_PROBE)
35 #define NUD_VALID (NUD_PERMANENT|NUD_NOARP|NUD_REACHABLE|NUD_PROBE|NUD_STALE|NUD_DELAY)
36 #define NUD_CONNECTED (NUD_PERMANENT|NUD_NOARP|NUD_REACHABLE)
41 NEIGH_VAR_MCAST_PROBES
,
42 NEIGH_VAR_UCAST_PROBES
,
44 NEIGH_VAR_RETRANS_TIME
,
45 NEIGH_VAR_BASE_REACHABLE_TIME
,
46 NEIGH_VAR_DELAY_PROBE_TIME
,
47 NEIGH_VAR_GC_STALETIME
,
48 NEIGH_VAR_QUEUE_LEN_BYTES
,
50 NEIGH_VAR_ANYCAST_DELAY
,
51 NEIGH_VAR_PROXY_DELAY
,
53 #define NEIGH_VAR_DATA_MAX (NEIGH_VAR_LOCKTIME + 1)
54 /* Following are used as a second way to access one of the above */
55 NEIGH_VAR_QUEUE_LEN
, /* same data as NEIGH_VAR_QUEUE_LEN_BYTES */
56 NEIGH_VAR_RETRANS_TIME_MS
, /* same data as NEIGH_VAR_RETRANS_TIME */
57 NEIGH_VAR_BASE_REACHABLE_TIME_MS
, /* same data as NEIGH_VAR_BASE_REACHABLE_TIME */
58 /* Following are used by "default" only */
59 NEIGH_VAR_GC_INTERVAL
,
70 struct net_device
*dev
;
71 struct neigh_parms
*next
;
72 int (*neigh_setup
)(struct neighbour
*);
73 void (*neigh_cleanup
)(struct neighbour
*);
74 struct neigh_table
*tbl
;
80 struct rcu_head rcu_head
;
83 int data
[NEIGH_VAR_DATA_MAX
];
86 static inline void neigh_var_set(struct neigh_parms
*p
, int index
, int val
)
91 #define NEIGH_VAR(p, attr) ((p)->data[NEIGH_VAR_ ## attr])
92 #define NEIGH_VAR_SET(p, attr, val) neigh_var_set(p, NEIGH_VAR_ ## attr, val)
/* Per-CPU neighbour cache counters, exported via /proc (see NEIGH_CACHE_STAT_INC). */
struct neigh_statistics {
	unsigned long allocs;		/* number of allocated neighs */
	unsigned long destroys;		/* number of destroyed neighs */
	unsigned long hash_grows;	/* number of hash resizes */

	unsigned long res_failed;	/* number of failed resolutions */

	unsigned long lookups;		/* number of lookups */
	unsigned long hits;		/* number of hits (among lookups) */

	unsigned long rcv_probes_mcast;	/* number of received mcast ipv6 */
	unsigned long rcv_probes_ucast; /* number of received ucast ipv6 */

	unsigned long periodic_gc_runs;	/* number of periodic GC runs */
	unsigned long forced_gc_runs;	/* number of forced GC runs */

	unsigned long unres_discards;	/* number of unresolved drops */
};
113 #define NEIGH_CACHE_STAT_INC(tbl, field) this_cpu_inc((tbl)->stats->field)
116 struct neighbour __rcu
*next
;
117 struct neigh_table
*tbl
;
118 struct neigh_parms
*parms
;
119 unsigned long confirmed
;
120 unsigned long updated
;
123 struct sk_buff_head arp_queue
;
124 unsigned int arp_queue_len_bytes
;
125 struct timer_list timer
;
133 unsigned char ha
[ALIGN(MAX_ADDR_LEN
, sizeof(unsigned long))];
135 int (*output
)(struct neighbour
*, struct sk_buff
*);
136 const struct neigh_ops
*ops
;
138 struct net_device
*dev
;
144 void (*solicit
)(struct neighbour
*, struct sk_buff
*);
145 void (*error_report
)(struct neighbour
*, struct sk_buff
*);
146 int (*output
)(struct neighbour
*, struct sk_buff
*);
147 int (*connected_output
)(struct neighbour
*, struct sk_buff
*);
150 struct pneigh_entry
{
151 struct pneigh_entry
*next
;
155 struct net_device
*dev
;
/*
 *	neighbour table manipulation
 */
164 #define NEIGH_NUM_HASH_RND 4
166 struct neigh_hash_table
{
167 struct neighbour __rcu
**hash_buckets
;
168 unsigned int hash_shift
;
169 __u32 hash_rnd
[NEIGH_NUM_HASH_RND
];
175 struct neigh_table
*next
;
179 __u32 (*hash
)(const void *pkey
,
180 const struct net_device
*dev
,
182 int (*constructor
)(struct neighbour
*);
183 int (*pconstructor
)(struct pneigh_entry
*);
184 void (*pdestructor
)(struct pneigh_entry
*);
185 void (*proxy_redo
)(struct sk_buff
*skb
);
187 struct neigh_parms parms
;
188 /* HACK. gc_* should follow parms without a gap! */
193 unsigned long last_flush
;
194 struct delayed_work gc_work
;
195 struct timer_list proxy_timer
;
196 struct sk_buff_head proxy_queue
;
199 unsigned long last_rand
;
200 struct neigh_statistics __percpu
*stats
;
201 struct neigh_hash_table __rcu
*nht
;
202 struct pneigh_entry
**phash_buckets
;
205 #define NEIGH_PRIV_ALIGN sizeof(long long)
206 #define NEIGH_ENTRY_SIZE(size) ALIGN((size), NEIGH_PRIV_ALIGN)
208 static inline void *neighbour_priv(const struct neighbour
*n
)
210 return (char *)n
+ n
->tbl
->entry_size
;
213 /* flags for neigh_update() */
214 #define NEIGH_UPDATE_F_OVERRIDE 0x00000001
215 #define NEIGH_UPDATE_F_WEAK_OVERRIDE 0x00000002
216 #define NEIGH_UPDATE_F_OVERRIDE_ISROUTER 0x00000004
217 #define NEIGH_UPDATE_F_ISROUTER 0x40000000
218 #define NEIGH_UPDATE_F_ADMIN 0x80000000
220 void neigh_table_init(struct neigh_table
*tbl
);
221 int neigh_table_clear(struct neigh_table
*tbl
);
222 struct neighbour
*neigh_lookup(struct neigh_table
*tbl
, const void *pkey
,
223 struct net_device
*dev
);
224 struct neighbour
*neigh_lookup_nodev(struct neigh_table
*tbl
, struct net
*net
,
226 struct neighbour
*__neigh_create(struct neigh_table
*tbl
, const void *pkey
,
227 struct net_device
*dev
, bool want_ref
);
228 static inline struct neighbour
*neigh_create(struct neigh_table
*tbl
,
230 struct net_device
*dev
)
232 return __neigh_create(tbl
, pkey
, dev
, true);
234 void neigh_destroy(struct neighbour
*neigh
);
235 int __neigh_event_send(struct neighbour
*neigh
, struct sk_buff
*skb
);
236 int neigh_update(struct neighbour
*neigh
, const u8
*lladdr
, u8
new, u32 flags
);
237 void neigh_changeaddr(struct neigh_table
*tbl
, struct net_device
*dev
);
238 int neigh_ifdown(struct neigh_table
*tbl
, struct net_device
*dev
);
239 int neigh_resolve_output(struct neighbour
*neigh
, struct sk_buff
*skb
);
240 int neigh_connected_output(struct neighbour
*neigh
, struct sk_buff
*skb
);
241 int neigh_compat_output(struct neighbour
*neigh
, struct sk_buff
*skb
);
242 int neigh_direct_output(struct neighbour
*neigh
, struct sk_buff
*skb
);
243 struct neighbour
*neigh_event_ns(struct neigh_table
*tbl
,
244 u8
*lladdr
, void *saddr
,
245 struct net_device
*dev
);
247 struct neigh_parms
*neigh_parms_alloc(struct net_device
*dev
,
248 struct neigh_table
*tbl
);
249 void neigh_parms_release(struct neigh_table
*tbl
, struct neigh_parms
*parms
);
252 struct net
*neigh_parms_net(const struct neigh_parms
*parms
)
254 return read_pnet(&parms
->net
);
257 unsigned long neigh_rand_reach_time(unsigned long base
);
259 void pneigh_enqueue(struct neigh_table
*tbl
, struct neigh_parms
*p
,
260 struct sk_buff
*skb
);
261 struct pneigh_entry
*pneigh_lookup(struct neigh_table
*tbl
, struct net
*net
,
262 const void *key
, struct net_device
*dev
,
264 struct pneigh_entry
*__pneigh_lookup(struct neigh_table
*tbl
, struct net
*net
,
265 const void *key
, struct net_device
*dev
);
266 int pneigh_delete(struct neigh_table
*tbl
, struct net
*net
, const void *key
,
267 struct net_device
*dev
);
269 static inline struct net
*pneigh_net(const struct pneigh_entry
*pneigh
)
271 return read_pnet(&pneigh
->net
);
274 void neigh_app_ns(struct neighbour
*n
);
275 void neigh_for_each(struct neigh_table
*tbl
,
276 void (*cb
)(struct neighbour
*, void *), void *cookie
);
277 void __neigh_for_each_release(struct neigh_table
*tbl
,
278 int (*cb
)(struct neighbour
*));
279 void pneigh_for_each(struct neigh_table
*tbl
,
280 void (*cb
)(struct pneigh_entry
*));
282 struct neigh_seq_state
{
283 struct seq_net_private p
;
284 struct neigh_table
*tbl
;
285 struct neigh_hash_table
*nht
;
286 void *(*neigh_sub_iter
)(struct neigh_seq_state
*state
,
287 struct neighbour
*n
, loff_t
*pos
);
290 #define NEIGH_SEQ_NEIGH_ONLY 0x00000001
291 #define NEIGH_SEQ_IS_PNEIGH 0x00000002
292 #define NEIGH_SEQ_SKIP_NOARP 0x00000004
294 void *neigh_seq_start(struct seq_file
*, loff_t
*, struct neigh_table
*,
296 void *neigh_seq_next(struct seq_file
*, void *, loff_t
*);
297 void neigh_seq_stop(struct seq_file
*, void *);
299 int neigh_proc_dointvec(struct ctl_table
*ctl
, int write
,
300 void __user
*buffer
, size_t *lenp
, loff_t
*ppos
);
301 int neigh_proc_dointvec_jiffies(struct ctl_table
*ctl
, int write
,
303 size_t *lenp
, loff_t
*ppos
);
304 int neigh_proc_dointvec_ms_jiffies(struct ctl_table
*ctl
, int write
,
306 size_t *lenp
, loff_t
*ppos
);
308 int neigh_sysctl_register(struct net_device
*dev
, struct neigh_parms
*p
,
309 char *p_name
, proc_handler
*proc_handler
);
310 void neigh_sysctl_unregister(struct neigh_parms
*p
);
312 static inline void __neigh_parms_put(struct neigh_parms
*parms
)
314 atomic_dec(&parms
->refcnt
);
317 static inline struct neigh_parms
*neigh_parms_clone(struct neigh_parms
*parms
)
319 atomic_inc(&parms
->refcnt
);
324 * Neighbour references
327 static inline void neigh_release(struct neighbour
*neigh
)
329 if (atomic_dec_and_test(&neigh
->refcnt
))
330 neigh_destroy(neigh
);
333 static inline struct neighbour
* neigh_clone(struct neighbour
*neigh
)
336 atomic_inc(&neigh
->refcnt
);
340 #define neigh_hold(n) atomic_inc(&(n)->refcnt)
342 static inline int neigh_event_send(struct neighbour
*neigh
, struct sk_buff
*skb
)
344 unsigned long now
= jiffies
;
346 if (neigh
->used
!= now
)
348 if (!(neigh
->nud_state
&(NUD_CONNECTED
|NUD_DELAY
|NUD_PROBE
)))
349 return __neigh_event_send(neigh
, skb
);
353 #ifdef CONFIG_BRIDGE_NETFILTER
354 static inline int neigh_hh_bridge(struct hh_cache
*hh
, struct sk_buff
*skb
)
356 unsigned int seq
, hh_alen
;
359 seq
= read_seqbegin(&hh
->hh_lock
);
360 hh_alen
= HH_DATA_ALIGN(ETH_HLEN
);
361 memcpy(skb
->data
- hh_alen
, hh
->hh_data
, ETH_ALEN
+ hh_alen
- ETH_HLEN
);
362 } while (read_seqretry(&hh
->hh_lock
, seq
));
367 static inline int neigh_hh_output(const struct hh_cache
*hh
, struct sk_buff
*skb
)
373 seq
= read_seqbegin(&hh
->hh_lock
);
375 if (likely(hh_len
<= HH_DATA_MOD
)) {
376 /* this is inlined by gcc */
377 memcpy(skb
->data
- HH_DATA_MOD
, hh
->hh_data
, HH_DATA_MOD
);
379 int hh_alen
= HH_DATA_ALIGN(hh_len
);
381 memcpy(skb
->data
- hh_alen
, hh
->hh_data
, hh_alen
);
383 } while (read_seqretry(&hh
->hh_lock
, seq
));
385 skb_push(skb
, hh_len
);
386 return dev_queue_xmit(skb
);
389 static inline struct neighbour
*
390 __neigh_lookup(struct neigh_table
*tbl
, const void *pkey
, struct net_device
*dev
, int creat
)
392 struct neighbour
*n
= neigh_lookup(tbl
, pkey
, dev
);
397 n
= neigh_create(tbl
, pkey
, dev
);
398 return IS_ERR(n
) ? NULL
: n
;
/* Like __neigh_lookup() with unconditional creation, but preserves the
 * ERR_PTR from neigh_create() instead of mapping it to NULL.
 */
static inline struct neighbour *
__neigh_lookup_errno(struct neigh_table *tbl, const void *pkey,
  struct net_device *dev)
{
	struct neighbour *n = neigh_lookup(tbl, pkey, dev);

	if (n)
		return n;

	return neigh_create(tbl, pkey, dev);
}
413 struct neighbour_cb
{
414 unsigned long sched_next
;
418 #define LOCALLY_ENQUEUED 0x1
420 #define NEIGH_CB(skb) ((struct neighbour_cb *)(skb)->cb)
422 static inline void neigh_ha_snapshot(char *dst
, const struct neighbour
*n
,
423 const struct net_device
*dev
)
428 seq
= read_seqbegin(&n
->ha_lock
);
429 memcpy(dst
, n
->ha
, dev
->addr_len
);
430 } while (read_seqretry(&n
->ha_lock
, seq
));