/*
 *	Generic address resolution entity
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Fixes:
 *	Vitaly E. Lavrov	releasing NULL neighbor in neigh_add.
 *	Harald Welte		Add neighbour cache statistics like rtstat
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif
#include <linux/times.h>
#include <net/net_namespace.h>
#include <net/neighbour.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/netevent.h>
#include <net/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/log2.h>

#define NEIGH_DEBUG 1

#define NEIGH_PRINTK(x...) printk(x)
#define NEIGH_NOPRINTK(x...) do { ; } while(0)
#define NEIGH_PRINTK0 NEIGH_PRINTK
#define NEIGH_PRINTK1 NEIGH_NOPRINTK
#define NEIGH_PRINTK2 NEIGH_NOPRINTK

#if NEIGH_DEBUG >= 1
#undef NEIGH_PRINTK1
#define NEIGH_PRINTK1 NEIGH_PRINTK
#endif
#if NEIGH_DEBUG >= 2
#undef NEIGH_PRINTK2
#define NEIGH_PRINTK2 NEIGH_PRINTK
#endif

#define PNEIGH_HASHMASK		0xF

static void neigh_timer_handler(unsigned long arg);
static void __neigh_notify(struct neighbour *n, int type, int flags);
static void neigh_update_notify(struct neighbour *neigh);
static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev);

static struct neigh_table *neigh_tables;
#ifdef CONFIG_PROC_FS
static const struct file_operations neigh_stat_seq_fops;
#endif
/*
   Neighbour hash table buckets are protected with rwlock tbl->lock.

   - All the scans/updates to hash buckets MUST be made under this lock.
   - NOTHING clever should be done under this lock: no callbacks
     to protocol backends, no attempts to send something to the network.
     It will result in deadlocks if the backend/driver wants to use the
     neighbour cache.
   - If the entry requires some non-trivial actions, increase
     its reference count and release the table lock.

   Neighbour entries are protected:
   - with reference count.
   - with rwlock neigh->lock

   Reference count prevents destruction.

   neigh->lock mainly serializes ll address data and its validity state.
   However, the same lock is used to protect other entry fields:
    - timer
    - resolution queue

   Again, nothing clever should be done under neigh->lock;
   the most complicated procedure we allow is dev->hard_header.
   It is assumed that dev->hard_header is simplistic and does
   not make callbacks to neighbour tables.

   The last lock is neigh_tbl_lock. It is a pure SMP lock, protecting
   the list of neighbour tables. This list is used only in process context,
 */
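/*
 * A minimal sketch of the rule above (illustrative only, not called from
 * this file): pin an entry under tbl->lock, drop the lock, and only then
 * do anything non-trivial with it.  "slow_fn" is a hypothetical callback
 * standing in for real work such as talking to a driver.
 */
static inline void neigh_slow_work_example(struct neigh_table *tbl,
                                           struct neighbour *n,
                                           void (*slow_fn)(struct neighbour *))
{
        write_lock_bh(&tbl->lock);
        neigh_hold(n);                  /* pin the entry under the table lock */
        write_unlock_bh(&tbl->lock);

        slow_fn(n);                     /* anything non-trivial runs unlocked */

        neigh_release(n);
}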
static DEFINE_RWLOCK(neigh_tbl_lock);

static int neigh_blackhole(struct sk_buff *skb)
{
        kfree_skb(skb);
        return -ENETDOWN;
}

static void neigh_cleanup_and_release(struct neighbour *neigh)
{
        if (neigh->parms->neigh_cleanup)
                neigh->parms->neigh_cleanup(neigh);

        __neigh_notify(neigh, RTM_DELNEIGH, 0);
        neigh_release(neigh);
}

/*
 * It is a random distribution in the interval (1/2)*base...(3/2)*base.
 * It corresponds to the default IPv6 settings and is not overridable,
 * because it is a really reasonable choice.
 */

unsigned long neigh_rand_reach_time(unsigned long base)
{
        return (base ? (net_random() % base) + (base >> 1) : 0);
}
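/*
 * For example, with base = 30 * HZ the value above is uniform over
 * [15*HZ, 45*HZ): net_random() % base lies in [0, base) and base >> 1
 * shifts the interval up by base/2.  A quick self-check (illustrative
 * sketch only, not used by this file):
 */
static inline void neigh_rand_reach_time_check(unsigned long base)
{
        unsigned long t = neigh_rand_reach_time(base);

        BUG_ON(base && (t < (base >> 1) || t >= base + (base >> 1)));
}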
static int neigh_forced_gc(struct neigh_table *tbl)
{
        int shrunk = 0;
        int i;

        NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);

        write_lock_bh(&tbl->lock);
        for (i = 0; i <= tbl->hash_mask; i++) {
                struct neighbour *n, **np;

                np = &tbl->hash_buckets[i];
                while ((n = *np) != NULL) {
                        /* Neighbour record may be discarded if:
                         * - nobody refers to it.
                         * - it is not permanent
                         */
                        write_lock(&n->lock);
                        if (atomic_read(&n->refcnt) == 1 &&
                            !(n->nud_state & NUD_PERMANENT)) {
                                *np     = n->next;
                                n->dead = 1;
                                shrunk  = 1;
                                write_unlock(&n->lock);
                                neigh_cleanup_and_release(n);
                                continue;
                        }
                        write_unlock(&n->lock);
                        np = &n->next;
                }
        }

        tbl->last_flush = jiffies;

        write_unlock_bh(&tbl->lock);

        return shrunk;
}

static void neigh_add_timer(struct neighbour *n, unsigned long when)
{
        if (unlikely(mod_timer(&n->timer, when))) {
                printk("NEIGH: BUG, double timer add, state is %x\n",
                       n->nud_state);
                dump_stack();
        }
}

static int neigh_del_timer(struct neighbour *n)
{
        if ((n->nud_state & NUD_IN_TIMER) &&
            del_timer(&n->timer)) {
                neigh_release(n);
                return 1;
        }
        return 0;
}

static void pneigh_queue_purge(struct sk_buff_head *list)
{
        struct sk_buff *skb;

        while ((skb = skb_dequeue(list)) != NULL) {
                dev_put(skb->dev);
                kfree_skb(skb);
        }
}
static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev)
{
        int i;

        for (i = 0; i <= tbl->hash_mask; i++) {
                struct neighbour *n, **np = &tbl->hash_buckets[i];

                while ((n = *np) != NULL) {
                        if (dev && n->dev != dev) {
                                np = &n->next;
                                continue;
                        }
                        *np = n->next;
                        write_lock(&n->lock);
                        neigh_del_timer(n);
                        n->dead = 1;

                        if (atomic_read(&n->refcnt) != 1) {
                                /* The most unpleasant situation.
                                   We must destroy neighbour entry,
                                   but someone still uses it.

                                   The destroy will be delayed until
                                   the last user releases us, but
                                   we must kill timers etc. and move
                                   it to safe state.
                                 */
                                skb_queue_purge(&n->arp_queue);
                                n->output = neigh_blackhole;
                                if (n->nud_state & NUD_VALID)
                                        n->nud_state = NUD_NOARP;
                                else
                                        n->nud_state = NUD_NONE;
                                NEIGH_PRINTK2("neigh %p is stray.\n", n);
                        }
                        write_unlock(&n->lock);
                        neigh_cleanup_and_release(n);
                }
        }
}

void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
{
        write_lock_bh(&tbl->lock);
        neigh_flush_dev(tbl, dev);
        write_unlock_bh(&tbl->lock);
}

int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
        write_lock_bh(&tbl->lock);
        neigh_flush_dev(tbl, dev);
        pneigh_ifdown(tbl, dev);
        write_unlock_bh(&tbl->lock);

        del_timer_sync(&tbl->proxy_timer);
        pneigh_queue_purge(&tbl->proxy_queue);
        return 0;
}
static struct neighbour *neigh_alloc(struct neigh_table *tbl)
{
        struct neighbour *n = NULL;
        unsigned long now = jiffies;
        int entries;

        entries = atomic_inc_return(&tbl->entries) - 1;
        if (entries >= tbl->gc_thresh3 ||
            (entries >= tbl->gc_thresh2 &&
             time_after(now, tbl->last_flush + 5 * HZ))) {
                if (!neigh_forced_gc(tbl) &&
                    entries >= tbl->gc_thresh3)
                        goto out_entries;
        }

        n = kmem_cache_zalloc(tbl->kmem_cachep, GFP_ATOMIC);
        if (!n)
                goto out_entries;

        skb_queue_head_init(&n->arp_queue);
        rwlock_init(&n->lock);
        n->updated        = n->used = now;
        n->nud_state      = NUD_NONE;
        n->output         = neigh_blackhole;
        n->parms          = neigh_parms_clone(&tbl->parms);
        setup_timer(&n->timer, neigh_timer_handler, (unsigned long)n);

        NEIGH_CACHE_STAT_INC(tbl, allocs);
        n->tbl            = tbl;
        atomic_set(&n->refcnt, 1);
        n->dead           = 1;
out:
        return n;

out_entries:
        atomic_dec(&tbl->entries);
        goto out;
}

static struct neighbour **neigh_hash_alloc(unsigned int entries)
{
        unsigned long size = entries * sizeof(struct neighbour *);
        struct neighbour **ret;

        if (size <= PAGE_SIZE) {
                ret = kzalloc(size, GFP_ATOMIC);
        } else {
                ret = (struct neighbour **)
                      __get_free_pages(GFP_ATOMIC|__GFP_ZERO, get_order(size));
        }
        return ret;
}

static void neigh_hash_free(struct neighbour **hash, unsigned int entries)
{
        unsigned long size = entries * sizeof(struct neighbour *);

        if (size <= PAGE_SIZE)
                kfree(hash);
        else
                free_pages((unsigned long)hash, get_order(size));
}
static void neigh_hash_grow(struct neigh_table *tbl, unsigned long new_entries)
{
        struct neighbour **new_hash, **old_hash;
        unsigned int i, new_hash_mask, old_entries;

        NEIGH_CACHE_STAT_INC(tbl, hash_grows);

        BUG_ON(!is_power_of_2(new_entries));
        new_hash = neigh_hash_alloc(new_entries);
        if (!new_hash)
                return;

        old_entries = tbl->hash_mask + 1;
        new_hash_mask = new_entries - 1;
        old_hash = tbl->hash_buckets;

        get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));
        for (i = 0; i < old_entries; i++) {
                struct neighbour *n, *next;

                for (n = old_hash[i]; n; n = next) {
                        unsigned int hash_val = tbl->hash(n->primary_key, n->dev);

                        hash_val &= new_hash_mask;
                        next = n->next;

                        n->next = new_hash[hash_val];
                        new_hash[hash_val] = n;
                }
        }
        tbl->hash_buckets = new_hash;
        tbl->hash_mask = new_hash_mask;

        neigh_hash_free(old_hash, old_entries);
}
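/*
 * Because new_entries is a power of two, the "hash_val &= new_hash_mask"
 * above is the cheap form of "hash_val % new_entries" -- that is what the
 * BUG_ON(!is_power_of_2(new_entries)) guards.  Illustrative sketch only:
 */
static inline unsigned int neigh_bucket_of(u32 hash_val, unsigned int entries)
{
        BUG_ON(!is_power_of_2(entries));
        return hash_val & (entries - 1);        /* == hash_val % entries */
}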
struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
                               struct net_device *dev)
{
        struct neighbour *n;
        int key_len = tbl->key_len;
        u32 hash_val = tbl->hash(pkey, dev);

        NEIGH_CACHE_STAT_INC(tbl, lookups);

        read_lock_bh(&tbl->lock);
        for (n = tbl->hash_buckets[hash_val & tbl->hash_mask]; n; n = n->next) {
                if (dev == n->dev && !memcmp(n->primary_key, pkey, key_len)) {
                        neigh_hold(n);
                        NEIGH_CACHE_STAT_INC(tbl, hits);
                        break;
                }
        }
        read_unlock_bh(&tbl->lock);
        return n;
}

struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
                                     const void *pkey)
{
        struct neighbour *n;
        int key_len = tbl->key_len;
        u32 hash_val = tbl->hash(pkey, NULL);

        NEIGH_CACHE_STAT_INC(tbl, lookups);

        read_lock_bh(&tbl->lock);
        for (n = tbl->hash_buckets[hash_val & tbl->hash_mask]; n; n = n->next) {
                if (!memcmp(n->primary_key, pkey, key_len) &&
                    (net == n->dev->nd_net)) {
                        neigh_hold(n);
                        NEIGH_CACHE_STAT_INC(tbl, hits);
                        break;
                }
        }
        read_unlock_bh(&tbl->lock);
        return n;
}

struct neighbour *neigh_create(struct neigh_table *tbl, const void *pkey,
                               struct net_device *dev)
{
        u32 hash_val;
        int key_len = tbl->key_len;
        int error;
        struct neighbour *n1, *rc, *n = neigh_alloc(tbl);

        if (!n) {
                rc = ERR_PTR(-ENOBUFS);
                goto out;
        }

        memcpy(n->primary_key, pkey, key_len);
        n->dev = dev;
        dev_hold(dev);

        /* Protocol specific setup. */
        if (tbl->constructor && (error = tbl->constructor(n)) < 0) {
                rc = ERR_PTR(error);
                goto out_neigh_release;
        }

        /* Device specific setup. */
        if (n->parms->neigh_setup &&
            (error = n->parms->neigh_setup(n)) < 0) {
                rc = ERR_PTR(error);
                goto out_neigh_release;
        }

        n->confirmed = jiffies - (n->parms->base_reachable_time << 1);

        write_lock_bh(&tbl->lock);

        if (atomic_read(&tbl->entries) > (tbl->hash_mask + 1))
                neigh_hash_grow(tbl, (tbl->hash_mask + 1) << 1);

        hash_val = tbl->hash(pkey, dev) & tbl->hash_mask;

        if (n->parms->dead) {
                rc = ERR_PTR(-EINVAL);
                goto out_tbl_unlock;
        }

        for (n1 = tbl->hash_buckets[hash_val]; n1; n1 = n1->next) {
                if (dev == n1->dev && !memcmp(n1->primary_key, pkey, key_len)) {
                        neigh_hold(n1);
                        rc = n1;
                        goto out_tbl_unlock;
                }
        }

        n->next = tbl->hash_buckets[hash_val];
        tbl->hash_buckets[hash_val] = n;
        n->dead = 0;
        neigh_hold(n);
        write_unlock_bh(&tbl->lock);
        NEIGH_PRINTK2("neigh %p is created.\n", n);
        rc = n;
out:
        return rc;
out_tbl_unlock:
        write_unlock_bh(&tbl->lock);
out_neigh_release:
        neigh_release(n);
        goto out;
}
struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl,
                                    struct net *net, const void *pkey,
                                    struct net_device *dev, int creat)
{
        struct pneigh_entry *n;
        int key_len = tbl->key_len;
        u32 hash_val = *(u32 *)(pkey + key_len - 4);

        hash_val ^= (hash_val >> 16);
        hash_val ^= hash_val >> 8;
        hash_val ^= hash_val >> 4;
        hash_val &= PNEIGH_HASHMASK;

        read_lock_bh(&tbl->lock);

        for (n = tbl->phash_buckets[hash_val]; n; n = n->next) {
                if (!memcmp(n->key, pkey, key_len) &&
                    (n->net == net) &&
                    (n->dev == dev || !n->dev)) {
                        read_unlock_bh(&tbl->lock);
                        goto out;
                }
        }
        read_unlock_bh(&tbl->lock);
        n = NULL;
        if (!creat)
                goto out;

        ASSERT_RTNL();

        n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL);
        if (!n)
                goto out;

        n->net = hold_net(net);
        memcpy(n->key, pkey, key_len);
        n->dev = dev;
        if (dev)
                dev_hold(dev);

        if (tbl->pconstructor && tbl->pconstructor(n)) {
                if (dev)
                        dev_put(dev);
                release_net(net);
                kfree(n);
                n = NULL;
                goto out;
        }

        write_lock_bh(&tbl->lock);
        n->next = tbl->phash_buckets[hash_val];
        tbl->phash_buckets[hash_val] = n;
        write_unlock_bh(&tbl->lock);
out:
        return n;
}
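/*
 * The hash used by pneigh_lookup() and pneigh_delete() folds the last 32
 * bits of the key down to the 4-bit bucket index in three xor-shift steps.
 * Standalone sketch of the same fold (illustrative only):
 */
static inline u32 pneigh_hash_example(const void *pkey, int key_len)
{
        u32 hash_val = *(u32 *)(pkey + key_len - 4);

        hash_val ^= (hash_val >> 16);           /* mix the high half in */
        hash_val ^= hash_val >> 8;
        hash_val ^= hash_val >> 4;
        return hash_val & PNEIGH_HASHMASK;      /* 16 buckets */
}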
int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey,
                  struct net_device *dev)
{
        struct pneigh_entry *n, **np;
        int key_len = tbl->key_len;
        u32 hash_val = *(u32 *)(pkey + key_len - 4);

        hash_val ^= (hash_val >> 16);
        hash_val ^= hash_val >> 8;
        hash_val ^= hash_val >> 4;
        hash_val &= PNEIGH_HASHMASK;

        write_lock_bh(&tbl->lock);
        for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
             np = &n->next) {
                if (!memcmp(n->key, pkey, key_len) && n->dev == dev &&
                    (n->net == net)) {
                        *np = n->next;
                        write_unlock_bh(&tbl->lock);
                        if (tbl->pdestructor)
                                tbl->pdestructor(n);
                        if (n->dev)
                                dev_put(n->dev);
                        release_net(n->net);
                        kfree(n);
                        return 0;
                }
        }
        write_unlock_bh(&tbl->lock);
        return -ENOENT;
}

static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
        struct pneigh_entry *n, **np;
        u32 h;

        for (h = 0; h <= PNEIGH_HASHMASK; h++) {
                np = &tbl->phash_buckets[h];
                while ((n = *np) != NULL) {
                        if (!dev || n->dev == dev) {
                                *np = n->next;
                                if (tbl->pdestructor)
                                        tbl->pdestructor(n);
                                if (n->dev)
                                        dev_put(n->dev);
                                release_net(n->net);
                                kfree(n);
                                continue;
                        }
                        np = &n->next;
                }
        }
        return -ENOENT;
}

static void neigh_parms_destroy(struct neigh_parms *parms);

static inline void neigh_parms_put(struct neigh_parms *parms)
{
        if (atomic_dec_and_test(&parms->refcnt))
                neigh_parms_destroy(parms);
}
/*
 *	neighbour must already be out of the table;
 *
 */
void neigh_destroy(struct neighbour *neigh)
{
        struct hh_cache *hh;

        NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);

        if (!neigh->dead) {
                printk(KERN_WARNING
                       "Destroying alive neighbour %p\n", neigh);
                dump_stack();
                return;
        }

        if (neigh_del_timer(neigh))
                printk(KERN_WARNING "Impossible event.\n");

        while ((hh = neigh->hh) != NULL) {
                neigh->hh = hh->hh_next;
                hh->hh_next = NULL;

                write_seqlock_bh(&hh->hh_lock);
                hh->hh_output = neigh_blackhole;
                write_sequnlock_bh(&hh->hh_lock);
                if (atomic_dec_and_test(&hh->hh_refcnt))
                        kfree(hh);
        }

        skb_queue_purge(&neigh->arp_queue);

        dev_put(neigh->dev);
        neigh_parms_put(neigh->parms);

        NEIGH_PRINTK2("neigh %p is destroyed.\n", neigh);

        atomic_dec(&neigh->tbl->entries);
        kmem_cache_free(neigh->tbl->kmem_cachep, neigh);
}

/* Neighbour state is suspicious;
   disable fast path.

   Called with write_locked neigh.
 */
static void neigh_suspect(struct neighbour *neigh)
{
        struct hh_cache *hh;

        NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);

        neigh->output = neigh->ops->output;

        for (hh = neigh->hh; hh; hh = hh->hh_next)
                hh->hh_output = neigh->ops->output;
}

/* Neighbour state is OK;
   enable fast path.

   Called with write_locked neigh.
 */
static void neigh_connect(struct neighbour *neigh)
{
        struct hh_cache *hh;

        NEIGH_PRINTK2("neigh %p is connected.\n", neigh);

        neigh->output = neigh->ops->connected_output;

        for (hh = neigh->hh; hh; hh = hh->hh_next)
                hh->hh_output = neigh->ops->hh_output;
}
static void neigh_periodic_timer(unsigned long arg)
{
        struct neigh_table *tbl = (struct neigh_table *)arg;
        struct neighbour *n, **np;
        unsigned long expire, now = jiffies;

        NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);

        write_lock(&tbl->lock);

        /*
         *	periodically recompute ReachableTime from random function
         */

        if (time_after(now, tbl->last_rand + 300 * HZ)) {
                struct neigh_parms *p;
                tbl->last_rand = now;
                for (p = &tbl->parms; p; p = p->next)
                        p->reachable_time =
                                neigh_rand_reach_time(p->base_reachable_time);
        }

        np = &tbl->hash_buckets[tbl->hash_chain_gc];
        tbl->hash_chain_gc = ((tbl->hash_chain_gc + 1) & tbl->hash_mask);

        while ((n = *np) != NULL) {
                unsigned int state;

                write_lock(&n->lock);

                state = n->nud_state;
                if (state & (NUD_PERMANENT | NUD_IN_TIMER)) {
                        write_unlock(&n->lock);
                        goto next_elt;
                }

                if (time_before(n->used, n->confirmed))
                        n->used = n->confirmed;

                if (atomic_read(&n->refcnt) == 1 &&
                    (state == NUD_FAILED ||
                     time_after(now, n->used + n->parms->gc_staletime))) {
                        *np = n->next;
                        n->dead = 1;
                        write_unlock(&n->lock);
                        neigh_cleanup_and_release(n);
                        continue;
                }
                write_unlock(&n->lock);

next_elt:
                np = &n->next;
        }

        /* Cycle through all hash buckets every base_reachable_time/2 ticks.
         * ARP entry timeouts range from 1/2 base_reachable_time to 3/2
         * base_reachable_time.
         */
        expire = tbl->parms.base_reachable_time >> 1;
        expire /= (tbl->hash_mask + 1);
        if (!expire)
                expire = 1;

        if (expire > HZ)
                mod_timer(&tbl->gc_timer, round_jiffies(now + expire));
        else
                mod_timer(&tbl->gc_timer, now + expire);

        write_unlock(&tbl->lock);
}

static __inline__ int neigh_max_probes(struct neighbour *n)
{
        struct neigh_parms *p = n->parms;
        return (n->nud_state & NUD_PROBE ?
                p->ucast_probes :
                p->ucast_probes + p->app_probes + p->mcast_probes);
}
/* Called when a timer expires for a neighbour entry. */

static void neigh_timer_handler(unsigned long arg)
{
        unsigned long now, next;
        struct neighbour *neigh = (struct neighbour *)arg;
        unsigned state;
        int notify = 0;

        write_lock(&neigh->lock);

        state = neigh->nud_state;
        now = jiffies;
        next = now + HZ;

        if (!(state & NUD_IN_TIMER)) {
#ifndef CONFIG_SMP
                printk(KERN_WARNING "neigh: timer & !nud_in_timer\n");
#endif
                goto out;
        }

        if (state & NUD_REACHABLE) {
                if (time_before_eq(now,
                                   neigh->confirmed + neigh->parms->reachable_time)) {
                        NEIGH_PRINTK2("neigh %p is still alive.\n", neigh);
                        next = neigh->confirmed + neigh->parms->reachable_time;
                } else if (time_before_eq(now,
                                          neigh->used + neigh->parms->delay_probe_time)) {
                        NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
                        neigh->nud_state = NUD_DELAY;
                        neigh->updated = jiffies;
                        neigh_suspect(neigh);
                        next = now + neigh->parms->delay_probe_time;
                } else {
                        NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);
                        neigh->nud_state = NUD_STALE;
                        neigh->updated = jiffies;
                        neigh_suspect(neigh);
                        notify = 1;
                }
        } else if (state & NUD_DELAY) {
                if (time_before_eq(now,
                                   neigh->confirmed + neigh->parms->delay_probe_time)) {
                        NEIGH_PRINTK2("neigh %p is now reachable.\n", neigh);
                        neigh->nud_state = NUD_REACHABLE;
                        neigh->updated = jiffies;
                        neigh_connect(neigh);
                        notify = 1;
                        next = neigh->confirmed + neigh->parms->reachable_time;
                } else {
                        NEIGH_PRINTK2("neigh %p is probed.\n", neigh);
                        neigh->nud_state = NUD_PROBE;
                        neigh->updated = jiffies;
                        atomic_set(&neigh->probes, 0);
                        next = now + neigh->parms->retrans_time;
                }
        } else {
                /* NUD_PROBE|NUD_INCOMPLETE */
                next = now + neigh->parms->retrans_time;
        }

        if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
            atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
                struct sk_buff *skb;

                neigh->nud_state = NUD_FAILED;
                neigh->updated = jiffies;
                notify = 1;
                NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
                NEIGH_PRINTK2("neigh %p is failed.\n", neigh);

                /* This is a very thin place. report_unreachable is a very
                   complicated routine. Particularly, it can hit the same
                   neighbour entry!

                   So we try to be accurate and avoid a dead loop. --ANK
                 */
                while (neigh->nud_state == NUD_FAILED &&
                       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
                        write_unlock(&neigh->lock);
                        neigh->ops->error_report(neigh, skb);
                        write_lock(&neigh->lock);
                }
                skb_queue_purge(&neigh->arp_queue);
        }

        if (neigh->nud_state & NUD_IN_TIMER) {
                if (time_before(next, jiffies + HZ/2))
                        next = jiffies + HZ/2;
                if (!mod_timer(&neigh->timer, next))
                        neigh_hold(neigh);
        }
        if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
                struct sk_buff *skb = skb_peek(&neigh->arp_queue);

                neigh->ops->solicit(neigh, skb);
                atomic_inc(&neigh->probes);
        }
out:
        write_unlock(&neigh->lock);

        if (notify)
                neigh_update_notify(neigh);

        neigh_release(neigh);
}
int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
{
        int rc;
        unsigned long now;

        write_lock_bh(&neigh->lock);

        rc = 0;
        if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
                goto out_unlock_bh;

        now = jiffies;

        if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
                if (neigh->parms->mcast_probes + neigh->parms->app_probes) {
                        atomic_set(&neigh->probes, neigh->parms->ucast_probes);
                        neigh->nud_state     = NUD_INCOMPLETE;
                        neigh->updated = jiffies;
                        neigh_add_timer(neigh, now + 1);
                } else {
                        neigh->nud_state = NUD_FAILED;
                        neigh->updated = jiffies;
                        write_unlock_bh(&neigh->lock);

                        if (skb)
                                kfree_skb(skb);
                        return 1;
                }
        } else if (neigh->nud_state & NUD_STALE) {
                NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
                neigh->nud_state = NUD_DELAY;
                neigh->updated = jiffies;
                neigh_add_timer(neigh,
                                jiffies + neigh->parms->delay_probe_time);
        }

        if (neigh->nud_state == NUD_INCOMPLETE) {
                if (skb) {
                        if (skb_queue_len(&neigh->arp_queue) >=
                            neigh->parms->queue_len) {
                                struct sk_buff *buff;
                                buff = neigh->arp_queue.next;
                                __skb_unlink(buff, &neigh->arp_queue);
                                kfree_skb(buff);
                        }
                        __skb_queue_tail(&neigh->arp_queue, skb);
                }
                rc = 1;
        }
out_unlock_bh:
        write_unlock_bh(&neigh->lock);
        return rc;
}

static void neigh_update_hhs(struct neighbour *neigh)
{
        struct hh_cache *hh;
        void (*update)(struct hh_cache *, const struct net_device *, const unsigned char *)
                = neigh->dev->header_ops->cache_update;

        if (update) {
                for (hh = neigh->hh; hh; hh = hh->hh_next) {
                        write_seqlock_bh(&hh->hh_lock);
                        update(hh, neigh->dev, neigh->ha);
                        write_sequnlock_bh(&hh->hh_lock);
                }
        }
}
/* Generic update routine.
   -- lladdr is the new lladdr or NULL, if it is not supplied.
   -- new    is the new state.
   -- flags
	NEIGH_UPDATE_F_OVERRIDE allows overriding an existing lladdr,
				if it is different.
	NEIGH_UPDATE_F_WEAK_OVERRIDE will suspect an existing "connected"
				lladdr instead of overriding it
				if it is different.
				It also allows retaining the current state
				if lladdr is unchanged.
	NEIGH_UPDATE_F_ADMIN	means that the change is administrative.

	NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows overriding an existing
				NTF_ROUTER flag.
	NEIGH_UPDATE_F_ISROUTER	indicates if the neighbour is known as
				a router.

   Caller MUST hold reference count on the entry.
 */

int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
                 u32 flags)
{
        u8 old;
        int err;
        int notify = 0;
        struct net_device *dev;
        int update_isrouter = 0;

        write_lock_bh(&neigh->lock);

        dev    = neigh->dev;
        old    = neigh->nud_state;
        err    = -EPERM;

        if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
            (old & (NUD_NOARP | NUD_PERMANENT)))
                goto out;

        if (!(new & NUD_VALID)) {
                neigh_del_timer(neigh);
                if (old & NUD_CONNECTED)
                        neigh_suspect(neigh);
                neigh->nud_state = new;
                err = 0;
                notify = old & NUD_VALID;
                goto out;
        }

        /* Compare new lladdr with cached one */
        if (!dev->addr_len) {
                /* First case: device needs no address. */
                lladdr = neigh->ha;
        } else if (lladdr) {
                /* The second case: if something is already cached
                   and a new address is proposed:
                   - compare new & old
                   - if they are different, check override flag
                 */
                if ((old & NUD_VALID) &&
                    !memcmp(lladdr, neigh->ha, dev->addr_len))
                        lladdr = neigh->ha;
        } else {
                /* No address is supplied; if we know something,
                   use it, otherwise discard the request.
                 */
                err = -EINVAL;
                if (!(old & NUD_VALID))
                        goto out;
                lladdr = neigh->ha;
        }

        if (new & NUD_CONNECTED)
                neigh->confirmed = jiffies;
        neigh->updated = jiffies;

        /* If entry was valid and address is not changed,
           do not change entry state, if new one is STALE.
         */
        err = 0;
        update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
        if (old & NUD_VALID) {
                if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
                        update_isrouter = 0;
                        if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) &&
                            (old & NUD_CONNECTED)) {
                                lladdr = neigh->ha;
                                new = NUD_STALE;
                        } else
                                goto out;
                } else {
                        if (lladdr == neigh->ha && new == NUD_STALE &&
                            ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) ||
                             (old & NUD_CONNECTED))
                            )
                                new = old;
                }
        }

        if (new != old) {
                neigh_del_timer(neigh);
                if (new & NUD_IN_TIMER)
                        neigh_add_timer(neigh, (jiffies +
                                                ((new & NUD_REACHABLE) ?
                                                 neigh->parms->reachable_time :
                                                 0)));
                neigh->nud_state = new;
        }

        if (lladdr != neigh->ha) {
                memcpy(&neigh->ha, lladdr, dev->addr_len);
                neigh_update_hhs(neigh);
                if (!(new & NUD_CONNECTED))
                        neigh->confirmed = jiffies -
                                      (neigh->parms->base_reachable_time << 1);
                notify = 1;
        }
        if (new == old)
                goto out;
        if (new & NUD_CONNECTED)
                neigh_connect(neigh);
        else
                neigh_suspect(neigh);
        if (!(old & NUD_VALID)) {
                struct sk_buff *skb;

                /* Again: avoid dead loop if something went wrong */

                while (neigh->nud_state & NUD_VALID &&
                       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
                        struct neighbour *n1 = neigh;
                        write_unlock_bh(&neigh->lock);
                        /* On shaper/eql skb->dst->neighbour != neigh :( */
                        if (skb->dst && skb->dst->neighbour)
                                n1 = skb->dst->neighbour;
                        n1->output(skb);
                        write_lock_bh(&neigh->lock);
                }
                skb_queue_purge(&neigh->arp_queue);
        }
out:
        if (update_isrouter) {
                neigh->flags = (flags & NEIGH_UPDATE_F_ISROUTER) ?
                        (neigh->flags | NTF_ROUTER) :
                        (neigh->flags & ~NTF_ROUTER);
        }
        write_unlock_bh(&neigh->lock);

        if (notify)
                neigh_update_notify(neigh);

        return err;
}
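/*
 * Typical administrative use of neigh_update() (illustrative sketch, in the
 * spirit of neigh_delete() below): force an entry to NUD_FAILED regardless
 * of its cached lladdr or connected state.  The caller must hold a
 * reference, as the comment above demands.
 */
static inline int neigh_force_fail_example(struct neighbour *neigh)
{
        /* NULL lladdr: keep whatever address is cached */
        return neigh_update(neigh, NULL, NUD_FAILED,
                            NEIGH_UPDATE_F_OVERRIDE | NEIGH_UPDATE_F_ADMIN);
}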
struct neighbour *neigh_event_ns(struct neigh_table *tbl,
                                 u8 *lladdr, void *saddr,
                                 struct net_device *dev)
{
        struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
                                                 lladdr || !dev->addr_len);
        if (neigh)
                neigh_update(neigh, lladdr, NUD_STALE,
                             NEIGH_UPDATE_F_OVERRIDE);
        return neigh;
}

static void neigh_hh_init(struct neighbour *n, struct dst_entry *dst,
                          __be16 protocol)
{
        struct hh_cache *hh;
        struct net_device *dev = dst->dev;

        for (hh = n->hh; hh; hh = hh->hh_next)
                if (hh->hh_type == protocol)
                        break;

        if (!hh && (hh = kzalloc(sizeof(*hh), GFP_ATOMIC)) != NULL) {
                seqlock_init(&hh->hh_lock);
                hh->hh_type = protocol;
                atomic_set(&hh->hh_refcnt, 0);
                hh->hh_next = NULL;

                if (dev->header_ops->cache(n, hh)) {
                        kfree(hh);
                        hh = NULL;
                } else {
                        atomic_inc(&hh->hh_refcnt);
                        hh->hh_next = n->hh;
                        n->hh       = hh;
                        if (n->nud_state & NUD_CONNECTED)
                                hh->hh_output = n->ops->hh_output;
                        else
                                hh->hh_output = n->ops->output;
                }
        }
        if (hh) {
                atomic_inc(&hh->hh_refcnt);
                dst->hh = hh;
        }
}
/* This function can be used in contexts where only the old dev_queue_xmit
   worked, e.g. if you want to override the normal output path (eql, shaper),
   but resolution is not made yet.
 */

int neigh_compat_output(struct sk_buff *skb)
{
        struct net_device *dev = skb->dev;

        __skb_pull(skb, skb_network_offset(skb));

        if (dev_hard_header(skb, dev, ntohs(skb->protocol), NULL, NULL,
                            skb->len) < 0 &&
            dev->header_ops->rebuild(skb))
                return 0;

        return dev_queue_xmit(skb);
}

/* Slow and careful. */

int neigh_resolve_output(struct sk_buff *skb)
{
        struct dst_entry *dst = skb->dst;
        struct neighbour *neigh;
        int rc = 0;

        if (!dst || !(neigh = dst->neighbour))
                goto discard;

        __skb_pull(skb, skb_network_offset(skb));

        if (!neigh_event_send(neigh, skb)) {
                int err;
                struct net_device *dev = neigh->dev;
                if (dev->header_ops->cache && !dst->hh) {
                        write_lock_bh(&neigh->lock);
                        if (!dst->hh)
                                neigh_hh_init(neigh, dst, dst->ops->protocol);
                        err = dev_hard_header(skb, dev, ntohs(skb->protocol),
                                              neigh->ha, NULL, skb->len);
                        write_unlock_bh(&neigh->lock);
                } else {
                        read_lock_bh(&neigh->lock);
                        err = dev_hard_header(skb, dev, ntohs(skb->protocol),
                                              neigh->ha, NULL, skb->len);
                        read_unlock_bh(&neigh->lock);
                }
                if (err >= 0)
                        rc = neigh->ops->queue_xmit(skb);
                else
                        goto out_kfree_skb;
        }
out:
        return rc;
discard:
        NEIGH_PRINTK1("neigh_resolve_output: dst=%p neigh=%p\n",
                      dst, dst ? dst->neighbour : NULL);
out_kfree_skb:
        rc = -EINVAL;
        kfree_skb(skb);
        goto out;
}

/* As fast as possible without hh cache */

int neigh_connected_output(struct sk_buff *skb)
{
        int err;
        struct dst_entry *dst = skb->dst;
        struct neighbour *neigh = dst->neighbour;
        struct net_device *dev = neigh->dev;

        __skb_pull(skb, skb_network_offset(skb));

        read_lock_bh(&neigh->lock);
        err = dev_hard_header(skb, dev, ntohs(skb->protocol),
                              neigh->ha, NULL, skb->len);
        read_unlock_bh(&neigh->lock);
        if (err >= 0)
                err = neigh->ops->queue_xmit(skb);
        else {
                err = -EINVAL;
                kfree_skb(skb);
        }
        return err;
}
static void neigh_proxy_process(unsigned long arg)
{
        struct neigh_table *tbl = (struct neigh_table *)arg;
        long sched_next = 0;
        unsigned long now = jiffies;
        struct sk_buff *skb;

        spin_lock(&tbl->proxy_queue.lock);

        skb = tbl->proxy_queue.next;

        while (skb != (struct sk_buff *)&tbl->proxy_queue) {
                struct sk_buff *back = skb;
                long tdif = NEIGH_CB(back)->sched_next - now;

                skb = skb->next;
                if (tdif <= 0) {
                        struct net_device *dev = back->dev;
                        __skb_unlink(back, &tbl->proxy_queue);
                        if (tbl->proxy_redo && netif_running(dev))
                                tbl->proxy_redo(back);
                        else
                                kfree_skb(back);

                        dev_put(dev);
                } else if (!sched_next || tdif < sched_next)
                        sched_next = tdif;
        }
        del_timer(&tbl->proxy_timer);
        if (sched_next)
                mod_timer(&tbl->proxy_timer, jiffies + sched_next);
        spin_unlock(&tbl->proxy_queue.lock);
}

void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
                    struct sk_buff *skb)
{
        unsigned long now = jiffies;
        unsigned long sched_next = now + (net_random() % p->proxy_delay);

        if (tbl->proxy_queue.qlen > p->proxy_qlen) {
                kfree_skb(skb);
                return;
        }

        NEIGH_CB(skb)->sched_next = sched_next;
        NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;

        spin_lock(&tbl->proxy_queue.lock);
        if (del_timer(&tbl->proxy_timer)) {
                if (time_before(tbl->proxy_timer.expires, sched_next))
                        sched_next = tbl->proxy_timer.expires;
        }
        dst_release(skb->dst);
        skb->dst = NULL;
        dev_hold(skb->dev);
        __skb_queue_tail(&tbl->proxy_queue, skb);
        mod_timer(&tbl->proxy_timer, sched_next);
        spin_unlock(&tbl->proxy_queue.lock);
}
static inline struct neigh_parms *lookup_neigh_params(struct neigh_table *tbl,
                                                      struct net *net, int ifindex)
{
        struct neigh_parms *p;

        for (p = &tbl->parms; p; p = p->next) {
                if (p->net != net)
                        continue;
                if ((p->dev && p->dev->ifindex == ifindex) ||
                    (!p->dev && !ifindex))
                        return p;
        }

        return NULL;
}

struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
                                      struct neigh_table *tbl)
{
        struct neigh_parms *p, *ref;
        struct net *net;

        net = dev->nd_net;
        ref = lookup_neigh_params(tbl, net, 0);
        if (!ref)
                return NULL;

        p = kmemdup(ref, sizeof(*p), GFP_KERNEL);
        if (p) {
                p->tbl            = tbl;
                atomic_set(&p->refcnt, 1);
                INIT_RCU_HEAD(&p->rcu_head);
                p->reachable_time =
                                neigh_rand_reach_time(p->base_reachable_time);

                if (dev->neigh_setup && dev->neigh_setup(dev, p)) {
                        kfree(p);
                        return NULL;
                }

                dev_hold(dev);
                p->dev = dev;
                p->net = hold_net(net);
                p->sysctl_table = NULL;
                write_lock_bh(&tbl->lock);
                p->next         = tbl->parms.next;
                tbl->parms.next = p;
                write_unlock_bh(&tbl->lock);
        }
        return p;
}

static void neigh_rcu_free_parms(struct rcu_head *head)
{
        struct neigh_parms *parms =
                container_of(head, struct neigh_parms, rcu_head);

        neigh_parms_put(parms);
}

void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
{
        struct neigh_parms **p;

        if (!parms || parms == &tbl->parms)
                return;
        write_lock_bh(&tbl->lock);
        for (p = &tbl->parms.next; *p; p = &(*p)->next) {
                if (*p == parms) {
                        *p = parms->next;
                        parms->dead = 1;
                        write_unlock_bh(&tbl->lock);
                        if (parms->dev)
                                dev_put(parms->dev);
                        call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
                        return;
                }
        }
        write_unlock_bh(&tbl->lock);
        NEIGH_PRINTK1("neigh_parms_release: not found\n");
}

static void neigh_parms_destroy(struct neigh_parms *parms)
{
        release_net(parms->net);
        kfree(parms);
}
static struct lock_class_key neigh_table_proxy_queue_class;

void neigh_table_init_no_netlink(struct neigh_table *tbl)
{
        unsigned long now = jiffies;
        unsigned long phsize;

        tbl->parms.net = &init_net;
        atomic_set(&tbl->parms.refcnt, 1);
        INIT_RCU_HEAD(&tbl->parms.rcu_head);
        tbl->parms.reachable_time =
                          neigh_rand_reach_time(tbl->parms.base_reachable_time);

        if (!tbl->kmem_cachep)
                tbl->kmem_cachep =
                        kmem_cache_create(tbl->id, tbl->entry_size, 0,
                                          SLAB_HWCACHE_ALIGN|SLAB_PANIC,
                                          NULL);
        tbl->stats = alloc_percpu(struct neigh_statistics);
        if (!tbl->stats)
                panic("cannot create neighbour cache statistics");

#ifdef CONFIG_PROC_FS
        tbl->pde = create_proc_entry(tbl->id, 0, init_net.proc_net_stat);
        if (!tbl->pde)
                panic("cannot create neighbour proc dir entry");
        tbl->pde->proc_fops = &neigh_stat_seq_fops;
        tbl->pde->data = tbl;
#endif

        tbl->hash_mask = 1;
        tbl->hash_buckets = neigh_hash_alloc(tbl->hash_mask + 1);

        phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
        tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);

        if (!tbl->hash_buckets || !tbl->phash_buckets)
                panic("cannot allocate neighbour cache hashes");

        get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));

        rwlock_init(&tbl->lock);
        setup_timer(&tbl->gc_timer, neigh_periodic_timer, (unsigned long)tbl);
        tbl->gc_timer.expires  = now + 1;
        add_timer(&tbl->gc_timer);

        setup_timer(&tbl->proxy_timer, neigh_proxy_process, (unsigned long)tbl);
        skb_queue_head_init_class(&tbl->proxy_queue,
                        &neigh_table_proxy_queue_class);

        tbl->last_flush = now;
        tbl->last_rand  = now + tbl->parms.reachable_time * 20;
}

void neigh_table_init(struct neigh_table *tbl)
{
        struct neigh_table *tmp;

        neigh_table_init_no_netlink(tbl);
        write_lock(&neigh_tbl_lock);
        for (tmp = neigh_tables; tmp; tmp = tmp->next) {
                if (tmp->family == tbl->family)
                        break;
        }
        tbl->next       = neigh_tables;
        neigh_tables    = tbl;
        write_unlock(&neigh_tbl_lock);

        if (unlikely(tmp)) {
                printk(KERN_ERR "NEIGH: Registering multiple tables for "
                       "family %d\n", tbl->family);
                dump_stack();
        }
}

int neigh_table_clear(struct neigh_table *tbl)
{
        struct neigh_table **tp;

        /* It is not clean... Fix it to unload IPv6 module safely */
        del_timer_sync(&tbl->gc_timer);
        del_timer_sync(&tbl->proxy_timer);
        pneigh_queue_purge(&tbl->proxy_queue);
        neigh_ifdown(tbl, NULL);
        if (atomic_read(&tbl->entries))
                printk(KERN_CRIT "neighbour leakage\n");
        write_lock(&neigh_tbl_lock);
        for (tp = &neigh_tables; *tp; tp = &(*tp)->next) {
                if (*tp == tbl) {
                        *tp = tbl->next;
                        break;
                }
        }
        write_unlock(&neigh_tbl_lock);

        neigh_hash_free(tbl->hash_buckets, tbl->hash_mask + 1);
        tbl->hash_buckets = NULL;

        kfree(tbl->phash_buckets);
        tbl->phash_buckets = NULL;

        remove_proc_entry(tbl->id, init_net.proc_net_stat);

        free_percpu(tbl->stats);
        tbl->stats = NULL;

        kmem_cache_destroy(tbl->kmem_cachep);
        tbl->kmem_cachep = NULL;

        return 0;
}
static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
        struct net *net = skb->sk->sk_net;
        struct ndmsg *ndm;
        struct nlattr *dst_attr;
        struct neigh_table *tbl;
        struct net_device *dev = NULL;
        int err = -EINVAL;

        if (nlmsg_len(nlh) < sizeof(*ndm))
                goto out;

        dst_attr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_DST);
        if (dst_attr == NULL)
                goto out;

        ndm = nlmsg_data(nlh);
        if (ndm->ndm_ifindex) {
                dev = dev_get_by_index(net, ndm->ndm_ifindex);
                if (dev == NULL) {
                        err = -ENODEV;
                        goto out;
                }
        }

        read_lock(&neigh_tbl_lock);
        for (tbl = neigh_tables; tbl; tbl = tbl->next) {
                struct neighbour *neigh;

                if (tbl->family != ndm->ndm_family)
                        continue;
                read_unlock(&neigh_tbl_lock);

                if (nla_len(dst_attr) < tbl->key_len)
                        goto out_dev_put;

                if (ndm->ndm_flags & NTF_PROXY) {
                        err = pneigh_delete(tbl, net, nla_data(dst_attr), dev);
                        goto out_dev_put;
                }

                if (dev == NULL)
                        goto out_dev_put;

                neigh = neigh_lookup(tbl, nla_data(dst_attr), dev);
                if (neigh == NULL) {
                        err = -ENOENT;
                        goto out_dev_put;
                }

                err = neigh_update(neigh, NULL, NUD_FAILED,
                                   NEIGH_UPDATE_F_OVERRIDE |
                                   NEIGH_UPDATE_F_ADMIN);
                neigh_release(neigh);
                goto out_dev_put;
        }
        read_unlock(&neigh_tbl_lock);
        err = -EAFNOSUPPORT;

out_dev_put:
        if (dev)
                dev_put(dev);
out:
        return err;
}

static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
        struct net *net = skb->sk->sk_net;
        struct ndmsg *ndm;
        struct nlattr *tb[NDA_MAX+1];
        struct neigh_table *tbl;
        struct net_device *dev = NULL;
        int err;

        err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL);
        if (err < 0)
                goto out;

        err = -EINVAL;
        if (tb[NDA_DST] == NULL)
                goto out;

        ndm = nlmsg_data(nlh);
        if (ndm->ndm_ifindex) {
                dev = dev_get_by_index(net, ndm->ndm_ifindex);
                if (dev == NULL) {
                        err = -ENODEV;
                        goto out;
                }

                if (tb[NDA_LLADDR] && nla_len(tb[NDA_LLADDR]) < dev->addr_len)
                        goto out_dev_put;
        }

        read_lock(&neigh_tbl_lock);
        for (tbl = neigh_tables; tbl; tbl = tbl->next) {
                int flags = NEIGH_UPDATE_F_ADMIN | NEIGH_UPDATE_F_OVERRIDE;
                struct neighbour *neigh;
                void *dst, *lladdr;

                if (tbl->family != ndm->ndm_family)
                        continue;
                read_unlock(&neigh_tbl_lock);

                if (nla_len(tb[NDA_DST]) < tbl->key_len)
                        goto out_dev_put;

                dst = nla_data(tb[NDA_DST]);
                lladdr = tb[NDA_LLADDR] ? nla_data(tb[NDA_LLADDR]) : NULL;

                if (ndm->ndm_flags & NTF_PROXY) {
                        struct pneigh_entry *pn;

                        err = -ENOBUFS;
                        pn = pneigh_lookup(tbl, net, dst, dev, 1);
                        if (pn) {
                                pn->flags = ndm->ndm_flags;
                                err = 0;
                        }
                        goto out_dev_put;
                }

                if (dev == NULL)
                        goto out_dev_put;

                neigh = neigh_lookup(tbl, dst, dev);
                if (neigh == NULL) {
                        if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
                                err = -ENOENT;
                                goto out_dev_put;
                        }

                        neigh = __neigh_lookup_errno(tbl, dst, dev);
                        if (IS_ERR(neigh)) {
                                err = PTR_ERR(neigh);
                                goto out_dev_put;
                        }
                } else {
                        if (nlh->nlmsg_flags & NLM_F_EXCL) {
                                err = -EEXIST;
                                neigh_release(neigh);
                                goto out_dev_put;
                        }

                        if (!(nlh->nlmsg_flags & NLM_F_REPLACE))
                                flags &= ~NEIGH_UPDATE_F_OVERRIDE;
                }

                err = neigh_update(neigh, lladdr, ndm->ndm_state, flags);
                neigh_release(neigh);
                goto out_dev_put;
        }

        read_unlock(&neigh_tbl_lock);
        err = -EAFNOSUPPORT;

out_dev_put:
        if (dev)
                dev_put(dev);
out:
        return err;
}
static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
{
        struct nlattr *nest;

        nest = nla_nest_start(skb, NDTA_PARMS);
        if (nest == NULL)
                return -ENOBUFS;

        if (parms->dev)
                NLA_PUT_U32(skb, NDTPA_IFINDEX, parms->dev->ifindex);

        NLA_PUT_U32(skb, NDTPA_REFCNT, atomic_read(&parms->refcnt));
        NLA_PUT_U32(skb, NDTPA_QUEUE_LEN, parms->queue_len);
        NLA_PUT_U32(skb, NDTPA_PROXY_QLEN, parms->proxy_qlen);
        NLA_PUT_U32(skb, NDTPA_APP_PROBES, parms->app_probes);
        NLA_PUT_U32(skb, NDTPA_UCAST_PROBES, parms->ucast_probes);
        NLA_PUT_U32(skb, NDTPA_MCAST_PROBES, parms->mcast_probes);
        NLA_PUT_MSECS(skb, NDTPA_REACHABLE_TIME, parms->reachable_time);
        NLA_PUT_MSECS(skb, NDTPA_BASE_REACHABLE_TIME,
                      parms->base_reachable_time);
        NLA_PUT_MSECS(skb, NDTPA_GC_STALETIME, parms->gc_staletime);
        NLA_PUT_MSECS(skb, NDTPA_DELAY_PROBE_TIME, parms->delay_probe_time);
        NLA_PUT_MSECS(skb, NDTPA_RETRANS_TIME, parms->retrans_time);
        NLA_PUT_MSECS(skb, NDTPA_ANYCAST_DELAY, parms->anycast_delay);
        NLA_PUT_MSECS(skb, NDTPA_PROXY_DELAY, parms->proxy_delay);
        NLA_PUT_MSECS(skb, NDTPA_LOCKTIME, parms->locktime);

        return nla_nest_end(skb, nest);

nla_put_failure:
        return nla_nest_cancel(skb, nest);
}

static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
                              u32 pid, u32 seq, int type, int flags)
{
        struct nlmsghdr *nlh;
        struct ndtmsg *ndtmsg;

        nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
        if (nlh == NULL)
                return -EMSGSIZE;

        ndtmsg = nlmsg_data(nlh);

        read_lock_bh(&tbl->lock);
        ndtmsg->ndtm_family = tbl->family;
        ndtmsg->ndtm_pad1   = 0;
        ndtmsg->ndtm_pad2   = 0;

        NLA_PUT_STRING(skb, NDTA_NAME, tbl->id);
        NLA_PUT_MSECS(skb, NDTA_GC_INTERVAL, tbl->gc_interval);
        NLA_PUT_U32(skb, NDTA_THRESH1, tbl->gc_thresh1);
        NLA_PUT_U32(skb, NDTA_THRESH2, tbl->gc_thresh2);
        NLA_PUT_U32(skb, NDTA_THRESH3, tbl->gc_thresh3);

        {
                unsigned long now = jiffies;
                unsigned int flush_delta = now - tbl->last_flush;
                unsigned int rand_delta = now - tbl->last_rand;

                struct ndt_config ndc = {
                        .ndtc_key_len           = tbl->key_len,
                        .ndtc_entry_size        = tbl->entry_size,
                        .ndtc_entries           = atomic_read(&tbl->entries),
                        .ndtc_last_flush        = jiffies_to_msecs(flush_delta),
                        .ndtc_last_rand         = jiffies_to_msecs(rand_delta),
                        .ndtc_hash_rnd          = tbl->hash_rnd,
                        .ndtc_hash_mask         = tbl->hash_mask,
                        .ndtc_hash_chain_gc     = tbl->hash_chain_gc,
                        .ndtc_proxy_qlen        = tbl->proxy_queue.qlen,
                };

                NLA_PUT(skb, NDTA_CONFIG, sizeof(ndc), &ndc);
        }

        {
                int cpu;
                struct ndt_stats ndst;

                memset(&ndst, 0, sizeof(ndst));

                for_each_possible_cpu(cpu) {
                        struct neigh_statistics *st;

                        st = per_cpu_ptr(tbl->stats, cpu);
                        ndst.ndts_allocs                += st->allocs;
                        ndst.ndts_destroys              += st->destroys;
                        ndst.ndts_hash_grows            += st->hash_grows;
                        ndst.ndts_res_failed            += st->res_failed;
                        ndst.ndts_lookups               += st->lookups;
                        ndst.ndts_hits                  += st->hits;
                        ndst.ndts_rcv_probes_mcast      += st->rcv_probes_mcast;
                        ndst.ndts_rcv_probes_ucast      += st->rcv_probes_ucast;
                        ndst.ndts_periodic_gc_runs      += st->periodic_gc_runs;
                        ndst.ndts_forced_gc_runs        += st->forced_gc_runs;
                }

                NLA_PUT(skb, NDTA_STATS, sizeof(ndst), &ndst);
        }

        BUG_ON(tbl->parms.dev);
        if (neightbl_fill_parms(skb, &tbl->parms) < 0)
                goto nla_put_failure;

        read_unlock_bh(&tbl->lock);
        return nlmsg_end(skb, nlh);

nla_put_failure:
        read_unlock_bh(&tbl->lock);
        nlmsg_cancel(skb, nlh);
        return -EMSGSIZE;
}

static int neightbl_fill_param_info(struct sk_buff *skb,
                                    struct neigh_table *tbl,
                                    struct neigh_parms *parms,
                                    u32 pid, u32 seq, int type,
                                    unsigned int flags)
{
        struct ndtmsg *ndtmsg;
        struct nlmsghdr *nlh;

        nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
        if (nlh == NULL)
                return -EMSGSIZE;

        ndtmsg = nlmsg_data(nlh);

        read_lock_bh(&tbl->lock);
        ndtmsg->ndtm_family = tbl->family;
        ndtmsg->ndtm_pad1   = 0;
        ndtmsg->ndtm_pad2   = 0;

        if (nla_put_string(skb, NDTA_NAME, tbl->id) < 0 ||
            neightbl_fill_parms(skb, parms) < 0)
                goto errout;

        read_unlock_bh(&tbl->lock);
        return nlmsg_end(skb, nlh);
errout:
        read_unlock_bh(&tbl->lock);
        nlmsg_cancel(skb, nlh);
        return -EMSGSIZE;
}
static const struct nla_policy nl_neightbl_policy[NDTA_MAX+1] = {
        [NDTA_NAME]             = { .type = NLA_STRING },
        [NDTA_THRESH1]          = { .type = NLA_U32 },
        [NDTA_THRESH2]          = { .type = NLA_U32 },
        [NDTA_THRESH3]          = { .type = NLA_U32 },
        [NDTA_GC_INTERVAL]      = { .type = NLA_U64 },
        [NDTA_PARMS]            = { .type = NLA_NESTED },
};

static const struct nla_policy nl_ntbl_parm_policy[NDTPA_MAX+1] = {
        [NDTPA_IFINDEX]                 = { .type = NLA_U32 },
        [NDTPA_QUEUE_LEN]               = { .type = NLA_U32 },
        [NDTPA_PROXY_QLEN]              = { .type = NLA_U32 },
        [NDTPA_APP_PROBES]              = { .type = NLA_U32 },
        [NDTPA_UCAST_PROBES]            = { .type = NLA_U32 },
        [NDTPA_MCAST_PROBES]            = { .type = NLA_U32 },
        [NDTPA_BASE_REACHABLE_TIME]     = { .type = NLA_U64 },
        [NDTPA_GC_STALETIME]            = { .type = NLA_U64 },
        [NDTPA_DELAY_PROBE_TIME]        = { .type = NLA_U64 },
        [NDTPA_RETRANS_TIME]            = { .type = NLA_U64 },
        [NDTPA_ANYCAST_DELAY]           = { .type = NLA_U64 },
        [NDTPA_PROXY_DELAY]             = { .type = NLA_U64 },
        [NDTPA_LOCKTIME]                = { .type = NLA_U64 },
};
static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
        struct net *net = skb->sk->sk_net;
        struct neigh_table *tbl;
        struct ndtmsg *ndtmsg;
        struct nlattr *tb[NDTA_MAX+1];
        int err;

        err = nlmsg_parse(nlh, sizeof(*ndtmsg), tb, NDTA_MAX,
                          nl_neightbl_policy);
        if (err < 0)
                goto errout;

        if (tb[NDTA_NAME] == NULL) {
                err = -EINVAL;
                goto errout;
        }

        ndtmsg = nlmsg_data(nlh);
        read_lock(&neigh_tbl_lock);
        for (tbl = neigh_tables; tbl; tbl = tbl->next) {
                if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
                        continue;

                if (nla_strcmp(tb[NDTA_NAME], tbl->id) == 0)
                        break;
        }

        if (tbl == NULL) {
                err = -ENOENT;
                goto errout_locked;
        }

        /*
         * We acquire tbl->lock to be nice to the periodic timers and
         * make sure they always see a consistent set of values.
         */
        write_lock_bh(&tbl->lock);

        if (tb[NDTA_PARMS]) {
                struct nlattr *tbp[NDTPA_MAX+1];
                struct neigh_parms *p;
                int i, ifindex = 0;

                err = nla_parse_nested(tbp, NDTPA_MAX, tb[NDTA_PARMS],
                                       nl_ntbl_parm_policy);
                if (err < 0)
                        goto errout_tbl_lock;

                if (tbp[NDTPA_IFINDEX])
                        ifindex = nla_get_u32(tbp[NDTPA_IFINDEX]);

                p = lookup_neigh_params(tbl, net, ifindex);
                if (p == NULL) {
                        err = -ENOENT;
                        goto errout_tbl_lock;
                }

                for (i = 1; i <= NDTPA_MAX; i++) {
                        if (tbp[i] == NULL)
                                continue;

                        switch (i) {
                        case NDTPA_QUEUE_LEN:
                                p->queue_len = nla_get_u32(tbp[i]);
                                break;
                        case NDTPA_PROXY_QLEN:
                                p->proxy_qlen = nla_get_u32(tbp[i]);
                                break;
                        case NDTPA_APP_PROBES:
                                p->app_probes = nla_get_u32(tbp[i]);
                                break;
                        case NDTPA_UCAST_PROBES:
                                p->ucast_probes = nla_get_u32(tbp[i]);
                                break;
                        case NDTPA_MCAST_PROBES:
                                p->mcast_probes = nla_get_u32(tbp[i]);
                                break;
                        case NDTPA_BASE_REACHABLE_TIME:
                                p->base_reachable_time = nla_get_msecs(tbp[i]);
                                break;
                        case NDTPA_GC_STALETIME:
                                p->gc_staletime = nla_get_msecs(tbp[i]);
                                break;
                        case NDTPA_DELAY_PROBE_TIME:
                                p->delay_probe_time = nla_get_msecs(tbp[i]);
                                break;
                        case NDTPA_RETRANS_TIME:
                                p->retrans_time = nla_get_msecs(tbp[i]);
                                break;
                        case NDTPA_ANYCAST_DELAY:
                                p->anycast_delay = nla_get_msecs(tbp[i]);
                                break;
                        case NDTPA_PROXY_DELAY:
                                p->proxy_delay = nla_get_msecs(tbp[i]);
                                break;
                        case NDTPA_LOCKTIME:
                                p->locktime = nla_get_msecs(tbp[i]);
                                break;
                        }
                }
        }

        if (tb[NDTA_THRESH1])
                tbl->gc_thresh1 = nla_get_u32(tb[NDTA_THRESH1]);

        if (tb[NDTA_THRESH2])
                tbl->gc_thresh2 = nla_get_u32(tb[NDTA_THRESH2]);

        if (tb[NDTA_THRESH3])
                tbl->gc_thresh3 = nla_get_u32(tb[NDTA_THRESH3]);

        if (tb[NDTA_GC_INTERVAL])
                tbl->gc_interval = nla_get_msecs(tb[NDTA_GC_INTERVAL]);

        err = 0;

errout_tbl_lock:
        write_unlock_bh(&tbl->lock);
errout_locked:
        read_unlock(&neigh_tbl_lock);
errout:
        return err;
}
static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
{
        struct net *net = skb->sk->sk_net;
        int family, tidx, nidx = 0;
        int tbl_skip = cb->args[0];
        int neigh_skip = cb->args[1];
        struct neigh_table *tbl;

        family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;

        read_lock(&neigh_tbl_lock);
        for (tbl = neigh_tables, tidx = 0; tbl; tbl = tbl->next, tidx++) {
                struct neigh_parms *p;

                if (tidx < tbl_skip || (family && tbl->family != family))
                        continue;

                if (neightbl_fill_info(skb, tbl, NETLINK_CB(cb->skb).pid,
                                       cb->nlh->nlmsg_seq, RTM_NEWNEIGHTBL,
                                       NLM_F_MULTI) <= 0)
                        break;

                for (nidx = 0, p = tbl->parms.next; p; p = p->next) {
                        if (net != p->net)
                                continue;

                        if (nidx++ < neigh_skip)
                                continue;

                        if (neightbl_fill_param_info(skb, tbl, p,
                                                     NETLINK_CB(cb->skb).pid,
                                                     cb->nlh->nlmsg_seq,
                                                     RTM_NEWNEIGHTBL,
                                                     NLM_F_MULTI) <= 0)
                                goto out;
                }

                neigh_skip = 0;
        }
out:
        read_unlock(&neigh_tbl_lock);
        cb->args[0] = tidx;
        cb->args[1] = nidx;

        return skb->len;
}
static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
                           u32 pid, u32 seq, int type, unsigned int flags)
{
        unsigned long now = jiffies;
        struct nda_cacheinfo ci;
        struct nlmsghdr *nlh;
        struct ndmsg *ndm;

        nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
        if (nlh == NULL)
                return -EMSGSIZE;

        ndm = nlmsg_data(nlh);
        ndm->ndm_family  = neigh->ops->family;
        ndm->ndm_pad1    = 0;
        ndm->ndm_pad2    = 0;
        ndm->ndm_flags   = neigh->flags;
        ndm->ndm_type    = neigh->type;
        ndm->ndm_ifindex = neigh->dev->ifindex;

        NLA_PUT(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key);

        read_lock_bh(&neigh->lock);
        ndm->ndm_state   = neigh->nud_state;
        if ((neigh->nud_state & NUD_VALID) &&
            nla_put(skb, NDA_LLADDR, neigh->dev->addr_len, neigh->ha) < 0) {
                read_unlock_bh(&neigh->lock);
                goto nla_put_failure;
        }

        ci.ndm_used      = now - neigh->used;
        ci.ndm_confirmed = now - neigh->confirmed;
        ci.ndm_updated   = now - neigh->updated;
        ci.ndm_refcnt    = atomic_read(&neigh->refcnt) - 1;
        read_unlock_bh(&neigh->lock);

        NLA_PUT_U32(skb, NDA_PROBES, atomic_read(&neigh->probes));
        NLA_PUT(skb, NDA_CACHEINFO, sizeof(ci), &ci);

        return nlmsg_end(skb, nlh);

nla_put_failure:
        nlmsg_cancel(skb, nlh);
        return -EMSGSIZE;
}

static void neigh_update_notify(struct neighbour *neigh)
{
        call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
        __neigh_notify(neigh, RTM_NEWNEIGH, 0);
}
static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
                            struct netlink_callback *cb)
{
        struct net * net = skb->sk->sk_net;
        struct neighbour *n;
        int rc, h, s_h = cb->args[1];
        int idx, s_idx = idx = cb->args[2];

        read_lock_bh(&tbl->lock);
        for (h = 0; h <= tbl->hash_mask; h++) {
                if (h < s_h)
                        continue;
                if (h > s_h)
                        s_idx = 0;
                for (n = tbl->hash_buckets[h], idx = 0; n; n = n->next) {
                        if (n->dev->nd_net != net)
                                continue;
                        if (idx < s_idx)
                                goto next;
                        if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).pid,
                                            cb->nlh->nlmsg_seq,
                                            RTM_NEWNEIGH,
                                            NLM_F_MULTI) <= 0) {
                                read_unlock_bh(&tbl->lock);
                                rc = -1;
                                goto out;
                        }
                next:
                        idx++;
                }
        }
        read_unlock_bh(&tbl->lock);
        rc = skb->len;
out:
        cb->args[1] = h;
        cb->args[2] = idx;
        return rc;
}

static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
{
        struct neigh_table *tbl;
        int t, family, s_t;

        read_lock(&neigh_tbl_lock);
        family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
        s_t = cb->args[0];

        for (tbl = neigh_tables, t = 0; tbl; tbl = tbl->next, t++) {
                if (t < s_t || (family && tbl->family != family))
                        continue;
                if (t > s_t)
                        memset(&cb->args[1], 0, sizeof(cb->args) -
                                                sizeof(cb->args[0]));
                if (neigh_dump_table(tbl, skb, cb) < 0)
                        break;
        }
        read_unlock(&neigh_tbl_lock);

        cb->args[0] = t;
        return skb->len;
}
void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
{
        int chain;

        read_lock_bh(&tbl->lock);
        for (chain = 0; chain <= tbl->hash_mask; chain++) {
                struct neighbour *n;

                for (n = tbl->hash_buckets[chain]; n; n = n->next)
                        cb(n, cookie);
        }
        read_unlock_bh(&tbl->lock);
}
EXPORT_SYMBOL(neigh_for_each);

/* The tbl->lock must be held as a writer and BH disabled. */
void __neigh_for_each_release(struct neigh_table *tbl,
                              int (*cb)(struct neighbour *))
{
        int chain;

        for (chain = 0; chain <= tbl->hash_mask; chain++) {
                struct neighbour *n, **np;

                np = &tbl->hash_buckets[chain];
                while ((n = *np) != NULL) {
                        int release;

                        write_lock(&n->lock);
                        release = cb(n);
                        if (release) {
                                *np = n->next;
                                n->dead = 1;
                        } else
                                np = &n->next;
                        write_unlock(&n->lock);
                        if (release)
                                neigh_cleanup_and_release(n);
                }
        }
}
EXPORT_SYMBOL(__neigh_for_each_release);
#ifdef CONFIG_PROC_FS

static struct neighbour *neigh_get_first(struct seq_file *seq)
{
        struct neigh_seq_state *state = seq->private;
        struct net *net = state->p.net;
        struct neigh_table *tbl = state->tbl;
        struct neighbour *n = NULL;
        int bucket = state->bucket;

        state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
        for (bucket = 0; bucket <= tbl->hash_mask; bucket++) {
                n = tbl->hash_buckets[bucket];

                while (n) {
                        if (n->dev->nd_net != net)
                                goto next;
                        if (state->neigh_sub_iter) {
                                loff_t fakep = 0;
                                void *v;

                                v = state->neigh_sub_iter(state, n, &fakep);
                                if (!v)
                                        goto next;
                        }
                        if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
                                break;
                        if (n->nud_state & ~NUD_NOARP)
                                break;
                next:
                        n = n->next;
                }

                if (n)
                        break;
        }
        state->bucket = bucket;

        return n;
}

static struct neighbour *neigh_get_next(struct seq_file *seq,
                                        struct neighbour *n,
                                        loff_t *pos)
{
        struct neigh_seq_state *state = seq->private;
        struct net *net = state->p.net;
        struct neigh_table *tbl = state->tbl;

        if (state->neigh_sub_iter) {
                void *v = state->neigh_sub_iter(state, n, pos);
                if (v)
                        return n;
        }
        n = n->next;

        while (1) {
                while (n) {
                        if (n->dev->nd_net != net)
                                goto next;
                        if (state->neigh_sub_iter) {
                                void *v = state->neigh_sub_iter(state, n, pos);
                                if (v)
                                        return n;
                                goto next;
                        }
                        if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
                                break;

                        if (n->nud_state & ~NUD_NOARP)
                                break;
                next:
                        n = n->next;
                }

                if (n)
                        break;

                if (++state->bucket > tbl->hash_mask)
                        break;

                n = tbl->hash_buckets[state->bucket];
        }

        if (n && pos)
                --(*pos);
        return n;
}

static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
{
        struct neighbour *n = neigh_get_first(seq);

        if (n) {
                while (*pos) {
                        n = neigh_get_next(seq, n, pos);
                        if (!n)
                                break;
                }
        }
        return *pos ? NULL : n;
}

static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
{
        struct neigh_seq_state *state = seq->private;
        struct net * net = state->p.net;
        struct neigh_table *tbl = state->tbl;
        struct pneigh_entry *pn = NULL;
        int bucket = state->bucket;

        state->flags |= NEIGH_SEQ_IS_PNEIGH;
        for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
                pn = tbl->phash_buckets[bucket];
                while (pn && (pn->net != net))
                        pn = pn->next;
                if (pn)
                        break;
        }
        state->bucket = bucket;

        return pn;
}

static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
                                            struct pneigh_entry *pn,
                                            loff_t *pos)
{
        struct neigh_seq_state *state = seq->private;
        struct net * net = state->p.net;
        struct neigh_table *tbl = state->tbl;

        pn = pn->next;
        while (!pn) {
                if (++state->bucket > PNEIGH_HASHMASK)
                        break;
                pn = tbl->phash_buckets[state->bucket];
                while (pn && (pn->net != net))
                        pn = pn->next;
                if (pn)
                        break;
        }

        if (pn && pos)
                --(*pos);

        return pn;
}

static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
{
        struct pneigh_entry *pn = pneigh_get_first(seq);

        if (pn) {
                while (*pos) {
                        pn = pneigh_get_next(seq, pn, pos);
                        if (!pn)
                                break;
                }
        }
        return *pos ? NULL : pn;
}

static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
{
        struct neigh_seq_state *state = seq->private;
        void *rc;

        rc = neigh_get_idx(seq, pos);
        if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
                rc = pneigh_get_idx(seq, pos);

        return rc;
}

void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
        __acquires(tbl->lock)
{
        struct neigh_seq_state *state = seq->private;
        loff_t pos_minus_one;

        state->tbl = tbl;
        state->bucket = 0;
        state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);

        read_lock_bh(&tbl->lock);

        pos_minus_one = *pos - 1;
        return *pos ? neigh_get_idx_any(seq, &pos_minus_one) : SEQ_START_TOKEN;
}
EXPORT_SYMBOL(neigh_seq_start);

void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        struct neigh_seq_state *state;
        void *rc;

        if (v == SEQ_START_TOKEN) {
                rc = neigh_get_idx(seq, pos);
                goto out;
        }

        state = seq->private;
        if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) {
                rc = neigh_get_next(seq, v, NULL);
                if (rc)
                        goto out;
                if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY))
                        rc = pneigh_get_first(seq);
        } else {
                BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY);
                rc = pneigh_get_next(seq, v, NULL);
        }
out:
        ++(*pos);
        return rc;
}
EXPORT_SYMBOL(neigh_seq_next);

void neigh_seq_stop(struct seq_file *seq, void *v)
        __releases(tbl->lock)
{
        struct neigh_seq_state *state = seq->private;
        struct neigh_table *tbl = state->tbl;

        read_unlock_bh(&tbl->lock);
}
EXPORT_SYMBOL(neigh_seq_stop);
/* statistics via seq_file */

static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
{
        struct proc_dir_entry *pde = seq->private;
        struct neigh_table *tbl = pde->data;
        int cpu;

        if (*pos == 0)
                return SEQ_START_TOKEN;

        for (cpu = *pos-1; cpu < NR_CPUS; ++cpu) {
                if (!cpu_possible(cpu))
                        continue;
                *pos = cpu+1;
                return per_cpu_ptr(tbl->stats, cpu);
        }
        return NULL;
}

static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        struct proc_dir_entry *pde = seq->private;
        struct neigh_table *tbl = pde->data;
        int cpu;

        for (cpu = *pos; cpu < NR_CPUS; ++cpu) {
                if (!cpu_possible(cpu))
                        continue;
                *pos = cpu+1;
                return per_cpu_ptr(tbl->stats, cpu);
        }
        return NULL;
}

static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
{

}

static int neigh_stat_seq_show(struct seq_file *seq, void *v)
{
        struct proc_dir_entry *pde = seq->private;
        struct neigh_table *tbl = pde->data;
        struct neigh_statistics *st = v;

        if (v == SEQ_START_TOKEN) {
                seq_printf(seq, "entries allocs destroys hash_grows lookups hits res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs\n");
                return 0;
        }

        seq_printf(seq, "%08x %08lx %08lx %08lx %08lx %08lx %08lx "
                        "%08lx %08lx %08lx %08lx\n",
                   atomic_read(&tbl->entries),

                   st->allocs,
                   st->destroys,
                   st->hash_grows,

                   st->lookups,
                   st->hits,

                   st->res_failed,

                   st->rcv_probes_mcast,
                   st->rcv_probes_ucast,

                   st->periodic_gc_runs,
                   st->forced_gc_runs
                   );

        return 0;
}

static const struct seq_operations neigh_stat_seq_ops = {
        .start  = neigh_stat_seq_start,
        .next   = neigh_stat_seq_next,
        .stop   = neigh_stat_seq_stop,
        .show   = neigh_stat_seq_show,
};

static int neigh_stat_seq_open(struct inode *inode, struct file *file)
{
        int ret = seq_open(file, &neigh_stat_seq_ops);

        if (!ret) {
                struct seq_file *sf = file->private_data;
                sf->private = PDE(inode);
        }
        return ret;
};

static const struct file_operations neigh_stat_seq_fops = {
        .owner   = THIS_MODULE,
        .open    = neigh_stat_seq_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release,
};

#endif /* CONFIG_PROC_FS */
static inline size_t neigh_nlmsg_size(void)
{
        return NLMSG_ALIGN(sizeof(struct ndmsg))
               + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
               + nla_total_size(MAX_ADDR_LEN) /* NDA_LLADDR */
               + nla_total_size(sizeof(struct nda_cacheinfo))
               + nla_total_size(4); /* NDA_PROBES */
}
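/*
 * The value above is a worst-case bound: every attribute neigh_fill_info()
 * may emit is counted at its maximum size, so the allocation paired with it
 * can never come up short (the WARN_ON(err == -EMSGSIZE) in __neigh_notify()
 * below would flag a bug here).  A minimal sketch of that pairing
 * (illustrative only):
 */
static inline struct sk_buff *neigh_nlmsg_alloc_example(void)
{
        /* room for any single-neighbour RTM_NEWNEIGH/RTM_DELNEIGH message */
        return nlmsg_new(neigh_nlmsg_size(), GFP_ATOMIC);
}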
static void __neigh_notify(struct neighbour *n, int type, int flags)
{
        struct net *net = n->dev->nd_net;
        struct sk_buff *skb;
        int err = -ENOBUFS;

        skb = nlmsg_new(neigh_nlmsg_size(), GFP_ATOMIC);
        if (skb == NULL)
                goto errout;

        err = neigh_fill_info(skb, n, 0, 0, type, flags);
        if (err < 0) {
                /* -EMSGSIZE implies BUG in neigh_nlmsg_size() */
                WARN_ON(err == -EMSGSIZE);
                kfree_skb(skb);
                goto errout;
        }
        err = rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
errout:
        if (err < 0)
                rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
}

#ifdef CONFIG_ARPD
void neigh_app_ns(struct neighbour *n)
{
        __neigh_notify(n, RTM_GETNEIGH, NLM_F_REQUEST);
}
#endif /* CONFIG_ARPD */
#ifdef CONFIG_SYSCTL

static struct neigh_sysctl_table {
        struct ctl_table_header *sysctl_header;
        struct ctl_table neigh_vars[__NET_NEIGH_MAX];
        char *dev_name;
} neigh_sysctl_template __read_mostly = {
        .neigh_vars = {
                {
                        .ctl_name       = NET_NEIGH_MCAST_SOLICIT,
                        .procname       = "mcast_solicit",
                        .maxlen         = sizeof(int),
                        .mode           = 0644,
                        .proc_handler   = &proc_dointvec,
                },
                {
                        .ctl_name       = NET_NEIGH_UCAST_SOLICIT,
                        .procname       = "ucast_solicit",
                        .maxlen         = sizeof(int),
                        .mode           = 0644,
                        .proc_handler   = &proc_dointvec,
                },
                {
                        .ctl_name       = NET_NEIGH_APP_SOLICIT,
                        .procname       = "app_solicit",
                        .maxlen         = sizeof(int),
                        .mode           = 0644,
                        .proc_handler   = &proc_dointvec,
                },
                {
                        .procname       = "retrans_time",
                        .maxlen         = sizeof(int),
                        .mode           = 0644,
                        .proc_handler   = &proc_dointvec_userhz_jiffies,
                },
                {
                        .ctl_name       = NET_NEIGH_REACHABLE_TIME,
                        .procname       = "base_reachable_time",
                        .maxlen         = sizeof(int),
                        .mode           = 0644,
                        .proc_handler   = &proc_dointvec_jiffies,
                        .strategy       = &sysctl_jiffies,
                },
                {
                        .ctl_name       = NET_NEIGH_DELAY_PROBE_TIME,
                        .procname       = "delay_first_probe_time",
                        .maxlen         = sizeof(int),
                        .mode           = 0644,
                        .proc_handler   = &proc_dointvec_jiffies,
                        .strategy       = &sysctl_jiffies,
                },
                {
                        .ctl_name       = NET_NEIGH_GC_STALE_TIME,
                        .procname       = "gc_stale_time",
                        .maxlen         = sizeof(int),
                        .mode           = 0644,
                        .proc_handler   = &proc_dointvec_jiffies,
                        .strategy       = &sysctl_jiffies,
                },
                {
                        .ctl_name       = NET_NEIGH_UNRES_QLEN,
                        .procname       = "unres_qlen",
                        .maxlen         = sizeof(int),
                        .mode           = 0644,
                        .proc_handler   = &proc_dointvec,
                },
                {
                        .ctl_name       = NET_NEIGH_PROXY_QLEN,
                        .procname       = "proxy_qlen",
                        .maxlen         = sizeof(int),
                        .mode           = 0644,
                        .proc_handler   = &proc_dointvec,
                },
                {
                        .procname       = "anycast_delay",
                        .maxlen         = sizeof(int),
                        .mode           = 0644,
                        .proc_handler   = &proc_dointvec_userhz_jiffies,
                },
                {
                        .procname       = "proxy_delay",
                        .maxlen         = sizeof(int),
                        .mode           = 0644,
                        .proc_handler   = &proc_dointvec_userhz_jiffies,
                },
                {
                        .procname       = "locktime",
                        .maxlen         = sizeof(int),
                        .mode           = 0644,
                        .proc_handler   = &proc_dointvec_userhz_jiffies,
                },
                {
                        .ctl_name       = NET_NEIGH_RETRANS_TIME_MS,
                        .procname       = "retrans_time_ms",
                        .maxlen         = sizeof(int),
                        .mode           = 0644,
                        .proc_handler   = &proc_dointvec_ms_jiffies,
                        .strategy       = &sysctl_ms_jiffies,
                },
                {
                        .ctl_name       = NET_NEIGH_REACHABLE_TIME_MS,
                        .procname       = "base_reachable_time_ms",
                        .maxlen         = sizeof(int),
                        .mode           = 0644,
                        .proc_handler   = &proc_dointvec_ms_jiffies,
                        .strategy       = &sysctl_ms_jiffies,
                },
                {
                        .ctl_name       = NET_NEIGH_GC_INTERVAL,
                        .procname       = "gc_interval",
                        .maxlen         = sizeof(int),
                        .mode           = 0644,
                        .proc_handler   = &proc_dointvec_jiffies,
                        .strategy       = &sysctl_jiffies,
                },
                {
                        .ctl_name       = NET_NEIGH_GC_THRESH1,
                        .procname       = "gc_thresh1",
                        .maxlen         = sizeof(int),
                        .mode           = 0644,
                        .proc_handler   = &proc_dointvec,
                },
                {
                        .ctl_name       = NET_NEIGH_GC_THRESH2,
                        .procname       = "gc_thresh2",
                        .maxlen         = sizeof(int),
                        .mode           = 0644,
                        .proc_handler   = &proc_dointvec,
                },
                {
                        .ctl_name       = NET_NEIGH_GC_THRESH3,
                        .procname       = "gc_thresh3",
                        .maxlen         = sizeof(int),
                        .mode           = 0644,
                        .proc_handler   = &proc_dointvec,
                },
                {},
        },
};
int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
                          int p_id, int pdev_id, char *p_name,
                          proc_handler *handler, ctl_handler *strategy)
{
        struct neigh_sysctl_table *t;
        const char *dev_name_source = NULL;

#define NEIGH_CTL_PATH_ROOT     0
#define NEIGH_CTL_PATH_PROTO    1
#define NEIGH_CTL_PATH_NEIGH    2
#define NEIGH_CTL_PATH_DEV      3

        struct ctl_path neigh_path[] = {
                { .procname = "net",     .ctl_name = CTL_NET, },
                { .procname = "proto",   .ctl_name = 0, },
                { .procname = "neigh",   .ctl_name = 0, },
                { .procname = "default", .ctl_name = NET_PROTO_CONF_DEFAULT, },
                { },
        };

        t = kmemdup(&neigh_sysctl_template, sizeof(*t), GFP_KERNEL);
        if (!t)
                goto err;

        t->neigh_vars[0].data  = &p->mcast_probes;
        t->neigh_vars[1].data  = &p->ucast_probes;
        t->neigh_vars[2].data  = &p->app_probes;
        t->neigh_vars[3].data  = &p->retrans_time;
        t->neigh_vars[4].data  = &p->base_reachable_time;
        t->neigh_vars[5].data  = &p->delay_probe_time;
        t->neigh_vars[6].data  = &p->gc_staletime;
        t->neigh_vars[7].data  = &p->queue_len;
        t->neigh_vars[8].data  = &p->proxy_qlen;
        t->neigh_vars[9].data  = &p->anycast_delay;
        t->neigh_vars[10].data = &p->proxy_delay;
        t->neigh_vars[11].data = &p->locktime;
        t->neigh_vars[12].data = &p->retrans_time;
        t->neigh_vars[13].data = &p->base_reachable_time;

        if (dev) {
                dev_name_source = dev->name;
                neigh_path[NEIGH_CTL_PATH_DEV].ctl_name = dev->ifindex;
                /* Terminate the table early */
                memset(&t->neigh_vars[14], 0, sizeof(t->neigh_vars[14]));
        } else {
                dev_name_source = neigh_path[NEIGH_CTL_PATH_DEV].procname;
                t->neigh_vars[14].data = (int *)(p + 1);
                t->neigh_vars[15].data = (int *)(p + 1) + 1;
                t->neigh_vars[16].data = (int *)(p + 1) + 2;
                t->neigh_vars[17].data = (int *)(p + 1) + 3;
        }

        if (handler || strategy) {
                /* RetransTime */
                t->neigh_vars[3].proc_handler = handler;
                t->neigh_vars[3].strategy = strategy;
                t->neigh_vars[3].extra1 = dev;
                if (!strategy)
                        t->neigh_vars[3].ctl_name = CTL_UNNUMBERED;
                /* ReachableTime */
                t->neigh_vars[4].proc_handler = handler;
                t->neigh_vars[4].strategy = strategy;
                t->neigh_vars[4].extra1 = dev;
                if (!strategy)
                        t->neigh_vars[4].ctl_name = CTL_UNNUMBERED;
                /* RetransTime (in milliseconds)*/
                t->neigh_vars[12].proc_handler = handler;
                t->neigh_vars[12].strategy = strategy;
                t->neigh_vars[12].extra1 = dev;
                if (!strategy)
                        t->neigh_vars[12].ctl_name = CTL_UNNUMBERED;
                /* ReachableTime (in milliseconds) */
                t->neigh_vars[13].proc_handler = handler;
                t->neigh_vars[13].strategy = strategy;
                t->neigh_vars[13].extra1 = dev;
                if (!strategy)
                        t->neigh_vars[13].ctl_name = CTL_UNNUMBERED;
        }

        t->dev_name = kstrdup(dev_name_source, GFP_KERNEL);
        if (!t->dev_name)
                goto free;

        neigh_path[NEIGH_CTL_PATH_DEV].procname = t->dev_name;
        neigh_path[NEIGH_CTL_PATH_NEIGH].ctl_name = pdev_id;
        neigh_path[NEIGH_CTL_PATH_PROTO].procname = p_name;
        neigh_path[NEIGH_CTL_PATH_PROTO].ctl_name = p_id;

        t->sysctl_header = register_sysctl_paths(neigh_path, t->neigh_vars);
        if (!t->sysctl_header)
                goto free_procname;

        p->sysctl_table = t;
        return 0;

free_procname:
        kfree(t->dev_name);
free:
        kfree(t);
err:
        return -ENOBUFS;
}

void neigh_sysctl_unregister(struct neigh_parms *p)
{
        if (p->sysctl_table) {
                struct neigh_sysctl_table *t = p->sysctl_table;
                p->sysctl_table = NULL;
                unregister_sysctl_table(t->sysctl_header);
                kfree(t->dev_name);
                kfree(t);
        }
}

#endif  /* CONFIG_SYSCTL */
static int __init neigh_init(void)
{
        rtnl_register(PF_UNSPEC, RTM_NEWNEIGH, neigh_add, NULL);
        rtnl_register(PF_UNSPEC, RTM_DELNEIGH, neigh_delete, NULL);
        rtnl_register(PF_UNSPEC, RTM_GETNEIGH, NULL, neigh_dump_info);

        rtnl_register(PF_UNSPEC, RTM_GETNEIGHTBL, NULL, neightbl_dump_info);
        rtnl_register(PF_UNSPEC, RTM_SETNEIGHTBL, neightbl_set, NULL);

        return 0;
}

subsys_initcall(neigh_init);

EXPORT_SYMBOL(__neigh_event_send);
EXPORT_SYMBOL(neigh_changeaddr);
EXPORT_SYMBOL(neigh_compat_output);
EXPORT_SYMBOL(neigh_connected_output);
EXPORT_SYMBOL(neigh_create);
EXPORT_SYMBOL(neigh_destroy);
EXPORT_SYMBOL(neigh_event_ns);
EXPORT_SYMBOL(neigh_ifdown);
EXPORT_SYMBOL(neigh_lookup);
EXPORT_SYMBOL(neigh_lookup_nodev);
EXPORT_SYMBOL(neigh_parms_alloc);
EXPORT_SYMBOL(neigh_parms_release);
EXPORT_SYMBOL(neigh_rand_reach_time);
EXPORT_SYMBOL(neigh_resolve_output);
EXPORT_SYMBOL(neigh_table_clear);
EXPORT_SYMBOL(neigh_table_init);
EXPORT_SYMBOL(neigh_table_init_no_netlink);
EXPORT_SYMBOL(neigh_update);
EXPORT_SYMBOL(pneigh_enqueue);
EXPORT_SYMBOL(pneigh_lookup);

#ifdef CONFIG_ARPD
EXPORT_SYMBOL(neigh_app_ns);
#endif
#ifdef CONFIG_SYSCTL
EXPORT_SYMBOL(neigh_sysctl_register);
EXPORT_SYMBOL(neigh_sysctl_unregister);
#endif