/*
 *	Generic address resolution entity
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Fixes:
 *	Vitaly E. Lavrov	releasing NULL neighbor in neigh_add.
 *	Harald Welte		Add neighbour cache statistics like rtstat
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif
#include <linux/times.h>
#include <net/net_namespace.h>
#include <net/neighbour.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/netevent.h>
#include <net/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/log2.h>
#define NEIGH_DEBUG 1

#define NEIGH_PRINTK(x...) printk(x)
#define NEIGH_NOPRINTK(x...) do { ; } while(0)
#define NEIGH_PRINTK1 NEIGH_NOPRINTK
#define NEIGH_PRINTK2 NEIGH_NOPRINTK

#if NEIGH_DEBUG >= 1
#undef NEIGH_PRINTK1
#define NEIGH_PRINTK1 NEIGH_PRINTK
#endif
#if NEIGH_DEBUG >= 2
#undef NEIGH_PRINTK2
#define NEIGH_PRINTK2 NEIGH_PRINTK
#endif

#define PNEIGH_HASHMASK		0xF
static void neigh_timer_handler(unsigned long arg);
static void __neigh_notify(struct neighbour *n, int type, int flags);
static void neigh_update_notify(struct neighbour *neigh);
static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev);

static struct neigh_table *neigh_tables;
#ifdef CONFIG_PROC_FS
static const struct file_operations neigh_stat_seq_fops;
#endif
/*
   Neighbour hash table buckets are protected with rwlock tbl->lock.

   - All the scans/updates to hash buckets MUST be made under this lock.
   - NOTHING clever should be made under this lock: no callbacks
     to protocol backends, no attempts to send something to network.
     It will result in deadlocks if a backend/driver wants to use the
     neighbour cache.
   - If the entry requires some non-trivial actions, increase
     its reference count and release the table lock.

   Neighbour entries are protected:
   - with reference count.
   - with rwlock neigh->lock

   Reference count prevents destruction.

   neigh->lock mainly serializes ll address data and its validity state.
   However, the same lock is used to protect other entry fields:
    - timer
    - resolution queue

   Again, nothing clever shall be made under neigh->lock;
   the most complicated procedure we allow is dev->hard_header.
   It is assumed that dev->hard_header is simplistic and does
   not make callbacks to neighbour tables.

   The last lock is neigh_tbl_lock. It is a pure SMP lock, protecting
   the list of neighbour tables. This list is used only in process context.
 */
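/*
 * A minimal sketch of the "take a reference, then drop the table lock"
 * rule stated above. The do_slow_work() callback is hypothetical;
 * neigh_hold()/neigh_release() and tbl->lock are the real interfaces
 * involved.
 */
#if 0	/* illustrative only */
static void example_slow_path(struct neigh_table *tbl, struct neighbour *n)
{
	write_lock_bh(&tbl->lock);
	neigh_hold(n);			/* pin the entry under the lock... */
	write_unlock_bh(&tbl->lock);	/* ...then release the table lock */

	do_slow_work(n);		/* hypothetical non-trivial action */

	neigh_release(n);		/* may free the entry on last put */
}
#endif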
static DEFINE_RWLOCK(neigh_tbl_lock);

static int neigh_blackhole(struct neighbour *neigh, struct sk_buff *skb)
{
	kfree_skb(skb);
	return -ENETDOWN;
}

static void neigh_cleanup_and_release(struct neighbour *neigh)
{
	if (neigh->parms->neigh_cleanup)
		neigh->parms->neigh_cleanup(neigh);

	__neigh_notify(neigh, RTM_DELNEIGH, 0);
	neigh_release(neigh);
}
/*
 * It is a uniform random distribution over the interval
 * (1/2)*base ... (3/2)*base. It corresponds to the default IPv6
 * settings and is not overridable, because it is a really
 * reasonable choice.
 */

unsigned long neigh_rand_reach_time(unsigned long base)
{
	return base ? (net_random() % base) + (base >> 1) : 0;
}
EXPORT_SYMBOL(neigh_rand_reach_time);
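/*
 * Worked example: with base = 30 * HZ, the expression above yields
 * base/2 + (net_random() % base), i.e. a value uniformly distributed
 * over [15 * HZ, 45 * HZ), which is exactly the (1/2)*base ... (3/2)*base
 * window described in the comment.
 */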
static int neigh_forced_gc(struct neigh_table *tbl)
{
	int shrunk = 0;
	int i;
	struct neigh_hash_table *nht;

	NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);

	write_lock_bh(&tbl->lock);
	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));
	for (i = 0; i < (1 << nht->hash_shift); i++) {
		struct neighbour *n;
		struct neighbour __rcu **np;

		np = &nht->hash_buckets[i];
		while ((n = rcu_dereference_protected(*np,
					lockdep_is_held(&tbl->lock))) != NULL) {
			/* Neighbour record may be discarded if:
			 * - nobody refers to it.
			 * - it is not permanent
			 */
			write_lock(&n->lock);
			if (atomic_read(&n->refcnt) == 1 &&
			    !(n->nud_state & NUD_PERMANENT)) {
				rcu_assign_pointer(*np,
					rcu_dereference_protected(n->next,
						  lockdep_is_held(&tbl->lock)));
				n->dead = 1;
				shrunk	= 1;
				write_unlock(&n->lock);
				neigh_cleanup_and_release(n);
				continue;
			}
			write_unlock(&n->lock);
			np = &n->next;
		}
	}

	tbl->last_flush = jiffies;

	write_unlock_bh(&tbl->lock);

	return shrunk;
}
static void neigh_add_timer(struct neighbour *n, unsigned long when)
{
	neigh_hold(n);
	if (unlikely(mod_timer(&n->timer, when))) {
		printk("NEIGH: BUG, double timer add, state is %x\n",
		       n->nud_state);
		dump_stack();
	}
}

static int neigh_del_timer(struct neighbour *n)
{
	if ((n->nud_state & NUD_IN_TIMER) &&
	    del_timer(&n->timer)) {
		neigh_release(n);
		return 1;
	}
	return 0;
}

static void pneigh_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(list)) != NULL) {
		dev_put(skb->dev);
		kfree_skb(skb);
	}
}
static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev)
{
	int i;
	struct neigh_hash_table *nht;

	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));

	for (i = 0; i < (1 << nht->hash_shift); i++) {
		struct neighbour *n;
		struct neighbour __rcu **np = &nht->hash_buckets[i];

		while ((n = rcu_dereference_protected(*np,
					lockdep_is_held(&tbl->lock))) != NULL) {
			if (dev && n->dev != dev) {
				np = &n->next;
				continue;
			}
			rcu_assign_pointer(*np,
				   rcu_dereference_protected(n->next,
						lockdep_is_held(&tbl->lock)));
			write_lock(&n->lock);
			neigh_del_timer(n);
			n->dead = 1;

			if (atomic_read(&n->refcnt) != 1) {
				/* The most unpleasant situation.
				   We must destroy neighbour entry,
				   but someone still uses it.

				   The destroy will be delayed until
				   the last user releases us, but
				   we must kill timers etc. and move
				   it to safe state.
				 */
				skb_queue_purge(&n->arp_queue);
				n->arp_queue_len_bytes = 0;
				n->output = neigh_blackhole;
				if (n->nud_state & NUD_VALID)
					n->nud_state = NUD_NOARP;
				else
					n->nud_state = NUD_NONE;
				NEIGH_PRINTK2("neigh %p is stray.\n", n);
			}
			write_unlock(&n->lock);
			neigh_cleanup_and_release(n);
		}
	}
}
void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
{
	write_lock_bh(&tbl->lock);
	neigh_flush_dev(tbl, dev);
	write_unlock_bh(&tbl->lock);
}
EXPORT_SYMBOL(neigh_changeaddr);

int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
	write_lock_bh(&tbl->lock);
	neigh_flush_dev(tbl, dev);
	pneigh_ifdown(tbl, dev);
	write_unlock_bh(&tbl->lock);

	del_timer_sync(&tbl->proxy_timer);
	pneigh_queue_purge(&tbl->proxy_queue);
	return 0;
}
EXPORT_SYMBOL(neigh_ifdown);
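/*
 * A minimal sketch of the intended caller: a protocol flushes its cache
 * when an interface goes down (cf. arp_ifdown(), which does just this
 * with arp_tbl). The notifier function name here is hypothetical.
 */
#if 0	/* illustrative only */
static int example_netdev_event(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;

	if (event == NETDEV_DOWN)
		neigh_ifdown(&arp_tbl, dev);	/* drop entries and proxies */
	return NOTIFY_DONE;
}
#endif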
static struct neighbour *neigh_alloc(struct neigh_table *tbl, struct net_device *dev)
{
	struct neighbour *n = NULL;
	unsigned long now = jiffies;
	int entries;

	entries = atomic_inc_return(&tbl->entries) - 1;
	if (entries >= tbl->gc_thresh3 ||
	    (entries >= tbl->gc_thresh2 &&
	     time_after(now, tbl->last_flush + 5 * HZ))) {
		if (!neigh_forced_gc(tbl) &&
		    entries >= tbl->gc_thresh3)
			goto out_entries;
	}

	if (tbl->entry_size)
		n = kzalloc(tbl->entry_size, GFP_ATOMIC);
	else {
		int sz = sizeof(*n) + tbl->key_len;

		sz = ALIGN(sz, NEIGH_PRIV_ALIGN);
		sz += dev->neigh_priv_len;
		n = kzalloc(sz, GFP_ATOMIC);
	}
	if (!n)
		goto out_entries;

	skb_queue_head_init(&n->arp_queue);
	rwlock_init(&n->lock);
	seqlock_init(&n->ha_lock);
	n->updated	  = n->used = now;
	n->nud_state	  = NUD_NONE;
	n->output	  = neigh_blackhole;
	seqlock_init(&n->hh.hh_lock);
	n->parms	  = neigh_parms_clone(&tbl->parms);
	setup_timer(&n->timer, neigh_timer_handler, (unsigned long)n);

	NEIGH_CACHE_STAT_INC(tbl, allocs);
	n->tbl		  = tbl;
	atomic_set(&n->refcnt, 1);
	n->dead		  = 1;
out:
	return n;

out_entries:
	atomic_dec(&tbl->entries);
	goto out;
}

static void neigh_get_hash_rnd(u32 *x)
{
	get_random_bytes(x, sizeof(*x));
	*x |= 1;
}
static struct neigh_hash_table *neigh_hash_alloc(unsigned int shift)
{
	size_t size = (1 << shift) * sizeof(struct neighbour *);
	struct neigh_hash_table *ret;
	struct neighbour __rcu **buckets;
	int i;

	ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
	if (!ret)
		return NULL;
	if (size <= PAGE_SIZE)
		buckets = kzalloc(size, GFP_ATOMIC);
	else
		buckets = (struct neighbour __rcu **)
			  __get_free_pages(GFP_ATOMIC | __GFP_ZERO,
					   get_order(size));
	if (!buckets) {
		kfree(ret);
		return NULL;
	}
	ret->hash_buckets = buckets;
	ret->hash_shift = shift;
	for (i = 0; i < NEIGH_NUM_HASH_RND; i++)
		neigh_get_hash_rnd(&ret->hash_rnd[i]);
	return ret;
}

static void neigh_hash_free_rcu(struct rcu_head *head)
{
	struct neigh_hash_table *nht = container_of(head,
						    struct neigh_hash_table,
						    rcu);
	size_t size = (1 << nht->hash_shift) * sizeof(struct neighbour *);
	struct neighbour __rcu **buckets = nht->hash_buckets;

	if (size <= PAGE_SIZE)
		kfree(buckets);
	else
		free_pages((unsigned long)buckets, get_order(size));
	kfree(nht);
}
static struct neigh_hash_table *neigh_hash_grow(struct neigh_table *tbl,
						unsigned long new_shift)
{
	unsigned int i, hash;
	struct neigh_hash_table *new_nht, *old_nht;

	NEIGH_CACHE_STAT_INC(tbl, hash_grows);

	old_nht = rcu_dereference_protected(tbl->nht,
					    lockdep_is_held(&tbl->lock));
	new_nht = neigh_hash_alloc(new_shift);
	if (!new_nht)
		return old_nht;

	for (i = 0; i < (1 << old_nht->hash_shift); i++) {
		struct neighbour *n, *next;

		for (n = rcu_dereference_protected(old_nht->hash_buckets[i],
						   lockdep_is_held(&tbl->lock));
		     n != NULL;
		     n = next) {
			hash = tbl->hash(n->primary_key, n->dev,
					 new_nht->hash_rnd);

			hash >>= (32 - new_nht->hash_shift);
			next = rcu_dereference_protected(n->next,
						lockdep_is_held(&tbl->lock));

			rcu_assign_pointer(n->next,
					   rcu_dereference_protected(
						new_nht->hash_buckets[hash],
						lockdep_is_held(&tbl->lock)));
			rcu_assign_pointer(new_nht->hash_buckets[hash], n);
		}
	}

	rcu_assign_pointer(tbl->nht, new_nht);
	call_rcu(&old_nht->rcu, neigh_hash_free_rcu);
	return new_nht;
}
struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
			       struct net_device *dev)
{
	struct neighbour *n;
	int key_len = tbl->key_len;
	u32 hash_val;
	struct neigh_hash_table *nht;

	NEIGH_CACHE_STAT_INC(tbl, lookups);

	rcu_read_lock_bh();
	nht = rcu_dereference_bh(tbl->nht);
	hash_val = tbl->hash(pkey, dev, nht->hash_rnd) >> (32 - nht->hash_shift);

	for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
	     n != NULL;
	     n = rcu_dereference_bh(n->next)) {
		if (dev == n->dev && !memcmp(n->primary_key, pkey, key_len)) {
			if (!atomic_inc_not_zero(&n->refcnt))
				n = NULL;
			NEIGH_CACHE_STAT_INC(tbl, hits);
			break;
		}
	}

	rcu_read_unlock_bh();
	return n;
}
EXPORT_SYMBOL(neigh_lookup);
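/*
 * A minimal sketch of the lookup contract: a successful neigh_lookup()
 * returns the entry with its reference count raised, so every hit must
 * be paired with neigh_release(). Table, key and device are placeholders.
 */
#if 0	/* illustrative only */
static void example_lookup(struct neigh_table *tbl, const void *key,
			   struct net_device *dev)
{
	struct neighbour *n = neigh_lookup(tbl, key, dev);

	if (n) {
		/* ... inspect n->nud_state, n->ha, ... under n->lock ... */
		neigh_release(n);	/* drop the reference taken above */
	}
}
#endif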
struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
				     const void *pkey)
{
	struct neighbour *n;
	int key_len = tbl->key_len;
	u32 hash_val;
	struct neigh_hash_table *nht;

	NEIGH_CACHE_STAT_INC(tbl, lookups);

	rcu_read_lock_bh();
	nht = rcu_dereference_bh(tbl->nht);
	hash_val = tbl->hash(pkey, NULL, nht->hash_rnd) >> (32 - nht->hash_shift);

	for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
	     n != NULL;
	     n = rcu_dereference_bh(n->next)) {
		if (!memcmp(n->primary_key, pkey, key_len) &&
		    net_eq(dev_net(n->dev), net)) {
			if (!atomic_inc_not_zero(&n->refcnt))
				n = NULL;
			NEIGH_CACHE_STAT_INC(tbl, hits);
			break;
		}
	}

	rcu_read_unlock_bh();
	return n;
}
EXPORT_SYMBOL(neigh_lookup_nodev);
struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
				 struct net_device *dev, bool want_ref)
{
	u32 hash_val;
	int key_len = tbl->key_len;
	int error;
	struct neighbour *n1, *rc, *n = neigh_alloc(tbl, dev);
	struct neigh_hash_table *nht;

	if (!n) {
		rc = ERR_PTR(-ENOBUFS);
		goto out;
	}

	memcpy(n->primary_key, pkey, key_len);
	n->dev = dev;
	dev_hold(dev);

	/* Protocol specific setup. */
	if (tbl->constructor &&	(error = tbl->constructor(n)) < 0) {
		rc = ERR_PTR(error);
		goto out_neigh_release;
	}

	if (dev->netdev_ops->ndo_neigh_construct) {
		error = dev->netdev_ops->ndo_neigh_construct(n);
		if (error < 0) {
			rc = ERR_PTR(error);
			goto out_neigh_release;
		}
	}

	/* Device specific setup. */
	if (n->parms->neigh_setup &&
	    (error = n->parms->neigh_setup(n)) < 0) {
		rc = ERR_PTR(error);
		goto out_neigh_release;
	}

	n->confirmed = jiffies - (n->parms->base_reachable_time << 1);

	write_lock_bh(&tbl->lock);
	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));

	if (atomic_read(&tbl->entries) > (1 << nht->hash_shift))
		nht = neigh_hash_grow(tbl, nht->hash_shift + 1);

	hash_val = tbl->hash(pkey, dev, nht->hash_rnd) >> (32 - nht->hash_shift);

	if (n->parms->dead) {
		rc = ERR_PTR(-EINVAL);
		goto out_tbl_unlock;
	}

	for (n1 = rcu_dereference_protected(nht->hash_buckets[hash_val],
					    lockdep_is_held(&tbl->lock));
	     n1 != NULL;
	     n1 = rcu_dereference_protected(n1->next,
			lockdep_is_held(&tbl->lock))) {
		if (dev == n1->dev && !memcmp(n1->primary_key, pkey, key_len)) {
			if (want_ref)
				neigh_hold(n1);
			rc = n1;
			goto out_tbl_unlock;
		}
	}

	n->dead = 0;
	if (want_ref)
		neigh_hold(n);
	rcu_assign_pointer(n->next,
			   rcu_dereference_protected(nht->hash_buckets[hash_val],
						     lockdep_is_held(&tbl->lock)));
	rcu_assign_pointer(nht->hash_buckets[hash_val], n);
	write_unlock_bh(&tbl->lock);
	NEIGH_PRINTK2("neigh %p is created.\n", n);
	rc = n;
out:
	return rc;
out_tbl_unlock:
	write_unlock_bh(&tbl->lock);
out_neigh_release:
	neigh_release(n);
	goto out;
}
EXPORT_SYMBOL(__neigh_create);
static u32 pneigh_hash(const void *pkey, int key_len)
{
	u32 hash_val = *(u32 *)(pkey + key_len - 4);
	hash_val ^= (hash_val >> 16);
	hash_val ^= hash_val >> 8;
	hash_val ^= hash_val >> 4;
	hash_val &= PNEIGH_HASHMASK;
	return hash_val;
}
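/*
 * Worked example (arbitrary input): starting from hash_val = 0xAABBCCDD,
 *	^= hash_val >> 16  ->  0xAABB6666
 *	^= hash_val >> 8   ->  0xAA11DD00
 *	^= hash_val >> 4   ->  0xA0B0C0D0
 *	&= PNEIGH_HASHMASK ->  0x0
 * so this key lands in bucket 0 of the PNEIGH_HASHMASK + 1 = 16 proxy
 * hash buckets.
 */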
static struct pneigh_entry *__pneigh_lookup_1(struct pneigh_entry *n,
					      struct net *net,
					      const void *pkey,
					      int key_len,
					      struct net_device *dev)
{
	while (n) {
		if (!memcmp(n->key, pkey, key_len) &&
		    net_eq(pneigh_net(n), net) &&
		    (n->dev == dev || !n->dev))
			return n;
		n = n->next;
	}
	return NULL;
}

struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl,
		struct net *net, const void *pkey, struct net_device *dev)
{
	int key_len = tbl->key_len;
	u32 hash_val = pneigh_hash(pkey, key_len);

	return __pneigh_lookup_1(tbl->phash_buckets[hash_val],
				 net, pkey, key_len, dev);
}
EXPORT_SYMBOL_GPL(__pneigh_lookup);
struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl,
				    struct net *net, const void *pkey,
				    struct net_device *dev, int creat)
{
	struct pneigh_entry *n;
	int key_len = tbl->key_len;
	u32 hash_val = pneigh_hash(pkey, key_len);

	read_lock_bh(&tbl->lock);
	n = __pneigh_lookup_1(tbl->phash_buckets[hash_val],
			      net, pkey, key_len, dev);
	read_unlock_bh(&tbl->lock);

	if (n || !creat)
		goto out;

	ASSERT_RTNL();

	n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL);
	if (!n)
		goto out;

	write_pnet(&n->net, hold_net(net));
	memcpy(n->key, pkey, key_len);
	n->dev = dev;
	if (dev)
		dev_hold(dev);

	if (tbl->pconstructor && tbl->pconstructor(n)) {
		if (dev)
			dev_put(dev);
		release_net(net);
		kfree(n);
		n = NULL;
		goto out;
	}

	write_lock_bh(&tbl->lock);
	n->next = tbl->phash_buckets[hash_val];
	tbl->phash_buckets[hash_val] = n;
	write_unlock_bh(&tbl->lock);
out:
	return n;
}
EXPORT_SYMBOL(pneigh_lookup);
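/*
 * A minimal sketch of the creat flag: with creat == 0 this is a pure
 * lookup, with creat == 1 a missing entry is allocated and linked in
 * (hence the RTNL assertion above). Table and key are placeholders.
 */
#if 0	/* illustrative only */
static int example_add_proxy(struct neigh_table *tbl, struct net *net,
			     const void *key, struct net_device *dev)
{
	struct pneigh_entry *pn = pneigh_lookup(tbl, net, key, dev, 1);

	return pn ? 0 : -ENOBUFS;	/* pn now answers proxied requests */
}
#endif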
int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey,
		  struct net_device *dev)
{
	struct pneigh_entry *n, **np;
	int key_len = tbl->key_len;
	u32 hash_val = pneigh_hash(pkey, key_len);

	write_lock_bh(&tbl->lock);
	for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
	     np = &n->next) {
		if (!memcmp(n->key, pkey, key_len) && n->dev == dev &&
		    net_eq(pneigh_net(n), net)) {
			*np = n->next;
			write_unlock_bh(&tbl->lock);
			if (tbl->pdestructor)
				tbl->pdestructor(n);
			if (n->dev)
				dev_put(n->dev);
			release_net(pneigh_net(n));
			kfree(n);
			return 0;
		}
	}
	write_unlock_bh(&tbl->lock);
	return -ENOENT;
}

static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
	struct pneigh_entry *n, **np;
	u32 h;

	for (h = 0; h <= PNEIGH_HASHMASK; h++) {
		np = &tbl->phash_buckets[h];
		while ((n = *np) != NULL) {
			if (!dev || n->dev == dev) {
				*np = n->next;
				if (tbl->pdestructor)
					tbl->pdestructor(n);
				if (n->dev)
					dev_put(n->dev);
				release_net(pneigh_net(n));
				kfree(n);
				continue;
			}
			np = &n->next;
		}
	}
	return -ENOENT;
}
static void neigh_parms_destroy(struct neigh_parms *parms);

static inline void neigh_parms_put(struct neigh_parms *parms)
{
	if (atomic_dec_and_test(&parms->refcnt))
		neigh_parms_destroy(parms);
}

/*
 *	neighbour must already be out of the table;
 */
void neigh_destroy(struct neighbour *neigh)
{
	struct net_device *dev = neigh->dev;

	NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);

	if (!neigh->dead) {
		pr_warn("Destroying alive neighbour %p\n", neigh);
		dump_stack();
		return;
	}

	if (neigh_del_timer(neigh))
		pr_warn("Impossible event\n");

	skb_queue_purge(&neigh->arp_queue);
	neigh->arp_queue_len_bytes = 0;

	if (dev->netdev_ops->ndo_neigh_destroy)
		dev->netdev_ops->ndo_neigh_destroy(neigh);

	dev_put(dev);
	neigh_parms_put(neigh->parms);

	NEIGH_PRINTK2("neigh %p is destroyed.\n", neigh);

	atomic_dec(&neigh->tbl->entries);
	kfree_rcu(neigh, rcu);
}
EXPORT_SYMBOL(neigh_destroy);
/* Neighbour state is suspicious;
   disable fast path.

   Called with write_locked neigh.
 */
static void neigh_suspect(struct neighbour *neigh)
{
	NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);

	neigh->output = neigh->ops->output;
}

/* Neighbour state is OK;
   enable fast path.

   Called with write_locked neigh.
 */
static void neigh_connect(struct neighbour *neigh)
{
	NEIGH_PRINTK2("neigh %p is connected.\n", neigh);

	neigh->output = neigh->ops->connected_output;
}
static void neigh_periodic_work(struct work_struct *work)
{
	struct neigh_table *tbl = container_of(work, struct neigh_table, gc_work.work);
	struct neighbour *n;
	struct neighbour __rcu **np;
	unsigned int i;
	struct neigh_hash_table *nht;

	NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);

	write_lock_bh(&tbl->lock);
	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));

	/*
	 *	periodically recompute ReachableTime from random function
	 */

	if (time_after(jiffies, tbl->last_rand + 300 * HZ)) {
		struct neigh_parms *p;
		tbl->last_rand = jiffies;
		for (p = &tbl->parms; p; p = p->next)
			p->reachable_time =
				neigh_rand_reach_time(p->base_reachable_time);
	}

	for (i = 0 ; i < (1 << nht->hash_shift); i++) {
		np = &nht->hash_buckets[i];

		while ((n = rcu_dereference_protected(*np,
				lockdep_is_held(&tbl->lock))) != NULL) {
			unsigned int state;

			write_lock(&n->lock);

			state = n->nud_state;
			if (state & (NUD_PERMANENT | NUD_IN_TIMER)) {
				write_unlock(&n->lock);
				goto next_elt;
			}

			if (time_before(n->used, n->confirmed))
				n->used = n->confirmed;

			if (atomic_read(&n->refcnt) == 1 &&
			    (state == NUD_FAILED ||
			     time_after(jiffies, n->used + n->parms->gc_staletime))) {
				*np = n->next;
				n->dead = 1;
				write_unlock(&n->lock);
				neigh_cleanup_and_release(n);
				continue;
			}
			write_unlock(&n->lock);

next_elt:
			np = &n->next;
		}
		/*
		 * It's fine to release lock here, even if hash table
		 * grows while we are preempted.
		 */
		write_unlock_bh(&tbl->lock);
		cond_resched();
		write_lock_bh(&tbl->lock);
		nht = rcu_dereference_protected(tbl->nht,
						lockdep_is_held(&tbl->lock));
	}
	/* Cycle through all hash buckets every base_reachable_time/2 ticks.
	 * ARP entry timeouts range from 1/2 base_reachable_time to 3/2
	 * base_reachable_time.
	 */
	schedule_delayed_work(&tbl->gc_work,
			      tbl->parms.base_reachable_time >> 1);
	write_unlock_bh(&tbl->lock);
}
static __inline__ int neigh_max_probes(struct neighbour *n)
{
	struct neigh_parms *p = n->parms;
	return (n->nud_state & NUD_PROBE) ?
		p->ucast_probes :
		p->ucast_probes + p->app_probes + p->mcast_probes;
}
static void neigh_invalidate(struct neighbour *neigh)
	__releases(neigh->lock)
	__acquires(neigh->lock)
{
	struct sk_buff *skb;

	NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
	NEIGH_PRINTK2("neigh %p is failed.\n", neigh);
	neigh->updated = jiffies;

	/* This is a very thin place. error_report() is a very
	   complicated routine. Particularly, it can hit the same
	   neighbour entry!

	   So, we try to be accurate and avoid a dead loop. --ANK
	 */
	while (neigh->nud_state == NUD_FAILED &&
	       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
		write_unlock(&neigh->lock);
		neigh->ops->error_report(neigh, skb);
		write_lock(&neigh->lock);
	}
	skb_queue_purge(&neigh->arp_queue);
	neigh->arp_queue_len_bytes = 0;
}
static void neigh_probe(struct neighbour *neigh)
	__releases(neigh->lock)
{
	struct sk_buff *skb = skb_peek(&neigh->arp_queue);
	/* keep skb alive even if arp_queue overflows */
	if (skb)
		skb = skb_copy(skb, GFP_ATOMIC);
	write_unlock(&neigh->lock);
	neigh->ops->solicit(neigh, skb);
	atomic_inc(&neigh->probes);
	kfree_skb(skb);
}
892 static void neigh_timer_handler(unsigned long arg
)
894 unsigned long now
, next
;
895 struct neighbour
*neigh
= (struct neighbour
*)arg
;
899 write_lock(&neigh
->lock
);
901 state
= neigh
->nud_state
;
905 if (!(state
& NUD_IN_TIMER
))
908 if (state
& NUD_REACHABLE
) {
909 if (time_before_eq(now
,
910 neigh
->confirmed
+ neigh
->parms
->reachable_time
)) {
911 NEIGH_PRINTK2("neigh %p is still alive.\n", neigh
);
912 next
= neigh
->confirmed
+ neigh
->parms
->reachable_time
;
913 } else if (time_before_eq(now
,
914 neigh
->used
+ neigh
->parms
->delay_probe_time
)) {
915 NEIGH_PRINTK2("neigh %p is delayed.\n", neigh
);
916 neigh
->nud_state
= NUD_DELAY
;
917 neigh
->updated
= jiffies
;
918 neigh_suspect(neigh
);
919 next
= now
+ neigh
->parms
->delay_probe_time
;
921 NEIGH_PRINTK2("neigh %p is suspected.\n", neigh
);
922 neigh
->nud_state
= NUD_STALE
;
923 neigh
->updated
= jiffies
;
924 neigh_suspect(neigh
);
927 } else if (state
& NUD_DELAY
) {
928 if (time_before_eq(now
,
929 neigh
->confirmed
+ neigh
->parms
->delay_probe_time
)) {
930 NEIGH_PRINTK2("neigh %p is now reachable.\n", neigh
);
931 neigh
->nud_state
= NUD_REACHABLE
;
932 neigh
->updated
= jiffies
;
933 neigh_connect(neigh
);
935 next
= neigh
->confirmed
+ neigh
->parms
->reachable_time
;
937 NEIGH_PRINTK2("neigh %p is probed.\n", neigh
);
938 neigh
->nud_state
= NUD_PROBE
;
939 neigh
->updated
= jiffies
;
940 atomic_set(&neigh
->probes
, 0);
941 next
= now
+ neigh
->parms
->retrans_time
;
944 /* NUD_PROBE|NUD_INCOMPLETE */
945 next
= now
+ neigh
->parms
->retrans_time
;
948 if ((neigh
->nud_state
& (NUD_INCOMPLETE
| NUD_PROBE
)) &&
949 atomic_read(&neigh
->probes
) >= neigh_max_probes(neigh
)) {
950 neigh
->nud_state
= NUD_FAILED
;
952 neigh_invalidate(neigh
);
955 if (neigh
->nud_state
& NUD_IN_TIMER
) {
956 if (time_before(next
, jiffies
+ HZ
/2))
957 next
= jiffies
+ HZ
/2;
958 if (!mod_timer(&neigh
->timer
, next
))
961 if (neigh
->nud_state
& (NUD_INCOMPLETE
| NUD_PROBE
)) {
965 write_unlock(&neigh
->lock
);
969 neigh_update_notify(neigh
);
971 neigh_release(neigh
);
int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
{
	int rc;
	bool immediate_probe = false;

	write_lock_bh(&neigh->lock);

	rc = 0;
	if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
		goto out_unlock_bh;

	if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
		if (neigh->parms->mcast_probes + neigh->parms->app_probes) {
			unsigned long next, now = jiffies;

			atomic_set(&neigh->probes, neigh->parms->ucast_probes);
			neigh->nud_state     = NUD_INCOMPLETE;
			neigh->updated = now;
			next = now + max(neigh->parms->retrans_time, HZ/2);
			neigh_add_timer(neigh, next);
			immediate_probe = true;
		} else {
			neigh->nud_state = NUD_FAILED;
			neigh->updated = jiffies;
			write_unlock_bh(&neigh->lock);

			kfree_skb(skb);
			return 1;
		}
	} else if (neigh->nud_state & NUD_STALE) {
		NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
		neigh->nud_state = NUD_DELAY;
		neigh->updated = jiffies;
		neigh_add_timer(neigh,
				jiffies + neigh->parms->delay_probe_time);
	}

	if (neigh->nud_state == NUD_INCOMPLETE) {
		if (skb) {
			while (neigh->arp_queue_len_bytes + skb->truesize >
			       neigh->parms->queue_len_bytes) {
				struct sk_buff *buff;

				buff = __skb_dequeue(&neigh->arp_queue);
				if (!buff)
					break;
				neigh->arp_queue_len_bytes -= buff->truesize;
				kfree_skb(buff);
				NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards);
			}
			skb_dst_force(skb);
			__skb_queue_tail(&neigh->arp_queue, skb);
			neigh->arp_queue_len_bytes += skb->truesize;
		}
		rc = 1;
	}
out_unlock_bh:
	if (immediate_probe)
		neigh_probe(neigh);
	else
		write_unlock(&neigh->lock);
	local_bh_enable();
	return rc;
}
EXPORT_SYMBOL(__neigh_event_send);
static void neigh_update_hhs(struct neighbour *neigh)
{
	struct hh_cache *hh;
	void (*update)(struct hh_cache*, const struct net_device*, const unsigned char *)
		= NULL;

	if (neigh->dev->header_ops)
		update = neigh->dev->header_ops->cache_update;

	if (update) {
		hh = &neigh->hh;
		if (hh->hh_len) {
			write_seqlock_bh(&hh->hh_lock);
			update(hh, neigh->dev, neigh->ha);
			write_sequnlock_bh(&hh->hh_lock);
		}
	}
}
/* Generic update routine.
   -- lladdr is the new lladdr, or NULL if it is not supplied.
   -- new    is the new state.
   -- flags
	NEIGH_UPDATE_F_OVERRIDE allows overriding an existing lladdr,
				if it differs.
	NEIGH_UPDATE_F_WEAK_OVERRIDE will suspect an existing "connected"
				lladdr instead of overriding it
				if it differs.
				It also allows retaining the current state
				if lladdr is unchanged.
	NEIGH_UPDATE_F_ADMIN	means that the change is administrative.

	NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows overriding an existing
				NTF_ROUTER flag.
	NEIGH_UPDATE_F_ISROUTER	indicates whether the neighbour is known as
				a router.

   Caller MUST hold a reference count on the entry.
 */
int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
		 u32 flags)
{
	u8 old;
	int err;
	int notify = 0;
	struct net_device *dev;
	int update_isrouter = 0;

	write_lock_bh(&neigh->lock);

	dev    = neigh->dev;
	old    = neigh->nud_state;
	err    = -EPERM;

	if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
	    (old & (NUD_NOARP | NUD_PERMANENT)))
		goto out;

	if (!(new & NUD_VALID)) {
		neigh_del_timer(neigh);
		if (old & NUD_CONNECTED)
			neigh_suspect(neigh);
		neigh->nud_state = new;
		err = 0;
		notify = old & NUD_VALID;
		if ((old & (NUD_INCOMPLETE | NUD_PROBE)) &&
		    (new & NUD_FAILED)) {
			neigh_invalidate(neigh);
			notify = 1;
		}
		goto out;
	}

	/* Compare new lladdr with cached one */
	if (!dev->addr_len) {
		/* First case: device needs no address. */
		lladdr = neigh->ha;
	} else if (lladdr) {
		/* The second case: if something is already cached
		   and a new address is proposed:
		   - compare new & old
		   - if they are different, check override flag
		 */
		if ((old & NUD_VALID) &&
		    !memcmp(lladdr, neigh->ha, dev->addr_len))
			lladdr = neigh->ha;
	} else {
		/* No address is supplied; if we know something,
		   use it, otherwise discard the request.
		 */
		err = -EINVAL;
		if (!(old & NUD_VALID))
			goto out;
		lladdr = neigh->ha;
	}

	if (new & NUD_CONNECTED)
		neigh->confirmed = jiffies;
	neigh->updated = jiffies;

	/* If entry was valid and address is not changed,
	   do not change entry state, if new one is STALE.
	 */
	err = 0;
	update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
	if (old & NUD_VALID) {
		if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
			update_isrouter = 0;
			if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) &&
			    (old & NUD_CONNECTED)) {
				lladdr = neigh->ha;
				new = NUD_STALE;
			} else
				goto out;
		} else {
			if (lladdr == neigh->ha && new == NUD_STALE &&
			    ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) ||
			     (old & NUD_CONNECTED))
			    )
				new = old;
		}
	}

	if (new != old) {
		neigh_del_timer(neigh);
		if (new & NUD_IN_TIMER)
			neigh_add_timer(neigh, (jiffies +
						((new & NUD_REACHABLE) ?
						 neigh->parms->reachable_time :
						 0)));
		neigh->nud_state = new;
	}

	if (lladdr != neigh->ha) {
		write_seqlock(&neigh->ha_lock);
		memcpy(&neigh->ha, lladdr, dev->addr_len);
		write_sequnlock(&neigh->ha_lock);
		neigh_update_hhs(neigh);
		if (!(new & NUD_CONNECTED))
			neigh->confirmed = jiffies -
				      (neigh->parms->base_reachable_time << 1);
		notify = 1;
	}
	if (new == old)
		goto out;
	if (new & NUD_CONNECTED)
		neigh_connect(neigh);
	else
		neigh_suspect(neigh);
	if (!(old & NUD_VALID)) {
		struct sk_buff *skb;

		/* Again: avoid dead loop if something went wrong */

		while (neigh->nud_state & NUD_VALID &&
		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
			struct dst_entry *dst = skb_dst(skb);
			struct neighbour *n2, *n1 = neigh;
			write_unlock_bh(&neigh->lock);

			rcu_read_lock();
			/* On shaper/eql skb->dst->neighbour != neigh :( */
			if (dst && (n2 = dst_get_neighbour_noref(dst)) != NULL)
				n1 = n2;
			n1->output(n1, skb);
			rcu_read_unlock();

			write_lock_bh(&neigh->lock);
		}
		skb_queue_purge(&neigh->arp_queue);
		neigh->arp_queue_len_bytes = 0;
	}
out:
	if (update_isrouter) {
		neigh->flags = (flags & NEIGH_UPDATE_F_ISROUTER) ?
			(neigh->flags | NTF_ROUTER) :
			(neigh->flags & ~NTF_ROUTER);
	}
	write_unlock_bh(&neigh->lock);

	if (notify)
		neigh_update_notify(neigh);

	return err;
}
EXPORT_SYMBOL(neigh_update);
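/*
 * A minimal sketch of an administrative update, in the spirit of the
 * netlink handlers below: force a new lladdr and state regardless of the
 * cached one. Per the comment above neigh_update(), the caller must
 * already hold a reference on neigh; lladdr here is a placeholder.
 */
#if 0	/* illustrative only */
static int example_admin_update(struct neighbour *neigh, const u8 *lladdr)
{
	return neigh_update(neigh, lladdr, NUD_PERMANENT,
			    NEIGH_UPDATE_F_OVERRIDE | NEIGH_UPDATE_F_ADMIN);
}
#endif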
struct neighbour *neigh_event_ns(struct neigh_table *tbl,
				 u8 *lladdr, void *saddr,
				 struct net_device *dev)
{
	struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
						 lladdr || !dev->addr_len);
	if (neigh)
		neigh_update(neigh, lladdr, NUD_STALE,
			     NEIGH_UPDATE_F_OVERRIDE);
	return neigh;
}
EXPORT_SYMBOL(neigh_event_ns);
/* called with read_lock_bh(&n->lock); */
static void neigh_hh_init(struct neighbour *n, struct dst_entry *dst)
{
	struct net_device *dev = dst->dev;
	__be16 prot = dst->ops->protocol;
	struct hh_cache	*hh = &n->hh;

	write_lock_bh(&n->lock);

	/* Only one thread can come in here and initialize the
	 * hh; the rest will just read it.
	 */
	if (!hh->hh_len)
		dev->header_ops->cache(n, hh, prot);

	write_unlock_bh(&n->lock);
}
/* This function can be used in contexts where only the old dev_queue_xmit
 * worked, e.g. if you want to override the normal output path (eql, shaper),
 * but resolution is not made yet.
 */

int neigh_compat_output(struct neighbour *neigh, struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;

	__skb_pull(skb, skb_network_offset(skb));

	if (dev_hard_header(skb, dev, ntohs(skb->protocol), NULL, NULL,
			    skb->len) < 0 &&
	    dev->header_ops->rebuild(skb))
		return 0;

	return dev_queue_xmit(skb);
}
EXPORT_SYMBOL(neigh_compat_output);
/* Slow and careful. */

int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	int rc = 0;

	if (!dst)
		goto discard;

	__skb_pull(skb, skb_network_offset(skb));

	if (!neigh_event_send(neigh, skb)) {
		int err;
		struct net_device *dev = neigh->dev;
		unsigned int seq;

		if (dev->header_ops->cache && !neigh->hh.hh_len)
			neigh_hh_init(neigh, dst);

		do {
			seq = read_seqbegin(&neigh->ha_lock);
			err = dev_hard_header(skb, dev, ntohs(skb->protocol),
					      neigh->ha, NULL, skb->len);
		} while (read_seqretry(&neigh->ha_lock, seq));

		if (err >= 0)
			rc = dev_queue_xmit(skb);
		else
			goto out_kfree_skb;
	}
out:
	return rc;
discard:
	NEIGH_PRINTK1("neigh_resolve_output: dst=%p neigh=%p\n",
		      dst, neigh);
out_kfree_skb:
	rc = -EINVAL;
	kfree_skb(skb);
	goto out;
}
EXPORT_SYMBOL(neigh_resolve_output);
/* As fast as possible without hh cache */

int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb)
{
	struct net_device *dev = neigh->dev;
	unsigned int seq;
	int err;

	__skb_pull(skb, skb_network_offset(skb));

	do {
		seq = read_seqbegin(&neigh->ha_lock);
		err = dev_hard_header(skb, dev, ntohs(skb->protocol),
				      neigh->ha, NULL, skb->len);
	} while (read_seqretry(&neigh->ha_lock, seq));

	if (err >= 0)
		err = dev_queue_xmit(skb);
	else {
		err = -EINVAL;
		kfree_skb(skb);
	}
	return err;
}
EXPORT_SYMBOL(neigh_connected_output);

int neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb)
{
	return dev_queue_xmit(skb);
}
EXPORT_SYMBOL(neigh_direct_output);
static void neigh_proxy_process(unsigned long arg)
{
	struct neigh_table *tbl = (struct neigh_table *)arg;
	long sched_next = 0;
	unsigned long now = jiffies;
	struct sk_buff *skb, *n;

	spin_lock(&tbl->proxy_queue.lock);

	skb_queue_walk_safe(&tbl->proxy_queue, skb, n) {
		long tdif = NEIGH_CB(skb)->sched_next - now;

		if (tdif <= 0) {
			struct net_device *dev = skb->dev;

			__skb_unlink(skb, &tbl->proxy_queue);
			if (tbl->proxy_redo && netif_running(dev)) {
				rcu_read_lock();
				tbl->proxy_redo(skb);
				rcu_read_unlock();
			} else {
				kfree_skb(skb);
			}

			dev_put(dev);
		} else if (!sched_next || tdif < sched_next)
			sched_next = tdif;
	}
	del_timer(&tbl->proxy_timer);
	if (sched_next)
		mod_timer(&tbl->proxy_timer, jiffies + sched_next);
	spin_unlock(&tbl->proxy_queue.lock);
}
void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
		    struct sk_buff *skb)
{
	unsigned long now = jiffies;
	unsigned long sched_next = now + (net_random() % p->proxy_delay);

	if (tbl->proxy_queue.qlen > p->proxy_qlen) {
		kfree_skb(skb);
		return;
	}

	NEIGH_CB(skb)->sched_next = sched_next;
	NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;

	spin_lock(&tbl->proxy_queue.lock);
	if (del_timer(&tbl->proxy_timer)) {
		if (time_before(tbl->proxy_timer.expires, sched_next))
			sched_next = tbl->proxy_timer.expires;
	}
	skb_dst_drop(skb);
	dev_hold(skb->dev);
	__skb_queue_tail(&tbl->proxy_queue, skb);
	mod_timer(&tbl->proxy_timer, sched_next);
	spin_unlock(&tbl->proxy_queue.lock);
}
EXPORT_SYMBOL(pneigh_enqueue);
static inline struct neigh_parms *lookup_neigh_parms(struct neigh_table *tbl,
						      struct net *net, int ifindex)
{
	struct neigh_parms *p;

	for (p = &tbl->parms; p; p = p->next) {
		if ((p->dev && p->dev->ifindex == ifindex && net_eq(neigh_parms_net(p), net)) ||
		    (!p->dev && !ifindex))
			return p;
	}

	return NULL;
}
struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
				      struct neigh_table *tbl)
{
	struct neigh_parms *p, *ref;
	struct net *net = dev_net(dev);
	const struct net_device_ops *ops = dev->netdev_ops;

	ref = lookup_neigh_parms(tbl, net, 0);
	if (!ref)
		return NULL;

	p = kmemdup(ref, sizeof(*p), GFP_KERNEL);
	if (p) {
		p->tbl		  = tbl;
		atomic_set(&p->refcnt, 1);
		p->reachable_time =
				neigh_rand_reach_time(p->base_reachable_time);

		if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) {
			kfree(p);
			return NULL;
		}

		dev_hold(dev);
		p->dev = dev;
		write_pnet(&p->net, hold_net(net));
		p->sysctl_table = NULL;
		write_lock_bh(&tbl->lock);
		p->next		= tbl->parms.next;
		tbl->parms.next = p;
		write_unlock_bh(&tbl->lock);
	}
	return p;
}
EXPORT_SYMBOL(neigh_parms_alloc);
static void neigh_rcu_free_parms(struct rcu_head *head)
{
	struct neigh_parms *parms =
		container_of(head, struct neigh_parms, rcu_head);

	neigh_parms_put(parms);
}

void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
{
	struct neigh_parms **p;

	if (!parms || parms == &tbl->parms)
		return;
	write_lock_bh(&tbl->lock);
	for (p = &tbl->parms.next; *p; p = &(*p)->next) {
		if (*p == parms) {
			*p = parms->next;
			parms->dead = 1;
			write_unlock_bh(&tbl->lock);
			if (parms->dev)
				dev_put(parms->dev);
			call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
			return;
		}
	}
	write_unlock_bh(&tbl->lock);
	NEIGH_PRINTK1("neigh_parms_release: not found\n");
}
EXPORT_SYMBOL(neigh_parms_release);

static void neigh_parms_destroy(struct neigh_parms *parms)
{
	release_net(neigh_parms_net(parms));
	kfree(parms);
}
static struct lock_class_key neigh_table_proxy_queue_class;

static void neigh_table_init_no_netlink(struct neigh_table *tbl)
{
	unsigned long now = jiffies;
	unsigned long phsize;

	write_pnet(&tbl->parms.net, &init_net);
	atomic_set(&tbl->parms.refcnt, 1);
	tbl->parms.reachable_time =
			  neigh_rand_reach_time(tbl->parms.base_reachable_time);

	tbl->stats = alloc_percpu(struct neigh_statistics);
	if (!tbl->stats)
		panic("cannot create neighbour cache statistics");

#ifdef CONFIG_PROC_FS
	if (!proc_create_data(tbl->id, 0, init_net.proc_net_stat,
			      &neigh_stat_seq_fops, tbl))
		panic("cannot create neighbour proc dir entry");
#endif

	RCU_INIT_POINTER(tbl->nht, neigh_hash_alloc(3));

	phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
	tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);

	if (!tbl->nht || !tbl->phash_buckets)
		panic("cannot allocate neighbour cache hashes");

	rwlock_init(&tbl->lock);
	INIT_DELAYED_WORK_DEFERRABLE(&tbl->gc_work, neigh_periodic_work);
	schedule_delayed_work(&tbl->gc_work, tbl->parms.reachable_time);
	setup_timer(&tbl->proxy_timer, neigh_proxy_process, (unsigned long)tbl);
	skb_queue_head_init_class(&tbl->proxy_queue,
			&neigh_table_proxy_queue_class);

	tbl->last_flush = now;
	tbl->last_rand	= now + tbl->parms.reachable_time * 20;
}
void neigh_table_init(struct neigh_table *tbl)
{
	struct neigh_table *tmp;

	neigh_table_init_no_netlink(tbl);
	write_lock(&neigh_tbl_lock);
	for (tmp = neigh_tables; tmp; tmp = tmp->next) {
		if (tmp->family == tbl->family)
			break;
	}
	tbl->next	= neigh_tables;
	neigh_tables	= tbl;
	write_unlock(&neigh_tbl_lock);

	if (unlikely(tmp)) {
		pr_err("Registering multiple tables for family %d\n",
		       tbl->family);
		dump_stack();
	}
}
EXPORT_SYMBOL(neigh_table_init);
int neigh_table_clear(struct neigh_table *tbl)
{
	struct neigh_table **tp;

	/* It is not clean... Fix it to unload IPv6 module safely */
	cancel_delayed_work_sync(&tbl->gc_work);
	del_timer_sync(&tbl->proxy_timer);
	pneigh_queue_purge(&tbl->proxy_queue);
	neigh_ifdown(tbl, NULL);
	if (atomic_read(&tbl->entries))
		pr_crit("neighbour leakage\n");
	write_lock(&neigh_tbl_lock);
	for (tp = &neigh_tables; *tp; tp = &(*tp)->next) {
		if (*tp == tbl) {
			*tp = tbl->next;
			break;
		}
	}
	write_unlock(&neigh_tbl_lock);

	call_rcu(&rcu_dereference_protected(tbl->nht, 1)->rcu,
		 neigh_hash_free_rcu);
	tbl->nht = NULL;

	kfree(tbl->phash_buckets);
	tbl->phash_buckets = NULL;

	remove_proc_entry(tbl->id, init_net.proc_net_stat);

	free_percpu(tbl->stats);
	tbl->stats = NULL;

	return 0;
}
EXPORT_SYMBOL(neigh_table_clear);
static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct net *net = sock_net(skb->sk);
	struct ndmsg *ndm;
	struct nlattr *dst_attr;
	struct neigh_table *tbl;
	struct net_device *dev = NULL;
	int err = -EINVAL;

	if (nlmsg_len(nlh) < sizeof(*ndm))
		goto out;

	dst_attr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_DST);
	if (dst_attr == NULL)
		goto out;

	ndm = nlmsg_data(nlh);
	if (ndm->ndm_ifindex) {
		dev = __dev_get_by_index(net, ndm->ndm_ifindex);
		if (dev == NULL) {
			err = -ENODEV;
			goto out;
		}
	}

	read_lock(&neigh_tbl_lock);
	for (tbl = neigh_tables; tbl; tbl = tbl->next) {
		struct neighbour *neigh;

		if (tbl->family != ndm->ndm_family)
			continue;
		read_unlock(&neigh_tbl_lock);

		if (nla_len(dst_attr) < tbl->key_len)
			goto out;

		if (ndm->ndm_flags & NTF_PROXY) {
			err = pneigh_delete(tbl, net, nla_data(dst_attr), dev);
			goto out;
		}

		if (dev == NULL)
			goto out;

		neigh = neigh_lookup(tbl, nla_data(dst_attr), dev);
		if (neigh == NULL) {
			err = -ENOENT;
			goto out;
		}

		err = neigh_update(neigh, NULL, NUD_FAILED,
				   NEIGH_UPDATE_F_OVERRIDE |
				   NEIGH_UPDATE_F_ADMIN);
		neigh_release(neigh);
		goto out;
	}
	read_unlock(&neigh_tbl_lock);
	err = -EAFNOSUPPORT;

out:
	return err;
}
static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct net *net = sock_net(skb->sk);
	struct ndmsg *ndm;
	struct nlattr *tb[NDA_MAX+1];
	struct neigh_table *tbl;
	struct net_device *dev = NULL;
	int err;

	err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL);
	if (err < 0)
		goto out;

	err = -EINVAL;
	if (tb[NDA_DST] == NULL)
		goto out;

	ndm = nlmsg_data(nlh);
	if (ndm->ndm_ifindex) {
		dev = __dev_get_by_index(net, ndm->ndm_ifindex);
		if (dev == NULL) {
			err = -ENODEV;
			goto out;
		}

		if (tb[NDA_LLADDR] && nla_len(tb[NDA_LLADDR]) < dev->addr_len)
			goto out;
	}

	read_lock(&neigh_tbl_lock);
	for (tbl = neigh_tables; tbl; tbl = tbl->next) {
		int flags = NEIGH_UPDATE_F_ADMIN | NEIGH_UPDATE_F_OVERRIDE;
		struct neighbour *neigh;
		void *dst, *lladdr;

		if (tbl->family != ndm->ndm_family)
			continue;
		read_unlock(&neigh_tbl_lock);

		if (nla_len(tb[NDA_DST]) < tbl->key_len)
			goto out;
		dst = nla_data(tb[NDA_DST]);
		lladdr = tb[NDA_LLADDR] ? nla_data(tb[NDA_LLADDR]) : NULL;

		if (ndm->ndm_flags & NTF_PROXY) {
			struct pneigh_entry *pn;

			err = -ENOBUFS;
			pn = pneigh_lookup(tbl, net, dst, dev, 1);
			if (pn) {
				pn->flags = ndm->ndm_flags;
				err = 0;
			}
			goto out;
		}

		if (dev == NULL)
			goto out;

		neigh = neigh_lookup(tbl, dst, dev);
		if (neigh == NULL) {
			if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
				err = -ENOENT;
				goto out;
			}

			neigh = __neigh_lookup_errno(tbl, dst, dev);
			if (IS_ERR(neigh)) {
				err = PTR_ERR(neigh);
				goto out;
			}
		} else {
			if (nlh->nlmsg_flags & NLM_F_EXCL) {
				err = -EEXIST;
				neigh_release(neigh);
				goto out;
			}

			if (!(nlh->nlmsg_flags & NLM_F_REPLACE))
				flags &= ~NEIGH_UPDATE_F_OVERRIDE;
		}

		if (ndm->ndm_flags & NTF_USE) {
			neigh_event_send(neigh, NULL);
			err = 0;
		} else
			err = neigh_update(neigh, lladdr, ndm->ndm_state, flags);
		neigh_release(neigh);
		goto out;
	}

	read_unlock(&neigh_tbl_lock);
	err = -EAFNOSUPPORT;
out:
	return err;
}
static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
{
	struct nlattr *nest;

	nest = nla_nest_start(skb, NDTA_PARMS);
	if (nest == NULL)
		return -ENOBUFS;

	if ((parms->dev &&
	     nla_put_u32(skb, NDTPA_IFINDEX, parms->dev->ifindex)) ||
	    nla_put_u32(skb, NDTPA_REFCNT, atomic_read(&parms->refcnt)) ||
	    nla_put_u32(skb, NDTPA_QUEUE_LENBYTES, parms->queue_len_bytes) ||
	    /* approximative value for deprecated QUEUE_LEN (in packets) */
	    nla_put_u32(skb, NDTPA_QUEUE_LEN,
			DIV_ROUND_UP(parms->queue_len_bytes,
				     SKB_TRUESIZE(ETH_FRAME_LEN))) ||
	    nla_put_u32(skb, NDTPA_PROXY_QLEN, parms->proxy_qlen) ||
	    nla_put_u32(skb, NDTPA_APP_PROBES, parms->app_probes) ||
	    nla_put_u32(skb, NDTPA_UCAST_PROBES, parms->ucast_probes) ||
	    nla_put_u32(skb, NDTPA_MCAST_PROBES, parms->mcast_probes) ||
	    nla_put_msecs(skb, NDTPA_REACHABLE_TIME, parms->reachable_time) ||
	    nla_put_msecs(skb, NDTPA_BASE_REACHABLE_TIME,
			  parms->base_reachable_time) ||
	    nla_put_msecs(skb, NDTPA_GC_STALETIME, parms->gc_staletime) ||
	    nla_put_msecs(skb, NDTPA_DELAY_PROBE_TIME,
			  parms->delay_probe_time) ||
	    nla_put_msecs(skb, NDTPA_RETRANS_TIME, parms->retrans_time) ||
	    nla_put_msecs(skb, NDTPA_ANYCAST_DELAY, parms->anycast_delay) ||
	    nla_put_msecs(skb, NDTPA_PROXY_DELAY, parms->proxy_delay) ||
	    nla_put_msecs(skb, NDTPA_LOCKTIME, parms->locktime))
		goto nla_put_failure;
	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}
static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
			      u32 pid, u32 seq, int type, int flags)
{
	struct nlmsghdr *nlh;
	struct ndtmsg *ndtmsg;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ndtmsg = nlmsg_data(nlh);

	read_lock_bh(&tbl->lock);
	ndtmsg->ndtm_family = tbl->family;
	ndtmsg->ndtm_pad1   = 0;
	ndtmsg->ndtm_pad2   = 0;

	if (nla_put_string(skb, NDTA_NAME, tbl->id) ||
	    nla_put_msecs(skb, NDTA_GC_INTERVAL, tbl->gc_interval) ||
	    nla_put_u32(skb, NDTA_THRESH1, tbl->gc_thresh1) ||
	    nla_put_u32(skb, NDTA_THRESH2, tbl->gc_thresh2) ||
	    nla_put_u32(skb, NDTA_THRESH3, tbl->gc_thresh3))
		goto nla_put_failure;
	{
		unsigned long now = jiffies;
		unsigned int flush_delta = now - tbl->last_flush;
		unsigned int rand_delta = now - tbl->last_rand;
		struct neigh_hash_table *nht;
		struct ndt_config ndc = {
			.ndtc_key_len		= tbl->key_len,
			.ndtc_entry_size	= tbl->entry_size,
			.ndtc_entries		= atomic_read(&tbl->entries),
			.ndtc_last_flush	= jiffies_to_msecs(flush_delta),
			.ndtc_last_rand		= jiffies_to_msecs(rand_delta),
			.ndtc_proxy_qlen	= tbl->proxy_queue.qlen,
		};

		rcu_read_lock_bh();
		nht = rcu_dereference_bh(tbl->nht);
		ndc.ndtc_hash_rnd = nht->hash_rnd[0];
		ndc.ndtc_hash_mask = ((1 << nht->hash_shift) - 1);
		rcu_read_unlock_bh();

		if (nla_put(skb, NDTA_CONFIG, sizeof(ndc), &ndc))
			goto nla_put_failure;
	}

	{
		int cpu;
		struct ndt_stats ndst;

		memset(&ndst, 0, sizeof(ndst));

		for_each_possible_cpu(cpu) {
			struct neigh_statistics	*st;

			st = per_cpu_ptr(tbl->stats, cpu);
			ndst.ndts_allocs		+= st->allocs;
			ndst.ndts_destroys		+= st->destroys;
			ndst.ndts_hash_grows		+= st->hash_grows;
			ndst.ndts_res_failed		+= st->res_failed;
			ndst.ndts_lookups		+= st->lookups;
			ndst.ndts_hits			+= st->hits;
			ndst.ndts_rcv_probes_mcast	+= st->rcv_probes_mcast;
			ndst.ndts_rcv_probes_ucast	+= st->rcv_probes_ucast;
			ndst.ndts_periodic_gc_runs	+= st->periodic_gc_runs;
			ndst.ndts_forced_gc_runs	+= st->forced_gc_runs;
		}

		if (nla_put(skb, NDTA_STATS, sizeof(ndst), &ndst))
			goto nla_put_failure;
	}

	BUG_ON(tbl->parms.dev);
	if (neightbl_fill_parms(skb, &tbl->parms) < 0)
		goto nla_put_failure;

	read_unlock_bh(&tbl->lock);
	return nlmsg_end(skb, nlh);

nla_put_failure:
	read_unlock_bh(&tbl->lock);
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
static int neightbl_fill_param_info(struct sk_buff *skb,
				    struct neigh_table *tbl,
				    struct neigh_parms *parms,
				    u32 pid, u32 seq, int type,
				    unsigned int flags)
{
	struct ndtmsg *ndtmsg;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ndtmsg = nlmsg_data(nlh);

	read_lock_bh(&tbl->lock);
	ndtmsg->ndtm_family = tbl->family;
	ndtmsg->ndtm_pad1   = 0;
	ndtmsg->ndtm_pad2   = 0;

	if (nla_put_string(skb, NDTA_NAME, tbl->id) < 0 ||
	    neightbl_fill_parms(skb, parms) < 0)
		goto errout;

	read_unlock_bh(&tbl->lock);
	return nlmsg_end(skb, nlh);
errout:
	read_unlock_bh(&tbl->lock);
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
static const struct nla_policy nl_neightbl_policy[NDTA_MAX+1] = {
	[NDTA_NAME]		= { .type = NLA_STRING },
	[NDTA_THRESH1]		= { .type = NLA_U32 },
	[NDTA_THRESH2]		= { .type = NLA_U32 },
	[NDTA_THRESH3]		= { .type = NLA_U32 },
	[NDTA_GC_INTERVAL]	= { .type = NLA_U64 },
	[NDTA_PARMS]		= { .type = NLA_NESTED },
};

static const struct nla_policy nl_ntbl_parm_policy[NDTPA_MAX+1] = {
	[NDTPA_IFINDEX]			= { .type = NLA_U32 },
	[NDTPA_QUEUE_LEN]		= { .type = NLA_U32 },
	[NDTPA_PROXY_QLEN]		= { .type = NLA_U32 },
	[NDTPA_APP_PROBES]		= { .type = NLA_U32 },
	[NDTPA_UCAST_PROBES]		= { .type = NLA_U32 },
	[NDTPA_MCAST_PROBES]		= { .type = NLA_U32 },
	[NDTPA_BASE_REACHABLE_TIME]	= { .type = NLA_U64 },
	[NDTPA_GC_STALETIME]		= { .type = NLA_U64 },
	[NDTPA_DELAY_PROBE_TIME]	= { .type = NLA_U64 },
	[NDTPA_RETRANS_TIME]		= { .type = NLA_U64 },
	[NDTPA_ANYCAST_DELAY]		= { .type = NLA_U64 },
	[NDTPA_PROXY_DELAY]		= { .type = NLA_U64 },
	[NDTPA_LOCKTIME]		= { .type = NLA_U64 },
};
static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct net *net = sock_net(skb->sk);
	struct neigh_table *tbl;
	struct ndtmsg *ndtmsg;
	struct nlattr *tb[NDTA_MAX+1];
	int err;

	err = nlmsg_parse(nlh, sizeof(*ndtmsg), tb, NDTA_MAX,
			  nl_neightbl_policy);
	if (err < 0)
		goto errout;

	if (tb[NDTA_NAME] == NULL) {
		err = -EINVAL;
		goto errout;
	}

	ndtmsg = nlmsg_data(nlh);
	read_lock(&neigh_tbl_lock);
	for (tbl = neigh_tables; tbl; tbl = tbl->next) {
		if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
			continue;

		if (nla_strcmp(tb[NDTA_NAME], tbl->id) == 0)
			break;
	}

	if (tbl == NULL) {
		err = -ENOENT;
		goto errout_locked;
	}

	/*
	 * We acquire tbl->lock to be nice to the periodic timers and
	 * make sure they always see a consistent set of values.
	 */
	write_lock_bh(&tbl->lock);

	if (tb[NDTA_PARMS]) {
		struct nlattr *tbp[NDTPA_MAX+1];
		struct neigh_parms *p;
		int i, ifindex = 0;

		err = nla_parse_nested(tbp, NDTPA_MAX, tb[NDTA_PARMS],
				       nl_ntbl_parm_policy);
		if (err < 0)
			goto errout_tbl_lock;

		if (tbp[NDTPA_IFINDEX])
			ifindex = nla_get_u32(tbp[NDTPA_IFINDEX]);

		p = lookup_neigh_parms(tbl, net, ifindex);
		if (p == NULL) {
			err = -ENOENT;
			goto errout_tbl_lock;
		}

		for (i = 1; i <= NDTPA_MAX; i++) {
			if (tbp[i] == NULL)
				continue;

			switch (i) {
			case NDTPA_QUEUE_LEN:
				p->queue_len_bytes = nla_get_u32(tbp[i]) *
						     SKB_TRUESIZE(ETH_FRAME_LEN);
				break;
			case NDTPA_QUEUE_LENBYTES:
				p->queue_len_bytes = nla_get_u32(tbp[i]);
				break;
			case NDTPA_PROXY_QLEN:
				p->proxy_qlen = nla_get_u32(tbp[i]);
				break;
			case NDTPA_APP_PROBES:
				p->app_probes = nla_get_u32(tbp[i]);
				break;
			case NDTPA_UCAST_PROBES:
				p->ucast_probes = nla_get_u32(tbp[i]);
				break;
			case NDTPA_MCAST_PROBES:
				p->mcast_probes = nla_get_u32(tbp[i]);
				break;
			case NDTPA_BASE_REACHABLE_TIME:
				p->base_reachable_time = nla_get_msecs(tbp[i]);
				break;
			case NDTPA_GC_STALETIME:
				p->gc_staletime = nla_get_msecs(tbp[i]);
				break;
			case NDTPA_DELAY_PROBE_TIME:
				p->delay_probe_time = nla_get_msecs(tbp[i]);
				break;
			case NDTPA_RETRANS_TIME:
				p->retrans_time = nla_get_msecs(tbp[i]);
				break;
			case NDTPA_ANYCAST_DELAY:
				p->anycast_delay = nla_get_msecs(tbp[i]);
				break;
			case NDTPA_PROXY_DELAY:
				p->proxy_delay = nla_get_msecs(tbp[i]);
				break;
			case NDTPA_LOCKTIME:
				p->locktime = nla_get_msecs(tbp[i]);
				break;
			}
		}
	}

	if (tb[NDTA_THRESH1])
		tbl->gc_thresh1 = nla_get_u32(tb[NDTA_THRESH1]);

	if (tb[NDTA_THRESH2])
		tbl->gc_thresh2 = nla_get_u32(tb[NDTA_THRESH2]);

	if (tb[NDTA_THRESH3])
		tbl->gc_thresh3 = nla_get_u32(tb[NDTA_THRESH3]);

	if (tb[NDTA_GC_INTERVAL])
		tbl->gc_interval = nla_get_msecs(tb[NDTA_GC_INTERVAL]);

	err = 0;

errout_tbl_lock:
	write_unlock_bh(&tbl->lock);
errout_locked:
	read_unlock(&neigh_tbl_lock);
errout:
	return err;
}
static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	int family, tidx, nidx = 0;
	int tbl_skip = cb->args[0];
	int neigh_skip = cb->args[1];
	struct neigh_table *tbl;

	family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;

	read_lock(&neigh_tbl_lock);
	for (tbl = neigh_tables, tidx = 0; tbl; tbl = tbl->next, tidx++) {
		struct neigh_parms *p;

		if (tidx < tbl_skip || (family && tbl->family != family))
			continue;

		if (neightbl_fill_info(skb, tbl, NETLINK_CB(cb->skb).pid,
				       cb->nlh->nlmsg_seq, RTM_NEWNEIGHTBL,
				       NLM_F_MULTI) <= 0)
			break;

		for (nidx = 0, p = tbl->parms.next; p; p = p->next) {
			if (!net_eq(neigh_parms_net(p), net))
				continue;

			if (nidx < neigh_skip)
				goto next;

			if (neightbl_fill_param_info(skb, tbl, p,
						     NETLINK_CB(cb->skb).pid,
						     cb->nlh->nlmsg_seq,
						     RTM_NEWNEIGHTBL,
						     NLM_F_MULTI) <= 0)
				goto out;
		next:
			nidx++;
		}

		neigh_skip = 0;
	}
out:
	read_unlock(&neigh_tbl_lock);
	cb->args[0] = tidx;
	cb->args[1] = nidx;

	return skb->len;
}
static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
			   u32 pid, u32 seq, int type, unsigned int flags)
{
	unsigned long now = jiffies;
	struct nda_cacheinfo ci;
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ndm = nlmsg_data(nlh);
	ndm->ndm_family	 = neigh->ops->family;
	ndm->ndm_pad1    = 0;
	ndm->ndm_pad2    = 0;
	ndm->ndm_flags	 = neigh->flags;
	ndm->ndm_type	 = neigh->type;
	ndm->ndm_ifindex = neigh->dev->ifindex;

	if (nla_put(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key))
		goto nla_put_failure;

	read_lock_bh(&neigh->lock);
	ndm->ndm_state	 = neigh->nud_state;
	if (neigh->nud_state & NUD_VALID) {
		char haddr[MAX_ADDR_LEN];

		neigh_ha_snapshot(haddr, neigh, neigh->dev);
		if (nla_put(skb, NDA_LLADDR, neigh->dev->addr_len, haddr) < 0) {
			read_unlock_bh(&neigh->lock);
			goto nla_put_failure;
		}
	}

	ci.ndm_used	 = jiffies_to_clock_t(now - neigh->used);
	ci.ndm_confirmed = jiffies_to_clock_t(now - neigh->confirmed);
	ci.ndm_updated	 = jiffies_to_clock_t(now - neigh->updated);
	ci.ndm_refcnt	 = atomic_read(&neigh->refcnt) - 1;
	read_unlock_bh(&neigh->lock);

	if (nla_put_u32(skb, NDA_PROBES, atomic_read(&neigh->probes)) ||
	    nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
		goto nla_put_failure;

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn,
			    u32 pid, u32 seq, int type, unsigned int flags,
			    struct neigh_table *tbl)
{
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ndm = nlmsg_data(nlh);
	ndm->ndm_family	 = tbl->family;
	ndm->ndm_pad1    = 0;
	ndm->ndm_pad2    = 0;
	ndm->ndm_flags	 = pn->flags | NTF_PROXY;
	ndm->ndm_type	 = NDA_DST;
	ndm->ndm_ifindex = pn->dev->ifindex;
	ndm->ndm_state	 = NUD_NONE;

	if (nla_put(skb, NDA_DST, tbl->key_len, pn->key))
		goto nla_put_failure;

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
static void neigh_update_notify(struct neighbour *neigh)
{
	call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
	__neigh_notify(neigh, RTM_NEWNEIGH, 0);
}
static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
			    struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct neighbour *n;
	int rc, h, s_h = cb->args[1];
	int idx, s_idx = idx = cb->args[2];
	struct neigh_hash_table *nht;

	rcu_read_lock_bh();
	nht = rcu_dereference_bh(tbl->nht);

	for (h = s_h; h < (1 << nht->hash_shift); h++) {
		if (h > s_h)
			s_idx = 0;
		for (n = rcu_dereference_bh(nht->hash_buckets[h]), idx = 0;
		     n != NULL;
		     n = rcu_dereference_bh(n->next)) {
			if (!net_eq(dev_net(n->dev), net))
				continue;
			if (idx < s_idx)
				goto next;
			if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).pid,
					    cb->nlh->nlmsg_seq,
					    RTM_NEWNEIGH,
					    NLM_F_MULTI) <= 0) {
				rc = -1;
				goto out;
			}
next:
			idx++;
		}
	}
	rc = skb->len;
out:
	rcu_read_unlock_bh();
	cb->args[1] = h;
	cb->args[2] = idx;
	return rc;
}
static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
			     struct netlink_callback *cb)
{
	struct pneigh_entry *n;
	struct net *net = sock_net(skb->sk);
	int rc, h, s_h = cb->args[3];
	int idx, s_idx = idx = cb->args[4];

	read_lock_bh(&tbl->lock);

	for (h = s_h; h <= PNEIGH_HASHMASK; h++) {
		if (h > s_h)
			s_idx = 0;
		for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) {
			if (dev_net(n->dev) != net)
				continue;
			if (idx < s_idx)
				goto next;
			if (pneigh_fill_info(skb, n, NETLINK_CB(cb->skb).pid,
					     cb->nlh->nlmsg_seq,
					     RTM_NEWNEIGH,
					     NLM_F_MULTI, tbl) <= 0) {
				read_unlock_bh(&tbl->lock);
				rc = -1;
				goto out;
			}
		next:
			idx++;
		}
	}

	read_unlock_bh(&tbl->lock);
	rc = skb->len;
out:
	cb->args[3] = h;
	cb->args[4] = idx;
	return rc;
}
static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct neigh_table *tbl;
	int t, family, s_t;
	int proxy = 0;
	int err;

	read_lock(&neigh_tbl_lock);
	family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;

	/* check for full ndmsg structure presence, family member is
	 * the same for both structures
	 */
	if (nlmsg_len(cb->nlh) >= sizeof(struct ndmsg) &&
	    ((struct ndmsg *) nlmsg_data(cb->nlh))->ndm_flags == NTF_PROXY)
		proxy = 1;

	s_t = cb->args[0];

	for (tbl = neigh_tables, t = 0; tbl;
	     tbl = tbl->next, t++) {
		if (t < s_t || (family && tbl->family != family))
			continue;
		if (t > s_t)
			memset(&cb->args[1], 0, sizeof(cb->args) -
						sizeof(cb->args[0]));
		if (proxy)
			err = pneigh_dump_table(tbl, skb, cb);
		else
			err = neigh_dump_table(tbl, skb, cb);
		if (err < 0)
			break;
	}
	read_unlock(&neigh_tbl_lock);

	cb->args[0] = t;
	return skb->len;
}
void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
{
	int chain;
	struct neigh_hash_table *nht;

	rcu_read_lock_bh();
	nht = rcu_dereference_bh(tbl->nht);

	read_lock(&tbl->lock); /* avoid resizes */
	for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
		struct neighbour *n;

		for (n = rcu_dereference_bh(nht->hash_buckets[chain]);
		     n != NULL;
		     n = rcu_dereference_bh(n->next))
			cb(n, cookie);
	}
	read_unlock(&tbl->lock);
	rcu_read_unlock_bh();
}
EXPORT_SYMBOL(neigh_for_each);
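/*
 * A minimal sketch of a neigh_for_each() callback; the function name and
 * the counter cookie are hypothetical. Usage:
 *	unsigned int c = 0;
 *	neigh_for_each(tbl, example_count_reachable, &c);
 */
#if 0	/* illustrative only */
static void example_count_reachable(struct neighbour *n, void *cookie)
{
	unsigned int *count = cookie;

	if (n->nud_state & NUD_REACHABLE)
		(*count)++;
}
#endif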
/* The tbl->lock must be held as a writer and BH disabled. */
void __neigh_for_each_release(struct neigh_table *tbl,
			      int (*cb)(struct neighbour *))
{
	int chain;
	struct neigh_hash_table *nht;

	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));
	for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
		struct neighbour *n;
		struct neighbour __rcu **np;

		np = &nht->hash_buckets[chain];
		while ((n = rcu_dereference_protected(*np,
					lockdep_is_held(&tbl->lock))) != NULL) {
			int release;

			write_lock(&n->lock);
			release = cb(n);
			if (release) {
				rcu_assign_pointer(*np,
					rcu_dereference_protected(n->next,
						lockdep_is_held(&tbl->lock)));
				n->dead = 1;
			} else
				np = &n->next;
			write_unlock(&n->lock);
			if (release)
				neigh_cleanup_and_release(n);
		}
	}
}
EXPORT_SYMBOL(__neigh_for_each_release);
#ifdef CONFIG_PROC_FS

static struct neighbour *neigh_get_first(struct seq_file *seq)
{
	struct neigh_seq_state *state = seq->private;
	struct net *net = seq_file_net(seq);
	struct neigh_hash_table *nht = state->nht;
	struct neighbour *n = NULL;
	int bucket = state->bucket;

	state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
	for (bucket = 0; bucket < (1 << nht->hash_shift); bucket++) {
		n = rcu_dereference_bh(nht->hash_buckets[bucket]);

		while (n) {
			if (!net_eq(dev_net(n->dev), net))
				goto next;
			if (state->neigh_sub_iter) {
				loff_t fakep = 0;
				void *v;

				v = state->neigh_sub_iter(state, n, &fakep);
				if (!v)
					goto next;
			}
			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
				break;
			if (n->nud_state & ~NUD_NOARP)
				break;
next:
			n = rcu_dereference_bh(n->next);
		}

		if (n)
			break;
	}
	state->bucket = bucket;

	return n;
}

static struct neighbour *neigh_get_next(struct seq_file *seq,
					struct neighbour *n,
					loff_t *pos)
{
	struct neigh_seq_state *state = seq->private;
	struct net *net = seq_file_net(seq);
	struct neigh_hash_table *nht = state->nht;

	if (state->neigh_sub_iter) {
		void *v = state->neigh_sub_iter(state, n, pos);
		if (v)
			return n;
	}
	n = rcu_dereference_bh(n->next);

	while (1) {
		while (n) {
			if (!net_eq(dev_net(n->dev), net))
				goto next;
			if (state->neigh_sub_iter) {
				void *v = state->neigh_sub_iter(state, n, pos);
				if (v)
					return n;
				goto next;
			}
			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
				break;

			if (n->nud_state & ~NUD_NOARP)
				break;
next:
			n = rcu_dereference_bh(n->next);
		}

		if (n)
			break;

		if (++state->bucket >= (1 << nht->hash_shift))
			break;

		n = rcu_dereference_bh(nht->hash_buckets[state->bucket]);
	}

	if (n && pos)
		--(*pos);
	return n;
}

static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
{
	struct neighbour *n = neigh_get_first(seq);

	if (n) {
		--(*pos);
		while (*pos) {
			n = neigh_get_next(seq, n, pos);
			if (!n)
				break;
		}
	}
	return *pos ? NULL : n;
}
static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
{
	struct neigh_seq_state *state = seq->private;
	struct net *net = seq_file_net(seq);
	struct neigh_table *tbl = state->tbl;
	struct pneigh_entry *pn = NULL;
	int bucket = state->bucket;

	state->flags |= NEIGH_SEQ_IS_PNEIGH;
	for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
		pn = tbl->phash_buckets[bucket];
		while (pn && !net_eq(pneigh_net(pn), net))
			pn = pn->next;
		if (pn)
			break;
	}
	state->bucket = bucket;

	return pn;
}

static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
					    struct pneigh_entry *pn,
					    loff_t *pos)
{
	struct neigh_seq_state *state = seq->private;
	struct net *net = seq_file_net(seq);
	struct neigh_table *tbl = state->tbl;

	do {
		pn = pn->next;
	} while (pn && !net_eq(pneigh_net(pn), net));

	while (!pn) {
		if (++state->bucket > PNEIGH_HASHMASK)
			break;
		pn = tbl->phash_buckets[state->bucket];
		while (pn && !net_eq(pneigh_net(pn), net))
			pn = pn->next;
		if (pn)
			break;
	}

	if (pn && pos)
		--(*pos);

	return pn;
}

static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
{
	struct pneigh_entry *pn = pneigh_get_first(seq);

	if (pn) {
		--(*pos);
		while (*pos) {
			pn = pneigh_get_next(seq, pn, pos);
			if (!pn)
				break;
		}
	}
	return *pos ? NULL : pn;
}

static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
{
	struct neigh_seq_state *state = seq->private;
	void *rc;
	loff_t idxpos = *pos;

	rc = neigh_get_idx(seq, &idxpos);
	if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
		rc = pneigh_get_idx(seq, &idxpos);

	return rc;
}
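
/*
 * Position bookkeeping, spelled out: neigh_get_idx() and
 * pneigh_get_idx() consume *pos, decrementing it once per entry
 * stepped over, so *pos == 0 on return means the requested entry was
 * found. neigh_get_idx_any() relies on that: when the neighbour table
 * is exhausted first, the residual idxpos carries over into the proxy
 * table, presenting both tables as one flat index space.
 */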

void *neigh_seq_start(struct seq_file *seq, loff_t *pos,
		      struct neigh_table *tbl, unsigned int neigh_seq_flags)
	__acquires(rcu_bh)
{
	struct neigh_seq_state *state = seq->private;

	state->tbl = tbl;
	state->bucket = 0;
	state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);

	rcu_read_lock_bh();
	state->nht = rcu_dereference_bh(tbl->nht);

	return *pos ? neigh_get_idx_any(seq, pos) : SEQ_START_TOKEN;
}
EXPORT_SYMBOL(neigh_seq_start);

void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct neigh_seq_state *state;
	void *rc;

	if (v == SEQ_START_TOKEN) {
		rc = neigh_get_first(seq);
		goto out;
	}

	state = seq->private;
	if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) {
		rc = neigh_get_next(seq, v, NULL);
		if (rc)
			goto out;
		if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY))
			rc = pneigh_get_first(seq);
	} else {
		BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY);
		rc = pneigh_get_next(seq, v, NULL);
	}
out:
	++(*pos);
	return rc;
}
EXPORT_SYMBOL(neigh_seq_next);

void neigh_seq_stop(struct seq_file *seq, void *v)
	__releases(rcu_bh)
{
	rcu_read_unlock_bh();
}
EXPORT_SYMBOL(neigh_seq_stop);
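
/*
 * Example of wiring a protocol's /proc seq_file to these iterators,
 * sketched after the way an ARP-style user would do it ("my_tbl",
 * my_seq_show() and the NEIGH_SEQ_SKIP_NOARP choice are illustrative
 * assumptions). The seq_file's private data must be a struct
 * neigh_seq_state, e.g. allocated via seq_open_net(inode, file,
 * &my_seq_ops, sizeof(struct neigh_seq_state)).
 *
 *	static void *my_seq_start(struct seq_file *seq, loff_t *pos)
 *	{
 *		return neigh_seq_start(seq, pos, &my_tbl,
 *				       NEIGH_SEQ_SKIP_NOARP);
 *	}
 *
 *	static const struct seq_operations my_seq_ops = {
 *		.start	= my_seq_start,
 *		.next	= neigh_seq_next,
 *		.stop	= neigh_seq_stop,
 *		.show	= my_seq_show,
 *	};
 */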

/* statistics via seq_file */

static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct neigh_table *tbl = seq->private;
	int cpu;

	if (*pos == 0)
		return SEQ_START_TOKEN;

	for (cpu = *pos - 1; cpu < nr_cpu_ids; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu + 1;
		return per_cpu_ptr(tbl->stats, cpu);
	}
	return NULL;
}

static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct neigh_table *tbl = seq->private;
	int cpu;

	for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu + 1;
		return per_cpu_ptr(tbl->stats, cpu);
	}
	return NULL;
}

static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
{
}

static int neigh_stat_seq_show(struct seq_file *seq, void *v)
{
	struct neigh_table *tbl = seq->private;
	struct neigh_statistics *st = v;

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "entries allocs destroys hash_grows lookups hits res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs unresolved_discards\n");
		return 0;
	}

	seq_printf(seq, "%08x %08lx %08lx %08lx %08lx %08lx %08lx "
			"%08lx %08lx %08lx %08lx %08lx\n",
		   atomic_read(&tbl->entries),
		   st->allocs,
		   st->destroys,
		   st->hash_grows,
		   st->lookups,
		   st->hits,
		   st->res_failed,
		   st->rcv_probes_mcast,
		   st->rcv_probes_ucast,
		   st->periodic_gc_runs,
		   st->forced_gc_runs,
		   st->unres_discards);

	return 0;
}

static const struct seq_operations neigh_stat_seq_ops = {
	.start	= neigh_stat_seq_start,
	.next	= neigh_stat_seq_next,
	.stop	= neigh_stat_seq_stop,
	.show	= neigh_stat_seq_show,
};

static int neigh_stat_seq_open(struct inode *inode, struct file *file)
{
	int ret = seq_open(file, &neigh_stat_seq_ops);

	if (!ret) {
		struct seq_file *sf = file->private_data;
		sf->private = PDE(inode)->data;
	}
	return ret;
}

static const struct file_operations neigh_stat_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = neigh_stat_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release,
};
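
/*
 * These per-CPU counters surface through the table's proc
 * registration as one row per possible CPU under the header printed
 * above; for the IPv4 table, for example, as /proc/net/stat/arp_cache.
 */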

#endif /* CONFIG_PROC_FS */

static inline size_t neigh_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct ndmsg))
	       + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
	       + nla_total_size(MAX_ADDR_LEN) /* NDA_LLADDR */
	       + nla_total_size(sizeof(struct nda_cacheinfo))
	       + nla_total_size(4); /* NDA_PROBES */
}
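
/*
 * The sum above is a worst case: MAX_ADDR_LEN is charged for both
 * addresses whatever the device's real address length, which is why a
 * later -EMSGSIZE from neigh_fill_info() is treated as a bug here
 * rather than a recoverable error. A worked example of the attribute
 * arithmetic: nla_total_size(4) = NLA_ALIGN(NLA_HDRLEN + 4) = 8 bytes
 * for the u32 NDA_PROBES payload.
 */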

static void __neigh_notify(struct neighbour *n, int type, int flags)
{
	struct net *net = dev_net(n->dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(neigh_nlmsg_size(), GFP_ATOMIC);
	if (skb == NULL)
		goto errout;

	err = neigh_fill_info(skb, n, 0, 0, type, flags);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in neigh_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
}

#ifdef CONFIG_ARPD
void neigh_app_ns(struct neighbour *n)
{
	__neigh_notify(n, RTM_GETNEIGH, NLM_F_REQUEST);
}
EXPORT_SYMBOL(neigh_app_ns);
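
/*
 * neigh_app_ns() backs the "app_solicit" mechanism: the RTM_GETNEIGH
 * request is multicast to RTNLGRP_NEIGH listeners so a userspace
 * helper (historically arpd) can attempt resolution on the kernel's
 * behalf.
 */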

#endif /* CONFIG_ARPD */

#ifdef CONFIG_SYSCTL

static int proc_unres_qlen(ctl_table *ctl, int write, void __user *buffer,
			   size_t *lenp, loff_t *ppos)
{
	int size, ret;
	ctl_table tmp = *ctl;

	tmp.data = &size;
	size = DIV_ROUND_UP(*(int *)ctl->data, SKB_TRUESIZE(ETH_FRAME_LEN));
	ret = proc_dointvec(&tmp, write, buffer, lenp, ppos);

	if (write && !ret)
		*(int *)ctl->data = size * SKB_TRUESIZE(ETH_FRAME_LEN);
	return ret;
}
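
/*
 * A worked example of the conversion above: the legacy "unres_qlen"
 * sysctl counts packets, while the queue itself is accounted in bytes.
 * One packet is charged SKB_TRUESIZE(ETH_FRAME_LEN), i.e. a worst-case
 * Ethernet frame plus sk_buff overhead (the exact figure depends on
 * the running kernel's struct sizes), so:
 *
 *	reading:  unres_qlen = DIV_ROUND_UP(unres_qlen_bytes,
 *					    SKB_TRUESIZE(ETH_FRAME_LEN));
 *	writing:  unres_qlen_bytes = unres_qlen *
 *				     SKB_TRUESIZE(ETH_FRAME_LEN);
 */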

enum {
	NEIGH_VAR_MCAST_PROBE,
	NEIGH_VAR_UCAST_PROBE,
	NEIGH_VAR_APP_PROBE,
	NEIGH_VAR_RETRANS_TIME,
	NEIGH_VAR_BASE_REACHABLE_TIME,
	NEIGH_VAR_DELAY_PROBE_TIME,
	NEIGH_VAR_GC_STALETIME,
	NEIGH_VAR_QUEUE_LEN,
	NEIGH_VAR_QUEUE_LEN_BYTES,
	NEIGH_VAR_PROXY_QLEN,
	NEIGH_VAR_ANYCAST_DELAY,
	NEIGH_VAR_PROXY_DELAY,
	NEIGH_VAR_LOCKTIME,
	NEIGH_VAR_RETRANS_TIME_MS,
	NEIGH_VAR_BASE_REACHABLE_TIME_MS,
	NEIGH_VAR_GC_INTERVAL,
	NEIGH_VAR_GC_THRESH1,
	NEIGH_VAR_GC_THRESH2,
	NEIGH_VAR_GC_THRESH3,
	NEIGH_VAR_MAX
};

static struct neigh_sysctl_table {
	struct ctl_table_header *sysctl_header;
	struct ctl_table neigh_vars[NEIGH_VAR_MAX + 1];
} neigh_sysctl_template __read_mostly = {
	.neigh_vars = {
		[NEIGH_VAR_MCAST_PROBE] = {
			.procname	= "mcast_solicit",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec,
		},
		[NEIGH_VAR_UCAST_PROBE] = {
			.procname	= "ucast_solicit",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec,
		},
		[NEIGH_VAR_APP_PROBE] = {
			.procname	= "app_solicit",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec,
		},
		[NEIGH_VAR_RETRANS_TIME] = {
			.procname	= "retrans_time",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec_userhz_jiffies,
		},
		[NEIGH_VAR_BASE_REACHABLE_TIME] = {
			.procname	= "base_reachable_time",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec_jiffies,
		},
		[NEIGH_VAR_DELAY_PROBE_TIME] = {
			.procname	= "delay_first_probe_time",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec_jiffies,
		},
		[NEIGH_VAR_GC_STALETIME] = {
			.procname	= "gc_stale_time",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec_jiffies,
		},
		[NEIGH_VAR_QUEUE_LEN] = {
			.procname	= "unres_qlen",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_unres_qlen,
		},
		[NEIGH_VAR_QUEUE_LEN_BYTES] = {
			.procname	= "unres_qlen_bytes",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec,
		},
		[NEIGH_VAR_PROXY_QLEN] = {
			.procname	= "proxy_qlen",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec,
		},
		[NEIGH_VAR_ANYCAST_DELAY] = {
			.procname	= "anycast_delay",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec_userhz_jiffies,
		},
		[NEIGH_VAR_PROXY_DELAY] = {
			.procname	= "proxy_delay",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec_userhz_jiffies,
		},
		[NEIGH_VAR_LOCKTIME] = {
			.procname	= "locktime",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec_userhz_jiffies,
		},
		[NEIGH_VAR_RETRANS_TIME_MS] = {
			.procname	= "retrans_time_ms",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec_ms_jiffies,
		},
		[NEIGH_VAR_BASE_REACHABLE_TIME_MS] = {
			.procname	= "base_reachable_time_ms",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec_ms_jiffies,
		},
		[NEIGH_VAR_GC_INTERVAL] = {
			.procname	= "gc_interval",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec_jiffies,
		},
		[NEIGH_VAR_GC_THRESH1] = {
			.procname	= "gc_thresh1",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec,
		},
		[NEIGH_VAR_GC_THRESH2] = {
			.procname	= "gc_thresh2",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec,
		},
		[NEIGH_VAR_GC_THRESH3] = {
			.procname	= "gc_thresh3",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec,
		},
	},
};

int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
			  char *p_name, proc_handler *handler)
{
	struct neigh_sysctl_table *t;
	const char *dev_name_source = NULL;
	char neigh_path[sizeof("net//neigh/") + IFNAMSIZ + IFNAMSIZ];

	t = kmemdup(&neigh_sysctl_template, sizeof(*t), GFP_KERNEL);
	if (!t)
		goto err;

	t->neigh_vars[NEIGH_VAR_MCAST_PROBE].data = &p->mcast_probes;
	t->neigh_vars[NEIGH_VAR_UCAST_PROBE].data = &p->ucast_probes;
	t->neigh_vars[NEIGH_VAR_APP_PROBE].data = &p->app_probes;
	t->neigh_vars[NEIGH_VAR_RETRANS_TIME].data = &p->retrans_time;
	t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].data = &p->base_reachable_time;
	t->neigh_vars[NEIGH_VAR_DELAY_PROBE_TIME].data = &p->delay_probe_time;
	t->neigh_vars[NEIGH_VAR_GC_STALETIME].data = &p->gc_staletime;
	t->neigh_vars[NEIGH_VAR_QUEUE_LEN].data = &p->queue_len_bytes;
	t->neigh_vars[NEIGH_VAR_QUEUE_LEN_BYTES].data = &p->queue_len_bytes;
	t->neigh_vars[NEIGH_VAR_PROXY_QLEN].data = &p->proxy_qlen;
	t->neigh_vars[NEIGH_VAR_ANYCAST_DELAY].data = &p->anycast_delay;
	t->neigh_vars[NEIGH_VAR_PROXY_DELAY].data = &p->proxy_delay;
	t->neigh_vars[NEIGH_VAR_LOCKTIME].data = &p->locktime;
	t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].data = &p->retrans_time;
	t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].data = &p->base_reachable_time;

	if (dev) {
		dev_name_source = dev->name;
		/* Terminate the table early */
		memset(&t->neigh_vars[NEIGH_VAR_GC_INTERVAL], 0,
		       sizeof(t->neigh_vars[NEIGH_VAR_GC_INTERVAL]));
	} else {
		dev_name_source = "default";
		t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = (int *)(p + 1);
		t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = (int *)(p + 1) + 1;
		t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = (int *)(p + 1) + 2;
		t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = (int *)(p + 1) + 3;
	}

	if (handler) {
		/* RetransTime */
		t->neigh_vars[NEIGH_VAR_RETRANS_TIME].proc_handler = handler;
		t->neigh_vars[NEIGH_VAR_RETRANS_TIME].extra1 = dev;
		/* ReachableTime */
		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler = handler;
		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].extra1 = dev;
		/* RetransTime (in milliseconds) */
		t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].proc_handler = handler;
		t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].extra1 = dev;
		/* ReachableTime (in milliseconds) */
		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler = handler;
		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].extra1 = dev;
	}

	snprintf(neigh_path, sizeof(neigh_path), "net/%s/neigh/%s",
		 p_name, dev_name_source);

	t->sysctl_header =
		register_net_sysctl(neigh_parms_net(p), neigh_path, t->neigh_vars);
	if (!t->sysctl_header)
		goto free;

	p->sysctl_table = t;
	return 0;

free:
	kfree(t);
err:
	return -ENOBUFS;
}
EXPORT_SYMBOL(neigh_sysctl_register);
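
/*
 * Example call, sketched: a protocol registers each device's parms
 * under its own family name, roughly as an IPv4/ARP-style caller
 * would ("parms" and the NULL handler are illustrative):
 *
 *	neigh_sysctl_register(dev, parms, "ipv4", NULL);
 *
 * creating /proc/sys/net/ipv4/neigh/<devname>/... entries (or
 * .../neigh/default/... including the gc_* knobs when dev is NULL).
 * A non-NULL handler overrides the retrans_time and
 * base_reachable_time handlers so the protocol can mirror updates
 * into its own state.
 */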

void neigh_sysctl_unregister(struct neigh_parms *p)
{
	if (p->sysctl_table) {
		struct neigh_sysctl_table *t = p->sysctl_table;
		p->sysctl_table = NULL;
		unregister_net_sysctl_table(t->sysctl_header);
		kfree(t);
	}
}
EXPORT_SYMBOL(neigh_sysctl_unregister);

#endif /* CONFIG_SYSCTL */

static int __init neigh_init(void)
{
	rtnl_register(PF_UNSPEC, RTM_NEWNEIGH, neigh_add, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_DELNEIGH, neigh_delete, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETNEIGH, NULL, neigh_dump_info, NULL);

	rtnl_register(PF_UNSPEC, RTM_GETNEIGHTBL, NULL, neightbl_dump_info,
		      NULL);
	rtnl_register(PF_UNSPEC, RTM_SETNEIGHTBL, neightbl_set, NULL, NULL);

	return 0;
}

subsys_initcall(neigh_init);