neigh: Kill neigh_ops->hh_output
net/core/neighbour.c
1/*
2 * Generic address resolution entity
3 *
4 * Authors:
5 * Pedro Roque <roque@di.fc.ul.pt>
6 * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
12 *
13 * Fixes:
14 * Vitaly E. Lavrov releasing NULL neighbor in neigh_add.
15 * Harald Welte Add neighbour cache statistics like rtstat
16 */
17
18#include <linux/slab.h>
19#include <linux/types.h>
20#include <linux/kernel.h>
21#include <linux/module.h>
22#include <linux/socket.h>
23#include <linux/netdevice.h>
24#include <linux/proc_fs.h>
25#ifdef CONFIG_SYSCTL
26#include <linux/sysctl.h>
27#endif
28#include <linux/times.h>
29#include <net/net_namespace.h>
30#include <net/neighbour.h>
31#include <net/dst.h>
32#include <net/sock.h>
33#include <net/netevent.h>
34#include <net/netlink.h>
35#include <linux/rtnetlink.h>
36#include <linux/random.h>
37#include <linux/string.h>
38#include <linux/log2.h>
39
40#define NEIGH_DEBUG 1
41
42#define NEIGH_PRINTK(x...) printk(x)
43#define NEIGH_NOPRINTK(x...) do { ; } while(0)
44#define NEIGH_PRINTK1 NEIGH_NOPRINTK
45#define NEIGH_PRINTK2 NEIGH_NOPRINTK
46
47#if NEIGH_DEBUG >= 1
48#undef NEIGH_PRINTK1
49#define NEIGH_PRINTK1 NEIGH_PRINTK
50#endif
51#if NEIGH_DEBUG >= 2
52#undef NEIGH_PRINTK2
53#define NEIGH_PRINTK2 NEIGH_PRINTK
54#endif
55
56#define PNEIGH_HASHMASK 0xF
57
58static void neigh_timer_handler(unsigned long arg);
59static void __neigh_notify(struct neighbour *n, int type, int flags);
60static void neigh_update_notify(struct neighbour *neigh);
61static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
62
63static struct neigh_table *neigh_tables;
64#ifdef CONFIG_PROC_FS
65static const struct file_operations neigh_stat_seq_fops;
66#endif
67
68/*
69 Neighbour hash table buckets are protected by the rwlock tbl->lock.
70
71 - All scans/updates of hash buckets MUST be made under this lock.
72 - NOTHING clever should be done under this lock: no callbacks
73 into protocol backends, no attempts to send anything to the network.
74 That would deadlock if the backend/driver wants to use the neighbour
75 cache.
76 - If an entry requires some non-trivial action, increase
77 its reference count and release the table lock.
78
79 Neighbour entries are protected:
80 - by their reference count.
81 - by the rwlock neigh->lock
82
83 The reference count prevents destruction.
84
85 neigh->lock mainly serializes the link-layer address data and its
86 validity state. However, the same lock also protects other entry fields:
87 - the timer
88 - the resolution queue
89
90 Again, nothing clever shall be done under neigh->lock;
91 the most complicated operation we allow there is dev->hard_header.
92 It is assumed that dev->hard_header is simple and does
93 not call back into the neighbour tables.
94
95 The last lock is neigh_tbl_lock. It is a pure SMP lock protecting the
96 list of neighbour tables. This list is used only in process context.
97 */
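/* A minimal sketch (added for illustration; not part of the original file)
 * of the rule above: take a reference under tbl->lock, then drop the lock
 * before doing anything non-trivial. The helper name is hypothetical.
 */
#if 0	/* illustration only */
static void example_use_entry(struct neigh_table *tbl, struct neighbour *n)
{
	write_lock_bh(&tbl->lock);
	neigh_hold(n);			/* pin the entry under the table lock */
	write_unlock_bh(&tbl->lock);	/* drop the lock before slow work */
	/* safe to call into drivers / send to the network here */
	neigh_release(n);		/* drop our reference when done */
}
#endif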
98
99static DEFINE_RWLOCK(neigh_tbl_lock);
100
101static int neigh_blackhole(struct sk_buff *skb)
102{
103 kfree_skb(skb);
104 return -ENETDOWN;
105}
106
107static void neigh_cleanup_and_release(struct neighbour *neigh)
108{
109 if (neigh->parms->neigh_cleanup)
110 neigh->parms->neigh_cleanup(neigh);
111
112 __neigh_notify(neigh, RTM_DELNEIGH, 0);
113 neigh_release(neigh);
114}
115
116/*
117 * The result is randomly distributed over the interval (1/2)*base ... (3/2)*base.
118 * It corresponds to the default IPv6 settings and is not overridable,
119 * because it is a really reasonable choice.
120 */
121
122unsigned long neigh_rand_reach_time(unsigned long base)
123{
124 return base ? (net_random() % base) + (base >> 1) : 0;
125}
126EXPORT_SYMBOL(neigh_rand_reach_time);
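/* Illustration (added; not in the original file): net_random() % base is
 * uniform over [0, base - 1], so adding base >> 1 shifts it into roughly
 * [base/2, 3*base/2), i.e. the (1/2)*base ... (3/2)*base interval
 * described above.
 */
#if 0	/* illustration only */
static void example_reach_time(void)
{
	unsigned long t = neigh_rand_reach_time(30 * HZ);
	/* t lies in [15 * HZ, 45 * HZ); re-randomized on every call */
	(void)t;
}
#endif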
127
128
129static int neigh_forced_gc(struct neigh_table *tbl)
130{
131 int shrunk = 0;
132 int i;
133 struct neigh_hash_table *nht;
134
135 NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);
136
137 write_lock_bh(&tbl->lock);
138 nht = rcu_dereference_protected(tbl->nht,
139 lockdep_is_held(&tbl->lock));
140 for (i = 0; i < (1 << nht->hash_shift); i++) {
141 struct neighbour *n;
142 struct neighbour __rcu **np;
143
144 np = &nht->hash_buckets[i];
145 while ((n = rcu_dereference_protected(*np,
146 lockdep_is_held(&tbl->lock))) != NULL) {
147 			/* A neighbour record may be discarded if:
148 			 * - nobody refers to it, and
149 			 * - it is not permanent.
150 			 */
151 write_lock(&n->lock);
152 if (atomic_read(&n->refcnt) == 1 &&
153 !(n->nud_state & NUD_PERMANENT)) {
154 rcu_assign_pointer(*np,
155 rcu_dereference_protected(n->next,
156 lockdep_is_held(&tbl->lock)));
157 n->dead = 1;
158 shrunk = 1;
159 write_unlock(&n->lock);
160 neigh_cleanup_and_release(n);
161 continue;
162 }
163 write_unlock(&n->lock);
164 np = &n->next;
165 }
166 }
167
168 tbl->last_flush = jiffies;
169
170 write_unlock_bh(&tbl->lock);
171
172 return shrunk;
173}
174
175static void neigh_add_timer(struct neighbour *n, unsigned long when)
176{
177 neigh_hold(n);
178 if (unlikely(mod_timer(&n->timer, when))) {
179 printk("NEIGH: BUG, double timer add, state is %x\n",
180 n->nud_state);
181 dump_stack();
182 }
183}
184
185static int neigh_del_timer(struct neighbour *n)
186{
187 if ((n->nud_state & NUD_IN_TIMER) &&
188 del_timer(&n->timer)) {
189 neigh_release(n);
190 return 1;
191 }
192 return 0;
193}
194
195static void pneigh_queue_purge(struct sk_buff_head *list)
196{
197 struct sk_buff *skb;
198
199 while ((skb = skb_dequeue(list)) != NULL) {
200 dev_put(skb->dev);
201 kfree_skb(skb);
202 }
203}
204
205static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev)
206{
207 int i;
208 struct neigh_hash_table *nht;
209
210 nht = rcu_dereference_protected(tbl->nht,
211 lockdep_is_held(&tbl->lock));
212
213 for (i = 0; i < (1 << nht->hash_shift); i++) {
214 struct neighbour *n;
215 struct neighbour __rcu **np = &nht->hash_buckets[i];
216
217 while ((n = rcu_dereference_protected(*np,
218 lockdep_is_held(&tbl->lock))) != NULL) {
219 if (dev && n->dev != dev) {
220 np = &n->next;
221 continue;
222 }
223 rcu_assign_pointer(*np,
224 rcu_dereference_protected(n->next,
225 lockdep_is_held(&tbl->lock)));
226 write_lock(&n->lock);
227 neigh_del_timer(n);
228 n->dead = 1;
229
230 if (atomic_read(&n->refcnt) != 1) {
231 				/* The most unpleasant situation:
232 				   we must destroy the neighbour entry,
233 				   but someone still uses it.
234
235 				   Destruction will be delayed until
236 				   the last user releases us, but
237 				   we must kill timers etc. and move
238 				   the entry to a safe state.
239 				*/
240 skb_queue_purge(&n->arp_queue);
241 n->output = neigh_blackhole;
242 if (n->nud_state & NUD_VALID)
243 n->nud_state = NUD_NOARP;
244 else
245 n->nud_state = NUD_NONE;
246 NEIGH_PRINTK2("neigh %p is stray.\n", n);
247 }
248 write_unlock(&n->lock);
249 neigh_cleanup_and_release(n);
250 }
251 }
252}
253
254void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
255{
256 write_lock_bh(&tbl->lock);
257 neigh_flush_dev(tbl, dev);
258 write_unlock_bh(&tbl->lock);
259}
260EXPORT_SYMBOL(neigh_changeaddr);
261
262int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
263{
264 write_lock_bh(&tbl->lock);
265 neigh_flush_dev(tbl, dev);
266 pneigh_ifdown(tbl, dev);
267 write_unlock_bh(&tbl->lock);
268
269 del_timer_sync(&tbl->proxy_timer);
270 pneigh_queue_purge(&tbl->proxy_queue);
271 return 0;
272}
273EXPORT_SYMBOL(neigh_ifdown);
274
275static struct neighbour *neigh_alloc(struct neigh_table *tbl)
276{
277 struct neighbour *n = NULL;
278 unsigned long now = jiffies;
279 int entries;
280
281 entries = atomic_inc_return(&tbl->entries) - 1;
282 if (entries >= tbl->gc_thresh3 ||
283 (entries >= tbl->gc_thresh2 &&
284 time_after(now, tbl->last_flush + 5 * HZ))) {
285 if (!neigh_forced_gc(tbl) &&
286 entries >= tbl->gc_thresh3)
287 goto out_entries;
288 }
289
290 n = kmem_cache_zalloc(tbl->kmem_cachep, GFP_ATOMIC);
291 if (!n)
292 goto out_entries;
293
294 skb_queue_head_init(&n->arp_queue);
295 rwlock_init(&n->lock);
296 seqlock_init(&n->ha_lock);
297 n->updated = n->used = now;
298 n->nud_state = NUD_NONE;
299 n->output = neigh_blackhole;
300 seqlock_init(&n->hh.hh_lock);
301 n->parms = neigh_parms_clone(&tbl->parms);
302 setup_timer(&n->timer, neigh_timer_handler, (unsigned long)n);
303
304 NEIGH_CACHE_STAT_INC(tbl, allocs);
305 n->tbl = tbl;
306 atomic_set(&n->refcnt, 1);
307 n->dead = 1;
308out:
309 return n;
310
311out_entries:
312 atomic_dec(&tbl->entries);
313 goto out;
314}
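/* Sketch (added; not in the original file) of the gc_thresh policy applied
 * by neigh_alloc() above, given the 5 second flush interval in the code:
 *
 *	entries <  gc_thresh2                          -> allocate freely
 *	entries >= gc_thresh2 and last flush > 5s ago  -> run forced gc first
 *	entries >= gc_thresh3                          -> run forced gc; fail
 *	                                                  the allocation if it
 *	                                                  reclaimed nothing
 */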
315
316static struct neigh_hash_table *neigh_hash_alloc(unsigned int shift)
317{
318 size_t size = (1 << shift) * sizeof(struct neighbour *);
319 struct neigh_hash_table *ret;
320 struct neighbour __rcu **buckets;
321
322 ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
323 if (!ret)
324 return NULL;
325 if (size <= PAGE_SIZE)
326 buckets = kzalloc(size, GFP_ATOMIC);
327 else
328 buckets = (struct neighbour __rcu **)
329 __get_free_pages(GFP_ATOMIC | __GFP_ZERO,
330 get_order(size));
331 if (!buckets) {
332 kfree(ret);
333 return NULL;
334 }
335 ret->hash_buckets = buckets;
336 ret->hash_shift = shift;
337 get_random_bytes(&ret->hash_rnd, sizeof(ret->hash_rnd));
338 ret->hash_rnd |= 1;
339 return ret;
340}
341
342static void neigh_hash_free_rcu(struct rcu_head *head)
343{
344 struct neigh_hash_table *nht = container_of(head,
345 struct neigh_hash_table,
346 rcu);
347 size_t size = (1 << nht->hash_shift) * sizeof(struct neighbour *);
348 struct neighbour __rcu **buckets = nht->hash_buckets;
349
350 if (size <= PAGE_SIZE)
351 kfree(buckets);
352 else
353 free_pages((unsigned long)buckets, get_order(size));
354 kfree(nht);
355}
356
357static struct neigh_hash_table *neigh_hash_grow(struct neigh_table *tbl,
358 unsigned long new_shift)
359{
360 unsigned int i, hash;
361 struct neigh_hash_table *new_nht, *old_nht;
362
363 NEIGH_CACHE_STAT_INC(tbl, hash_grows);
364
365 old_nht = rcu_dereference_protected(tbl->nht,
366 lockdep_is_held(&tbl->lock));
367 new_nht = neigh_hash_alloc(new_shift);
368 if (!new_nht)
369 return old_nht;
370
371 for (i = 0; i < (1 << old_nht->hash_shift); i++) {
372 struct neighbour *n, *next;
373
374 for (n = rcu_dereference_protected(old_nht->hash_buckets[i],
375 lockdep_is_held(&tbl->lock));
376 n != NULL;
377 n = next) {
378 hash = tbl->hash(n->primary_key, n->dev,
379 new_nht->hash_rnd);
380
381 hash >>= (32 - new_nht->hash_shift);
382 next = rcu_dereference_protected(n->next,
383 lockdep_is_held(&tbl->lock));
384
385 rcu_assign_pointer(n->next,
386 rcu_dereference_protected(
387 new_nht->hash_buckets[hash],
388 lockdep_is_held(&tbl->lock)));
389 rcu_assign_pointer(new_nht->hash_buckets[hash], n);
390 }
391 }
392
393 rcu_assign_pointer(tbl->nht, new_nht);
394 call_rcu(&old_nht->rcu, neigh_hash_free_rcu);
395 return new_nht;
396}
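/* Illustration (added; not in the original file): hash values are 32 bits
 * wide, so "hash >> (32 - shift)" keeps the top 'shift' bits as the bucket
 * index, giving an index in [0, (1 << shift) - 1]:
 */
#if 0	/* illustration only */
static unsigned int example_bucket_index(u32 hash, unsigned int shift)
{
	return hash >> (32 - shift);	/* same selection as used above */
}
#endif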
397
398struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
399 struct net_device *dev)
400{
401 struct neighbour *n;
402 int key_len = tbl->key_len;
403 u32 hash_val;
404 struct neigh_hash_table *nht;
405
406 NEIGH_CACHE_STAT_INC(tbl, lookups);
407
408 rcu_read_lock_bh();
409 nht = rcu_dereference_bh(tbl->nht);
410 hash_val = tbl->hash(pkey, dev, nht->hash_rnd) >> (32 - nht->hash_shift);
411
412 for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
413 n != NULL;
414 n = rcu_dereference_bh(n->next)) {
415 if (dev == n->dev && !memcmp(n->primary_key, pkey, key_len)) {
416 if (!atomic_inc_not_zero(&n->refcnt))
417 n = NULL;
418 NEIGH_CACHE_STAT_INC(tbl, hits);
419 break;
420 }
421 }
422
423 rcu_read_unlock_bh();
424 return n;
425}
426EXPORT_SYMBOL(neigh_lookup);
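/* A usage sketch (added; not in the original file): neigh_lookup() returns
 * a referenced entry or NULL, so every hit must be paired with
 * neigh_release(). All arguments here are caller-supplied placeholders.
 */
#if 0	/* illustration only */
static void example_lookup(struct neigh_table *tbl, const void *key,
			   struct net_device *dev)
{
	struct neighbour *n = neigh_lookup(tbl, key, dev);

	if (n) {
		/* inspect n->nud_state, n->ha, ... */
		neigh_release(n);	/* drop the reference neigh_lookup took */
	}
}
#endif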
427
428struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
429 const void *pkey)
430{
431 struct neighbour *n;
432 int key_len = tbl->key_len;
433 u32 hash_val;
434 struct neigh_hash_table *nht;
435
436 NEIGH_CACHE_STAT_INC(tbl, lookups);
437
438 rcu_read_lock_bh();
439 nht = rcu_dereference_bh(tbl->nht);
440 hash_val = tbl->hash(pkey, NULL, nht->hash_rnd) >> (32 - nht->hash_shift);
441
442 for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
443 n != NULL;
444 n = rcu_dereference_bh(n->next)) {
445 if (!memcmp(n->primary_key, pkey, key_len) &&
446 net_eq(dev_net(n->dev), net)) {
447 if (!atomic_inc_not_zero(&n->refcnt))
448 n = NULL;
449 NEIGH_CACHE_STAT_INC(tbl, hits);
450 break;
451 }
452 }
453
454 rcu_read_unlock_bh();
455 return n;
456}
457EXPORT_SYMBOL(neigh_lookup_nodev);
458
459struct neighbour *neigh_create(struct neigh_table *tbl, const void *pkey,
460 struct net_device *dev)
461{
462 u32 hash_val;
463 int key_len = tbl->key_len;
464 int error;
465 struct neighbour *n1, *rc, *n = neigh_alloc(tbl);
466 struct neigh_hash_table *nht;
467
468 if (!n) {
469 rc = ERR_PTR(-ENOBUFS);
470 goto out;
471 }
472
473 memcpy(n->primary_key, pkey, key_len);
474 n->dev = dev;
475 dev_hold(dev);
476
477 /* Protocol specific setup. */
478 if (tbl->constructor && (error = tbl->constructor(n)) < 0) {
479 rc = ERR_PTR(error);
480 goto out_neigh_release;
481 }
482
483 /* Device specific setup. */
484 if (n->parms->neigh_setup &&
485 (error = n->parms->neigh_setup(n)) < 0) {
486 rc = ERR_PTR(error);
487 goto out_neigh_release;
488 }
489
490 n->confirmed = jiffies - (n->parms->base_reachable_time << 1);
491
492 write_lock_bh(&tbl->lock);
493 nht = rcu_dereference_protected(tbl->nht,
494 lockdep_is_held(&tbl->lock));
495
496 if (atomic_read(&tbl->entries) > (1 << nht->hash_shift))
497 nht = neigh_hash_grow(tbl, nht->hash_shift + 1);
498
499 hash_val = tbl->hash(pkey, dev, nht->hash_rnd) >> (32 - nht->hash_shift);
500
501 if (n->parms->dead) {
502 rc = ERR_PTR(-EINVAL);
503 goto out_tbl_unlock;
504 }
505
506 for (n1 = rcu_dereference_protected(nht->hash_buckets[hash_val],
507 lockdep_is_held(&tbl->lock));
508 n1 != NULL;
509 n1 = rcu_dereference_protected(n1->next,
510 lockdep_is_held(&tbl->lock))) {
511 if (dev == n1->dev && !memcmp(n1->primary_key, pkey, key_len)) {
512 neigh_hold(n1);
513 rc = n1;
514 goto out_tbl_unlock;
515 }
516 }
517
518 n->dead = 0;
519 neigh_hold(n);
520 rcu_assign_pointer(n->next,
521 rcu_dereference_protected(nht->hash_buckets[hash_val],
522 lockdep_is_held(&tbl->lock)));
523 rcu_assign_pointer(nht->hash_buckets[hash_val], n);
524 write_unlock_bh(&tbl->lock);
525 NEIGH_PRINTK2("neigh %p is created.\n", n);
526 rc = n;
527out:
528 return rc;
529out_tbl_unlock:
530 write_unlock_bh(&tbl->lock);
531out_neigh_release:
532 neigh_release(n);
533 goto out;
534}
535EXPORT_SYMBOL(neigh_create);
536
537static u32 pneigh_hash(const void *pkey, int key_len)
538{
539 u32 hash_val = *(u32 *)(pkey + key_len - 4);
540 hash_val ^= (hash_val >> 16);
541 hash_val ^= hash_val >> 8;
542 hash_val ^= hash_val >> 4;
543 hash_val &= PNEIGH_HASHMASK;
544 return hash_val;
545}
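/* Note (added; not in the original file): the proxy hash uses only the
 * last four bytes of the key and repeatedly folds the high bits into the
 * low bits (>> 16, >> 8, >> 4) before masking with PNEIGH_HASHMASK, so
 * the result is a bucket index in [0, 15].
 */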
546
547static struct pneigh_entry *__pneigh_lookup_1(struct pneigh_entry *n,
548 struct net *net,
549 const void *pkey,
550 int key_len,
551 struct net_device *dev)
552{
553 while (n) {
554 if (!memcmp(n->key, pkey, key_len) &&
555 net_eq(pneigh_net(n), net) &&
556 (n->dev == dev || !n->dev))
557 return n;
558 n = n->next;
559 }
560 return NULL;
561}
562
563struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl,
564 struct net *net, const void *pkey, struct net_device *dev)
565{
566 int key_len = tbl->key_len;
567 u32 hash_val = pneigh_hash(pkey, key_len);
568
569 return __pneigh_lookup_1(tbl->phash_buckets[hash_val],
570 net, pkey, key_len, dev);
571}
572EXPORT_SYMBOL_GPL(__pneigh_lookup);
573
574struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl,
575 struct net *net, const void *pkey,
576 struct net_device *dev, int creat)
577{
578 struct pneigh_entry *n;
579 int key_len = tbl->key_len;
580 u32 hash_val = pneigh_hash(pkey, key_len);
581
582 read_lock_bh(&tbl->lock);
583 n = __pneigh_lookup_1(tbl->phash_buckets[hash_val],
584 net, pkey, key_len, dev);
585 read_unlock_bh(&tbl->lock);
586
587 if (n || !creat)
588 goto out;
589
590 ASSERT_RTNL();
591
592 n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL);
593 if (!n)
594 goto out;
595
596 write_pnet(&n->net, hold_net(net));
597 memcpy(n->key, pkey, key_len);
598 n->dev = dev;
599 if (dev)
600 dev_hold(dev);
601
602 if (tbl->pconstructor && tbl->pconstructor(n)) {
603 if (dev)
604 dev_put(dev);
605 release_net(net);
606 kfree(n);
607 n = NULL;
608 goto out;
609 }
610
611 write_lock_bh(&tbl->lock);
612 n->next = tbl->phash_buckets[hash_val];
613 tbl->phash_buckets[hash_val] = n;
614 write_unlock_bh(&tbl->lock);
615out:
616 return n;
617}
618EXPORT_SYMBOL(pneigh_lookup);
619
620
621int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey,
622 struct net_device *dev)
623{
624 struct pneigh_entry *n, **np;
625 int key_len = tbl->key_len;
626 u32 hash_val = pneigh_hash(pkey, key_len);
627
628 write_lock_bh(&tbl->lock);
629 for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
630 np = &n->next) {
631 if (!memcmp(n->key, pkey, key_len) && n->dev == dev &&
632 net_eq(pneigh_net(n), net)) {
633 *np = n->next;
634 write_unlock_bh(&tbl->lock);
635 if (tbl->pdestructor)
636 tbl->pdestructor(n);
637 if (n->dev)
638 dev_put(n->dev);
639 release_net(pneigh_net(n));
640 kfree(n);
641 return 0;
642 }
643 }
644 write_unlock_bh(&tbl->lock);
645 return -ENOENT;
646}
647
648static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
649{
650 struct pneigh_entry *n, **np;
651 u32 h;
652
653 for (h = 0; h <= PNEIGH_HASHMASK; h++) {
654 np = &tbl->phash_buckets[h];
655 while ((n = *np) != NULL) {
656 if (!dev || n->dev == dev) {
657 *np = n->next;
658 if (tbl->pdestructor)
659 tbl->pdestructor(n);
660 if (n->dev)
661 dev_put(n->dev);
662 release_net(pneigh_net(n));
663 kfree(n);
664 continue;
665 }
666 np = &n->next;
667 }
668 }
669 return -ENOENT;
670}
671
672static void neigh_parms_destroy(struct neigh_parms *parms);
673
674static inline void neigh_parms_put(struct neigh_parms *parms)
675{
676 if (atomic_dec_and_test(&parms->refcnt))
677 neigh_parms_destroy(parms);
678}
679
680static void neigh_destroy_rcu(struct rcu_head *head)
681{
682 struct neighbour *neigh = container_of(head, struct neighbour, rcu);
683
684 kmem_cache_free(neigh->tbl->kmem_cachep, neigh);
685}
686/*
687 * The neighbour must already be out of the table.
688 *
689 */
690void neigh_destroy(struct neighbour *neigh)
691{
692 NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);
693
694 if (!neigh->dead) {
695 printk(KERN_WARNING
696 "Destroying alive neighbour %p\n", neigh);
697 dump_stack();
698 return;
699 }
700
701 if (neigh_del_timer(neigh))
702 printk(KERN_WARNING "Impossible event.\n");
703
704 skb_queue_purge(&neigh->arp_queue);
705
706 dev_put(neigh->dev);
707 neigh_parms_put(neigh->parms);
708
709 NEIGH_PRINTK2("neigh %p is destroyed.\n", neigh);
710
711 atomic_dec(&neigh->tbl->entries);
712 call_rcu(&neigh->rcu, neigh_destroy_rcu);
713}
714EXPORT_SYMBOL(neigh_destroy);
715
716/* Neighbour state is suspicious;
717 disable the fast path.
718
719 Called with the neighbour write-locked.
720 */
721static void neigh_suspect(struct neighbour *neigh)
722{
723 struct hh_cache *hh;
724
725 NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);
726
727 neigh->output = neigh->ops->output;
728
729 hh = &neigh->hh;
730 if (hh->hh_len)
731 hh->hh_output = neigh->ops->output;
732}
733
734/* Neighbour state is OK;
735 enable the fast path.
736
737 Called with the neighbour write-locked.
738 */
739static void neigh_connect(struct neighbour *neigh)
740{
741 struct hh_cache *hh;
742
743 NEIGH_PRINTK2("neigh %p is connected.\n", neigh);
744
745 neigh->output = neigh->ops->connected_output;
746
747 hh = &neigh->hh;
748 if (hh->hh_len)
749 hh->hh_output = dev_queue_xmit;
750}
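/* Summary (added; not in the original file) of the fast-path switch done
 * by neigh_suspect()/neigh_connect() above:
 *
 *	suspect: neigh->output = ops->output;           (slow, revalidates)
 *	         hh->hh_output = ops->output;
 *	connect: neigh->output = ops->connected_output; (fast)
 *	         hh->hh_output = dev_queue_xmit;        (fastest, cached hdr)
 *
 * i.e. while the entry is CONNECTED a cached header goes straight to
 * dev_queue_xmit(); otherwise every packet re-enters the resolving path.
 */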
751
752static void neigh_periodic_work(struct work_struct *work)
753{
754 struct neigh_table *tbl = container_of(work, struct neigh_table, gc_work.work);
755 struct neighbour *n;
756 struct neighbour __rcu **np;
757 unsigned int i;
758 struct neigh_hash_table *nht;
759
760 NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);
761
762 write_lock_bh(&tbl->lock);
763 nht = rcu_dereference_protected(tbl->nht,
764 lockdep_is_held(&tbl->lock));
765
766 /*
767 * periodically recompute ReachableTime from random function
768 */
769
770 if (time_after(jiffies, tbl->last_rand + 300 * HZ)) {
771 struct neigh_parms *p;
772 tbl->last_rand = jiffies;
773 for (p = &tbl->parms; p; p = p->next)
774 p->reachable_time =
775 neigh_rand_reach_time(p->base_reachable_time);
776 }
777
778 for (i = 0 ; i < (1 << nht->hash_shift); i++) {
779 np = &nht->hash_buckets[i];
780
781 while ((n = rcu_dereference_protected(*np,
782 lockdep_is_held(&tbl->lock))) != NULL) {
783 unsigned int state;
784
785 write_lock(&n->lock);
786
787 state = n->nud_state;
788 if (state & (NUD_PERMANENT | NUD_IN_TIMER)) {
789 write_unlock(&n->lock);
790 goto next_elt;
791 }
792
793 if (time_before(n->used, n->confirmed))
794 n->used = n->confirmed;
795
796 if (atomic_read(&n->refcnt) == 1 &&
797 (state == NUD_FAILED ||
798 time_after(jiffies, n->used + n->parms->gc_staletime))) {
799 *np = n->next;
800 n->dead = 1;
801 write_unlock(&n->lock);
802 neigh_cleanup_and_release(n);
803 continue;
804 }
805 write_unlock(&n->lock);
806
807next_elt:
808 np = &n->next;
809 }
810 /*
811 * It's fine to release lock here, even if hash table
812 * grows while we are preempted.
813 */
814 write_unlock_bh(&tbl->lock);
815 cond_resched();
816 write_lock_bh(&tbl->lock);
817 }
818 /* Cycle through all hash buckets every base_reachable_time/2 ticks.
819 * ARP entry timeouts range from 1/2 base_reachable_time to 3/2
820 * base_reachable_time.
821 */
822 schedule_delayed_work(&tbl->gc_work,
823 tbl->parms.base_reachable_time >> 1);
824 write_unlock_bh(&tbl->lock);
825}
826
827static __inline__ int neigh_max_probes(struct neighbour *n)
828{
829 struct neigh_parms *p = n->parms;
830 return (n->nud_state & NUD_PROBE) ?
831 p->ucast_probes :
832 p->ucast_probes + p->app_probes + p->mcast_probes;
833}
834
835static void neigh_invalidate(struct neighbour *neigh)
836 __releases(neigh->lock)
837 __acquires(neigh->lock)
838{
839 struct sk_buff *skb;
840
841 NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
842 NEIGH_PRINTK2("neigh %p is failed.\n", neigh);
843 neigh->updated = jiffies;
844
845 	/* This is a very delicate place. report_unreachable is a very
846 	   complicated routine. In particular, it can hit the same neighbour entry!
847
848 	   So we try to be careful and avoid an endless loop. --ANK
849 	 */
850 while (neigh->nud_state == NUD_FAILED &&
851 (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
852 write_unlock(&neigh->lock);
853 neigh->ops->error_report(neigh, skb);
854 write_lock(&neigh->lock);
855 }
856 skb_queue_purge(&neigh->arp_queue);
857}
858
859/* Called when a timer expires for a neighbour entry. */
860
861static void neigh_timer_handler(unsigned long arg)
862{
863 unsigned long now, next;
864 struct neighbour *neigh = (struct neighbour *)arg;
865 unsigned state;
866 int notify = 0;
867
868 write_lock(&neigh->lock);
869
870 state = neigh->nud_state;
871 now = jiffies;
872 next = now + HZ;
873
874 if (!(state & NUD_IN_TIMER)) {
875#ifndef CONFIG_SMP
876 printk(KERN_WARNING "neigh: timer & !nud_in_timer\n");
877#endif
878 goto out;
879 }
880
881 if (state & NUD_REACHABLE) {
882 if (time_before_eq(now,
883 neigh->confirmed + neigh->parms->reachable_time)) {
884 NEIGH_PRINTK2("neigh %p is still alive.\n", neigh);
885 next = neigh->confirmed + neigh->parms->reachable_time;
886 } else if (time_before_eq(now,
887 neigh->used + neigh->parms->delay_probe_time)) {
888 NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
889 neigh->nud_state = NUD_DELAY;
890 neigh->updated = jiffies;
891 neigh_suspect(neigh);
892 next = now + neigh->parms->delay_probe_time;
893 } else {
894 NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);
895 neigh->nud_state = NUD_STALE;
896 neigh->updated = jiffies;
897 neigh_suspect(neigh);
898 notify = 1;
899 }
900 } else if (state & NUD_DELAY) {
901 if (time_before_eq(now,
902 neigh->confirmed + neigh->parms->delay_probe_time)) {
903 NEIGH_PRINTK2("neigh %p is now reachable.\n", neigh);
904 neigh->nud_state = NUD_REACHABLE;
905 neigh->updated = jiffies;
906 neigh_connect(neigh);
907 notify = 1;
908 next = neigh->confirmed + neigh->parms->reachable_time;
909 } else {
910 NEIGH_PRINTK2("neigh %p is probed.\n", neigh);
911 neigh->nud_state = NUD_PROBE;
912 neigh->updated = jiffies;
913 atomic_set(&neigh->probes, 0);
914 next = now + neigh->parms->retrans_time;
915 }
916 } else {
917 /* NUD_PROBE|NUD_INCOMPLETE */
918 next = now + neigh->parms->retrans_time;
919 }
920
921 if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
922 atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
923 neigh->nud_state = NUD_FAILED;
924 notify = 1;
925 neigh_invalidate(neigh);
926 }
927
928 if (neigh->nud_state & NUD_IN_TIMER) {
929 if (time_before(next, jiffies + HZ/2))
930 next = jiffies + HZ/2;
931 if (!mod_timer(&neigh->timer, next))
932 neigh_hold(neigh);
933 }
934 if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
935 struct sk_buff *skb = skb_peek(&neigh->arp_queue);
936 /* keep skb alive even if arp_queue overflows */
937 if (skb)
938 skb = skb_copy(skb, GFP_ATOMIC);
939 write_unlock(&neigh->lock);
940 neigh->ops->solicit(neigh, skb);
941 atomic_inc(&neigh->probes);
942 kfree_skb(skb);
943 } else {
944out:
945 write_unlock(&neigh->lock);
946 }
947
948 if (notify)
949 neigh_update_notify(neigh);
950
951 neigh_release(neigh);
952}
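/* Sketch (added; not in the original file) of the NUD transitions driven
 * by the timer handler above:
 *
 *	REACHABLE --(reachable_time expires, recently used)--> DELAY
 *	REACHABLE --(reachable_time expires, idle)-----------> STALE
 *	DELAY --(confirmed within delay_probe_time)----------> REACHABLE
 *	DELAY --(not confirmed in time)----------------------> PROBE
 *	INCOMPLETE/PROBE --(probes exhausted)----------------> FAILED
 */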
953
954int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
955{
956 int rc;
957 unsigned long now;
958
959 write_lock_bh(&neigh->lock);
960
961 rc = 0;
962 if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
963 goto out_unlock_bh;
964
965 now = jiffies;
966
967 if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
968 if (neigh->parms->mcast_probes + neigh->parms->app_probes) {
969 atomic_set(&neigh->probes, neigh->parms->ucast_probes);
970 neigh->nud_state = NUD_INCOMPLETE;
971 neigh->updated = jiffies;
972 neigh_add_timer(neigh, now + 1);
973 } else {
974 neigh->nud_state = NUD_FAILED;
975 neigh->updated = jiffies;
976 write_unlock_bh(&neigh->lock);
977
978 kfree_skb(skb);
979 return 1;
980 }
981 } else if (neigh->nud_state & NUD_STALE) {
982 NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
983 neigh->nud_state = NUD_DELAY;
984 neigh->updated = jiffies;
985 neigh_add_timer(neigh,
986 jiffies + neigh->parms->delay_probe_time);
987 }
988
989 if (neigh->nud_state == NUD_INCOMPLETE) {
990 if (skb) {
991 if (skb_queue_len(&neigh->arp_queue) >=
992 neigh->parms->queue_len) {
993 struct sk_buff *buff;
994 buff = __skb_dequeue(&neigh->arp_queue);
995 kfree_skb(buff);
996 NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards);
997 }
998 skb_dst_force(skb);
999 __skb_queue_tail(&neigh->arp_queue, skb);
1000 }
1001 rc = 1;
1002 }
1003out_unlock_bh:
1004 write_unlock_bh(&neigh->lock);
1005 return rc;
1006}
1007EXPORT_SYMBOL(__neigh_event_send);
1008
1009static void neigh_update_hhs(struct neighbour *neigh)
1010{
1011 struct hh_cache *hh;
1012 void (*update)(struct hh_cache*, const struct net_device*, const unsigned char *)
1013 = NULL;
1014
1015 if (neigh->dev->header_ops)
1016 update = neigh->dev->header_ops->cache_update;
1017
1018 if (update) {
1019 hh = &neigh->hh;
1020 if (hh->hh_len) {
1021 write_seqlock_bh(&hh->hh_lock);
1022 update(hh, neigh->dev, neigh->ha);
1023 write_sequnlock_bh(&hh->hh_lock);
1024 }
1025 }
1026}
1027
1028
1029
1030/* Generic update routine.
1031 -- lladdr is the new lladdr, or NULL if none is supplied.
1032 -- new is the new state.
1033 -- flags
1034 NEIGH_UPDATE_F_OVERRIDE allows overriding an existing lladdr
1035 if the new one differs.
1036 NEIGH_UPDATE_F_WEAK_OVERRIDE will suspect an existing "connected"
1037 lladdr instead of overriding it
1038 if the new one differs.
1039 It also allows retaining the current state
1040 if the lladdr is unchanged.
1041 NEIGH_UPDATE_F_ADMIN means that the change is administrative.
1042
1043 NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows overriding an existing
1044 NTF_ROUTER flag.
1045 NEIGH_UPDATE_F_ISROUTER indicates whether the neighbour is known
1046 to be a router.
1047
1048 Caller MUST hold a reference count on the entry.
1049 */
1050
1051int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
1052 u32 flags)
1053{
1054 u8 old;
1055 int err;
1056 int notify = 0;
1057 struct net_device *dev;
1058 int update_isrouter = 0;
1059
1060 write_lock_bh(&neigh->lock);
1061
1062 dev = neigh->dev;
1063 old = neigh->nud_state;
1064 err = -EPERM;
1065
1066 if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
1067 (old & (NUD_NOARP | NUD_PERMANENT)))
1068 goto out;
1069
1070 if (!(new & NUD_VALID)) {
1071 neigh_del_timer(neigh);
1072 if (old & NUD_CONNECTED)
1073 neigh_suspect(neigh);
1074 neigh->nud_state = new;
1075 err = 0;
1076 notify = old & NUD_VALID;
1077 if ((old & (NUD_INCOMPLETE | NUD_PROBE)) &&
1078 (new & NUD_FAILED)) {
1079 neigh_invalidate(neigh);
1080 notify = 1;
1081 }
1082 goto out;
1083 }
1084
1085 /* Compare new lladdr with cached one */
1086 if (!dev->addr_len) {
1087 /* First case: device needs no address. */
1088 lladdr = neigh->ha;
1089 } else if (lladdr) {
1090 		/* The second case: something is already cached
1091 		   and a new address is proposed:
1092 		   - compare new & old
1093 		   - if they differ, check the override flag
1094 		 */
1095 if ((old & NUD_VALID) &&
1096 !memcmp(lladdr, neigh->ha, dev->addr_len))
1097 lladdr = neigh->ha;
1098 } else {
1099 /* No address is supplied; if we know something,
1100 use it, otherwise discard the request.
1101 */
1102 err = -EINVAL;
1103 if (!(old & NUD_VALID))
1104 goto out;
1105 lladdr = neigh->ha;
1106 }
1107
1108 if (new & NUD_CONNECTED)
1109 neigh->confirmed = jiffies;
1110 neigh->updated = jiffies;
1111
1112 	/* If the entry was valid and the address has not changed,
1113 	   do not change the entry state if the new one is STALE.
1114 	 */
1115 err = 0;
1116 update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
1117 if (old & NUD_VALID) {
1118 if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
1119 update_isrouter = 0;
1120 if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) &&
1121 (old & NUD_CONNECTED)) {
1122 lladdr = neigh->ha;
1123 new = NUD_STALE;
1124 } else
1125 goto out;
1126 } else {
1127 if (lladdr == neigh->ha && new == NUD_STALE &&
1128 ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) ||
1129 (old & NUD_CONNECTED))
1130 )
1131 new = old;
1132 }
1133 }
1134
1135 if (new != old) {
1136 neigh_del_timer(neigh);
1137 if (new & NUD_IN_TIMER)
1138 neigh_add_timer(neigh, (jiffies +
1139 ((new & NUD_REACHABLE) ?
1140 neigh->parms->reachable_time :
1141 0)));
1142 neigh->nud_state = new;
1143 }
1144
1145 if (lladdr != neigh->ha) {
1146 write_seqlock(&neigh->ha_lock);
1147 memcpy(&neigh->ha, lladdr, dev->addr_len);
1148 write_sequnlock(&neigh->ha_lock);
1149 neigh_update_hhs(neigh);
1150 if (!(new & NUD_CONNECTED))
1151 neigh->confirmed = jiffies -
1152 (neigh->parms->base_reachable_time << 1);
1153 notify = 1;
1154 }
1155 if (new == old)
1156 goto out;
1157 if (new & NUD_CONNECTED)
1158 neigh_connect(neigh);
1159 else
1160 neigh_suspect(neigh);
1161 if (!(old & NUD_VALID)) {
1162 struct sk_buff *skb;
1163
1164 		/* Again: avoid an endless loop if something went wrong */
1165
1166 while (neigh->nud_state & NUD_VALID &&
1167 (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
1168 struct neighbour *n1 = neigh;
1169 write_unlock_bh(&neigh->lock);
1170 /* On shaper/eql skb->dst->neighbour != neigh :( */
1171 if (skb_dst(skb) && skb_dst(skb)->neighbour)
1172 n1 = skb_dst(skb)->neighbour;
1173 n1->output(skb);
1174 write_lock_bh(&neigh->lock);
1175 }
1176 skb_queue_purge(&neigh->arp_queue);
1177 }
1178out:
1179 if (update_isrouter) {
1180 neigh->flags = (flags & NEIGH_UPDATE_F_ISROUTER) ?
1181 (neigh->flags | NTF_ROUTER) :
1182 (neigh->flags & ~NTF_ROUTER);
1183 }
1184 write_unlock_bh(&neigh->lock);
1185
1186 if (notify)
1187 neigh_update_notify(neigh);
1188
1189 return err;
1190}
1191EXPORT_SYMBOL(neigh_update);
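/* A usage sketch (added; not in the original file): how a protocol might
 * confirm a neighbour when a reply carrying 'lladdr' arrives. The helper
 * is hypothetical; compare neigh_event_ns() below for an in-tree caller.
 */
#if 0	/* illustration only */
static void example_confirm(struct neighbour *n, const u8 *lladdr)
{
	/* the caller must already hold a reference on 'n' (see above) */
	neigh_update(n, lladdr, NUD_REACHABLE,
		     NEIGH_UPDATE_F_OVERRIDE | NEIGH_UPDATE_F_WEAK_OVERRIDE);
}
#endif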
1192
1193struct neighbour *neigh_event_ns(struct neigh_table *tbl,
1194 u8 *lladdr, void *saddr,
1195 struct net_device *dev)
1196{
1197 struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
1198 lladdr || !dev->addr_len);
1199 if (neigh)
1200 neigh_update(neigh, lladdr, NUD_STALE,
1201 NEIGH_UPDATE_F_OVERRIDE);
1202 return neigh;
1203}
1204EXPORT_SYMBOL(neigh_event_ns);
1205
1206/* Takes write_lock_bh(&n->lock) itself; the caller must not hold n->lock. */
1207static void neigh_hh_init(struct neighbour *n, struct dst_entry *dst)
1208{
1209 struct net_device *dev = dst->dev;
1210 __be16 prot = dst->ops->protocol;
1211 struct hh_cache *hh = &n->hh;
1212
1213 write_lock_bh(&n->lock);
1214
1215 /* Only one thread can come in here and initialize the
1216 * hh_cache entry.
1217 */
1218 if (hh->hh_len)
1219 goto end;
1220
1221 if (dev->header_ops->cache(n, hh, prot))
1222 goto end;
1223
1224 if (n->nud_state & NUD_CONNECTED)
1225 hh->hh_output = dev_queue_xmit;
1226 else
1227 hh->hh_output = n->ops->output;
1228
1229end:
1230 write_unlock_bh(&n->lock);
1231}
1232
1233/* This function can be used in contexts where only the old dev_queue_xmit
1234 * worked, e.g. if you want to override the normal output path (eql, shaper)
1235 * but resolution has not been made yet.
1236 */
1237
1238int neigh_compat_output(struct sk_buff *skb)
1239{
1240 struct net_device *dev = skb->dev;
1241
1242 __skb_pull(skb, skb_network_offset(skb));
1243
1244 if (dev_hard_header(skb, dev, ntohs(skb->protocol), NULL, NULL,
1245 skb->len) < 0 &&
1246 dev->header_ops->rebuild(skb))
1247 return 0;
1248
1249 return dev_queue_xmit(skb);
1250}
1251EXPORT_SYMBOL(neigh_compat_output);
1252
1253/* Slow and careful. */
1254
1255int neigh_resolve_output(struct sk_buff *skb)
1256{
1257 struct dst_entry *dst = skb_dst(skb);
1258 struct neighbour *neigh;
1259 int rc = 0;
1260
1261 if (!dst || !(neigh = dst->neighbour))
1262 goto discard;
1263
1264 __skb_pull(skb, skb_network_offset(skb));
1265
1266 if (!neigh_event_send(neigh, skb)) {
1267 int err;
1268 struct net_device *dev = neigh->dev;
1269 unsigned int seq;
1270
1271 if (dev->header_ops->cache && !neigh->hh.hh_len)
1272 neigh_hh_init(neigh, dst);
1273
1274 do {
1275 seq = read_seqbegin(&neigh->ha_lock);
1276 err = dev_hard_header(skb, dev, ntohs(skb->protocol),
1277 neigh->ha, NULL, skb->len);
1278 } while (read_seqretry(&neigh->ha_lock, seq));
1279
1280 if (err >= 0)
1281 rc = neigh->ops->queue_xmit(skb);
1282 else
1283 goto out_kfree_skb;
1284 }
1285out:
1286 return rc;
1287discard:
1288 NEIGH_PRINTK1("neigh_resolve_output: dst=%p neigh=%p\n",
1289 dst, dst ? dst->neighbour : NULL);
1290out_kfree_skb:
1291 rc = -EINVAL;
1292 kfree_skb(skb);
1293 goto out;
1294}
1295EXPORT_SYMBOL(neigh_resolve_output);
1296
1297/* As fast as possible without hh cache */
1298
1299int neigh_connected_output(struct sk_buff *skb)
1300{
1301 int err;
1302 struct dst_entry *dst = skb_dst(skb);
1303 struct neighbour *neigh = dst->neighbour;
1304 struct net_device *dev = neigh->dev;
1305 unsigned int seq;
1306
1307 __skb_pull(skb, skb_network_offset(skb));
1308
1309 do {
1310 seq = read_seqbegin(&neigh->ha_lock);
1311 err = dev_hard_header(skb, dev, ntohs(skb->protocol),
1312 neigh->ha, NULL, skb->len);
1313 } while (read_seqretry(&neigh->ha_lock, seq));
1314
1315 if (err >= 0)
1316 err = neigh->ops->queue_xmit(skb);
1317 else {
1318 err = -EINVAL;
1319 kfree_skb(skb);
1320 }
1321 return err;
1322}
1323EXPORT_SYMBOL(neigh_connected_output);
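/* Illustration (added; not in the original file) of the ha_lock seqlock
 * pattern used by both output paths above: neigh_update() is the writer
 * (write_seqlock(&neigh->ha_lock)); readers copy and retry rather than
 * block.
 */
#if 0	/* illustration only */
static void example_copy_ha(struct neighbour *neigh, u8 *dst, int len)
{
	unsigned int seq;

	do {
		seq = read_seqbegin(&neigh->ha_lock);
		memcpy(dst, neigh->ha, len);	/* may race with an update */
	} while (read_seqretry(&neigh->ha_lock, seq));	/* retry if it did */
}
#endif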
1324
1325static void neigh_proxy_process(unsigned long arg)
1326{
1327 struct neigh_table *tbl = (struct neigh_table *)arg;
1328 long sched_next = 0;
1329 unsigned long now = jiffies;
1330 struct sk_buff *skb, *n;
1331
1332 spin_lock(&tbl->proxy_queue.lock);
1333
1334 skb_queue_walk_safe(&tbl->proxy_queue, skb, n) {
1335 long tdif = NEIGH_CB(skb)->sched_next - now;
1336
1337 if (tdif <= 0) {
1338 struct net_device *dev = skb->dev;
1339 __skb_unlink(skb, &tbl->proxy_queue);
1340 if (tbl->proxy_redo && netif_running(dev))
1341 tbl->proxy_redo(skb);
1342 else
1343 kfree_skb(skb);
1344
1345 dev_put(dev);
1346 } else if (!sched_next || tdif < sched_next)
1347 sched_next = tdif;
1348 }
1349 del_timer(&tbl->proxy_timer);
1350 if (sched_next)
1351 mod_timer(&tbl->proxy_timer, jiffies + sched_next);
1352 spin_unlock(&tbl->proxy_queue.lock);
1353}
1354
1355void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
1356 struct sk_buff *skb)
1357{
1358 unsigned long now = jiffies;
1359 unsigned long sched_next = now + (net_random() % p->proxy_delay);
1360
1361 if (tbl->proxy_queue.qlen > p->proxy_qlen) {
1362 kfree_skb(skb);
1363 return;
1364 }
1365
1366 NEIGH_CB(skb)->sched_next = sched_next;
1367 NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;
1368
1369 spin_lock(&tbl->proxy_queue.lock);
1370 if (del_timer(&tbl->proxy_timer)) {
1371 if (time_before(tbl->proxy_timer.expires, sched_next))
1372 sched_next = tbl->proxy_timer.expires;
1373 }
1374 skb_dst_drop(skb);
1375 dev_hold(skb->dev);
1376 __skb_queue_tail(&tbl->proxy_queue, skb);
1377 mod_timer(&tbl->proxy_timer, sched_next);
1378 spin_unlock(&tbl->proxy_queue.lock);
1379}
1380EXPORT_SYMBOL(pneigh_enqueue);
1381
1382static inline struct neigh_parms *lookup_neigh_parms(struct neigh_table *tbl,
1383 struct net *net, int ifindex)
1384{
1385 struct neigh_parms *p;
1386
1387 for (p = &tbl->parms; p; p = p->next) {
1388 if ((p->dev && p->dev->ifindex == ifindex && net_eq(neigh_parms_net(p), net)) ||
1389 (!p->dev && !ifindex))
1390 return p;
1391 }
1392
1393 return NULL;
1394}
1395
1396struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
1397 struct neigh_table *tbl)
1398{
1399 struct neigh_parms *p, *ref;
1400 struct net *net = dev_net(dev);
1401 const struct net_device_ops *ops = dev->netdev_ops;
1402
1403 ref = lookup_neigh_parms(tbl, net, 0);
1404 if (!ref)
1405 return NULL;
1406
1407 p = kmemdup(ref, sizeof(*p), GFP_KERNEL);
1408 if (p) {
1409 p->tbl = tbl;
1410 atomic_set(&p->refcnt, 1);
1411 p->reachable_time =
1412 neigh_rand_reach_time(p->base_reachable_time);
1413
1414 if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) {
1415 kfree(p);
1416 return NULL;
1417 }
1418
1419 dev_hold(dev);
1420 p->dev = dev;
1421 write_pnet(&p->net, hold_net(net));
1422 p->sysctl_table = NULL;
1423 write_lock_bh(&tbl->lock);
1424 p->next = tbl->parms.next;
1425 tbl->parms.next = p;
1426 write_unlock_bh(&tbl->lock);
1427 }
1428 return p;
1429}
1430EXPORT_SYMBOL(neigh_parms_alloc);
1431
1432static void neigh_rcu_free_parms(struct rcu_head *head)
1433{
1434 struct neigh_parms *parms =
1435 container_of(head, struct neigh_parms, rcu_head);
1436
1437 neigh_parms_put(parms);
1438}
1439
1440void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
1441{
1442 struct neigh_parms **p;
1443
1444 if (!parms || parms == &tbl->parms)
1445 return;
1446 write_lock_bh(&tbl->lock);
1447 for (p = &tbl->parms.next; *p; p = &(*p)->next) {
1448 if (*p == parms) {
1449 *p = parms->next;
1450 parms->dead = 1;
1451 write_unlock_bh(&tbl->lock);
1452 if (parms->dev)
1453 dev_put(parms->dev);
1454 call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
1455 return;
1456 }
1457 }
1458 write_unlock_bh(&tbl->lock);
1459 NEIGH_PRINTK1("neigh_parms_release: not found\n");
1460}
1461EXPORT_SYMBOL(neigh_parms_release);
1462
1463static void neigh_parms_destroy(struct neigh_parms *parms)
1464{
1465 release_net(neigh_parms_net(parms));
1466 kfree(parms);
1467}
1468
1469static struct lock_class_key neigh_table_proxy_queue_class;
1470
1471void neigh_table_init_no_netlink(struct neigh_table *tbl)
1472{
1473 unsigned long now = jiffies;
1474 unsigned long phsize;
1475
1476 write_pnet(&tbl->parms.net, &init_net);
1477 atomic_set(&tbl->parms.refcnt, 1);
1478 tbl->parms.reachable_time =
1479 neigh_rand_reach_time(tbl->parms.base_reachable_time);
1480
1481 if (!tbl->kmem_cachep)
1482 tbl->kmem_cachep =
1483 kmem_cache_create(tbl->id, tbl->entry_size, 0,
1484 SLAB_HWCACHE_ALIGN|SLAB_PANIC,
1485 NULL);
1486 tbl->stats = alloc_percpu(struct neigh_statistics);
1487 if (!tbl->stats)
1488 panic("cannot create neighbour cache statistics");
1489
1490#ifdef CONFIG_PROC_FS
1491 if (!proc_create_data(tbl->id, 0, init_net.proc_net_stat,
1492 &neigh_stat_seq_fops, tbl))
1493 panic("cannot create neighbour proc dir entry");
1494#endif
1495
1496 RCU_INIT_POINTER(tbl->nht, neigh_hash_alloc(3));
1497
1498 phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
1499 tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);
1500
1501 if (!tbl->nht || !tbl->phash_buckets)
1502 panic("cannot allocate neighbour cache hashes");
1503
1504 rwlock_init(&tbl->lock);
1505 INIT_DELAYED_WORK_DEFERRABLE(&tbl->gc_work, neigh_periodic_work);
1506 schedule_delayed_work(&tbl->gc_work, tbl->parms.reachable_time);
1507 setup_timer(&tbl->proxy_timer, neigh_proxy_process, (unsigned long)tbl);
1508 skb_queue_head_init_class(&tbl->proxy_queue,
1509 &neigh_table_proxy_queue_class);
1510
1511 tbl->last_flush = now;
1512 tbl->last_rand = now + tbl->parms.reachable_time * 20;
1513}
1514EXPORT_SYMBOL(neigh_table_init_no_netlink);
1515
1516void neigh_table_init(struct neigh_table *tbl)
1517{
1518 struct neigh_table *tmp;
1519
1520 neigh_table_init_no_netlink(tbl);
1521 write_lock(&neigh_tbl_lock);
1522 for (tmp = neigh_tables; tmp; tmp = tmp->next) {
1523 if (tmp->family == tbl->family)
1524 break;
1525 }
1526 tbl->next = neigh_tables;
1527 neigh_tables = tbl;
1528 write_unlock(&neigh_tbl_lock);
1529
1530 if (unlikely(tmp)) {
1531 printk(KERN_ERR "NEIGH: Registering multiple tables for "
1532 "family %d\n", tbl->family);
1533 dump_stack();
1534 }
1535}
1536EXPORT_SYMBOL(neigh_table_init);
1537
1538int neigh_table_clear(struct neigh_table *tbl)
1539{
1540 struct neigh_table **tp;
1541
1542	/* This is not clean... fix it so the IPv6 module can be unloaded safely */
1543 cancel_delayed_work_sync(&tbl->gc_work);
1544 del_timer_sync(&tbl->proxy_timer);
1545 pneigh_queue_purge(&tbl->proxy_queue);
1546 neigh_ifdown(tbl, NULL);
1547 if (atomic_read(&tbl->entries))
1548 printk(KERN_CRIT "neighbour leakage\n");
1549 write_lock(&neigh_tbl_lock);
1550 for (tp = &neigh_tables; *tp; tp = &(*tp)->next) {
1551 if (*tp == tbl) {
1552 *tp = tbl->next;
1553 break;
1554 }
1555 }
1556 write_unlock(&neigh_tbl_lock);
1557
1558 call_rcu(&rcu_dereference_protected(tbl->nht, 1)->rcu,
1559 neigh_hash_free_rcu);
1560 tbl->nht = NULL;
1561
1562 kfree(tbl->phash_buckets);
1563 tbl->phash_buckets = NULL;
1564
1565 remove_proc_entry(tbl->id, init_net.proc_net_stat);
1566
1567 free_percpu(tbl->stats);
1568 tbl->stats = NULL;
1569
1570 kmem_cache_destroy(tbl->kmem_cachep);
1571 tbl->kmem_cachep = NULL;
1572
1573 return 0;
1574}
1575EXPORT_SYMBOL(neigh_table_clear);
1576
1577static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1578{
1579 struct net *net = sock_net(skb->sk);
1580 struct ndmsg *ndm;
1581 struct nlattr *dst_attr;
1582 struct neigh_table *tbl;
1583 struct net_device *dev = NULL;
1584 int err = -EINVAL;
1585
1586 ASSERT_RTNL();
1587 if (nlmsg_len(nlh) < sizeof(*ndm))
1588 goto out;
1589
1590 dst_attr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_DST);
1591 if (dst_attr == NULL)
1592 goto out;
1593
1594 ndm = nlmsg_data(nlh);
1595 if (ndm->ndm_ifindex) {
1596 dev = __dev_get_by_index(net, ndm->ndm_ifindex);
1597 if (dev == NULL) {
1598 err = -ENODEV;
1599 goto out;
1600 }
1601 }
1602
1603 read_lock(&neigh_tbl_lock);
1604 for (tbl = neigh_tables; tbl; tbl = tbl->next) {
1605 struct neighbour *neigh;
1606
1607 if (tbl->family != ndm->ndm_family)
1608 continue;
1609 read_unlock(&neigh_tbl_lock);
1610
1611 if (nla_len(dst_attr) < tbl->key_len)
1612 goto out;
1613
1614 if (ndm->ndm_flags & NTF_PROXY) {
1615 err = pneigh_delete(tbl, net, nla_data(dst_attr), dev);
1616 goto out;
1617 }
1618
1619 if (dev == NULL)
1620 goto out;
1621
1622 neigh = neigh_lookup(tbl, nla_data(dst_attr), dev);
1623 if (neigh == NULL) {
1624 err = -ENOENT;
1625 goto out;
1626 }
1627
1628 err = neigh_update(neigh, NULL, NUD_FAILED,
1629 NEIGH_UPDATE_F_OVERRIDE |
1630 NEIGH_UPDATE_F_ADMIN);
1631 neigh_release(neigh);
1632 goto out;
1633 }
1634 read_unlock(&neigh_tbl_lock);
1635 err = -EAFNOSUPPORT;
1636
1637out:
1638 return err;
1639}
1640
1641static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1642{
1643 struct net *net = sock_net(skb->sk);
1644 struct ndmsg *ndm;
1645 struct nlattr *tb[NDA_MAX+1];
1646 struct neigh_table *tbl;
1647 struct net_device *dev = NULL;
1648 int err;
1649
1650 ASSERT_RTNL();
1651 err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL);
1652 if (err < 0)
1653 goto out;
1654
1655 err = -EINVAL;
1656 if (tb[NDA_DST] == NULL)
1657 goto out;
1658
1659 ndm = nlmsg_data(nlh);
1660 if (ndm->ndm_ifindex) {
1661 dev = __dev_get_by_index(net, ndm->ndm_ifindex);
1662 if (dev == NULL) {
1663 err = -ENODEV;
1664 goto out;
1665 }
1666
1667 if (tb[NDA_LLADDR] && nla_len(tb[NDA_LLADDR]) < dev->addr_len)
1668 goto out;
1669 }
1670
1671 read_lock(&neigh_tbl_lock);
1672 for (tbl = neigh_tables; tbl; tbl = tbl->next) {
1673 int flags = NEIGH_UPDATE_F_ADMIN | NEIGH_UPDATE_F_OVERRIDE;
1674 struct neighbour *neigh;
1675 void *dst, *lladdr;
1676
1677 if (tbl->family != ndm->ndm_family)
1678 continue;
1679 read_unlock(&neigh_tbl_lock);
1680
1681 if (nla_len(tb[NDA_DST]) < tbl->key_len)
1682 goto out;
1683 dst = nla_data(tb[NDA_DST]);
1684 lladdr = tb[NDA_LLADDR] ? nla_data(tb[NDA_LLADDR]) : NULL;
1685
1686 if (ndm->ndm_flags & NTF_PROXY) {
1687 struct pneigh_entry *pn;
1688
1689 err = -ENOBUFS;
1690 pn = pneigh_lookup(tbl, net, dst, dev, 1);
1691 if (pn) {
1692 pn->flags = ndm->ndm_flags;
1693 err = 0;
1694 }
1695 goto out;
1696 }
1697
1698 if (dev == NULL)
1699 goto out;
1700
1701 neigh = neigh_lookup(tbl, dst, dev);
1702 if (neigh == NULL) {
1703 if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
1704 err = -ENOENT;
1705 goto out;
1706 }
1707
1708 neigh = __neigh_lookup_errno(tbl, dst, dev);
1709 if (IS_ERR(neigh)) {
1710 err = PTR_ERR(neigh);
1711 goto out;
1712 }
1713 } else {
1714 if (nlh->nlmsg_flags & NLM_F_EXCL) {
1715 err = -EEXIST;
1716 neigh_release(neigh);
1717 goto out;
1718 }
1719
1720 if (!(nlh->nlmsg_flags & NLM_F_REPLACE))
1721 flags &= ~NEIGH_UPDATE_F_OVERRIDE;
1722 }
1723
1724 if (ndm->ndm_flags & NTF_USE) {
1725 neigh_event_send(neigh, NULL);
1726 err = 0;
1727 } else
1728 err = neigh_update(neigh, lladdr, ndm->ndm_state, flags);
1729 neigh_release(neigh);
1730 goto out;
1731 }
1732
1733 read_unlock(&neigh_tbl_lock);
1734 err = -EAFNOSUPPORT;
1735out:
1736 return err;
1737}
1738
1739static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
1740{
1741 struct nlattr *nest;
1742
1743 nest = nla_nest_start(skb, NDTA_PARMS);
1744 if (nest == NULL)
1745 return -ENOBUFS;
1746
1747 if (parms->dev)
1748 NLA_PUT_U32(skb, NDTPA_IFINDEX, parms->dev->ifindex);
1749
1750 NLA_PUT_U32(skb, NDTPA_REFCNT, atomic_read(&parms->refcnt));
1751 NLA_PUT_U32(skb, NDTPA_QUEUE_LEN, parms->queue_len);
1752 NLA_PUT_U32(skb, NDTPA_PROXY_QLEN, parms->proxy_qlen);
1753 NLA_PUT_U32(skb, NDTPA_APP_PROBES, parms->app_probes);
1754 NLA_PUT_U32(skb, NDTPA_UCAST_PROBES, parms->ucast_probes);
1755 NLA_PUT_U32(skb, NDTPA_MCAST_PROBES, parms->mcast_probes);
1756 NLA_PUT_MSECS(skb, NDTPA_REACHABLE_TIME, parms->reachable_time);
1757 NLA_PUT_MSECS(skb, NDTPA_BASE_REACHABLE_TIME,
1758 parms->base_reachable_time);
1759 NLA_PUT_MSECS(skb, NDTPA_GC_STALETIME, parms->gc_staletime);
1760 NLA_PUT_MSECS(skb, NDTPA_DELAY_PROBE_TIME, parms->delay_probe_time);
1761 NLA_PUT_MSECS(skb, NDTPA_RETRANS_TIME, parms->retrans_time);
1762 NLA_PUT_MSECS(skb, NDTPA_ANYCAST_DELAY, parms->anycast_delay);
1763 NLA_PUT_MSECS(skb, NDTPA_PROXY_DELAY, parms->proxy_delay);
1764 NLA_PUT_MSECS(skb, NDTPA_LOCKTIME, parms->locktime);
1765
1766 return nla_nest_end(skb, nest);
1767
1768nla_put_failure:
1769 nla_nest_cancel(skb, nest);
1770 return -EMSGSIZE;
1771}
1772
1773static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
1774 u32 pid, u32 seq, int type, int flags)
1775{
1776 struct nlmsghdr *nlh;
1777 struct ndtmsg *ndtmsg;
1778
1779 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
1780 if (nlh == NULL)
1781 return -EMSGSIZE;
1782
1783 ndtmsg = nlmsg_data(nlh);
1784
1785 read_lock_bh(&tbl->lock);
1786 ndtmsg->ndtm_family = tbl->family;
1787 ndtmsg->ndtm_pad1 = 0;
1788 ndtmsg->ndtm_pad2 = 0;
1789
1790 NLA_PUT_STRING(skb, NDTA_NAME, tbl->id);
1791 NLA_PUT_MSECS(skb, NDTA_GC_INTERVAL, tbl->gc_interval);
1792 NLA_PUT_U32(skb, NDTA_THRESH1, tbl->gc_thresh1);
1793 NLA_PUT_U32(skb, NDTA_THRESH2, tbl->gc_thresh2);
1794 NLA_PUT_U32(skb, NDTA_THRESH3, tbl->gc_thresh3);
1795
1796 {
1797 unsigned long now = jiffies;
1798 unsigned int flush_delta = now - tbl->last_flush;
1799 unsigned int rand_delta = now - tbl->last_rand;
1800 struct neigh_hash_table *nht;
1801 struct ndt_config ndc = {
1802 .ndtc_key_len = tbl->key_len,
1803 .ndtc_entry_size = tbl->entry_size,
1804 .ndtc_entries = atomic_read(&tbl->entries),
1805 .ndtc_last_flush = jiffies_to_msecs(flush_delta),
1806 .ndtc_last_rand = jiffies_to_msecs(rand_delta),
1807 .ndtc_proxy_qlen = tbl->proxy_queue.qlen,
1808 };
1809
1810 rcu_read_lock_bh();
1811 nht = rcu_dereference_bh(tbl->nht);
1812 ndc.ndtc_hash_rnd = nht->hash_rnd;
1813 ndc.ndtc_hash_mask = ((1 << nht->hash_shift) - 1);
1814 rcu_read_unlock_bh();
1815
1816 NLA_PUT(skb, NDTA_CONFIG, sizeof(ndc), &ndc);
1817 }
1818
1819 {
1820 int cpu;
1821 struct ndt_stats ndst;
1822
1823 memset(&ndst, 0, sizeof(ndst));
1824
1825 for_each_possible_cpu(cpu) {
1826 struct neigh_statistics *st;
1827
1828 st = per_cpu_ptr(tbl->stats, cpu);
1829 ndst.ndts_allocs += st->allocs;
1830 ndst.ndts_destroys += st->destroys;
1831 ndst.ndts_hash_grows += st->hash_grows;
1832 ndst.ndts_res_failed += st->res_failed;
1833 ndst.ndts_lookups += st->lookups;
1834 ndst.ndts_hits += st->hits;
1835 ndst.ndts_rcv_probes_mcast += st->rcv_probes_mcast;
1836 ndst.ndts_rcv_probes_ucast += st->rcv_probes_ucast;
1837 ndst.ndts_periodic_gc_runs += st->periodic_gc_runs;
1838 ndst.ndts_forced_gc_runs += st->forced_gc_runs;
1839 }
1840
1841 NLA_PUT(skb, NDTA_STATS, sizeof(ndst), &ndst);
1842 }
1843
1844 BUG_ON(tbl->parms.dev);
1845 if (neightbl_fill_parms(skb, &tbl->parms) < 0)
1846 goto nla_put_failure;
1847
1848 read_unlock_bh(&tbl->lock);
1849 return nlmsg_end(skb, nlh);
1850
1851nla_put_failure:
1852 read_unlock_bh(&tbl->lock);
1853 nlmsg_cancel(skb, nlh);
1854 return -EMSGSIZE;
1855}
1856
1857static int neightbl_fill_param_info(struct sk_buff *skb,
1858 struct neigh_table *tbl,
1859 struct neigh_parms *parms,
1860 u32 pid, u32 seq, int type,
1861 unsigned int flags)
1862{
1863 struct ndtmsg *ndtmsg;
1864 struct nlmsghdr *nlh;
1865
1866 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
1867 if (nlh == NULL)
1868 return -EMSGSIZE;
1869
1870 ndtmsg = nlmsg_data(nlh);
1871
1872 read_lock_bh(&tbl->lock);
1873 ndtmsg->ndtm_family = tbl->family;
1874 ndtmsg->ndtm_pad1 = 0;
1875 ndtmsg->ndtm_pad2 = 0;
1876
1877 if (nla_put_string(skb, NDTA_NAME, tbl->id) < 0 ||
1878 neightbl_fill_parms(skb, parms) < 0)
1879 goto errout;
1880
1881 read_unlock_bh(&tbl->lock);
1882 return nlmsg_end(skb, nlh);
1883errout:
1884 read_unlock_bh(&tbl->lock);
1885 nlmsg_cancel(skb, nlh);
1886 return -EMSGSIZE;
1887}
1888
1889static const struct nla_policy nl_neightbl_policy[NDTA_MAX+1] = {
1890 [NDTA_NAME] = { .type = NLA_STRING },
1891 [NDTA_THRESH1] = { .type = NLA_U32 },
1892 [NDTA_THRESH2] = { .type = NLA_U32 },
1893 [NDTA_THRESH3] = { .type = NLA_U32 },
1894 [NDTA_GC_INTERVAL] = { .type = NLA_U64 },
1895 [NDTA_PARMS] = { .type = NLA_NESTED },
1896};
1897
1898static const struct nla_policy nl_ntbl_parm_policy[NDTPA_MAX+1] = {
1899 [NDTPA_IFINDEX] = { .type = NLA_U32 },
1900 [NDTPA_QUEUE_LEN] = { .type = NLA_U32 },
1901 [NDTPA_PROXY_QLEN] = { .type = NLA_U32 },
1902 [NDTPA_APP_PROBES] = { .type = NLA_U32 },
1903 [NDTPA_UCAST_PROBES] = { .type = NLA_U32 },
1904 [NDTPA_MCAST_PROBES] = { .type = NLA_U32 },
1905 [NDTPA_BASE_REACHABLE_TIME] = { .type = NLA_U64 },
1906 [NDTPA_GC_STALETIME] = { .type = NLA_U64 },
1907 [NDTPA_DELAY_PROBE_TIME] = { .type = NLA_U64 },
1908 [NDTPA_RETRANS_TIME] = { .type = NLA_U64 },
1909 [NDTPA_ANYCAST_DELAY] = { .type = NLA_U64 },
1910 [NDTPA_PROXY_DELAY] = { .type = NLA_U64 },
1911 [NDTPA_LOCKTIME] = { .type = NLA_U64 },
1912};
1913
1914static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1915{
1916 struct net *net = sock_net(skb->sk);
1917 struct neigh_table *tbl;
1918 struct ndtmsg *ndtmsg;
1919 struct nlattr *tb[NDTA_MAX+1];
1920 int err;
1921
1922 err = nlmsg_parse(nlh, sizeof(*ndtmsg), tb, NDTA_MAX,
1923 nl_neightbl_policy);
1924 if (err < 0)
1925 goto errout;
1926
1927 if (tb[NDTA_NAME] == NULL) {
1928 err = -EINVAL;
1929 goto errout;
1930 }
1931
1932 ndtmsg = nlmsg_data(nlh);
1933 read_lock(&neigh_tbl_lock);
1934 for (tbl = neigh_tables; tbl; tbl = tbl->next) {
1935 if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
1936 continue;
1937
1938 if (nla_strcmp(tb[NDTA_NAME], tbl->id) == 0)
1939 break;
1940 }
1941
1942 if (tbl == NULL) {
1943 err = -ENOENT;
1944 goto errout_locked;
1945 }
1946
1947 /*
1948 * We acquire tbl->lock to be nice to the periodic timers and
1949 * make sure they always see a consistent set of values.
1950 */
1951 write_lock_bh(&tbl->lock);
1952
1953 if (tb[NDTA_PARMS]) {
1954 struct nlattr *tbp[NDTPA_MAX+1];
1955 struct neigh_parms *p;
1956 int i, ifindex = 0;
1957
1958 err = nla_parse_nested(tbp, NDTPA_MAX, tb[NDTA_PARMS],
1959 nl_ntbl_parm_policy);
1960 if (err < 0)
1961 goto errout_tbl_lock;
1962
1963 if (tbp[NDTPA_IFINDEX])
1964 ifindex = nla_get_u32(tbp[NDTPA_IFINDEX]);
1965
1966 p = lookup_neigh_parms(tbl, net, ifindex);
1967 if (p == NULL) {
1968 err = -ENOENT;
1969 goto errout_tbl_lock;
1970 }
1971
1972 for (i = 1; i <= NDTPA_MAX; i++) {
1973 if (tbp[i] == NULL)
1974 continue;
1975
1976 switch (i) {
1977 case NDTPA_QUEUE_LEN:
1978 p->queue_len = nla_get_u32(tbp[i]);
1979 break;
1980 case NDTPA_PROXY_QLEN:
1981 p->proxy_qlen = nla_get_u32(tbp[i]);
1982 break;
1983 case NDTPA_APP_PROBES:
1984 p->app_probes = nla_get_u32(tbp[i]);
1985 break;
1986 case NDTPA_UCAST_PROBES:
1987 p->ucast_probes = nla_get_u32(tbp[i]);
1988 break;
1989 case NDTPA_MCAST_PROBES:
1990 p->mcast_probes = nla_get_u32(tbp[i]);
1991 break;
1992 case NDTPA_BASE_REACHABLE_TIME:
1993 p->base_reachable_time = nla_get_msecs(tbp[i]);
1994 break;
1995 case NDTPA_GC_STALETIME:
1996 p->gc_staletime = nla_get_msecs(tbp[i]);
1997 break;
1998 case NDTPA_DELAY_PROBE_TIME:
1999 p->delay_probe_time = nla_get_msecs(tbp[i]);
2000 break;
2001 case NDTPA_RETRANS_TIME:
2002 p->retrans_time = nla_get_msecs(tbp[i]);
2003 break;
2004 case NDTPA_ANYCAST_DELAY:
2005 p->anycast_delay = nla_get_msecs(tbp[i]);
2006 break;
2007 case NDTPA_PROXY_DELAY:
2008 p->proxy_delay = nla_get_msecs(tbp[i]);
2009 break;
2010 case NDTPA_LOCKTIME:
2011 p->locktime = nla_get_msecs(tbp[i]);
2012 break;
2013 }
2014 }
2015 }
2016
2017 if (tb[NDTA_THRESH1])
2018 tbl->gc_thresh1 = nla_get_u32(tb[NDTA_THRESH1]);
2019
2020 if (tb[NDTA_THRESH2])
2021 tbl->gc_thresh2 = nla_get_u32(tb[NDTA_THRESH2]);
2022
2023 if (tb[NDTA_THRESH3])
2024 tbl->gc_thresh3 = nla_get_u32(tb[NDTA_THRESH3]);
2025
2026 if (tb[NDTA_GC_INTERVAL])
2027 tbl->gc_interval = nla_get_msecs(tb[NDTA_GC_INTERVAL]);
2028
2029 err = 0;
2030
2031errout_tbl_lock:
2032 write_unlock_bh(&tbl->lock);
2033errout_locked:
2034 read_unlock(&neigh_tbl_lock);
2035errout:
2036 return err;
2037}
2038
2039static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2040{
2041 struct net *net = sock_net(skb->sk);
2042 int family, tidx, nidx = 0;
2043 int tbl_skip = cb->args[0];
2044 int neigh_skip = cb->args[1];
2045 struct neigh_table *tbl;
2046
2047 family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
2048
2049 read_lock(&neigh_tbl_lock);
2050 for (tbl = neigh_tables, tidx = 0; tbl; tbl = tbl->next, tidx++) {
2051 struct neigh_parms *p;
2052
2053 if (tidx < tbl_skip || (family && tbl->family != family))
2054 continue;
2055
2056 if (neightbl_fill_info(skb, tbl, NETLINK_CB(cb->skb).pid,
2057 cb->nlh->nlmsg_seq, RTM_NEWNEIGHTBL,
2058 NLM_F_MULTI) <= 0)
2059 break;
2060
2061 for (nidx = 0, p = tbl->parms.next; p; p = p->next) {
2062 if (!net_eq(neigh_parms_net(p), net))
2063 continue;
2064
2065 if (nidx < neigh_skip)
2066 goto next;
2067
2068 if (neightbl_fill_param_info(skb, tbl, p,
2069 NETLINK_CB(cb->skb).pid,
2070 cb->nlh->nlmsg_seq,
2071 RTM_NEWNEIGHTBL,
2072 NLM_F_MULTI) <= 0)
2073 goto out;
2074 next:
2075 nidx++;
2076 }
2077
2078 neigh_skip = 0;
2079 }
2080out:
2081 read_unlock(&neigh_tbl_lock);
2082 cb->args[0] = tidx;
2083 cb->args[1] = nidx;
2084
2085 return skb->len;
2086}
2087
2088static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
2089 u32 pid, u32 seq, int type, unsigned int flags)
2090{
2091 unsigned long now = jiffies;
2092 struct nda_cacheinfo ci;
2093 struct nlmsghdr *nlh;
2094 struct ndmsg *ndm;
2095
2096 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
2097 if (nlh == NULL)
2098 return -EMSGSIZE;
2099
2100 ndm = nlmsg_data(nlh);
2101 ndm->ndm_family = neigh->ops->family;
2102 ndm->ndm_pad1 = 0;
2103 ndm->ndm_pad2 = 0;
2104 ndm->ndm_flags = neigh->flags;
2105 ndm->ndm_type = neigh->type;
2106 ndm->ndm_ifindex = neigh->dev->ifindex;
2107
2108 NLA_PUT(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key);
2109
2110 read_lock_bh(&neigh->lock);
2111 ndm->ndm_state = neigh->nud_state;
2112 if (neigh->nud_state & NUD_VALID) {
2113 char haddr[MAX_ADDR_LEN];
2114
2115 neigh_ha_snapshot(haddr, neigh, neigh->dev);
2116 if (nla_put(skb, NDA_LLADDR, neigh->dev->addr_len, haddr) < 0) {
2117 read_unlock_bh(&neigh->lock);
2118 goto nla_put_failure;
2119 }
2120 }
2121
2122 ci.ndm_used = jiffies_to_clock_t(now - neigh->used);
2123 ci.ndm_confirmed = jiffies_to_clock_t(now - neigh->confirmed);
2124 ci.ndm_updated = jiffies_to_clock_t(now - neigh->updated);
2125 ci.ndm_refcnt = atomic_read(&neigh->refcnt) - 1;
2126 read_unlock_bh(&neigh->lock);
2127
2128 NLA_PUT_U32(skb, NDA_PROBES, atomic_read(&neigh->probes));
2129 NLA_PUT(skb, NDA_CACHEINFO, sizeof(ci), &ci);
2130
2131 return nlmsg_end(skb, nlh);
2132
2133nla_put_failure:
2134 nlmsg_cancel(skb, nlh);
2135 return -EMSGSIZE;
2136}
2137
2138static void neigh_update_notify(struct neighbour *neigh)
2139{
2140 call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
2141 __neigh_notify(neigh, RTM_NEWNEIGH, 0);
2142}
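/*
 * Other subsystems (e.g. bonding, RDMA drivers) learn about these
 * updates through the netevent chain.  A minimal consumer might look
 * like this (sketch; the names are made up):
 */
#if 0
#include <linux/notifier.h>
#include <net/netevent.h>

static int my_neigh_event(struct notifier_block *nb,
			  unsigned long event, void *ctx)
{
	if (event == NETEVENT_NEIGH_UPDATE) {
		struct neighbour *neigh = ctx;
		/* react to the refreshed link-layer address here */
	}
	return NOTIFY_DONE;
}

static struct notifier_block my_neigh_nb = {
	.notifier_call = my_neigh_event,
};

/* at init time: register_netevent_notifier(&my_neigh_nb); */
#endif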
2143
2144static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2145 struct netlink_callback *cb)
2146{
2147 struct net *net = sock_net(skb->sk);
2148 struct neighbour *n;
2149 int rc, h, s_h = cb->args[1];
2150 int idx, s_idx = idx = cb->args[2];
2151 struct neigh_hash_table *nht;
2152
2153 rcu_read_lock_bh();
2154 nht = rcu_dereference_bh(tbl->nht);
2155
2156 for (h = 0; h < (1 << nht->hash_shift); h++) {
2157 if (h < s_h)
2158 continue;
2159 if (h > s_h)
2160 s_idx = 0;
2161 for (n = rcu_dereference_bh(nht->hash_buckets[h]), idx = 0;
2162 n != NULL;
2163 n = rcu_dereference_bh(n->next)) {
2164 if (!net_eq(dev_net(n->dev), net))
2165 continue;
2166 if (idx < s_idx)
2167 goto next;
2168 if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).pid,
2169 cb->nlh->nlmsg_seq,
2170 RTM_NEWNEIGH,
2171 NLM_F_MULTI) <= 0) {
2172 rc = -1;
2173 goto out;
2174 }
2175next:
2176 idx++;
2177 }
2178 }
2179 rc = skb->len;
2180out:
2181 rcu_read_unlock_bh();
2182 cb->args[1] = h;
2183 cb->args[2] = idx;
2184 return rc;
2185}
2186
2187static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2188{
2189 struct neigh_table *tbl;
2190 int t, family, s_t;
2191
2192 read_lock(&neigh_tbl_lock);
2193 family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
2194 s_t = cb->args[0];
2195
2196 for (tbl = neigh_tables, t = 0; tbl; tbl = tbl->next, t++) {
2197 if (t < s_t || (family && tbl->family != family))
2198 continue;
2199 if (t > s_t)
2200 memset(&cb->args[1], 0, sizeof(cb->args) -
2201 sizeof(cb->args[0]));
2202 if (neigh_dump_table(tbl, skb, cb) < 0)
2203 break;
2204 }
2205 read_unlock(&neigh_tbl_lock);
2206
2207 cb->args[0] = t;
2208 return skb->len;
2209}
2210
2211void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
2212{
2213 int chain;
2214 struct neigh_hash_table *nht;
2215
2216 rcu_read_lock_bh();
2217 nht = rcu_dereference_bh(tbl->nht);
2218
2219 read_lock(&tbl->lock); /* avoid resizes */
2220 for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
2221 struct neighbour *n;
2222
2223 for (n = rcu_dereference_bh(nht->hash_buckets[chain]);
2224 n != NULL;
2225 n = rcu_dereference_bh(n->next))
2226 cb(n, cookie);
2227 }
2228 read_unlock(&tbl->lock);
2229 rcu_read_unlock_bh();
2230}
2231EXPORT_SYMBOL(neigh_for_each);
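/*
 * The callback runs under tbl->lock with BHs disabled, so it must not
 * sleep or re-take table locks.  A minimal (hypothetical) user that
 * counts reachable entries:
 */
#if 0
static void count_reachable(struct neighbour *n, void *cookie)
{
	int *cnt = cookie;

	if (n->nud_state & NUD_REACHABLE)
		(*cnt)++;
}

/* int cnt = 0; neigh_for_each(&arp_tbl, count_reachable, &cnt); */
#endif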
2232
2233/* The tbl->lock must be held as a writer and BH disabled. */
2234void __neigh_for_each_release(struct neigh_table *tbl,
2235 int (*cb)(struct neighbour *))
2236{
2237 int chain;
2238 struct neigh_hash_table *nht;
2239
2240 nht = rcu_dereference_protected(tbl->nht,
2241 lockdep_is_held(&tbl->lock));
2242 for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
2243 struct neighbour *n;
2244 struct neighbour __rcu **np;
2245
2246 np = &nht->hash_buckets[chain];
2247 while ((n = rcu_dereference_protected(*np,
2248 lockdep_is_held(&tbl->lock))) != NULL) {
2249 int release;
2250
2251 write_lock(&n->lock);
2252 release = cb(n);
2253 if (release) {
2254 rcu_assign_pointer(*np,
2255 rcu_dereference_protected(n->next,
2256 lockdep_is_held(&tbl->lock)));
2257 n->dead = 1;
2258 } else
2259 np = &n->next;
2260 write_unlock(&n->lock);
2261 if (release)
2262 neigh_cleanup_and_release(n);
2263 }
2264 }
2265}
2266EXPORT_SYMBOL(__neigh_for_each_release);
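/*
 * The callback decides per entry: a non-zero return unlinks and
 * releases it.  Callers already hold tbl->lock for writing with BHs
 * off, as in this illustrative purge of failed entries:
 */
#if 0
static int release_if_failed(struct neighbour *n)
{
	/* n->lock is held by __neigh_for_each_release() here */
	return (n->nud_state & NUD_FAILED) ? 1 : 0;
}

/*
 * write_lock_bh(&tbl->lock);
 * __neigh_for_each_release(tbl, release_if_failed);
 * write_unlock_bh(&tbl->lock);
 */
#endif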
2267
2268#ifdef CONFIG_PROC_FS
2269
2270static struct neighbour *neigh_get_first(struct seq_file *seq)
2271{
2272 struct neigh_seq_state *state = seq->private;
2273 struct net *net = seq_file_net(seq);
2274 struct neigh_hash_table *nht = state->nht;
2275 struct neighbour *n = NULL;
2276 int bucket = state->bucket;
2277
2278 state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
2279 for (bucket = 0; bucket < (1 << nht->hash_shift); bucket++) {
2280 n = rcu_dereference_bh(nht->hash_buckets[bucket]);
2281
2282 while (n) {
2283 if (!net_eq(dev_net(n->dev), net))
2284 goto next;
2285 if (state->neigh_sub_iter) {
2286 loff_t fakep = 0;
2287 void *v;
2288
2289 v = state->neigh_sub_iter(state, n, &fakep);
2290 if (!v)
2291 goto next;
2292 }
2293 if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
2294 break;
2295 if (n->nud_state & ~NUD_NOARP)
2296 break;
2297next:
2298 n = rcu_dereference_bh(n->next);
2299 }
2300
2301 if (n)
2302 break;
2303 }
2304 state->bucket = bucket;
2305
2306 return n;
2307}
2308
2309static struct neighbour *neigh_get_next(struct seq_file *seq,
2310 struct neighbour *n,
2311 loff_t *pos)
2312{
2313 struct neigh_seq_state *state = seq->private;
2314 struct net *net = seq_file_net(seq);
2315 struct neigh_hash_table *nht = state->nht;
2316
2317 if (state->neigh_sub_iter) {
2318 void *v = state->neigh_sub_iter(state, n, pos);
2319 if (v)
2320 return n;
2321 }
2322 n = rcu_dereference_bh(n->next);
2323
2324 while (1) {
2325 while (n) {
2326 if (!net_eq(dev_net(n->dev), net))
2327 goto next;
2328 if (state->neigh_sub_iter) {
2329 void *v = state->neigh_sub_iter(state, n, pos);
2330 if (v)
2331 return n;
2332 goto next;
2333 }
2334 if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
2335 break;
2336
2337 if (n->nud_state & ~NUD_NOARP)
2338 break;
2339next:
2340 n = rcu_dereference_bh(n->next);
2341 }
2342
2343 if (n)
2344 break;
2345
2346 if (++state->bucket >= (1 << nht->hash_shift))
2347 break;
2348
2349 n = rcu_dereference_bh(nht->hash_buckets[state->bucket]);
2350 }
2351
2352 if (n && pos)
2353 --(*pos);
2354 return n;
2355}
2356
2357static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
2358{
2359 struct neighbour *n = neigh_get_first(seq);
2360
2361 if (n) {
2362 --(*pos);
2363 while (*pos) {
2364 n = neigh_get_next(seq, n, pos);
2365 if (!n)
2366 break;
2367 }
2368 }
2369 return *pos ? NULL : n;
2370}
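/*
 * Position bookkeeping above: neigh_get_next() decrements *pos once
 * per entry it returns, so after the countdown *pos is zero exactly
 * when the walk reached the requested offset; a non-zero remainder
 * means the table shrank underneath us and NULL signals EOF.
 */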
2371
2372static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
2373{
2374 struct neigh_seq_state *state = seq->private;
2375 struct net *net = seq_file_net(seq);
2376 struct neigh_table *tbl = state->tbl;
2377 struct pneigh_entry *pn = NULL;
2378 int bucket = state->bucket;
2379
2380 state->flags |= NEIGH_SEQ_IS_PNEIGH;
2381 for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
2382 pn = tbl->phash_buckets[bucket];
2383 while (pn && !net_eq(pneigh_net(pn), net))
2384 pn = pn->next;
2385 if (pn)
2386 break;
2387 }
2388 state->bucket = bucket;
2389
2390 return pn;
2391}
2392
2393static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
2394 struct pneigh_entry *pn,
2395 loff_t *pos)
2396{
2397 struct neigh_seq_state *state = seq->private;
2398 struct net *net = seq_file_net(seq);
2399 struct neigh_table *tbl = state->tbl;
2400
2401 pn = pn->next;
2402 while (!pn) {
2403 if (++state->bucket > PNEIGH_HASHMASK)
2404 break;
2405 pn = tbl->phash_buckets[state->bucket];
2406 while (pn && !net_eq(pneigh_net(pn), net))
2407 pn = pn->next;
2408 if (pn)
2409 break;
2410 }
2411
2412 if (pn && pos)
2413 --(*pos);
2414
2415 return pn;
2416}
2417
2418static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
2419{
2420 struct pneigh_entry *pn = pneigh_get_first(seq);
2421
2422 if (pn) {
2423 --(*pos);
2424 while (*pos) {
2425 pn = pneigh_get_next(seq, pn, pos);
2426 if (!pn)
2427 break;
2428 }
2429 }
2430 return *pos ? NULL : pn;
2431}
2432
2433static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
2434{
2435 struct neigh_seq_state *state = seq->private;
2436 void *rc;
2437 loff_t idxpos = *pos;
2438
2439 rc = neigh_get_idx(seq, &idxpos);
2440 if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
2441 rc = pneigh_get_idx(seq, &idxpos);
2442
2443 return rc;
2444}
2445
2446void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
2447 __acquires(rcu_bh)
2448{
2449 struct neigh_seq_state *state = seq->private;
2450
2451 state->tbl = tbl;
2452 state->bucket = 0;
2453 state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);
2454
2455 rcu_read_lock_bh();
2456 state->nht = rcu_dereference_bh(tbl->nht);
2457
2458 return *pos ? neigh_get_idx_any(seq, pos) : SEQ_START_TOKEN;
2459}
2460EXPORT_SYMBOL(neigh_seq_start);
2461
2462void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2463{
2464 struct neigh_seq_state *state;
2465 void *rc;
2466
2467 if (v == SEQ_START_TOKEN) {
2468 rc = neigh_get_first(seq);
2469 goto out;
2470 }
2471
2472 state = seq->private;
2473 if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) {
2474 rc = neigh_get_next(seq, v, NULL);
2475 if (rc)
2476 goto out;
2477 if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY))
2478 rc = pneigh_get_first(seq);
2479 } else {
2480 BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY);
2481 rc = pneigh_get_next(seq, v, NULL);
2482 }
2483out:
2484 ++(*pos);
2485 return rc;
2486}
2487EXPORT_SYMBOL(neigh_seq_next);
2488
2489void neigh_seq_stop(struct seq_file *seq, void *v)
2490 __releases(rcu_bh)
2491{
2492 rcu_read_unlock_bh();
2493}
2494EXPORT_SYMBOL(neigh_seq_stop);
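/*
 * Protocols reuse this iterator trio for their own /proc files.  ARP,
 * for example, needs only a thin start wrapper (sketch modelled on
 * net/ipv4/arp.c; arp_seq_show is the protocol's own formatter):
 */
#if 0
static void *arp_seq_start(struct seq_file *seq, loff_t *pos)
{
	/* skip NUD_NOARP entries; they carry no ARP state */
	return neigh_seq_start(seq, pos, &arp_tbl, NEIGH_SEQ_SKIP_NOARP);
}

static const struct seq_operations arp_seq_ops = {
	.start	= arp_seq_start,
	.next	= neigh_seq_next,
	.stop	= neigh_seq_stop,
	.show	= arp_seq_show,
};
#endif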
2495
2496/* statistics via seq_file */
2497
2498static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
2499{
2500 struct neigh_table *tbl = seq->private;
2501 int cpu;
2502
2503 if (*pos == 0)
2504 return SEQ_START_TOKEN;
2505
2506 for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
2507 if (!cpu_possible(cpu))
2508 continue;
2509 *pos = cpu+1;
2510 return per_cpu_ptr(tbl->stats, cpu);
2511 }
2512 return NULL;
2513}
2514
2515static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2516{
2517 struct neigh_table *tbl = seq->private;
2518 int cpu;
2519
2520 for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
2521 if (!cpu_possible(cpu))
2522 continue;
2523 *pos = cpu+1;
2524 return per_cpu_ptr(tbl->stats, cpu);
2525 }
2526 return NULL;
2527}
2528
2529static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
2530{
2531
2532}
2533
2534static int neigh_stat_seq_show(struct seq_file *seq, void *v)
2535{
2536 struct neigh_table *tbl = seq->private;
2537 struct neigh_statistics *st = v;
2538
2539 if (v == SEQ_START_TOKEN) {
2540 seq_printf(seq, "entries allocs destroys hash_grows lookups hits res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs unresolved_discards\n");
2541 return 0;
2542 }
2543
2544 seq_printf(seq, "%08x %08lx %08lx %08lx %08lx %08lx %08lx "
2545 "%08lx %08lx %08lx %08lx %08lx\n",
2546 atomic_read(&tbl->entries),
2547
2548 st->allocs,
2549 st->destroys,
2550 st->hash_grows,
2551
2552 st->lookups,
2553 st->hits,
2554
2555 st->res_failed,
2556
2557 st->rcv_probes_mcast,
2558 st->rcv_probes_ucast,
2559
2560 st->periodic_gc_runs,
2561 st->forced_gc_runs,
2562 st->unres_discards
2563 );
2564
2565 return 0;
2566}
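/*
 * Each row of /proc/net/stat/<table> is one possible CPU's counters in
 * hex; note that the first column repeats the table-wide entry count
 * on every row, since tbl->entries is not a per-CPU statistic.
 */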
2567
2568static const struct seq_operations neigh_stat_seq_ops = {
2569 .start = neigh_stat_seq_start,
2570 .next = neigh_stat_seq_next,
2571 .stop = neigh_stat_seq_stop,
2572 .show = neigh_stat_seq_show,
2573};
2574
2575static int neigh_stat_seq_open(struct inode *inode, struct file *file)
2576{
2577 int ret = seq_open(file, &neigh_stat_seq_ops);
2578
2579 if (!ret) {
2580 struct seq_file *sf = file->private_data;
2581 sf->private = PDE(inode)->data;
2582 }
2583 return ret;
2584}
2585
2586static const struct file_operations neigh_stat_seq_fops = {
2587 .owner = THIS_MODULE,
2588 .open = neigh_stat_seq_open,
2589 .read = seq_read,
2590 .llseek = seq_lseek,
2591 .release = seq_release,
2592};
2593
2594#endif /* CONFIG_PROC_FS */
2595
2596static inline size_t neigh_nlmsg_size(void)
2597{
2598 return NLMSG_ALIGN(sizeof(struct ndmsg))
2599 + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
2600 + nla_total_size(MAX_ADDR_LEN) /* NDA_LLADDR */
2601 + nla_total_size(sizeof(struct nda_cacheinfo))
2602 + nla_total_size(4); /* NDA_PROBES */
2603}
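/*
 * Worked example of the estimate above with MAX_ADDR_LEN = 32:
 *
 *	NLMSG_ALIGN(sizeof(struct ndmsg))        = 12
 *	nla_total_size(32)   (NDA_DST)           = NLA_ALIGN(4 + 32) = 36
 *	nla_total_size(32)   (NDA_LLADDR)        = 36
 *	nla_total_size(16)   (nda_cacheinfo)     = 20
 *	nla_total_size(4)    (NDA_PROBES)        = 8
 *	                                   total = 112 bytes of payload
 *
 * nlmsg_new() adds the netlink message header on top of this estimate.
 */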
2604
2605static void __neigh_notify(struct neighbour *n, int type, int flags)
2606{
2607 struct net *net = dev_net(n->dev);
2608 struct sk_buff *skb;
2609 int err = -ENOBUFS;
2610
2611 skb = nlmsg_new(neigh_nlmsg_size(), GFP_ATOMIC);
2612 if (skb == NULL)
2613 goto errout;
2614
2615 err = neigh_fill_info(skb, n, 0, 0, type, flags);
2616 if (err < 0) {
2617 /* -EMSGSIZE implies BUG in neigh_nlmsg_size() */
2618 WARN_ON(err == -EMSGSIZE);
2619 kfree_skb(skb);
2620 goto errout;
2621 }
2622 rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
2623 return;
2624errout:
2625 if (err < 0)
2626 rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
2627}
2628
2629#ifdef CONFIG_ARPD
2630void neigh_app_ns(struct neighbour *n)
2631{
2632 __neigh_notify(n, RTM_GETNEIGH, NLM_F_REQUEST);
2633}
2634EXPORT_SYMBOL(neigh_app_ns);
2635#endif /* CONFIG_ARPD */
2636
2637#ifdef CONFIG_SYSCTL
2638
2639#define NEIGH_VARS_MAX 19
2640
2641static struct neigh_sysctl_table {
2642 struct ctl_table_header *sysctl_header;
2643 struct ctl_table neigh_vars[NEIGH_VARS_MAX];
2644 char *dev_name;
2645} neigh_sysctl_template __read_mostly = {
2646 .neigh_vars = {
2647 {
2648 .procname = "mcast_solicit",
2649 .maxlen = sizeof(int),
2650 .mode = 0644,
2651 .proc_handler = proc_dointvec,
2652 },
2653 {
2654 .procname = "ucast_solicit",
2655 .maxlen = sizeof(int),
2656 .mode = 0644,
2657 .proc_handler = proc_dointvec,
2658 },
2659 {
2660 .procname = "app_solicit",
2661 .maxlen = sizeof(int),
2662 .mode = 0644,
2663 .proc_handler = proc_dointvec,
2664 },
2665 {
2666 .procname = "retrans_time",
2667 .maxlen = sizeof(int),
2668 .mode = 0644,
2669 .proc_handler = proc_dointvec_userhz_jiffies,
2670 },
2671 {
2672 .procname = "base_reachable_time",
2673 .maxlen = sizeof(int),
2674 .mode = 0644,
2675 .proc_handler = proc_dointvec_jiffies,
2676 },
2677 {
2678 .procname = "delay_first_probe_time",
2679 .maxlen = sizeof(int),
2680 .mode = 0644,
2681 .proc_handler = proc_dointvec_jiffies,
2682 },
2683 {
2684 .procname = "gc_stale_time",
2685 .maxlen = sizeof(int),
2686 .mode = 0644,
2687 .proc_handler = proc_dointvec_jiffies,
2688 },
2689 {
2690 .procname = "unres_qlen",
2691 .maxlen = sizeof(int),
2692 .mode = 0644,
2693 .proc_handler = proc_dointvec,
2694 },
2695 {
2696 .procname = "proxy_qlen",
2697 .maxlen = sizeof(int),
2698 .mode = 0644,
2699 .proc_handler = proc_dointvec,
2700 },
2701 {
2702 .procname = "anycast_delay",
2703 .maxlen = sizeof(int),
2704 .mode = 0644,
2705 .proc_handler = proc_dointvec_userhz_jiffies,
2706 },
2707 {
2708 .procname = "proxy_delay",
2709 .maxlen = sizeof(int),
2710 .mode = 0644,
2711 .proc_handler = proc_dointvec_userhz_jiffies,
2712 },
2713 {
2714 .procname = "locktime",
2715 .maxlen = sizeof(int),
2716 .mode = 0644,
2717 .proc_handler = proc_dointvec_userhz_jiffies,
2718 },
2719 {
2720 .procname = "retrans_time_ms",
2721 .maxlen = sizeof(int),
2722 .mode = 0644,
2723 .proc_handler = proc_dointvec_ms_jiffies,
2724 },
2725 {
2726 .procname = "base_reachable_time_ms",
2727 .maxlen = sizeof(int),
2728 .mode = 0644,
2729 .proc_handler = proc_dointvec_ms_jiffies,
2730 },
2731 {
2732 .procname = "gc_interval",
2733 .maxlen = sizeof(int),
2734 .mode = 0644,
2735 .proc_handler = proc_dointvec_jiffies,
2736 },
2737 {
2738 .procname = "gc_thresh1",
2739 .maxlen = sizeof(int),
2740 .mode = 0644,
2741 .proc_handler = proc_dointvec,
2742 },
2743 {
2744 .procname = "gc_thresh2",
2745 .maxlen = sizeof(int),
2746 .mode = 0644,
2747 .proc_handler = proc_dointvec,
2748 },
2749 {
2750 .procname = "gc_thresh3",
2751 .maxlen = sizeof(int),
2752 .mode = 0644,
2753 .proc_handler = proc_dointvec,
2754 },
2755 {},
2756 },
2757};
2758
2759int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
2760 char *p_name, proc_handler *handler)
2761{
2762 struct neigh_sysctl_table *t;
2763 const char *dev_name_source = NULL;
2764
2765#define NEIGH_CTL_PATH_ROOT 0
2766#define NEIGH_CTL_PATH_PROTO 1
2767#define NEIGH_CTL_PATH_NEIGH 2
2768#define NEIGH_CTL_PATH_DEV 3
2769
2770 struct ctl_path neigh_path[] = {
2771 { .procname = "net", },
2772 { .procname = "proto", },
2773 { .procname = "neigh", },
2774 { .procname = "default", },
2775 { },
2776 };
2777
2778 t = kmemdup(&neigh_sysctl_template, sizeof(*t), GFP_KERNEL);
2779 if (!t)
2780 goto err;
2781
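	/*
	 * The .data pointers are patched by index, so the assignments
	 * below must stay in step with the order of the entries in
	 * neigh_sysctl_template.neigh_vars[] above.
	 */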
2782 t->neigh_vars[0].data = &p->mcast_probes;
2783 t->neigh_vars[1].data = &p->ucast_probes;
2784 t->neigh_vars[2].data = &p->app_probes;
2785 t->neigh_vars[3].data = &p->retrans_time;
2786 t->neigh_vars[4].data = &p->base_reachable_time;
2787 t->neigh_vars[5].data = &p->delay_probe_time;
2788 t->neigh_vars[6].data = &p->gc_staletime;
2789 t->neigh_vars[7].data = &p->queue_len;
2790 t->neigh_vars[8].data = &p->proxy_qlen;
2791 t->neigh_vars[9].data = &p->anycast_delay;
2792 t->neigh_vars[10].data = &p->proxy_delay;
2793 t->neigh_vars[11].data = &p->locktime;
2794 t->neigh_vars[12].data = &p->retrans_time;
2795 t->neigh_vars[13].data = &p->base_reachable_time;
2796
2797 if (dev) {
2798 dev_name_source = dev->name;
2799 /* Terminate the table early */
2800 memset(&t->neigh_vars[14], 0, sizeof(t->neigh_vars[14]));
2801 } else {
2802 dev_name_source = neigh_path[NEIGH_CTL_PATH_DEV].procname;
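		/*
		 * The table-wide gc_interval/gc_thresh* values sit
		 * directly behind the default neigh_parms embedded in
		 * struct neigh_table, which is what the (p + 1)
		 * arithmetic below relies on.
		 */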
2803 t->neigh_vars[14].data = (int *)(p + 1);
2804 t->neigh_vars[15].data = (int *)(p + 1) + 1;
2805 t->neigh_vars[16].data = (int *)(p + 1) + 2;
2806 t->neigh_vars[17].data = (int *)(p + 1) + 3;
2807 }
2808
2809
2810 if (handler) {
2811 /* RetransTime */
2812 t->neigh_vars[3].proc_handler = handler;
2813 t->neigh_vars[3].extra1 = dev;
2814 /* ReachableTime */
2815 t->neigh_vars[4].proc_handler = handler;
2816 t->neigh_vars[4].extra1 = dev;
2817 /* RetransTime (in milliseconds) */
2818 t->neigh_vars[12].proc_handler = handler;
2819 t->neigh_vars[12].extra1 = dev;
2820 /* ReachableTime (in milliseconds) */
2821 t->neigh_vars[13].proc_handler = handler;
2822 t->neigh_vars[13].extra1 = dev;
2823 }
2824
2825 t->dev_name = kstrdup(dev_name_source, GFP_KERNEL);
2826 if (!t->dev_name)
2827 goto free;
2828
2829 neigh_path[NEIGH_CTL_PATH_DEV].procname = t->dev_name;
2830 neigh_path[NEIGH_CTL_PATH_PROTO].procname = p_name;
2831
2832 t->sysctl_header =
2833 register_net_sysctl_table(neigh_parms_net(p), neigh_path, t->neigh_vars);
2834 if (!t->sysctl_header)
2835 goto free_procname;
2836
2837 p->sysctl_table = t;
2838 return 0;
2839
2840free_procname:
2841 kfree(t->dev_name);
2842free:
2843 kfree(t);
2844err:
2845 return -ENOBUFS;
2846}
2847EXPORT_SYMBOL(neigh_sysctl_register);
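/*
 * Typical registration, modelled on net/ipv4/arp.c, with p being the
 * device's struct neigh_parms: the defaults appear under
 * .../neigh/default, per-device knobs under .../neigh/<devname>
 * (sketch, not part of this file):
 */
#if 0
/* creates /proc/sys/net/ipv4/neigh/<dev->name>/... */
neigh_sysctl_register(dev, p, "ipv4", NULL);

/* and on teardown: */
neigh_sysctl_unregister(p);
#endif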
2848
2849void neigh_sysctl_unregister(struct neigh_parms *p)
2850{
2851 if (p->sysctl_table) {
2852 struct neigh_sysctl_table *t = p->sysctl_table;
2853 p->sysctl_table = NULL;
2854 unregister_sysctl_table(t->sysctl_header);
2855 kfree(t->dev_name);
2856 kfree(t);
2857 }
2858}
2859EXPORT_SYMBOL(neigh_sysctl_unregister);
2860
2861#endif /* CONFIG_SYSCTL */
2862
2863static int __init neigh_init(void)
2864{
2865 rtnl_register(PF_UNSPEC, RTM_NEWNEIGH, neigh_add, NULL, NULL);
2866 rtnl_register(PF_UNSPEC, RTM_DELNEIGH, neigh_delete, NULL, NULL);
2867 rtnl_register(PF_UNSPEC, RTM_GETNEIGH, NULL, neigh_dump_info, NULL);
2868
2869 rtnl_register(PF_UNSPEC, RTM_GETNEIGHTBL, NULL, neightbl_dump_info,
2870 NULL);
2871 rtnl_register(PF_UNSPEC, RTM_SETNEIGHTBL, neightbl_set, NULL, NULL);
2872
2873 return 0;
2874}
2875
2876subsys_initcall(neigh_init);
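/*
 * Each rtnl_register() call above fills at most one of the three
 * handler slots: a doit function for direct requests (RTM_NEWNEIGH,
 * RTM_DELNEIGH, RTM_SETNEIGHTBL), a dumpit function for iterated
 * dumps (RTM_GETNEIGH, RTM_GETNEIGHTBL), and an optional calcit hook
 * for sizing; NULL marks the slots a message type does not use.
 */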
2877