[NET] NEIGHBOUR: Make each EXPORT_SYMBOL{,_GPL}() immediately follow its function...
net/core/neighbour.c
/*
 *	Generic address resolution entity
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Fixes:
 *	Vitaly E. Lavrov	releasing NULL neighbor in neigh_add.
 *	Harald Welte		Add neighbour cache statistics like rtstat
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif
#include <linux/times.h>
#include <net/net_namespace.h>
#include <net/neighbour.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/netevent.h>
#include <net/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/log2.h>

#define NEIGH_DEBUG 1

#define NEIGH_PRINTK(x...) printk(x)
#define NEIGH_NOPRINTK(x...) do { ; } while(0)
#define NEIGH_PRINTK0 NEIGH_PRINTK
#define NEIGH_PRINTK1 NEIGH_NOPRINTK
#define NEIGH_PRINTK2 NEIGH_NOPRINTK

#if NEIGH_DEBUG >= 1
#undef NEIGH_PRINTK1
#define NEIGH_PRINTK1 NEIGH_PRINTK
#endif
#if NEIGH_DEBUG >= 2
#undef NEIGH_PRINTK2
#define NEIGH_PRINTK2 NEIGH_PRINTK
#endif

#define PNEIGH_HASHMASK		0xF

static void neigh_timer_handler(unsigned long arg);
static void __neigh_notify(struct neighbour *n, int type, int flags);
static void neigh_update_notify(struct neighbour *neigh);
static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev);

static struct neigh_table *neigh_tables;
#ifdef CONFIG_PROC_FS
static const struct file_operations neigh_stat_seq_fops;
#endif

/*
   Neighbour hash table buckets are protected with rwlock tbl->lock.

   - All scans/updates of the hash buckets MUST be made under this lock.
   - NOTHING clever should be done under this lock: no callbacks
     into protocol backends, no attempts to send anything to the network.
     That would result in deadlocks if the backend/driver wants to use
     the neighbour cache.
   - If an entry requires some non-trivial actions, increase
     its reference count and release the table lock.

   Neighbour entries are protected:
   - with reference count.
   - with rwlock neigh->lock

   The reference count prevents destruction.

   neigh->lock mainly serializes the ll address data and its validity state.
   However, the same lock is also used to protect other entry fields:
   - timer
   - resolution queue

   Again, nothing clever shall be done under neigh->lock;
   the most complicated procedure we allow there is dev->hard_header.
   It is assumed that dev->hard_header is simplistic and does
   not make callbacks into neighbour tables.

   The last lock is neigh_tbl_lock. It is a pure SMP lock, protecting
   the list of neighbour tables. This list is used only in process context.
 */
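
/*
 * Illustrative sketch (not part of the original file): the canonical way to
 * do non-trivial work on an entry found under tbl->lock is to pin it with a
 * reference, drop the lock, and only then call out.  The lookup and work
 * steps here are hypothetical; neigh_hold()/neigh_release() are the real
 * helpers:
 *
 *	read_lock_bh(&tbl->lock);
 *	n = find_entry_in_bucket(tbl, key);
 *	if (n)
 *		neigh_hold(n);
 *	read_unlock_bh(&tbl->lock);
 *	if (n) {
 *		do_slow_work(n);	(may talk to drivers, send packets)
 *		neigh_release(n);	(may free the entry)
 *	}
 */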

static DEFINE_RWLOCK(neigh_tbl_lock);

static int neigh_blackhole(struct sk_buff *skb)
{
	kfree_skb(skb);
	return -ENETDOWN;
}

static void neigh_cleanup_and_release(struct neighbour *neigh)
{
	if (neigh->parms->neigh_cleanup)
		neigh->parms->neigh_cleanup(neigh);

	__neigh_notify(neigh, RTM_DELNEIGH, 0);
	neigh_release(neigh);
}

/*
 * It is a random distribution in the interval (1/2)*base...(3/2)*base.
 * It corresponds to the default IPv6 settings and is not overridable,
 * because it is a really reasonable choice.
 */

unsigned long neigh_rand_reach_time(unsigned long base)
{
	return (base ? (net_random() % base) + (base >> 1) : 0);
}
EXPORT_SYMBOL(neigh_rand_reach_time);
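
/*
 * Worked example (illustrative, not from the original file): with
 * base_reachable_time = 30*HZ, neigh_rand_reach_time() returns a value
 * uniformly distributed in [15*HZ, 45*HZ): net_random() % base picks a
 * value in [0, 30*HZ) and base >> 1 shifts that range up by 15*HZ.
 */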


static int neigh_forced_gc(struct neigh_table *tbl)
{
	int shrunk = 0;
	int i;

	NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);

	write_lock_bh(&tbl->lock);
	for (i = 0; i <= tbl->hash_mask; i++) {
		struct neighbour *n, **np;

		np = &tbl->hash_buckets[i];
		while ((n = *np) != NULL) {
			/* Neighbour record may be discarded if:
			 * - nobody refers to it.
			 * - it is not permanent
			 */
			write_lock(&n->lock);
			if (atomic_read(&n->refcnt) == 1 &&
			    !(n->nud_state & NUD_PERMANENT)) {
				*np = n->next;
				n->dead = 1;
				shrunk = 1;
				write_unlock(&n->lock);
				neigh_cleanup_and_release(n);
				continue;
			}
			write_unlock(&n->lock);
			np = &n->next;
		}
	}

	tbl->last_flush = jiffies;

	write_unlock_bh(&tbl->lock);

	return shrunk;
}

static void neigh_add_timer(struct neighbour *n, unsigned long when)
{
	neigh_hold(n);
	if (unlikely(mod_timer(&n->timer, when))) {
		printk("NEIGH: BUG, double timer add, state is %x\n",
		       n->nud_state);
		dump_stack();
	}
}

static int neigh_del_timer(struct neighbour *n)
{
	if ((n->nud_state & NUD_IN_TIMER) &&
	    del_timer(&n->timer)) {
		neigh_release(n);
		return 1;
	}
	return 0;
}

static void pneigh_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(list)) != NULL) {
		dev_put(skb->dev);
		kfree_skb(skb);
	}
}

static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev)
{
	int i;

	for (i = 0; i <= tbl->hash_mask; i++) {
		struct neighbour *n, **np = &tbl->hash_buckets[i];

		while ((n = *np) != NULL) {
			if (dev && n->dev != dev) {
				np = &n->next;
				continue;
			}
			*np = n->next;
			write_lock(&n->lock);
			neigh_del_timer(n);
			n->dead = 1;

			if (atomic_read(&n->refcnt) != 1) {
				/* The most unpleasant situation.
				   We must destroy the neighbour entry,
				   but someone still uses it.

				   The destroy will be delayed until
				   the last user releases us, but
				   we must kill timers etc. and move
				   it to a safe state.
				 */
				skb_queue_purge(&n->arp_queue);
				n->output = neigh_blackhole;
				if (n->nud_state & NUD_VALID)
					n->nud_state = NUD_NOARP;
				else
					n->nud_state = NUD_NONE;
				NEIGH_PRINTK2("neigh %p is stray.\n", n);
			}
			write_unlock(&n->lock);
			neigh_cleanup_and_release(n);
		}
	}
}

void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
{
	write_lock_bh(&tbl->lock);
	neigh_flush_dev(tbl, dev);
	write_unlock_bh(&tbl->lock);
}
EXPORT_SYMBOL(neigh_changeaddr);

int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
	write_lock_bh(&tbl->lock);
	neigh_flush_dev(tbl, dev);
	pneigh_ifdown(tbl, dev);
	write_unlock_bh(&tbl->lock);

	del_timer_sync(&tbl->proxy_timer);
	pneigh_queue_purge(&tbl->proxy_queue);
	return 0;
}
EXPORT_SYMBOL(neigh_ifdown);

static struct neighbour *neigh_alloc(struct neigh_table *tbl)
{
	struct neighbour *n = NULL;
	unsigned long now = jiffies;
	int entries;

	entries = atomic_inc_return(&tbl->entries) - 1;
	if (entries >= tbl->gc_thresh3 ||
	    (entries >= tbl->gc_thresh2 &&
	     time_after(now, tbl->last_flush + 5 * HZ))) {
		if (!neigh_forced_gc(tbl) &&
		    entries >= tbl->gc_thresh3)
			goto out_entries;
	}

	n = kmem_cache_zalloc(tbl->kmem_cachep, GFP_ATOMIC);
	if (!n)
		goto out_entries;

	skb_queue_head_init(&n->arp_queue);
	rwlock_init(&n->lock);
	n->updated = n->used = now;
	n->nud_state = NUD_NONE;
	n->output = neigh_blackhole;
	n->parms = neigh_parms_clone(&tbl->parms);
	setup_timer(&n->timer, neigh_timer_handler, (unsigned long)n);

	NEIGH_CACHE_STAT_INC(tbl, allocs);
	n->tbl = tbl;
	atomic_set(&n->refcnt, 1);
	n->dead = 1;
out:
	return n;

out_entries:
	atomic_dec(&tbl->entries);
	goto out;
}

static struct neighbour **neigh_hash_alloc(unsigned int entries)
{
	unsigned long size = entries * sizeof(struct neighbour *);
	struct neighbour **ret;

	if (size <= PAGE_SIZE) {
		ret = kzalloc(size, GFP_ATOMIC);
	} else {
		ret = (struct neighbour **)
		      __get_free_pages(GFP_ATOMIC|__GFP_ZERO, get_order(size));
	}
	return ret;
}

static void neigh_hash_free(struct neighbour **hash, unsigned int entries)
{
	unsigned long size = entries * sizeof(struct neighbour *);

	if (size <= PAGE_SIZE)
		kfree(hash);
	else
		free_pages((unsigned long)hash, get_order(size));
}

static void neigh_hash_grow(struct neigh_table *tbl, unsigned long new_entries)
{
	struct neighbour **new_hash, **old_hash;
	unsigned int i, new_hash_mask, old_entries;

	NEIGH_CACHE_STAT_INC(tbl, hash_grows);

	BUG_ON(!is_power_of_2(new_entries));
	new_hash = neigh_hash_alloc(new_entries);
	if (!new_hash)
		return;

	old_entries = tbl->hash_mask + 1;
	new_hash_mask = new_entries - 1;
	old_hash = tbl->hash_buckets;

	get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));
	for (i = 0; i < old_entries; i++) {
		struct neighbour *n, *next;

		for (n = old_hash[i]; n; n = next) {
			unsigned int hash_val = tbl->hash(n->primary_key, n->dev);

			hash_val &= new_hash_mask;
			next = n->next;

			n->next = new_hash[hash_val];
			new_hash[hash_val] = n;
		}
	}
	tbl->hash_buckets = new_hash;
	tbl->hash_mask = new_hash_mask;

	neigh_hash_free(old_hash, old_entries);
}

struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
			       struct net_device *dev)
{
	struct neighbour *n;
	int key_len = tbl->key_len;
	u32 hash_val;

	NEIGH_CACHE_STAT_INC(tbl, lookups);

	read_lock_bh(&tbl->lock);
	hash_val = tbl->hash(pkey, dev);
	for (n = tbl->hash_buckets[hash_val & tbl->hash_mask]; n; n = n->next) {
		if (dev == n->dev && !memcmp(n->primary_key, pkey, key_len)) {
			neigh_hold(n);
			NEIGH_CACHE_STAT_INC(tbl, hits);
			break;
		}
	}
	read_unlock_bh(&tbl->lock);
	return n;
}
EXPORT_SYMBOL(neigh_lookup);
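
/*
 * Usage sketch (illustrative, not from the original file): a successful
 * neigh_lookup() returns the entry with its reference count raised, so the
 * caller owns a reference and must drop it when done:
 *
 *	n = neigh_lookup(tbl, &addr, dev);
 *	if (n) {
 *		examine_entry(n);	(hypothetical work step)
 *		neigh_release(n);
 *	}
 */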

struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
				     const void *pkey)
{
	struct neighbour *n;
	int key_len = tbl->key_len;
	u32 hash_val;

	NEIGH_CACHE_STAT_INC(tbl, lookups);

	read_lock_bh(&tbl->lock);
	hash_val = tbl->hash(pkey, NULL);
	for (n = tbl->hash_buckets[hash_val & tbl->hash_mask]; n; n = n->next) {
		if (!memcmp(n->primary_key, pkey, key_len) &&
		    net_eq(dev_net(n->dev), net)) {
			neigh_hold(n);
			NEIGH_CACHE_STAT_INC(tbl, hits);
			break;
		}
	}
	read_unlock_bh(&tbl->lock);
	return n;
}
EXPORT_SYMBOL(neigh_lookup_nodev);

struct neighbour *neigh_create(struct neigh_table *tbl, const void *pkey,
			       struct net_device *dev)
{
	u32 hash_val;
	int key_len = tbl->key_len;
	int error;
	struct neighbour *n1, *rc, *n = neigh_alloc(tbl);

	if (!n) {
		rc = ERR_PTR(-ENOBUFS);
		goto out;
	}

	memcpy(n->primary_key, pkey, key_len);
	n->dev = dev;
	dev_hold(dev);

	/* Protocol specific setup. */
	if (tbl->constructor && (error = tbl->constructor(n)) < 0) {
		rc = ERR_PTR(error);
		goto out_neigh_release;
	}

	/* Device specific setup. */
	if (n->parms->neigh_setup &&
	    (error = n->parms->neigh_setup(n)) < 0) {
		rc = ERR_PTR(error);
		goto out_neigh_release;
	}

	n->confirmed = jiffies - (n->parms->base_reachable_time << 1);

	write_lock_bh(&tbl->lock);

	if (atomic_read(&tbl->entries) > (tbl->hash_mask + 1))
		neigh_hash_grow(tbl, (tbl->hash_mask + 1) << 1);

	hash_val = tbl->hash(pkey, dev) & tbl->hash_mask;

	if (n->parms->dead) {
		rc = ERR_PTR(-EINVAL);
		goto out_tbl_unlock;
	}

	for (n1 = tbl->hash_buckets[hash_val]; n1; n1 = n1->next) {
		if (dev == n1->dev && !memcmp(n1->primary_key, pkey, key_len)) {
			neigh_hold(n1);
			rc = n1;
			goto out_tbl_unlock;
		}
	}

	n->next = tbl->hash_buckets[hash_val];
	tbl->hash_buckets[hash_val] = n;
	n->dead = 0;
	neigh_hold(n);
	write_unlock_bh(&tbl->lock);
	NEIGH_PRINTK2("neigh %p is created.\n", n);
	rc = n;
out:
	return rc;
out_tbl_unlock:
	write_unlock_bh(&tbl->lock);
out_neigh_release:
	neigh_release(n);
	goto out;
}
EXPORT_SYMBOL(neigh_create);
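
/*
 * Usage sketch (illustrative, not from the original file): neigh_create()
 * never returns NULL; failures come back as an ERR_PTR() value, so callers
 * test with IS_ERR() rather than a NULL check:
 *
 *	n = neigh_create(tbl, &addr, dev);
 *	if (IS_ERR(n))
 *		return PTR_ERR(n);
 *	...
 *	neigh_release(n);
 */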

struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl,
		struct net *net, const void *pkey, struct net_device *dev)
{
	struct pneigh_entry *n;
	int key_len = tbl->key_len;
	u32 hash_val = *(u32 *)(pkey + key_len - 4);

	hash_val ^= (hash_val >> 16);
	hash_val ^= hash_val >> 8;
	hash_val ^= hash_val >> 4;
	hash_val &= PNEIGH_HASHMASK;

	for (n = tbl->phash_buckets[hash_val]; n; n = n->next) {
		if (!memcmp(n->key, pkey, key_len) &&
		    (pneigh_net(n) == net) &&
		    (n->dev == dev || !n->dev))
			break;
	}

	return n;
}
EXPORT_SYMBOL_GPL(__pneigh_lookup);
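
/*
 * Illustrative sketch (not from the original file): the xor-fold that the
 * pneigh functions open-code above and below, written out as a helper.
 * It takes the last four bytes of the key and folds them (>>16, >>8, >>4)
 * so every byte influences the low nibble, then masks with PNEIGH_HASHMASK
 * (0xF) to pick one of 16 buckets:
 *
 *	static u32 pneigh_hash(const void *pkey, int key_len)
 *	{
 *		u32 h = *(u32 *)(pkey + key_len - 4);
 *
 *		h ^= h >> 16;
 *		h ^= h >> 8;
 *		h ^= h >> 4;
 *		return h & PNEIGH_HASHMASK;
 *	}
 */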

struct pneigh_entry *pneigh_lookup(struct neigh_table *tbl,
				   struct net *net, const void *pkey,
				   struct net_device *dev, int creat)
{
	struct pneigh_entry *n;
	int key_len = tbl->key_len;
	u32 hash_val = *(u32 *)(pkey + key_len - 4);

	hash_val ^= (hash_val >> 16);
	hash_val ^= hash_val >> 8;
	hash_val ^= hash_val >> 4;
	hash_val &= PNEIGH_HASHMASK;

	read_lock_bh(&tbl->lock);

	for (n = tbl->phash_buckets[hash_val]; n; n = n->next) {
		if (!memcmp(n->key, pkey, key_len) &&
		    net_eq(pneigh_net(n), net) &&
		    (n->dev == dev || !n->dev)) {
			read_unlock_bh(&tbl->lock);
			goto out;
		}
	}
	read_unlock_bh(&tbl->lock);
	n = NULL;
	if (!creat)
		goto out;

	ASSERT_RTNL();

	n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL);
	if (!n)
		goto out;

#ifdef CONFIG_NET_NS
	n->net = hold_net(net);
#endif
	memcpy(n->key, pkey, key_len);
	n->dev = dev;
	if (dev)
		dev_hold(dev);

	if (tbl->pconstructor && tbl->pconstructor(n)) {
		if (dev)
			dev_put(dev);
		release_net(net);
		kfree(n);
		n = NULL;
		goto out;
	}

	write_lock_bh(&tbl->lock);
	n->next = tbl->phash_buckets[hash_val];
	tbl->phash_buckets[hash_val] = n;
	write_unlock_bh(&tbl->lock);
out:
	return n;
}
EXPORT_SYMBOL(pneigh_lookup);


int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey,
		  struct net_device *dev)
{
	struct pneigh_entry *n, **np;
	int key_len = tbl->key_len;
	u32 hash_val = *(u32 *)(pkey + key_len - 4);

	hash_val ^= (hash_val >> 16);
	hash_val ^= hash_val >> 8;
	hash_val ^= hash_val >> 4;
	hash_val &= PNEIGH_HASHMASK;

	write_lock_bh(&tbl->lock);
	for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
	     np = &n->next) {
		if (!memcmp(n->key, pkey, key_len) && n->dev == dev &&
		    net_eq(pneigh_net(n), net)) {
			*np = n->next;
			write_unlock_bh(&tbl->lock);
			if (tbl->pdestructor)
				tbl->pdestructor(n);
			if (n->dev)
				dev_put(n->dev);
			release_net(pneigh_net(n));
			kfree(n);
			return 0;
		}
	}
	write_unlock_bh(&tbl->lock);
	return -ENOENT;
}

static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
	struct pneigh_entry *n, **np;
	u32 h;

	for (h = 0; h <= PNEIGH_HASHMASK; h++) {
		np = &tbl->phash_buckets[h];
		while ((n = *np) != NULL) {
			if (!dev || n->dev == dev) {
				*np = n->next;
				if (tbl->pdestructor)
					tbl->pdestructor(n);
				if (n->dev)
					dev_put(n->dev);
				release_net(pneigh_net(n));
				kfree(n);
				continue;
			}
			np = &n->next;
		}
	}
	return -ENOENT;
}

static void neigh_parms_destroy(struct neigh_parms *parms);

static inline void neigh_parms_put(struct neigh_parms *parms)
{
	if (atomic_dec_and_test(&parms->refcnt))
		neigh_parms_destroy(parms);
}

/*
 * The neighbour must already have been removed from the table.
 */
void neigh_destroy(struct neighbour *neigh)
{
	struct hh_cache *hh;

	NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);

	if (!neigh->dead) {
		printk(KERN_WARNING
		       "Destroying alive neighbour %p\n", neigh);
		dump_stack();
		return;
	}

	if (neigh_del_timer(neigh))
		printk(KERN_WARNING "Impossible event.\n");

	while ((hh = neigh->hh) != NULL) {
		neigh->hh = hh->hh_next;
		hh->hh_next = NULL;

		write_seqlock_bh(&hh->hh_lock);
		hh->hh_output = neigh_blackhole;
		write_sequnlock_bh(&hh->hh_lock);
		if (atomic_dec_and_test(&hh->hh_refcnt))
			kfree(hh);
	}

	skb_queue_purge(&neigh->arp_queue);

	dev_put(neigh->dev);
	neigh_parms_put(neigh->parms);

	NEIGH_PRINTK2("neigh %p is destroyed.\n", neigh);

	atomic_dec(&neigh->tbl->entries);
	kmem_cache_free(neigh->tbl->kmem_cachep, neigh);
}
EXPORT_SYMBOL(neigh_destroy);

/* Neighbour state is suspicious;
   disable fast path.

   Called with neigh write-locked.
 */
static void neigh_suspect(struct neighbour *neigh)
{
	struct hh_cache *hh;

	NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);

	neigh->output = neigh->ops->output;

	for (hh = neigh->hh; hh; hh = hh->hh_next)
		hh->hh_output = neigh->ops->output;
}

/* Neighbour state is OK;
   enable fast path.

   Called with neigh write-locked.
 */
static void neigh_connect(struct neighbour *neigh)
{
	struct hh_cache *hh;

	NEIGH_PRINTK2("neigh %p is connected.\n", neigh);

	neigh->output = neigh->ops->connected_output;

	for (hh = neigh->hh; hh; hh = hh->hh_next)
		hh->hh_output = neigh->ops->hh_output;
}

static void neigh_periodic_timer(unsigned long arg)
{
	struct neigh_table *tbl = (struct neigh_table *)arg;
	struct neighbour *n, **np;
	unsigned long expire, now = jiffies;

	NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);

	write_lock(&tbl->lock);

	/*
	 * periodically recompute ReachableTime from random function
	 */

	if (time_after(now, tbl->last_rand + 300 * HZ)) {
		struct neigh_parms *p;
		tbl->last_rand = now;
		for (p = &tbl->parms; p; p = p->next)
			p->reachable_time =
				neigh_rand_reach_time(p->base_reachable_time);
	}

	np = &tbl->hash_buckets[tbl->hash_chain_gc];
	tbl->hash_chain_gc = ((tbl->hash_chain_gc + 1) & tbl->hash_mask);

	while ((n = *np) != NULL) {
		unsigned int state;

		write_lock(&n->lock);

		state = n->nud_state;
		if (state & (NUD_PERMANENT | NUD_IN_TIMER)) {
			write_unlock(&n->lock);
			goto next_elt;
		}

		if (time_before(n->used, n->confirmed))
			n->used = n->confirmed;

		if (atomic_read(&n->refcnt) == 1 &&
		    (state == NUD_FAILED ||
		     time_after(now, n->used + n->parms->gc_staletime))) {
			*np = n->next;
			n->dead = 1;
			write_unlock(&n->lock);
			neigh_cleanup_and_release(n);
			continue;
		}
		write_unlock(&n->lock);

next_elt:
		np = &n->next;
	}

	/* Cycle through all hash buckets every base_reachable_time/2 ticks.
	 * ARP entry timeouts range from 1/2 base_reachable_time to 3/2
	 * base_reachable_time.
	 */
	expire = tbl->parms.base_reachable_time >> 1;
	expire /= (tbl->hash_mask + 1);
	if (!expire)
		expire = 1;

	if (expire > HZ)
		mod_timer(&tbl->gc_timer, round_jiffies(now + expire));
	else
		mod_timer(&tbl->gc_timer, now + expire);

	write_unlock(&tbl->lock);
}
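
/*
 * Worked example (illustrative, not from the original file): with
 * base_reachable_time = 30*HZ and an assumed table of 32 buckets
 * (hash_mask = 31), expire = (15*HZ)/32, so the timer fires roughly every
 * half second and advances one bucket per run; a full sweep of all buckets
 * then takes base_reachable_time/2 = 15 seconds, as the comment above says.
 */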

static __inline__ int neigh_max_probes(struct neighbour *n)
{
	struct neigh_parms *p = n->parms;
	return (n->nud_state & NUD_PROBE ?
		p->ucast_probes :
		p->ucast_probes + p->app_probes + p->mcast_probes);
}
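
/*
 * Example (illustrative, not from the original file): with the common ARP
 * defaults of ucast_probes = 3, mcast_probes = 3 and app_probes = 0, an
 * entry in NUD_PROBE gives up after 3 unicast probes, while one still in
 * NUD_INCOMPLETE is allowed 3 + 0 + 3 = 6 probes in total.
 */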

/* Called when a timer expires for a neighbour entry. */

static void neigh_timer_handler(unsigned long arg)
{
	unsigned long now, next;
	struct neighbour *neigh = (struct neighbour *)arg;
	unsigned state;
	int notify = 0;

	write_lock(&neigh->lock);

	state = neigh->nud_state;
	now = jiffies;
	next = now + HZ;

	if (!(state & NUD_IN_TIMER)) {
#ifndef CONFIG_SMP
		printk(KERN_WARNING "neigh: timer & !nud_in_timer\n");
#endif
		goto out;
	}

	if (state & NUD_REACHABLE) {
		if (time_before_eq(now,
				   neigh->confirmed + neigh->parms->reachable_time)) {
			NEIGH_PRINTK2("neigh %p is still alive.\n", neigh);
			next = neigh->confirmed + neigh->parms->reachable_time;
		} else if (time_before_eq(now,
					  neigh->used + neigh->parms->delay_probe_time)) {
			NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
			neigh->nud_state = NUD_DELAY;
			neigh->updated = jiffies;
			neigh_suspect(neigh);
			next = now + neigh->parms->delay_probe_time;
		} else {
			NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);
			neigh->nud_state = NUD_STALE;
			neigh->updated = jiffies;
			neigh_suspect(neigh);
			notify = 1;
		}
	} else if (state & NUD_DELAY) {
		if (time_before_eq(now,
				   neigh->confirmed + neigh->parms->delay_probe_time)) {
			NEIGH_PRINTK2("neigh %p is now reachable.\n", neigh);
			neigh->nud_state = NUD_REACHABLE;
			neigh->updated = jiffies;
			neigh_connect(neigh);
			notify = 1;
			next = neigh->confirmed + neigh->parms->reachable_time;
		} else {
			NEIGH_PRINTK2("neigh %p is probed.\n", neigh);
			neigh->nud_state = NUD_PROBE;
			neigh->updated = jiffies;
			atomic_set(&neigh->probes, 0);
			next = now + neigh->parms->retrans_time;
		}
	} else {
		/* NUD_PROBE|NUD_INCOMPLETE */
		next = now + neigh->parms->retrans_time;
	}

	if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
	    atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
		struct sk_buff *skb;

		neigh->nud_state = NUD_FAILED;
		neigh->updated = jiffies;
		notify = 1;
		NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
		NEIGH_PRINTK2("neigh %p is failed.\n", neigh);

		/* This is a very delicate place. The error_report callback
		   is a very complicated routine. In particular, it can hit
		   the same neighbour entry!

		   So we try to be careful and avoid a dead loop. --ANK
		 */
		while (neigh->nud_state == NUD_FAILED &&
		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
			write_unlock(&neigh->lock);
			neigh->ops->error_report(neigh, skb);
			write_lock(&neigh->lock);
		}
		skb_queue_purge(&neigh->arp_queue);
	}

	if (neigh->nud_state & NUD_IN_TIMER) {
		if (time_before(next, jiffies + HZ/2))
			next = jiffies + HZ/2;
		if (!mod_timer(&neigh->timer, next))
			neigh_hold(neigh);
	}
	if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
		struct sk_buff *skb = skb_peek(&neigh->arp_queue);
		/* keep skb alive even if arp_queue overflows */
		if (skb)
			skb = skb_copy(skb, GFP_ATOMIC);
		write_unlock(&neigh->lock);
		neigh->ops->solicit(neigh, skb);
		atomic_inc(&neigh->probes);
		if (skb)
			kfree_skb(skb);
	} else {
out:
		write_unlock(&neigh->lock);
	}

	if (notify)
		neigh_update_notify(neigh);

	neigh_release(neigh);
}

int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
{
	int rc;
	unsigned long now;

	write_lock_bh(&neigh->lock);

	rc = 0;
	if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
		goto out_unlock_bh;

	now = jiffies;

	if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
		if (neigh->parms->mcast_probes + neigh->parms->app_probes) {
			atomic_set(&neigh->probes, neigh->parms->ucast_probes);
			neigh->nud_state = NUD_INCOMPLETE;
			neigh->updated = jiffies;
			neigh_add_timer(neigh, now + 1);
		} else {
			neigh->nud_state = NUD_FAILED;
			neigh->updated = jiffies;
			write_unlock_bh(&neigh->lock);

			if (skb)
				kfree_skb(skb);
			return 1;
		}
	} else if (neigh->nud_state & NUD_STALE) {
		NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
		neigh->nud_state = NUD_DELAY;
		neigh->updated = jiffies;
		neigh_add_timer(neigh,
				jiffies + neigh->parms->delay_probe_time);
	}

	if (neigh->nud_state == NUD_INCOMPLETE) {
		if (skb) {
			if (skb_queue_len(&neigh->arp_queue) >=
			    neigh->parms->queue_len) {
				struct sk_buff *buff;
				buff = neigh->arp_queue.next;
				__skb_unlink(buff, &neigh->arp_queue);
				kfree_skb(buff);
			}
			__skb_queue_tail(&neigh->arp_queue, skb);
		}
		rc = 1;
	}
out_unlock_bh:
	write_unlock_bh(&neigh->lock);
	return rc;
}
EXPORT_SYMBOL(__neigh_event_send);
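
/*
 * Usage sketch (illustrative, not from the original file): output paths go
 * through the neigh_event_send() wrapper, which short-circuits for already
 * valid states and falls through to __neigh_event_send() otherwise.  A zero
 * return means the caller may transmit immediately; non-zero means the skb
 * was queued (or dropped) pending resolution:
 *
 *	if (!neigh_event_send(neigh, skb)) {
 *		... fill in the hardware header and transmit ...
 *	}
 *
 * neigh_resolve_output() below is the in-tree example of this pattern.
 */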

static void neigh_update_hhs(struct neighbour *neigh)
{
	struct hh_cache *hh;
	void (*update)(struct hh_cache*, const struct net_device*, const unsigned char *)
		= neigh->dev->header_ops->cache_update;

	if (update) {
		for (hh = neigh->hh; hh; hh = hh->hh_next) {
			write_seqlock_bh(&hh->hh_lock);
			update(hh, neigh->dev, neigh->ha);
			write_sequnlock_bh(&hh->hh_lock);
		}
	}
}



/* Generic update routine.
   -- lladdr is the new lladdr, or NULL if it is not supplied.
   -- new    is the new state.
   -- flags
	NEIGH_UPDATE_F_OVERRIDE allows overriding an existing lladdr
				if it differs.
	NEIGH_UPDATE_F_WEAK_OVERRIDE will suspect an existing "connected"
				lladdr instead of overriding it
				if it differs.
				It also allows retaining the current state
				if lladdr is unchanged.
	NEIGH_UPDATE_F_ADMIN	means that the change is administrative.

	NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows overriding an existing
				NTF_ROUTER flag.
	NEIGH_UPDATE_F_ISROUTER	indicates that the neighbour is known to
				be a router.

   The caller MUST hold a reference count on the entry.
 */

int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
		 u32 flags)
{
	u8 old;
	int err;
	int notify = 0;
	struct net_device *dev;
	int update_isrouter = 0;

	write_lock_bh(&neigh->lock);

	dev = neigh->dev;
	old = neigh->nud_state;
	err = -EPERM;

	if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
	    (old & (NUD_NOARP | NUD_PERMANENT)))
		goto out;

	if (!(new & NUD_VALID)) {
		neigh_del_timer(neigh);
		if (old & NUD_CONNECTED)
			neigh_suspect(neigh);
		neigh->nud_state = new;
		err = 0;
		notify = old & NUD_VALID;
		goto out;
	}

	/* Compare the new lladdr with the cached one */
	if (!dev->addr_len) {
		/* First case: device needs no address. */
		lladdr = neigh->ha;
	} else if (lladdr) {
		/* The second case: if something is already cached
		   and a new address is proposed:
		   - compare new & old
		   - if they are different, check the override flag
		 */
		if ((old & NUD_VALID) &&
		    !memcmp(lladdr, neigh->ha, dev->addr_len))
			lladdr = neigh->ha;
	} else {
		/* No address is supplied; if we know something,
		   use it, otherwise discard the request.
		 */
		err = -EINVAL;
		if (!(old & NUD_VALID))
			goto out;
		lladdr = neigh->ha;
	}

	if (new & NUD_CONNECTED)
		neigh->confirmed = jiffies;
	neigh->updated = jiffies;

	/* If the entry was valid and the address has not changed,
	   do not change the entry state if the new one is STALE.
	 */
	err = 0;
	update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
	if (old & NUD_VALID) {
		if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
			update_isrouter = 0;
			if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) &&
			    (old & NUD_CONNECTED)) {
				lladdr = neigh->ha;
				new = NUD_STALE;
			} else
				goto out;
		} else {
			if (lladdr == neigh->ha && new == NUD_STALE &&
			    ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) ||
			     (old & NUD_CONNECTED))
			    )
				new = old;
		}
	}

	if (new != old) {
		neigh_del_timer(neigh);
		if (new & NUD_IN_TIMER)
			neigh_add_timer(neigh, (jiffies +
						((new & NUD_REACHABLE) ?
						 neigh->parms->reachable_time :
						 0)));
		neigh->nud_state = new;
	}

	if (lladdr != neigh->ha) {
		memcpy(&neigh->ha, lladdr, dev->addr_len);
		neigh_update_hhs(neigh);
		if (!(new & NUD_CONNECTED))
			neigh->confirmed = jiffies -
				      (neigh->parms->base_reachable_time << 1);
		notify = 1;
	}
	if (new == old)
		goto out;
	if (new & NUD_CONNECTED)
		neigh_connect(neigh);
	else
		neigh_suspect(neigh);
	if (!(old & NUD_VALID)) {
		struct sk_buff *skb;

		/* Again: avoid a dead loop if something went wrong */

		while (neigh->nud_state & NUD_VALID &&
		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
			struct neighbour *n1 = neigh;
			write_unlock_bh(&neigh->lock);
			/* On shaper/eql skb->dst->neighbour != neigh :( */
			if (skb->dst && skb->dst->neighbour)
				n1 = skb->dst->neighbour;
			n1->output(skb);
			write_lock_bh(&neigh->lock);
		}
		skb_queue_purge(&neigh->arp_queue);
	}
out:
	if (update_isrouter) {
		neigh->flags = (flags & NEIGH_UPDATE_F_ISROUTER) ?
			(neigh->flags | NTF_ROUTER) :
			(neigh->flags & ~NTF_ROUTER);
	}
	write_unlock_bh(&neigh->lock);

	if (notify)
		neigh_update_notify(neigh);

	return err;
}
EXPORT_SYMBOL(neigh_update);
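
/*
 * Usage sketch (illustrative, not from the original file): a protocol that
 * has just received proof of reachability, e.g. ARP processing a reply,
 * would typically confirm the entry with something like:
 *
 *	neigh_update(n, lladdr, NUD_REACHABLE,
 *		     NEIGH_UPDATE_F_OVERRIDE | NEIGH_UPDATE_F_WEAK_OVERRIDE);
 *
 * while administrative deletion (see neigh_delete() below) uses NUD_FAILED
 * together with NEIGH_UPDATE_F_OVERRIDE | NEIGH_UPDATE_F_ADMIN.
 */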

struct neighbour *neigh_event_ns(struct neigh_table *tbl,
				 u8 *lladdr, void *saddr,
				 struct net_device *dev)
{
	struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
						 lladdr || !dev->addr_len);
	if (neigh)
		neigh_update(neigh, lladdr, NUD_STALE,
			     NEIGH_UPDATE_F_OVERRIDE);
	return neigh;
}
EXPORT_SYMBOL(neigh_event_ns);

static void neigh_hh_init(struct neighbour *n, struct dst_entry *dst,
			  __be16 protocol)
{
	struct hh_cache *hh;
	struct net_device *dev = dst->dev;

	for (hh = n->hh; hh; hh = hh->hh_next)
		if (hh->hh_type == protocol)
			break;

	if (!hh && (hh = kzalloc(sizeof(*hh), GFP_ATOMIC)) != NULL) {
		seqlock_init(&hh->hh_lock);
		hh->hh_type = protocol;
		atomic_set(&hh->hh_refcnt, 0);
		hh->hh_next = NULL;

		if (dev->header_ops->cache(n, hh)) {
			kfree(hh);
			hh = NULL;
		} else {
			atomic_inc(&hh->hh_refcnt);
			hh->hh_next = n->hh;
			n->hh = hh;
			if (n->nud_state & NUD_CONNECTED)
				hh->hh_output = n->ops->hh_output;
			else
				hh->hh_output = n->ops->output;
		}
	}
	if (hh) {
		atomic_inc(&hh->hh_refcnt);
		dst->hh = hh;
	}
}

/* This function can be used in contexts where only the old dev_queue_xmit
   worked, e.g. if you want to override the normal output path (eql, shaper),
   but resolution has not been made yet.
 */

int neigh_compat_output(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;

	__skb_pull(skb, skb_network_offset(skb));

	if (dev_hard_header(skb, dev, ntohs(skb->protocol), NULL, NULL,
			    skb->len) < 0 &&
	    dev->header_ops->rebuild(skb))
		return 0;

	return dev_queue_xmit(skb);
}
EXPORT_SYMBOL(neigh_compat_output);

/* Slow and careful. */

int neigh_resolve_output(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct neighbour *neigh;
	int rc = 0;

	if (!dst || !(neigh = dst->neighbour))
		goto discard;

	__skb_pull(skb, skb_network_offset(skb));

	if (!neigh_event_send(neigh, skb)) {
		int err;
		struct net_device *dev = neigh->dev;
		if (dev->header_ops->cache && !dst->hh) {
			write_lock_bh(&neigh->lock);
			if (!dst->hh)
				neigh_hh_init(neigh, dst, dst->ops->protocol);
			err = dev_hard_header(skb, dev, ntohs(skb->protocol),
					      neigh->ha, NULL, skb->len);
			write_unlock_bh(&neigh->lock);
		} else {
			read_lock_bh(&neigh->lock);
			err = dev_hard_header(skb, dev, ntohs(skb->protocol),
					      neigh->ha, NULL, skb->len);
			read_unlock_bh(&neigh->lock);
		}
		if (err >= 0)
			rc = neigh->ops->queue_xmit(skb);
		else
			goto out_kfree_skb;
	}
out:
	return rc;
discard:
	NEIGH_PRINTK1("neigh_resolve_output: dst=%p neigh=%p\n",
		      dst, dst ? dst->neighbour : NULL);
out_kfree_skb:
	rc = -EINVAL;
	kfree_skb(skb);
	goto out;
}
EXPORT_SYMBOL(neigh_resolve_output);

/* As fast as possible without hh cache */

int neigh_connected_output(struct sk_buff *skb)
{
	int err;
	struct dst_entry *dst = skb->dst;
	struct neighbour *neigh = dst->neighbour;
	struct net_device *dev = neigh->dev;

	__skb_pull(skb, skb_network_offset(skb));

	read_lock_bh(&neigh->lock);
	err = dev_hard_header(skb, dev, ntohs(skb->protocol),
			      neigh->ha, NULL, skb->len);
	read_unlock_bh(&neigh->lock);
	if (err >= 0)
		err = neigh->ops->queue_xmit(skb);
	else {
		err = -EINVAL;
		kfree_skb(skb);
	}
	return err;
}
EXPORT_SYMBOL(neigh_connected_output);

static void neigh_proxy_process(unsigned long arg)
{
	struct neigh_table *tbl = (struct neigh_table *)arg;
	long sched_next = 0;
	unsigned long now = jiffies;
	struct sk_buff *skb;

	spin_lock(&tbl->proxy_queue.lock);

	skb = tbl->proxy_queue.next;

	while (skb != (struct sk_buff *)&tbl->proxy_queue) {
		struct sk_buff *back = skb;
		long tdif = NEIGH_CB(back)->sched_next - now;

		skb = skb->next;
		if (tdif <= 0) {
			struct net_device *dev = back->dev;
			__skb_unlink(back, &tbl->proxy_queue);
			if (tbl->proxy_redo && netif_running(dev))
				tbl->proxy_redo(back);
			else
				kfree_skb(back);

			dev_put(dev);
		} else if (!sched_next || tdif < sched_next)
			sched_next = tdif;
	}
	del_timer(&tbl->proxy_timer);
	if (sched_next)
		mod_timer(&tbl->proxy_timer, jiffies + sched_next);
	spin_unlock(&tbl->proxy_queue.lock);
}

void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
		    struct sk_buff *skb)
{
	unsigned long now = jiffies;
	unsigned long sched_next = now + (net_random() % p->proxy_delay);

	if (tbl->proxy_queue.qlen > p->proxy_qlen) {
		kfree_skb(skb);
		return;
	}

	NEIGH_CB(skb)->sched_next = sched_next;
	NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;

	spin_lock(&tbl->proxy_queue.lock);
	if (del_timer(&tbl->proxy_timer)) {
		if (time_before(tbl->proxy_timer.expires, sched_next))
			sched_next = tbl->proxy_timer.expires;
	}
	dst_release(skb->dst);
	skb->dst = NULL;
	dev_hold(skb->dev);
	__skb_queue_tail(&tbl->proxy_queue, skb);
	mod_timer(&tbl->proxy_timer, sched_next);
	spin_unlock(&tbl->proxy_queue.lock);
}
EXPORT_SYMBOL(pneigh_enqueue);

static inline struct neigh_parms *lookup_neigh_params(struct neigh_table *tbl,
						      struct net *net, int ifindex)
{
	struct neigh_parms *p;

	for (p = &tbl->parms; p; p = p->next) {
		if ((p->dev && p->dev->ifindex == ifindex && net_eq(neigh_parms_net(p), net)) ||
		    (!p->dev && !ifindex))
			return p;
	}

	return NULL;
}

struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
				      struct neigh_table *tbl)
{
	struct neigh_parms *p, *ref;
	struct net *net;

	net = dev_net(dev);
	ref = lookup_neigh_params(tbl, net, 0);
	if (!ref)
		return NULL;

	p = kmemdup(ref, sizeof(*p), GFP_KERNEL);
	if (p) {
		p->tbl = tbl;
		atomic_set(&p->refcnt, 1);
		INIT_RCU_HEAD(&p->rcu_head);
		p->reachable_time =
				neigh_rand_reach_time(p->base_reachable_time);

		if (dev->neigh_setup && dev->neigh_setup(dev, p)) {
			kfree(p);
			return NULL;
		}

		dev_hold(dev);
		p->dev = dev;
#ifdef CONFIG_NET_NS
		p->net = hold_net(net);
#endif
		p->sysctl_table = NULL;
		write_lock_bh(&tbl->lock);
		p->next = tbl->parms.next;
		tbl->parms.next = p;
		write_unlock_bh(&tbl->lock);
	}
	return p;
}
EXPORT_SYMBOL(neigh_parms_alloc);

static void neigh_rcu_free_parms(struct rcu_head *head)
{
	struct neigh_parms *parms =
		container_of(head, struct neigh_parms, rcu_head);

	neigh_parms_put(parms);
}

void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
{
	struct neigh_parms **p;

	if (!parms || parms == &tbl->parms)
		return;
	write_lock_bh(&tbl->lock);
	for (p = &tbl->parms.next; *p; p = &(*p)->next) {
		if (*p == parms) {
			*p = parms->next;
			parms->dead = 1;
			write_unlock_bh(&tbl->lock);
			if (parms->dev)
				dev_put(parms->dev);
			call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
			return;
		}
	}
	write_unlock_bh(&tbl->lock);
	NEIGH_PRINTK1("neigh_parms_release: not found\n");
}
EXPORT_SYMBOL(neigh_parms_release);

static void neigh_parms_destroy(struct neigh_parms *parms)
{
	release_net(neigh_parms_net(parms));
	kfree(parms);
}

static struct lock_class_key neigh_table_proxy_queue_class;

void neigh_table_init_no_netlink(struct neigh_table *tbl)
{
	unsigned long now = jiffies;
	unsigned long phsize;

#ifdef CONFIG_NET_NS
	tbl->parms.net = &init_net;
#endif
	atomic_set(&tbl->parms.refcnt, 1);
	INIT_RCU_HEAD(&tbl->parms.rcu_head);
	tbl->parms.reachable_time =
			  neigh_rand_reach_time(tbl->parms.base_reachable_time);

	if (!tbl->kmem_cachep)
		tbl->kmem_cachep =
			kmem_cache_create(tbl->id, tbl->entry_size, 0,
					  SLAB_HWCACHE_ALIGN|SLAB_PANIC,
					  NULL);
	tbl->stats = alloc_percpu(struct neigh_statistics);
	if (!tbl->stats)
		panic("cannot create neighbour cache statistics");

#ifdef CONFIG_PROC_FS
	tbl->pde = proc_create(tbl->id, 0, init_net.proc_net_stat,
			       &neigh_stat_seq_fops);
	if (!tbl->pde)
		panic("cannot create neighbour proc dir entry");
	tbl->pde->data = tbl;
#endif

	tbl->hash_mask = 1;
	tbl->hash_buckets = neigh_hash_alloc(tbl->hash_mask + 1);

	phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
	tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);

	if (!tbl->hash_buckets || !tbl->phash_buckets)
		panic("cannot allocate neighbour cache hashes");

	get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));

	rwlock_init(&tbl->lock);
	setup_timer(&tbl->gc_timer, neigh_periodic_timer, (unsigned long)tbl);
	tbl->gc_timer.expires = now + 1;
	add_timer(&tbl->gc_timer);

	setup_timer(&tbl->proxy_timer, neigh_proxy_process, (unsigned long)tbl);
	skb_queue_head_init_class(&tbl->proxy_queue,
				  &neigh_table_proxy_queue_class);

	tbl->last_flush = now;
	tbl->last_rand = now + tbl->parms.reachable_time * 20;
}
EXPORT_SYMBOL(neigh_table_init_no_netlink);

void neigh_table_init(struct neigh_table *tbl)
{
	struct neigh_table *tmp;

	neigh_table_init_no_netlink(tbl);
	write_lock(&neigh_tbl_lock);
	for (tmp = neigh_tables; tmp; tmp = tmp->next) {
		if (tmp->family == tbl->family)
			break;
	}
	tbl->next = neigh_tables;
	neigh_tables = tbl;
	write_unlock(&neigh_tbl_lock);

	if (unlikely(tmp)) {
		printk(KERN_ERR "NEIGH: Registering multiple tables for "
		       "family %d\n", tbl->family);
		dump_stack();
	}
}
EXPORT_SYMBOL(neigh_table_init);
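
/*
 * Registration sketch (illustrative, not from the original file): a protocol
 * supplies a mostly statically initialized neigh_table and registers it once
 * at init time.  The field values and names below are hypothetical; see
 * arp_tbl in net/ipv4/arp.c for the real thing:
 *
 *	static struct neigh_table example_tbl = {
 *		.family		= AF_INET,
 *		.entry_size	= sizeof(struct neighbour) + 4,
 *		.key_len	= 4,
 *		.hash		= example_hash,
 *		.constructor	= example_constructor,
 *		.id		= "example_cache",
 *	};
 *
 *	neigh_table_init(&example_tbl);
 */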
1489
1490int neigh_table_clear(struct neigh_table *tbl)
1491{
1492 struct neigh_table **tp;
1493
1494 /* It is not clean... Fix it to unload IPv6 module safely */
1495 del_timer_sync(&tbl->gc_timer);
1496 del_timer_sync(&tbl->proxy_timer);
1497 pneigh_queue_purge(&tbl->proxy_queue);
1498 neigh_ifdown(tbl, NULL);
1499 if (atomic_read(&tbl->entries))
1500 printk(KERN_CRIT "neighbour leakage\n");
1501 write_lock(&neigh_tbl_lock);
1502 for (tp = &neigh_tables; *tp; tp = &(*tp)->next) {
1503 if (*tp == tbl) {
1504 *tp = tbl->next;
1505 break;
1506 }
1507 }
1508 write_unlock(&neigh_tbl_lock);
1509
1510 neigh_hash_free(tbl->hash_buckets, tbl->hash_mask + 1);
1511 tbl->hash_buckets = NULL;
1512
1513 kfree(tbl->phash_buckets);
1514 tbl->phash_buckets = NULL;
1515
1516 remove_proc_entry(tbl->id, init_net.proc_net_stat);
1517
1518 free_percpu(tbl->stats);
1519 tbl->stats = NULL;
1520
1521 kmem_cache_destroy(tbl->kmem_cachep);
1522 tbl->kmem_cachep = NULL;
1523
1524 return 0;
1525}
1526EXPORT_SYMBOL(neigh_table_clear);
1527
1528static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1529{
1530 struct net *net = sock_net(skb->sk);
1531 struct ndmsg *ndm;
1532 struct nlattr *dst_attr;
1533 struct neigh_table *tbl;
1534 struct net_device *dev = NULL;
1535 int err = -EINVAL;
1536
1537 if (nlmsg_len(nlh) < sizeof(*ndm))
1538 goto out;
1539
1540 dst_attr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_DST);
1541 if (dst_attr == NULL)
1542 goto out;
1543
1544 ndm = nlmsg_data(nlh);
1545 if (ndm->ndm_ifindex) {
1546 dev = dev_get_by_index(net, ndm->ndm_ifindex);
1547 if (dev == NULL) {
1548 err = -ENODEV;
1549 goto out;
1550 }
1551 }
1552
1553 read_lock(&neigh_tbl_lock);
1554 for (tbl = neigh_tables; tbl; tbl = tbl->next) {
1555 struct neighbour *neigh;
1556
1557 if (tbl->family != ndm->ndm_family)
1558 continue;
1559 read_unlock(&neigh_tbl_lock);
1560
1561 if (nla_len(dst_attr) < tbl->key_len)
1562 goto out_dev_put;
1563
1564 if (ndm->ndm_flags & NTF_PROXY) {
1565 err = pneigh_delete(tbl, net, nla_data(dst_attr), dev);
1566 goto out_dev_put;
1567 }
1568
1569 if (dev == NULL)
1570 goto out_dev_put;
1571
1572 neigh = neigh_lookup(tbl, nla_data(dst_attr), dev);
1573 if (neigh == NULL) {
1574 err = -ENOENT;
1575 goto out_dev_put;
1576 }
1577
1578 err = neigh_update(neigh, NULL, NUD_FAILED,
1579 NEIGH_UPDATE_F_OVERRIDE |
1580 NEIGH_UPDATE_F_ADMIN);
1581 neigh_release(neigh);
1582 goto out_dev_put;
1583 }
1584 read_unlock(&neigh_tbl_lock);
1585 err = -EAFNOSUPPORT;
1586
1587out_dev_put:
1588 if (dev)
1589 dev_put(dev);
1590out:
1591 return err;
1592}
1593
1594static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1595{
1596 struct net *net = sock_net(skb->sk);
1597 struct ndmsg *ndm;
1598 struct nlattr *tb[NDA_MAX+1];
1599 struct neigh_table *tbl;
1600 struct net_device *dev = NULL;
1601 int err;
1602
1603 err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL);
1604 if (err < 0)
1605 goto out;
1606
1607 err = -EINVAL;
1608 if (tb[NDA_DST] == NULL)
1609 goto out;
1610
1611 ndm = nlmsg_data(nlh);
1612 if (ndm->ndm_ifindex) {
1613 dev = dev_get_by_index(net, ndm->ndm_ifindex);
1614 if (dev == NULL) {
1615 err = -ENODEV;
1616 goto out;
1617 }
1618
1619 if (tb[NDA_LLADDR] && nla_len(tb[NDA_LLADDR]) < dev->addr_len)
1620 goto out_dev_put;
1621 }
1622
1623 read_lock(&neigh_tbl_lock);
1624 for (tbl = neigh_tables; tbl; tbl = tbl->next) {
1625 int flags = NEIGH_UPDATE_F_ADMIN | NEIGH_UPDATE_F_OVERRIDE;
1626 struct neighbour *neigh;
1627 void *dst, *lladdr;
1628
1629 if (tbl->family != ndm->ndm_family)
1630 continue;
1631 read_unlock(&neigh_tbl_lock);
1632
1633 if (nla_len(tb[NDA_DST]) < tbl->key_len)
1634 goto out_dev_put;
1635 dst = nla_data(tb[NDA_DST]);
1636 lladdr = tb[NDA_LLADDR] ? nla_data(tb[NDA_LLADDR]) : NULL;
1637
1638 if (ndm->ndm_flags & NTF_PROXY) {
1639 struct pneigh_entry *pn;
1640
1641 err = -ENOBUFS;
1642 pn = pneigh_lookup(tbl, net, dst, dev, 1);
1643 if (pn) {
1644 pn->flags = ndm->ndm_flags;
1645 err = 0;
1646 }
1647 goto out_dev_put;
1648 }
1649
1650 if (dev == NULL)
1651 goto out_dev_put;
1652
1653 neigh = neigh_lookup(tbl, dst, dev);
1654 if (neigh == NULL) {
1655 if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
1656 err = -ENOENT;
1657 goto out_dev_put;
1658 }
1659
1660 neigh = __neigh_lookup_errno(tbl, dst, dev);
1661 if (IS_ERR(neigh)) {
1662 err = PTR_ERR(neigh);
1663 goto out_dev_put;
1664 }
1665 } else {
1666 if (nlh->nlmsg_flags & NLM_F_EXCL) {
1667 err = -EEXIST;
1668 neigh_release(neigh);
1669 goto out_dev_put;
1670 }
1671
1672 if (!(nlh->nlmsg_flags & NLM_F_REPLACE))
1673 flags &= ~NEIGH_UPDATE_F_OVERRIDE;
1674 }
1675
1676 err = neigh_update(neigh, lladdr, ndm->ndm_state, flags);
1677 neigh_release(neigh);
1678 goto out_dev_put;
1679 }
1680
1681 read_unlock(&neigh_tbl_lock);
1682 err = -EAFNOSUPPORT;
1683
1684out_dev_put:
1685 if (dev)
1686 dev_put(dev);
1687out:
1688 return err;
1689}
1690
1691static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
1692{
1693 struct nlattr *nest;
1694
1695 nest = nla_nest_start(skb, NDTA_PARMS);
1696 if (nest == NULL)
1697 return -ENOBUFS;
1698
1699 if (parms->dev)
1700 NLA_PUT_U32(skb, NDTPA_IFINDEX, parms->dev->ifindex);
1701
1702 NLA_PUT_U32(skb, NDTPA_REFCNT, atomic_read(&parms->refcnt));
1703 NLA_PUT_U32(skb, NDTPA_QUEUE_LEN, parms->queue_len);
1704 NLA_PUT_U32(skb, NDTPA_PROXY_QLEN, parms->proxy_qlen);
1705 NLA_PUT_U32(skb, NDTPA_APP_PROBES, parms->app_probes);
1706 NLA_PUT_U32(skb, NDTPA_UCAST_PROBES, parms->ucast_probes);
1707 NLA_PUT_U32(skb, NDTPA_MCAST_PROBES, parms->mcast_probes);
1708 NLA_PUT_MSECS(skb, NDTPA_REACHABLE_TIME, parms->reachable_time);
1709 NLA_PUT_MSECS(skb, NDTPA_BASE_REACHABLE_TIME,
1710 parms->base_reachable_time);
1711 NLA_PUT_MSECS(skb, NDTPA_GC_STALETIME, parms->gc_staletime);
1712 NLA_PUT_MSECS(skb, NDTPA_DELAY_PROBE_TIME, parms->delay_probe_time);
1713 NLA_PUT_MSECS(skb, NDTPA_RETRANS_TIME, parms->retrans_time);
1714 NLA_PUT_MSECS(skb, NDTPA_ANYCAST_DELAY, parms->anycast_delay);
1715 NLA_PUT_MSECS(skb, NDTPA_PROXY_DELAY, parms->proxy_delay);
1716 NLA_PUT_MSECS(skb, NDTPA_LOCKTIME, parms->locktime);
1717
1718 return nla_nest_end(skb, nest);
1719
1720nla_put_failure:
1721 return nla_nest_cancel(skb, nest);
1722}
1723
1724static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
1725 u32 pid, u32 seq, int type, int flags)
1726{
1727 struct nlmsghdr *nlh;
1728 struct ndtmsg *ndtmsg;
1729
1730 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
1731 if (nlh == NULL)
1732 return -EMSGSIZE;
1733
1734 ndtmsg = nlmsg_data(nlh);
1735
1736 read_lock_bh(&tbl->lock);
1737 ndtmsg->ndtm_family = tbl->family;
1738 ndtmsg->ndtm_pad1 = 0;
1739 ndtmsg->ndtm_pad2 = 0;
1740
1741 NLA_PUT_STRING(skb, NDTA_NAME, tbl->id);
1742 NLA_PUT_MSECS(skb, NDTA_GC_INTERVAL, tbl->gc_interval);
1743 NLA_PUT_U32(skb, NDTA_THRESH1, tbl->gc_thresh1);
1744 NLA_PUT_U32(skb, NDTA_THRESH2, tbl->gc_thresh2);
1745 NLA_PUT_U32(skb, NDTA_THRESH3, tbl->gc_thresh3);
1746
1747 {
1748 unsigned long now = jiffies;
1749 unsigned int flush_delta = now - tbl->last_flush;
1750 unsigned int rand_delta = now - tbl->last_rand;
1751
1752 struct ndt_config ndc = {
1753 .ndtc_key_len = tbl->key_len,
1754 .ndtc_entry_size = tbl->entry_size,
1755 .ndtc_entries = atomic_read(&tbl->entries),
1756 .ndtc_last_flush = jiffies_to_msecs(flush_delta),
1757 .ndtc_last_rand = jiffies_to_msecs(rand_delta),
1758 .ndtc_hash_rnd = tbl->hash_rnd,
1759 .ndtc_hash_mask = tbl->hash_mask,
1760 .ndtc_hash_chain_gc = tbl->hash_chain_gc,
1761 .ndtc_proxy_qlen = tbl->proxy_queue.qlen,
1762 };
1763
1764 NLA_PUT(skb, NDTA_CONFIG, sizeof(ndc), &ndc);
1765 }
1766
1767 {
1768 int cpu;
1769 struct ndt_stats ndst;
1770
1771 memset(&ndst, 0, sizeof(ndst));
1772
1773 for_each_possible_cpu(cpu) {
1774 struct neigh_statistics *st;
1775
1776 st = per_cpu_ptr(tbl->stats, cpu);
1777 ndst.ndts_allocs += st->allocs;
1778 ndst.ndts_destroys += st->destroys;
1779 ndst.ndts_hash_grows += st->hash_grows;
1780 ndst.ndts_res_failed += st->res_failed;
1781 ndst.ndts_lookups += st->lookups;
1782 ndst.ndts_hits += st->hits;
1783 ndst.ndts_rcv_probes_mcast += st->rcv_probes_mcast;
1784 ndst.ndts_rcv_probes_ucast += st->rcv_probes_ucast;
1785 ndst.ndts_periodic_gc_runs += st->periodic_gc_runs;
1786 ndst.ndts_forced_gc_runs += st->forced_gc_runs;
1787 }
1788
1789 NLA_PUT(skb, NDTA_STATS, sizeof(ndst), &ndst);
1790 }
1791
1792 BUG_ON(tbl->parms.dev);
1793 if (neightbl_fill_parms(skb, &tbl->parms) < 0)
1794 goto nla_put_failure;
1795
1796 read_unlock_bh(&tbl->lock);
1797 return nlmsg_end(skb, nlh);
1798
1799nla_put_failure:
1800 read_unlock_bh(&tbl->lock);
1801 nlmsg_cancel(skb, nlh);
1802 return -EMSGSIZE;
1803}
1804
1805static int neightbl_fill_param_info(struct sk_buff *skb,
1806 struct neigh_table *tbl,
1807 struct neigh_parms *parms,
1808 u32 pid, u32 seq, int type,
1809 unsigned int flags)
1810{
1811 struct ndtmsg *ndtmsg;
1812 struct nlmsghdr *nlh;
1813
1814 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
1815 if (nlh == NULL)
1816 return -EMSGSIZE;
1817
1818 ndtmsg = nlmsg_data(nlh);
1819
1820 read_lock_bh(&tbl->lock);
1821 ndtmsg->ndtm_family = tbl->family;
1822 ndtmsg->ndtm_pad1 = 0;
1823 ndtmsg->ndtm_pad2 = 0;
1824
1825 if (nla_put_string(skb, NDTA_NAME, tbl->id) < 0 ||
1826 neightbl_fill_parms(skb, parms) < 0)
1827 goto errout;
1828
1829 read_unlock_bh(&tbl->lock);
1830 return nlmsg_end(skb, nlh);
1831errout:
1832 read_unlock_bh(&tbl->lock);
1833 nlmsg_cancel(skb, nlh);
1834 return -EMSGSIZE;
1835}
1836
1837static const struct nla_policy nl_neightbl_policy[NDTA_MAX+1] = {
1838 [NDTA_NAME] = { .type = NLA_STRING },
1839 [NDTA_THRESH1] = { .type = NLA_U32 },
1840 [NDTA_THRESH2] = { .type = NLA_U32 },
1841 [NDTA_THRESH3] = { .type = NLA_U32 },
1842 [NDTA_GC_INTERVAL] = { .type = NLA_U64 },
1843 [NDTA_PARMS] = { .type = NLA_NESTED },
1844};
1845
1846static const struct nla_policy nl_ntbl_parm_policy[NDTPA_MAX+1] = {
1847 [NDTPA_IFINDEX] = { .type = NLA_U32 },
1848 [NDTPA_QUEUE_LEN] = { .type = NLA_U32 },
1849 [NDTPA_PROXY_QLEN] = { .type = NLA_U32 },
1850 [NDTPA_APP_PROBES] = { .type = NLA_U32 },
1851 [NDTPA_UCAST_PROBES] = { .type = NLA_U32 },
1852 [NDTPA_MCAST_PROBES] = { .type = NLA_U32 },
1853 [NDTPA_BASE_REACHABLE_TIME] = { .type = NLA_U64 },
1854 [NDTPA_GC_STALETIME] = { .type = NLA_U64 },
1855 [NDTPA_DELAY_PROBE_TIME] = { .type = NLA_U64 },
1856 [NDTPA_RETRANS_TIME] = { .type = NLA_U64 },
1857 [NDTPA_ANYCAST_DELAY] = { .type = NLA_U64 },
1858 [NDTPA_PROXY_DELAY] = { .type = NLA_U64 },
1859 [NDTPA_LOCKTIME] = { .type = NLA_U64 },
1860};
1861
1862static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1863{
1864 struct net *net = sock_net(skb->sk);
1865 struct neigh_table *tbl;
1866 struct ndtmsg *ndtmsg;
1867 struct nlattr *tb[NDTA_MAX+1];
1868 int err;
1869
1870 err = nlmsg_parse(nlh, sizeof(*ndtmsg), tb, NDTA_MAX,
1871 nl_neightbl_policy);
1872 if (err < 0)
1873 goto errout;
1874
1875 if (tb[NDTA_NAME] == NULL) {
1876 err = -EINVAL;
1877 goto errout;
1878 }
1879
1880 ndtmsg = nlmsg_data(nlh);
1881 read_lock(&neigh_tbl_lock);
1882 for (tbl = neigh_tables; tbl; tbl = tbl->next) {
1883 if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
1884 continue;
1885
1886 if (nla_strcmp(tb[NDTA_NAME], tbl->id) == 0)
1887 break;
1888 }
1889
1890 if (tbl == NULL) {
1891 err = -ENOENT;
1892 goto errout_locked;
1893 }
1894
1895 /*
1896 * We acquire tbl->lock to be nice to the periodic timers and
1897 * make sure they always see a consistent set of values.
1898 */
1899 write_lock_bh(&tbl->lock);
1900
1901 if (tb[NDTA_PARMS]) {
1902 struct nlattr *tbp[NDTPA_MAX+1];
1903 struct neigh_parms *p;
1904 int i, ifindex = 0;
1905
1906 err = nla_parse_nested(tbp, NDTPA_MAX, tb[NDTA_PARMS],
1907 nl_ntbl_parm_policy);
1908 if (err < 0)
1909 goto errout_tbl_lock;
1910
1911 if (tbp[NDTPA_IFINDEX])
1912 ifindex = nla_get_u32(tbp[NDTPA_IFINDEX]);
1913
1914 p = lookup_neigh_params(tbl, net, ifindex);
1915 if (p == NULL) {
1916 err = -ENOENT;
1917 goto errout_tbl_lock;
1918 }
1919
1920 for (i = 1; i <= NDTPA_MAX; i++) {
1921 if (tbp[i] == NULL)
1922 continue;
1923
1924 switch (i) {
1925 case NDTPA_QUEUE_LEN:
1926 p->queue_len = nla_get_u32(tbp[i]);
1927 break;
1928 case NDTPA_PROXY_QLEN:
1929 p->proxy_qlen = nla_get_u32(tbp[i]);
1930 break;
1931 case NDTPA_APP_PROBES:
1932 p->app_probes = nla_get_u32(tbp[i]);
1933 break;
1934 case NDTPA_UCAST_PROBES:
1935 p->ucast_probes = nla_get_u32(tbp[i]);
1936 break;
1937 case NDTPA_MCAST_PROBES:
1938 p->mcast_probes = nla_get_u32(tbp[i]);
1939 break;
1940 case NDTPA_BASE_REACHABLE_TIME:
1941 p->base_reachable_time = nla_get_msecs(tbp[i]);
1942 break;
1943 case NDTPA_GC_STALETIME:
1944 p->gc_staletime = nla_get_msecs(tbp[i]);
1945 break;
1946 case NDTPA_DELAY_PROBE_TIME:
1947 p->delay_probe_time = nla_get_msecs(tbp[i]);
1948 break;
1949 case NDTPA_RETRANS_TIME:
1950 p->retrans_time = nla_get_msecs(tbp[i]);
1951 break;
1952 case NDTPA_ANYCAST_DELAY:
1953 p->anycast_delay = nla_get_msecs(tbp[i]);
1954 break;
1955 case NDTPA_PROXY_DELAY:
1956 p->proxy_delay = nla_get_msecs(tbp[i]);
1957 break;
1958 case NDTPA_LOCKTIME:
1959 p->locktime = nla_get_msecs(tbp[i]);
1960 break;
1961 }
1962 }
1963 }
1964
1965 if (tb[NDTA_THRESH1])
1966 tbl->gc_thresh1 = nla_get_u32(tb[NDTA_THRESH1]);
1967
1968 if (tb[NDTA_THRESH2])
1969 tbl->gc_thresh2 = nla_get_u32(tb[NDTA_THRESH2]);
1970
1971 if (tb[NDTA_THRESH3])
1972 tbl->gc_thresh3 = nla_get_u32(tb[NDTA_THRESH3]);
1973
1974 if (tb[NDTA_GC_INTERVAL])
1975 tbl->gc_interval = nla_get_msecs(tb[NDTA_GC_INTERVAL]);
1976
1977 err = 0;
1978
1979errout_tbl_lock:
1980 write_unlock_bh(&tbl->lock);
1981errout_locked:
1982 read_unlock(&neigh_tbl_lock);
1983errout:
1984 return err;
1985}
1986
1987static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
1988{
1989 struct net *net = sock_net(skb->sk);
1990 int family, tidx, nidx = 0;
1991 int tbl_skip = cb->args[0];
1992 int neigh_skip = cb->args[1];
1993 struct neigh_table *tbl;
1994
1995 family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
1996
1997 read_lock(&neigh_tbl_lock);
1998 for (tbl = neigh_tables, tidx = 0; tbl; tbl = tbl->next, tidx++) {
1999 struct neigh_parms *p;
2000
2001 if (tidx < tbl_skip || (family && tbl->family != family))
2002 continue;
2003
2004 if (neightbl_fill_info(skb, tbl, NETLINK_CB(cb->skb).pid,
2005 cb->nlh->nlmsg_seq, RTM_NEWNEIGHTBL,
2006 NLM_F_MULTI) <= 0)
2007 break;
2008
2009 for (nidx = 0, p = tbl->parms.next; p; p = p->next) {
2010 if (!net_eq(neigh_parms_net(p), net))
2011 continue;
2012
2013 if (nidx++ < neigh_skip)
2014 continue;
2015
2016 if (neightbl_fill_param_info(skb, tbl, p,
2017 NETLINK_CB(cb->skb).pid,
2018 cb->nlh->nlmsg_seq,
2019 RTM_NEWNEIGHTBL,
2020 NLM_F_MULTI) <= 0)
2021 goto out;
2022 }
2023
2024 neigh_skip = 0;
2025 }
2026out:
2027 read_unlock(&neigh_tbl_lock);
2028 cb->args[0] = tidx;
2029 cb->args[1] = nidx;
2030
2031 return skb->len;
2032}
2033
2034static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
2035 u32 pid, u32 seq, int type, unsigned int flags)
2036{
2037 unsigned long now = jiffies;
2038 struct nda_cacheinfo ci;
2039 struct nlmsghdr *nlh;
2040 struct ndmsg *ndm;
2041
2042 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
2043 if (nlh == NULL)
2044 return -EMSGSIZE;
2045
2046 ndm = nlmsg_data(nlh);
2047 ndm->ndm_family = neigh->ops->family;
2048 ndm->ndm_pad1 = 0;
2049 ndm->ndm_pad2 = 0;
2050 ndm->ndm_flags = neigh->flags;
2051 ndm->ndm_type = neigh->type;
2052 ndm->ndm_ifindex = neigh->dev->ifindex;
2053
2054 NLA_PUT(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key);
2055
2056 read_lock_bh(&neigh->lock);
2057 ndm->ndm_state = neigh->nud_state;
2058 if ((neigh->nud_state & NUD_VALID) &&
2059 nla_put(skb, NDA_LLADDR, neigh->dev->addr_len, neigh->ha) < 0) {
2060 read_unlock_bh(&neigh->lock);
2061 goto nla_put_failure;
2062 }
2063
2064 ci.ndm_used = now - neigh->used;
2065 ci.ndm_confirmed = now - neigh->confirmed;
2066 ci.ndm_updated = now - neigh->updated;
2067 ci.ndm_refcnt = atomic_read(&neigh->refcnt) - 1;
2068 read_unlock_bh(&neigh->lock);
2069
2070 NLA_PUT_U32(skb, NDA_PROBES, atomic_read(&neigh->probes));
2071 NLA_PUT(skb, NDA_CACHEINFO, sizeof(ci), &ci);
2072
2073 return nlmsg_end(skb, nlh);
2074
2075nla_put_failure:
2076 nlmsg_cancel(skb, nlh);
2077 return -EMSGSIZE;
2078}

static void neigh_update_notify(struct neighbour *neigh)
{
	call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
	__neigh_notify(neigh, RTM_NEWNEIGH, 0);
}

static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
			    struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct neighbour *n;
	int rc, h, s_h = cb->args[1];
	int idx, s_idx = idx = cb->args[2];

	read_lock_bh(&tbl->lock);
	for (h = 0; h <= tbl->hash_mask; h++) {
		if (h < s_h)
			continue;
		if (h > s_h)
			s_idx = 0;
		for (n = tbl->hash_buckets[h], idx = 0; n; n = n->next) {
			int lidx;
			if (dev_net(n->dev) != net)
				continue;
			lidx = idx++;
			if (lidx < s_idx)
				continue;
			if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).pid,
					    cb->nlh->nlmsg_seq,
					    RTM_NEWNEIGH,
					    NLM_F_MULTI) <= 0) {
				read_unlock_bh(&tbl->lock);
				rc = -1;
				goto out;
			}
		}
	}
	read_unlock_bh(&tbl->lock);
	rc = skb->len;
out:
	cb->args[1] = h;
	cb->args[2] = idx;
	return rc;
}
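/*
 * The dump position within one table is likewise two-dimensional:
 * args[1] remembers the hash bucket and args[2] the index reached in
 * that bucket's chain. Chains can mutate between passes (the lock is
 * dropped whenever an skb is handed back to the caller), so an entry
 * added or removed mid-dump may be skipped or reported twice; netlink
 * dumps are best-effort snapshots, not atomic ones.
 */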

static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct neigh_table *tbl;
	int t, family, s_t;

	read_lock(&neigh_tbl_lock);
	family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
	s_t = cb->args[0];

	for (tbl = neigh_tables, t = 0; tbl; tbl = tbl->next, t++) {
		if (t < s_t || (family && tbl->family != family))
			continue;
		if (t > s_t)
			memset(&cb->args[1], 0, sizeof(cb->args) -
						sizeof(cb->args[0]));
		if (neigh_dump_table(tbl, skb, cb) < 0)
			break;
	}
	read_unlock(&neigh_tbl_lock);

	cb->args[0] = t;
	return skb->len;
}

void neigh_for_each(struct neigh_table *tbl,
		    void (*cb)(struct neighbour *, void *), void *cookie)
{
	int chain;

	read_lock_bh(&tbl->lock);
	for (chain = 0; chain <= tbl->hash_mask; chain++) {
		struct neighbour *n;

		for (n = tbl->hash_buckets[chain]; n; n = n->next)
			cb(n, cookie);
	}
	read_unlock_bh(&tbl->lock);
}
EXPORT_SYMBOL(neigh_for_each);
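/*
 * Example (hypothetical caller, not from this file): counting the
 * entries of a table with neigh_for_each(). The callback runs under
 * read_lock_bh(&tbl->lock), so it must not sleep and must not take
 * tbl->lock or unlink entries itself:
 *
 *	static void count_one(struct neighbour *n, void *cookie)
 *	{
 *		(*(int *)cookie)++;
 *	}
 *
 *	int count = 0;
 *	neigh_for_each(&arp_tbl, count_one, &count);
 */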

/* The tbl->lock must be held as a writer and BH disabled. */
void __neigh_for_each_release(struct neigh_table *tbl,
			      int (*cb)(struct neighbour *))
{
	int chain;

	for (chain = 0; chain <= tbl->hash_mask; chain++) {
		struct neighbour *n, **np;

		np = &tbl->hash_buckets[chain];
		while ((n = *np) != NULL) {
			int release;

			write_lock(&n->lock);
			release = cb(n);
			if (release) {
				*np = n->next;
				n->dead = 1;
			} else
				np = &n->next;
			write_unlock(&n->lock);
			if (release)
				neigh_cleanup_and_release(n);
		}
	}
}
EXPORT_SYMBOL(__neigh_for_each_release);
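/*
 * Example (sketch, assuming the caller already holds tbl->lock for
 * writing with BHs off, e.g. from its own gc pass): unlink every
 * entry that has gone to NUD_FAILED. A nonzero return from the
 * callback marks the entry dead and unhashes it; the reference the
 * hash table held is then dropped via neigh_cleanup_and_release(),
 * so entries still referenced elsewhere stay allocated until their
 * last neigh_release():
 *
 *	static int release_failed(struct neighbour *n)
 *	{
 *		return n->nud_state & NUD_FAILED;
 *	}
 *
 *	__neigh_for_each_release(tbl, release_failed);
 */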

#ifdef CONFIG_PROC_FS

static struct neighbour *neigh_get_first(struct seq_file *seq)
{
	struct neigh_seq_state *state = seq->private;
	struct net *net = seq_file_net(seq);
	struct neigh_table *tbl = state->tbl;
	struct neighbour *n = NULL;
	int bucket = state->bucket;

	state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
	for (bucket = 0; bucket <= tbl->hash_mask; bucket++) {
		n = tbl->hash_buckets[bucket];

		while (n) {
			if (!net_eq(dev_net(n->dev), net))
				goto next;
			if (state->neigh_sub_iter) {
				loff_t fakep = 0;
				void *v;

				v = state->neigh_sub_iter(state, n, &fakep);
				if (!v)
					goto next;
			}
			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
				break;
			if (n->nud_state & ~NUD_NOARP)
				break;
		next:
			n = n->next;
		}

		if (n)
			break;
	}
	state->bucket = bucket;

	return n;
}

static struct neighbour *neigh_get_next(struct seq_file *seq,
					struct neighbour *n,
					loff_t *pos)
{
	struct neigh_seq_state *state = seq->private;
	struct net *net = seq_file_net(seq);
	struct neigh_table *tbl = state->tbl;

	if (state->neigh_sub_iter) {
		void *v = state->neigh_sub_iter(state, n, pos);
		if (v)
			return n;
	}
	n = n->next;

	while (1) {
		while (n) {
			if (!net_eq(dev_net(n->dev), net))
				goto next;
			if (state->neigh_sub_iter) {
				void *v = state->neigh_sub_iter(state, n, pos);
				if (v)
					return n;
				goto next;
			}
			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
				break;

			if (n->nud_state & ~NUD_NOARP)
				break;
		next:
			n = n->next;
		}

		if (n)
			break;

		if (++state->bucket > tbl->hash_mask)
			break;

		n = tbl->hash_buckets[state->bucket];
	}

	if (n && pos)
		--(*pos);
	return n;
}

static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
{
	struct neighbour *n = neigh_get_first(seq);

	if (n) {
		while (*pos) {
			n = neigh_get_next(seq, n, pos);
			if (!n)
				break;
		}
	}
	return *pos ? NULL : n;
}

static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
{
	struct neigh_seq_state *state = seq->private;
	struct net *net = seq_file_net(seq);
	struct neigh_table *tbl = state->tbl;
	struct pneigh_entry *pn = NULL;
	int bucket = state->bucket;

	state->flags |= NEIGH_SEQ_IS_PNEIGH;
	for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
		pn = tbl->phash_buckets[bucket];
		while (pn && !net_eq(pneigh_net(pn), net))
			pn = pn->next;
		if (pn)
			break;
	}
	state->bucket = bucket;

	return pn;
}

static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
					    struct pneigh_entry *pn,
					    loff_t *pos)
{
	struct neigh_seq_state *state = seq->private;
	struct net *net = seq_file_net(seq);
	struct neigh_table *tbl = state->tbl;

	pn = pn->next;
	while (!pn) {
		if (++state->bucket > PNEIGH_HASHMASK)
			break;
		pn = tbl->phash_buckets[state->bucket];
		while (pn && !net_eq(pneigh_net(pn), net))
			pn = pn->next;
		if (pn)
			break;
	}

	if (pn && pos)
		--(*pos);

	return pn;
}

static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
{
	struct pneigh_entry *pn = pneigh_get_first(seq);

	if (pn) {
		while (*pos) {
			pn = pneigh_get_next(seq, pn, pos);
			if (!pn)
				break;
		}
	}
	return *pos ? NULL : pn;
}

static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
{
	struct neigh_seq_state *state = seq->private;
	void *rc;

	rc = neigh_get_idx(seq, pos);
	if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
		rc = pneigh_get_idx(seq, pos);

	return rc;
}

void *neigh_seq_start(struct seq_file *seq, loff_t *pos,
		      struct neigh_table *tbl, unsigned int neigh_seq_flags)
	__acquires(tbl->lock)
{
	struct neigh_seq_state *state = seq->private;
	loff_t pos_minus_one;

	state->tbl = tbl;
	state->bucket = 0;
	state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);

	read_lock_bh(&tbl->lock);

	pos_minus_one = *pos - 1;
	return *pos ? neigh_get_idx_any(seq, &pos_minus_one) : SEQ_START_TOKEN;
}
EXPORT_SYMBOL(neigh_seq_start);
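/*
 * Example (sketch, modelled on net/ipv4/arp.c): a protocol exposes
 * its cache through these helpers by forwarding its seq_file start
 * method here with its own table and flags:
 *
 *	static void *arp_seq_start(struct seq_file *seq, loff_t *pos)
 *	{
 *		return neigh_seq_start(seq, pos, &arp_tbl,
 *				       NEIGH_SEQ_SKIP_NOARP);
 *	}
 *
 * neigh_seq_next() and neigh_seq_stop() below then slot directly into
 * the protocol's seq_operations.
 */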

void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct neigh_seq_state *state;
	void *rc;

	if (v == SEQ_START_TOKEN) {
		rc = neigh_get_idx(seq, pos);
		goto out;
	}

	state = seq->private;
	if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) {
		rc = neigh_get_next(seq, v, NULL);
		if (rc)
			goto out;
		if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY))
			rc = pneigh_get_first(seq);
	} else {
		BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY);
		rc = pneigh_get_next(seq, v, NULL);
	}
out:
	++(*pos);
	return rc;
}
EXPORT_SYMBOL(neigh_seq_next);

void neigh_seq_stop(struct seq_file *seq, void *v)
	__releases(tbl->lock)
{
	struct neigh_seq_state *state = seq->private;
	struct neigh_table *tbl = state->tbl;

	read_unlock_bh(&tbl->lock);
}
EXPORT_SYMBOL(neigh_seq_stop);

/* statistics via seq_file */

static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct proc_dir_entry *pde = seq->private;
	struct neigh_table *tbl = pde->data;
	int cpu;

	if (*pos == 0)
		return SEQ_START_TOKEN;

	for (cpu = *pos - 1; cpu < NR_CPUS; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu + 1;
		return per_cpu_ptr(tbl->stats, cpu);
	}
	return NULL;
}

static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct proc_dir_entry *pde = seq->private;
	struct neigh_table *tbl = pde->data;
	int cpu;

	for (cpu = *pos; cpu < NR_CPUS; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu + 1;
		return per_cpu_ptr(tbl->stats, cpu);
	}
	return NULL;
}

static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
{

}

static int neigh_stat_seq_show(struct seq_file *seq, void *v)
{
	struct proc_dir_entry *pde = seq->private;
	struct neigh_table *tbl = pde->data;
	struct neigh_statistics *st = v;

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "entries allocs destroys hash_grows lookups hits res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs\n");
		return 0;
	}

	seq_printf(seq, "%08x %08lx %08lx %08lx %08lx %08lx %08lx "
			"%08lx %08lx %08lx %08lx\n",
		   atomic_read(&tbl->entries),

		   st->allocs,
		   st->destroys,
		   st->hash_grows,

		   st->lookups,
		   st->hits,

		   st->res_failed,

		   st->rcv_probes_mcast,
		   st->rcv_probes_ucast,

		   st->periodic_gc_runs,
		   st->forced_gc_runs
		   );

	return 0;
}

static const struct seq_operations neigh_stat_seq_ops = {
	.start = neigh_stat_seq_start,
	.next  = neigh_stat_seq_next,
	.stop  = neigh_stat_seq_stop,
	.show  = neigh_stat_seq_show,
};

static int neigh_stat_seq_open(struct inode *inode, struct file *file)
{
	int ret = seq_open(file, &neigh_stat_seq_ops);

	if (!ret) {
		struct seq_file *sf = file->private_data;
		sf->private = PDE(inode);
	}
	return ret;
}

static const struct file_operations neigh_stat_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = neigh_stat_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release,
};

#endif /* CONFIG_PROC_FS */

static inline size_t neigh_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct ndmsg))
	       + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
	       + nla_total_size(MAX_ADDR_LEN) /* NDA_LLADDR */
	       + nla_total_size(sizeof(struct nda_cacheinfo))
	       + nla_total_size(4); /* NDA_PROBES */
}
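/*
 * This is a worst-case bound, not an exact size: MAX_ADDR_LEN covers
 * any key or link-layer address a table can emit. __neigh_notify()
 * below relies on it when sizing the skb, which is why -EMSGSIZE from
 * neigh_fill_info() there is treated as a bug rather than a
 * recoverable condition.
 */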

static void __neigh_notify(struct neighbour *n, int type, int flags)
{
	struct net *net = dev_net(n->dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(neigh_nlmsg_size(), GFP_ATOMIC);
	if (skb == NULL)
		goto errout;

	err = neigh_fill_info(skb, n, 0, 0, type, flags);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in neigh_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	err = rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
errout:
	if (err < 0)
		rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
}

#ifdef CONFIG_ARPD
void neigh_app_ns(struct neighbour *n)
{
	__neigh_notify(n, RTM_GETNEIGH, NLM_F_REQUEST);
}
EXPORT_SYMBOL(neigh_app_ns);
#endif /* CONFIG_ARPD */

#ifdef CONFIG_SYSCTL

static struct neigh_sysctl_table {
	struct ctl_table_header *sysctl_header;
	struct ctl_table neigh_vars[__NET_NEIGH_MAX];
	char *dev_name;
} neigh_sysctl_template __read_mostly = {
	.neigh_vars = {
		{
			.ctl_name	= NET_NEIGH_MCAST_SOLICIT,
			.procname	= "mcast_solicit",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec,
		},
		{
			.ctl_name	= NET_NEIGH_UCAST_SOLICIT,
			.procname	= "ucast_solicit",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec,
		},
		{
			.ctl_name	= NET_NEIGH_APP_SOLICIT,
			.procname	= "app_solicit",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec,
		},
		{
			.procname	= "retrans_time",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_userhz_jiffies,
		},
		{
			.ctl_name	= NET_NEIGH_REACHABLE_TIME,
			.procname	= "base_reachable_time",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_jiffies,
			.strategy	= &sysctl_jiffies,
		},
		{
			.ctl_name	= NET_NEIGH_DELAY_PROBE_TIME,
			.procname	= "delay_first_probe_time",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_jiffies,
			.strategy	= &sysctl_jiffies,
		},
		{
			.ctl_name	= NET_NEIGH_GC_STALE_TIME,
			.procname	= "gc_stale_time",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_jiffies,
			.strategy	= &sysctl_jiffies,
		},
		{
			.ctl_name	= NET_NEIGH_UNRES_QLEN,
			.procname	= "unres_qlen",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec,
		},
		{
			.ctl_name	= NET_NEIGH_PROXY_QLEN,
			.procname	= "proxy_qlen",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec,
		},
		{
			.procname	= "anycast_delay",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_userhz_jiffies,
		},
		{
			.procname	= "proxy_delay",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_userhz_jiffies,
		},
		{
			.procname	= "locktime",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_userhz_jiffies,
		},
		{
			.ctl_name	= NET_NEIGH_RETRANS_TIME_MS,
			.procname	= "retrans_time_ms",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_ms_jiffies,
			.strategy	= &sysctl_ms_jiffies,
		},
		{
			.ctl_name	= NET_NEIGH_REACHABLE_TIME_MS,
			.procname	= "base_reachable_time_ms",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_ms_jiffies,
			.strategy	= &sysctl_ms_jiffies,
		},
		{
			.ctl_name	= NET_NEIGH_GC_INTERVAL,
			.procname	= "gc_interval",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_jiffies,
			.strategy	= &sysctl_jiffies,
		},
		{
			.ctl_name	= NET_NEIGH_GC_THRESH1,
			.procname	= "gc_thresh1",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec,
		},
		{
			.ctl_name	= NET_NEIGH_GC_THRESH2,
			.procname	= "gc_thresh2",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec,
		},
		{
			.ctl_name	= NET_NEIGH_GC_THRESH3,
			.procname	= "gc_thresh3",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec,
		},
		{},
	},
};
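/*
 * The template above is copied per neigh_parms and patched by index:
 * neigh_sysctl_register() wires neigh_vars[0..13] to the fields of a
 * struct neigh_parms, and neigh_vars[14..17] (gc_interval and the
 * gc_thresh knobs) to storage the core keeps just past the default
 * parms, since those are per-table rather than per-device settings.
 * Keep the array order and those indices in sync when adding entries.
 */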

int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
			  int p_id, int pdev_id, char *p_name,
			  proc_handler *handler, ctl_handler *strategy)
{
	struct neigh_sysctl_table *t;
	const char *dev_name_source = NULL;

#define NEIGH_CTL_PATH_ROOT	0
#define NEIGH_CTL_PATH_PROTO	1
#define NEIGH_CTL_PATH_NEIGH	2
#define NEIGH_CTL_PATH_DEV	3

	struct ctl_path neigh_path[] = {
		{ .procname = "net",	 .ctl_name = CTL_NET, },
		{ .procname = "proto",	 .ctl_name = 0, },
		{ .procname = "neigh",	 .ctl_name = 0, },
		{ .procname = "default", .ctl_name = NET_PROTO_CONF_DEFAULT, },
		{ },
	};

	t = kmemdup(&neigh_sysctl_template, sizeof(*t), GFP_KERNEL);
	if (!t)
		goto err;

	t->neigh_vars[0].data  = &p->mcast_probes;
	t->neigh_vars[1].data  = &p->ucast_probes;
	t->neigh_vars[2].data  = &p->app_probes;
	t->neigh_vars[3].data  = &p->retrans_time;
	t->neigh_vars[4].data  = &p->base_reachable_time;
	t->neigh_vars[5].data  = &p->delay_probe_time;
	t->neigh_vars[6].data  = &p->gc_staletime;
	t->neigh_vars[7].data  = &p->queue_len;
	t->neigh_vars[8].data  = &p->proxy_qlen;
	t->neigh_vars[9].data  = &p->anycast_delay;
	t->neigh_vars[10].data = &p->proxy_delay;
	t->neigh_vars[11].data = &p->locktime;
	t->neigh_vars[12].data = &p->retrans_time;
	t->neigh_vars[13].data = &p->base_reachable_time;

	if (dev) {
		dev_name_source = dev->name;
		neigh_path[NEIGH_CTL_PATH_DEV].ctl_name = dev->ifindex;
		/* Terminate the table early */
		memset(&t->neigh_vars[14], 0, sizeof(t->neigh_vars[14]));
	} else {
		dev_name_source = neigh_path[NEIGH_CTL_PATH_DEV].procname;
		t->neigh_vars[14].data = (int *)(p + 1);
		t->neigh_vars[15].data = (int *)(p + 1) + 1;
		t->neigh_vars[16].data = (int *)(p + 1) + 2;
		t->neigh_vars[17].data = (int *)(p + 1) + 3;
	}

	if (handler || strategy) {
		/* RetransTime */
		t->neigh_vars[3].proc_handler = handler;
		t->neigh_vars[3].strategy = strategy;
		t->neigh_vars[3].extra1 = dev;
		if (!strategy)
			t->neigh_vars[3].ctl_name = CTL_UNNUMBERED;
		/* ReachableTime */
		t->neigh_vars[4].proc_handler = handler;
		t->neigh_vars[4].strategy = strategy;
		t->neigh_vars[4].extra1 = dev;
		if (!strategy)
			t->neigh_vars[4].ctl_name = CTL_UNNUMBERED;
		/* RetransTime (in milliseconds) */
		t->neigh_vars[12].proc_handler = handler;
		t->neigh_vars[12].strategy = strategy;
		t->neigh_vars[12].extra1 = dev;
		if (!strategy)
			t->neigh_vars[12].ctl_name = CTL_UNNUMBERED;
		/* ReachableTime (in milliseconds) */
		t->neigh_vars[13].proc_handler = handler;
		t->neigh_vars[13].strategy = strategy;
		t->neigh_vars[13].extra1 = dev;
		if (!strategy)
			t->neigh_vars[13].ctl_name = CTL_UNNUMBERED;
	}

	t->dev_name = kstrdup(dev_name_source, GFP_KERNEL);
	if (!t->dev_name)
		goto free;

	neigh_path[NEIGH_CTL_PATH_DEV].procname = t->dev_name;
	neigh_path[NEIGH_CTL_PATH_NEIGH].ctl_name = pdev_id;
	neigh_path[NEIGH_CTL_PATH_PROTO].procname = p_name;
	neigh_path[NEIGH_CTL_PATH_PROTO].ctl_name = p_id;

	t->sysctl_header =
		register_net_sysctl_table(neigh_parms_net(p), neigh_path, t->neigh_vars);
	if (!t->sysctl_header)
		goto free_procname;

	p->sysctl_table = t;
	return 0;

free_procname:
	kfree(t->dev_name);
free:
	kfree(t);
err:
	return -ENOBUFS;
}
EXPORT_SYMBOL(neigh_sysctl_register);
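/*
 * Example (sketch, following what net/ipv4/arp.c does for IPv4): a
 * protocol registers its per-device or default knobs with:
 *
 *	neigh_sysctl_register(dev, p, NET_IPV4, NET_IPV4_NEIGH, "ipv4",
 *			      NULL, NULL);
 *
 * which creates /proc/sys/net/ipv4/neigh/<dev>/... (or .../default/
 * when dev is NULL). Passing a handler/strategy pair lets a protocol
 * intercept the four *time variables, as IPv6 does to propagate
 * changes into its per-device state.
 */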

void neigh_sysctl_unregister(struct neigh_parms *p)
{
	if (p->sysctl_table) {
		struct neigh_sysctl_table *t = p->sysctl_table;
		p->sysctl_table = NULL;
		unregister_sysctl_table(t->sysctl_header);
		kfree(t->dev_name);
		kfree(t);
	}
}
EXPORT_SYMBOL(neigh_sysctl_unregister);

#endif /* CONFIG_SYSCTL */

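/*
 * Wire the PF_UNSPEC neighbour messages into rtnetlink. For each
 * message type, rtnl_register() takes a "doit" handler for single
 * requests and a "dumpit" handler for NLM_F_DUMP requests:
 * NEWNEIGH/DELNEIGH/SETNEIGHTBL only make sense as commands, while
 * GETNEIGH and GETNEIGHTBL are served here only as dumps.
 */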
static int __init neigh_init(void)
{
	rtnl_register(PF_UNSPEC, RTM_NEWNEIGH, neigh_add, NULL);
	rtnl_register(PF_UNSPEC, RTM_DELNEIGH, neigh_delete, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETNEIGH, NULL, neigh_dump_info);

	rtnl_register(PF_UNSPEC, RTM_GETNEIGHTBL, NULL, neightbl_dump_info);
	rtnl_register(PF_UNSPEC, RTM_SETNEIGHTBL, neightbl_set, NULL);

	return 0;
}

subsys_initcall(neigh_init);