/*
 *	Generic address resolution entity
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Fixes:
 *	Vitaly E. Lavrov	releasing NULL neighbor in neigh_add.
 *	Harald Welte		Add neighbour cache statistics like rtstat
 */
17
18 #include <linux/types.h>
19 #include <linux/kernel.h>
20 #include <linux/module.h>
21 #include <linux/socket.h>
22 #include <linux/netdevice.h>
23 #include <linux/proc_fs.h>
24 #ifdef CONFIG_SYSCTL
25 #include <linux/sysctl.h>
26 #endif
27 #include <linux/times.h>
28 #include <net/net_namespace.h>
29 #include <net/neighbour.h>
30 #include <net/dst.h>
31 #include <net/sock.h>
32 #include <net/netevent.h>
33 #include <net/netlink.h>
34 #include <linux/rtnetlink.h>
35 #include <linux/random.h>
36 #include <linux/string.h>
37 #include <linux/log2.h>
38
39 #define NEIGH_DEBUG 1
40
41 #define NEIGH_PRINTK(x...) printk(x)
42 #define NEIGH_NOPRINTK(x...) do { ; } while(0)
43 #define NEIGH_PRINTK0 NEIGH_PRINTK
44 #define NEIGH_PRINTK1 NEIGH_NOPRINTK
45 #define NEIGH_PRINTK2 NEIGH_NOPRINTK
46
47 #if NEIGH_DEBUG >= 1
48 #undef NEIGH_PRINTK1
49 #define NEIGH_PRINTK1 NEIGH_PRINTK
50 #endif
51 #if NEIGH_DEBUG >= 2
52 #undef NEIGH_PRINTK2
53 #define NEIGH_PRINTK2 NEIGH_PRINTK
54 #endif
55
56 #define PNEIGH_HASHMASK 0xF
57
58 static void neigh_timer_handler(unsigned long arg);
59 static void __neigh_notify(struct neighbour *n, int type, int flags);
60 static void neigh_update_notify(struct neighbour *neigh);
61 static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
62 void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev);
63
64 static struct neigh_table *neigh_tables;
65 #ifdef CONFIG_PROC_FS
66 static const struct file_operations neigh_stat_seq_fops;
67 #endif

/*
   Neighbour hash table buckets are protected with the rwlock tbl->lock.

   - All scans of and updates to hash buckets MUST be made under this lock.
   - NOTHING clever should be done under this lock: no callbacks
     into protocol backends, no attempts to send anything to the network.
     Doing so will result in deadlocks if the backend/driver wants to use
     the neighbour cache.
   - If an entry requires some non-trivial action, increase
     its reference count and release the table lock.

   Neighbour entries are protected:
   - with a reference count.
   - with the rwlock neigh->lock.

   The reference count prevents destruction.

   neigh->lock mainly serializes the ll address data and its validity state.
   However, the same lock is also used to protect other entry fields:
    - the timer
    - the resolution queue

   Again, nothing clever shall be done under neigh->lock;
   the most complicated procedure we allow is dev->hard_header.
   dev->hard_header is assumed to be simplistic and not to make
   callbacks into the neighbour tables.

   The last lock is neigh_tbl_lock. It is a pure SMP lock, protecting
   the list of neighbour tables. This list is used only in process context.
 */

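/*
   Illustrative pattern only (not used verbatim in this file; "tbl", "n"
   and "i" are placeholders): the safe way to do something non-trivial
   with an entry found during a bucket scan is to take a reference under
   tbl->lock, drop the lock, and only then act:

	read_lock_bh(&tbl->lock);
	n = tbl->hash_buckets[i];	// walk the chain...
	neigh_hold(n);			// pin the entry
	read_unlock_bh(&tbl->lock);
	...				// slow work, may take neigh->lock
	neigh_release(n);
 */
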
static DEFINE_RWLOCK(neigh_tbl_lock);

static int neigh_blackhole(struct sk_buff *skb)
{
	kfree_skb(skb);
	return -ENETDOWN;
}

static void neigh_cleanup_and_release(struct neighbour *neigh)
{
	if (neigh->parms->neigh_cleanup)
		neigh->parms->neigh_cleanup(neigh);

	__neigh_notify(neigh, RTM_DELNEIGH, 0);
	neigh_release(neigh);
}
/*
 * It is a random distribution in the interval (1/2)*base...(3/2)*base.
 * It corresponds to the default IPv6 settings and is not overridable,
 * because it is a really reasonable choice.
 */

unsigned long neigh_rand_reach_time(unsigned long base)
{
	return (base ? (net_random() % base) + (base >> 1) : 0);
}
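
/*
 * Worked example (assuming HZ == 100 for illustration): with
 * base == 30 * HZ, net_random() % base yields 0..2999 jiffies and
 * base >> 1 adds 1500, so the result is uniform over 1500..4499
 * jiffies, i.e. 15..45 seconds -- the (1/2)*base...(3/2)*base
 * interval described above.
 */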


static int neigh_forced_gc(struct neigh_table *tbl)
{
	int shrunk = 0;
	int i;

	NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);

	write_lock_bh(&tbl->lock);
	for (i = 0; i <= tbl->hash_mask; i++) {
		struct neighbour *n, **np;

		np = &tbl->hash_buckets[i];
		while ((n = *np) != NULL) {
			/* Neighbour record may be discarded if:
			 * - nobody refers to it.
			 * - it is not permanent
			 */
			write_lock(&n->lock);
			if (atomic_read(&n->refcnt) == 1 &&
			    !(n->nud_state & NUD_PERMANENT)) {
				*np = n->next;
				n->dead = 1;
				shrunk = 1;
				write_unlock(&n->lock);
				neigh_cleanup_and_release(n);
				continue;
			}
			write_unlock(&n->lock);
			np = &n->next;
		}
	}

	tbl->last_flush = jiffies;

	write_unlock_bh(&tbl->lock);

	return shrunk;
}

static int neigh_del_timer(struct neighbour *n)
{
	if ((n->nud_state & NUD_IN_TIMER) &&
	    del_timer(&n->timer)) {
		neigh_release(n);
		return 1;
	}
	return 0;
}

static void pneigh_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(list)) != NULL) {
		dev_put(skb->dev);
		kfree_skb(skb);
	}
}

static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev)
{
	int i;

	for (i = 0; i <= tbl->hash_mask; i++) {
		struct neighbour *n, **np = &tbl->hash_buckets[i];

		while ((n = *np) != NULL) {
			if (dev && n->dev != dev) {
				np = &n->next;
				continue;
			}
			*np = n->next;
			write_lock(&n->lock);
			neigh_del_timer(n);
			n->dead = 1;

			if (atomic_read(&n->refcnt) != 1) {
				/* The most unpleasant situation:
				   we must destroy the neighbour entry,
				   but someone still uses it.

				   The destroy will be delayed until
				   the last user releases it, but
				   we must kill timers etc. and move
				   it to a safe state.
				 */
				skb_queue_purge(&n->arp_queue);
				n->output = neigh_blackhole;
				if (n->nud_state & NUD_VALID)
					n->nud_state = NUD_NOARP;
				else
					n->nud_state = NUD_NONE;
				NEIGH_PRINTK2("neigh %p is stray.\n", n);
			}
			write_unlock(&n->lock);
			neigh_cleanup_and_release(n);
		}
	}
}

void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
{
	write_lock_bh(&tbl->lock);
	neigh_flush_dev(tbl, dev);
	write_unlock_bh(&tbl->lock);
}

int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
	write_lock_bh(&tbl->lock);
	neigh_flush_dev(tbl, dev);
	pneigh_ifdown(tbl, dev);
	write_unlock_bh(&tbl->lock);

	del_timer_sync(&tbl->proxy_timer);
	pneigh_queue_purge(&tbl->proxy_queue);
	return 0;
}

static struct neighbour *neigh_alloc(struct neigh_table *tbl)
{
	struct neighbour *n = NULL;
	unsigned long now = jiffies;
	int entries;

	entries = atomic_inc_return(&tbl->entries) - 1;
	if (entries >= tbl->gc_thresh3 ||
	    (entries >= tbl->gc_thresh2 &&
	     time_after(now, tbl->last_flush + 5 * HZ))) {
		if (!neigh_forced_gc(tbl) &&
		    entries >= tbl->gc_thresh3)
			goto out_entries;
	}

	n = kmem_cache_zalloc(tbl->kmem_cachep, GFP_ATOMIC);
	if (!n)
		goto out_entries;

	skb_queue_head_init(&n->arp_queue);
	rwlock_init(&n->lock);
	n->updated = n->used = now;
	n->nud_state = NUD_NONE;
	n->output = neigh_blackhole;
	n->parms = neigh_parms_clone(&tbl->parms);
	init_timer(&n->timer);
	n->timer.function = neigh_timer_handler;
	n->timer.data = (unsigned long)n;

	NEIGH_CACHE_STAT_INC(tbl, allocs);
	n->tbl = tbl;
	atomic_set(&n->refcnt, 1);
	n->dead = 1;
out:
	return n;

out_entries:
	atomic_dec(&tbl->entries);
	goto out;
}

static struct neighbour **neigh_hash_alloc(unsigned int entries)
{
	unsigned long size = entries * sizeof(struct neighbour *);
	struct neighbour **ret;

	if (size <= PAGE_SIZE) {
		ret = kzalloc(size, GFP_ATOMIC);
	} else {
		ret = (struct neighbour **)
		      __get_free_pages(GFP_ATOMIC|__GFP_ZERO, get_order(size));
	}
	return ret;
}

static void neigh_hash_free(struct neighbour **hash, unsigned int entries)
{
	unsigned long size = entries * sizeof(struct neighbour *);

	if (size <= PAGE_SIZE)
		kfree(hash);
	else
		free_pages((unsigned long)hash, get_order(size));
}

static void neigh_hash_grow(struct neigh_table *tbl, unsigned long new_entries)
{
	struct neighbour **new_hash, **old_hash;
	unsigned int i, new_hash_mask, old_entries;

	NEIGH_CACHE_STAT_INC(tbl, hash_grows);

	BUG_ON(!is_power_of_2(new_entries));
	new_hash = neigh_hash_alloc(new_entries);
	if (!new_hash)
		return;

	old_entries = tbl->hash_mask + 1;
	new_hash_mask = new_entries - 1;
	old_hash = tbl->hash_buckets;

	get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));
	for (i = 0; i < old_entries; i++) {
		struct neighbour *n, *next;

		for (n = old_hash[i]; n; n = next) {
			unsigned int hash_val = tbl->hash(n->primary_key, n->dev);

			hash_val &= new_hash_mask;
			next = n->next;

			n->next = new_hash[hash_val];
			new_hash[hash_val] = n;
		}
	}
	tbl->hash_buckets = new_hash;
	tbl->hash_mask = new_hash_mask;

	neigh_hash_free(old_hash, old_entries);
}

struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
			       struct net_device *dev)
{
	struct neighbour *n;
	int key_len = tbl->key_len;
	u32 hash_val = tbl->hash(pkey, dev);

	NEIGH_CACHE_STAT_INC(tbl, lookups);

	read_lock_bh(&tbl->lock);
	for (n = tbl->hash_buckets[hash_val & tbl->hash_mask]; n; n = n->next) {
		if (dev == n->dev && !memcmp(n->primary_key, pkey, key_len)) {
			neigh_hold(n);
			NEIGH_CACHE_STAT_INC(tbl, hits);
			break;
		}
	}
	read_unlock_bh(&tbl->lock);
	return n;
}
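
/*
 * Usage sketch (illustrative only; "tbl", "key" and "dev" are
 * placeholders): neigh_lookup() returns the entry with an extra
 * reference taken, so the caller must balance it:
 *
 *	struct neighbour *n = neigh_lookup(tbl, &key, dev);
 *	if (n) {
 *		...			// e.g. inspect n->nud_state
 *		neigh_release(n);	// drop the reference from lookup
 *	}
 */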

struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, const void *pkey)
{
	struct neighbour *n;
	int key_len = tbl->key_len;
	u32 hash_val = tbl->hash(pkey, NULL);

	NEIGH_CACHE_STAT_INC(tbl, lookups);

	read_lock_bh(&tbl->lock);
	for (n = tbl->hash_buckets[hash_val & tbl->hash_mask]; n; n = n->next) {
		if (!memcmp(n->primary_key, pkey, key_len)) {
			neigh_hold(n);
			NEIGH_CACHE_STAT_INC(tbl, hits);
			break;
		}
	}
	read_unlock_bh(&tbl->lock);
	return n;
}

struct neighbour *neigh_create(struct neigh_table *tbl, const void *pkey,
			       struct net_device *dev)
{
	u32 hash_val;
	int key_len = tbl->key_len;
	int error;
	struct neighbour *n1, *rc, *n = neigh_alloc(tbl);

	if (!n) {
		rc = ERR_PTR(-ENOBUFS);
		goto out;
	}

	memcpy(n->primary_key, pkey, key_len);
	n->dev = dev;
	dev_hold(dev);

	/* Protocol specific setup. */
	if (tbl->constructor && (error = tbl->constructor(n)) < 0) {
		rc = ERR_PTR(error);
		goto out_neigh_release;
	}

	/* Device specific setup. */
	if (n->parms->neigh_setup &&
	    (error = n->parms->neigh_setup(n)) < 0) {
		rc = ERR_PTR(error);
		goto out_neigh_release;
	}

	n->confirmed = jiffies - (n->parms->base_reachable_time << 1);

	write_lock_bh(&tbl->lock);

	if (atomic_read(&tbl->entries) > (tbl->hash_mask + 1))
		neigh_hash_grow(tbl, (tbl->hash_mask + 1) << 1);

	hash_val = tbl->hash(pkey, dev) & tbl->hash_mask;

	if (n->parms->dead) {
		rc = ERR_PTR(-EINVAL);
		goto out_tbl_unlock;
	}

	for (n1 = tbl->hash_buckets[hash_val]; n1; n1 = n1->next) {
		if (dev == n1->dev && !memcmp(n1->primary_key, pkey, key_len)) {
			neigh_hold(n1);
			rc = n1;
			goto out_tbl_unlock;
		}
	}

	n->next = tbl->hash_buckets[hash_val];
	tbl->hash_buckets[hash_val] = n;
	n->dead = 0;
	neigh_hold(n);
	write_unlock_bh(&tbl->lock);
	NEIGH_PRINTK2("neigh %p is created.\n", n);
	rc = n;
out:
	return rc;
out_tbl_unlock:
	write_unlock_bh(&tbl->lock);
out_neigh_release:
	neigh_release(n);
	goto out;
}
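
/*
 * Note on neigh_create() above: tbl->lock is dropped while the protocol
 * constructor and the device neigh_setup() callback run, so a concurrent
 * neigh_create() may insert the same key in the meantime.  The second
 * bucket walk under write_lock_bh() catches that case and returns the
 * existing entry (n1) instead, keeping the table free of duplicates
 * without holding the lock across protocol callbacks.
 */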

struct pneigh_entry *pneigh_lookup(struct neigh_table *tbl, const void *pkey,
				   struct net_device *dev, int creat)
{
	struct pneigh_entry *n;
	int key_len = tbl->key_len;
	u32 hash_val = *(u32 *)(pkey + key_len - 4);

	hash_val ^= (hash_val >> 16);
	hash_val ^= hash_val >> 8;
	hash_val ^= hash_val >> 4;
	hash_val &= PNEIGH_HASHMASK;

	read_lock_bh(&tbl->lock);

	for (n = tbl->phash_buckets[hash_val]; n; n = n->next) {
		if (!memcmp(n->key, pkey, key_len) &&
		    (n->dev == dev || !n->dev)) {
			read_unlock_bh(&tbl->lock);
			goto out;
		}
	}
	read_unlock_bh(&tbl->lock);
	n = NULL;
	if (!creat)
		goto out;

	n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL);
	if (!n)
		goto out;

	memcpy(n->key, pkey, key_len);
	n->dev = dev;
	if (dev)
		dev_hold(dev);

	if (tbl->pconstructor && tbl->pconstructor(n)) {
		if (dev)
			dev_put(dev);
		kfree(n);
		n = NULL;
		goto out;
	}

	write_lock_bh(&tbl->lock);
	n->next = tbl->phash_buckets[hash_val];
	tbl->phash_buckets[hash_val] = n;
	write_unlock_bh(&tbl->lock);
out:
	return n;
}
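
/*
 * Usage sketch (illustrative only; "tbl", "addr" and "dev" are
 * placeholders): the fast path probes with creat == 0, and only
 * configuration code passes creat == 1, e.g.:
 *
 *	if (pneigh_lookup(tbl, &addr, dev, 0))
 *		...	// address is proxied on this device, answer it
 */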


int pneigh_delete(struct neigh_table *tbl, const void *pkey,
		  struct net_device *dev)
{
	struct pneigh_entry *n, **np;
	int key_len = tbl->key_len;
	u32 hash_val = *(u32 *)(pkey + key_len - 4);

	hash_val ^= (hash_val >> 16);
	hash_val ^= hash_val >> 8;
	hash_val ^= hash_val >> 4;
	hash_val &= PNEIGH_HASHMASK;

	write_lock_bh(&tbl->lock);
	for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
	     np = &n->next) {
		if (!memcmp(n->key, pkey, key_len) && n->dev == dev) {
			*np = n->next;
			write_unlock_bh(&tbl->lock);
			if (tbl->pdestructor)
				tbl->pdestructor(n);
			if (n->dev)
				dev_put(n->dev);
			kfree(n);
			return 0;
		}
	}
	write_unlock_bh(&tbl->lock);
	return -ENOENT;
}

static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
	struct pneigh_entry *n, **np;
	u32 h;

	for (h = 0; h <= PNEIGH_HASHMASK; h++) {
		np = &tbl->phash_buckets[h];
		while ((n = *np) != NULL) {
			if (!dev || n->dev == dev) {
				*np = n->next;
				if (tbl->pdestructor)
					tbl->pdestructor(n);
				if (n->dev)
					dev_put(n->dev);
				kfree(n);
				continue;
			}
			np = &n->next;
		}
	}
	return -ENOENT;
}


/*
 *	neighbour must already be out of the table.
 */
void neigh_destroy(struct neighbour *neigh)
{
	struct hh_cache *hh;

	NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);

	if (!neigh->dead) {
		printk(KERN_WARNING
		       "Destroying alive neighbour %p\n", neigh);
		dump_stack();
		return;
	}

	if (neigh_del_timer(neigh))
		printk(KERN_WARNING "Impossible event.\n");

	while ((hh = neigh->hh) != NULL) {
		neigh->hh = hh->hh_next;
		hh->hh_next = NULL;

		write_seqlock_bh(&hh->hh_lock);
		hh->hh_output = neigh_blackhole;
		write_sequnlock_bh(&hh->hh_lock);
		if (atomic_dec_and_test(&hh->hh_refcnt))
			kfree(hh);
	}

	skb_queue_purge(&neigh->arp_queue);

	dev_put(neigh->dev);
	neigh_parms_put(neigh->parms);

	NEIGH_PRINTK2("neigh %p is destroyed.\n", neigh);

	atomic_dec(&neigh->tbl->entries);
	kmem_cache_free(neigh->tbl->kmem_cachep, neigh);
}

/* Neighbour state is suspicious;
   disable fast path.

   Called with write_locked neigh.
 */
static void neigh_suspect(struct neighbour *neigh)
{
	struct hh_cache *hh;

	NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);

	neigh->output = neigh->ops->output;

	for (hh = neigh->hh; hh; hh = hh->hh_next)
		hh->hh_output = neigh->ops->output;
}

/* Neighbour state is OK;
   enable fast path.

   Called with write_locked neigh.
 */
static void neigh_connect(struct neighbour *neigh)
{
	struct hh_cache *hh;

	NEIGH_PRINTK2("neigh %p is connected.\n", neigh);

	neigh->output = neigh->ops->connected_output;

	for (hh = neigh->hh; hh; hh = hh->hh_next)
		hh->hh_output = neigh->ops->hh_output;
}

static void neigh_periodic_timer(unsigned long arg)
{
	struct neigh_table *tbl = (struct neigh_table *)arg;
	struct neighbour *n, **np;
	unsigned long expire, now = jiffies;

	NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);

	write_lock(&tbl->lock);

	/*
	 *	periodically recompute ReachableTime from random function
	 */

	if (time_after(now, tbl->last_rand + 300 * HZ)) {
		struct neigh_parms *p;
		tbl->last_rand = now;
		for (p = &tbl->parms; p; p = p->next)
			p->reachable_time =
				neigh_rand_reach_time(p->base_reachable_time);
	}

	np = &tbl->hash_buckets[tbl->hash_chain_gc];
	tbl->hash_chain_gc = ((tbl->hash_chain_gc + 1) & tbl->hash_mask);

	while ((n = *np) != NULL) {
		unsigned int state;

		write_lock(&n->lock);

		state = n->nud_state;
		if (state & (NUD_PERMANENT | NUD_IN_TIMER)) {
			write_unlock(&n->lock);
			goto next_elt;
		}

		if (time_before(n->used, n->confirmed))
			n->used = n->confirmed;

		if (atomic_read(&n->refcnt) == 1 &&
		    (state == NUD_FAILED ||
		     time_after(now, n->used + n->parms->gc_staletime))) {
			*np = n->next;
			n->dead = 1;
			write_unlock(&n->lock);
			neigh_cleanup_and_release(n);
			continue;
		}
		write_unlock(&n->lock);

next_elt:
		np = &n->next;
	}

	/* Cycle through all hash buckets every base_reachable_time/2 ticks.
	 * ARP entry timeouts range from 1/2 base_reachable_time to 3/2
	 * base_reachable_time.
	 */
	expire = tbl->parms.base_reachable_time >> 1;
	expire /= (tbl->hash_mask + 1);
	if (!expire)
		expire = 1;

	if (expire > HZ)
		mod_timer(&tbl->gc_timer, round_jiffies(now + expire));
	else
		mod_timer(&tbl->gc_timer, now + expire);

	write_unlock(&tbl->lock);
}
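
/*
 * Worked example for the gc interval above (HZ == 100 assumed for
 * illustration): with base_reachable_time == 30 * HZ and 256 buckets,
 * expire == (3000 >> 1) / 256 == 5 jiffies, so one bucket is scanned
 * roughly every 50ms and the full table roughly every 12.8s -- close
 * to the base_reachable_time/2 target (integer division rounds down).
 */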

static __inline__ int neigh_max_probes(struct neighbour *n)
{
	struct neigh_parms *p = n->parms;
	return (n->nud_state & NUD_PROBE ?
		p->ucast_probes :
		p->ucast_probes + p->app_probes + p->mcast_probes);
}

static inline void neigh_add_timer(struct neighbour *n, unsigned long when)
{
	if (unlikely(mod_timer(&n->timer, when))) {
		printk("NEIGH: BUG, double timer add, state is %x\n",
		       n->nud_state);
		dump_stack();
	}
}

/* Called when a timer expires for a neighbour entry. */

static void neigh_timer_handler(unsigned long arg)
{
	unsigned long now, next;
	struct neighbour *neigh = (struct neighbour *)arg;
	unsigned state;
	int notify = 0;

	write_lock(&neigh->lock);

	state = neigh->nud_state;
	now = jiffies;
	next = now + HZ;

	if (!(state & NUD_IN_TIMER)) {
#ifndef CONFIG_SMP
		printk(KERN_WARNING "neigh: timer & !nud_in_timer\n");
#endif
		goto out;
	}

	if (state & NUD_REACHABLE) {
		if (time_before_eq(now,
				   neigh->confirmed + neigh->parms->reachable_time)) {
			NEIGH_PRINTK2("neigh %p is still alive.\n", neigh);
			next = neigh->confirmed + neigh->parms->reachable_time;
		} else if (time_before_eq(now,
					  neigh->used + neigh->parms->delay_probe_time)) {
			NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
			neigh->nud_state = NUD_DELAY;
			neigh->updated = jiffies;
			neigh_suspect(neigh);
			next = now + neigh->parms->delay_probe_time;
		} else {
			NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);
			neigh->nud_state = NUD_STALE;
			neigh->updated = jiffies;
			neigh_suspect(neigh);
			notify = 1;
		}
	} else if (state & NUD_DELAY) {
		if (time_before_eq(now,
				   neigh->confirmed + neigh->parms->delay_probe_time)) {
			NEIGH_PRINTK2("neigh %p is now reachable.\n", neigh);
			neigh->nud_state = NUD_REACHABLE;
			neigh->updated = jiffies;
			neigh_connect(neigh);
			notify = 1;
			next = neigh->confirmed + neigh->parms->reachable_time;
		} else {
			NEIGH_PRINTK2("neigh %p is probed.\n", neigh);
			neigh->nud_state = NUD_PROBE;
			neigh->updated = jiffies;
			atomic_set(&neigh->probes, 0);
			next = now + neigh->parms->retrans_time;
		}
	} else {
		/* NUD_PROBE|NUD_INCOMPLETE */
		next = now + neigh->parms->retrans_time;
	}

	if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
	    atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
		struct sk_buff *skb;

		neigh->nud_state = NUD_FAILED;
		neigh->updated = jiffies;
		notify = 1;
		NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
		NEIGH_PRINTK2("neigh %p is failed.\n", neigh);

		/* This is a delicate spot: error_report is a very
		   complicated routine; in particular, it can hit this
		   same neighbour entry!

		   So we try to be careful and avoid a dead loop. --ANK
		 */
		while (neigh->nud_state == NUD_FAILED &&
		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
			write_unlock(&neigh->lock);
			neigh->ops->error_report(neigh, skb);
			write_lock(&neigh->lock);
		}
		skb_queue_purge(&neigh->arp_queue);
	}

	if (neigh->nud_state & NUD_IN_TIMER) {
		if (time_before(next, jiffies + HZ/2))
			next = jiffies + HZ/2;
		if (!mod_timer(&neigh->timer, next))
			neigh_hold(neigh);
	}
	if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
		struct sk_buff *skb = skb_peek(&neigh->arp_queue);
		/* keep skb alive even if arp_queue overflows */
		if (skb)
			skb_get(skb);
		write_unlock(&neigh->lock);
		neigh->ops->solicit(neigh, skb);
		atomic_inc(&neigh->probes);
		if (skb)
			kfree_skb(skb);
	} else {
out:
		write_unlock(&neigh->lock);
	}

	if (notify)
		neigh_update_notify(neigh);

	neigh_release(neigh);
}

int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
{
	int rc;
	unsigned long now;

	write_lock_bh(&neigh->lock);

	rc = 0;
	if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
		goto out_unlock_bh;

	now = jiffies;

	if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
		if (neigh->parms->mcast_probes + neigh->parms->app_probes) {
			atomic_set(&neigh->probes, neigh->parms->ucast_probes);
			neigh->nud_state = NUD_INCOMPLETE;
			neigh->updated = jiffies;
			neigh_hold(neigh);
			neigh_add_timer(neigh, now + 1);
		} else {
			neigh->nud_state = NUD_FAILED;
			neigh->updated = jiffies;
			write_unlock_bh(&neigh->lock);

			if (skb)
				kfree_skb(skb);
			return 1;
		}
	} else if (neigh->nud_state & NUD_STALE) {
		NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
		neigh_hold(neigh);
		neigh->nud_state = NUD_DELAY;
		neigh->updated = jiffies;
		neigh_add_timer(neigh,
				jiffies + neigh->parms->delay_probe_time);
	}

	if (neigh->nud_state == NUD_INCOMPLETE) {
		if (skb) {
			if (skb_queue_len(&neigh->arp_queue) >=
			    neigh->parms->queue_len) {
				struct sk_buff *buff;
				buff = neigh->arp_queue.next;
				__skb_unlink(buff, &neigh->arp_queue);
				kfree_skb(buff);
			}
			__skb_queue_tail(&neigh->arp_queue, skb);
		}
		rc = 1;
	}
out_unlock_bh:
	write_unlock_bh(&neigh->lock);
	return rc;
}
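
/*
 * Usage sketch (illustrative only): callers normally go through the
 * neigh_event_send() wrapper, which short-circuits when the entry is
 * already usable:
 *
 *	if (!neigh_event_send(neigh, skb))
 *		...	// resolved: safe to build the hardware header
 *	else
 *		...	// rc 1: skb queued (or dropped) pending resolution
 */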

static void neigh_update_hhs(struct neighbour *neigh)
{
	struct hh_cache *hh;
	void (*update)(struct hh_cache*, struct net_device*, unsigned char *) =
		neigh->dev->header_cache_update;

	if (update) {
		for (hh = neigh->hh; hh; hh = hh->hh_next) {
			write_seqlock_bh(&hh->hh_lock);
			update(hh, neigh->dev, neigh->ha);
			write_sequnlock_bh(&hh->hh_lock);
		}
	}
}


/* Generic update routine.
   -- lladdr is the new lladdr, or NULL if it is not supplied.
   -- new    is the new state.
   -- flags
	NEIGH_UPDATE_F_OVERRIDE allows overriding an existing lladdr
				if it differs.
	NEIGH_UPDATE_F_WEAK_OVERRIDE will suspect an existing "connected"
				lladdr instead of overriding it
				if it differs. It also allows retaining
				the current state if lladdr is unchanged.
	NEIGH_UPDATE_F_ADMIN	means that the change is administrative.

	NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows overriding an existing
				NTF_ROUTER flag.
	NEIGH_UPDATE_F_ISROUTER	indicates whether the neighbour is known as
				a router.

   Caller MUST hold a reference count on the entry.
 */

int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
		 u32 flags)
{
	u8 old;
	int err;
	int notify = 0;
	struct net_device *dev;
	int update_isrouter = 0;

	write_lock_bh(&neigh->lock);

	dev = neigh->dev;
	old = neigh->nud_state;
	err = -EPERM;

	if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
	    (old & (NUD_NOARP | NUD_PERMANENT)))
		goto out;

	if (!(new & NUD_VALID)) {
		neigh_del_timer(neigh);
		if (old & NUD_CONNECTED)
			neigh_suspect(neigh);
		neigh->nud_state = new;
		err = 0;
		notify = old & NUD_VALID;
		goto out;
	}

	/* Compare the new lladdr with the cached one. */
	if (!dev->addr_len) {
		/* First case: device needs no address. */
		lladdr = neigh->ha;
	} else if (lladdr) {
		/* The second case: if something is already cached
		   and a new address is proposed:
		   - compare new & old
		   - if they are different, check the override flag
		 */
		if ((old & NUD_VALID) &&
		    !memcmp(lladdr, neigh->ha, dev->addr_len))
			lladdr = neigh->ha;
	} else {
		/* No address is supplied; if we know something,
		   use it, otherwise discard the request.
		 */
		err = -EINVAL;
		if (!(old & NUD_VALID))
			goto out;
		lladdr = neigh->ha;
	}

	if (new & NUD_CONNECTED)
		neigh->confirmed = jiffies;
	neigh->updated = jiffies;

	/* If the entry was valid and the address has not changed,
	   do not change the entry state if the new one is STALE.
	 */
	err = 0;
	update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
	if (old & NUD_VALID) {
		if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
			update_isrouter = 0;
			if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) &&
			    (old & NUD_CONNECTED)) {
				lladdr = neigh->ha;
				new = NUD_STALE;
			} else
				goto out;
		} else {
			if (lladdr == neigh->ha && new == NUD_STALE &&
			    ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) ||
			     (old & NUD_CONNECTED)))
				new = old;
		}
	}

	if (new != old) {
		neigh_del_timer(neigh);
		if (new & NUD_IN_TIMER) {
			neigh_hold(neigh);
			neigh_add_timer(neigh, (jiffies +
						((new & NUD_REACHABLE) ?
						 neigh->parms->reachable_time :
						 0)));
		}
		neigh->nud_state = new;
	}

	if (lladdr != neigh->ha) {
		memcpy(&neigh->ha, lladdr, dev->addr_len);
		neigh_update_hhs(neigh);
		if (!(new & NUD_CONNECTED))
			neigh->confirmed = jiffies -
				      (neigh->parms->base_reachable_time << 1);
		notify = 1;
	}
	if (new == old)
		goto out;
	if (new & NUD_CONNECTED)
		neigh_connect(neigh);
	else
		neigh_suspect(neigh);
	if (!(old & NUD_VALID)) {
		struct sk_buff *skb;

		/* Again: avoid a dead loop if something went wrong. */

		while (neigh->nud_state & NUD_VALID &&
		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
			struct neighbour *n1 = neigh;
			write_unlock_bh(&neigh->lock);
			/* On shaper/eql skb->dst->neighbour != neigh :( */
			if (skb->dst && skb->dst->neighbour)
				n1 = skb->dst->neighbour;
			n1->output(skb);
			write_lock_bh(&neigh->lock);
		}
		skb_queue_purge(&neigh->arp_queue);
	}
out:
	if (update_isrouter) {
		neigh->flags = (flags & NEIGH_UPDATE_F_ISROUTER) ?
			(neigh->flags | NTF_ROUTER) :
			(neigh->flags & ~NTF_ROUTER);
	}
	write_unlock_bh(&neigh->lock);

	if (notify)
		neigh_update_notify(neigh);

	return err;
}
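
/*
 * Usage sketch (illustrative only): an administrative replace, as the
 * netlink handlers further down do, combines override with admin rights:
 *
 *	neigh_update(neigh, lladdr, NUD_PERMANENT,
 *		     NEIGH_UPDATE_F_OVERRIDE | NEIGH_UPDATE_F_ADMIN);
 */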

struct neighbour *neigh_event_ns(struct neigh_table *tbl,
				 u8 *lladdr, void *saddr,
				 struct net_device *dev)
{
	struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
						 lladdr || !dev->addr_len);
	if (neigh)
		neigh_update(neigh, lladdr, NUD_STALE,
			     NEIGH_UPDATE_F_OVERRIDE);
	return neigh;
}

static void neigh_hh_init(struct neighbour *n, struct dst_entry *dst,
			  __be16 protocol)
{
	struct hh_cache *hh;
	struct net_device *dev = dst->dev;

	for (hh = n->hh; hh; hh = hh->hh_next)
		if (hh->hh_type == protocol)
			break;

	if (!hh && (hh = kzalloc(sizeof(*hh), GFP_ATOMIC)) != NULL) {
		seqlock_init(&hh->hh_lock);
		hh->hh_type = protocol;
		atomic_set(&hh->hh_refcnt, 0);
		hh->hh_next = NULL;
		if (dev->hard_header_cache(n, hh)) {
			kfree(hh);
			hh = NULL;
		} else {
			atomic_inc(&hh->hh_refcnt);
			hh->hh_next = n->hh;
			n->hh = hh;
			if (n->nud_state & NUD_CONNECTED)
				hh->hh_output = n->ops->hh_output;
			else
				hh->hh_output = n->ops->output;
		}
	}
	if (hh) {
		atomic_inc(&hh->hh_refcnt);
		dst->hh = hh;
	}
}

/* This function can be used in contexts where only the old dev_queue_xmit
   worked, e.g. if you want to override the normal output path (eql, shaper),
   but resolution has not been made yet.
 */

int neigh_compat_output(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;

	__skb_pull(skb, skb_network_offset(skb));

	if (dev->hard_header &&
	    dev->hard_header(skb, dev, ntohs(skb->protocol), NULL, NULL,
			     skb->len) < 0 &&
	    dev->rebuild_header(skb))
		return 0;

	return dev_queue_xmit(skb);
}

/* Slow and careful. */

int neigh_resolve_output(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct neighbour *neigh;
	int rc = 0;

	if (!dst || !(neigh = dst->neighbour))
		goto discard;

	__skb_pull(skb, skb_network_offset(skb));

	if (!neigh_event_send(neigh, skb)) {
		int err;
		struct net_device *dev = neigh->dev;
		if (dev->hard_header_cache && !dst->hh) {
			write_lock_bh(&neigh->lock);
			if (!dst->hh)
				neigh_hh_init(neigh, dst, dst->ops->protocol);
			err = dev->hard_header(skb, dev, ntohs(skb->protocol),
					       neigh->ha, NULL, skb->len);
			write_unlock_bh(&neigh->lock);
		} else {
			read_lock_bh(&neigh->lock);
			err = dev->hard_header(skb, dev, ntohs(skb->protocol),
					       neigh->ha, NULL, skb->len);
			read_unlock_bh(&neigh->lock);
		}
		if (err >= 0)
			rc = neigh->ops->queue_xmit(skb);
		else
			goto out_kfree_skb;
	}
out:
	return rc;
discard:
	NEIGH_PRINTK1("neigh_resolve_output: dst=%p neigh=%p\n",
		      dst, dst ? dst->neighbour : NULL);
out_kfree_skb:
	rc = -EINVAL;
	kfree_skb(skb);
	goto out;
}

/* As fast as possible without hh cache */

int neigh_connected_output(struct sk_buff *skb)
{
	int err;
	struct dst_entry *dst = skb->dst;
	struct neighbour *neigh = dst->neighbour;
	struct net_device *dev = neigh->dev;

	__skb_pull(skb, skb_network_offset(skb));

	read_lock_bh(&neigh->lock);
	err = dev->hard_header(skb, dev, ntohs(skb->protocol),
			       neigh->ha, NULL, skb->len);
	read_unlock_bh(&neigh->lock);
	if (err >= 0)
		err = neigh->ops->queue_xmit(skb);
	else {
		err = -EINVAL;
		kfree_skb(skb);
	}
	return err;
}

static void neigh_proxy_process(unsigned long arg)
{
	struct neigh_table *tbl = (struct neigh_table *)arg;
	long sched_next = 0;
	unsigned long now = jiffies;
	struct sk_buff *skb;

	spin_lock(&tbl->proxy_queue.lock);

	skb = tbl->proxy_queue.next;

	while (skb != (struct sk_buff *)&tbl->proxy_queue) {
		struct sk_buff *back = skb;
		long tdif = NEIGH_CB(back)->sched_next - now;

		skb = skb->next;
		if (tdif <= 0) {
			struct net_device *dev = back->dev;
			__skb_unlink(back, &tbl->proxy_queue);
			if (tbl->proxy_redo && netif_running(dev))
				tbl->proxy_redo(back);
			else
				kfree_skb(back);

			dev_put(dev);
		} else if (!sched_next || tdif < sched_next)
			sched_next = tdif;
	}
	del_timer(&tbl->proxy_timer);
	if (sched_next)
		mod_timer(&tbl->proxy_timer, jiffies + sched_next);
	spin_unlock(&tbl->proxy_queue.lock);
}

void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
		    struct sk_buff *skb)
{
	unsigned long now = jiffies;
	unsigned long sched_next = now + (net_random() % p->proxy_delay);

	if (tbl->proxy_queue.qlen > p->proxy_qlen) {
		kfree_skb(skb);
		return;
	}

	NEIGH_CB(skb)->sched_next = sched_next;
	NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;

	spin_lock(&tbl->proxy_queue.lock);
	if (del_timer(&tbl->proxy_timer)) {
		if (time_before(tbl->proxy_timer.expires, sched_next))
			sched_next = tbl->proxy_timer.expires;
	}
	dst_release(skb->dst);
	skb->dst = NULL;
	dev_hold(skb->dev);
	__skb_queue_tail(&tbl->proxy_queue, skb);
	mod_timer(&tbl->proxy_timer, sched_next);
	spin_unlock(&tbl->proxy_queue.lock);
}
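
/*
 * The randomised delay above spreads proxy replies over
 * [now, now + proxy_delay): e.g. with proxy_delay == (8 * HZ) / 10
 * (an illustrative value), net_random() % p->proxy_delay picks a slot
 * within the next 0.8s, so a burst of requests is not answered in one
 * thundering herd.
 */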


struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
				      struct neigh_table *tbl)
{
	struct neigh_parms *p = kmemdup(&tbl->parms, sizeof(*p), GFP_KERNEL);

	if (p) {
		p->tbl = tbl;
		atomic_set(&p->refcnt, 1);
		INIT_RCU_HEAD(&p->rcu_head);
		p->reachable_time =
				neigh_rand_reach_time(p->base_reachable_time);
		if (dev) {
			if (dev->neigh_setup && dev->neigh_setup(dev, p)) {
				kfree(p);
				return NULL;
			}

			dev_hold(dev);
			p->dev = dev;
		}
		p->sysctl_table = NULL;
		write_lock_bh(&tbl->lock);
		p->next = tbl->parms.next;
		tbl->parms.next = p;
		write_unlock_bh(&tbl->lock);
	}
	return p;
}

static void neigh_rcu_free_parms(struct rcu_head *head)
{
	struct neigh_parms *parms =
		container_of(head, struct neigh_parms, rcu_head);

	neigh_parms_put(parms);
}

void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
{
	struct neigh_parms **p;

	if (!parms || parms == &tbl->parms)
		return;
	write_lock_bh(&tbl->lock);
	for (p = &tbl->parms.next; *p; p = &(*p)->next) {
		if (*p == parms) {
			*p = parms->next;
			parms->dead = 1;
			write_unlock_bh(&tbl->lock);
			if (parms->dev)
				dev_put(parms->dev);
			call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
			return;
		}
	}
	write_unlock_bh(&tbl->lock);
	NEIGH_PRINTK1("neigh_parms_release: not found\n");
}

void neigh_parms_destroy(struct neigh_parms *parms)
{
	kfree(parms);
}

static struct lock_class_key neigh_table_proxy_queue_class;

void neigh_table_init_no_netlink(struct neigh_table *tbl)
{
	unsigned long now = jiffies;
	unsigned long phsize;

	atomic_set(&tbl->parms.refcnt, 1);
	INIT_RCU_HEAD(&tbl->parms.rcu_head);
	tbl->parms.reachable_time =
			neigh_rand_reach_time(tbl->parms.base_reachable_time);

	if (!tbl->kmem_cachep)
		tbl->kmem_cachep =
			kmem_cache_create(tbl->id, tbl->entry_size, 0,
					  SLAB_HWCACHE_ALIGN|SLAB_PANIC,
					  NULL);
	tbl->stats = alloc_percpu(struct neigh_statistics);
	if (!tbl->stats)
		panic("cannot create neighbour cache statistics");

#ifdef CONFIG_PROC_FS
	tbl->pde = create_proc_entry(tbl->id, 0, init_net.proc_net_stat);
	if (!tbl->pde)
		panic("cannot create neighbour proc dir entry");
	tbl->pde->proc_fops = &neigh_stat_seq_fops;
	tbl->pde->data = tbl;
#endif

	tbl->hash_mask = 1;
	tbl->hash_buckets = neigh_hash_alloc(tbl->hash_mask + 1);

	phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
	tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);

	if (!tbl->hash_buckets || !tbl->phash_buckets)
		panic("cannot allocate neighbour cache hashes");

	get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));

	rwlock_init(&tbl->lock);
	init_timer(&tbl->gc_timer);
	tbl->gc_timer.data = (unsigned long)tbl;
	tbl->gc_timer.function = neigh_periodic_timer;
	tbl->gc_timer.expires = now + 1;
	add_timer(&tbl->gc_timer);

	init_timer(&tbl->proxy_timer);
	tbl->proxy_timer.data = (unsigned long)tbl;
	tbl->proxy_timer.function = neigh_proxy_process;
	skb_queue_head_init_class(&tbl->proxy_queue,
				  &neigh_table_proxy_queue_class);

	tbl->last_flush = now;
	tbl->last_rand = now + tbl->parms.reachable_time * 20;
}

void neigh_table_init(struct neigh_table *tbl)
{
	struct neigh_table *tmp;

	neigh_table_init_no_netlink(tbl);
	write_lock(&neigh_tbl_lock);
	for (tmp = neigh_tables; tmp; tmp = tmp->next) {
		if (tmp->family == tbl->family)
			break;
	}
	tbl->next = neigh_tables;
	neigh_tables = tbl;
	write_unlock(&neigh_tbl_lock);

	if (unlikely(tmp)) {
		printk(KERN_ERR "NEIGH: Registering multiple tables for "
		       "family %d\n", tbl->family);
		dump_stack();
	}
}

int neigh_table_clear(struct neigh_table *tbl)
{
	struct neigh_table **tp;

	/* It is not clean... Fix it to unload IPv6 module safely */
	del_timer_sync(&tbl->gc_timer);
	del_timer_sync(&tbl->proxy_timer);
	pneigh_queue_purge(&tbl->proxy_queue);
	neigh_ifdown(tbl, NULL);
	if (atomic_read(&tbl->entries))
		printk(KERN_CRIT "neighbour leakage\n");
	write_lock(&neigh_tbl_lock);
	for (tp = &neigh_tables; *tp; tp = &(*tp)->next) {
		if (*tp == tbl) {
			*tp = tbl->next;
			break;
		}
	}
	write_unlock(&neigh_tbl_lock);

	neigh_hash_free(tbl->hash_buckets, tbl->hash_mask + 1);
	tbl->hash_buckets = NULL;

	kfree(tbl->phash_buckets);
	tbl->phash_buckets = NULL;

	free_percpu(tbl->stats);
	tbl->stats = NULL;

	return 0;
}

static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct ndmsg *ndm;
	struct nlattr *dst_attr;
	struct neigh_table *tbl;
	struct net_device *dev = NULL;
	int err = -EINVAL;

	if (nlmsg_len(nlh) < sizeof(*ndm))
		goto out;

	dst_attr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_DST);
	if (dst_attr == NULL)
		goto out;

	ndm = nlmsg_data(nlh);
	if (ndm->ndm_ifindex) {
		dev = dev_get_by_index(ndm->ndm_ifindex);
		if (dev == NULL) {
			err = -ENODEV;
			goto out;
		}
	}

	read_lock(&neigh_tbl_lock);
	for (tbl = neigh_tables; tbl; tbl = tbl->next) {
		struct neighbour *neigh;

		if (tbl->family != ndm->ndm_family)
			continue;
		read_unlock(&neigh_tbl_lock);

		if (nla_len(dst_attr) < tbl->key_len)
			goto out_dev_put;

		if (ndm->ndm_flags & NTF_PROXY) {
			err = pneigh_delete(tbl, nla_data(dst_attr), dev);
			goto out_dev_put;
		}

		if (dev == NULL)
			goto out_dev_put;

		neigh = neigh_lookup(tbl, nla_data(dst_attr), dev);
		if (neigh == NULL) {
			err = -ENOENT;
			goto out_dev_put;
		}

		err = neigh_update(neigh, NULL, NUD_FAILED,
				   NEIGH_UPDATE_F_OVERRIDE |
				   NEIGH_UPDATE_F_ADMIN);
		neigh_release(neigh);
		goto out_dev_put;
	}
	read_unlock(&neigh_tbl_lock);
	err = -EAFNOSUPPORT;

out_dev_put:
	if (dev)
		dev_put(dev);
out:
	return err;
}

static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct ndmsg *ndm;
	struct nlattr *tb[NDA_MAX+1];
	struct neigh_table *tbl;
	struct net_device *dev = NULL;
	int err;

	err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL);
	if (err < 0)
		goto out;

	err = -EINVAL;
	if (tb[NDA_DST] == NULL)
		goto out;

	ndm = nlmsg_data(nlh);
	if (ndm->ndm_ifindex) {
		dev = dev_get_by_index(ndm->ndm_ifindex);
		if (dev == NULL) {
			err = -ENODEV;
			goto out;
		}

		if (tb[NDA_LLADDR] && nla_len(tb[NDA_LLADDR]) < dev->addr_len)
			goto out_dev_put;
	}

	read_lock(&neigh_tbl_lock);
	for (tbl = neigh_tables; tbl; tbl = tbl->next) {
		int flags = NEIGH_UPDATE_F_ADMIN | NEIGH_UPDATE_F_OVERRIDE;
		struct neighbour *neigh;
		void *dst, *lladdr;

		if (tbl->family != ndm->ndm_family)
			continue;
		read_unlock(&neigh_tbl_lock);

		if (nla_len(tb[NDA_DST]) < tbl->key_len)
			goto out_dev_put;
		dst = nla_data(tb[NDA_DST]);
		lladdr = tb[NDA_LLADDR] ? nla_data(tb[NDA_LLADDR]) : NULL;

		if (ndm->ndm_flags & NTF_PROXY) {
			struct pneigh_entry *pn;

			err = -ENOBUFS;
			pn = pneigh_lookup(tbl, dst, dev, 1);
			if (pn) {
				pn->flags = ndm->ndm_flags;
				err = 0;
			}
			goto out_dev_put;
		}

		if (dev == NULL)
			goto out_dev_put;

		neigh = neigh_lookup(tbl, dst, dev);
		if (neigh == NULL) {
			if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
				err = -ENOENT;
				goto out_dev_put;
			}

			neigh = __neigh_lookup_errno(tbl, dst, dev);
			if (IS_ERR(neigh)) {
				err = PTR_ERR(neigh);
				goto out_dev_put;
			}
		} else {
			if (nlh->nlmsg_flags & NLM_F_EXCL) {
				err = -EEXIST;
				neigh_release(neigh);
				goto out_dev_put;
			}

			if (!(nlh->nlmsg_flags & NLM_F_REPLACE))
				flags &= ~NEIGH_UPDATE_F_OVERRIDE;
		}

		err = neigh_update(neigh, lladdr, ndm->ndm_state, flags);
		neigh_release(neigh);
		goto out_dev_put;
	}

	read_unlock(&neigh_tbl_lock);
	err = -EAFNOSUPPORT;

out_dev_put:
	if (dev)
		dev_put(dev);
out:
	return err;
}

static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
{
	struct nlattr *nest;

	nest = nla_nest_start(skb, NDTA_PARMS);
	if (nest == NULL)
		return -ENOBUFS;

	if (parms->dev)
		NLA_PUT_U32(skb, NDTPA_IFINDEX, parms->dev->ifindex);

	NLA_PUT_U32(skb, NDTPA_REFCNT, atomic_read(&parms->refcnt));
	NLA_PUT_U32(skb, NDTPA_QUEUE_LEN, parms->queue_len);
	NLA_PUT_U32(skb, NDTPA_PROXY_QLEN, parms->proxy_qlen);
	NLA_PUT_U32(skb, NDTPA_APP_PROBES, parms->app_probes);
	NLA_PUT_U32(skb, NDTPA_UCAST_PROBES, parms->ucast_probes);
	NLA_PUT_U32(skb, NDTPA_MCAST_PROBES, parms->mcast_probes);
	NLA_PUT_MSECS(skb, NDTPA_REACHABLE_TIME, parms->reachable_time);
	NLA_PUT_MSECS(skb, NDTPA_BASE_REACHABLE_TIME,
		      parms->base_reachable_time);
	NLA_PUT_MSECS(skb, NDTPA_GC_STALETIME, parms->gc_staletime);
	NLA_PUT_MSECS(skb, NDTPA_DELAY_PROBE_TIME, parms->delay_probe_time);
	NLA_PUT_MSECS(skb, NDTPA_RETRANS_TIME, parms->retrans_time);
	NLA_PUT_MSECS(skb, NDTPA_ANYCAST_DELAY, parms->anycast_delay);
	NLA_PUT_MSECS(skb, NDTPA_PROXY_DELAY, parms->proxy_delay);
	NLA_PUT_MSECS(skb, NDTPA_LOCKTIME, parms->locktime);

	return nla_nest_end(skb, nest);

nla_put_failure:
	return nla_nest_cancel(skb, nest);
}

static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
			      u32 pid, u32 seq, int type, int flags)
{
	struct nlmsghdr *nlh;
	struct ndtmsg *ndtmsg;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ndtmsg = nlmsg_data(nlh);

	read_lock_bh(&tbl->lock);
	ndtmsg->ndtm_family = tbl->family;
	ndtmsg->ndtm_pad1 = 0;
	ndtmsg->ndtm_pad2 = 0;

	NLA_PUT_STRING(skb, NDTA_NAME, tbl->id);
	NLA_PUT_MSECS(skb, NDTA_GC_INTERVAL, tbl->gc_interval);
	NLA_PUT_U32(skb, NDTA_THRESH1, tbl->gc_thresh1);
	NLA_PUT_U32(skb, NDTA_THRESH2, tbl->gc_thresh2);
	NLA_PUT_U32(skb, NDTA_THRESH3, tbl->gc_thresh3);

	{
		unsigned long now = jiffies;
		unsigned int flush_delta = now - tbl->last_flush;
		unsigned int rand_delta = now - tbl->last_rand;

		struct ndt_config ndc = {
			.ndtc_key_len		= tbl->key_len,
			.ndtc_entry_size	= tbl->entry_size,
			.ndtc_entries		= atomic_read(&tbl->entries),
			.ndtc_last_flush	= jiffies_to_msecs(flush_delta),
			.ndtc_last_rand		= jiffies_to_msecs(rand_delta),
			.ndtc_hash_rnd		= tbl->hash_rnd,
			.ndtc_hash_mask		= tbl->hash_mask,
			.ndtc_hash_chain_gc	= tbl->hash_chain_gc,
			.ndtc_proxy_qlen	= tbl->proxy_queue.qlen,
		};

		NLA_PUT(skb, NDTA_CONFIG, sizeof(ndc), &ndc);
	}

	{
		int cpu;
		struct ndt_stats ndst;

		memset(&ndst, 0, sizeof(ndst));

		for_each_possible_cpu(cpu) {
			struct neigh_statistics *st;

			st = per_cpu_ptr(tbl->stats, cpu);
			ndst.ndts_allocs += st->allocs;
			ndst.ndts_destroys += st->destroys;
			ndst.ndts_hash_grows += st->hash_grows;
			ndst.ndts_res_failed += st->res_failed;
			ndst.ndts_lookups += st->lookups;
			ndst.ndts_hits += st->hits;
			ndst.ndts_rcv_probes_mcast += st->rcv_probes_mcast;
			ndst.ndts_rcv_probes_ucast += st->rcv_probes_ucast;
			ndst.ndts_periodic_gc_runs += st->periodic_gc_runs;
			ndst.ndts_forced_gc_runs += st->forced_gc_runs;
		}

		NLA_PUT(skb, NDTA_STATS, sizeof(ndst), &ndst);
	}

	BUG_ON(tbl->parms.dev);
	if (neightbl_fill_parms(skb, &tbl->parms) < 0)
		goto nla_put_failure;

	read_unlock_bh(&tbl->lock);
	return nlmsg_end(skb, nlh);

nla_put_failure:
	read_unlock_bh(&tbl->lock);
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static int neightbl_fill_param_info(struct sk_buff *skb,
				    struct neigh_table *tbl,
				    struct neigh_parms *parms,
				    u32 pid, u32 seq, int type,
				    unsigned int flags)
{
	struct ndtmsg *ndtmsg;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ndtmsg = nlmsg_data(nlh);

	read_lock_bh(&tbl->lock);
	ndtmsg->ndtm_family = tbl->family;
	ndtmsg->ndtm_pad1 = 0;
	ndtmsg->ndtm_pad2 = 0;

	if (nla_put_string(skb, NDTA_NAME, tbl->id) < 0 ||
	    neightbl_fill_parms(skb, parms) < 0)
		goto errout;

	read_unlock_bh(&tbl->lock);
	return nlmsg_end(skb, nlh);
errout:
	read_unlock_bh(&tbl->lock);
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static inline struct neigh_parms *lookup_neigh_params(struct neigh_table *tbl,
						      int ifindex)
{
	struct neigh_parms *p;

	for (p = &tbl->parms; p; p = p->next)
		if ((p->dev && p->dev->ifindex == ifindex) ||
		    (!p->dev && !ifindex))
			return p;

	return NULL;
}

static const struct nla_policy nl_neightbl_policy[NDTA_MAX+1] = {
	[NDTA_NAME]		= { .type = NLA_STRING },
	[NDTA_THRESH1]		= { .type = NLA_U32 },
	[NDTA_THRESH2]		= { .type = NLA_U32 },
	[NDTA_THRESH3]		= { .type = NLA_U32 },
	[NDTA_GC_INTERVAL]	= { .type = NLA_U64 },
	[NDTA_PARMS]		= { .type = NLA_NESTED },
};

static const struct nla_policy nl_ntbl_parm_policy[NDTPA_MAX+1] = {
	[NDTPA_IFINDEX]			= { .type = NLA_U32 },
	[NDTPA_QUEUE_LEN]		= { .type = NLA_U32 },
	[NDTPA_PROXY_QLEN]		= { .type = NLA_U32 },
	[NDTPA_APP_PROBES]		= { .type = NLA_U32 },
	[NDTPA_UCAST_PROBES]		= { .type = NLA_U32 },
	[NDTPA_MCAST_PROBES]		= { .type = NLA_U32 },
	[NDTPA_BASE_REACHABLE_TIME]	= { .type = NLA_U64 },
	[NDTPA_GC_STALETIME]		= { .type = NLA_U64 },
	[NDTPA_DELAY_PROBE_TIME]	= { .type = NLA_U64 },
	[NDTPA_RETRANS_TIME]		= { .type = NLA_U64 },
	[NDTPA_ANYCAST_DELAY]		= { .type = NLA_U64 },
	[NDTPA_PROXY_DELAY]		= { .type = NLA_U64 },
	[NDTPA_LOCKTIME]		= { .type = NLA_U64 },
};

static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct neigh_table *tbl;
	struct ndtmsg *ndtmsg;
	struct nlattr *tb[NDTA_MAX+1];
	int err;

	err = nlmsg_parse(nlh, sizeof(*ndtmsg), tb, NDTA_MAX,
			  nl_neightbl_policy);
	if (err < 0)
		goto errout;

	if (tb[NDTA_NAME] == NULL) {
		err = -EINVAL;
		goto errout;
	}

	ndtmsg = nlmsg_data(nlh);
	read_lock(&neigh_tbl_lock);
	for (tbl = neigh_tables; tbl; tbl = tbl->next) {
		if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
			continue;

		if (nla_strcmp(tb[NDTA_NAME], tbl->id) == 0)
			break;
	}

	if (tbl == NULL) {
		err = -ENOENT;
		goto errout_locked;
	}

	/*
	 * We acquire tbl->lock to be nice to the periodic timers and
	 * make sure they always see a consistent set of values.
	 */
	write_lock_bh(&tbl->lock);

	if (tb[NDTA_PARMS]) {
		struct nlattr *tbp[NDTPA_MAX+1];
		struct neigh_parms *p;
		int i, ifindex = 0;

		err = nla_parse_nested(tbp, NDTPA_MAX, tb[NDTA_PARMS],
				       nl_ntbl_parm_policy);
		if (err < 0)
			goto errout_tbl_lock;

		if (tbp[NDTPA_IFINDEX])
			ifindex = nla_get_u32(tbp[NDTPA_IFINDEX]);

		p = lookup_neigh_params(tbl, ifindex);
		if (p == NULL) {
			err = -ENOENT;
			goto errout_tbl_lock;
		}

		for (i = 1; i <= NDTPA_MAX; i++) {
			if (tbp[i] == NULL)
				continue;

			switch (i) {
			case NDTPA_QUEUE_LEN:
				p->queue_len = nla_get_u32(tbp[i]);
				break;
			case NDTPA_PROXY_QLEN:
				p->proxy_qlen = nla_get_u32(tbp[i]);
				break;
			case NDTPA_APP_PROBES:
				p->app_probes = nla_get_u32(tbp[i]);
				break;
			case NDTPA_UCAST_PROBES:
				p->ucast_probes = nla_get_u32(tbp[i]);
				break;
			case NDTPA_MCAST_PROBES:
				p->mcast_probes = nla_get_u32(tbp[i]);
				break;
			case NDTPA_BASE_REACHABLE_TIME:
				p->base_reachable_time = nla_get_msecs(tbp[i]);
				break;
			case NDTPA_GC_STALETIME:
				p->gc_staletime = nla_get_msecs(tbp[i]);
				break;
			case NDTPA_DELAY_PROBE_TIME:
				p->delay_probe_time = nla_get_msecs(tbp[i]);
				break;
			case NDTPA_RETRANS_TIME:
				p->retrans_time = nla_get_msecs(tbp[i]);
				break;
			case NDTPA_ANYCAST_DELAY:
				p->anycast_delay = nla_get_msecs(tbp[i]);
				break;
			case NDTPA_PROXY_DELAY:
				p->proxy_delay = nla_get_msecs(tbp[i]);
				break;
			case NDTPA_LOCKTIME:
				p->locktime = nla_get_msecs(tbp[i]);
				break;
			}
		}
	}

	if (tb[NDTA_THRESH1])
		tbl->gc_thresh1 = nla_get_u32(tb[NDTA_THRESH1]);

	if (tb[NDTA_THRESH2])
		tbl->gc_thresh2 = nla_get_u32(tb[NDTA_THRESH2]);

	if (tb[NDTA_THRESH3])
		tbl->gc_thresh3 = nla_get_u32(tb[NDTA_THRESH3]);

	if (tb[NDTA_GC_INTERVAL])
		tbl->gc_interval = nla_get_msecs(tb[NDTA_GC_INTERVAL]);

	err = 0;

errout_tbl_lock:
	write_unlock_bh(&tbl->lock);
errout_locked:
	read_unlock(&neigh_tbl_lock);
errout:
	return err;
}

static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
{
	int family, tidx, nidx = 0;
	int tbl_skip = cb->args[0];
	int neigh_skip = cb->args[1];
	struct neigh_table *tbl;

	family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;

	read_lock(&neigh_tbl_lock);
	for (tbl = neigh_tables, tidx = 0; tbl; tbl = tbl->next, tidx++) {
		struct neigh_parms *p;

		if (tidx < tbl_skip || (family && tbl->family != family))
			continue;

		if (neightbl_fill_info(skb, tbl, NETLINK_CB(cb->skb).pid,
				       cb->nlh->nlmsg_seq, RTM_NEWNEIGHTBL,
				       NLM_F_MULTI) <= 0)
			break;

		for (nidx = 0, p = tbl->parms.next; p; p = p->next, nidx++) {
			if (nidx < neigh_skip)
				continue;

			if (neightbl_fill_param_info(skb, tbl, p,
						     NETLINK_CB(cb->skb).pid,
						     cb->nlh->nlmsg_seq,
						     RTM_NEWNEIGHTBL,
						     NLM_F_MULTI) <= 0)
				goto out;
		}

		neigh_skip = 0;
	}
out:
	read_unlock(&neigh_tbl_lock);
	cb->args[0] = tidx;
	cb->args[1] = nidx;

	return skb->len;
}

static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
			   u32 pid, u32 seq, int type, unsigned int flags)
{
	unsigned long now = jiffies;
	struct nda_cacheinfo ci;
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ndm = nlmsg_data(nlh);
	ndm->ndm_family = neigh->ops->family;
	ndm->ndm_pad1 = 0;
	ndm->ndm_pad2 = 0;
	ndm->ndm_flags = neigh->flags;
	ndm->ndm_type = neigh->type;
	ndm->ndm_ifindex = neigh->dev->ifindex;

	NLA_PUT(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key);

	read_lock_bh(&neigh->lock);
	ndm->ndm_state = neigh->nud_state;
	if ((neigh->nud_state & NUD_VALID) &&
	    nla_put(skb, NDA_LLADDR, neigh->dev->addr_len, neigh->ha) < 0) {
		read_unlock_bh(&neigh->lock);
		goto nla_put_failure;
	}

	ci.ndm_used = now - neigh->used;
	ci.ndm_confirmed = now - neigh->confirmed;
	ci.ndm_updated = now - neigh->updated;
	ci.ndm_refcnt = atomic_read(&neigh->refcnt) - 1;
	read_unlock_bh(&neigh->lock);

	NLA_PUT_U32(skb, NDA_PROBES, atomic_read(&neigh->probes));
	NLA_PUT(skb, NDA_CACHEINFO, sizeof(ci), &ci);

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static void neigh_update_notify(struct neighbour *neigh)
{
	call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
	__neigh_notify(neigh, RTM_NEWNEIGH, 0);
}

static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
			    struct netlink_callback *cb)
{
	struct neighbour *n;
	int rc, h, s_h = cb->args[1];
	int idx, s_idx = idx = cb->args[2];

	read_lock_bh(&tbl->lock);
	for (h = 0; h <= tbl->hash_mask; h++) {
		if (h < s_h)
			continue;
		if (h > s_h)
			s_idx = 0;
		for (n = tbl->hash_buckets[h], idx = 0; n; n = n->next, idx++) {
			if (idx < s_idx)
				continue;
			if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).pid,
					    cb->nlh->nlmsg_seq,
					    RTM_NEWNEIGH,
					    NLM_F_MULTI) <= 0) {
				read_unlock_bh(&tbl->lock);
				rc = -1;
				goto out;
			}
		}
	}
	read_unlock_bh(&tbl->lock);
	rc = skb->len;
out:
	cb->args[1] = h;
	cb->args[2] = idx;
	return rc;
}

static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct neigh_table *tbl;
	int t, family, s_t;

	read_lock(&neigh_tbl_lock);
	family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
	s_t = cb->args[0];

	for (tbl = neigh_tables, t = 0; tbl; tbl = tbl->next, t++) {
		if (t < s_t || (family && tbl->family != family))
			continue;
		if (t > s_t)
			memset(&cb->args[1], 0, sizeof(cb->args) -
						sizeof(cb->args[0]));
		if (neigh_dump_table(tbl, skb, cb) < 0)
			break;
	}
	read_unlock(&neigh_tbl_lock);

	cb->args[0] = t;
	return skb->len;
}

void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
{
	int chain;

	read_lock_bh(&tbl->lock);
	for (chain = 0; chain <= tbl->hash_mask; chain++) {
		struct neighbour *n;

		for (n = tbl->hash_buckets[chain]; n; n = n->next)
			cb(n, cookie);
	}
	read_unlock_bh(&tbl->lock);
}
EXPORT_SYMBOL(neigh_for_each);

/* The tbl->lock must be held as a writer and BH disabled. */
void __neigh_for_each_release(struct neigh_table *tbl,
			      int (*cb)(struct neighbour *))
{
	int chain;

	for (chain = 0; chain <= tbl->hash_mask; chain++) {
		struct neighbour *n, **np;

		np = &tbl->hash_buckets[chain];
		while ((n = *np) != NULL) {
			int release;

			write_lock(&n->lock);
			release = cb(n);
			if (release) {
				*np = n->next;
				n->dead = 1;
			} else
				np = &n->next;
			write_unlock(&n->lock);
			if (release)
				neigh_cleanup_and_release(n);
		}
	}
}
EXPORT_SYMBOL(__neigh_for_each_release);
2106
2107 #ifdef CONFIG_PROC_FS
2108
2109 static struct neighbour *neigh_get_first(struct seq_file *seq)
2110 {
2111 struct neigh_seq_state *state = seq->private;
2112 struct neigh_table *tbl = state->tbl;
2113 struct neighbour *n = NULL;
2114 int bucket = state->bucket;
2115
2116 state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
2117 for (bucket = 0; bucket <= tbl->hash_mask; bucket++) {
2118 n = tbl->hash_buckets[bucket];
2119
2120 while (n) {
2121 if (state->neigh_sub_iter) {
2122 loff_t fakep = 0;
2123 void *v;
2124
2125 v = state->neigh_sub_iter(state, n, &fakep);
2126 if (!v)
2127 goto next;
2128 }
2129 if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
2130 break;
2131 if (n->nud_state & ~NUD_NOARP)
2132 break;
2133 next:
2134 n = n->next;
2135 }
2136
2137 if (n)
2138 break;
2139 }
2140 state->bucket = bucket;
2141
2142 return n;
2143 }
2144
2145 static struct neighbour *neigh_get_next(struct seq_file *seq,
2146 struct neighbour *n,
2147 loff_t *pos)
2148 {
2149 struct neigh_seq_state *state = seq->private;
2150 struct neigh_table *tbl = state->tbl;
2151
2152 if (state->neigh_sub_iter) {
2153 void *v = state->neigh_sub_iter(state, n, pos);
2154 if (v)
2155 return n;
2156 }
2157 n = n->next;
2158
2159 while (1) {
2160 while (n) {
2161 if (state->neigh_sub_iter) {
2162 void *v = state->neigh_sub_iter(state, n, pos);
2163 if (v)
2164 return n;
2165 goto next;
2166 }
2167 if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
2168 break;
2169
2170 if (n->nud_state & ~NUD_NOARP)
2171 break;
2172 next:
2173 n = n->next;
2174 }
2175
2176 if (n)
2177 break;
2178
2179 if (++state->bucket > tbl->hash_mask)
2180 break;
2181
2182 n = tbl->hash_buckets[state->bucket];
2183 }
2184
2185 if (n && pos)
2186 --(*pos);
2187 return n;
2188 }
2189
2190 static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
2191 {
2192 struct neighbour *n = neigh_get_first(seq);
2193
2194 if (n) {
2195 while (*pos) {
2196 n = neigh_get_next(seq, n, pos);
2197 if (!n)
2198 break;
2199 }
2200 }
2201 return *pos ? NULL : n;
2202 }
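
/*
 * Positioning note for the helpers above: neigh_get_next() decrements
 * *pos for every entry it walks past (when pos is non-NULL), so
 * neigh_get_idx() seeks to an absolute offset simply by advancing
 * until *pos reaches zero; a leftover nonzero *pos means the offset
 * lies beyond the table and NULL is returned.
 */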
2203
2204 static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
2205 {
2206 struct neigh_seq_state *state = seq->private;
2207 struct neigh_table *tbl = state->tbl;
2208 struct pneigh_entry *pn = NULL;
2209 int bucket = state->bucket;
2210
2211 state->flags |= NEIGH_SEQ_IS_PNEIGH;
2212 for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
2213 pn = tbl->phash_buckets[bucket];
2214 if (pn)
2215 break;
2216 }
2217 state->bucket = bucket;
2218
2219 return pn;
2220 }
2221
2222 static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
2223 struct pneigh_entry *pn,
2224 loff_t *pos)
2225 {
2226 struct neigh_seq_state *state = seq->private;
2227 struct neigh_table *tbl = state->tbl;
2228
2229 pn = pn->next;
2230 while (!pn) {
2231 if (++state->bucket > PNEIGH_HASHMASK)
2232 break;
2233 pn = tbl->phash_buckets[state->bucket];
2234 if (pn)
2235 break;
2236 }
2237
2238 if (pn && pos)
2239 --(*pos);
2240
2241 return pn;
2242 }
2243
2244 static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
2245 {
2246 struct pneigh_entry *pn = pneigh_get_first(seq);
2247
2248 if (pn) {
2249 while (*pos) {
2250 pn = pneigh_get_next(seq, pn, pos);
2251 if (!pn)
2252 break;
2253 }
2254 }
2255 return *pos ? NULL : pn;
2256 }
2257
2258 static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
2259 {
2260 struct neigh_seq_state *state = seq->private;
2261 void *rc;
2262
2263 rc = neigh_get_idx(seq, pos);
2264 if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
2265 rc = pneigh_get_idx(seq, pos);
2266
2267 return rc;
2268 }
2269
2270 void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
2271 {
2272 struct neigh_seq_state *state = seq->private;
2273 loff_t pos_minus_one;
2274
2275 state->tbl = tbl;
2276 state->bucket = 0;
2277 state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);
2278
2279 read_lock_bh(&tbl->lock);
2280
2281 pos_minus_one = *pos - 1;
2282 return *pos ? neigh_get_idx_any(seq, &pos_minus_one) : SEQ_START_TOKEN;
2283 }
2284 EXPORT_SYMBOL(neigh_seq_start);
2285
2286 void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2287 {
2288 struct neigh_seq_state *state;
2289 void *rc;
2290
2291 if (v == SEQ_START_TOKEN) {
2292 rc = neigh_get_idx(seq, pos);
2293 goto out;
2294 }
2295
2296 state = seq->private;
2297 if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) {
2298 rc = neigh_get_next(seq, v, NULL);
2299 if (rc)
2300 goto out;
2301 if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY))
2302 rc = pneigh_get_first(seq);
2303 } else {
2304 BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY);
2305 rc = pneigh_get_next(seq, v, NULL);
2306 }
2307 out:
2308 ++(*pos);
2309 return rc;
2310 }
2311 EXPORT_SYMBOL(neigh_seq_next);
2312
2313 void neigh_seq_stop(struct seq_file *seq, void *v)
2314 {
2315 struct neigh_seq_state *state = seq->private;
2316 struct neigh_table *tbl = state->tbl;
2317
2318 read_unlock_bh(&tbl->lock);
2319 }
2320 EXPORT_SYMBOL(neigh_seq_stop);
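
/*
 * Hedged sketch of how a protocol wires the three helpers above into
 * a seq_file, much as net/ipv4/arp.c does for /proc/net/arp.  The
 * open routine (not shown) must install a struct neigh_seq_state as
 * seq->private; arp_tbl assumes <net/arp.h>, and the "example_seq_*"
 * names are illustrative.
 */
static void *example_seq_start(struct seq_file *seq, loff_t *pos)
{
	return neigh_seq_start(seq, pos, &arp_tbl, NEIGH_SEQ_SKIP_NOARP);
}

static int example_seq_show(struct seq_file *seq, void *v)
{
	if (v != SEQ_START_TOKEN)
		seq_printf(seq, "%s\n", ((struct neighbour *)v)->dev->name);
	return 0;
}

static const struct seq_operations example_seq_ops = {
	.start = example_seq_start,
	.next  = neigh_seq_next,
	.stop  = neigh_seq_stop,
	.show  = example_seq_show,
};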
2321
2322 /* statistics via seq_file */
2323
2324 static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
2325 {
2326 struct proc_dir_entry *pde = seq->private;
2327 struct neigh_table *tbl = pde->data;
2328 int cpu;
2329
2330 if (*pos == 0)
2331 return SEQ_START_TOKEN;
2332
2333 for (cpu = *pos-1; cpu < NR_CPUS; ++cpu) {
2334 if (!cpu_possible(cpu))
2335 continue;
2336 *pos = cpu+1;
2337 return per_cpu_ptr(tbl->stats, cpu);
2338 }
2339 return NULL;
2340 }
2341
2342 static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2343 {
2344 struct proc_dir_entry *pde = seq->private;
2345 struct neigh_table *tbl = pde->data;
2346 int cpu;
2347
2348 for (cpu = *pos; cpu < NR_CPUS; ++cpu) {
2349 if (!cpu_possible(cpu))
2350 continue;
2351 *pos = cpu+1;
2352 return per_cpu_ptr(tbl->stats, cpu);
2353 }
2354 return NULL;
2355 }
2356
2357 static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
2358 {
2359
2360 }
2361
2362 static int neigh_stat_seq_show(struct seq_file *seq, void *v)
2363 {
2364 struct proc_dir_entry *pde = seq->private;
2365 struct neigh_table *tbl = pde->data;
2366 struct neigh_statistics *st = v;
2367
2368 if (v == SEQ_START_TOKEN) {
2369 seq_printf(seq, "entries allocs destroys hash_grows lookups hits res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs\n");
2370 return 0;
2371 }
2372
2373 seq_printf(seq, "%08x %08lx %08lx %08lx %08lx %08lx %08lx "
2374 "%08lx %08lx %08lx %08lx\n",
2375 atomic_read(&tbl->entries),
2376
2377 st->allocs,
2378 st->destroys,
2379 st->hash_grows,
2380
2381 st->lookups,
2382 st->hits,
2383
2384 st->res_failed,
2385
2386 st->rcv_probes_mcast,
2387 st->rcv_probes_ucast,
2388
2389 st->periodic_gc_runs,
2390 st->forced_gc_runs
2391 );
2392
2393 return 0;
2394 }
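
/*
 * These counters surface as /proc/net/stat/<tbl->id> (for ARP that is
 * /proc/net/stat/arp_cache); note that every column, including the
 * entry count, is printed in hexadecimal.
 */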
2395
2396 static const struct seq_operations neigh_stat_seq_ops = {
2397 .start = neigh_stat_seq_start,
2398 .next = neigh_stat_seq_next,
2399 .stop = neigh_stat_seq_stop,
2400 .show = neigh_stat_seq_show,
2401 };
2402
2403 static int neigh_stat_seq_open(struct inode *inode, struct file *file)
2404 {
2405 int ret = seq_open(file, &neigh_stat_seq_ops);
2406
2407 if (!ret) {
2408 struct seq_file *sf = file->private_data;
2409 sf->private = PDE(inode);
2410 }
2411 return ret;
2412 }
2413
2414 static const struct file_operations neigh_stat_seq_fops = {
2415 .owner = THIS_MODULE,
2416 .open = neigh_stat_seq_open,
2417 .read = seq_read,
2418 .llseek = seq_lseek,
2419 .release = seq_release,
2420 };
2421
2422 #endif /* CONFIG_PROC_FS */
2423
2424 static inline size_t neigh_nlmsg_size(void)
2425 {
2426 return NLMSG_ALIGN(sizeof(struct ndmsg))
2427 + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
2428 + nla_total_size(MAX_ADDR_LEN) /* NDA_LLADDR */
2429 + nla_total_size(sizeof(struct nda_cacheinfo))
2430 + nla_total_size(4); /* NDA_PROBES */
2431 }
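
/*
 * Worked example of the estimate above, assuming MAX_ADDR_LEN is 32
 * and NLA_HDRLEN is 4 (nla_total_size(n) == NLA_ALIGN(NLA_HDRLEN + n)):
 *
 *   NLMSG_ALIGN(sizeof(struct ndmsg))    =  12
 *   nla_total_size(32)  NDA_DST          =  36
 *   nla_total_size(32)  NDA_LLADDR       =  36
 *   nla_total_size(16)  NDA_CACHEINFO    =  20
 *   nla_total_size(4)   NDA_PROBES       =   8
 *                                          ---
 *                                          112 bytes
 */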
2432
2433 static void __neigh_notify(struct neighbour *n, int type, int flags)
2434 {
2435 struct sk_buff *skb;
2436 int err = -ENOBUFS;
2437
2438 skb = nlmsg_new(neigh_nlmsg_size(), GFP_ATOMIC);
2439 if (skb == NULL)
2440 goto errout;
2441
2442 err = neigh_fill_info(skb, n, 0, 0, type, flags);
2443 if (err < 0) {
2444 /* -EMSGSIZE implies BUG in neigh_nlmsg_size() */
2445 WARN_ON(err == -EMSGSIZE);
2446 kfree_skb(skb);
2447 goto errout;
2448 }
2449 err = rtnl_notify(skb, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
2450 errout:
2451 if (err < 0)
2452 rtnl_set_sk_err(RTNLGRP_NEIGH, err);
2453 }
2454
2455 #ifdef CONFIG_ARPD
2456 void neigh_app_ns(struct neighbour *n)
2457 {
2458 __neigh_notify(n, RTM_GETNEIGH, NLM_F_REQUEST);
2459 }
2460 #endif /* CONFIG_ARPD */
2461
2462 #ifdef CONFIG_SYSCTL
2463
2464 static struct neigh_sysctl_table {
2465 struct ctl_table_header *sysctl_header;
2466 ctl_table neigh_vars[__NET_NEIGH_MAX];
2467 ctl_table neigh_dev[2];
2468 ctl_table neigh_neigh_dir[2];
2469 ctl_table neigh_proto_dir[2];
2470 ctl_table neigh_root_dir[2];
2471 } neigh_sysctl_template __read_mostly = {
2472 .neigh_vars = {
2473 {
2474 .ctl_name = NET_NEIGH_MCAST_SOLICIT,
2475 .procname = "mcast_solicit",
2476 .maxlen = sizeof(int),
2477 .mode = 0644,
2478 .proc_handler = &proc_dointvec,
2479 },
2480 {
2481 .ctl_name = NET_NEIGH_UCAST_SOLICIT,
2482 .procname = "ucast_solicit",
2483 .maxlen = sizeof(int),
2484 .mode = 0644,
2485 .proc_handler = &proc_dointvec,
2486 },
2487 {
2488 .ctl_name = NET_NEIGH_APP_SOLICIT,
2489 .procname = "app_solicit",
2490 .maxlen = sizeof(int),
2491 .mode = 0644,
2492 .proc_handler = &proc_dointvec,
2493 },
2494 {
2495 .ctl_name = NET_NEIGH_RETRANS_TIME,
2496 .procname = "retrans_time",
2497 .maxlen = sizeof(int),
2498 .mode = 0644,
2499 .proc_handler = &proc_dointvec_userhz_jiffies,
2500 },
2501 {
2502 .ctl_name = NET_NEIGH_REACHABLE_TIME,
2503 .procname = "base_reachable_time",
2504 .maxlen = sizeof(int),
2505 .mode = 0644,
2506 .proc_handler = &proc_dointvec_jiffies,
2507 .strategy = &sysctl_jiffies,
2508 },
2509 {
2510 .ctl_name = NET_NEIGH_DELAY_PROBE_TIME,
2511 .procname = "delay_first_probe_time",
2512 .maxlen = sizeof(int),
2513 .mode = 0644,
2514 .proc_handler = &proc_dointvec_jiffies,
2515 .strategy = &sysctl_jiffies,
2516 },
2517 {
2518 .ctl_name = NET_NEIGH_GC_STALE_TIME,
2519 .procname = "gc_stale_time",
2520 .maxlen = sizeof(int),
2521 .mode = 0644,
2522 .proc_handler = &proc_dointvec_jiffies,
2523 .strategy = &sysctl_jiffies,
2524 },
2525 {
2526 .ctl_name = NET_NEIGH_UNRES_QLEN,
2527 .procname = "unres_qlen",
2528 .maxlen = sizeof(int),
2529 .mode = 0644,
2530 .proc_handler = &proc_dointvec,
2531 },
2532 {
2533 .ctl_name = NET_NEIGH_PROXY_QLEN,
2534 .procname = "proxy_qlen",
2535 .maxlen = sizeof(int),
2536 .mode = 0644,
2537 .proc_handler = &proc_dointvec,
2538 },
2539 {
2540 .ctl_name = NET_NEIGH_ANYCAST_DELAY,
2541 .procname = "anycast_delay",
2542 .maxlen = sizeof(int),
2543 .mode = 0644,
2544 .proc_handler = &proc_dointvec_userhz_jiffies,
2545 },
2546 {
2547 .ctl_name = NET_NEIGH_PROXY_DELAY,
2548 .procname = "proxy_delay",
2549 .maxlen = sizeof(int),
2550 .mode = 0644,
2551 .proc_handler = &proc_dointvec_userhz_jiffies,
2552 },
2553 {
2554 .ctl_name = NET_NEIGH_LOCKTIME,
2555 .procname = "locktime",
2556 .maxlen = sizeof(int),
2557 .mode = 0644,
2558 .proc_handler = &proc_dointvec_userhz_jiffies,
2559 },
2560 {
2561 .ctl_name = NET_NEIGH_GC_INTERVAL,
2562 .procname = "gc_interval",
2563 .maxlen = sizeof(int),
2564 .mode = 0644,
2565 .proc_handler = &proc_dointvec_jiffies,
2566 .strategy = &sysctl_jiffies,
2567 },
2568 {
2569 .ctl_name = NET_NEIGH_GC_THRESH1,
2570 .procname = "gc_thresh1",
2571 .maxlen = sizeof(int),
2572 .mode = 0644,
2573 .proc_handler = &proc_dointvec,
2574 },
2575 {
2576 .ctl_name = NET_NEIGH_GC_THRESH2,
2577 .procname = "gc_thresh2",
2578 .maxlen = sizeof(int),
2579 .mode = 0644,
2580 .proc_handler = &proc_dointvec,
2581 },
2582 {
2583 .ctl_name = NET_NEIGH_GC_THRESH3,
2584 .procname = "gc_thresh3",
2585 .maxlen = sizeof(int),
2586 .mode = 0644,
2587 .proc_handler = &proc_dointvec,
2588 },
2589 {
2590 .ctl_name = NET_NEIGH_RETRANS_TIME_MS,
2591 .procname = "retrans_time_ms",
2592 .maxlen = sizeof(int),
2593 .mode = 0644,
2594 .proc_handler = &proc_dointvec_ms_jiffies,
2595 .strategy = &sysctl_ms_jiffies,
2596 },
2597 {
2598 .ctl_name = NET_NEIGH_REACHABLE_TIME_MS,
2599 .procname = "base_reachable_time_ms",
2600 .maxlen = sizeof(int),
2601 .mode = 0644,
2602 .proc_handler = &proc_dointvec_ms_jiffies,
2603 .strategy = &sysctl_ms_jiffies,
2604 },
2605 },
2606 .neigh_dev = {
2607 {
2608 .ctl_name = NET_PROTO_CONF_DEFAULT,
2609 .procname = "default",
2610 .mode = 0555,
2611 },
2612 },
2613 .neigh_neigh_dir = {
2614 {
2615 .procname = "neigh",
2616 .mode = 0555,
2617 },
2618 },
2619 .neigh_proto_dir = {
2620 {
2621 .mode = 0555,
2622 },
2623 },
2624 .neigh_root_dir = {
2625 {
2626 .ctl_name = CTL_NET,
2627 .procname = "net",
2628 .mode = 0555,
2629 },
2630 },
2631 };
2632
2633 int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
2634 int p_id, int pdev_id, char *p_name,
2635 proc_handler *handler, ctl_handler *strategy)
2636 {
2637 struct neigh_sysctl_table *t = kmemdup(&neigh_sysctl_template,
2638 sizeof(*t), GFP_KERNEL);
2639 const char *dev_name_source = NULL;
2640 char *dev_name = NULL;
2641 int err = 0;
2642
2643 if (!t)
2644 return -ENOBUFS;
2645 t->neigh_vars[0].data = &p->mcast_probes;
2646 t->neigh_vars[1].data = &p->ucast_probes;
2647 t->neigh_vars[2].data = &p->app_probes;
2648 t->neigh_vars[3].data = &p->retrans_time;
2649 t->neigh_vars[4].data = &p->base_reachable_time;
2650 t->neigh_vars[5].data = &p->delay_probe_time;
2651 t->neigh_vars[6].data = &p->gc_staletime;
2652 t->neigh_vars[7].data = &p->queue_len;
2653 t->neigh_vars[8].data = &p->proxy_qlen;
2654 t->neigh_vars[9].data = &p->anycast_delay;
2655 t->neigh_vars[10].data = &p->proxy_delay;
2656 t->neigh_vars[11].data = &p->locktime;
2657
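	/*
	 * Slots 12-15 of neigh_vars (gc_interval, gc_thresh1-3) are
	 * per-table rather than per-parms: for a per-device entry they
	 * are hidden below, otherwise they are pointed at the ints the
	 * owning neigh_table lays out directly after its default
	 * neigh_parms -- hence the (int *)(p + 1) arithmetic.
	 */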
2658 if (dev) {
2659 dev_name_source = dev->name;
2660 t->neigh_dev[0].ctl_name = dev->ifindex;
2661 t->neigh_vars[12].procname = NULL;
2662 t->neigh_vars[13].procname = NULL;
2663 t->neigh_vars[14].procname = NULL;
2664 t->neigh_vars[15].procname = NULL;
2665 } else {
2666 dev_name_source = t->neigh_dev[0].procname;
2667 t->neigh_vars[12].data = (int *)(p + 1);
2668 t->neigh_vars[13].data = (int *)(p + 1) + 1;
2669 t->neigh_vars[14].data = (int *)(p + 1) + 2;
2670 t->neigh_vars[15].data = (int *)(p + 1) + 3;
2671 }
2672
2673 t->neigh_vars[16].data = &p->retrans_time;
2674 t->neigh_vars[17].data = &p->base_reachable_time;
2675
2676 if (handler || strategy) {
2677 /* RetransTime */
2678 t->neigh_vars[3].proc_handler = handler;
2679 t->neigh_vars[3].strategy = strategy;
2680 t->neigh_vars[3].extra1 = dev;
2681 /* ReachableTime */
2682 t->neigh_vars[4].proc_handler = handler;
2683 t->neigh_vars[4].strategy = strategy;
2684 t->neigh_vars[4].extra1 = dev;
2685 /* RetransTime (in milliseconds) */
2686 t->neigh_vars[16].proc_handler = handler;
2687 t->neigh_vars[16].strategy = strategy;
2688 t->neigh_vars[16].extra1 = dev;
2689 /* ReachableTime (in milliseconds) */
2690 t->neigh_vars[17].proc_handler = handler;
2691 t->neigh_vars[17].strategy = strategy;
2692 t->neigh_vars[17].extra1 = dev;
2693 }
2694
2695 dev_name = kstrdup(dev_name_source, GFP_KERNEL);
2696 if (!dev_name) {
2697 err = -ENOBUFS;
2698 goto free;
2699 }
2700
2701 t->neigh_dev[0].procname = dev_name;
2702
2703 t->neigh_neigh_dir[0].ctl_name = pdev_id;
2704
2705 t->neigh_proto_dir[0].procname = p_name;
2706 t->neigh_proto_dir[0].ctl_name = p_id;
2707
2708 t->neigh_dev[0].child = t->neigh_vars;
2709 t->neigh_neigh_dir[0].child = t->neigh_dev;
2710 t->neigh_proto_dir[0].child = t->neigh_neigh_dir;
2711 t->neigh_root_dir[0].child = t->neigh_proto_dir;
2712
2713 t->sysctl_header = register_sysctl_table(t->neigh_root_dir);
2714 if (!t->sysctl_header) {
2715 err = -ENOBUFS;
2716 goto free_procname;
2717 }
2718 p->sysctl_table = t;
2719 return 0;
2720
2721 /* error path */
2722 free_procname:
2723 kfree(dev_name);
2724 free:
2725 kfree(t);
2726
2727 return err;
2728 }
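
/*
 * Hedged caller sketch: IPv4 ARP registers its per-interface parms
 * roughly like this (cf. net/ipv4/arp.c), passing NULL handler and
 * strategy so the template defaults stand.  "example_register" is an
 * illustrative name.
 */
static int example_register(struct net_device *dev, struct neigh_parms *p)
{
	return neigh_sysctl_register(dev, p, NET_IPV4, NET_IPV4_NEIGH,
				     "ipv4", NULL, NULL);
}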
2729
2730 void neigh_sysctl_unregister(struct neigh_parms *p)
2731 {
2732 if (p->sysctl_table) {
2733 struct neigh_sysctl_table *t = p->sysctl_table;
2734 p->sysctl_table = NULL;
2735 unregister_sysctl_table(t->sysctl_header);
2736 kfree(t->neigh_dev[0].procname);
2737 kfree(t);
2738 }
2739 }
2740
2741 #endif /* CONFIG_SYSCTL */
2742
2743 static int __init neigh_init(void)
2744 {
2745 rtnl_register(PF_UNSPEC, RTM_NEWNEIGH, neigh_add, NULL);
2746 rtnl_register(PF_UNSPEC, RTM_DELNEIGH, neigh_delete, NULL);
2747 rtnl_register(PF_UNSPEC, RTM_GETNEIGH, NULL, neigh_dump_info);
2748
2749 rtnl_register(PF_UNSPEC, RTM_GETNEIGHTBL, NULL, neightbl_dump_info);
2750 rtnl_register(PF_UNSPEC, RTM_SETNEIGHTBL, neightbl_set, NULL);
2751
2752 return 0;
2753 }
2754
2755 subsys_initcall(neigh_init);
2756
2757 EXPORT_SYMBOL(__neigh_event_send);
2758 EXPORT_SYMBOL(neigh_changeaddr);
2759 EXPORT_SYMBOL(neigh_compat_output);
2760 EXPORT_SYMBOL(neigh_connected_output);
2761 EXPORT_SYMBOL(neigh_create);
2762 EXPORT_SYMBOL(neigh_destroy);
2763 EXPORT_SYMBOL(neigh_event_ns);
2764 EXPORT_SYMBOL(neigh_ifdown);
2765 EXPORT_SYMBOL(neigh_lookup);
2766 EXPORT_SYMBOL(neigh_lookup_nodev);
2767 EXPORT_SYMBOL(neigh_parms_alloc);
2768 EXPORT_SYMBOL(neigh_parms_release);
2769 EXPORT_SYMBOL(neigh_rand_reach_time);
2770 EXPORT_SYMBOL(neigh_resolve_output);
2771 EXPORT_SYMBOL(neigh_table_clear);
2772 EXPORT_SYMBOL(neigh_table_init);
2773 EXPORT_SYMBOL(neigh_table_init_no_netlink);
2774 EXPORT_SYMBOL(neigh_update);
2775 EXPORT_SYMBOL(pneigh_enqueue);
2776 EXPORT_SYMBOL(pneigh_lookup);
2777
2778 #ifdef CONFIG_ARPD
2779 EXPORT_SYMBOL(neigh_app_ns);
2780 #endif
2781 #ifdef CONFIG_SYSCTL
2782 EXPORT_SYMBOL(neigh_sysctl_register);
2783 EXPORT_SYMBOL(neigh_sysctl_unregister);
2784 #endif