net/core/neighbour.c
/*
 *	Generic address resolution entity
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Fixes:
 *	Vitaly E. Lavrov	releasing NULL neighbor in neigh_add.
 *	Harald Welte		Add neighbour cache statistics like rtstat
 */
17
18 #include <linux/types.h>
19 #include <linux/kernel.h>
20 #include <linux/module.h>
21 #include <linux/socket.h>
22 #include <linux/netdevice.h>
23 #include <linux/proc_fs.h>
24 #ifdef CONFIG_SYSCTL
25 #include <linux/sysctl.h>
26 #endif
27 #include <linux/times.h>
28 #include <net/neighbour.h>
29 #include <net/dst.h>
30 #include <net/sock.h>
31 #include <net/netevent.h>
32 #include <net/netlink.h>
33 #include <linux/rtnetlink.h>
34 #include <linux/random.h>
35 #include <linux/string.h>
36 #include <linux/log2.h>
37
#define NEIGH_DEBUG 1

#define NEIGH_PRINTK(x...) printk(x)
#define NEIGH_NOPRINTK(x...) do { ; } while(0)
#define NEIGH_PRINTK0 NEIGH_PRINTK
#define NEIGH_PRINTK1 NEIGH_NOPRINTK
#define NEIGH_PRINTK2 NEIGH_NOPRINTK

#if NEIGH_DEBUG >= 1
#undef NEIGH_PRINTK1
#define NEIGH_PRINTK1 NEIGH_PRINTK
#endif
#if NEIGH_DEBUG >= 2
#undef NEIGH_PRINTK2
#define NEIGH_PRINTK2 NEIGH_PRINTK
#endif

#define PNEIGH_HASHMASK		0xF

static void neigh_timer_handler(unsigned long arg);
static void __neigh_notify(struct neighbour *n, int type, int flags);
static void neigh_update_notify(struct neighbour *neigh);
static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev);

static struct neigh_table *neigh_tables;
#ifdef CONFIG_PROC_FS
static const struct file_operations neigh_stat_seq_fops;
#endif

/*
   Neighbour hash table buckets are protected with the rwlock tbl->lock.

   - All scans of and updates to the hash buckets MUST be made under
     this lock.
   - NOTHING clever should be done under this lock: no callbacks into
     protocol backends, no attempts to send anything to the network.
     Doing so will result in deadlocks if the backend/driver wants to
     use the neighbour cache.
   - If an entry requires some non-trivial action, increase its
     reference count and release the table lock.

   Neighbour entries are protected:
   - by their reference count.
   - by the rwlock neigh->lock.

   The reference count prevents destruction.

   neigh->lock mainly serializes the link-layer address data and its
   validity state. However, the same lock is also used to protect other
   entry fields:
    - the timer
    - the resolution queue

   Again, nothing clever should be done under neigh->lock; the most
   complicated operation we allow there is dev->hard_header. It is
   assumed that dev->hard_header is simple and does not call back into
   the neighbour tables.

   The last lock is neigh_tbl_lock. It is a pure SMP lock protecting
   the list of neighbour tables. This list is used only in process
   context.
 */
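
/*
 * A minimal sketch of the pattern the rules above imply (illustrative
 * only; find_entry_in_bucket() and do_something_slow() are hypothetical
 * placeholders, not helpers defined in this file):
 *
 *	struct neighbour *n;
 *
 *	read_lock_bh(&tbl->lock);
 *	n = find_entry_in_bucket(tbl);	// scan only; no callbacks here
 *	if (n)
 *		neigh_hold(n);		// pin it before dropping the lock
 *	read_unlock_bh(&tbl->lock);
 *
 *	if (n) {
 *		do_something_slow(n);	// safe: table lock not held
 *		neigh_release(n);
 *	}
 */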

static DEFINE_RWLOCK(neigh_tbl_lock);

static int neigh_blackhole(struct sk_buff *skb)
{
	kfree_skb(skb);
	return -ENETDOWN;
}

static void neigh_cleanup_and_release(struct neighbour *neigh)
{
	if (neigh->parms->neigh_cleanup)
		neigh->parms->neigh_cleanup(neigh);

	__neigh_notify(neigh, RTM_DELNEIGH, 0);
	neigh_release(neigh);
}

/*
 * Returns a value uniformly distributed over the interval
 * (1/2)*base...(3/2)*base. This matches the default IPv6 settings and
 * is not overridable, because it is a genuinely reasonable choice.
 */

unsigned long neigh_rand_reach_time(unsigned long base)
{
	return (base ? (net_random() % base) + (base >> 1) : 0);
}
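
/*
 * Worked example: with base = 30 * HZ, net_random() % base falls in
 * [0, 30*HZ) and adding base >> 1 shifts that to [15*HZ, 45*HZ), i.e.
 * a reachable time between 15 and 45 seconds. A base of 0 yields 0.
 */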


static int neigh_forced_gc(struct neigh_table *tbl)
{
	int shrunk = 0;
	int i;

	NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);

	write_lock_bh(&tbl->lock);
	for (i = 0; i <= tbl->hash_mask; i++) {
		struct neighbour *n, **np;

		np = &tbl->hash_buckets[i];
		while ((n = *np) != NULL) {
			/* A neighbour record may be discarded if:
			 * - nobody refers to it.
			 * - it is not permanent
			 */
			write_lock(&n->lock);
			if (atomic_read(&n->refcnt) == 1 &&
			    !(n->nud_state & NUD_PERMANENT)) {
				*np = n->next;
				n->dead = 1;
				shrunk = 1;
				write_unlock(&n->lock);
				neigh_cleanup_and_release(n);
				continue;
			}
			write_unlock(&n->lock);
			np = &n->next;
		}
	}

	tbl->last_flush = jiffies;

	write_unlock_bh(&tbl->lock);

	return shrunk;
}

static int neigh_del_timer(struct neighbour *n)
{
	if ((n->nud_state & NUD_IN_TIMER) &&
	    del_timer(&n->timer)) {
		neigh_release(n);
		return 1;
	}
	return 0;
}

static void pneigh_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(list)) != NULL) {
		dev_put(skb->dev);
		kfree_skb(skb);
	}
}

static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev)
{
	int i;

	for (i = 0; i <= tbl->hash_mask; i++) {
		struct neighbour *n, **np = &tbl->hash_buckets[i];

		while ((n = *np) != NULL) {
			if (dev && n->dev != dev) {
				np = &n->next;
				continue;
			}
			*np = n->next;
			write_lock(&n->lock);
			neigh_del_timer(n);
			n->dead = 1;

			if (atomic_read(&n->refcnt) != 1) {
				/* The most unpleasant situation.
				   We must destroy the neighbour entry,
				   but someone still uses it.

				   The destroy will be delayed until
				   the last user releases us, but
				   we must kill timers etc. and move
				   the entry to a safe state.
				 */
				skb_queue_purge(&n->arp_queue);
				n->output = neigh_blackhole;
				if (n->nud_state & NUD_VALID)
					n->nud_state = NUD_NOARP;
				else
					n->nud_state = NUD_NONE;
				NEIGH_PRINTK2("neigh %p is stray.\n", n);
			}
			write_unlock(&n->lock);
			neigh_cleanup_and_release(n);
		}
	}
}

void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
{
	write_lock_bh(&tbl->lock);
	neigh_flush_dev(tbl, dev);
	write_unlock_bh(&tbl->lock);
}

int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
	write_lock_bh(&tbl->lock);
	neigh_flush_dev(tbl, dev);
	pneigh_ifdown(tbl, dev);
	write_unlock_bh(&tbl->lock);

	del_timer_sync(&tbl->proxy_timer);
	pneigh_queue_purge(&tbl->proxy_queue);
	return 0;
}

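/*
 * Allocation policy, as implemented below: once the table holds
 * gc_thresh3 entries, or holds gc_thresh2 entries and more than 5
 * seconds have passed since the last flush, a synchronous forced GC
 * runs first; if that frees nothing and we are still at or above
 * gc_thresh3, the allocation is refused.
 */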
static struct neighbour *neigh_alloc(struct neigh_table *tbl)
{
	struct neighbour *n = NULL;
	unsigned long now = jiffies;
	int entries;

	entries = atomic_inc_return(&tbl->entries) - 1;
	if (entries >= tbl->gc_thresh3 ||
	    (entries >= tbl->gc_thresh2 &&
	     time_after(now, tbl->last_flush + 5 * HZ))) {
		if (!neigh_forced_gc(tbl) &&
		    entries >= tbl->gc_thresh3)
			goto out_entries;
	}

	n = kmem_cache_zalloc(tbl->kmem_cachep, GFP_ATOMIC);
	if (!n)
		goto out_entries;

	skb_queue_head_init(&n->arp_queue);
	rwlock_init(&n->lock);
	n->updated = n->used = now;
	n->nud_state = NUD_NONE;
	n->output = neigh_blackhole;
	n->parms = neigh_parms_clone(&tbl->parms);
	init_timer(&n->timer);
	n->timer.function = neigh_timer_handler;
	n->timer.data = (unsigned long)n;

	NEIGH_CACHE_STAT_INC(tbl, allocs);
	n->tbl = tbl;
	atomic_set(&n->refcnt, 1);
	n->dead = 1;
out:
	return n;

out_entries:
	atomic_dec(&tbl->entries);
	goto out;
}

static struct neighbour **neigh_hash_alloc(unsigned int entries)
{
	unsigned long size = entries * sizeof(struct neighbour *);
	struct neighbour **ret;

	if (size <= PAGE_SIZE) {
		ret = kzalloc(size, GFP_ATOMIC);
	} else {
		ret = (struct neighbour **)
		      __get_free_pages(GFP_ATOMIC|__GFP_ZERO, get_order(size));
	}
	return ret;
}

static void neigh_hash_free(struct neighbour **hash, unsigned int entries)
{
	unsigned long size = entries * sizeof(struct neighbour *);

	if (size <= PAGE_SIZE)
		kfree(hash);
	else
		free_pages((unsigned long)hash, get_order(size));
}

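/*
 * The table is grown (from neigh_create() below) whenever the entry
 * count exceeds the bucket count: the bucket array doubles and every
 * chain is rehashed under a freshly drawn hash_rnd.
 */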
static void neigh_hash_grow(struct neigh_table *tbl, unsigned long new_entries)
{
	struct neighbour **new_hash, **old_hash;
	unsigned int i, new_hash_mask, old_entries;

	NEIGH_CACHE_STAT_INC(tbl, hash_grows);

	BUG_ON(!is_power_of_2(new_entries));
	new_hash = neigh_hash_alloc(new_entries);
	if (!new_hash)
		return;

	old_entries = tbl->hash_mask + 1;
	new_hash_mask = new_entries - 1;
	old_hash = tbl->hash_buckets;

	get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));
	for (i = 0; i < old_entries; i++) {
		struct neighbour *n, *next;

		for (n = old_hash[i]; n; n = next) {
			unsigned int hash_val = tbl->hash(n->primary_key, n->dev);

			hash_val &= new_hash_mask;
			next = n->next;

			n->next = new_hash[hash_val];
			new_hash[hash_val] = n;
		}
	}
	tbl->hash_buckets = new_hash;
	tbl->hash_mask = new_hash_mask;

	neigh_hash_free(old_hash, old_entries);
}

struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
			       struct net_device *dev)
{
	struct neighbour *n;
	int key_len = tbl->key_len;
	u32 hash_val = tbl->hash(pkey, dev);

	NEIGH_CACHE_STAT_INC(tbl, lookups);

	read_lock_bh(&tbl->lock);
	for (n = tbl->hash_buckets[hash_val & tbl->hash_mask]; n; n = n->next) {
		if (dev == n->dev && !memcmp(n->primary_key, pkey, key_len)) {
			neigh_hold(n);
			NEIGH_CACHE_STAT_INC(tbl, hits);
			break;
		}
	}
	read_unlock_bh(&tbl->lock);
	return n;
}

struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, const void *pkey)
{
	struct neighbour *n;
	int key_len = tbl->key_len;
	u32 hash_val = tbl->hash(pkey, NULL);

	NEIGH_CACHE_STAT_INC(tbl, lookups);

	read_lock_bh(&tbl->lock);
	for (n = tbl->hash_buckets[hash_val & tbl->hash_mask]; n; n = n->next) {
		if (!memcmp(n->primary_key, pkey, key_len)) {
			neigh_hold(n);
			NEIGH_CACHE_STAT_INC(tbl, hits);
			break;
		}
	}
	read_unlock_bh(&tbl->lock);
	return n;
}

struct neighbour *neigh_create(struct neigh_table *tbl, const void *pkey,
			       struct net_device *dev)
{
	u32 hash_val;
	int key_len = tbl->key_len;
	int error;
	struct neighbour *n1, *rc, *n = neigh_alloc(tbl);

	if (!n) {
		rc = ERR_PTR(-ENOBUFS);
		goto out;
	}

	memcpy(n->primary_key, pkey, key_len);
	n->dev = dev;
	dev_hold(dev);

	/* Protocol specific setup. */
	if (tbl->constructor && (error = tbl->constructor(n)) < 0) {
		rc = ERR_PTR(error);
		goto out_neigh_release;
	}

	/* Device specific setup. */
	if (n->parms->neigh_setup &&
	    (error = n->parms->neigh_setup(n)) < 0) {
		rc = ERR_PTR(error);
		goto out_neigh_release;
	}

	n->confirmed = jiffies - (n->parms->base_reachable_time << 1);

	write_lock_bh(&tbl->lock);

	if (atomic_read(&tbl->entries) > (tbl->hash_mask + 1))
		neigh_hash_grow(tbl, (tbl->hash_mask + 1) << 1);

	hash_val = tbl->hash(pkey, dev) & tbl->hash_mask;

	if (n->parms->dead) {
		rc = ERR_PTR(-EINVAL);
		goto out_tbl_unlock;
	}

	for (n1 = tbl->hash_buckets[hash_val]; n1; n1 = n1->next) {
		if (dev == n1->dev && !memcmp(n1->primary_key, pkey, key_len)) {
			neigh_hold(n1);
			rc = n1;
			goto out_tbl_unlock;
		}
	}

	n->next = tbl->hash_buckets[hash_val];
	tbl->hash_buckets[hash_val] = n;
	n->dead = 0;
	neigh_hold(n);
	write_unlock_bh(&tbl->lock);
	NEIGH_PRINTK2("neigh %p is created.\n", n);
	rc = n;
out:
	return rc;
out_tbl_unlock:
	write_unlock_bh(&tbl->lock);
out_neigh_release:
	neigh_release(n);
	goto out;
}

struct pneigh_entry *pneigh_lookup(struct neigh_table *tbl, const void *pkey,
				   struct net_device *dev, int creat)
{
	struct pneigh_entry *n;
	int key_len = tbl->key_len;
	u32 hash_val = *(u32 *)(pkey + key_len - 4);

	hash_val ^= (hash_val >> 16);
	hash_val ^= hash_val >> 8;
	hash_val ^= hash_val >> 4;
	hash_val &= PNEIGH_HASHMASK;

	read_lock_bh(&tbl->lock);

	for (n = tbl->phash_buckets[hash_val]; n; n = n->next) {
		if (!memcmp(n->key, pkey, key_len) &&
		    (n->dev == dev || !n->dev)) {
			read_unlock_bh(&tbl->lock);
			goto out;
		}
	}
	read_unlock_bh(&tbl->lock);
	n = NULL;
	if (!creat)
		goto out;

	n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL);
	if (!n)
		goto out;

	memcpy(n->key, pkey, key_len);
	n->dev = dev;
	if (dev)
		dev_hold(dev);

	if (tbl->pconstructor && tbl->pconstructor(n)) {
		if (dev)
			dev_put(dev);
		kfree(n);
		n = NULL;
		goto out;
	}

	write_lock_bh(&tbl->lock);
	n->next = tbl->phash_buckets[hash_val];
	tbl->phash_buckets[hash_val] = n;
	write_unlock_bh(&tbl->lock);
out:
	return n;
}
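
/*
 * The xor-fold above collapses the last four bytes of the key into the
 * 16-bucket proxy table: after the three shifts-and-xors, the low
 * nibble of hash_val is the xor of all eight nibbles of the original
 * word, and & PNEIGH_HASHMASK keeps just those 4 bits (buckets 0..15).
 */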


int pneigh_delete(struct neigh_table *tbl, const void *pkey,
		  struct net_device *dev)
{
	struct pneigh_entry *n, **np;
	int key_len = tbl->key_len;
	u32 hash_val = *(u32 *)(pkey + key_len - 4);

	hash_val ^= (hash_val >> 16);
	hash_val ^= hash_val >> 8;
	hash_val ^= hash_val >> 4;
	hash_val &= PNEIGH_HASHMASK;

	write_lock_bh(&tbl->lock);
	for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
	     np = &n->next) {
		if (!memcmp(n->key, pkey, key_len) && n->dev == dev) {
			*np = n->next;
			write_unlock_bh(&tbl->lock);
			if (tbl->pdestructor)
				tbl->pdestructor(n);
			if (n->dev)
				dev_put(n->dev);
			kfree(n);
			return 0;
		}
	}
	write_unlock_bh(&tbl->lock);
	return -ENOENT;
}

static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
	struct pneigh_entry *n, **np;
	u32 h;

	for (h = 0; h <= PNEIGH_HASHMASK; h++) {
		np = &tbl->phash_buckets[h];
		while ((n = *np) != NULL) {
			if (!dev || n->dev == dev) {
				*np = n->next;
				if (tbl->pdestructor)
					tbl->pdestructor(n);
				if (n->dev)
					dev_put(n->dev);
				kfree(n);
				continue;
			}
			np = &n->next;
		}
	}
	return -ENOENT;
}


/*
 * The neighbour must already be removed from the table.
 */
void neigh_destroy(struct neighbour *neigh)
{
	struct hh_cache *hh;

	NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);

	if (!neigh->dead) {
		printk(KERN_WARNING
		       "Destroying alive neighbour %p\n", neigh);
		dump_stack();
		return;
	}

	if (neigh_del_timer(neigh))
		printk(KERN_WARNING "Impossible event.\n");

	while ((hh = neigh->hh) != NULL) {
		neigh->hh = hh->hh_next;
		hh->hh_next = NULL;

		write_seqlock_bh(&hh->hh_lock);
		hh->hh_output = neigh_blackhole;
		write_sequnlock_bh(&hh->hh_lock);
		if (atomic_dec_and_test(&hh->hh_refcnt))
			kfree(hh);
	}

	skb_queue_purge(&neigh->arp_queue);

	dev_put(neigh->dev);
	neigh_parms_put(neigh->parms);

	NEIGH_PRINTK2("neigh %p is destroyed.\n", neigh);

	atomic_dec(&neigh->tbl->entries);
	kmem_cache_free(neigh->tbl->kmem_cachep, neigh);
}

/* Neighbour state is suspicious;
   disable the fast path.

   Called with the neighbour write-locked.
 */
static void neigh_suspect(struct neighbour *neigh)
{
	struct hh_cache *hh;

	NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);

	neigh->output = neigh->ops->output;

	for (hh = neigh->hh; hh; hh = hh->hh_next)
		hh->hh_output = neigh->ops->output;
}

/* Neighbour state is OK;
   enable the fast path.

   Called with the neighbour write-locked.
 */
static void neigh_connect(struct neighbour *neigh)
{
	struct hh_cache *hh;

	NEIGH_PRINTK2("neigh %p is connected.\n", neigh);

	neigh->output = neigh->ops->connected_output;

	for (hh = neigh->hh; hh; hh = hh->hh_next)
		hh->hh_output = neigh->ops->hh_output;
}
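
/*
 * Together, neigh_suspect() and neigh_connect() implement the fast/slow
 * path switch: a connected entry transmits via ops->connected_output
 * and the cached-header ops->hh_output, while a suspect entry is routed
 * back through ops->output so reachability is re-verified before each
 * transmit.
 */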

static void neigh_periodic_timer(unsigned long arg)
{
	struct neigh_table *tbl = (struct neigh_table *)arg;
	struct neighbour *n, **np;
	unsigned long expire, now = jiffies;

	NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);

	write_lock(&tbl->lock);

	/*
	 * periodically recompute ReachableTime from the random function
	 */

	if (time_after(now, tbl->last_rand + 300 * HZ)) {
		struct neigh_parms *p;
		tbl->last_rand = now;
		for (p = &tbl->parms; p; p = p->next)
			p->reachable_time =
				neigh_rand_reach_time(p->base_reachable_time);
	}

	np = &tbl->hash_buckets[tbl->hash_chain_gc];
	tbl->hash_chain_gc = ((tbl->hash_chain_gc + 1) & tbl->hash_mask);

	while ((n = *np) != NULL) {
		unsigned int state;

		write_lock(&n->lock);

		state = n->nud_state;
		if (state & (NUD_PERMANENT | NUD_IN_TIMER)) {
			write_unlock(&n->lock);
			goto next_elt;
		}

		if (time_before(n->used, n->confirmed))
			n->used = n->confirmed;

		if (atomic_read(&n->refcnt) == 1 &&
		    (state == NUD_FAILED ||
		     time_after(now, n->used + n->parms->gc_staletime))) {
			*np = n->next;
			n->dead = 1;
			write_unlock(&n->lock);
			neigh_cleanup_and_release(n);
			continue;
		}
		write_unlock(&n->lock);

next_elt:
		np = &n->next;
	}

	/* Cycle through all hash buckets every base_reachable_time/2 ticks.
	 * ARP entry timeouts range from 1/2 base_reachable_time to 3/2
	 * base_reachable_time.
	 */
	expire = tbl->parms.base_reachable_time >> 1;
	expire /= (tbl->hash_mask + 1);
	if (!expire)
		expire = 1;

	if (expire > HZ)
		mod_timer(&tbl->gc_timer, round_jiffies(now + expire));
	else
		mod_timer(&tbl->gc_timer, now + expire);

	write_unlock(&tbl->lock);
}
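
/*
 * Worked example for the rescheduling above (assuming the usual ARP
 * default base_reachable_time of 30 * HZ and a 128-bucket table):
 * expire = (30*HZ/2)/128, so the timer fires roughly every 117 ms at
 * HZ=1000, and one full sweep of all buckets takes about 15 seconds.
 */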

static __inline__ int neigh_max_probes(struct neighbour *n)
{
	struct neigh_parms *p = n->parms;
	return (n->nud_state & NUD_PROBE ?
		p->ucast_probes :
		p->ucast_probes + p->app_probes + p->mcast_probes);
}
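
/*
 * Example, assuming the common defaults ucast_probes = 3, app_probes = 0
 * and mcast_probes = 3 (assumed here for illustration): an entry in
 * NUD_PROBE gives up after 3 unicast probes, while initial resolution
 * (NUD_INCOMPLETE) may send up to 6 probes before failing.
 */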

static inline void neigh_add_timer(struct neighbour *n, unsigned long when)
{
	if (unlikely(mod_timer(&n->timer, when))) {
		printk("NEIGH: BUG, double timer add, state is %x\n",
		       n->nud_state);
		dump_stack();
	}
}

/* Called when a timer expires for a neighbour entry. */

static void neigh_timer_handler(unsigned long arg)
{
	unsigned long now, next;
	struct neighbour *neigh = (struct neighbour *)arg;
	unsigned state;
	int notify = 0;

	write_lock(&neigh->lock);

	state = neigh->nud_state;
	now = jiffies;
	next = now + HZ;

	if (!(state & NUD_IN_TIMER)) {
#ifndef CONFIG_SMP
		printk(KERN_WARNING "neigh: timer & !nud_in_timer\n");
#endif
		goto out;
	}

	if (state & NUD_REACHABLE) {
		if (time_before_eq(now,
				   neigh->confirmed + neigh->parms->reachable_time)) {
			NEIGH_PRINTK2("neigh %p is still alive.\n", neigh);
			next = neigh->confirmed + neigh->parms->reachable_time;
		} else if (time_before_eq(now,
					  neigh->used + neigh->parms->delay_probe_time)) {
			NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
			neigh->nud_state = NUD_DELAY;
			neigh->updated = jiffies;
			neigh_suspect(neigh);
			next = now + neigh->parms->delay_probe_time;
		} else {
			NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);
			neigh->nud_state = NUD_STALE;
			neigh->updated = jiffies;
			neigh_suspect(neigh);
			notify = 1;
		}
	} else if (state & NUD_DELAY) {
		if (time_before_eq(now,
				   neigh->confirmed + neigh->parms->delay_probe_time)) {
			NEIGH_PRINTK2("neigh %p is now reachable.\n", neigh);
			neigh->nud_state = NUD_REACHABLE;
			neigh->updated = jiffies;
			neigh_connect(neigh);
			notify = 1;
			next = neigh->confirmed + neigh->parms->reachable_time;
		} else {
			NEIGH_PRINTK2("neigh %p is probed.\n", neigh);
			neigh->nud_state = NUD_PROBE;
			neigh->updated = jiffies;
			atomic_set(&neigh->probes, 0);
			next = now + neigh->parms->retrans_time;
		}
	} else {
		/* NUD_PROBE|NUD_INCOMPLETE */
		next = now + neigh->parms->retrans_time;
	}

	if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
	    atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
		struct sk_buff *skb;

		neigh->nud_state = NUD_FAILED;
		neigh->updated = jiffies;
		notify = 1;
		NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
		NEIGH_PRINTK2("neigh %p is failed.\n", neigh);

		/* This is a delicate spot. error_report is a complicated
		   routine; in particular, it can hit this very same
		   neighbour entry again!

		   So we try to be careful and avoid a dead loop. --ANK
		 */
		while (neigh->nud_state == NUD_FAILED &&
		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
			write_unlock(&neigh->lock);
			neigh->ops->error_report(neigh, skb);
			write_lock(&neigh->lock);
		}
		skb_queue_purge(&neigh->arp_queue);
	}

	if (neigh->nud_state & NUD_IN_TIMER) {
		if (time_before(next, jiffies + HZ/2))
			next = jiffies + HZ/2;
		if (!mod_timer(&neigh->timer, next))
			neigh_hold(neigh);
	}
	if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
		struct sk_buff *skb = skb_peek(&neigh->arp_queue);
		/* keep skb alive even if arp_queue overflows */
		if (skb)
			skb_get(skb);
		write_unlock(&neigh->lock);
		neigh->ops->solicit(neigh, skb);
		atomic_inc(&neigh->probes);
		if (skb)
			kfree_skb(skb);
	} else {
out:
		write_unlock(&neigh->lock);
	}

	if (notify)
		neigh_update_notify(neigh);

	neigh_release(neigh);
}

int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
{
	int rc;
	unsigned long now;

	write_lock_bh(&neigh->lock);

	rc = 0;
	if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
		goto out_unlock_bh;

	now = jiffies;

	if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
		if (neigh->parms->mcast_probes + neigh->parms->app_probes) {
			atomic_set(&neigh->probes, neigh->parms->ucast_probes);
			neigh->nud_state = NUD_INCOMPLETE;
			neigh->updated = jiffies;
			neigh_hold(neigh);
			neigh_add_timer(neigh, now + 1);
		} else {
			neigh->nud_state = NUD_FAILED;
			neigh->updated = jiffies;
			write_unlock_bh(&neigh->lock);

			if (skb)
				kfree_skb(skb);
			return 1;
		}
	} else if (neigh->nud_state & NUD_STALE) {
		NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
		neigh_hold(neigh);
		neigh->nud_state = NUD_DELAY;
		neigh->updated = jiffies;
		neigh_add_timer(neigh,
				jiffies + neigh->parms->delay_probe_time);
	}

	if (neigh->nud_state == NUD_INCOMPLETE) {
		if (skb) {
			if (skb_queue_len(&neigh->arp_queue) >=
			    neigh->parms->queue_len) {
				struct sk_buff *buff;
				buff = neigh->arp_queue.next;
				__skb_unlink(buff, &neigh->arp_queue);
				kfree_skb(buff);
			}
			__skb_queue_tail(&neigh->arp_queue, skb);
		}
		rc = 1;
	}
out_unlock_bh:
	write_unlock_bh(&neigh->lock);
	return rc;
}
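
/*
 * Return convention (as used by neigh_resolve_output() below): 0 means
 * the entry is usable and the caller may transmit immediately; 1 means
 * the skb was either queued on arp_queue pending resolution or dropped
 * because resolution already failed.
 */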

static void neigh_update_hhs(struct neighbour *neigh)
{
	struct hh_cache *hh;
	void (*update)(struct hh_cache*, struct net_device*, unsigned char *) =
		neigh->dev->header_cache_update;

	if (update) {
		for (hh = neigh->hh; hh; hh = hh->hh_next) {
			write_seqlock_bh(&hh->hh_lock);
			update(hh, neigh->dev, neigh->ha);
			write_sequnlock_bh(&hh->hh_lock);
		}
	}
}


/* Generic update routine.
   -- lladdr is the new lladdr, or NULL if it is not supplied.
   -- new    is the new state.
   -- flags
	NEIGH_UPDATE_F_OVERRIDE allows overriding an existing lladdr
				if the new one differs.
	NEIGH_UPDATE_F_WEAK_OVERRIDE will suspect an existing "connected"
				lladdr instead of overriding it
				if the new one differs.
				It also allows retaining the current state
				if lladdr is unchanged.
	NEIGH_UPDATE_F_ADMIN	means that the change is administrative.

	NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows overriding an existing
				NTF_ROUTER flag.
	NEIGH_UPDATE_F_ISROUTER	indicates whether the neighbour is known
				to be a router.

   The caller MUST hold a reference count on the entry.
 */

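/*
 * Typical use, taken from neigh_event_ns() further below: a received
 * probe confirms a link-layer address without proving bidirectional
 * reachability, so the caller does
 *
 *	neigh_update(neigh, lladdr, NUD_STALE, NEIGH_UPDATE_F_OVERRIDE);
 *
 * which installs lladdr but leaves it to later traffic to promote the
 * entry back to NUD_REACHABLE.
 */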
int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
		 u32 flags)
{
	u8 old;
	int err;
	int notify = 0;
	struct net_device *dev;
	int update_isrouter = 0;

	write_lock_bh(&neigh->lock);

	dev = neigh->dev;
	old = neigh->nud_state;
	err = -EPERM;

	if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
	    (old & (NUD_NOARP | NUD_PERMANENT)))
		goto out;

	if (!(new & NUD_VALID)) {
		neigh_del_timer(neigh);
		if (old & NUD_CONNECTED)
			neigh_suspect(neigh);
		neigh->nud_state = new;
		err = 0;
		notify = old & NUD_VALID;
		goto out;
	}

	/* Compare the new lladdr with the cached one */
	if (!dev->addr_len) {
		/* First case: the device needs no address. */
		lladdr = neigh->ha;
	} else if (lladdr) {
		/* Second case: something is already cached
		   and a new address is proposed:
		   - compare new & old
		   - if they are different, check the override flag
		 */
		if ((old & NUD_VALID) &&
		    !memcmp(lladdr, neigh->ha, dev->addr_len))
			lladdr = neigh->ha;
	} else {
		/* No address is supplied; if we know something,
		   use it, otherwise discard the request.
		 */
		err = -EINVAL;
		if (!(old & NUD_VALID))
			goto out;
		lladdr = neigh->ha;
	}

	if (new & NUD_CONNECTED)
		neigh->confirmed = jiffies;
	neigh->updated = jiffies;

	/* If the entry was valid and the address has not changed,
	   do not change the entry state if the new state is STALE.
	 */
	err = 0;
	update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
	if (old & NUD_VALID) {
		if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
			update_isrouter = 0;
			if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) &&
			    (old & NUD_CONNECTED)) {
				lladdr = neigh->ha;
				new = NUD_STALE;
			} else
				goto out;
		} else {
			if (lladdr == neigh->ha && new == NUD_STALE &&
			    ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) ||
			     (old & NUD_CONNECTED)))
				new = old;
		}
	}

	if (new != old) {
		neigh_del_timer(neigh);
		if (new & NUD_IN_TIMER) {
			neigh_hold(neigh);
			neigh_add_timer(neigh, (jiffies +
						((new & NUD_REACHABLE) ?
						 neigh->parms->reachable_time :
						 0)));
		}
		neigh->nud_state = new;
	}

	if (lladdr != neigh->ha) {
		memcpy(&neigh->ha, lladdr, dev->addr_len);
		neigh_update_hhs(neigh);
		if (!(new & NUD_CONNECTED))
			neigh->confirmed = jiffies -
				(neigh->parms->base_reachable_time << 1);
		notify = 1;
	}
	if (new == old)
		goto out;
	if (new & NUD_CONNECTED)
		neigh_connect(neigh);
	else
		neigh_suspect(neigh);
	if (!(old & NUD_VALID)) {
		struct sk_buff *skb;

		/* Again: avoid a dead loop if something went wrong */

		while (neigh->nud_state & NUD_VALID &&
		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
			struct neighbour *n1 = neigh;
			write_unlock_bh(&neigh->lock);
			/* On shaper/eql skb->dst->neighbour != neigh :( */
			if (skb->dst && skb->dst->neighbour)
				n1 = skb->dst->neighbour;
			n1->output(skb);
			write_lock_bh(&neigh->lock);
		}
		skb_queue_purge(&neigh->arp_queue);
	}
out:
	if (update_isrouter) {
		neigh->flags = (flags & NEIGH_UPDATE_F_ISROUTER) ?
			       (neigh->flags | NTF_ROUTER) :
			       (neigh->flags & ~NTF_ROUTER);
	}
	write_unlock_bh(&neigh->lock);

	if (notify)
		neigh_update_notify(neigh);

	return err;
}

struct neighbour *neigh_event_ns(struct neigh_table *tbl,
				 u8 *lladdr, void *saddr,
				 struct net_device *dev)
{
	struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
						 lladdr || !dev->addr_len);
	if (neigh)
		neigh_update(neigh, lladdr, NUD_STALE,
			     NEIGH_UPDATE_F_OVERRIDE);
	return neigh;
}

static void neigh_hh_init(struct neighbour *n, struct dst_entry *dst,
			  __be16 protocol)
{
	struct hh_cache *hh;
	struct net_device *dev = dst->dev;

	for (hh = n->hh; hh; hh = hh->hh_next)
		if (hh->hh_type == protocol)
			break;

	if (!hh && (hh = kzalloc(sizeof(*hh), GFP_ATOMIC)) != NULL) {
		seqlock_init(&hh->hh_lock);
		hh->hh_type = protocol;
		atomic_set(&hh->hh_refcnt, 0);
		hh->hh_next = NULL;
		if (dev->hard_header_cache(n, hh)) {
			kfree(hh);
			hh = NULL;
		} else {
			atomic_inc(&hh->hh_refcnt);
			hh->hh_next = n->hh;
			n->hh = hh;
			if (n->nud_state & NUD_CONNECTED)
				hh->hh_output = n->ops->hh_output;
			else
				hh->hh_output = n->ops->output;
		}
	}
	if (hh) {
		atomic_inc(&hh->hh_refcnt);
		dst->hh = hh;
	}
}

/* This function can be used in contexts where only the old
   dev_queue_xmit worked, e.g. if you want to override the normal
   output path (eql, shaper) but resolution has not been done yet.
 */

int neigh_compat_output(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;

	__skb_pull(skb, skb_network_offset(skb));

	if (dev->hard_header &&
	    dev->hard_header(skb, dev, ntohs(skb->protocol), NULL, NULL,
			     skb->len) < 0 &&
	    dev->rebuild_header(skb))
		return 0;

	return dev_queue_xmit(skb);
}

/* Slow and careful. */

int neigh_resolve_output(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct neighbour *neigh;
	int rc = 0;

	if (!dst || !(neigh = dst->neighbour))
		goto discard;

	__skb_pull(skb, skb_network_offset(skb));

	if (!neigh_event_send(neigh, skb)) {
		int err;
		struct net_device *dev = neigh->dev;
		if (dev->hard_header_cache && !dst->hh) {
			write_lock_bh(&neigh->lock);
			if (!dst->hh)
				neigh_hh_init(neigh, dst, dst->ops->protocol);
			err = dev->hard_header(skb, dev, ntohs(skb->protocol),
					       neigh->ha, NULL, skb->len);
			write_unlock_bh(&neigh->lock);
		} else {
			read_lock_bh(&neigh->lock);
			err = dev->hard_header(skb, dev, ntohs(skb->protocol),
					       neigh->ha, NULL, skb->len);
			read_unlock_bh(&neigh->lock);
		}
		if (err >= 0)
			rc = neigh->ops->queue_xmit(skb);
		else
			goto out_kfree_skb;
	}
out:
	return rc;
discard:
	NEIGH_PRINTK1("neigh_resolve_output: dst=%p neigh=%p\n",
		      dst, dst ? dst->neighbour : NULL);
out_kfree_skb:
	rc = -EINVAL;
	kfree_skb(skb);
	goto out;
}

/* As fast as possible without the hh cache */

int neigh_connected_output(struct sk_buff *skb)
{
	int err;
	struct dst_entry *dst = skb->dst;
	struct neighbour *neigh = dst->neighbour;
	struct net_device *dev = neigh->dev;

	__skb_pull(skb, skb_network_offset(skb));

	read_lock_bh(&neigh->lock);
	err = dev->hard_header(skb, dev, ntohs(skb->protocol),
			       neigh->ha, NULL, skb->len);
	read_unlock_bh(&neigh->lock);
	if (err >= 0)
		err = neigh->ops->queue_xmit(skb);
	else {
		err = -EINVAL;
		kfree_skb(skb);
	}
	return err;
}

static void neigh_proxy_process(unsigned long arg)
{
	struct neigh_table *tbl = (struct neigh_table *)arg;
	long sched_next = 0;
	unsigned long now = jiffies;
	struct sk_buff *skb;

	spin_lock(&tbl->proxy_queue.lock);

	skb = tbl->proxy_queue.next;

	while (skb != (struct sk_buff *)&tbl->proxy_queue) {
		struct sk_buff *back = skb;
		long tdif = NEIGH_CB(back)->sched_next - now;

		skb = skb->next;
		if (tdif <= 0) {
			struct net_device *dev = back->dev;
			__skb_unlink(back, &tbl->proxy_queue);
			if (tbl->proxy_redo && netif_running(dev))
				tbl->proxy_redo(back);
			else
				kfree_skb(back);

			dev_put(dev);
		} else if (!sched_next || tdif < sched_next)
			sched_next = tdif;
	}
	del_timer(&tbl->proxy_timer);
	if (sched_next)
		mod_timer(&tbl->proxy_timer, jiffies + sched_next);
	spin_unlock(&tbl->proxy_queue.lock);
}

void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
		    struct sk_buff *skb)
{
	unsigned long now = jiffies;
	unsigned long sched_next = now + (net_random() % p->proxy_delay);

	if (tbl->proxy_queue.qlen > p->proxy_qlen) {
		kfree_skb(skb);
		return;
	}

	NEIGH_CB(skb)->sched_next = sched_next;
	NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;

	spin_lock(&tbl->proxy_queue.lock);
	if (del_timer(&tbl->proxy_timer)) {
		if (time_before(tbl->proxy_timer.expires, sched_next))
			sched_next = tbl->proxy_timer.expires;
	}
	dst_release(skb->dst);
	skb->dst = NULL;
	dev_hold(skb->dev);
	__skb_queue_tail(&tbl->proxy_queue, skb);
	mod_timer(&tbl->proxy_timer, sched_next);
	spin_unlock(&tbl->proxy_queue.lock);
}
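
/*
 * The random delay above jitters proxy replies: each queued skb is due
 * at now + (net_random() % proxy_delay), and the timer is pulled
 * forward if an already-pending deadline is earlier, so no reply is
 * sent before its own due time and replies are spread out rather than
 * bursted.
 */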


struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
				      struct neigh_table *tbl)
{
	struct neigh_parms *p = kmemdup(&tbl->parms, sizeof(*p), GFP_KERNEL);

	if (p) {
		p->tbl = tbl;
		atomic_set(&p->refcnt, 1);
		INIT_RCU_HEAD(&p->rcu_head);
		p->reachable_time =
			neigh_rand_reach_time(p->base_reachable_time);
		if (dev) {
			if (dev->neigh_setup && dev->neigh_setup(dev, p)) {
				kfree(p);
				return NULL;
			}

			dev_hold(dev);
			p->dev = dev;
		}
		p->sysctl_table = NULL;
		write_lock_bh(&tbl->lock);
		p->next = tbl->parms.next;
		tbl->parms.next = p;
		write_unlock_bh(&tbl->lock);
	}
	return p;
}

static void neigh_rcu_free_parms(struct rcu_head *head)
{
	struct neigh_parms *parms =
		container_of(head, struct neigh_parms, rcu_head);

	neigh_parms_put(parms);
}

void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
{
	struct neigh_parms **p;

	if (!parms || parms == &tbl->parms)
		return;
	write_lock_bh(&tbl->lock);
	for (p = &tbl->parms.next; *p; p = &(*p)->next) {
		if (*p == parms) {
			*p = parms->next;
			parms->dead = 1;
			write_unlock_bh(&tbl->lock);
			if (parms->dev)
				dev_put(parms->dev);
			call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
			return;
		}
	}
	write_unlock_bh(&tbl->lock);
	NEIGH_PRINTK1("neigh_parms_release: not found\n");
}

void neigh_parms_destroy(struct neigh_parms *parms)
{
	kfree(parms);
}

static struct lock_class_key neigh_table_proxy_queue_class;

void neigh_table_init_no_netlink(struct neigh_table *tbl)
{
	unsigned long now = jiffies;
	unsigned long phsize;

	atomic_set(&tbl->parms.refcnt, 1);
	INIT_RCU_HEAD(&tbl->parms.rcu_head);
	tbl->parms.reachable_time =
		neigh_rand_reach_time(tbl->parms.base_reachable_time);

	if (!tbl->kmem_cachep)
		tbl->kmem_cachep =
			kmem_cache_create(tbl->id, tbl->entry_size, 0,
					  SLAB_HWCACHE_ALIGN|SLAB_PANIC,
					  NULL);
	tbl->stats = alloc_percpu(struct neigh_statistics);
	if (!tbl->stats)
		panic("cannot create neighbour cache statistics");

#ifdef CONFIG_PROC_FS
	tbl->pde = create_proc_entry(tbl->id, 0, proc_net_stat);
	if (!tbl->pde)
		panic("cannot create neighbour proc dir entry");
	tbl->pde->proc_fops = &neigh_stat_seq_fops;
	tbl->pde->data = tbl;
#endif

	tbl->hash_mask = 1;
	tbl->hash_buckets = neigh_hash_alloc(tbl->hash_mask + 1);

	phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
	tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);

	if (!tbl->hash_buckets || !tbl->phash_buckets)
		panic("cannot allocate neighbour cache hashes");

	get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));

	rwlock_init(&tbl->lock);
	init_timer(&tbl->gc_timer);
	tbl->gc_timer.data = (unsigned long)tbl;
	tbl->gc_timer.function = neigh_periodic_timer;
	tbl->gc_timer.expires = now + 1;
	add_timer(&tbl->gc_timer);

	init_timer(&tbl->proxy_timer);
	tbl->proxy_timer.data = (unsigned long)tbl;
	tbl->proxy_timer.function = neigh_proxy_process;
	skb_queue_head_init_class(&tbl->proxy_queue,
				  &neigh_table_proxy_queue_class);

	tbl->last_flush = now;
	tbl->last_rand = now + tbl->parms.reachable_time * 20;
}

void neigh_table_init(struct neigh_table *tbl)
{
	struct neigh_table *tmp;

	neigh_table_init_no_netlink(tbl);
	write_lock(&neigh_tbl_lock);
	for (tmp = neigh_tables; tmp; tmp = tmp->next) {
		if (tmp->family == tbl->family)
			break;
	}
	tbl->next = neigh_tables;
	neigh_tables = tbl;
	write_unlock(&neigh_tbl_lock);

	if (unlikely(tmp)) {
		printk(KERN_ERR "NEIGH: Registering multiple tables for "
		       "family %d\n", tbl->family);
		dump_stack();
	}
}

int neigh_table_clear(struct neigh_table *tbl)
{
	struct neigh_table **tp;

	/* This is not clean... fix it so the IPv6 module can be
	   unloaded safely */
	del_timer_sync(&tbl->gc_timer);
	del_timer_sync(&tbl->proxy_timer);
	pneigh_queue_purge(&tbl->proxy_queue);
	neigh_ifdown(tbl, NULL);
	if (atomic_read(&tbl->entries))
		printk(KERN_CRIT "neighbour leakage\n");
	write_lock(&neigh_tbl_lock);
	for (tp = &neigh_tables; *tp; tp = &(*tp)->next) {
		if (*tp == tbl) {
			*tp = tbl->next;
			break;
		}
	}
	write_unlock(&neigh_tbl_lock);

	neigh_hash_free(tbl->hash_buckets, tbl->hash_mask + 1);
	tbl->hash_buckets = NULL;

	kfree(tbl->phash_buckets);
	tbl->phash_buckets = NULL;

	free_percpu(tbl->stats);
	tbl->stats = NULL;

	return 0;
}

static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct ndmsg *ndm;
	struct nlattr *dst_attr;
	struct neigh_table *tbl;
	struct net_device *dev = NULL;
	int err = -EINVAL;

	if (nlmsg_len(nlh) < sizeof(*ndm))
		goto out;

	dst_attr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_DST);
	if (dst_attr == NULL)
		goto out;

	ndm = nlmsg_data(nlh);
	if (ndm->ndm_ifindex) {
		dev = dev_get_by_index(ndm->ndm_ifindex);
		if (dev == NULL) {
			err = -ENODEV;
			goto out;
		}
	}

	read_lock(&neigh_tbl_lock);
	for (tbl = neigh_tables; tbl; tbl = tbl->next) {
		struct neighbour *neigh;

		if (tbl->family != ndm->ndm_family)
			continue;
		read_unlock(&neigh_tbl_lock);

		if (nla_len(dst_attr) < tbl->key_len)
			goto out_dev_put;

		if (ndm->ndm_flags & NTF_PROXY) {
			err = pneigh_delete(tbl, nla_data(dst_attr), dev);
			goto out_dev_put;
		}

		if (dev == NULL)
			goto out_dev_put;

		neigh = neigh_lookup(tbl, nla_data(dst_attr), dev);
		if (neigh == NULL) {
			err = -ENOENT;
			goto out_dev_put;
		}

		err = neigh_update(neigh, NULL, NUD_FAILED,
				   NEIGH_UPDATE_F_OVERRIDE |
				   NEIGH_UPDATE_F_ADMIN);
		neigh_release(neigh);
		goto out_dev_put;
	}
	read_unlock(&neigh_tbl_lock);
	err = -EAFNOSUPPORT;

out_dev_put:
	if (dev)
		dev_put(dev);
out:
	return err;
}

static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct ndmsg *ndm;
	struct nlattr *tb[NDA_MAX+1];
	struct neigh_table *tbl;
	struct net_device *dev = NULL;
	int err;

	err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL);
	if (err < 0)
		goto out;

	err = -EINVAL;
	if (tb[NDA_DST] == NULL)
		goto out;

	ndm = nlmsg_data(nlh);
	if (ndm->ndm_ifindex) {
		dev = dev_get_by_index(ndm->ndm_ifindex);
		if (dev == NULL) {
			err = -ENODEV;
			goto out;
		}

		if (tb[NDA_LLADDR] && nla_len(tb[NDA_LLADDR]) < dev->addr_len)
			goto out_dev_put;
	}

	read_lock(&neigh_tbl_lock);
	for (tbl = neigh_tables; tbl; tbl = tbl->next) {
		int flags = NEIGH_UPDATE_F_ADMIN | NEIGH_UPDATE_F_OVERRIDE;
		struct neighbour *neigh;
		void *dst, *lladdr;

		if (tbl->family != ndm->ndm_family)
			continue;
		read_unlock(&neigh_tbl_lock);

		if (nla_len(tb[NDA_DST]) < tbl->key_len)
			goto out_dev_put;
		dst = nla_data(tb[NDA_DST]);
		lladdr = tb[NDA_LLADDR] ? nla_data(tb[NDA_LLADDR]) : NULL;

		if (ndm->ndm_flags & NTF_PROXY) {
			struct pneigh_entry *pn;

			err = -ENOBUFS;
			pn = pneigh_lookup(tbl, dst, dev, 1);
			if (pn) {
				pn->flags = ndm->ndm_flags;
				err = 0;
			}
			goto out_dev_put;
		}

		if (dev == NULL)
			goto out_dev_put;

		neigh = neigh_lookup(tbl, dst, dev);
		if (neigh == NULL) {
			if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
				err = -ENOENT;
				goto out_dev_put;
			}

			neigh = __neigh_lookup_errno(tbl, dst, dev);
			if (IS_ERR(neigh)) {
				err = PTR_ERR(neigh);
				goto out_dev_put;
			}
		} else {
			if (nlh->nlmsg_flags & NLM_F_EXCL) {
				err = -EEXIST;
				neigh_release(neigh);
				goto out_dev_put;
			}

			if (!(nlh->nlmsg_flags & NLM_F_REPLACE))
				flags &= ~NEIGH_UPDATE_F_OVERRIDE;
		}

		err = neigh_update(neigh, lladdr, ndm->ndm_state, flags);
		neigh_release(neigh);
		goto out_dev_put;
	}

	read_unlock(&neigh_tbl_lock);
	err = -EAFNOSUPPORT;

out_dev_put:
	if (dev)
		dev_put(dev);
out:
	return err;
}

static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
{
	struct nlattr *nest;

	nest = nla_nest_start(skb, NDTA_PARMS);
	if (nest == NULL)
		return -ENOBUFS;

	if (parms->dev)
		NLA_PUT_U32(skb, NDTPA_IFINDEX, parms->dev->ifindex);

	NLA_PUT_U32(skb, NDTPA_REFCNT, atomic_read(&parms->refcnt));
	NLA_PUT_U32(skb, NDTPA_QUEUE_LEN, parms->queue_len);
	NLA_PUT_U32(skb, NDTPA_PROXY_QLEN, parms->proxy_qlen);
	NLA_PUT_U32(skb, NDTPA_APP_PROBES, parms->app_probes);
	NLA_PUT_U32(skb, NDTPA_UCAST_PROBES, parms->ucast_probes);
	NLA_PUT_U32(skb, NDTPA_MCAST_PROBES, parms->mcast_probes);
	NLA_PUT_MSECS(skb, NDTPA_REACHABLE_TIME, parms->reachable_time);
	NLA_PUT_MSECS(skb, NDTPA_BASE_REACHABLE_TIME,
		      parms->base_reachable_time);
	NLA_PUT_MSECS(skb, NDTPA_GC_STALETIME, parms->gc_staletime);
	NLA_PUT_MSECS(skb, NDTPA_DELAY_PROBE_TIME, parms->delay_probe_time);
	NLA_PUT_MSECS(skb, NDTPA_RETRANS_TIME, parms->retrans_time);
	NLA_PUT_MSECS(skb, NDTPA_ANYCAST_DELAY, parms->anycast_delay);
	NLA_PUT_MSECS(skb, NDTPA_PROXY_DELAY, parms->proxy_delay);
	NLA_PUT_MSECS(skb, NDTPA_LOCKTIME, parms->locktime);

	return nla_nest_end(skb, nest);

nla_put_failure:
	return nla_nest_cancel(skb, nest);
}

static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
			      u32 pid, u32 seq, int type, int flags)
{
	struct nlmsghdr *nlh;
	struct ndtmsg *ndtmsg;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ndtmsg = nlmsg_data(nlh);

	read_lock_bh(&tbl->lock);
	ndtmsg->ndtm_family = tbl->family;
	ndtmsg->ndtm_pad1 = 0;
	ndtmsg->ndtm_pad2 = 0;

	NLA_PUT_STRING(skb, NDTA_NAME, tbl->id);
	NLA_PUT_MSECS(skb, NDTA_GC_INTERVAL, tbl->gc_interval);
	NLA_PUT_U32(skb, NDTA_THRESH1, tbl->gc_thresh1);
	NLA_PUT_U32(skb, NDTA_THRESH2, tbl->gc_thresh2);
	NLA_PUT_U32(skb, NDTA_THRESH3, tbl->gc_thresh3);

	{
		unsigned long now = jiffies;
		unsigned int flush_delta = now - tbl->last_flush;
		unsigned int rand_delta = now - tbl->last_rand;

		struct ndt_config ndc = {
			.ndtc_key_len		= tbl->key_len,
			.ndtc_entry_size	= tbl->entry_size,
			.ndtc_entries		= atomic_read(&tbl->entries),
			.ndtc_last_flush	= jiffies_to_msecs(flush_delta),
			.ndtc_last_rand		= jiffies_to_msecs(rand_delta),
			.ndtc_hash_rnd		= tbl->hash_rnd,
			.ndtc_hash_mask		= tbl->hash_mask,
			.ndtc_hash_chain_gc	= tbl->hash_chain_gc,
			.ndtc_proxy_qlen	= tbl->proxy_queue.qlen,
		};

		NLA_PUT(skb, NDTA_CONFIG, sizeof(ndc), &ndc);
	}

	{
		int cpu;
		struct ndt_stats ndst;

		memset(&ndst, 0, sizeof(ndst));

		for_each_possible_cpu(cpu) {
			struct neigh_statistics *st;

			st = per_cpu_ptr(tbl->stats, cpu);
			ndst.ndts_allocs		+= st->allocs;
			ndst.ndts_destroys		+= st->destroys;
			ndst.ndts_hash_grows		+= st->hash_grows;
			ndst.ndts_res_failed		+= st->res_failed;
			ndst.ndts_lookups		+= st->lookups;
			ndst.ndts_hits			+= st->hits;
			ndst.ndts_rcv_probes_mcast	+= st->rcv_probes_mcast;
			ndst.ndts_rcv_probes_ucast	+= st->rcv_probes_ucast;
			ndst.ndts_periodic_gc_runs	+= st->periodic_gc_runs;
			ndst.ndts_forced_gc_runs	+= st->forced_gc_runs;
		}

		NLA_PUT(skb, NDTA_STATS, sizeof(ndst), &ndst);
	}

	BUG_ON(tbl->parms.dev);
	if (neightbl_fill_parms(skb, &tbl->parms) < 0)
		goto nla_put_failure;

	read_unlock_bh(&tbl->lock);
	return nlmsg_end(skb, nlh);

nla_put_failure:
	read_unlock_bh(&tbl->lock);
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static int neightbl_fill_param_info(struct sk_buff *skb,
				    struct neigh_table *tbl,
				    struct neigh_parms *parms,
				    u32 pid, u32 seq, int type,
				    unsigned int flags)
{
	struct ndtmsg *ndtmsg;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ndtmsg = nlmsg_data(nlh);

	read_lock_bh(&tbl->lock);
	ndtmsg->ndtm_family = tbl->family;
	ndtmsg->ndtm_pad1 = 0;
	ndtmsg->ndtm_pad2 = 0;

	if (nla_put_string(skb, NDTA_NAME, tbl->id) < 0 ||
	    neightbl_fill_parms(skb, parms) < 0)
		goto errout;

	read_unlock_bh(&tbl->lock);
	return nlmsg_end(skb, nlh);
errout:
	read_unlock_bh(&tbl->lock);
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static inline struct neigh_parms *lookup_neigh_params(struct neigh_table *tbl,
						      int ifindex)
{
	struct neigh_parms *p;

	for (p = &tbl->parms; p; p = p->next)
		if ((p->dev && p->dev->ifindex == ifindex) ||
		    (!p->dev && !ifindex))
			return p;

	return NULL;
}

static const struct nla_policy nl_neightbl_policy[NDTA_MAX+1] = {
	[NDTA_NAME]		= { .type = NLA_STRING },
	[NDTA_THRESH1]		= { .type = NLA_U32 },
	[NDTA_THRESH2]		= { .type = NLA_U32 },
	[NDTA_THRESH3]		= { .type = NLA_U32 },
	[NDTA_GC_INTERVAL]	= { .type = NLA_U64 },
	[NDTA_PARMS]		= { .type = NLA_NESTED },
};

static const struct nla_policy nl_ntbl_parm_policy[NDTPA_MAX+1] = {
	[NDTPA_IFINDEX]			= { .type = NLA_U32 },
	[NDTPA_QUEUE_LEN]		= { .type = NLA_U32 },
	[NDTPA_PROXY_QLEN]		= { .type = NLA_U32 },
	[NDTPA_APP_PROBES]		= { .type = NLA_U32 },
	[NDTPA_UCAST_PROBES]		= { .type = NLA_U32 },
	[NDTPA_MCAST_PROBES]		= { .type = NLA_U32 },
	[NDTPA_BASE_REACHABLE_TIME]	= { .type = NLA_U64 },
	[NDTPA_GC_STALETIME]		= { .type = NLA_U64 },
	[NDTPA_DELAY_PROBE_TIME]	= { .type = NLA_U64 },
	[NDTPA_RETRANS_TIME]		= { .type = NLA_U64 },
	[NDTPA_ANYCAST_DELAY]		= { .type = NLA_U64 },
	[NDTPA_PROXY_DELAY]		= { .type = NLA_U64 },
	[NDTPA_LOCKTIME]		= { .type = NLA_U64 },
};

static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct neigh_table *tbl;
	struct ndtmsg *ndtmsg;
	struct nlattr *tb[NDTA_MAX+1];
	int err;

	err = nlmsg_parse(nlh, sizeof(*ndtmsg), tb, NDTA_MAX,
			  nl_neightbl_policy);
	if (err < 0)
		goto errout;

	if (tb[NDTA_NAME] == NULL) {
		err = -EINVAL;
		goto errout;
	}

	ndtmsg = nlmsg_data(nlh);
	read_lock(&neigh_tbl_lock);
	for (tbl = neigh_tables; tbl; tbl = tbl->next) {
		if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
			continue;

		if (nla_strcmp(tb[NDTA_NAME], tbl->id) == 0)
			break;
	}

	if (tbl == NULL) {
		err = -ENOENT;
		goto errout_locked;
	}

	/*
	 * We acquire tbl->lock to be nice to the periodic timers and
	 * make sure they always see a consistent set of values.
	 */
	write_lock_bh(&tbl->lock);

	if (tb[NDTA_PARMS]) {
		struct nlattr *tbp[NDTPA_MAX+1];
		struct neigh_parms *p;
		int i, ifindex = 0;

		err = nla_parse_nested(tbp, NDTPA_MAX, tb[NDTA_PARMS],
				       nl_ntbl_parm_policy);
		if (err < 0)
			goto errout_tbl_lock;

		if (tbp[NDTPA_IFINDEX])
			ifindex = nla_get_u32(tbp[NDTPA_IFINDEX]);

		p = lookup_neigh_params(tbl, ifindex);
		if (p == NULL) {
			err = -ENOENT;
			goto errout_tbl_lock;
		}

		for (i = 1; i <= NDTPA_MAX; i++) {
			if (tbp[i] == NULL)
				continue;

			switch (i) {
			case NDTPA_QUEUE_LEN:
				p->queue_len = nla_get_u32(tbp[i]);
				break;
			case NDTPA_PROXY_QLEN:
				p->proxy_qlen = nla_get_u32(tbp[i]);
				break;
			case NDTPA_APP_PROBES:
				p->app_probes = nla_get_u32(tbp[i]);
				break;
			case NDTPA_UCAST_PROBES:
				p->ucast_probes = nla_get_u32(tbp[i]);
				break;
			case NDTPA_MCAST_PROBES:
				p->mcast_probes = nla_get_u32(tbp[i]);
				break;
			case NDTPA_BASE_REACHABLE_TIME:
				p->base_reachable_time = nla_get_msecs(tbp[i]);
				break;
			case NDTPA_GC_STALETIME:
				p->gc_staletime = nla_get_msecs(tbp[i]);
				break;
			case NDTPA_DELAY_PROBE_TIME:
				p->delay_probe_time = nla_get_msecs(tbp[i]);
				break;
			case NDTPA_RETRANS_TIME:
				p->retrans_time = nla_get_msecs(tbp[i]);
				break;
			case NDTPA_ANYCAST_DELAY:
				p->anycast_delay = nla_get_msecs(tbp[i]);
				break;
			case NDTPA_PROXY_DELAY:
				p->proxy_delay = nla_get_msecs(tbp[i]);
				break;
			case NDTPA_LOCKTIME:
				p->locktime = nla_get_msecs(tbp[i]);
				break;
			}
		}
	}

	if (tb[NDTA_THRESH1])
		tbl->gc_thresh1 = nla_get_u32(tb[NDTA_THRESH1]);

	if (tb[NDTA_THRESH2])
		tbl->gc_thresh2 = nla_get_u32(tb[NDTA_THRESH2]);

	if (tb[NDTA_THRESH3])
		tbl->gc_thresh3 = nla_get_u32(tb[NDTA_THRESH3]);

	if (tb[NDTA_GC_INTERVAL])
		tbl->gc_interval = nla_get_msecs(tb[NDTA_GC_INTERVAL]);

	err = 0;

errout_tbl_lock:
	write_unlock_bh(&tbl->lock);
errout_locked:
	read_unlock(&neigh_tbl_lock);
errout:
	return err;
}

static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
{
	int family, tidx, nidx = 0;
	int tbl_skip = cb->args[0];
	int neigh_skip = cb->args[1];
	struct neigh_table *tbl;

	family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;

	read_lock(&neigh_tbl_lock);
	for (tbl = neigh_tables, tidx = 0; tbl; tbl = tbl->next, tidx++) {
		struct neigh_parms *p;

		if (tidx < tbl_skip || (family && tbl->family != family))
			continue;

		if (neightbl_fill_info(skb, tbl, NETLINK_CB(cb->skb).pid,
				       cb->nlh->nlmsg_seq, RTM_NEWNEIGHTBL,
				       NLM_F_MULTI) <= 0)
			break;

		for (nidx = 0, p = tbl->parms.next; p; p = p->next, nidx++) {
			if (nidx < neigh_skip)
				continue;

			if (neightbl_fill_param_info(skb, tbl, p,
						     NETLINK_CB(cb->skb).pid,
						     cb->nlh->nlmsg_seq,
						     RTM_NEWNEIGHTBL,
						     NLM_F_MULTI) <= 0)
				goto out;
		}

		neigh_skip = 0;
	}
out:
	read_unlock(&neigh_tbl_lock);
	cb->args[0] = tidx;
	cb->args[1] = nidx;

	return skb->len;
}

static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
			   u32 pid, u32 seq, int type, unsigned int flags)
{
	unsigned long now = jiffies;
	struct nda_cacheinfo ci;
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ndm = nlmsg_data(nlh);
	ndm->ndm_family = neigh->ops->family;
	ndm->ndm_pad1 = 0;
	ndm->ndm_pad2 = 0;
	ndm->ndm_flags = neigh->flags;
	ndm->ndm_type = neigh->type;
	ndm->ndm_ifindex = neigh->dev->ifindex;

	NLA_PUT(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key);

	read_lock_bh(&neigh->lock);
	ndm->ndm_state = neigh->nud_state;
	if ((neigh->nud_state & NUD_VALID) &&
	    nla_put(skb, NDA_LLADDR, neigh->dev->addr_len, neigh->ha) < 0) {
		read_unlock_bh(&neigh->lock);
		goto nla_put_failure;
	}

	ci.ndm_used = now - neigh->used;
	ci.ndm_confirmed = now - neigh->confirmed;
	ci.ndm_updated = now - neigh->updated;
	ci.ndm_refcnt = atomic_read(&neigh->refcnt) - 1;
	read_unlock_bh(&neigh->lock);

	NLA_PUT_U32(skb, NDA_PROBES, atomic_read(&neigh->probes));
	NLA_PUT(skb, NDA_CACHEINFO, sizeof(ci), &ci);

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static void neigh_update_notify(struct neighbour *neigh)
{
	call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
	__neigh_notify(neigh, RTM_NEWNEIGH, 0);
}

static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
			    struct netlink_callback *cb)
{
	struct neighbour *n;
	int rc, h, s_h = cb->args[1];
	int idx, s_idx = idx = cb->args[2];

	read_lock_bh(&tbl->lock);
	for (h = 0; h <= tbl->hash_mask; h++) {
		if (h < s_h)
			continue;
		if (h > s_h)
			s_idx = 0;
		for (n = tbl->hash_buckets[h], idx = 0; n; n = n->next, idx++) {
			if (idx < s_idx)
				continue;
			if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).pid,
					    cb->nlh->nlmsg_seq,
					    RTM_NEWNEIGH,
					    NLM_F_MULTI) <= 0) {
				read_unlock_bh(&tbl->lock);
				rc = -1;
				goto out;
			}
		}
	}
	read_unlock_bh(&tbl->lock);
	rc = skb->len;
out:
	cb->args[1] = h;
	cb->args[2] = idx;
	return rc;
}

static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct neigh_table *tbl;
	int t, family, s_t;

	read_lock(&neigh_tbl_lock);
	family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
	s_t = cb->args[0];

	for (tbl = neigh_tables, t = 0; tbl; tbl = tbl->next, t++) {
		if (t < s_t || (family && tbl->family != family))
			continue;
		if (t > s_t)
			memset(&cb->args[1], 0, sizeof(cb->args) -
						sizeof(cb->args[0]));
		if (neigh_dump_table(tbl, skb, cb) < 0)
			break;
	}
	read_unlock(&neigh_tbl_lock);

	cb->args[0] = t;
	return skb->len;
}

void neigh_for_each(struct neigh_table *tbl,
		    void (*cb)(struct neighbour *, void *), void *cookie)
{
	int chain;

	read_lock_bh(&tbl->lock);
	for (chain = 0; chain <= tbl->hash_mask; chain++) {
		struct neighbour *n;

		for (n = tbl->hash_buckets[chain]; n; n = n->next)
			cb(n, cookie);
	}
	read_unlock_bh(&tbl->lock);
}
EXPORT_SYMBOL(neigh_for_each);

/* The tbl->lock must be held as a writer and BH disabled. */
void __neigh_for_each_release(struct neigh_table *tbl,
			      int (*cb)(struct neighbour *))
{
	int chain;

	for (chain = 0; chain <= tbl->hash_mask; chain++) {
		struct neighbour *n, **np;

		np = &tbl->hash_buckets[chain];
		while ((n = *np) != NULL) {
			int release;

			write_lock(&n->lock);
			release = cb(n);
			if (release) {
				*np = n->next;
				n->dead = 1;
			} else
				np = &n->next;
			write_unlock(&n->lock);
			if (release)
				neigh_cleanup_and_release(n);
		}
	}
}
EXPORT_SYMBOL(__neigh_for_each_release);
2105
2106 #ifdef CONFIG_PROC_FS
2107
2108 static struct neighbour *neigh_get_first(struct seq_file *seq)
2109 {
2110 struct neigh_seq_state *state = seq->private;
2111 struct neigh_table *tbl = state->tbl;
2112 struct neighbour *n = NULL;
2113 int bucket = state->bucket;
2114
2115 state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
2116 for (bucket = 0; bucket <= tbl->hash_mask; bucket++) {
2117 n = tbl->hash_buckets[bucket];
2118
2119 while (n) {
2120 if (state->neigh_sub_iter) {
2121 loff_t fakep = 0;
2122 void *v;
2123
2124 v = state->neigh_sub_iter(state, n, &fakep);
2125 if (!v)
2126 goto next;
2127 }
2128 if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
2129 break;
2130 if (n->nud_state & ~NUD_NOARP)
2131 break;
2132 next:
2133 n = n->next;
2134 }
2135
2136 if (n)
2137 break;
2138 }
2139 state->bucket = bucket;
2140
2141 return n;
2142 }
2143
2144 static struct neighbour *neigh_get_next(struct seq_file *seq,
2145 struct neighbour *n,
2146 loff_t *pos)
2147 {
2148 struct neigh_seq_state *state = seq->private;
2149 struct neigh_table *tbl = state->tbl;
2150
2151 if (state->neigh_sub_iter) {
2152 void *v = state->neigh_sub_iter(state, n, pos);
2153 if (v)
2154 return n;
2155 }
2156 n = n->next;
2157
2158 while (1) {
2159 while (n) {
2160 if (state->neigh_sub_iter) {
2161 void *v = state->neigh_sub_iter(state, n, pos);
2162 if (v)
2163 return n;
2164 goto next;
2165 }
2166 if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
2167 break;
2168
2169 if (n->nud_state & ~NUD_NOARP)
2170 break;
2171 next:
2172 n = n->next;
2173 }
2174
2175 if (n)
2176 break;
2177
2178 if (++state->bucket > tbl->hash_mask)
2179 break;
2180
2181 n = tbl->hash_buckets[state->bucket];
2182 }
2183
2184 if (n && pos)
2185 --(*pos);
2186 return n;
2187 }
2188
2189 static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
2190 {
2191 struct neighbour *n = neigh_get_first(seq);
2192
2193 if (n) {
2194 while (*pos) {
2195 n = neigh_get_next(seq, n, pos);
2196 if (!n)
2197 break;
2198 }
2199 }
2200 return *pos ? NULL : n;
2201 }
2202
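/* Proxy entries live in their own small hash of PNEIGH_HASHMASK + 1
 * buckets and are iterated with the same bucket/position scheme as
 * ordinary entries.
 */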
2203 static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
2204 {
2205 struct neigh_seq_state *state = seq->private;
2206 struct neigh_table *tbl = state->tbl;
2207 struct pneigh_entry *pn = NULL;
2208 	int bucket;
2209
2210 state->flags |= NEIGH_SEQ_IS_PNEIGH;
2211 for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
2212 pn = tbl->phash_buckets[bucket];
2213 if (pn)
2214 break;
2215 }
2216 state->bucket = bucket;
2217
2218 return pn;
2219 }
2220
2221 static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
2222 struct pneigh_entry *pn,
2223 loff_t *pos)
2224 {
2225 struct neigh_seq_state *state = seq->private;
2226 struct neigh_table *tbl = state->tbl;
2227
2228 pn = pn->next;
2229 while (!pn) {
2230 if (++state->bucket > PNEIGH_HASHMASK)
2231 break;
2232 pn = tbl->phash_buckets[state->bucket];
2233 if (pn)
2234 break;
2235 }
2236
2237 if (pn && pos)
2238 --(*pos);
2239
2240 return pn;
2241 }
2242
2243 static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
2244 {
2245 struct pneigh_entry *pn = pneigh_get_first(seq);
2246
2247 if (pn) {
2248 while (*pos) {
2249 pn = pneigh_get_next(seq, pn, pos);
2250 if (!pn)
2251 break;
2252 }
2253 }
2254 return *pos ? NULL : pn;
2255 }
2256
2257 static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
2258 {
2259 struct neigh_seq_state *state = seq->private;
2260 void *rc;
2261
2262 rc = neigh_get_idx(seq, pos);
2263 if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
2264 rc = pneigh_get_idx(seq, pos);
2265
2266 return rc;
2267 }
2268
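/* Start a seq_file walk.  Takes tbl->lock for reading with BHs
 * disabled; the lock is only dropped in neigh_seq_stop(), so the
 * start/stop callbacks must always pair up.
 */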
2269 void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
2270 {
2271 struct neigh_seq_state *state = seq->private;
2272 loff_t pos_minus_one;
2273
2274 state->tbl = tbl;
2275 state->bucket = 0;
2276 state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);
2277
2278 read_lock_bh(&tbl->lock);
2279
2280 pos_minus_one = *pos - 1;
2281 return *pos ? neigh_get_idx_any(seq, &pos_minus_one) : SEQ_START_TOKEN;
2282 }
2283 EXPORT_SYMBOL(neigh_seq_start);
2284
2285 void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2286 {
2287 struct neigh_seq_state *state;
2288 void *rc;
2289
2290 if (v == SEQ_START_TOKEN) {
2291 rc = neigh_get_idx(seq, pos);
2292 goto out;
2293 }
2294
2295 state = seq->private;
2296 if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) {
2297 rc = neigh_get_next(seq, v, NULL);
2298 if (rc)
2299 goto out;
2300 if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY))
2301 rc = pneigh_get_first(seq);
2302 } else {
2303 BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY);
2304 rc = pneigh_get_next(seq, v, NULL);
2305 }
2306 out:
2307 ++(*pos);
2308 return rc;
2309 }
2310 EXPORT_SYMBOL(neigh_seq_next);
2311
2312 void neigh_seq_stop(struct seq_file *seq, void *v)
2313 {
2314 struct neigh_seq_state *state = seq->private;
2315 struct neigh_table *tbl = state->tbl;
2316
2317 read_unlock_bh(&tbl->lock);
2318 }
2319 EXPORT_SYMBOL(neigh_seq_stop);
2320
2321 /* statistics via seq_file */
2322
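/* For the per-CPU statistics, *pos encodes the next CPU to report,
 * offset by one so that position zero can yield the header token.
 */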
2323 static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
2324 {
2325 struct proc_dir_entry *pde = seq->private;
2326 struct neigh_table *tbl = pde->data;
2327 int cpu;
2328
2329 if (*pos == 0)
2330 return SEQ_START_TOKEN;
2331
2332 for (cpu = *pos-1; cpu < NR_CPUS; ++cpu) {
2333 if (!cpu_possible(cpu))
2334 continue;
2335 *pos = cpu+1;
2336 return per_cpu_ptr(tbl->stats, cpu);
2337 }
2338 return NULL;
2339 }
2340
2341 static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2342 {
2343 struct proc_dir_entry *pde = seq->private;
2344 struct neigh_table *tbl = pde->data;
2345 int cpu;
2346
2347 for (cpu = *pos; cpu < NR_CPUS; ++cpu) {
2348 if (!cpu_possible(cpu))
2349 continue;
2350 *pos = cpu+1;
2351 return per_cpu_ptr(tbl->stats, cpu);
2352 }
2353 return NULL;
2354 }
2355
2356 static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
2357 {
2358
2359 }
2360
2361 static int neigh_stat_seq_show(struct seq_file *seq, void *v)
2362 {
2363 struct proc_dir_entry *pde = seq->private;
2364 struct neigh_table *tbl = pde->data;
2365 struct neigh_statistics *st = v;
2366
2367 if (v == SEQ_START_TOKEN) {
2368 seq_printf(seq, "entries allocs destroys hash_grows lookups hits res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs\n");
2369 return 0;
2370 }
2371
2372 seq_printf(seq, "%08x %08lx %08lx %08lx %08lx %08lx %08lx "
2373 "%08lx %08lx %08lx %08lx\n",
2374 atomic_read(&tbl->entries),
2375
2376 st->allocs,
2377 st->destroys,
2378 st->hash_grows,
2379
2380 st->lookups,
2381 st->hits,
2382
2383 st->res_failed,
2384
2385 st->rcv_probes_mcast,
2386 st->rcv_probes_ucast,
2387
2388 st->periodic_gc_runs,
2389 st->forced_gc_runs
2390 );
2391
2392 return 0;
2393 }
2394
2395 static const struct seq_operations neigh_stat_seq_ops = {
2396 .start = neigh_stat_seq_start,
2397 .next = neigh_stat_seq_next,
2398 .stop = neigh_stat_seq_stop,
2399 .show = neigh_stat_seq_show,
2400 };
2401
2402 static int neigh_stat_seq_open(struct inode *inode, struct file *file)
2403 {
2404 int ret = seq_open(file, &neigh_stat_seq_ops);
2405
2406 if (!ret) {
2407 struct seq_file *sf = file->private_data;
2408 sf->private = PDE(inode);
2409 }
2410 return ret;
2411 }
2412
2413 static const struct file_operations neigh_stat_seq_fops = {
2414 .owner = THIS_MODULE,
2415 .open = neigh_stat_seq_open,
2416 .read = seq_read,
2417 .llseek = seq_lseek,
2418 .release = seq_release,
2419 };
2420
2421 #endif /* CONFIG_PROC_FS */
2422
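/* Worst-case payload of a neighbour message, used to size
 * notification skbs: ndmsg header plus NDA_DST, NDA_LLADDR,
 * NDA_CACHEINFO and NDA_PROBES attributes.
 */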
2423 static inline size_t neigh_nlmsg_size(void)
2424 {
2425 return NLMSG_ALIGN(sizeof(struct ndmsg))
2426 + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
2427 + nla_total_size(MAX_ADDR_LEN) /* NDA_LLADDR */
2428 + nla_total_size(sizeof(struct nda_cacheinfo))
2429 + nla_total_size(4); /* NDA_PROBES */
2430 }
2431
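/* Build a netlink message of the given type for n and multicast it
 * to the RTNLGRP_NEIGH group; on failure the error is made visible
 * to listeners via rtnl_set_sk_err().
 */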
2432 static void __neigh_notify(struct neighbour *n, int type, int flags)
2433 {
2434 struct sk_buff *skb;
2435 int err = -ENOBUFS;
2436
2437 skb = nlmsg_new(neigh_nlmsg_size(), GFP_ATOMIC);
2438 if (skb == NULL)
2439 goto errout;
2440
2441 err = neigh_fill_info(skb, n, 0, 0, type, flags);
2442 if (err < 0) {
2443 /* -EMSGSIZE implies BUG in neigh_nlmsg_size() */
2444 WARN_ON(err == -EMSGSIZE);
2445 kfree_skb(skb);
2446 goto errout;
2447 }
2448 err = rtnl_notify(skb, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
2449 errout:
2450 if (err < 0)
2451 rtnl_set_sk_err(RTNLGRP_NEIGH, err);
2452 }
2453
2454 #ifdef CONFIG_ARPD
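/* Ask a user-space daemon such as arpd to resolve a neighbour by
 * multicasting an RTM_GETNEIGH request to RTNLGRP_NEIGH.
 */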
2455 void neigh_app_ns(struct neighbour *n)
2456 {
2457 __neigh_notify(n, RTM_GETNEIGH, NLM_F_REQUEST);
2458 }
2459 #endif /* CONFIG_ARPD */
2460
2461 #ifdef CONFIG_SYSCTL
2462
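/* Template for the per-protocol, per-device sysctl tree.  A private
 * copy is kmemdup()ed for each neigh_parms instance in
 * neigh_sysctl_register(), with the .data pointers patched to point
 * at that instance's fields.
 */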
2463 static struct neigh_sysctl_table {
2464 struct ctl_table_header *sysctl_header;
2465 ctl_table neigh_vars[__NET_NEIGH_MAX];
2466 ctl_table neigh_dev[2];
2467 ctl_table neigh_neigh_dir[2];
2468 ctl_table neigh_proto_dir[2];
2469 ctl_table neigh_root_dir[2];
2470 } neigh_sysctl_template __read_mostly = {
2471 .neigh_vars = {
2472 {
2473 .ctl_name = NET_NEIGH_MCAST_SOLICIT,
2474 .procname = "mcast_solicit",
2475 .maxlen = sizeof(int),
2476 .mode = 0644,
2477 .proc_handler = &proc_dointvec,
2478 },
2479 {
2480 .ctl_name = NET_NEIGH_UCAST_SOLICIT,
2481 .procname = "ucast_solicit",
2482 .maxlen = sizeof(int),
2483 .mode = 0644,
2484 .proc_handler = &proc_dointvec,
2485 },
2486 {
2487 .ctl_name = NET_NEIGH_APP_SOLICIT,
2488 .procname = "app_solicit",
2489 .maxlen = sizeof(int),
2490 .mode = 0644,
2491 .proc_handler = &proc_dointvec,
2492 },
2493 {
2494 .ctl_name = NET_NEIGH_RETRANS_TIME,
2495 .procname = "retrans_time",
2496 .maxlen = sizeof(int),
2497 .mode = 0644,
2498 .proc_handler = &proc_dointvec_userhz_jiffies,
2499 },
2500 {
2501 .ctl_name = NET_NEIGH_REACHABLE_TIME,
2502 .procname = "base_reachable_time",
2503 .maxlen = sizeof(int),
2504 .mode = 0644,
2505 .proc_handler = &proc_dointvec_jiffies,
2506 .strategy = &sysctl_jiffies,
2507 },
2508 {
2509 .ctl_name = NET_NEIGH_DELAY_PROBE_TIME,
2510 .procname = "delay_first_probe_time",
2511 .maxlen = sizeof(int),
2512 .mode = 0644,
2513 .proc_handler = &proc_dointvec_jiffies,
2514 .strategy = &sysctl_jiffies,
2515 },
2516 {
2517 .ctl_name = NET_NEIGH_GC_STALE_TIME,
2518 .procname = "gc_stale_time",
2519 .maxlen = sizeof(int),
2520 .mode = 0644,
2521 .proc_handler = &proc_dointvec_jiffies,
2522 .strategy = &sysctl_jiffies,
2523 },
2524 {
2525 .ctl_name = NET_NEIGH_UNRES_QLEN,
2526 .procname = "unres_qlen",
2527 .maxlen = sizeof(int),
2528 .mode = 0644,
2529 .proc_handler = &proc_dointvec,
2530 },
2531 {
2532 .ctl_name = NET_NEIGH_PROXY_QLEN,
2533 .procname = "proxy_qlen",
2534 .maxlen = sizeof(int),
2535 .mode = 0644,
2536 .proc_handler = &proc_dointvec,
2537 },
2538 {
2539 .ctl_name = NET_NEIGH_ANYCAST_DELAY,
2540 .procname = "anycast_delay",
2541 .maxlen = sizeof(int),
2542 .mode = 0644,
2543 .proc_handler = &proc_dointvec_userhz_jiffies,
2544 },
2545 {
2546 .ctl_name = NET_NEIGH_PROXY_DELAY,
2547 .procname = "proxy_delay",
2548 .maxlen = sizeof(int),
2549 .mode = 0644,
2550 .proc_handler = &proc_dointvec_userhz_jiffies,
2551 },
2552 {
2553 .ctl_name = NET_NEIGH_LOCKTIME,
2554 .procname = "locktime",
2555 .maxlen = sizeof(int),
2556 .mode = 0644,
2557 .proc_handler = &proc_dointvec_userhz_jiffies,
2558 },
2559 {
2560 .ctl_name = NET_NEIGH_GC_INTERVAL,
2561 .procname = "gc_interval",
2562 .maxlen = sizeof(int),
2563 .mode = 0644,
2564 .proc_handler = &proc_dointvec_jiffies,
2565 .strategy = &sysctl_jiffies,
2566 },
2567 {
2568 .ctl_name = NET_NEIGH_GC_THRESH1,
2569 .procname = "gc_thresh1",
2570 .maxlen = sizeof(int),
2571 .mode = 0644,
2572 .proc_handler = &proc_dointvec,
2573 },
2574 {
2575 .ctl_name = NET_NEIGH_GC_THRESH2,
2576 .procname = "gc_thresh2",
2577 .maxlen = sizeof(int),
2578 .mode = 0644,
2579 .proc_handler = &proc_dointvec,
2580 },
2581 {
2582 .ctl_name = NET_NEIGH_GC_THRESH3,
2583 .procname = "gc_thresh3",
2584 .maxlen = sizeof(int),
2585 .mode = 0644,
2586 .proc_handler = &proc_dointvec,
2587 },
2588 {
2589 .ctl_name = NET_NEIGH_RETRANS_TIME_MS,
2590 .procname = "retrans_time_ms",
2591 .maxlen = sizeof(int),
2592 .mode = 0644,
2593 .proc_handler = &proc_dointvec_ms_jiffies,
2594 .strategy = &sysctl_ms_jiffies,
2595 },
2596 {
2597 .ctl_name = NET_NEIGH_REACHABLE_TIME_MS,
2598 .procname = "base_reachable_time_ms",
2599 .maxlen = sizeof(int),
2600 .mode = 0644,
2601 .proc_handler = &proc_dointvec_ms_jiffies,
2602 .strategy = &sysctl_ms_jiffies,
2603 },
2604 },
2605 .neigh_dev = {
2606 {
2607 .ctl_name = NET_PROTO_CONF_DEFAULT,
2608 .procname = "default",
2609 .mode = 0555,
2610 },
2611 },
2612 .neigh_neigh_dir = {
2613 {
2614 .procname = "neigh",
2615 .mode = 0555,
2616 },
2617 },
2618 .neigh_proto_dir = {
2619 {
2620 .mode = 0555,
2621 },
2622 },
2623 .neigh_root_dir = {
2624 {
2625 .ctl_name = CTL_NET,
2626 .procname = "net",
2627 .mode = 0555,
2628 },
2629 },
2630 };
2631
2632 int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
2633 int p_id, int pdev_id, char *p_name,
2634 proc_handler *handler, ctl_handler *strategy)
2635 {
2636 struct neigh_sysctl_table *t = kmemdup(&neigh_sysctl_template,
2637 sizeof(*t), GFP_KERNEL);
2638 const char *dev_name_source = NULL;
2639 char *dev_name = NULL;
2640 int err = 0;
2641
2642 if (!t)
2643 return -ENOBUFS;
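	/* Indices follow the order of neigh_sysctl_template.neigh_vars:
	 * 0-2 solicit counters, 3-6 timers, 7-8 queue lengths,
	 * 9-10 delays, 11 locktime, 12-15 table-wide GC knobs,
	 * 16-17 millisecond aliases of the two timer entries.
	 */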
2644 t->neigh_vars[0].data = &p->mcast_probes;
2645 t->neigh_vars[1].data = &p->ucast_probes;
2646 t->neigh_vars[2].data = &p->app_probes;
2647 t->neigh_vars[3].data = &p->retrans_time;
2648 t->neigh_vars[4].data = &p->base_reachable_time;
2649 t->neigh_vars[5].data = &p->delay_probe_time;
2650 t->neigh_vars[6].data = &p->gc_staletime;
2651 t->neigh_vars[7].data = &p->queue_len;
2652 t->neigh_vars[8].data = &p->proxy_qlen;
2653 t->neigh_vars[9].data = &p->anycast_delay;
2654 t->neigh_vars[10].data = &p->proxy_delay;
2655 t->neigh_vars[11].data = &p->locktime;
2656
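	/* Per-device tables hide the table-wide GC entries (12-15).  The
	 * "default" table instead points them just past *p, relying on
	 * gc_interval and gc_thresh1..3 being laid out immediately after
	 * the embedded neigh_parms in struct neigh_table.
	 */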
2657 if (dev) {
2658 dev_name_source = dev->name;
2659 t->neigh_dev[0].ctl_name = dev->ifindex;
2660 t->neigh_vars[12].procname = NULL;
2661 t->neigh_vars[13].procname = NULL;
2662 t->neigh_vars[14].procname = NULL;
2663 t->neigh_vars[15].procname = NULL;
2664 } else {
2665 dev_name_source = t->neigh_dev[0].procname;
2666 t->neigh_vars[12].data = (int *)(p + 1);
2667 t->neigh_vars[13].data = (int *)(p + 1) + 1;
2668 t->neigh_vars[14].data = (int *)(p + 1) + 2;
2669 t->neigh_vars[15].data = (int *)(p + 1) + 3;
2670 }
2671
2672 t->neigh_vars[16].data = &p->retrans_time;
2673 t->neigh_vars[17].data = &p->base_reachable_time;
2674
2675 if (handler || strategy) {
2676 /* RetransTime */
2677 t->neigh_vars[3].proc_handler = handler;
2678 t->neigh_vars[3].strategy = strategy;
2679 t->neigh_vars[3].extra1 = dev;
2680 /* ReachableTime */
2681 t->neigh_vars[4].proc_handler = handler;
2682 t->neigh_vars[4].strategy = strategy;
2683 t->neigh_vars[4].extra1 = dev;
2684 		/* RetransTime (in milliseconds) */
2685 t->neigh_vars[16].proc_handler = handler;
2686 t->neigh_vars[16].strategy = strategy;
2687 t->neigh_vars[16].extra1 = dev;
2688 /* ReachableTime (in milliseconds) */
2689 t->neigh_vars[17].proc_handler = handler;
2690 t->neigh_vars[17].strategy = strategy;
2691 t->neigh_vars[17].extra1 = dev;
2692 }
2693
2694 dev_name = kstrdup(dev_name_source, GFP_KERNEL);
2695 if (!dev_name) {
2696 err = -ENOBUFS;
2697 goto free;
2698 }
2699
2700 t->neigh_dev[0].procname = dev_name;
2701
2702 t->neigh_neigh_dir[0].ctl_name = pdev_id;
2703
2704 t->neigh_proto_dir[0].procname = p_name;
2705 t->neigh_proto_dir[0].ctl_name = p_id;
2706
2707 t->neigh_dev[0].child = t->neigh_vars;
2708 t->neigh_neigh_dir[0].child = t->neigh_dev;
2709 t->neigh_proto_dir[0].child = t->neigh_neigh_dir;
2710 t->neigh_root_dir[0].child = t->neigh_proto_dir;
2711
2712 t->sysctl_header = register_sysctl_table(t->neigh_root_dir);
2713 if (!t->sysctl_header) {
2714 err = -ENOBUFS;
2715 goto free_procname;
2716 }
2717 p->sysctl_table = t;
2718 return 0;
2719
2720 /* error path */
2721 free_procname:
2722 kfree(dev_name);
2723 free:
2724 kfree(t);
2725
2726 return err;
2727 }
2728
2729 void neigh_sysctl_unregister(struct neigh_parms *p)
2730 {
2731 if (p->sysctl_table) {
2732 struct neigh_sysctl_table *t = p->sysctl_table;
2733 p->sysctl_table = NULL;
2734 unregister_sysctl_table(t->sysctl_header);
2735 kfree(t->neigh_dev[0].procname);
2736 kfree(t);
2737 }
2738 }
2739
2740 #endif /* CONFIG_SYSCTL */
2741
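/* Wire the generic neighbour code into rtnetlink: neighbour entry
 * add/delete/dump plus table parameter dump/set.
 */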
2742 static int __init neigh_init(void)
2743 {
2744 rtnl_register(PF_UNSPEC, RTM_NEWNEIGH, neigh_add, NULL);
2745 rtnl_register(PF_UNSPEC, RTM_DELNEIGH, neigh_delete, NULL);
2746 rtnl_register(PF_UNSPEC, RTM_GETNEIGH, NULL, neigh_dump_info);
2747
2748 rtnl_register(PF_UNSPEC, RTM_GETNEIGHTBL, NULL, neightbl_dump_info);
2749 rtnl_register(PF_UNSPEC, RTM_SETNEIGHTBL, neightbl_set, NULL);
2750
2751 return 0;
2752 }
2753
2754 subsys_initcall(neigh_init);
2755
2756 EXPORT_SYMBOL(__neigh_event_send);
2757 EXPORT_SYMBOL(neigh_changeaddr);
2758 EXPORT_SYMBOL(neigh_compat_output);
2759 EXPORT_SYMBOL(neigh_connected_output);
2760 EXPORT_SYMBOL(neigh_create);
2761 EXPORT_SYMBOL(neigh_destroy);
2762 EXPORT_SYMBOL(neigh_event_ns);
2763 EXPORT_SYMBOL(neigh_ifdown);
2764 EXPORT_SYMBOL(neigh_lookup);
2765 EXPORT_SYMBOL(neigh_lookup_nodev);
2766 EXPORT_SYMBOL(neigh_parms_alloc);
2767 EXPORT_SYMBOL(neigh_parms_release);
2768 EXPORT_SYMBOL(neigh_rand_reach_time);
2769 EXPORT_SYMBOL(neigh_resolve_output);
2770 EXPORT_SYMBOL(neigh_table_clear);
2771 EXPORT_SYMBOL(neigh_table_init);
2772 EXPORT_SYMBOL(neigh_table_init_no_netlink);
2773 EXPORT_SYMBOL(neigh_update);
2774 EXPORT_SYMBOL(pneigh_enqueue);
2775 EXPORT_SYMBOL(pneigh_lookup);
2776
2777 #ifdef CONFIG_ARPD
2778 EXPORT_SYMBOL(neigh_app_ns);
2779 #endif
2780 #ifdef CONFIG_SYSCTL
2781 EXPORT_SYMBOL(neigh_sysctl_register);
2782 EXPORT_SYMBOL(neigh_sysctl_unregister);
2783 #endif