[NET]: Modify all rtnetlink methods to only work in the initial namespace (v2)
net/core/neighbour.c
1 /*
2 * Generic address resolution entity
3 *
4 * Authors:
5 * Pedro Roque <roque@di.fc.ul.pt>
6 * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
12 *
13 * Fixes:
14 * Vitaly E. Lavrov releasing NULL neighbor in neigh_add.
15 * Harald Welte Add neighbour cache statistics like rtstat
16 */
17
18 #include <linux/types.h>
19 #include <linux/kernel.h>
20 #include <linux/module.h>
21 #include <linux/socket.h>
22 #include <linux/netdevice.h>
23 #include <linux/proc_fs.h>
24 #ifdef CONFIG_SYSCTL
25 #include <linux/sysctl.h>
26 #endif
27 #include <linux/times.h>
28 #include <net/net_namespace.h>
29 #include <net/neighbour.h>
30 #include <net/dst.h>
31 #include <net/sock.h>
32 #include <net/netevent.h>
33 #include <net/netlink.h>
34 #include <linux/rtnetlink.h>
35 #include <linux/random.h>
36 #include <linux/string.h>
37 #include <linux/log2.h>
38
39 #define NEIGH_DEBUG 1
40
41 #define NEIGH_PRINTK(x...) printk(x)
42 #define NEIGH_NOPRINTK(x...) do { ; } while(0)
43 #define NEIGH_PRINTK0 NEIGH_PRINTK
44 #define NEIGH_PRINTK1 NEIGH_NOPRINTK
45 #define NEIGH_PRINTK2 NEIGH_NOPRINTK
46
47 #if NEIGH_DEBUG >= 1
48 #undef NEIGH_PRINTK1
49 #define NEIGH_PRINTK1 NEIGH_PRINTK
50 #endif
51 #if NEIGH_DEBUG >= 2
52 #undef NEIGH_PRINTK2
53 #define NEIGH_PRINTK2 NEIGH_PRINTK
54 #endif
55
56 #define PNEIGH_HASHMASK 0xF
57
58 static void neigh_timer_handler(unsigned long arg);
59 static void __neigh_notify(struct neighbour *n, int type, int flags);
60 static void neigh_update_notify(struct neighbour *neigh);
61 static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
62 void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev);
63
64 static struct neigh_table *neigh_tables;
65 #ifdef CONFIG_PROC_FS
66 static const struct file_operations neigh_stat_seq_fops;
67 #endif
68
69 /*
70 Neighbour hash table buckets are protected with rwlock tbl->lock.
71
72 - All the scans/updates to hash buckets MUST be made under this lock.
73 - NOTHING clever should be done under this lock: no callbacks
74 to protocol backends, no attempts to send anything to the network.
75 Doing so will deadlock if the backend/driver wants to use the
76 neighbour cache.
77 - If the entry requires some non-trivial actions, increase
78 its reference count and release table lock.
79
80 Neighbour entries are protected:
81 - with reference count.
82 - with rwlock neigh->lock
83
84 Reference count prevents destruction.
85
86 neigh->lock mainly serializes ll address data and its validity state.
87 However, the same lock is also used to protect other entry fields:
88 - timer
89 - resolution queue
90
91 Again, nothing clever shall be done under neigh->lock;
92 the most complicated procedure we allow is dev->hard_header.
93 It is assumed that dev->hard_header is simple and does
94 not make callbacks into the neighbour tables.
95
96 The last lock is neigh_tbl_lock. It is a pure SMP lock, protecting
97 the list of neighbour tables. This list is used only in process context.
98 */
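/*
 * Illustrative sketch of the rule above (not part of the original file;
 * interesting() and do_slow_work() are hypothetical): scan a bucket under
 * tbl->lock, take a reference on the entry of interest, drop the lock,
 * and only then do the non-trivial work.
 *
 *	struct neighbour *n, *found = NULL;
 *
 *	read_lock_bh(&tbl->lock);
 *	for (n = tbl->hash_buckets[hash_val & tbl->hash_mask]; n; n = n->next) {
 *		if (interesting(n)) {
 *			neigh_hold(n);
 *			found = n;
 *			break;
 *		}
 *	}
 *	read_unlock_bh(&tbl->lock);
 *
 *	if (found) {
 *		do_slow_work(found);
 *		neigh_release(found);
 *	}
 */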
99
100 static DEFINE_RWLOCK(neigh_tbl_lock);
101
102 static int neigh_blackhole(struct sk_buff *skb)
103 {
104 kfree_skb(skb);
105 return -ENETDOWN;
106 }
107
108 static void neigh_cleanup_and_release(struct neighbour *neigh)
109 {
110 if (neigh->parms->neigh_cleanup)
111 neigh->parms->neigh_cleanup(neigh);
112
113 __neigh_notify(neigh, RTM_DELNEIGH, 0);
114 neigh_release(neigh);
115 }
116
117 /*
118 * It is a random distribution in the interval (1/2)*base...(3/2)*base.
119 * It corresponds to the default IPv6 settings and is not overridable,
120 * because it is a really reasonable choice.
121 */
122
123 unsigned long neigh_rand_reach_time(unsigned long base)
124 {
125 return (base ? (net_random() % base) + (base >> 1) : 0);
126 }
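/*
 * Worked example (assumed value): with base_reachable_time = 30 * HZ,
 * the result is 15*HZ plus a random value in [0, 30*HZ), i.e. uniformly
 * distributed over [15*HZ, 45*HZ) -- the (1/2)*base ... (3/2)*base
 * interval described above (15 to 45 seconds). A base of 0 returns 0.
 */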
127
128
129 static int neigh_forced_gc(struct neigh_table *tbl)
130 {
131 int shrunk = 0;
132 int i;
133
134 NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);
135
136 write_lock_bh(&tbl->lock);
137 for (i = 0; i <= tbl->hash_mask; i++) {
138 struct neighbour *n, **np;
139
140 np = &tbl->hash_buckets[i];
141 while ((n = *np) != NULL) {
142 /* Neighbour record may be discarded if:
143 * - nobody refers to it.
144 * - it is not permanent
145 */
146 write_lock(&n->lock);
147 if (atomic_read(&n->refcnt) == 1 &&
148 !(n->nud_state & NUD_PERMANENT)) {
149 *np = n->next;
150 n->dead = 1;
151 shrunk = 1;
152 write_unlock(&n->lock);
153 neigh_cleanup_and_release(n);
154 continue;
155 }
156 write_unlock(&n->lock);
157 np = &n->next;
158 }
159 }
160
161 tbl->last_flush = jiffies;
162
163 write_unlock_bh(&tbl->lock);
164
165 return shrunk;
166 }
167
168 static int neigh_del_timer(struct neighbour *n)
169 {
170 if ((n->nud_state & NUD_IN_TIMER) &&
171 del_timer(&n->timer)) {
172 neigh_release(n);
173 return 1;
174 }
175 return 0;
176 }
177
178 static void pneigh_queue_purge(struct sk_buff_head *list)
179 {
180 struct sk_buff *skb;
181
182 while ((skb = skb_dequeue(list)) != NULL) {
183 dev_put(skb->dev);
184 kfree_skb(skb);
185 }
186 }
187
188 static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev)
189 {
190 int i;
191
192 for (i = 0; i <= tbl->hash_mask; i++) {
193 struct neighbour *n, **np = &tbl->hash_buckets[i];
194
195 while ((n = *np) != NULL) {
196 if (dev && n->dev != dev) {
197 np = &n->next;
198 continue;
199 }
200 *np = n->next;
201 write_lock(&n->lock);
202 neigh_del_timer(n);
203 n->dead = 1;
204
205 if (atomic_read(&n->refcnt) != 1) {
206 /* The most unpleasant situation.
207 We must destroy neighbour entry,
208 but someone still uses it.
209
210 The destroy will be delayed until
211 the last user releases us, but
212 we must kill timers etc. and move
213 it to a safe state.
214 */
215 skb_queue_purge(&n->arp_queue);
216 n->output = neigh_blackhole;
217 if (n->nud_state & NUD_VALID)
218 n->nud_state = NUD_NOARP;
219 else
220 n->nud_state = NUD_NONE;
221 NEIGH_PRINTK2("neigh %p is stray.\n", n);
222 }
223 write_unlock(&n->lock);
224 neigh_cleanup_and_release(n);
225 }
226 }
227 }
228
229 void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
230 {
231 write_lock_bh(&tbl->lock);
232 neigh_flush_dev(tbl, dev);
233 write_unlock_bh(&tbl->lock);
234 }
235
236 int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
237 {
238 write_lock_bh(&tbl->lock);
239 neigh_flush_dev(tbl, dev);
240 pneigh_ifdown(tbl, dev);
241 write_unlock_bh(&tbl->lock);
242
243 del_timer_sync(&tbl->proxy_timer);
244 pneigh_queue_purge(&tbl->proxy_queue);
245 return 0;
246 }
247
248 static struct neighbour *neigh_alloc(struct neigh_table *tbl)
249 {
250 struct neighbour *n = NULL;
251 unsigned long now = jiffies;
252 int entries;
253
254 entries = atomic_inc_return(&tbl->entries) - 1;
255 if (entries >= tbl->gc_thresh3 ||
256 (entries >= tbl->gc_thresh2 &&
257 time_after(now, tbl->last_flush + 5 * HZ))) {
258 if (!neigh_forced_gc(tbl) &&
259 entries >= tbl->gc_thresh3)
260 goto out_entries;
261 }
262
263 n = kmem_cache_zalloc(tbl->kmem_cachep, GFP_ATOMIC);
264 if (!n)
265 goto out_entries;
266
267 skb_queue_head_init(&n->arp_queue);
268 rwlock_init(&n->lock);
269 n->updated = n->used = now;
270 n->nud_state = NUD_NONE;
271 n->output = neigh_blackhole;
272 n->parms = neigh_parms_clone(&tbl->parms);
273 setup_timer(&n->timer, neigh_timer_handler, (unsigned long)n);
274
275 NEIGH_CACHE_STAT_INC(tbl, allocs);
276 n->tbl = tbl;
277 atomic_set(&n->refcnt, 1);
278 n->dead = 1;
279 out:
280 return n;
281
282 out_entries:
283 atomic_dec(&tbl->entries);
284 goto out;
285 }
286
287 static struct neighbour **neigh_hash_alloc(unsigned int entries)
288 {
289 unsigned long size = entries * sizeof(struct neighbour *);
290 struct neighbour **ret;
291
292 if (size <= PAGE_SIZE) {
293 ret = kzalloc(size, GFP_ATOMIC);
294 } else {
295 ret = (struct neighbour **)
296 __get_free_pages(GFP_ATOMIC|__GFP_ZERO, get_order(size));
297 }
298 return ret;
299 }
300
301 static void neigh_hash_free(struct neighbour **hash, unsigned int entries)
302 {
303 unsigned long size = entries * sizeof(struct neighbour *);
304
305 if (size <= PAGE_SIZE)
306 kfree(hash);
307 else
308 free_pages((unsigned long)hash, get_order(size));
309 }
310
311 static void neigh_hash_grow(struct neigh_table *tbl, unsigned long new_entries)
312 {
313 struct neighbour **new_hash, **old_hash;
314 unsigned int i, new_hash_mask, old_entries;
315
316 NEIGH_CACHE_STAT_INC(tbl, hash_grows);
317
318 BUG_ON(!is_power_of_2(new_entries));
319 new_hash = neigh_hash_alloc(new_entries);
320 if (!new_hash)
321 return;
322
323 old_entries = tbl->hash_mask + 1;
324 new_hash_mask = new_entries - 1;
325 old_hash = tbl->hash_buckets;
326
327 get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));
328 for (i = 0; i < old_entries; i++) {
329 struct neighbour *n, *next;
330
331 for (n = old_hash[i]; n; n = next) {
332 unsigned int hash_val = tbl->hash(n->primary_key, n->dev);
333
334 hash_val &= new_hash_mask;
335 next = n->next;
336
337 n->next = new_hash[hash_val];
338 new_hash[hash_val] = n;
339 }
340 }
341 tbl->hash_buckets = new_hash;
342 tbl->hash_mask = new_hash_mask;
343
344 neigh_hash_free(old_hash, old_entries);
345 }
346
347 struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
348 struct net_device *dev)
349 {
350 struct neighbour *n;
351 int key_len = tbl->key_len;
352 u32 hash_val = tbl->hash(pkey, dev);
353
354 NEIGH_CACHE_STAT_INC(tbl, lookups);
355
356 read_lock_bh(&tbl->lock);
357 for (n = tbl->hash_buckets[hash_val & tbl->hash_mask]; n; n = n->next) {
358 if (dev == n->dev && !memcmp(n->primary_key, pkey, key_len)) {
359 neigh_hold(n);
360 NEIGH_CACHE_STAT_INC(tbl, hits);
361 break;
362 }
363 }
364 read_unlock_bh(&tbl->lock);
365 return n;
366 }
367
368 struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, const void *pkey)
369 {
370 struct neighbour *n;
371 int key_len = tbl->key_len;
372 u32 hash_val = tbl->hash(pkey, NULL);
373
374 NEIGH_CACHE_STAT_INC(tbl, lookups);
375
376 read_lock_bh(&tbl->lock);
377 for (n = tbl->hash_buckets[hash_val & tbl->hash_mask]; n; n = n->next) {
378 if (!memcmp(n->primary_key, pkey, key_len)) {
379 neigh_hold(n);
380 NEIGH_CACHE_STAT_INC(tbl, hits);
381 break;
382 }
383 }
384 read_unlock_bh(&tbl->lock);
385 return n;
386 }
387
388 struct neighbour *neigh_create(struct neigh_table *tbl, const void *pkey,
389 struct net_device *dev)
390 {
391 u32 hash_val;
392 int key_len = tbl->key_len;
393 int error;
394 struct neighbour *n1, *rc, *n = neigh_alloc(tbl);
395
396 if (!n) {
397 rc = ERR_PTR(-ENOBUFS);
398 goto out;
399 }
400
401 memcpy(n->primary_key, pkey, key_len);
402 n->dev = dev;
403 dev_hold(dev);
404
405 /* Protocol specific setup. */
406 if (tbl->constructor && (error = tbl->constructor(n)) < 0) {
407 rc = ERR_PTR(error);
408 goto out_neigh_release;
409 }
410
411 /* Device specific setup. */
412 if (n->parms->neigh_setup &&
413 (error = n->parms->neigh_setup(n)) < 0) {
414 rc = ERR_PTR(error);
415 goto out_neigh_release;
416 }
417
418 n->confirmed = jiffies - (n->parms->base_reachable_time << 1);
419
420 write_lock_bh(&tbl->lock);
421
422 if (atomic_read(&tbl->entries) > (tbl->hash_mask + 1))
423 neigh_hash_grow(tbl, (tbl->hash_mask + 1) << 1);
424
425 hash_val = tbl->hash(pkey, dev) & tbl->hash_mask;
426
427 if (n->parms->dead) {
428 rc = ERR_PTR(-EINVAL);
429 goto out_tbl_unlock;
430 }
431
432 for (n1 = tbl->hash_buckets[hash_val]; n1; n1 = n1->next) {
433 if (dev == n1->dev && !memcmp(n1->primary_key, pkey, key_len)) {
434 neigh_hold(n1);
435 rc = n1;
436 goto out_tbl_unlock;
437 }
438 }
439
440 n->next = tbl->hash_buckets[hash_val];
441 tbl->hash_buckets[hash_val] = n;
442 n->dead = 0;
443 neigh_hold(n);
444 write_unlock_bh(&tbl->lock);
445 NEIGH_PRINTK2("neigh %p is created.\n", n);
446 rc = n;
447 out:
448 return rc;
449 out_tbl_unlock:
450 write_unlock_bh(&tbl->lock);
451 out_neigh_release:
452 neigh_release(n);
453 goto out;
454 }
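/*
 * Minimal usage sketch (not part of the original file; tbl, pkey and dev
 * are assumed to come from the caller): look the entry up first, create
 * it only on a miss, and drop the reference both functions return.
 *
 *	struct neighbour *n;
 *
 *	n = neigh_lookup(tbl, pkey, dev);
 *	if (!n) {
 *		n = neigh_create(tbl, pkey, dev);
 *		if (IS_ERR(n))
 *			return PTR_ERR(n);
 *	}
 *	... use n->ha, n->nud_state, n->output(skb), ...
 *	neigh_release(n);
 */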
455
456 struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl, const void *pkey,
457 struct net_device *dev, int creat)
458 {
459 struct pneigh_entry *n;
460 int key_len = tbl->key_len;
461 u32 hash_val = *(u32 *)(pkey + key_len - 4);
462
463 hash_val ^= (hash_val >> 16);
464 hash_val ^= hash_val >> 8;
465 hash_val ^= hash_val >> 4;
466 hash_val &= PNEIGH_HASHMASK;
467
468 read_lock_bh(&tbl->lock);
469
470 for (n = tbl->phash_buckets[hash_val]; n; n = n->next) {
471 if (!memcmp(n->key, pkey, key_len) &&
472 (n->dev == dev || !n->dev)) {
473 read_unlock_bh(&tbl->lock);
474 goto out;
475 }
476 }
477 read_unlock_bh(&tbl->lock);
478 n = NULL;
479 if (!creat)
480 goto out;
481
482 ASSERT_RTNL();
483
484 n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL);
485 if (!n)
486 goto out;
487
488 memcpy(n->key, pkey, key_len);
489 n->dev = dev;
490 if (dev)
491 dev_hold(dev);
492
493 if (tbl->pconstructor && tbl->pconstructor(n)) {
494 if (dev)
495 dev_put(dev);
496 kfree(n);
497 n = NULL;
498 goto out;
499 }
500
501 write_lock_bh(&tbl->lock);
502 n->next = tbl->phash_buckets[hash_val];
503 tbl->phash_buckets[hash_val] = n;
504 write_unlock_bh(&tbl->lock);
505 out:
506 return n;
507 }
508
509
510 int pneigh_delete(struct neigh_table *tbl, const void *pkey,
511 struct net_device *dev)
512 {
513 struct pneigh_entry *n, **np;
514 int key_len = tbl->key_len;
515 u32 hash_val = *(u32 *)(pkey + key_len - 4);
516
517 hash_val ^= (hash_val >> 16);
518 hash_val ^= hash_val >> 8;
519 hash_val ^= hash_val >> 4;
520 hash_val &= PNEIGH_HASHMASK;
521
522 write_lock_bh(&tbl->lock);
523 for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
524 np = &n->next) {
525 if (!memcmp(n->key, pkey, key_len) && n->dev == dev) {
526 *np = n->next;
527 write_unlock_bh(&tbl->lock);
528 if (tbl->pdestructor)
529 tbl->pdestructor(n);
530 if (n->dev)
531 dev_put(n->dev);
532 kfree(n);
533 return 0;
534 }
535 }
536 write_unlock_bh(&tbl->lock);
537 return -ENOENT;
538 }
539
540 static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
541 {
542 struct pneigh_entry *n, **np;
543 u32 h;
544
545 for (h = 0; h <= PNEIGH_HASHMASK; h++) {
546 np = &tbl->phash_buckets[h];
547 while ((n = *np) != NULL) {
548 if (!dev || n->dev == dev) {
549 *np = n->next;
550 if (tbl->pdestructor)
551 tbl->pdestructor(n);
552 if (n->dev)
553 dev_put(n->dev);
554 kfree(n);
555 continue;
556 }
557 np = &n->next;
558 }
559 }
560 return -ENOENT;
561 }
562
563
564 /*
565 * The neighbour must already be out of the table.
566 *
567 */
568 void neigh_destroy(struct neighbour *neigh)
569 {
570 struct hh_cache *hh;
571
572 NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);
573
574 if (!neigh->dead) {
575 printk(KERN_WARNING
576 "Destroying alive neighbour %p\n", neigh);
577 dump_stack();
578 return;
579 }
580
581 if (neigh_del_timer(neigh))
582 printk(KERN_WARNING "Impossible event.\n");
583
584 while ((hh = neigh->hh) != NULL) {
585 neigh->hh = hh->hh_next;
586 hh->hh_next = NULL;
587
588 write_seqlock_bh(&hh->hh_lock);
589 hh->hh_output = neigh_blackhole;
590 write_sequnlock_bh(&hh->hh_lock);
591 if (atomic_dec_and_test(&hh->hh_refcnt))
592 kfree(hh);
593 }
594
595 skb_queue_purge(&neigh->arp_queue);
596
597 dev_put(neigh->dev);
598 neigh_parms_put(neigh->parms);
599
600 NEIGH_PRINTK2("neigh %p is destroyed.\n", neigh);
601
602 atomic_dec(&neigh->tbl->entries);
603 kmem_cache_free(neigh->tbl->kmem_cachep, neigh);
604 }
605
606 /* Neighbour state is suspicious;
607 disable fast path.
608
609 Called with write_locked neigh.
610 */
611 static void neigh_suspect(struct neighbour *neigh)
612 {
613 struct hh_cache *hh;
614
615 NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);
616
617 neigh->output = neigh->ops->output;
618
619 for (hh = neigh->hh; hh; hh = hh->hh_next)
620 hh->hh_output = neigh->ops->output;
621 }
622
623 /* Neighbour state is OK;
624 enable fast path.
625
626 Called with write_locked neigh.
627 */
628 static void neigh_connect(struct neighbour *neigh)
629 {
630 struct hh_cache *hh;
631
632 NEIGH_PRINTK2("neigh %p is connected.\n", neigh);
633
634 neigh->output = neigh->ops->connected_output;
635
636 for (hh = neigh->hh; hh; hh = hh->hh_next)
637 hh->hh_output = neigh->ops->hh_output;
638 }
639
640 static void neigh_periodic_timer(unsigned long arg)
641 {
642 struct neigh_table *tbl = (struct neigh_table *)arg;
643 struct neighbour *n, **np;
644 unsigned long expire, now = jiffies;
645
646 NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);
647
648 write_lock(&tbl->lock);
649
650 /*
651 * periodically recompute ReachableTime from random function
652 */
653
654 if (time_after(now, tbl->last_rand + 300 * HZ)) {
655 struct neigh_parms *p;
656 tbl->last_rand = now;
657 for (p = &tbl->parms; p; p = p->next)
658 p->reachable_time =
659 neigh_rand_reach_time(p->base_reachable_time);
660 }
661
662 np = &tbl->hash_buckets[tbl->hash_chain_gc];
663 tbl->hash_chain_gc = ((tbl->hash_chain_gc + 1) & tbl->hash_mask);
664
665 while ((n = *np) != NULL) {
666 unsigned int state;
667
668 write_lock(&n->lock);
669
670 state = n->nud_state;
671 if (state & (NUD_PERMANENT | NUD_IN_TIMER)) {
672 write_unlock(&n->lock);
673 goto next_elt;
674 }
675
676 if (time_before(n->used, n->confirmed))
677 n->used = n->confirmed;
678
679 if (atomic_read(&n->refcnt) == 1 &&
680 (state == NUD_FAILED ||
681 time_after(now, n->used + n->parms->gc_staletime))) {
682 *np = n->next;
683 n->dead = 1;
684 write_unlock(&n->lock);
685 neigh_cleanup_and_release(n);
686 continue;
687 }
688 write_unlock(&n->lock);
689
690 next_elt:
691 np = &n->next;
692 }
693
694 /* Cycle through all hash buckets every base_reachable_time/2 ticks.
695 * ARP entry timeouts range from 1/2 base_reachable_time to 3/2
696 * base_reachable_time.
697 */
698 expire = tbl->parms.base_reachable_time >> 1;
699 expire /= (tbl->hash_mask + 1);
700 if (!expire)
701 expire = 1;
702
703 if (expire > HZ)
704 mod_timer(&tbl->gc_timer, round_jiffies(now + expire));
705 else
706 mod_timer(&tbl->gc_timer, now + expire);
707
708 write_unlock(&tbl->lock);
709 }
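/*
 * Worked example for the expire computation above (assumed values): with
 * base_reachable_time = 30 * HZ and hash_mask + 1 = 128 buckets,
 * expire = (30*HZ / 2) / 128, about 117 jiffies at HZ=1000. One bucket
 * is then scanned roughly every 120 ms, so the whole table is covered
 * in about 15 s, i.e. base_reachable_time / 2 as stated above.
 */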
710
711 static __inline__ int neigh_max_probes(struct neighbour *n)
712 {
713 struct neigh_parms *p = n->parms;
714 return (n->nud_state & NUD_PROBE ?
715 p->ucast_probes :
716 p->ucast_probes + p->app_probes + p->mcast_probes);
717 }
718
719 static inline void neigh_add_timer(struct neighbour *n, unsigned long when)
720 {
721 if (unlikely(mod_timer(&n->timer, when))) {
722 printk("NEIGH: BUG, double timer add, state is %x\n",
723 n->nud_state);
724 dump_stack();
725 }
726 }
727
728 /* Called when a timer expires for a neighbour entry. */
729
730 static void neigh_timer_handler(unsigned long arg)
731 {
732 unsigned long now, next;
733 struct neighbour *neigh = (struct neighbour *)arg;
734 unsigned state;
735 int notify = 0;
736
737 write_lock(&neigh->lock);
738
739 state = neigh->nud_state;
740 now = jiffies;
741 next = now + HZ;
742
743 if (!(state & NUD_IN_TIMER)) {
744 #ifndef CONFIG_SMP
745 printk(KERN_WARNING "neigh: timer & !nud_in_timer\n");
746 #endif
747 goto out;
748 }
749
750 if (state & NUD_REACHABLE) {
751 if (time_before_eq(now,
752 neigh->confirmed + neigh->parms->reachable_time)) {
753 NEIGH_PRINTK2("neigh %p is still alive.\n", neigh);
754 next = neigh->confirmed + neigh->parms->reachable_time;
755 } else if (time_before_eq(now,
756 neigh->used + neigh->parms->delay_probe_time)) {
757 NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
758 neigh->nud_state = NUD_DELAY;
759 neigh->updated = jiffies;
760 neigh_suspect(neigh);
761 next = now + neigh->parms->delay_probe_time;
762 } else {
763 NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);
764 neigh->nud_state = NUD_STALE;
765 neigh->updated = jiffies;
766 neigh_suspect(neigh);
767 notify = 1;
768 }
769 } else if (state & NUD_DELAY) {
770 if (time_before_eq(now,
771 neigh->confirmed + neigh->parms->delay_probe_time)) {
772 NEIGH_PRINTK2("neigh %p is now reachable.\n", neigh);
773 neigh->nud_state = NUD_REACHABLE;
774 neigh->updated = jiffies;
775 neigh_connect(neigh);
776 notify = 1;
777 next = neigh->confirmed + neigh->parms->reachable_time;
778 } else {
779 NEIGH_PRINTK2("neigh %p is probed.\n", neigh);
780 neigh->nud_state = NUD_PROBE;
781 neigh->updated = jiffies;
782 atomic_set(&neigh->probes, 0);
783 next = now + neigh->parms->retrans_time;
784 }
785 } else {
786 /* NUD_PROBE|NUD_INCOMPLETE */
787 next = now + neigh->parms->retrans_time;
788 }
789
790 if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
791 atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
792 struct sk_buff *skb;
793
794 neigh->nud_state = NUD_FAILED;
795 neigh->updated = jiffies;
796 notify = 1;
797 NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
798 NEIGH_PRINTK2("neigh %p is failed.\n", neigh);
799
800 /* This is a very delicate place. report_unreachable is a very complicated
801 routine. In particular, it can hit the same neighbour entry!
802
803 So we try to be careful and avoid a dead loop. --ANK
804 */
805 while (neigh->nud_state == NUD_FAILED &&
806 (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
807 write_unlock(&neigh->lock);
808 neigh->ops->error_report(neigh, skb);
809 write_lock(&neigh->lock);
810 }
811 skb_queue_purge(&neigh->arp_queue);
812 }
813
814 if (neigh->nud_state & NUD_IN_TIMER) {
815 if (time_before(next, jiffies + HZ/2))
816 next = jiffies + HZ/2;
817 if (!mod_timer(&neigh->timer, next))
818 neigh_hold(neigh);
819 }
820 if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
821 struct sk_buff *skb = skb_peek(&neigh->arp_queue);
822 /* keep skb alive even if arp_queue overflows */
823 if (skb)
824 skb_get(skb);
825 write_unlock(&neigh->lock);
826 neigh->ops->solicit(neigh, skb);
827 atomic_inc(&neigh->probes);
828 if (skb)
829 kfree_skb(skb);
830 } else {
831 out:
832 write_unlock(&neigh->lock);
833 }
834
835 if (notify)
836 neigh_update_notify(neigh);
837
838 neigh_release(neigh);
839 }
840
841 int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
842 {
843 int rc;
844 unsigned long now;
845
846 write_lock_bh(&neigh->lock);
847
848 rc = 0;
849 if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
850 goto out_unlock_bh;
851
852 now = jiffies;
853
854 if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
855 if (neigh->parms->mcast_probes + neigh->parms->app_probes) {
856 atomic_set(&neigh->probes, neigh->parms->ucast_probes);
857 neigh->nud_state = NUD_INCOMPLETE;
858 neigh->updated = jiffies;
859 neigh_hold(neigh);
860 neigh_add_timer(neigh, now + 1);
861 } else {
862 neigh->nud_state = NUD_FAILED;
863 neigh->updated = jiffies;
864 write_unlock_bh(&neigh->lock);
865
866 if (skb)
867 kfree_skb(skb);
868 return 1;
869 }
870 } else if (neigh->nud_state & NUD_STALE) {
871 NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
872 neigh_hold(neigh);
873 neigh->nud_state = NUD_DELAY;
874 neigh->updated = jiffies;
875 neigh_add_timer(neigh,
876 jiffies + neigh->parms->delay_probe_time);
877 }
878
879 if (neigh->nud_state == NUD_INCOMPLETE) {
880 if (skb) {
881 if (skb_queue_len(&neigh->arp_queue) >=
882 neigh->parms->queue_len) {
883 struct sk_buff *buff;
884 buff = neigh->arp_queue.next;
885 __skb_unlink(buff, &neigh->arp_queue);
886 kfree_skb(buff);
887 }
888 __skb_queue_tail(&neigh->arp_queue, skb);
889 }
890 rc = 1;
891 }
892 out_unlock_bh:
893 write_unlock_bh(&neigh->lock);
894 return rc;
895 }
896
897 static void neigh_update_hhs(struct neighbour *neigh)
898 {
899 struct hh_cache *hh;
900 void (*update)(struct hh_cache*, const struct net_device*, const unsigned char *)
901 = neigh->dev->header_ops->cache_update;
902
903 if (update) {
904 for (hh = neigh->hh; hh; hh = hh->hh_next) {
905 write_seqlock_bh(&hh->hh_lock);
906 update(hh, neigh->dev, neigh->ha);
907 write_sequnlock_bh(&hh->hh_lock);
908 }
909 }
910 }
911
912
913
914 /* Generic update routine.
915 -- lladdr is the new lladdr, or NULL if it is not supplied.
916 -- new is the new state.
917 -- flags
918 NEIGH_UPDATE_F_OVERRIDE allows overriding the existing lladdr
919 if it is different.
920 NEIGH_UPDATE_F_WEAK_OVERRIDE will suspect the existing "connected"
921 lladdr instead of overriding it
922 if it is different.
923 It also allows retaining the current state
924 if lladdr is unchanged.
925 NEIGH_UPDATE_F_ADMIN means that the change is administrative.
926
927 NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows overriding the existing
928 NTF_ROUTER flag.
929 NEIGH_UPDATE_F_ISROUTER indicates if the neighbour is known as
930 a router.
931
932 The caller MUST hold a reference count on the entry.
933 */
934
935 int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
936 u32 flags)
937 {
938 u8 old;
939 int err;
940 int notify = 0;
941 struct net_device *dev;
942 int update_isrouter = 0;
943
944 write_lock_bh(&neigh->lock);
945
946 dev = neigh->dev;
947 old = neigh->nud_state;
948 err = -EPERM;
949
950 if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
951 (old & (NUD_NOARP | NUD_PERMANENT)))
952 goto out;
953
954 if (!(new & NUD_VALID)) {
955 neigh_del_timer(neigh);
956 if (old & NUD_CONNECTED)
957 neigh_suspect(neigh);
958 neigh->nud_state = new;
959 err = 0;
960 notify = old & NUD_VALID;
961 goto out;
962 }
963
964 /* Compare new lladdr with cached one */
965 if (!dev->addr_len) {
966 /* First case: device needs no address. */
967 lladdr = neigh->ha;
968 } else if (lladdr) {
969 /* The second case: if something is already cached
970 and a new address is proposed:
971 - compare new & old
972 - if they are different, check override flag
973 */
974 if ((old & NUD_VALID) &&
975 !memcmp(lladdr, neigh->ha, dev->addr_len))
976 lladdr = neigh->ha;
977 } else {
978 /* No address is supplied; if we know something,
979 use it, otherwise discard the request.
980 */
981 err = -EINVAL;
982 if (!(old & NUD_VALID))
983 goto out;
984 lladdr = neigh->ha;
985 }
986
987 if (new & NUD_CONNECTED)
988 neigh->confirmed = jiffies;
989 neigh->updated = jiffies;
990
991 /* If the entry was valid and the address has not changed,
992 do not change the entry state if the new one is STALE.
993 */
994 err = 0;
995 update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
996 if (old & NUD_VALID) {
997 if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
998 update_isrouter = 0;
999 if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) &&
1000 (old & NUD_CONNECTED)) {
1001 lladdr = neigh->ha;
1002 new = NUD_STALE;
1003 } else
1004 goto out;
1005 } else {
1006 if (lladdr == neigh->ha && new == NUD_STALE &&
1007 ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) ||
1008 (old & NUD_CONNECTED))
1009 )
1010 new = old;
1011 }
1012 }
1013
1014 if (new != old) {
1015 neigh_del_timer(neigh);
1016 if (new & NUD_IN_TIMER) {
1017 neigh_hold(neigh);
1018 neigh_add_timer(neigh, (jiffies +
1019 ((new & NUD_REACHABLE) ?
1020 neigh->parms->reachable_time :
1021 0)));
1022 }
1023 neigh->nud_state = new;
1024 }
1025
1026 if (lladdr != neigh->ha) {
1027 memcpy(&neigh->ha, lladdr, dev->addr_len);
1028 neigh_update_hhs(neigh);
1029 if (!(new & NUD_CONNECTED))
1030 neigh->confirmed = jiffies -
1031 (neigh->parms->base_reachable_time << 1);
1032 notify = 1;
1033 }
1034 if (new == old)
1035 goto out;
1036 if (new & NUD_CONNECTED)
1037 neigh_connect(neigh);
1038 else
1039 neigh_suspect(neigh);
1040 if (!(old & NUD_VALID)) {
1041 struct sk_buff *skb;
1042
1043 /* Again: avoid a dead loop if something went wrong */
1044
1045 while (neigh->nud_state & NUD_VALID &&
1046 (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
1047 struct neighbour *n1 = neigh;
1048 write_unlock_bh(&neigh->lock);
1049 /* On shaper/eql skb->dst->neighbour != neigh :( */
1050 if (skb->dst && skb->dst->neighbour)
1051 n1 = skb->dst->neighbour;
1052 n1->output(skb);
1053 write_lock_bh(&neigh->lock);
1054 }
1055 skb_queue_purge(&neigh->arp_queue);
1056 }
1057 out:
1058 if (update_isrouter) {
1059 neigh->flags = (flags & NEIGH_UPDATE_F_ISROUTER) ?
1060 (neigh->flags | NTF_ROUTER) :
1061 (neigh->flags & ~NTF_ROUTER);
1062 }
1063 write_unlock_bh(&neigh->lock);
1064
1065 if (notify)
1066 neigh_update_notify(neigh);
1067
1068 return err;
1069 }
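/*
 * Illustrative sketch (not part of the original file): an administrative
 * override, similar to what neigh_add() below does for RTM_NEWNEIGH
 * (which passes the state from the netlink message; NUD_PERMANENT here
 * is an arbitrary example state):
 *
 *	err = neigh_update(neigh, lladdr, NUD_PERMANENT,
 *			   NEIGH_UPDATE_F_OVERRIDE |
 *			   NEIGH_UPDATE_F_ADMIN);
 *
 * Without NEIGH_UPDATE_F_ADMIN the call is refused with -EPERM for
 * entries already in NUD_NOARP or NUD_PERMANENT state.
 */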
1070
1071 struct neighbour *neigh_event_ns(struct neigh_table *tbl,
1072 u8 *lladdr, void *saddr,
1073 struct net_device *dev)
1074 {
1075 struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
1076 lladdr || !dev->addr_len);
1077 if (neigh)
1078 neigh_update(neigh, lladdr, NUD_STALE,
1079 NEIGH_UPDATE_F_OVERRIDE);
1080 return neigh;
1081 }
1082
1083 static void neigh_hh_init(struct neighbour *n, struct dst_entry *dst,
1084 __be16 protocol)
1085 {
1086 struct hh_cache *hh;
1087 struct net_device *dev = dst->dev;
1088
1089 for (hh = n->hh; hh; hh = hh->hh_next)
1090 if (hh->hh_type == protocol)
1091 break;
1092
1093 if (!hh && (hh = kzalloc(sizeof(*hh), GFP_ATOMIC)) != NULL) {
1094 seqlock_init(&hh->hh_lock);
1095 hh->hh_type = protocol;
1096 atomic_set(&hh->hh_refcnt, 0);
1097 hh->hh_next = NULL;
1098
1099 if (dev->header_ops->cache(n, hh)) {
1100 kfree(hh);
1101 hh = NULL;
1102 } else {
1103 atomic_inc(&hh->hh_refcnt);
1104 hh->hh_next = n->hh;
1105 n->hh = hh;
1106 if (n->nud_state & NUD_CONNECTED)
1107 hh->hh_output = n->ops->hh_output;
1108 else
1109 hh->hh_output = n->ops->output;
1110 }
1111 }
1112 if (hh) {
1113 atomic_inc(&hh->hh_refcnt);
1114 dst->hh = hh;
1115 }
1116 }
1117
1118 /* This function can be used in contexts where only the old dev_queue_xmit
1119 worked, e.g. if you want to override the normal output path (eql, shaper),
1120 but resolution has not been done yet.
1121 */
1122
1123 int neigh_compat_output(struct sk_buff *skb)
1124 {
1125 struct net_device *dev = skb->dev;
1126
1127 __skb_pull(skb, skb_network_offset(skb));
1128
1129 if (dev_hard_header(skb, dev, ntohs(skb->protocol), NULL, NULL,
1130 skb->len) < 0 &&
1131 dev->header_ops->rebuild(skb))
1132 return 0;
1133
1134 return dev_queue_xmit(skb);
1135 }
1136
1137 /* Slow and careful. */
1138
1139 int neigh_resolve_output(struct sk_buff *skb)
1140 {
1141 struct dst_entry *dst = skb->dst;
1142 struct neighbour *neigh;
1143 int rc = 0;
1144
1145 if (!dst || !(neigh = dst->neighbour))
1146 goto discard;
1147
1148 __skb_pull(skb, skb_network_offset(skb));
1149
1150 if (!neigh_event_send(neigh, skb)) {
1151 int err;
1152 struct net_device *dev = neigh->dev;
1153 if (dev->header_ops->cache && !dst->hh) {
1154 write_lock_bh(&neigh->lock);
1155 if (!dst->hh)
1156 neigh_hh_init(neigh, dst, dst->ops->protocol);
1157 err = dev_hard_header(skb, dev, ntohs(skb->protocol),
1158 neigh->ha, NULL, skb->len);
1159 write_unlock_bh(&neigh->lock);
1160 } else {
1161 read_lock_bh(&neigh->lock);
1162 err = dev_hard_header(skb, dev, ntohs(skb->protocol),
1163 neigh->ha, NULL, skb->len);
1164 read_unlock_bh(&neigh->lock);
1165 }
1166 if (err >= 0)
1167 rc = neigh->ops->queue_xmit(skb);
1168 else
1169 goto out_kfree_skb;
1170 }
1171 out:
1172 return rc;
1173 discard:
1174 NEIGH_PRINTK1("neigh_resolve_output: dst=%p neigh=%p\n",
1175 dst, dst ? dst->neighbour : NULL);
1176 out_kfree_skb:
1177 rc = -EINVAL;
1178 kfree_skb(skb);
1179 goto out;
1180 }
1181
1182 /* As fast as possible without hh cache */
1183
1184 int neigh_connected_output(struct sk_buff *skb)
1185 {
1186 int err;
1187 struct dst_entry *dst = skb->dst;
1188 struct neighbour *neigh = dst->neighbour;
1189 struct net_device *dev = neigh->dev;
1190
1191 __skb_pull(skb, skb_network_offset(skb));
1192
1193 read_lock_bh(&neigh->lock);
1194 err = dev_hard_header(skb, dev, ntohs(skb->protocol),
1195 neigh->ha, NULL, skb->len);
1196 read_unlock_bh(&neigh->lock);
1197 if (err >= 0)
1198 err = neigh->ops->queue_xmit(skb);
1199 else {
1200 err = -EINVAL;
1201 kfree_skb(skb);
1202 }
1203 return err;
1204 }
1205
1206 static void neigh_proxy_process(unsigned long arg)
1207 {
1208 struct neigh_table *tbl = (struct neigh_table *)arg;
1209 long sched_next = 0;
1210 unsigned long now = jiffies;
1211 struct sk_buff *skb;
1212
1213 spin_lock(&tbl->proxy_queue.lock);
1214
1215 skb = tbl->proxy_queue.next;
1216
1217 while (skb != (struct sk_buff *)&tbl->proxy_queue) {
1218 struct sk_buff *back = skb;
1219 long tdif = NEIGH_CB(back)->sched_next - now;
1220
1221 skb = skb->next;
1222 if (tdif <= 0) {
1223 struct net_device *dev = back->dev;
1224 __skb_unlink(back, &tbl->proxy_queue);
1225 if (tbl->proxy_redo && netif_running(dev))
1226 tbl->proxy_redo(back);
1227 else
1228 kfree_skb(back);
1229
1230 dev_put(dev);
1231 } else if (!sched_next || tdif < sched_next)
1232 sched_next = tdif;
1233 }
1234 del_timer(&tbl->proxy_timer);
1235 if (sched_next)
1236 mod_timer(&tbl->proxy_timer, jiffies + sched_next);
1237 spin_unlock(&tbl->proxy_queue.lock);
1238 }
1239
1240 void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
1241 struct sk_buff *skb)
1242 {
1243 unsigned long now = jiffies;
1244 unsigned long sched_next = now + (net_random() % p->proxy_delay);
1245
1246 if (tbl->proxy_queue.qlen > p->proxy_qlen) {
1247 kfree_skb(skb);
1248 return;
1249 }
1250
1251 NEIGH_CB(skb)->sched_next = sched_next;
1252 NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;
1253
1254 spin_lock(&tbl->proxy_queue.lock);
1255 if (del_timer(&tbl->proxy_timer)) {
1256 if (time_before(tbl->proxy_timer.expires, sched_next))
1257 sched_next = tbl->proxy_timer.expires;
1258 }
1259 dst_release(skb->dst);
1260 skb->dst = NULL;
1261 dev_hold(skb->dev);
1262 __skb_queue_tail(&tbl->proxy_queue, skb);
1263 mod_timer(&tbl->proxy_timer, sched_next);
1264 spin_unlock(&tbl->proxy_queue.lock);
1265 }
1266
1267
1268 struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
1269 struct neigh_table *tbl)
1270 {
1271 struct neigh_parms *p = kmemdup(&tbl->parms, sizeof(*p), GFP_KERNEL);
1272
1273 if (p) {
1274 p->tbl = tbl;
1275 atomic_set(&p->refcnt, 1);
1276 INIT_RCU_HEAD(&p->rcu_head);
1277 p->reachable_time =
1278 neigh_rand_reach_time(p->base_reachable_time);
1279 if (dev) {
1280 if (dev->neigh_setup && dev->neigh_setup(dev, p)) {
1281 kfree(p);
1282 return NULL;
1283 }
1284
1285 dev_hold(dev);
1286 p->dev = dev;
1287 }
1288 p->sysctl_table = NULL;
1289 write_lock_bh(&tbl->lock);
1290 p->next = tbl->parms.next;
1291 tbl->parms.next = p;
1292 write_unlock_bh(&tbl->lock);
1293 }
1294 return p;
1295 }
1296
1297 static void neigh_rcu_free_parms(struct rcu_head *head)
1298 {
1299 struct neigh_parms *parms =
1300 container_of(head, struct neigh_parms, rcu_head);
1301
1302 neigh_parms_put(parms);
1303 }
1304
1305 void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
1306 {
1307 struct neigh_parms **p;
1308
1309 if (!parms || parms == &tbl->parms)
1310 return;
1311 write_lock_bh(&tbl->lock);
1312 for (p = &tbl->parms.next; *p; p = &(*p)->next) {
1313 if (*p == parms) {
1314 *p = parms->next;
1315 parms->dead = 1;
1316 write_unlock_bh(&tbl->lock);
1317 if (parms->dev)
1318 dev_put(parms->dev);
1319 call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
1320 return;
1321 }
1322 }
1323 write_unlock_bh(&tbl->lock);
1324 NEIGH_PRINTK1("neigh_parms_release: not found\n");
1325 }
1326
1327 void neigh_parms_destroy(struct neigh_parms *parms)
1328 {
1329 kfree(parms);
1330 }
1331
1332 static struct lock_class_key neigh_table_proxy_queue_class;
1333
1334 void neigh_table_init_no_netlink(struct neigh_table *tbl)
1335 {
1336 unsigned long now = jiffies;
1337 unsigned long phsize;
1338
1339 atomic_set(&tbl->parms.refcnt, 1);
1340 INIT_RCU_HEAD(&tbl->parms.rcu_head);
1341 tbl->parms.reachable_time =
1342 neigh_rand_reach_time(tbl->parms.base_reachable_time);
1343
1344 if (!tbl->kmem_cachep)
1345 tbl->kmem_cachep =
1346 kmem_cache_create(tbl->id, tbl->entry_size, 0,
1347 SLAB_HWCACHE_ALIGN|SLAB_PANIC,
1348 NULL);
1349 tbl->stats = alloc_percpu(struct neigh_statistics);
1350 if (!tbl->stats)
1351 panic("cannot create neighbour cache statistics");
1352
1353 #ifdef CONFIG_PROC_FS
1354 tbl->pde = create_proc_entry(tbl->id, 0, init_net.proc_net_stat);
1355 if (!tbl->pde)
1356 panic("cannot create neighbour proc dir entry");
1357 tbl->pde->proc_fops = &neigh_stat_seq_fops;
1358 tbl->pde->data = tbl;
1359 #endif
1360
1361 tbl->hash_mask = 1;
1362 tbl->hash_buckets = neigh_hash_alloc(tbl->hash_mask + 1);
1363
1364 phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
1365 tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);
1366
1367 if (!tbl->hash_buckets || !tbl->phash_buckets)
1368 panic("cannot allocate neighbour cache hashes");
1369
1370 get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));
1371
1372 rwlock_init(&tbl->lock);
1373 setup_timer(&tbl->gc_timer, neigh_periodic_timer, (unsigned long)tbl);
1374 tbl->gc_timer.expires = now + 1;
1375 add_timer(&tbl->gc_timer);
1376
1377 setup_timer(&tbl->proxy_timer, neigh_proxy_process, (unsigned long)tbl);
1378 skb_queue_head_init_class(&tbl->proxy_queue,
1379 &neigh_table_proxy_queue_class);
1380
1381 tbl->last_flush = now;
1382 tbl->last_rand = now + tbl->parms.reachable_time * 20;
1383 }
1384
1385 void neigh_table_init(struct neigh_table *tbl)
1386 {
1387 struct neigh_table *tmp;
1388
1389 neigh_table_init_no_netlink(tbl);
1390 write_lock(&neigh_tbl_lock);
1391 for (tmp = neigh_tables; tmp; tmp = tmp->next) {
1392 if (tmp->family == tbl->family)
1393 break;
1394 }
1395 tbl->next = neigh_tables;
1396 neigh_tables = tbl;
1397 write_unlock(&neigh_tbl_lock);
1398
1399 if (unlikely(tmp)) {
1400 printk(KERN_ERR "NEIGH: Registering multiple tables for "
1401 "family %d\n", tbl->family);
1402 dump_stack();
1403 }
1404 }
1405
1406 int neigh_table_clear(struct neigh_table *tbl)
1407 {
1408 struct neigh_table **tp;
1409
1410 /* It is not clean... Fix it so the IPv6 module can be unloaded safely */
1411 del_timer_sync(&tbl->gc_timer);
1412 del_timer_sync(&tbl->proxy_timer);
1413 pneigh_queue_purge(&tbl->proxy_queue);
1414 neigh_ifdown(tbl, NULL);
1415 if (atomic_read(&tbl->entries))
1416 printk(KERN_CRIT "neighbour leakage\n");
1417 write_lock(&neigh_tbl_lock);
1418 for (tp = &neigh_tables; *tp; tp = &(*tp)->next) {
1419 if (*tp == tbl) {
1420 *tp = tbl->next;
1421 break;
1422 }
1423 }
1424 write_unlock(&neigh_tbl_lock);
1425
1426 neigh_hash_free(tbl->hash_buckets, tbl->hash_mask + 1);
1427 tbl->hash_buckets = NULL;
1428
1429 kfree(tbl->phash_buckets);
1430 tbl->phash_buckets = NULL;
1431
1432 remove_proc_entry(tbl->id, init_net.proc_net_stat);
1433
1434 free_percpu(tbl->stats);
1435 tbl->stats = NULL;
1436
1437 kmem_cache_destroy(tbl->kmem_cachep);
1438 tbl->kmem_cachep = NULL;
1439
1440 return 0;
1441 }
1442
1443 static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1444 {
1445 struct net *net = skb->sk->sk_net;
1446 struct ndmsg *ndm;
1447 struct nlattr *dst_attr;
1448 struct neigh_table *tbl;
1449 struct net_device *dev = NULL;
1450 int err = -EINVAL;
1451
1452 if (net != &init_net)
1453 return -EINVAL;
1454
1455 if (nlmsg_len(nlh) < sizeof(*ndm))
1456 goto out;
1457
1458 dst_attr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_DST);
1459 if (dst_attr == NULL)
1460 goto out;
1461
1462 ndm = nlmsg_data(nlh);
1463 if (ndm->ndm_ifindex) {
1464 dev = dev_get_by_index(net, ndm->ndm_ifindex);
1465 if (dev == NULL) {
1466 err = -ENODEV;
1467 goto out;
1468 }
1469 }
1470
1471 read_lock(&neigh_tbl_lock);
1472 for (tbl = neigh_tables; tbl; tbl = tbl->next) {
1473 struct neighbour *neigh;
1474
1475 if (tbl->family != ndm->ndm_family)
1476 continue;
1477 read_unlock(&neigh_tbl_lock);
1478
1479 if (nla_len(dst_attr) < tbl->key_len)
1480 goto out_dev_put;
1481
1482 if (ndm->ndm_flags & NTF_PROXY) {
1483 err = pneigh_delete(tbl, nla_data(dst_attr), dev);
1484 goto out_dev_put;
1485 }
1486
1487 if (dev == NULL)
1488 goto out_dev_put;
1489
1490 neigh = neigh_lookup(tbl, nla_data(dst_attr), dev);
1491 if (neigh == NULL) {
1492 err = -ENOENT;
1493 goto out_dev_put;
1494 }
1495
1496 err = neigh_update(neigh, NULL, NUD_FAILED,
1497 NEIGH_UPDATE_F_OVERRIDE |
1498 NEIGH_UPDATE_F_ADMIN);
1499 neigh_release(neigh);
1500 goto out_dev_put;
1501 }
1502 read_unlock(&neigh_tbl_lock);
1503 err = -EAFNOSUPPORT;
1504
1505 out_dev_put:
1506 if (dev)
1507 dev_put(dev);
1508 out:
1509 return err;
1510 }
1511
1512 static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1513 {
1514 struct net *net = skb->sk->sk_net;
1515 struct ndmsg *ndm;
1516 struct nlattr *tb[NDA_MAX+1];
1517 struct neigh_table *tbl;
1518 struct net_device *dev = NULL;
1519 int err;
1520
1521 if (net != &init_net)
1522 return -EINVAL;
1523
1524 err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL);
1525 if (err < 0)
1526 goto out;
1527
1528 err = -EINVAL;
1529 if (tb[NDA_DST] == NULL)
1530 goto out;
1531
1532 ndm = nlmsg_data(nlh);
1533 if (ndm->ndm_ifindex) {
1534 dev = dev_get_by_index(net, ndm->ndm_ifindex);
1535 if (dev == NULL) {
1536 err = -ENODEV;
1537 goto out;
1538 }
1539
1540 if (tb[NDA_LLADDR] && nla_len(tb[NDA_LLADDR]) < dev->addr_len)
1541 goto out_dev_put;
1542 }
1543
1544 read_lock(&neigh_tbl_lock);
1545 for (tbl = neigh_tables; tbl; tbl = tbl->next) {
1546 int flags = NEIGH_UPDATE_F_ADMIN | NEIGH_UPDATE_F_OVERRIDE;
1547 struct neighbour *neigh;
1548 void *dst, *lladdr;
1549
1550 if (tbl->family != ndm->ndm_family)
1551 continue;
1552 read_unlock(&neigh_tbl_lock);
1553
1554 if (nla_len(tb[NDA_DST]) < tbl->key_len)
1555 goto out_dev_put;
1556 dst = nla_data(tb[NDA_DST]);
1557 lladdr = tb[NDA_LLADDR] ? nla_data(tb[NDA_LLADDR]) : NULL;
1558
1559 if (ndm->ndm_flags & NTF_PROXY) {
1560 struct pneigh_entry *pn;
1561
1562 err = -ENOBUFS;
1563 pn = pneigh_lookup(tbl, dst, dev, 1);
1564 if (pn) {
1565 pn->flags = ndm->ndm_flags;
1566 err = 0;
1567 }
1568 goto out_dev_put;
1569 }
1570
1571 if (dev == NULL)
1572 goto out_dev_put;
1573
1574 neigh = neigh_lookup(tbl, dst, dev);
1575 if (neigh == NULL) {
1576 if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
1577 err = -ENOENT;
1578 goto out_dev_put;
1579 }
1580
1581 neigh = __neigh_lookup_errno(tbl, dst, dev);
1582 if (IS_ERR(neigh)) {
1583 err = PTR_ERR(neigh);
1584 goto out_dev_put;
1585 }
1586 } else {
1587 if (nlh->nlmsg_flags & NLM_F_EXCL) {
1588 err = -EEXIST;
1589 neigh_release(neigh);
1590 goto out_dev_put;
1591 }
1592
1593 if (!(nlh->nlmsg_flags & NLM_F_REPLACE))
1594 flags &= ~NEIGH_UPDATE_F_OVERRIDE;
1595 }
1596
1597 err = neigh_update(neigh, lladdr, ndm->ndm_state, flags);
1598 neigh_release(neigh);
1599 goto out_dev_put;
1600 }
1601
1602 read_unlock(&neigh_tbl_lock);
1603 err = -EAFNOSUPPORT;
1604
1605 out_dev_put:
1606 if (dev)
1607 dev_put(dev);
1608 out:
1609 return err;
1610 }
1611
1612 static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
1613 {
1614 struct nlattr *nest;
1615
1616 nest = nla_nest_start(skb, NDTA_PARMS);
1617 if (nest == NULL)
1618 return -ENOBUFS;
1619
1620 if (parms->dev)
1621 NLA_PUT_U32(skb, NDTPA_IFINDEX, parms->dev->ifindex);
1622
1623 NLA_PUT_U32(skb, NDTPA_REFCNT, atomic_read(&parms->refcnt));
1624 NLA_PUT_U32(skb, NDTPA_QUEUE_LEN, parms->queue_len);
1625 NLA_PUT_U32(skb, NDTPA_PROXY_QLEN, parms->proxy_qlen);
1626 NLA_PUT_U32(skb, NDTPA_APP_PROBES, parms->app_probes);
1627 NLA_PUT_U32(skb, NDTPA_UCAST_PROBES, parms->ucast_probes);
1628 NLA_PUT_U32(skb, NDTPA_MCAST_PROBES, parms->mcast_probes);
1629 NLA_PUT_MSECS(skb, NDTPA_REACHABLE_TIME, parms->reachable_time);
1630 NLA_PUT_MSECS(skb, NDTPA_BASE_REACHABLE_TIME,
1631 parms->base_reachable_time);
1632 NLA_PUT_MSECS(skb, NDTPA_GC_STALETIME, parms->gc_staletime);
1633 NLA_PUT_MSECS(skb, NDTPA_DELAY_PROBE_TIME, parms->delay_probe_time);
1634 NLA_PUT_MSECS(skb, NDTPA_RETRANS_TIME, parms->retrans_time);
1635 NLA_PUT_MSECS(skb, NDTPA_ANYCAST_DELAY, parms->anycast_delay);
1636 NLA_PUT_MSECS(skb, NDTPA_PROXY_DELAY, parms->proxy_delay);
1637 NLA_PUT_MSECS(skb, NDTPA_LOCKTIME, parms->locktime);
1638
1639 return nla_nest_end(skb, nest);
1640
1641 nla_put_failure:
1642 return nla_nest_cancel(skb, nest);
1643 }
1644
1645 static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
1646 u32 pid, u32 seq, int type, int flags)
1647 {
1648 struct nlmsghdr *nlh;
1649 struct ndtmsg *ndtmsg;
1650
1651 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
1652 if (nlh == NULL)
1653 return -EMSGSIZE;
1654
1655 ndtmsg = nlmsg_data(nlh);
1656
1657 read_lock_bh(&tbl->lock);
1658 ndtmsg->ndtm_family = tbl->family;
1659 ndtmsg->ndtm_pad1 = 0;
1660 ndtmsg->ndtm_pad2 = 0;
1661
1662 NLA_PUT_STRING(skb, NDTA_NAME, tbl->id);
1663 NLA_PUT_MSECS(skb, NDTA_GC_INTERVAL, tbl->gc_interval);
1664 NLA_PUT_U32(skb, NDTA_THRESH1, tbl->gc_thresh1);
1665 NLA_PUT_U32(skb, NDTA_THRESH2, tbl->gc_thresh2);
1666 NLA_PUT_U32(skb, NDTA_THRESH3, tbl->gc_thresh3);
1667
1668 {
1669 unsigned long now = jiffies;
1670 unsigned int flush_delta = now - tbl->last_flush;
1671 unsigned int rand_delta = now - tbl->last_rand;
1672
1673 struct ndt_config ndc = {
1674 .ndtc_key_len = tbl->key_len,
1675 .ndtc_entry_size = tbl->entry_size,
1676 .ndtc_entries = atomic_read(&tbl->entries),
1677 .ndtc_last_flush = jiffies_to_msecs(flush_delta),
1678 .ndtc_last_rand = jiffies_to_msecs(rand_delta),
1679 .ndtc_hash_rnd = tbl->hash_rnd,
1680 .ndtc_hash_mask = tbl->hash_mask,
1681 .ndtc_hash_chain_gc = tbl->hash_chain_gc,
1682 .ndtc_proxy_qlen = tbl->proxy_queue.qlen,
1683 };
1684
1685 NLA_PUT(skb, NDTA_CONFIG, sizeof(ndc), &ndc);
1686 }
1687
1688 {
1689 int cpu;
1690 struct ndt_stats ndst;
1691
1692 memset(&ndst, 0, sizeof(ndst));
1693
1694 for_each_possible_cpu(cpu) {
1695 struct neigh_statistics *st;
1696
1697 st = per_cpu_ptr(tbl->stats, cpu);
1698 ndst.ndts_allocs += st->allocs;
1699 ndst.ndts_destroys += st->destroys;
1700 ndst.ndts_hash_grows += st->hash_grows;
1701 ndst.ndts_res_failed += st->res_failed;
1702 ndst.ndts_lookups += st->lookups;
1703 ndst.ndts_hits += st->hits;
1704 ndst.ndts_rcv_probes_mcast += st->rcv_probes_mcast;
1705 ndst.ndts_rcv_probes_ucast += st->rcv_probes_ucast;
1706 ndst.ndts_periodic_gc_runs += st->periodic_gc_runs;
1707 ndst.ndts_forced_gc_runs += st->forced_gc_runs;
1708 }
1709
1710 NLA_PUT(skb, NDTA_STATS, sizeof(ndst), &ndst);
1711 }
1712
1713 BUG_ON(tbl->parms.dev);
1714 if (neightbl_fill_parms(skb, &tbl->parms) < 0)
1715 goto nla_put_failure;
1716
1717 read_unlock_bh(&tbl->lock);
1718 return nlmsg_end(skb, nlh);
1719
1720 nla_put_failure:
1721 read_unlock_bh(&tbl->lock);
1722 nlmsg_cancel(skb, nlh);
1723 return -EMSGSIZE;
1724 }
1725
1726 static int neightbl_fill_param_info(struct sk_buff *skb,
1727 struct neigh_table *tbl,
1728 struct neigh_parms *parms,
1729 u32 pid, u32 seq, int type,
1730 unsigned int flags)
1731 {
1732 struct ndtmsg *ndtmsg;
1733 struct nlmsghdr *nlh;
1734
1735 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
1736 if (nlh == NULL)
1737 return -EMSGSIZE;
1738
1739 ndtmsg = nlmsg_data(nlh);
1740
1741 read_lock_bh(&tbl->lock);
1742 ndtmsg->ndtm_family = tbl->family;
1743 ndtmsg->ndtm_pad1 = 0;
1744 ndtmsg->ndtm_pad2 = 0;
1745
1746 if (nla_put_string(skb, NDTA_NAME, tbl->id) < 0 ||
1747 neightbl_fill_parms(skb, parms) < 0)
1748 goto errout;
1749
1750 read_unlock_bh(&tbl->lock);
1751 return nlmsg_end(skb, nlh);
1752 errout:
1753 read_unlock_bh(&tbl->lock);
1754 nlmsg_cancel(skb, nlh);
1755 return -EMSGSIZE;
1756 }
1757
1758 static inline struct neigh_parms *lookup_neigh_params(struct neigh_table *tbl,
1759 int ifindex)
1760 {
1761 struct neigh_parms *p;
1762
1763 for (p = &tbl->parms; p; p = p->next)
1764 if ((p->dev && p->dev->ifindex == ifindex) ||
1765 (!p->dev && !ifindex))
1766 return p;
1767
1768 return NULL;
1769 }
1770
1771 static const struct nla_policy nl_neightbl_policy[NDTA_MAX+1] = {
1772 [NDTA_NAME] = { .type = NLA_STRING },
1773 [NDTA_THRESH1] = { .type = NLA_U32 },
1774 [NDTA_THRESH2] = { .type = NLA_U32 },
1775 [NDTA_THRESH3] = { .type = NLA_U32 },
1776 [NDTA_GC_INTERVAL] = { .type = NLA_U64 },
1777 [NDTA_PARMS] = { .type = NLA_NESTED },
1778 };
1779
1780 static const struct nla_policy nl_ntbl_parm_policy[NDTPA_MAX+1] = {
1781 [NDTPA_IFINDEX] = { .type = NLA_U32 },
1782 [NDTPA_QUEUE_LEN] = { .type = NLA_U32 },
1783 [NDTPA_PROXY_QLEN] = { .type = NLA_U32 },
1784 [NDTPA_APP_PROBES] = { .type = NLA_U32 },
1785 [NDTPA_UCAST_PROBES] = { .type = NLA_U32 },
1786 [NDTPA_MCAST_PROBES] = { .type = NLA_U32 },
1787 [NDTPA_BASE_REACHABLE_TIME] = { .type = NLA_U64 },
1788 [NDTPA_GC_STALETIME] = { .type = NLA_U64 },
1789 [NDTPA_DELAY_PROBE_TIME] = { .type = NLA_U64 },
1790 [NDTPA_RETRANS_TIME] = { .type = NLA_U64 },
1791 [NDTPA_ANYCAST_DELAY] = { .type = NLA_U64 },
1792 [NDTPA_PROXY_DELAY] = { .type = NLA_U64 },
1793 [NDTPA_LOCKTIME] = { .type = NLA_U64 },
1794 };
1795
1796 static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1797 {
1798 struct net *net = skb->sk->sk_net;
1799 struct neigh_table *tbl;
1800 struct ndtmsg *ndtmsg;
1801 struct nlattr *tb[NDTA_MAX+1];
1802 int err;
1803
1804 if (net != &init_net)
1805 return -EINVAL;
1806
1807 err = nlmsg_parse(nlh, sizeof(*ndtmsg), tb, NDTA_MAX,
1808 nl_neightbl_policy);
1809 if (err < 0)
1810 goto errout;
1811
1812 if (tb[NDTA_NAME] == NULL) {
1813 err = -EINVAL;
1814 goto errout;
1815 }
1816
1817 ndtmsg = nlmsg_data(nlh);
1818 read_lock(&neigh_tbl_lock);
1819 for (tbl = neigh_tables; tbl; tbl = tbl->next) {
1820 if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
1821 continue;
1822
1823 if (nla_strcmp(tb[NDTA_NAME], tbl->id) == 0)
1824 break;
1825 }
1826
1827 if (tbl == NULL) {
1828 err = -ENOENT;
1829 goto errout_locked;
1830 }
1831
1832 /*
1833 * We acquire tbl->lock to be nice to the periodic timers and
1834 * make sure they always see a consistent set of values.
1835 */
1836 write_lock_bh(&tbl->lock);
1837
1838 if (tb[NDTA_PARMS]) {
1839 struct nlattr *tbp[NDTPA_MAX+1];
1840 struct neigh_parms *p;
1841 int i, ifindex = 0;
1842
1843 err = nla_parse_nested(tbp, NDTPA_MAX, tb[NDTA_PARMS],
1844 nl_ntbl_parm_policy);
1845 if (err < 0)
1846 goto errout_tbl_lock;
1847
1848 if (tbp[NDTPA_IFINDEX])
1849 ifindex = nla_get_u32(tbp[NDTPA_IFINDEX]);
1850
1851 p = lookup_neigh_params(tbl, ifindex);
1852 if (p == NULL) {
1853 err = -ENOENT;
1854 goto errout_tbl_lock;
1855 }
1856
1857 for (i = 1; i <= NDTPA_MAX; i++) {
1858 if (tbp[i] == NULL)
1859 continue;
1860
1861 switch (i) {
1862 case NDTPA_QUEUE_LEN:
1863 p->queue_len = nla_get_u32(tbp[i]);
1864 break;
1865 case NDTPA_PROXY_QLEN:
1866 p->proxy_qlen = nla_get_u32(tbp[i]);
1867 break;
1868 case NDTPA_APP_PROBES:
1869 p->app_probes = nla_get_u32(tbp[i]);
1870 break;
1871 case NDTPA_UCAST_PROBES:
1872 p->ucast_probes = nla_get_u32(tbp[i]);
1873 break;
1874 case NDTPA_MCAST_PROBES:
1875 p->mcast_probes = nla_get_u32(tbp[i]);
1876 break;
1877 case NDTPA_BASE_REACHABLE_TIME:
1878 p->base_reachable_time = nla_get_msecs(tbp[i]);
1879 break;
1880 case NDTPA_GC_STALETIME:
1881 p->gc_staletime = nla_get_msecs(tbp[i]);
1882 break;
1883 case NDTPA_DELAY_PROBE_TIME:
1884 p->delay_probe_time = nla_get_msecs(tbp[i]);
1885 break;
1886 case NDTPA_RETRANS_TIME:
1887 p->retrans_time = nla_get_msecs(tbp[i]);
1888 break;
1889 case NDTPA_ANYCAST_DELAY:
1890 p->anycast_delay = nla_get_msecs(tbp[i]);
1891 break;
1892 case NDTPA_PROXY_DELAY:
1893 p->proxy_delay = nla_get_msecs(tbp[i]);
1894 break;
1895 case NDTPA_LOCKTIME:
1896 p->locktime = nla_get_msecs(tbp[i]);
1897 break;
1898 }
1899 }
1900 }
1901
1902 if (tb[NDTA_THRESH1])
1903 tbl->gc_thresh1 = nla_get_u32(tb[NDTA_THRESH1]);
1904
1905 if (tb[NDTA_THRESH2])
1906 tbl->gc_thresh2 = nla_get_u32(tb[NDTA_THRESH2]);
1907
1908 if (tb[NDTA_THRESH3])
1909 tbl->gc_thresh3 = nla_get_u32(tb[NDTA_THRESH3]);
1910
1911 if (tb[NDTA_GC_INTERVAL])
1912 tbl->gc_interval = nla_get_msecs(tb[NDTA_GC_INTERVAL]);
1913
1914 err = 0;
1915
1916 errout_tbl_lock:
1917 write_unlock_bh(&tbl->lock);
1918 errout_locked:
1919 read_unlock(&neigh_tbl_lock);
1920 errout:
1921 return err;
1922 }
1923
1924 static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
1925 {
1926 struct net *net = skb->sk->sk_net;
1927 int family, tidx, nidx = 0;
1928 int tbl_skip = cb->args[0];
1929 int neigh_skip = cb->args[1];
1930 struct neigh_table *tbl;
1931
1932 if (net != &init_net)
1933 return 0;
1934
1935 family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
1936
1937 read_lock(&neigh_tbl_lock);
1938 for (tbl = neigh_tables, tidx = 0; tbl; tbl = tbl->next, tidx++) {
1939 struct neigh_parms *p;
1940
1941 if (tidx < tbl_skip || (family && tbl->family != family))
1942 continue;
1943
1944 if (neightbl_fill_info(skb, tbl, NETLINK_CB(cb->skb).pid,
1945 cb->nlh->nlmsg_seq, RTM_NEWNEIGHTBL,
1946 NLM_F_MULTI) <= 0)
1947 break;
1948
1949 for (nidx = 0, p = tbl->parms.next; p; p = p->next, nidx++) {
1950 if (nidx < neigh_skip)
1951 continue;
1952
1953 if (neightbl_fill_param_info(skb, tbl, p,
1954 NETLINK_CB(cb->skb).pid,
1955 cb->nlh->nlmsg_seq,
1956 RTM_NEWNEIGHTBL,
1957 NLM_F_MULTI) <= 0)
1958 goto out;
1959 }
1960
1961 neigh_skip = 0;
1962 }
1963 out:
1964 read_unlock(&neigh_tbl_lock);
1965 cb->args[0] = tidx;
1966 cb->args[1] = nidx;
1967
1968 return skb->len;
1969 }
1970
1971 static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
1972 u32 pid, u32 seq, int type, unsigned int flags)
1973 {
1974 unsigned long now = jiffies;
1975 struct nda_cacheinfo ci;
1976 struct nlmsghdr *nlh;
1977 struct ndmsg *ndm;
1978
1979 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
1980 if (nlh == NULL)
1981 return -EMSGSIZE;
1982
1983 ndm = nlmsg_data(nlh);
1984 ndm->ndm_family = neigh->ops->family;
1985 ndm->ndm_pad1 = 0;
1986 ndm->ndm_pad2 = 0;
1987 ndm->ndm_flags = neigh->flags;
1988 ndm->ndm_type = neigh->type;
1989 ndm->ndm_ifindex = neigh->dev->ifindex;
1990
1991 NLA_PUT(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key);
1992
1993 read_lock_bh(&neigh->lock);
1994 ndm->ndm_state = neigh->nud_state;
1995 if ((neigh->nud_state & NUD_VALID) &&
1996 nla_put(skb, NDA_LLADDR, neigh->dev->addr_len, neigh->ha) < 0) {
1997 read_unlock_bh(&neigh->lock);
1998 goto nla_put_failure;
1999 }
2000
2001 ci.ndm_used = now - neigh->used;
2002 ci.ndm_confirmed = now - neigh->confirmed;
2003 ci.ndm_updated = now - neigh->updated;
2004 ci.ndm_refcnt = atomic_read(&neigh->refcnt) - 1;
2005 read_unlock_bh(&neigh->lock);
2006
2007 NLA_PUT_U32(skb, NDA_PROBES, atomic_read(&neigh->probes));
2008 NLA_PUT(skb, NDA_CACHEINFO, sizeof(ci), &ci);
2009
2010 return nlmsg_end(skb, nlh);
2011
2012 nla_put_failure:
2013 nlmsg_cancel(skb, nlh);
2014 return -EMSGSIZE;
2015 }
2016
2017 static void neigh_update_notify(struct neighbour *neigh)
2018 {
2019 call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
2020 __neigh_notify(neigh, RTM_NEWNEIGH, 0);
2021 }
2022
2023 static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2024 struct netlink_callback *cb)
2025 {
2026 struct neighbour *n;
2027 int rc, h, s_h = cb->args[1];
2028 int idx, s_idx = idx = cb->args[2];
2029
2030 read_lock_bh(&tbl->lock);
2031 for (h = 0; h <= tbl->hash_mask; h++) {
2032 if (h < s_h)
2033 continue;
2034 if (h > s_h)
2035 s_idx = 0;
2036 for (n = tbl->hash_buckets[h], idx = 0; n; n = n->next, idx++) {
2037 if (idx < s_idx)
2038 continue;
2039 if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).pid,
2040 cb->nlh->nlmsg_seq,
2041 RTM_NEWNEIGH,
2042 NLM_F_MULTI) <= 0) {
2043 read_unlock_bh(&tbl->lock);
2044 rc = -1;
2045 goto out;
2046 }
2047 }
2048 }
2049 read_unlock_bh(&tbl->lock);
2050 rc = skb->len;
2051 out:
2052 cb->args[1] = h;
2053 cb->args[2] = idx;
2054 return rc;
2055 }
2056
2057 static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2058 {
2059 struct net *net = skb->sk->sk_net;
2060 struct neigh_table *tbl;
2061 int t, family, s_t;
2062
2063 if (net != &init_net)
2064 return 0;
2065
2066 read_lock(&neigh_tbl_lock);
2067 family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
2068 s_t = cb->args[0];
2069
2070 for (tbl = neigh_tables, t = 0; tbl; tbl = tbl->next, t++) {
2071 if (t < s_t || (family && tbl->family != family))
2072 continue;
2073 if (t > s_t)
2074 memset(&cb->args[1], 0, sizeof(cb->args) -
2075 sizeof(cb->args[0]));
2076 if (neigh_dump_table(tbl, skb, cb) < 0)
2077 break;
2078 }
2079 read_unlock(&neigh_tbl_lock);
2080
2081 cb->args[0] = t;
2082 return skb->len;
2083 }
2084
2085 void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
2086 {
2087 int chain;
2088
2089 read_lock_bh(&tbl->lock);
2090 for (chain = 0; chain <= tbl->hash_mask; chain++) {
2091 struct neighbour *n;
2092
2093 for (n = tbl->hash_buckets[chain]; n; n = n->next)
2094 cb(n, cookie);
2095 }
2096 read_unlock_bh(&tbl->lock);
2097 }
2098 EXPORT_SYMBOL(neigh_for_each);
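/*
 * Minimal usage sketch (not part of the original file; count_one() is
 * hypothetical): count the entries in a table with the iterator above.
 * The callback runs with tbl->lock read-held and BHs disabled, so it
 * must not sleep or re-enter the table.
 *
 *	static void count_one(struct neighbour *n, void *cookie)
 *	{
 *		(*(int *)cookie)++;
 *	}
 *
 *	int count = 0;
 *	neigh_for_each(tbl, count_one, &count);
 */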
2099
2100 /* The tbl->lock must be held as a writer and BH disabled. */
2101 void __neigh_for_each_release(struct neigh_table *tbl,
2102 int (*cb)(struct neighbour *))
2103 {
2104 int chain;
2105
2106 for (chain = 0; chain <= tbl->hash_mask; chain++) {
2107 struct neighbour *n, **np;
2108
2109 np = &tbl->hash_buckets[chain];
2110 while ((n = *np) != NULL) {
2111 int release;
2112
2113 write_lock(&n->lock);
2114 release = cb(n);
2115 if (release) {
2116 *np = n->next;
2117 n->dead = 1;
2118 } else
2119 np = &n->next;
2120 write_unlock(&n->lock);
2121 if (release)
2122 neigh_cleanup_and_release(n);
2123 }
2124 }
2125 }
2126 EXPORT_SYMBOL(__neigh_for_each_release);
2127
2128 #ifdef CONFIG_PROC_FS
2129
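/* seq_file helpers used by the per-protocol /proc interfaces (e.g.
 * /proc/net/arp).  The iterator first walks the main hash table
 * (neigh_get_*) and then, unless NEIGH_SEQ_NEIGH_ONLY is set, the proxy
 * entries (pneigh_get_*).  NEIGH_SEQ_SKIP_NOARP hides entries that are
 * permanently NUD_NOARP; NEIGH_SEQ_IS_PNEIGH is internal state recording
 * which of the two tables the iterator is currently in.
 */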
2130 static struct neighbour *neigh_get_first(struct seq_file *seq)
2131 {
2132 struct neigh_seq_state *state = seq->private;
2133 struct neigh_table *tbl = state->tbl;
2134 struct neighbour *n = NULL;
2135 int bucket = state->bucket;
2136
2137 state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
2138 for (bucket = 0; bucket <= tbl->hash_mask; bucket++) {
2139 n = tbl->hash_buckets[bucket];
2140
2141 while (n) {
2142 if (state->neigh_sub_iter) {
2143 loff_t fakep = 0;
2144 void *v;
2145
2146 v = state->neigh_sub_iter(state, n, &fakep);
2147 if (!v)
2148 goto next;
2149 }
2150 if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
2151 break;
2152 if (n->nud_state & ~NUD_NOARP)
2153 break;
2154 next:
2155 n = n->next;
2156 }
2157
2158 if (n)
2159 break;
2160 }
2161 state->bucket = bucket;
2162
2163 return n;
2164 }
2165
2166 static struct neighbour *neigh_get_next(struct seq_file *seq,
2167 struct neighbour *n,
2168 loff_t *pos)
2169 {
2170 struct neigh_seq_state *state = seq->private;
2171 struct neigh_table *tbl = state->tbl;
2172
2173 if (state->neigh_sub_iter) {
2174 void *v = state->neigh_sub_iter(state, n, pos);
2175 if (v)
2176 return n;
2177 }
2178 n = n->next;
2179
2180 while (1) {
2181 while (n) {
2182 if (state->neigh_sub_iter) {
2183 void *v = state->neigh_sub_iter(state, n, pos);
2184 if (v)
2185 return n;
2186 goto next;
2187 }
2188 if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
2189 break;
2190
2191 if (n->nud_state & ~NUD_NOARP)
2192 break;
2193 next:
2194 n = n->next;
2195 }
2196
2197 if (n)
2198 break;
2199
2200 if (++state->bucket > tbl->hash_mask)
2201 break;
2202
2203 n = tbl->hash_buckets[state->bucket];
2204 }
2205
2206 if (n && pos)
2207 --(*pos);
2208 return n;
2209 }
2210
2211 static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
2212 {
2213 struct neighbour *n = neigh_get_first(seq);
2214
2215 if (n) {
2216 while (*pos) {
2217 n = neigh_get_next(seq, n, pos);
2218 if (!n)
2219 break;
2220 }
2221 }
2222 return *pos ? NULL : n;
2223 }
2224
2225 static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
2226 {
2227 struct neigh_seq_state *state = seq->private;
2228 struct neigh_table *tbl = state->tbl;
2229 struct pneigh_entry *pn = NULL;
2230 int bucket = state->bucket;
2231
2232 state->flags |= NEIGH_SEQ_IS_PNEIGH;
2233 for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
2234 pn = tbl->phash_buckets[bucket];
2235 if (pn)
2236 break;
2237 }
2238 state->bucket = bucket;
2239
2240 return pn;
2241 }
2242
2243 static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
2244 struct pneigh_entry *pn,
2245 loff_t *pos)
2246 {
2247 struct neigh_seq_state *state = seq->private;
2248 struct neigh_table *tbl = state->tbl;
2249
2250 pn = pn->next;
2251 while (!pn) {
2252 if (++state->bucket > PNEIGH_HASHMASK)
2253 break;
2254 pn = tbl->phash_buckets[state->bucket];
2255 if (pn)
2256 break;
2257 }
2258
2259 if (pn && pos)
2260 --(*pos);
2261
2262 return pn;
2263 }
2264
2265 static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
2266 {
2267 struct pneigh_entry *pn = pneigh_get_first(seq);
2268
2269 if (pn) {
2270 while (*pos) {
2271 pn = pneigh_get_next(seq, pn, pos);
2272 if (!pn)
2273 break;
2274 }
2275 }
2276 return *pos ? NULL : pn;
2277 }
2278
2279 static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
2280 {
2281 struct neigh_seq_state *state = seq->private;
2282 void *rc;
2283
2284 rc = neigh_get_idx(seq, pos);
2285 if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
2286 rc = pneigh_get_idx(seq, pos);
2287
2288 return rc;
2289 }
2290
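/* Entry points for protocol seq_file implementations.  neigh_seq_start()
 * takes tbl->lock for reading with BHs disabled and keeps it held until
 * neigh_seq_stop() runs, so the ->show callback must not sleep.  A
 * protocol typically wires these up along the following lines (sketch,
 * ARP-style names are illustrative):
 *
 *	static void *arp_seq_start(struct seq_file *seq, loff_t *pos)
 *	{
 *		return neigh_seq_start(seq, pos, &arp_tbl,
 *				       NEIGH_SEQ_SKIP_NOARP);
 *	}
 *
 * with neigh_seq_next() and neigh_seq_stop() used directly as the
 * ->next and ->stop operations.
 */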
2291 void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
2292 {
2293 struct neigh_seq_state *state = seq->private;
2294 loff_t pos_minus_one;
2295
2296 state->tbl = tbl;
2297 state->bucket = 0;
2298 state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);
2299
2300 read_lock_bh(&tbl->lock);
2301
2302 pos_minus_one = *pos - 1;
2303 return *pos ? neigh_get_idx_any(seq, &pos_minus_one) : SEQ_START_TOKEN;
2304 }
2305 EXPORT_SYMBOL(neigh_seq_start);
2306
2307 void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2308 {
2309 struct neigh_seq_state *state;
2310 void *rc;
2311
2312 if (v == SEQ_START_TOKEN) {
2313 rc = neigh_get_idx(seq, pos);
2314 goto out;
2315 }
2316
2317 state = seq->private;
2318 if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) {
2319 rc = neigh_get_next(seq, v, NULL);
2320 if (rc)
2321 goto out;
2322 if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY))
2323 rc = pneigh_get_first(seq);
2324 } else {
2325 BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY);
2326 rc = pneigh_get_next(seq, v, NULL);
2327 }
2328 out:
2329 ++(*pos);
2330 return rc;
2331 }
2332 EXPORT_SYMBOL(neigh_seq_next);
2333
2334 void neigh_seq_stop(struct seq_file *seq, void *v)
2335 {
2336 struct neigh_seq_state *state = seq->private;
2337 struct neigh_table *tbl = state->tbl;
2338
2339 read_unlock_bh(&tbl->lock);
2340 }
2341 EXPORT_SYMBOL(neigh_seq_stop);
2342
2343 /* statistics via seq_file */
2344
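/* The neigh_stat_* handlers print one line of per-CPU cache statistics
 * per possible CPU under /proc/net/stat/.  *pos encodes cpu + 1;
 * position 0 is SEQ_START_TOKEN, which emits the column header.
 */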
2345 static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
2346 {
2347 struct proc_dir_entry *pde = seq->private;
2348 struct neigh_table *tbl = pde->data;
2349 int cpu;
2350
2351 if (*pos == 0)
2352 return SEQ_START_TOKEN;
2353
2354 for (cpu = *pos-1; cpu < NR_CPUS; ++cpu) {
2355 if (!cpu_possible(cpu))
2356 continue;
2357 *pos = cpu+1;
2358 return per_cpu_ptr(tbl->stats, cpu);
2359 }
2360 return NULL;
2361 }
2362
2363 static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2364 {
2365 struct proc_dir_entry *pde = seq->private;
2366 struct neigh_table *tbl = pde->data;
2367 int cpu;
2368
2369 for (cpu = *pos; cpu < NR_CPUS; ++cpu) {
2370 if (!cpu_possible(cpu))
2371 continue;
2372 *pos = cpu+1;
2373 return per_cpu_ptr(tbl->stats, cpu);
2374 }
2375 return NULL;
2376 }
2377
2378 static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
2379 {
2380
2381 }
2382
2383 static int neigh_stat_seq_show(struct seq_file *seq, void *v)
2384 {
2385 struct proc_dir_entry *pde = seq->private;
2386 struct neigh_table *tbl = pde->data;
2387 struct neigh_statistics *st = v;
2388
2389 if (v == SEQ_START_TOKEN) {
2390 seq_printf(seq, "entries allocs destroys hash_grows lookups hits res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs\n");
2391 return 0;
2392 }
2393
2394 seq_printf(seq, "%08x %08lx %08lx %08lx %08lx %08lx %08lx "
2395 "%08lx %08lx %08lx %08lx\n",
2396 atomic_read(&tbl->entries),
2397
2398 st->allocs,
2399 st->destroys,
2400 st->hash_grows,
2401
2402 st->lookups,
2403 st->hits,
2404
2405 st->res_failed,
2406
2407 st->rcv_probes_mcast,
2408 st->rcv_probes_ucast,
2409
2410 st->periodic_gc_runs,
2411 st->forced_gc_runs
2412 );
2413
2414 return 0;
2415 }
2416
2417 static const struct seq_operations neigh_stat_seq_ops = {
2418 .start = neigh_stat_seq_start,
2419 .next = neigh_stat_seq_next,
2420 .stop = neigh_stat_seq_stop,
2421 .show = neigh_stat_seq_show,
2422 };
2423
2424 static int neigh_stat_seq_open(struct inode *inode, struct file *file)
2425 {
2426 int ret = seq_open(file, &neigh_stat_seq_ops);
2427
2428 if (!ret) {
2429 struct seq_file *sf = file->private_data;
2430 sf->private = PDE(inode);
2431 }
2432 return ret;
2433 }
2434
2435 static const struct file_operations neigh_stat_seq_fops = {
2436 .owner = THIS_MODULE,
2437 .open = neigh_stat_seq_open,
2438 .read = seq_read,
2439 .llseek = seq_lseek,
2440 .release = seq_release,
2441 };
2442
2443 #endif /* CONFIG_PROC_FS */
2444
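/* Worst-case payload of a neighbour notification: the ndmsg header plus
 * NDA_DST and NDA_LLADDR at the maximum hardware address length, the
 * cache info block and the 32-bit probe counter.  __neigh_notify() uses
 * this to size the skb so that neigh_fill_info() cannot overflow it.
 */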
2445 static inline size_t neigh_nlmsg_size(void)
2446 {
2447 return NLMSG_ALIGN(sizeof(struct ndmsg))
2448 + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
2449 + nla_total_size(MAX_ADDR_LEN) /* NDA_LLADDR */
2450 + nla_total_size(sizeof(struct nda_cacheinfo))
2451 + nla_total_size(4); /* NDA_PROBES */
2452 }
2453
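/* Multicast a netlink message about neighbour n to the RTNLGRP_NEIGH
 * group.  Runs in atomic context; if allocation or fill fails, the error
 * is recorded on the rtnetlink socket via rtnl_set_sk_err() so listeners
 * can notice the lost event.
 */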
2454 static void __neigh_notify(struct neighbour *n, int type, int flags)
2455 {
2456 struct sk_buff *skb;
2457 int err = -ENOBUFS;
2458
2459 skb = nlmsg_new(neigh_nlmsg_size(), GFP_ATOMIC);
2460 if (skb == NULL)
2461 goto errout;
2462
2463 err = neigh_fill_info(skb, n, 0, 0, type, flags);
2464 if (err < 0) {
2465 /* -EMSGSIZE implies BUG in neigh_nlmsg_size() */
2466 WARN_ON(err == -EMSGSIZE);
2467 kfree_skb(skb);
2468 goto errout;
2469 }
2470 err = rtnl_notify(skb, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
2471 errout:
2472 if (err < 0)
2473 rtnl_set_sk_err(RTNLGRP_NEIGH, err);
2474 }
2475
2476 #ifdef CONFIG_ARPD
2477 void neigh_app_ns(struct neighbour *n)
2478 {
2479 __neigh_notify(n, RTM_GETNEIGH, NLM_F_REQUEST);
2480 }
2481 #endif /* CONFIG_ARPD */
2482
2483 #ifdef CONFIG_SYSCTL
2484
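/* Template for the per-device (and per-protocol "default") neighbour
 * sysctls.  neigh_sysctl_register() copies it, points each neigh_vars[]
 * slot at the matching neigh_parms field (the index-to-field mapping is
 * hard-coded there) and chains the directory tables below to build
 * net/<protocol>/neigh/<device>/<variable>.  The trailing gc_* entries
 * only apply to the "default" table and are cut off for real devices.
 */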
2485 static struct neigh_sysctl_table {
2486 struct ctl_table_header *sysctl_header;
2487 ctl_table neigh_vars[__NET_NEIGH_MAX];
2488 ctl_table neigh_dev[2];
2489 ctl_table neigh_neigh_dir[2];
2490 ctl_table neigh_proto_dir[2];
2491 ctl_table neigh_root_dir[2];
2492 } neigh_sysctl_template __read_mostly = {
2493 .neigh_vars = {
2494 {
2495 .ctl_name = NET_NEIGH_MCAST_SOLICIT,
2496 .procname = "mcast_solicit",
2497 .maxlen = sizeof(int),
2498 .mode = 0644,
2499 .proc_handler = &proc_dointvec,
2500 },
2501 {
2502 .ctl_name = NET_NEIGH_UCAST_SOLICIT,
2503 .procname = "ucast_solicit",
2504 .maxlen = sizeof(int),
2505 .mode = 0644,
2506 .proc_handler = &proc_dointvec,
2507 },
2508 {
2509 .ctl_name = NET_NEIGH_APP_SOLICIT,
2510 .procname = "app_solicit",
2511 .maxlen = sizeof(int),
2512 .mode = 0644,
2513 .proc_handler = &proc_dointvec,
2514 },
2515 {
2516 .procname = "retrans_time",
2517 .maxlen = sizeof(int),
2518 .mode = 0644,
2519 .proc_handler = &proc_dointvec_userhz_jiffies,
2520 },
2521 {
2522 .ctl_name = NET_NEIGH_REACHABLE_TIME,
2523 .procname = "base_reachable_time",
2524 .maxlen = sizeof(int),
2525 .mode = 0644,
2526 .proc_handler = &proc_dointvec_jiffies,
2527 .strategy = &sysctl_jiffies,
2528 },
2529 {
2530 .ctl_name = NET_NEIGH_DELAY_PROBE_TIME,
2531 .procname = "delay_first_probe_time",
2532 .maxlen = sizeof(int),
2533 .mode = 0644,
2534 .proc_handler = &proc_dointvec_jiffies,
2535 .strategy = &sysctl_jiffies,
2536 },
2537 {
2538 .ctl_name = NET_NEIGH_GC_STALE_TIME,
2539 .procname = "gc_stale_time",
2540 .maxlen = sizeof(int),
2541 .mode = 0644,
2542 .proc_handler = &proc_dointvec_jiffies,
2543 .strategy = &sysctl_jiffies,
2544 },
2545 {
2546 .ctl_name = NET_NEIGH_UNRES_QLEN,
2547 .procname = "unres_qlen",
2548 .maxlen = sizeof(int),
2549 .mode = 0644,
2550 .proc_handler = &proc_dointvec,
2551 },
2552 {
2553 .ctl_name = NET_NEIGH_PROXY_QLEN,
2554 .procname = "proxy_qlen",
2555 .maxlen = sizeof(int),
2556 .mode = 0644,
2557 .proc_handler = &proc_dointvec,
2558 },
2559 {
2560 .procname = "anycast_delay",
2561 .maxlen = sizeof(int),
2562 .mode = 0644,
2563 .proc_handler = &proc_dointvec_userhz_jiffies,
2564 },
2565 {
2566 .procname = "proxy_delay",
2567 .maxlen = sizeof(int),
2568 .mode = 0644,
2569 .proc_handler = &proc_dointvec_userhz_jiffies,
2570 },
2571 {
2572 .procname = "locktime",
2573 .maxlen = sizeof(int),
2574 .mode = 0644,
2575 .proc_handler = &proc_dointvec_userhz_jiffies,
2576 },
2577 {
2578 .ctl_name = NET_NEIGH_RETRANS_TIME_MS,
2579 .procname = "retrans_time_ms",
2580 .maxlen = sizeof(int),
2581 .mode = 0644,
2582 .proc_handler = &proc_dointvec_ms_jiffies,
2583 .strategy = &sysctl_ms_jiffies,
2584 },
2585 {
2586 .ctl_name = NET_NEIGH_REACHABLE_TIME_MS,
2587 .procname = "base_reachable_time_ms",
2588 .maxlen = sizeof(int),
2589 .mode = 0644,
2590 .proc_handler = &proc_dointvec_ms_jiffies,
2591 .strategy = &sysctl_ms_jiffies,
2592 },
2593 {
2594 .ctl_name = NET_NEIGH_GC_INTERVAL,
2595 .procname = "gc_interval",
2596 .maxlen = sizeof(int),
2597 .mode = 0644,
2598 .proc_handler = &proc_dointvec_jiffies,
2599 .strategy = &sysctl_jiffies,
2600 },
2601 {
2602 .ctl_name = NET_NEIGH_GC_THRESH1,
2603 .procname = "gc_thresh1",
2604 .maxlen = sizeof(int),
2605 .mode = 0644,
2606 .proc_handler = &proc_dointvec,
2607 },
2608 {
2609 .ctl_name = NET_NEIGH_GC_THRESH2,
2610 .procname = "gc_thresh2",
2611 .maxlen = sizeof(int),
2612 .mode = 0644,
2613 .proc_handler = &proc_dointvec,
2614 },
2615 {
2616 .ctl_name = NET_NEIGH_GC_THRESH3,
2617 .procname = "gc_thresh3",
2618 .maxlen = sizeof(int),
2619 .mode = 0644,
2620 .proc_handler = &proc_dointvec,
2621 },
2622 {}
2623 },
2624 .neigh_dev = {
2625 {
2626 .ctl_name = NET_PROTO_CONF_DEFAULT,
2627 .procname = "default",
2628 .mode = 0555,
2629 },
2630 },
2631 .neigh_neigh_dir = {
2632 {
2633 .procname = "neigh",
2634 .mode = 0555,
2635 },
2636 },
2637 .neigh_proto_dir = {
2638 {
2639 .mode = 0555,
2640 },
2641 },
2642 .neigh_root_dir = {
2643 {
2644 .ctl_name = CTL_NET,
2645 .procname = "net",
2646 .mode = 0555,
2647 },
2648 },
2649 };
2650
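/* Register the sysctl tree for one neigh_parms instance.  The template
 * above is duplicated and bound to @p; when a real device is given the
 * table is terminated before the gc_* entries, and an optional
 * handler/strategy pair overrides the four time-related knobs.  As an
 * illustration (IPv4 names, call site not shown here), ARP registers
 * its per-device parms roughly as:
 *
 *	neigh_sysctl_register(dev, p, NET_IPV4, NET_IPV4_NEIGH,
 *			      "ipv4", NULL, NULL);
 */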
2651 int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
2652 int p_id, int pdev_id, char *p_name,
2653 proc_handler *handler, ctl_handler *strategy)
2654 {
2655 struct neigh_sysctl_table *t = kmemdup(&neigh_sysctl_template,
2656 sizeof(*t), GFP_KERNEL);
2657 const char *dev_name_source = NULL;
2658 char *dev_name = NULL;
2659 int err = 0;
2660
2661 if (!t)
2662 return -ENOBUFS;
2663 t->neigh_vars[0].data = &p->mcast_probes;
2664 t->neigh_vars[1].data = &p->ucast_probes;
2665 t->neigh_vars[2].data = &p->app_probes;
2666 t->neigh_vars[3].data = &p->retrans_time;
2667 t->neigh_vars[4].data = &p->base_reachable_time;
2668 t->neigh_vars[5].data = &p->delay_probe_time;
2669 t->neigh_vars[6].data = &p->gc_staletime;
2670 t->neigh_vars[7].data = &p->queue_len;
2671 t->neigh_vars[8].data = &p->proxy_qlen;
2672 t->neigh_vars[9].data = &p->anycast_delay;
2673 t->neigh_vars[10].data = &p->proxy_delay;
2674 t->neigh_vars[11].data = &p->locktime;
2675 t->neigh_vars[12].data = &p->retrans_time;
2676 t->neigh_vars[13].data = &p->base_reachable_time;
2677
2678 if (dev) {
2679 dev_name_source = dev->name;
2680 t->neigh_dev[0].ctl_name = dev->ifindex;
2681 /* Terminate the table early */
2682 memset(&t->neigh_vars[14], 0, sizeof(t->neigh_vars[14]));
2683 } else {
2684 dev_name_source = t->neigh_dev[0].procname;
2685 t->neigh_vars[14].data = (int *)(p + 1);
2686 t->neigh_vars[15].data = (int *)(p + 1) + 1;
2687 t->neigh_vars[16].data = (int *)(p + 1) + 2;
2688 t->neigh_vars[17].data = (int *)(p + 1) + 3;
2689 }
2690
2691
2692 if (handler || strategy) {
2693 /* RetransTime */
2694 t->neigh_vars[3].proc_handler = handler;
2695 t->neigh_vars[3].strategy = strategy;
2696 t->neigh_vars[3].extra1 = dev;
2697 if (!strategy)
2698 t->neigh_vars[3].ctl_name = CTL_UNNUMBERED;
2699 /* ReachableTime */
2700 t->neigh_vars[4].proc_handler = handler;
2701 t->neigh_vars[4].strategy = strategy;
2702 t->neigh_vars[4].extra1 = dev;
2703 if (!strategy)
2704 t->neigh_vars[4].ctl_name = CTL_UNNUMBERED;
2705 /* RetransTime (in milliseconds) */
2706 t->neigh_vars[12].proc_handler = handler;
2707 t->neigh_vars[12].strategy = strategy;
2708 t->neigh_vars[12].extra1 = dev;
2709 if (!strategy)
2710 t->neigh_vars[12].ctl_name = CTL_UNNUMBERED;
2711 /* ReachableTime (in milliseconds) */
2712 t->neigh_vars[13].proc_handler = handler;
2713 t->neigh_vars[13].strategy = strategy;
2714 t->neigh_vars[13].extra1 = dev;
2715 if (!strategy)
2716 t->neigh_vars[13].ctl_name = CTL_UNNUMBERED;
2717 }
2718
2719 dev_name = kstrdup(dev_name_source, GFP_KERNEL);
2720 if (!dev_name) {
2721 err = -ENOBUFS;
2722 goto free;
2723 }
2724
2725 t->neigh_dev[0].procname = dev_name;
2726
2727 t->neigh_neigh_dir[0].ctl_name = pdev_id;
2728
2729 t->neigh_proto_dir[0].procname = p_name;
2730 t->neigh_proto_dir[0].ctl_name = p_id;
2731
2732 t->neigh_dev[0].child = t->neigh_vars;
2733 t->neigh_neigh_dir[0].child = t->neigh_dev;
2734 t->neigh_proto_dir[0].child = t->neigh_neigh_dir;
2735 t->neigh_root_dir[0].child = t->neigh_proto_dir;
2736
2737 t->sysctl_header = register_sysctl_table(t->neigh_root_dir);
2738 if (!t->sysctl_header) {
2739 err = -ENOBUFS;
2740 goto free_procname;
2741 }
2742 p->sysctl_table = t;
2743 return 0;
2744
2745 /* error path */
2746 free_procname:
2747 kfree(dev_name);
2748 free:
2749 kfree(t);
2750
2751 return err;
2752 }
2753
2754 void neigh_sysctl_unregister(struct neigh_parms *p)
2755 {
2756 if (p->sysctl_table) {
2757 struct neigh_sysctl_table *t = p->sysctl_table;
2758 p->sysctl_table = NULL;
2759 unregister_sysctl_table(t->sysctl_header);
2760 kfree(t->neigh_dev[0].procname);
2761 kfree(t);
2762 }
2763 }
2764
2765 #endif /* CONFIG_SYSCTL */
2766
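/* Register the PF_UNSPEC rtnetlink handlers for neighbour entries
 * (add/delete/dump) and neighbour tables (dump/set).  Runs as a subsys
 * initcall so the handlers are in place before any protocol registers
 * its tables.
 */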
2767 static int __init neigh_init(void)
2768 {
2769 rtnl_register(PF_UNSPEC, RTM_NEWNEIGH, neigh_add, NULL);
2770 rtnl_register(PF_UNSPEC, RTM_DELNEIGH, neigh_delete, NULL);
2771 rtnl_register(PF_UNSPEC, RTM_GETNEIGH, NULL, neigh_dump_info);
2772
2773 rtnl_register(PF_UNSPEC, RTM_GETNEIGHTBL, NULL, neightbl_dump_info);
2774 rtnl_register(PF_UNSPEC, RTM_SETNEIGHTBL, neightbl_set, NULL);
2775
2776 return 0;
2777 }
2778
2779 subsys_initcall(neigh_init);
2780
2781 EXPORT_SYMBOL(__neigh_event_send);
2782 EXPORT_SYMBOL(neigh_changeaddr);
2783 EXPORT_SYMBOL(neigh_compat_output);
2784 EXPORT_SYMBOL(neigh_connected_output);
2785 EXPORT_SYMBOL(neigh_create);
2786 EXPORT_SYMBOL(neigh_destroy);
2787 EXPORT_SYMBOL(neigh_event_ns);
2788 EXPORT_SYMBOL(neigh_ifdown);
2789 EXPORT_SYMBOL(neigh_lookup);
2790 EXPORT_SYMBOL(neigh_lookup_nodev);
2791 EXPORT_SYMBOL(neigh_parms_alloc);
2792 EXPORT_SYMBOL(neigh_parms_release);
2793 EXPORT_SYMBOL(neigh_rand_reach_time);
2794 EXPORT_SYMBOL(neigh_resolve_output);
2795 EXPORT_SYMBOL(neigh_table_clear);
2796 EXPORT_SYMBOL(neigh_table_init);
2797 EXPORT_SYMBOL(neigh_table_init_no_netlink);
2798 EXPORT_SYMBOL(neigh_update);
2799 EXPORT_SYMBOL(pneigh_enqueue);
2800 EXPORT_SYMBOL(pneigh_lookup);
2801
2802 #ifdef CONFIG_ARPD
2803 EXPORT_SYMBOL(neigh_app_ns);
2804 #endif
2805 #ifdef CONFIG_SYSCTL
2806 EXPORT_SYMBOL(neigh_sysctl_register);
2807 EXPORT_SYMBOL(neigh_sysctl_unregister);
2808 #endif