[NETNS]: Modify the neighbour table code so it handles multiple network namespaces
net/core/neighbour.c
1 /*
2 * Generic address resolution entity
3 *
4 * Authors:
5 * Pedro Roque <roque@di.fc.ul.pt>
6 * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
12 *
13 * Fixes:
14 * Vitaly E. Lavrov releasing NULL neighbor in neigh_add.
15 * Harald Welte Add neighbour cache statistics like rtstat
16 */
17
18 #include <linux/types.h>
19 #include <linux/kernel.h>
20 #include <linux/module.h>
21 #include <linux/socket.h>
22 #include <linux/netdevice.h>
23 #include <linux/proc_fs.h>
24 #ifdef CONFIG_SYSCTL
25 #include <linux/sysctl.h>
26 #endif
27 #include <linux/times.h>
28 #include <net/net_namespace.h>
29 #include <net/neighbour.h>
30 #include <net/dst.h>
31 #include <net/sock.h>
32 #include <net/netevent.h>
33 #include <net/netlink.h>
34 #include <linux/rtnetlink.h>
35 #include <linux/random.h>
36 #include <linux/string.h>
37 #include <linux/log2.h>
38
39 #define NEIGH_DEBUG 1
40
41 #define NEIGH_PRINTK(x...) printk(x)
42 #define NEIGH_NOPRINTK(x...) do { ; } while(0)
43 #define NEIGH_PRINTK0 NEIGH_PRINTK
44 #define NEIGH_PRINTK1 NEIGH_NOPRINTK
45 #define NEIGH_PRINTK2 NEIGH_NOPRINTK
46
47 #if NEIGH_DEBUG >= 1
48 #undef NEIGH_PRINTK1
49 #define NEIGH_PRINTK1 NEIGH_PRINTK
50 #endif
51 #if NEIGH_DEBUG >= 2
52 #undef NEIGH_PRINTK2
53 #define NEIGH_PRINTK2 NEIGH_PRINTK
54 #endif
55
56 #define PNEIGH_HASHMASK 0xF
57
58 static void neigh_timer_handler(unsigned long arg);
59 static void __neigh_notify(struct neighbour *n, int type, int flags);
60 static void neigh_update_notify(struct neighbour *neigh);
61 static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
62 void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev);
63
64 static struct neigh_table *neigh_tables;
65 #ifdef CONFIG_PROC_FS
66 static const struct file_operations neigh_stat_seq_fops;
67 #endif
68
69 /*
70 Neighbour hash table buckets are protected with rwlock tbl->lock.
71
72	- All scans/updates of the hash buckets MUST be made under this lock.
73	- NOTHING clever should be done under this lock: no callbacks
74	into protocol backends, no attempts to send anything to the network.
75	It will result in deadlocks if the backend/driver wants to use the
76	neighbour cache.
77	- If an entry requires some non-trivial action, increase
78	its reference count and release the table lock.
79
80	Neighbour entries are protected:
81	- with a reference count.
82	- with the rwlock neigh->lock.
83
84	The reference count prevents destruction.
85
86	neigh->lock mainly serializes the ll address data and its validity state.
87	However, the same lock is also used to protect other entry fields:
88	- timer
89	- resolution queue
90
91	Again, nothing clever shall be done under neigh->lock;
92	the most complicated operation we allow there is dev->hard_header.
93	It is assumed that dev->hard_header is simple and does
94	not call back into the neighbour tables.
95
96	The last lock is neigh_tbl_lock. It is a pure SMP lock, protecting
97	the list of neighbour tables. This list is used only in process context.
98 */
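/*
 * A minimal sketch (illustrative only; the helper name is hypothetical)
 * of the pattern these rules imply: pin the entry with a reference under
 * tbl->lock, drop the table lock, and only then do non-trivial work.
 *
 *	static void example_inspect(struct neigh_table *tbl, struct neighbour *n)
 *	{
 *		read_lock_bh(&tbl->lock);
 *		neigh_hold(n);			refcount prevents destruction
 *		read_unlock_bh(&tbl->lock);	never call out under tbl->lock
 *
 *		read_lock_bh(&n->lock);		ll address/state under n->lock
 *		... copy n->ha, check n->nud_state ...
 *		read_unlock_bh(&n->lock);
 *		neigh_release(n);
 *	}
 */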
99
100 static DEFINE_RWLOCK(neigh_tbl_lock);
101
102 static int neigh_blackhole(struct sk_buff *skb)
103 {
104 kfree_skb(skb);
105 return -ENETDOWN;
106 }
107
108 static void neigh_cleanup_and_release(struct neighbour *neigh)
109 {
110 if (neigh->parms->neigh_cleanup)
111 neigh->parms->neigh_cleanup(neigh);
112
113 __neigh_notify(neigh, RTM_DELNEIGH, 0);
114 neigh_release(neigh);
115 }
116
117 /*
118 * Returns a random value in the interval (1/2)*base...(3/2)*base.
119 * This corresponds to the default IPv6 settings and is not overridable,
120 * because it is a really reasonable choice.
121 */
122
123 unsigned long neigh_rand_reach_time(unsigned long base)
124 {
125 return (base ? (net_random() % base) + (base >> 1) : 0);
126 }
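/* A worked example: with base = 30 * HZ, net_random() % base is uniform
 * over [0, 30 * HZ) and base >> 1 adds 15 * HZ, so the result is uniform
 * over [15 * HZ, 45 * HZ) -- i.e. (1/2)*base ... (3/2)*base, as noted above.
 */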
127
128
129 static int neigh_forced_gc(struct neigh_table *tbl)
130 {
131 int shrunk = 0;
132 int i;
133
134 NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);
135
136 write_lock_bh(&tbl->lock);
137 for (i = 0; i <= tbl->hash_mask; i++) {
138 struct neighbour *n, **np;
139
140 np = &tbl->hash_buckets[i];
141 while ((n = *np) != NULL) {
142 /* Neighbour record may be discarded if:
143 * - nobody refers to it.
144 * - it is not permanent
145 */
146 write_lock(&n->lock);
147 if (atomic_read(&n->refcnt) == 1 &&
148 !(n->nud_state & NUD_PERMANENT)) {
149 *np = n->next;
150 n->dead = 1;
151 shrunk = 1;
152 write_unlock(&n->lock);
153 neigh_cleanup_and_release(n);
154 continue;
155 }
156 write_unlock(&n->lock);
157 np = &n->next;
158 }
159 }
160
161 tbl->last_flush = jiffies;
162
163 write_unlock_bh(&tbl->lock);
164
165 return shrunk;
166 }
167
168 static void neigh_add_timer(struct neighbour *n, unsigned long when)
169 {
170 neigh_hold(n);
171 if (unlikely(mod_timer(&n->timer, when))) {
172 printk("NEIGH: BUG, double timer add, state is %x\n",
173 n->nud_state);
174 dump_stack();
175 }
176 }
177
178 static int neigh_del_timer(struct neighbour *n)
179 {
180 if ((n->nud_state & NUD_IN_TIMER) &&
181 del_timer(&n->timer)) {
182 neigh_release(n);
183 return 1;
184 }
185 return 0;
186 }
187
188 static void pneigh_queue_purge(struct sk_buff_head *list)
189 {
190 struct sk_buff *skb;
191
192 while ((skb = skb_dequeue(list)) != NULL) {
193 dev_put(skb->dev);
194 kfree_skb(skb);
195 }
196 }
197
198 static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev)
199 {
200 int i;
201
202 for (i = 0; i <= tbl->hash_mask; i++) {
203 struct neighbour *n, **np = &tbl->hash_buckets[i];
204
205 while ((n = *np) != NULL) {
206 if (dev && n->dev != dev) {
207 np = &n->next;
208 continue;
209 }
210 *np = n->next;
211 write_lock(&n->lock);
212 neigh_del_timer(n);
213 n->dead = 1;
214
215 if (atomic_read(&n->refcnt) != 1) {
216 /* The most unpleasant situation:
217 we must destroy the neighbour entry,
218 but someone is still using it.
219
220 Destruction will be delayed until
221 the last user releases us, but
222 we must kill the timers etc. and move
223 it to a safe state.
224 */
225 skb_queue_purge(&n->arp_queue);
226 n->output = neigh_blackhole;
227 if (n->nud_state & NUD_VALID)
228 n->nud_state = NUD_NOARP;
229 else
230 n->nud_state = NUD_NONE;
231 NEIGH_PRINTK2("neigh %p is stray.\n", n);
232 }
233 write_unlock(&n->lock);
234 neigh_cleanup_and_release(n);
235 }
236 }
237 }
238
239 void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
240 {
241 write_lock_bh(&tbl->lock);
242 neigh_flush_dev(tbl, dev);
243 write_unlock_bh(&tbl->lock);
244 }
245
246 int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
247 {
248 write_lock_bh(&tbl->lock);
249 neigh_flush_dev(tbl, dev);
250 pneigh_ifdown(tbl, dev);
251 write_unlock_bh(&tbl->lock);
252
253 del_timer_sync(&tbl->proxy_timer);
254 pneigh_queue_purge(&tbl->proxy_queue);
255 return 0;
256 }
257
258 static struct neighbour *neigh_alloc(struct neigh_table *tbl)
259 {
260 struct neighbour *n = NULL;
261 unsigned long now = jiffies;
262 int entries;
263
264 entries = atomic_inc_return(&tbl->entries) - 1;
265 if (entries >= tbl->gc_thresh3 ||
266 (entries >= tbl->gc_thresh2 &&
267 time_after(now, tbl->last_flush + 5 * HZ))) {
268 if (!neigh_forced_gc(tbl) &&
269 entries >= tbl->gc_thresh3)
270 goto out_entries;
271 }
272
273 n = kmem_cache_zalloc(tbl->kmem_cachep, GFP_ATOMIC);
274 if (!n)
275 goto out_entries;
276
277 skb_queue_head_init(&n->arp_queue);
278 rwlock_init(&n->lock);
279 n->updated = n->used = now;
280 n->nud_state = NUD_NONE;
281 n->output = neigh_blackhole;
282 n->parms = neigh_parms_clone(&tbl->parms);
283 setup_timer(&n->timer, neigh_timer_handler, (unsigned long)n);
284
285 NEIGH_CACHE_STAT_INC(tbl, allocs);
286 n->tbl = tbl;
287 atomic_set(&n->refcnt, 1);
288 n->dead = 1;
289 out:
290 return n;
291
292 out_entries:
293 atomic_dec(&tbl->entries);
294 goto out;
295 }
296
297 static struct neighbour **neigh_hash_alloc(unsigned int entries)
298 {
299 unsigned long size = entries * sizeof(struct neighbour *);
300 struct neighbour **ret;
301
302 if (size <= PAGE_SIZE) {
303 ret = kzalloc(size, GFP_ATOMIC);
304 } else {
305 ret = (struct neighbour **)
306 __get_free_pages(GFP_ATOMIC|__GFP_ZERO, get_order(size));
307 }
308 return ret;
309 }
310
311 static void neigh_hash_free(struct neighbour **hash, unsigned int entries)
312 {
313 unsigned long size = entries * sizeof(struct neighbour *);
314
315 if (size <= PAGE_SIZE)
316 kfree(hash);
317 else
318 free_pages((unsigned long)hash, get_order(size));
319 }
320
321 static void neigh_hash_grow(struct neigh_table *tbl, unsigned long new_entries)
322 {
323 struct neighbour **new_hash, **old_hash;
324 unsigned int i, new_hash_mask, old_entries;
325
326 NEIGH_CACHE_STAT_INC(tbl, hash_grows);
327
328 BUG_ON(!is_power_of_2(new_entries));
329 new_hash = neigh_hash_alloc(new_entries);
330 if (!new_hash)
331 return;
332
333 old_entries = tbl->hash_mask + 1;
334 new_hash_mask = new_entries - 1;
335 old_hash = tbl->hash_buckets;
336
337 get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));
338 for (i = 0; i < old_entries; i++) {
339 struct neighbour *n, *next;
340
341 for (n = old_hash[i]; n; n = next) {
342 unsigned int hash_val = tbl->hash(n->primary_key, n->dev);
343
344 hash_val &= new_hash_mask;
345 next = n->next;
346
347 n->next = new_hash[hash_val];
348 new_hash[hash_val] = n;
349 }
350 }
351 tbl->hash_buckets = new_hash;
352 tbl->hash_mask = new_hash_mask;
353
354 neigh_hash_free(old_hash, old_entries);
355 }
356
357 struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
358 struct net_device *dev)
359 {
360 struct neighbour *n;
361 int key_len = tbl->key_len;
362 u32 hash_val = tbl->hash(pkey, dev);
363
364 NEIGH_CACHE_STAT_INC(tbl, lookups);
365
366 read_lock_bh(&tbl->lock);
367 for (n = tbl->hash_buckets[hash_val & tbl->hash_mask]; n; n = n->next) {
368 if (dev == n->dev && !memcmp(n->primary_key, pkey, key_len)) {
369 neigh_hold(n);
370 NEIGH_CACHE_STAT_INC(tbl, hits);
371 break;
372 }
373 }
374 read_unlock_bh(&tbl->lock);
375 return n;
376 }
377
378 struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
379 const void *pkey)
380 {
381 struct neighbour *n;
382 int key_len = tbl->key_len;
383 u32 hash_val = tbl->hash(pkey, NULL);
384
385 NEIGH_CACHE_STAT_INC(tbl, lookups);
386
387 read_lock_bh(&tbl->lock);
388 for (n = tbl->hash_buckets[hash_val & tbl->hash_mask]; n; n = n->next) {
389 if (!memcmp(n->primary_key, pkey, key_len) &&
390 (net == n->dev->nd_net)) {
391 neigh_hold(n);
392 NEIGH_CACHE_STAT_INC(tbl, hits);
393 break;
394 }
395 }
396 read_unlock_bh(&tbl->lock);
397 return n;
398 }
399
400 struct neighbour *neigh_create(struct neigh_table *tbl, const void *pkey,
401 struct net_device *dev)
402 {
403 u32 hash_val;
404 int key_len = tbl->key_len;
405 int error;
406 struct neighbour *n1, *rc, *n = neigh_alloc(tbl);
407
408 if (!n) {
409 rc = ERR_PTR(-ENOBUFS);
410 goto out;
411 }
412
413 memcpy(n->primary_key, pkey, key_len);
414 n->dev = dev;
415 dev_hold(dev);
416
417 /* Protocol specific setup. */
418 if (tbl->constructor && (error = tbl->constructor(n)) < 0) {
419 rc = ERR_PTR(error);
420 goto out_neigh_release;
421 }
422
423 /* Device specific setup. */
424 if (n->parms->neigh_setup &&
425 (error = n->parms->neigh_setup(n)) < 0) {
426 rc = ERR_PTR(error);
427 goto out_neigh_release;
428 }
429
430 n->confirmed = jiffies - (n->parms->base_reachable_time << 1);
431
432 write_lock_bh(&tbl->lock);
433
434 if (atomic_read(&tbl->entries) > (tbl->hash_mask + 1))
435 neigh_hash_grow(tbl, (tbl->hash_mask + 1) << 1);
436
437 hash_val = tbl->hash(pkey, dev) & tbl->hash_mask;
438
439 if (n->parms->dead) {
440 rc = ERR_PTR(-EINVAL);
441 goto out_tbl_unlock;
442 }
443
444 for (n1 = tbl->hash_buckets[hash_val]; n1; n1 = n1->next) {
445 if (dev == n1->dev && !memcmp(n1->primary_key, pkey, key_len)) {
446 neigh_hold(n1);
447 rc = n1;
448 goto out_tbl_unlock;
449 }
450 }
451
452 n->next = tbl->hash_buckets[hash_val];
453 tbl->hash_buckets[hash_val] = n;
454 n->dead = 0;
455 neigh_hold(n);
456 write_unlock_bh(&tbl->lock);
457 NEIGH_PRINTK2("neigh %p is created.\n", n);
458 rc = n;
459 out:
460 return rc;
461 out_tbl_unlock:
462 write_unlock_bh(&tbl->lock);
463 out_neigh_release:
464 neigh_release(n);
465 goto out;
466 }
467
468 struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl,
469 struct net *net, const void *pkey,
470 struct net_device *dev, int creat)
471 {
472 struct pneigh_entry *n;
473 int key_len = tbl->key_len;
474 u32 hash_val = *(u32 *)(pkey + key_len - 4);
475
476 hash_val ^= (hash_val >> 16);
477 hash_val ^= hash_val >> 8;
478 hash_val ^= hash_val >> 4;
479 hash_val &= PNEIGH_HASHMASK;
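	/* Worked example (illustrative): for a key ending in 0xc0a80101
	 * (192.168.1.1), the folds give 0xc0a8c1a9, 0xc0686968, 0xcc6eeffe,
	 * and masking with PNEIGH_HASHMASK selects bucket 0xe.
	 */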
480
481 read_lock_bh(&tbl->lock);
482
483 for (n = tbl->phash_buckets[hash_val]; n; n = n->next) {
484 if (!memcmp(n->key, pkey, key_len) &&
485 (n->net == net) &&
486 (n->dev == dev || !n->dev)) {
487 read_unlock_bh(&tbl->lock);
488 goto out;
489 }
490 }
491 read_unlock_bh(&tbl->lock);
492 n = NULL;
493 if (!creat)
494 goto out;
495
496 ASSERT_RTNL();
497
498 n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL);
499 if (!n)
500 goto out;
501
502 n->net = hold_net(net);
503 memcpy(n->key, pkey, key_len);
504 n->dev = dev;
505 if (dev)
506 dev_hold(dev);
507
508 if (tbl->pconstructor && tbl->pconstructor(n)) {
509 if (dev)
510 dev_put(dev);
511 kfree(n);
512 n = NULL;
513 goto out;
514 }
515
516 write_lock_bh(&tbl->lock);
517 n->next = tbl->phash_buckets[hash_val];
518 tbl->phash_buckets[hash_val] = n;
519 write_unlock_bh(&tbl->lock);
520 out:
521 return n;
522 }
523
524
525 int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey,
526 struct net_device *dev)
527 {
528 struct pneigh_entry *n, **np;
529 int key_len = tbl->key_len;
530 u32 hash_val = *(u32 *)(pkey + key_len - 4);
531
532 hash_val ^= (hash_val >> 16);
533 hash_val ^= hash_val >> 8;
534 hash_val ^= hash_val >> 4;
535 hash_val &= PNEIGH_HASHMASK;
536
537 write_lock_bh(&tbl->lock);
538 for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
539 np = &n->next) {
540 if (!memcmp(n->key, pkey, key_len) && n->dev == dev &&
541 (n->net == net)) {
542 *np = n->next;
543 write_unlock_bh(&tbl->lock);
544 if (tbl->pdestructor)
545 tbl->pdestructor(n);
546 if (n->dev)
547 dev_put(n->dev);
548 release_net(n->net);
549 kfree(n);
550 return 0;
551 }
552 }
553 write_unlock_bh(&tbl->lock);
554 return -ENOENT;
555 }
556
557 static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
558 {
559 struct pneigh_entry *n, **np;
560 u32 h;
561
562 for (h = 0; h <= PNEIGH_HASHMASK; h++) {
563 np = &tbl->phash_buckets[h];
564 while ((n = *np) != NULL) {
565 if (!dev || n->dev == dev) {
566 *np = n->next;
567 if (tbl->pdestructor)
568 tbl->pdestructor(n);
569 if (n->dev)
570 dev_put(n->dev);
571 release_net(n->net);
572 kfree(n);
573 continue;
574 }
575 np = &n->next;
576 }
577 }
578 return -ENOENT;
579 }
580
581
582 /*
583 * The neighbour must already be out of the table.
584 *
585 */
586 void neigh_destroy(struct neighbour *neigh)
587 {
588 struct hh_cache *hh;
589
590 NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);
591
592 if (!neigh->dead) {
593 printk(KERN_WARNING
594 "Destroying alive neighbour %p\n", neigh);
595 dump_stack();
596 return;
597 }
598
599 if (neigh_del_timer(neigh))
600 printk(KERN_WARNING "Impossible event.\n");
601
602 while ((hh = neigh->hh) != NULL) {
603 neigh->hh = hh->hh_next;
604 hh->hh_next = NULL;
605
606 write_seqlock_bh(&hh->hh_lock);
607 hh->hh_output = neigh_blackhole;
608 write_sequnlock_bh(&hh->hh_lock);
609 if (atomic_dec_and_test(&hh->hh_refcnt))
610 kfree(hh);
611 }
612
613 skb_queue_purge(&neigh->arp_queue);
614
615 dev_put(neigh->dev);
616 neigh_parms_put(neigh->parms);
617
618 NEIGH_PRINTK2("neigh %p is destroyed.\n", neigh);
619
620 atomic_dec(&neigh->tbl->entries);
621 kmem_cache_free(neigh->tbl->kmem_cachep, neigh);
622 }
623
624 /* Neighbour state is suspicious;
625 disable fast path.
626
627 Called with the neighbour write-locked.
628 */
629 static void neigh_suspect(struct neighbour *neigh)
630 {
631 struct hh_cache *hh;
632
633 NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);
634
635 neigh->output = neigh->ops->output;
636
637 for (hh = neigh->hh; hh; hh = hh->hh_next)
638 hh->hh_output = neigh->ops->output;
639 }
640
641 /* Neighbour state is OK;
642 enable fast path.
643
644 Called with the neighbour write-locked.
645 */
646 static void neigh_connect(struct neighbour *neigh)
647 {
648 struct hh_cache *hh;
649
650 NEIGH_PRINTK2("neigh %p is connected.\n", neigh);
651
652 neigh->output = neigh->ops->connected_output;
653
654 for (hh = neigh->hh; hh; hh = hh->hh_next)
655 hh->hh_output = neigh->ops->hh_output;
656 }
657
658 static void neigh_periodic_timer(unsigned long arg)
659 {
660 struct neigh_table *tbl = (struct neigh_table *)arg;
661 struct neighbour *n, **np;
662 unsigned long expire, now = jiffies;
663
664 NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);
665
666 write_lock(&tbl->lock);
667
668 /*
669 * periodically recompute ReachableTime from random function
670 */
671
672 if (time_after(now, tbl->last_rand + 300 * HZ)) {
673 struct neigh_parms *p;
674 tbl->last_rand = now;
675 for (p = &tbl->parms; p; p = p->next)
676 p->reachable_time =
677 neigh_rand_reach_time(p->base_reachable_time);
678 }
679
680 np = &tbl->hash_buckets[tbl->hash_chain_gc];
681 tbl->hash_chain_gc = ((tbl->hash_chain_gc + 1) & tbl->hash_mask);
682
683 while ((n = *np) != NULL) {
684 unsigned int state;
685
686 write_lock(&n->lock);
687
688 state = n->nud_state;
689 if (state & (NUD_PERMANENT | NUD_IN_TIMER)) {
690 write_unlock(&n->lock);
691 goto next_elt;
692 }
693
694 if (time_before(n->used, n->confirmed))
695 n->used = n->confirmed;
696
697 if (atomic_read(&n->refcnt) == 1 &&
698 (state == NUD_FAILED ||
699 time_after(now, n->used + n->parms->gc_staletime))) {
700 *np = n->next;
701 n->dead = 1;
702 write_unlock(&n->lock);
703 neigh_cleanup_and_release(n);
704 continue;
705 }
706 write_unlock(&n->lock);
707
708 next_elt:
709 np = &n->next;
710 }
711
712 /* Cycle through all hash buckets every base_reachable_time/2 ticks.
713 * ARP entry timeouts range from 1/2 base_reachable_time to 3/2
714 * base_reachable_time.
715 */
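	/* For example (illustrative numbers): base_reachable_time = 30 * HZ
	 * over a 16-bucket hash gives expire = (15 * HZ) / 16 jiffies between
	 * runs, so a full sweep of the table takes roughly
	 * base_reachable_time / 2, as stated.
	 */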
716 expire = tbl->parms.base_reachable_time >> 1;
717 expire /= (tbl->hash_mask + 1);
718 if (!expire)
719 expire = 1;
720
721 if (expire > HZ)
722 mod_timer(&tbl->gc_timer, round_jiffies(now + expire));
723 else
724 mod_timer(&tbl->gc_timer, now + expire);
725
726 write_unlock(&tbl->lock);
727 }
728
729 static __inline__ int neigh_max_probes(struct neighbour *n)
730 {
731 struct neigh_parms *p = n->parms;
732 return (n->nud_state & NUD_PROBE ?
733 p->ucast_probes :
734 p->ucast_probes + p->app_probes + p->mcast_probes);
735 }
736
737 /* Called when a timer expires for a neighbour entry. */
738
739 static void neigh_timer_handler(unsigned long arg)
740 {
741 unsigned long now, next;
742 struct neighbour *neigh = (struct neighbour *)arg;
743 unsigned state;
744 int notify = 0;
745
746 write_lock(&neigh->lock);
747
748 state = neigh->nud_state;
749 now = jiffies;
750 next = now + HZ;
751
752 if (!(state & NUD_IN_TIMER)) {
753 #ifndef CONFIG_SMP
754 printk(KERN_WARNING "neigh: timer & !nud_in_timer\n");
755 #endif
756 goto out;
757 }
758
759 if (state & NUD_REACHABLE) {
760 if (time_before_eq(now,
761 neigh->confirmed + neigh->parms->reachable_time)) {
762 NEIGH_PRINTK2("neigh %p is still alive.\n", neigh);
763 next = neigh->confirmed + neigh->parms->reachable_time;
764 } else if (time_before_eq(now,
765 neigh->used + neigh->parms->delay_probe_time)) {
766 NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
767 neigh->nud_state = NUD_DELAY;
768 neigh->updated = jiffies;
769 neigh_suspect(neigh);
770 next = now + neigh->parms->delay_probe_time;
771 } else {
772 NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);
773 neigh->nud_state = NUD_STALE;
774 neigh->updated = jiffies;
775 neigh_suspect(neigh);
776 notify = 1;
777 }
778 } else if (state & NUD_DELAY) {
779 if (time_before_eq(now,
780 neigh->confirmed + neigh->parms->delay_probe_time)) {
781 NEIGH_PRINTK2("neigh %p is now reachable.\n", neigh);
782 neigh->nud_state = NUD_REACHABLE;
783 neigh->updated = jiffies;
784 neigh_connect(neigh);
785 notify = 1;
786 next = neigh->confirmed + neigh->parms->reachable_time;
787 } else {
788 NEIGH_PRINTK2("neigh %p is probed.\n", neigh);
789 neigh->nud_state = NUD_PROBE;
790 neigh->updated = jiffies;
791 atomic_set(&neigh->probes, 0);
792 next = now + neigh->parms->retrans_time;
793 }
794 } else {
795 /* NUD_PROBE|NUD_INCOMPLETE */
796 next = now + neigh->parms->retrans_time;
797 }
798
799 if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
800 atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
801 struct sk_buff *skb;
802
803 neigh->nud_state = NUD_FAILED;
804 neigh->updated = jiffies;
805 notify = 1;
806 NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
807 NEIGH_PRINTK2("neigh %p is failed.\n", neigh);
808
809 /* This is a very fragile place: report_unreachable is a very complicated
810 routine. In particular, it can hit the same neighbour entry!
811
812 So we try to be careful and avoid a dead loop. --ANK
813 */
814 while (neigh->nud_state == NUD_FAILED &&
815 (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
816 write_unlock(&neigh->lock);
817 neigh->ops->error_report(neigh, skb);
818 write_lock(&neigh->lock);
819 }
820 skb_queue_purge(&neigh->arp_queue);
821 }
822
823 if (neigh->nud_state & NUD_IN_TIMER) {
824 if (time_before(next, jiffies + HZ/2))
825 next = jiffies + HZ/2;
826 if (!mod_timer(&neigh->timer, next))
827 neigh_hold(neigh);
828 }
829 if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
830 struct sk_buff *skb = skb_peek(&neigh->arp_queue);
831 /* keep skb alive even if arp_queue overflows */
832 if (skb)
833 skb_get(skb);
834 write_unlock(&neigh->lock);
835 neigh->ops->solicit(neigh, skb);
836 atomic_inc(&neigh->probes);
837 if (skb)
838 kfree_skb(skb);
839 } else {
840 out:
841 write_unlock(&neigh->lock);
842 }
843
844 if (notify)
845 neigh_update_notify(neigh);
846
847 neigh_release(neigh);
848 }
849
850 int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
851 {
852 int rc;
853 unsigned long now;
854
855 write_lock_bh(&neigh->lock);
856
857 rc = 0;
858 if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
859 goto out_unlock_bh;
860
861 now = jiffies;
862
863 if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
864 if (neigh->parms->mcast_probes + neigh->parms->app_probes) {
865 atomic_set(&neigh->probes, neigh->parms->ucast_probes);
866 neigh->nud_state = NUD_INCOMPLETE;
867 neigh->updated = jiffies;
868 neigh_add_timer(neigh, now + 1);
869 } else {
870 neigh->nud_state = NUD_FAILED;
871 neigh->updated = jiffies;
872 write_unlock_bh(&neigh->lock);
873
874 if (skb)
875 kfree_skb(skb);
876 return 1;
877 }
878 } else if (neigh->nud_state & NUD_STALE) {
879 NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
880 neigh->nud_state = NUD_DELAY;
881 neigh->updated = jiffies;
882 neigh_add_timer(neigh,
883 jiffies + neigh->parms->delay_probe_time);
884 }
885
886 if (neigh->nud_state == NUD_INCOMPLETE) {
887 if (skb) {
888 if (skb_queue_len(&neigh->arp_queue) >=
889 neigh->parms->queue_len) {
890 struct sk_buff *buff;
891 buff = neigh->arp_queue.next;
892 __skb_unlink(buff, &neigh->arp_queue);
893 kfree_skb(buff);
894 }
895 __skb_queue_tail(&neigh->arp_queue, skb);
896 }
897 rc = 1;
898 }
899 out_unlock_bh:
900 write_unlock_bh(&neigh->lock);
901 return rc;
902 }
903
904 static void neigh_update_hhs(struct neighbour *neigh)
905 {
906 struct hh_cache *hh;
907 void (*update)(struct hh_cache*, const struct net_device*, const unsigned char *)
908 = neigh->dev->header_ops->cache_update;
909
910 if (update) {
911 for (hh = neigh->hh; hh; hh = hh->hh_next) {
912 write_seqlock_bh(&hh->hh_lock);
913 update(hh, neigh->dev, neigh->ha);
914 write_sequnlock_bh(&hh->hh_lock);
915 }
916 }
917 }
918
919
920
921 /* Generic update routine.
922 -- lladdr is the new lladdr, or NULL if it is not supplied.
923 -- new is the new state.
924 -- flags:
925 NEIGH_UPDATE_F_OVERRIDE allows overriding an existing lladdr
926 if it is different.
927 NEIGH_UPDATE_F_WEAK_OVERRIDE will suspect an existing "connected"
928 lladdr instead of overriding it
929 if it is different.
930 It also allows retaining the current state
931 if the lladdr is unchanged.
932 NEIGH_UPDATE_F_ADMIN means the change is administrative.
933
934 NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows overriding an existing
935 NTF_ROUTER flag.
936 NEIGH_UPDATE_F_ISROUTER indicates whether the neighbour is known to be
937 a router.
938
939 The caller MUST hold a reference count on the entry.
940 */
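/*
 * A hedged example (values illustrative): an administrative replacement,
 * as the netlink path below performs, looks like
 *
 *	err = neigh_update(neigh, new_lladdr, NUD_PERMANENT,
 *			   NEIGH_UPDATE_F_OVERRIDE |
 *			   NEIGH_UPDATE_F_ADMIN);
 *
 * with the caller holding its own reference on neigh throughout.
 */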
941
942 int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
943 u32 flags)
944 {
945 u8 old;
946 int err;
947 int notify = 0;
948 struct net_device *dev;
949 int update_isrouter = 0;
950
951 write_lock_bh(&neigh->lock);
952
953 dev = neigh->dev;
954 old = neigh->nud_state;
955 err = -EPERM;
956
957 if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
958 (old & (NUD_NOARP | NUD_PERMANENT)))
959 goto out;
960
961 if (!(new & NUD_VALID)) {
962 neigh_del_timer(neigh);
963 if (old & NUD_CONNECTED)
964 neigh_suspect(neigh);
965 neigh->nud_state = new;
966 err = 0;
967 notify = old & NUD_VALID;
968 goto out;
969 }
970
971 /* Compare new lladdr with cached one */
972 if (!dev->addr_len) {
973 /* First case: device needs no address. */
974 lladdr = neigh->ha;
975 } else if (lladdr) {
976 /* The second case: if something is already cached
977 and a new address is proposed:
978 - compare new & old
979 - if they are different, check override flag
980 */
981 if ((old & NUD_VALID) &&
982 !memcmp(lladdr, neigh->ha, dev->addr_len))
983 lladdr = neigh->ha;
984 } else {
985 /* No address is supplied; if we know something,
986 use it, otherwise discard the request.
987 */
988 err = -EINVAL;
989 if (!(old & NUD_VALID))
990 goto out;
991 lladdr = neigh->ha;
992 }
993
994 if (new & NUD_CONNECTED)
995 neigh->confirmed = jiffies;
996 neigh->updated = jiffies;
997
998 /* If the entry was valid and the address has not changed,
999 do not change the entry state if the new one is STALE.
1000 */
1001 err = 0;
1002 update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
1003 if (old & NUD_VALID) {
1004 if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
1005 update_isrouter = 0;
1006 if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) &&
1007 (old & NUD_CONNECTED)) {
1008 lladdr = neigh->ha;
1009 new = NUD_STALE;
1010 } else
1011 goto out;
1012 } else {
1013 if (lladdr == neigh->ha && new == NUD_STALE &&
1014 ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) ||
1015 (old & NUD_CONNECTED))
1016 )
1017 new = old;
1018 }
1019 }
1020
1021 if (new != old) {
1022 neigh_del_timer(neigh);
1023 if (new & NUD_IN_TIMER)
1024 neigh_add_timer(neigh, (jiffies +
1025 ((new & NUD_REACHABLE) ?
1026 neigh->parms->reachable_time :
1027 0)));
1028 neigh->nud_state = new;
1029 }
1030
1031 if (lladdr != neigh->ha) {
1032 memcpy(&neigh->ha, lladdr, dev->addr_len);
1033 neigh_update_hhs(neigh);
1034 if (!(new & NUD_CONNECTED))
1035 neigh->confirmed = jiffies -
1036 (neigh->parms->base_reachable_time << 1);
1037 notify = 1;
1038 }
1039 if (new == old)
1040 goto out;
1041 if (new & NUD_CONNECTED)
1042 neigh_connect(neigh);
1043 else
1044 neigh_suspect(neigh);
1045 if (!(old & NUD_VALID)) {
1046 struct sk_buff *skb;
1047
1048 /* Again: avoid a dead loop if something went wrong */
1049
1050 while (neigh->nud_state & NUD_VALID &&
1051 (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
1052 struct neighbour *n1 = neigh;
1053 write_unlock_bh(&neigh->lock);
1054 /* On shaper/eql skb->dst->neighbour != neigh :( */
1055 if (skb->dst && skb->dst->neighbour)
1056 n1 = skb->dst->neighbour;
1057 n1->output(skb);
1058 write_lock_bh(&neigh->lock);
1059 }
1060 skb_queue_purge(&neigh->arp_queue);
1061 }
1062 out:
1063 if (update_isrouter) {
1064 neigh->flags = (flags & NEIGH_UPDATE_F_ISROUTER) ?
1065 (neigh->flags | NTF_ROUTER) :
1066 (neigh->flags & ~NTF_ROUTER);
1067 }
1068 write_unlock_bh(&neigh->lock);
1069
1070 if (notify)
1071 neigh_update_notify(neigh);
1072
1073 return err;
1074 }
1075
1076 struct neighbour *neigh_event_ns(struct neigh_table *tbl,
1077 u8 *lladdr, void *saddr,
1078 struct net_device *dev)
1079 {
1080 struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
1081 lladdr || !dev->addr_len);
1082 if (neigh)
1083 neigh_update(neigh, lladdr, NUD_STALE,
1084 NEIGH_UPDATE_F_OVERRIDE);
1085 return neigh;
1086 }
1087
1088 static void neigh_hh_init(struct neighbour *n, struct dst_entry *dst,
1089 __be16 protocol)
1090 {
1091 struct hh_cache *hh;
1092 struct net_device *dev = dst->dev;
1093
1094 for (hh = n->hh; hh; hh = hh->hh_next)
1095 if (hh->hh_type == protocol)
1096 break;
1097
1098 if (!hh && (hh = kzalloc(sizeof(*hh), GFP_ATOMIC)) != NULL) {
1099 seqlock_init(&hh->hh_lock);
1100 hh->hh_type = protocol;
1101 atomic_set(&hh->hh_refcnt, 0);
1102 hh->hh_next = NULL;
1103
1104 if (dev->header_ops->cache(n, hh)) {
1105 kfree(hh);
1106 hh = NULL;
1107 } else {
1108 atomic_inc(&hh->hh_refcnt);
1109 hh->hh_next = n->hh;
1110 n->hh = hh;
1111 if (n->nud_state & NUD_CONNECTED)
1112 hh->hh_output = n->ops->hh_output;
1113 else
1114 hh->hh_output = n->ops->output;
1115 }
1116 }
1117 if (hh) {
1118 atomic_inc(&hh->hh_refcnt);
1119 dst->hh = hh;
1120 }
1121 }
1122
1123 /* This function can be used in contexts where only the old dev_queue_xmit
1124 worked, e.g. if you want to override the normal output path (eql, shaper)
1125 but resolution has not been done yet.
1126 */
1127
1128 int neigh_compat_output(struct sk_buff *skb)
1129 {
1130 struct net_device *dev = skb->dev;
1131
1132 __skb_pull(skb, skb_network_offset(skb));
1133
1134 if (dev_hard_header(skb, dev, ntohs(skb->protocol), NULL, NULL,
1135 skb->len) < 0 &&
1136 dev->header_ops->rebuild(skb))
1137 return 0;
1138
1139 return dev_queue_xmit(skb);
1140 }
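/*
 * Illustrative use (an assumption, mirroring the comment above): a path
 * that bypasses resolution can point an entry's output hook here, e.g.
 *
 *	neigh->output = neigh_compat_output;
 *
 * so frames get a header via dev_hard_header() and then go straight to
 * dev_queue_xmit().
 */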
1141
1142 /* Slow and careful. */
1143
1144 int neigh_resolve_output(struct sk_buff *skb)
1145 {
1146 struct dst_entry *dst = skb->dst;
1147 struct neighbour *neigh;
1148 int rc = 0;
1149
1150 if (!dst || !(neigh = dst->neighbour))
1151 goto discard;
1152
1153 __skb_pull(skb, skb_network_offset(skb));
1154
1155 if (!neigh_event_send(neigh, skb)) {
1156 int err;
1157 struct net_device *dev = neigh->dev;
1158 if (dev->header_ops->cache && !dst->hh) {
1159 write_lock_bh(&neigh->lock);
1160 if (!dst->hh)
1161 neigh_hh_init(neigh, dst, dst->ops->protocol);
1162 err = dev_hard_header(skb, dev, ntohs(skb->protocol),
1163 neigh->ha, NULL, skb->len);
1164 write_unlock_bh(&neigh->lock);
1165 } else {
1166 read_lock_bh(&neigh->lock);
1167 err = dev_hard_header(skb, dev, ntohs(skb->protocol),
1168 neigh->ha, NULL, skb->len);
1169 read_unlock_bh(&neigh->lock);
1170 }
1171 if (err >= 0)
1172 rc = neigh->ops->queue_xmit(skb);
1173 else
1174 goto out_kfree_skb;
1175 }
1176 out:
1177 return rc;
1178 discard:
1179 NEIGH_PRINTK1("neigh_resolve_output: dst=%p neigh=%p\n",
1180 dst, dst ? dst->neighbour : NULL);
1181 out_kfree_skb:
1182 rc = -EINVAL;
1183 kfree_skb(skb);
1184 goto out;
1185 }
1186
1187 /* As fast as possible without hh cache */
1188
1189 int neigh_connected_output(struct sk_buff *skb)
1190 {
1191 int err;
1192 struct dst_entry *dst = skb->dst;
1193 struct neighbour *neigh = dst->neighbour;
1194 struct net_device *dev = neigh->dev;
1195
1196 __skb_pull(skb, skb_network_offset(skb));
1197
1198 read_lock_bh(&neigh->lock);
1199 err = dev_hard_header(skb, dev, ntohs(skb->protocol),
1200 neigh->ha, NULL, skb->len);
1201 read_unlock_bh(&neigh->lock);
1202 if (err >= 0)
1203 err = neigh->ops->queue_xmit(skb);
1204 else {
1205 err = -EINVAL;
1206 kfree_skb(skb);
1207 }
1208 return err;
1209 }
1210
1211 static void neigh_proxy_process(unsigned long arg)
1212 {
1213 struct neigh_table *tbl = (struct neigh_table *)arg;
1214 long sched_next = 0;
1215 unsigned long now = jiffies;
1216 struct sk_buff *skb;
1217
1218 spin_lock(&tbl->proxy_queue.lock);
1219
1220 skb = tbl->proxy_queue.next;
1221
1222 while (skb != (struct sk_buff *)&tbl->proxy_queue) {
1223 struct sk_buff *back = skb;
1224 long tdif = NEIGH_CB(back)->sched_next - now;
1225
1226 skb = skb->next;
1227 if (tdif <= 0) {
1228 struct net_device *dev = back->dev;
1229 __skb_unlink(back, &tbl->proxy_queue);
1230 if (tbl->proxy_redo && netif_running(dev))
1231 tbl->proxy_redo(back);
1232 else
1233 kfree_skb(back);
1234
1235 dev_put(dev);
1236 } else if (!sched_next || tdif < sched_next)
1237 sched_next = tdif;
1238 }
1239 del_timer(&tbl->proxy_timer);
1240 if (sched_next)
1241 mod_timer(&tbl->proxy_timer, jiffies + sched_next);
1242 spin_unlock(&tbl->proxy_queue.lock);
1243 }
1244
1245 void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
1246 struct sk_buff *skb)
1247 {
1248 unsigned long now = jiffies;
1249 unsigned long sched_next = now + (net_random() % p->proxy_delay);
1250
1251 if (tbl->proxy_queue.qlen > p->proxy_qlen) {
1252 kfree_skb(skb);
1253 return;
1254 }
1255
1256 NEIGH_CB(skb)->sched_next = sched_next;
1257 NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;
1258
1259 spin_lock(&tbl->proxy_queue.lock);
1260 if (del_timer(&tbl->proxy_timer)) {
1261 if (time_before(tbl->proxy_timer.expires, sched_next))
1262 sched_next = tbl->proxy_timer.expires;
1263 }
1264 dst_release(skb->dst);
1265 skb->dst = NULL;
1266 dev_hold(skb->dev);
1267 __skb_queue_tail(&tbl->proxy_queue, skb);
1268 mod_timer(&tbl->proxy_timer, sched_next);
1269 spin_unlock(&tbl->proxy_queue.lock);
1270 }
1271
1272 static inline struct neigh_parms *lookup_neigh_params(struct neigh_table *tbl,
1273 struct net *net, int ifindex)
1274 {
1275 struct neigh_parms *p;
1276
1277 for (p = &tbl->parms; p; p = p->next) {
1278 if (p->net != net)
1279 continue;
1280 if ((p->dev && p->dev->ifindex == ifindex) ||
1281 (!p->dev && !ifindex))
1282 return p;
1283 }
1284
1285 return NULL;
1286 }
1287
1288 struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
1289 struct neigh_table *tbl)
1290 {
1291 struct neigh_parms *p, *ref;
1292 struct net *net;
1293
1294 net = &init_net;
1295 if (dev)
1296 net = dev->nd_net;
1297
1298 ref = lookup_neigh_params(tbl, net, 0);
1299 if (!ref)
1300 return NULL;
1301
1302 p = kmemdup(ref, sizeof(*p), GFP_KERNEL);
1303 if (p) {
1304 p->tbl = tbl;
1305 atomic_set(&p->refcnt, 1);
1306 INIT_RCU_HEAD(&p->rcu_head);
1307 p->reachable_time =
1308 neigh_rand_reach_time(p->base_reachable_time);
1309 if (dev) {
1310 if (dev->neigh_setup && dev->neigh_setup(dev, p)) {
1311 kfree(p);
1312 return NULL;
1313 }
1314
1315 dev_hold(dev);
1316 p->dev = dev;
1317 }
1318 p->net = hold_net(net);
1319 p->sysctl_table = NULL;
1320 write_lock_bh(&tbl->lock);
1321 p->next = tbl->parms.next;
1322 tbl->parms.next = p;
1323 write_unlock_bh(&tbl->lock);
1324 }
1325 return p;
1326 }
1327
1328 static void neigh_rcu_free_parms(struct rcu_head *head)
1329 {
1330 struct neigh_parms *parms =
1331 container_of(head, struct neigh_parms, rcu_head);
1332
1333 neigh_parms_put(parms);
1334 }
1335
1336 void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
1337 {
1338 struct neigh_parms **p;
1339
1340 if (!parms || parms == &tbl->parms)
1341 return;
1342 write_lock_bh(&tbl->lock);
1343 for (p = &tbl->parms.next; *p; p = &(*p)->next) {
1344 if (*p == parms) {
1345 *p = parms->next;
1346 parms->dead = 1;
1347 write_unlock_bh(&tbl->lock);
1348 if (parms->dev)
1349 dev_put(parms->dev);
1350 call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
1351 return;
1352 }
1353 }
1354 write_unlock_bh(&tbl->lock);
1355 NEIGH_PRINTK1("neigh_parms_release: not found\n");
1356 }
1357
1358 void neigh_parms_destroy(struct neigh_parms *parms)
1359 {
1360 release_net(parms->net);
1361 kfree(parms);
1362 }
1363
1364 static struct lock_class_key neigh_table_proxy_queue_class;
1365
1366 void neigh_table_init_no_netlink(struct neigh_table *tbl)
1367 {
1368 unsigned long now = jiffies;
1369 unsigned long phsize;
1370
1371 tbl->parms.net = &init_net;
1372 atomic_set(&tbl->parms.refcnt, 1);
1373 INIT_RCU_HEAD(&tbl->parms.rcu_head);
1374 tbl->parms.reachable_time =
1375 neigh_rand_reach_time(tbl->parms.base_reachable_time);
1376
1377 if (!tbl->kmem_cachep)
1378 tbl->kmem_cachep =
1379 kmem_cache_create(tbl->id, tbl->entry_size, 0,
1380 SLAB_HWCACHE_ALIGN|SLAB_PANIC,
1381 NULL);
1382 tbl->stats = alloc_percpu(struct neigh_statistics);
1383 if (!tbl->stats)
1384 panic("cannot create neighbour cache statistics");
1385
1386 #ifdef CONFIG_PROC_FS
1387 tbl->pde = create_proc_entry(tbl->id, 0, init_net.proc_net_stat);
1388 if (!tbl->pde)
1389 panic("cannot create neighbour proc dir entry");
1390 tbl->pde->proc_fops = &neigh_stat_seq_fops;
1391 tbl->pde->data = tbl;
1392 #endif
1393
1394 tbl->hash_mask = 1;
1395 tbl->hash_buckets = neigh_hash_alloc(tbl->hash_mask + 1);
1396
1397 phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
1398 tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);
1399
1400 if (!tbl->hash_buckets || !tbl->phash_buckets)
1401 panic("cannot allocate neighbour cache hashes");
1402
1403 get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));
1404
1405 rwlock_init(&tbl->lock);
1406 setup_timer(&tbl->gc_timer, neigh_periodic_timer, (unsigned long)tbl);
1407 tbl->gc_timer.expires = now + 1;
1408 add_timer(&tbl->gc_timer);
1409
1410 setup_timer(&tbl->proxy_timer, neigh_proxy_process, (unsigned long)tbl);
1411 skb_queue_head_init_class(&tbl->proxy_queue,
1412 &neigh_table_proxy_queue_class);
1413
1414 tbl->last_flush = now;
1415 tbl->last_rand = now + tbl->parms.reachable_time * 20;
1416 }
1417
1418 void neigh_table_init(struct neigh_table *tbl)
1419 {
1420 struct neigh_table *tmp;
1421
1422 neigh_table_init_no_netlink(tbl);
1423 write_lock(&neigh_tbl_lock);
1424 for (tmp = neigh_tables; tmp; tmp = tmp->next) {
1425 if (tmp->family == tbl->family)
1426 break;
1427 }
1428 tbl->next = neigh_tables;
1429 neigh_tables = tbl;
1430 write_unlock(&neigh_tbl_lock);
1431
1432 if (unlikely(tmp)) {
1433 printk(KERN_ERR "NEIGH: Registering multiple tables for "
1434 "family %d\n", tbl->family);
1435 dump_stack();
1436 }
1437 }
1438
1439 int neigh_table_clear(struct neigh_table *tbl)
1440 {
1441 struct neigh_table **tp;
1442
1443 /* This is not clean... Fix it so the IPv6 module can be unloaded safely */
1444 del_timer_sync(&tbl->gc_timer);
1445 del_timer_sync(&tbl->proxy_timer);
1446 pneigh_queue_purge(&tbl->proxy_queue);
1447 neigh_ifdown(tbl, NULL);
1448 if (atomic_read(&tbl->entries))
1449 printk(KERN_CRIT "neighbour leakage\n");
1450 write_lock(&neigh_tbl_lock);
1451 for (tp = &neigh_tables; *tp; tp = &(*tp)->next) {
1452 if (*tp == tbl) {
1453 *tp = tbl->next;
1454 break;
1455 }
1456 }
1457 write_unlock(&neigh_tbl_lock);
1458
1459 neigh_hash_free(tbl->hash_buckets, tbl->hash_mask + 1);
1460 tbl->hash_buckets = NULL;
1461
1462 kfree(tbl->phash_buckets);
1463 tbl->phash_buckets = NULL;
1464
1465 remove_proc_entry(tbl->id, init_net.proc_net_stat);
1466
1467 free_percpu(tbl->stats);
1468 tbl->stats = NULL;
1469
1470 kmem_cache_destroy(tbl->kmem_cachep);
1471 tbl->kmem_cachep = NULL;
1472
1473 return 0;
1474 }
1475
1476 static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1477 {
1478 struct net *net = skb->sk->sk_net;
1479 struct ndmsg *ndm;
1480 struct nlattr *dst_attr;
1481 struct neigh_table *tbl;
1482 struct net_device *dev = NULL;
1483 int err = -EINVAL;
1484
1485 if (nlmsg_len(nlh) < sizeof(*ndm))
1486 goto out;
1487
1488 dst_attr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_DST);
1489 if (dst_attr == NULL)
1490 goto out;
1491
1492 ndm = nlmsg_data(nlh);
1493 if (ndm->ndm_ifindex) {
1494 dev = dev_get_by_index(net, ndm->ndm_ifindex);
1495 if (dev == NULL) {
1496 err = -ENODEV;
1497 goto out;
1498 }
1499 }
1500
1501 read_lock(&neigh_tbl_lock);
1502 for (tbl = neigh_tables; tbl; tbl = tbl->next) {
1503 struct neighbour *neigh;
1504
1505 if (tbl->family != ndm->ndm_family)
1506 continue;
1507 read_unlock(&neigh_tbl_lock);
1508
1509 if (nla_len(dst_attr) < tbl->key_len)
1510 goto out_dev_put;
1511
1512 if (ndm->ndm_flags & NTF_PROXY) {
1513 err = pneigh_delete(tbl, net, nla_data(dst_attr), dev);
1514 goto out_dev_put;
1515 }
1516
1517 if (dev == NULL)
1518 goto out_dev_put;
1519
1520 neigh = neigh_lookup(tbl, nla_data(dst_attr), dev);
1521 if (neigh == NULL) {
1522 err = -ENOENT;
1523 goto out_dev_put;
1524 }
1525
1526 err = neigh_update(neigh, NULL, NUD_FAILED,
1527 NEIGH_UPDATE_F_OVERRIDE |
1528 NEIGH_UPDATE_F_ADMIN);
1529 neigh_release(neigh);
1530 goto out_dev_put;
1531 }
1532 read_unlock(&neigh_tbl_lock);
1533 err = -EAFNOSUPPORT;
1534
1535 out_dev_put:
1536 if (dev)
1537 dev_put(dev);
1538 out:
1539 return err;
1540 }
1541
1542 static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1543 {
1544 struct net *net = skb->sk->sk_net;
1545 struct ndmsg *ndm;
1546 struct nlattr *tb[NDA_MAX+1];
1547 struct neigh_table *tbl;
1548 struct net_device *dev = NULL;
1549 int err;
1550
1551 err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL);
1552 if (err < 0)
1553 goto out;
1554
1555 err = -EINVAL;
1556 if (tb[NDA_DST] == NULL)
1557 goto out;
1558
1559 ndm = nlmsg_data(nlh);
1560 if (ndm->ndm_ifindex) {
1561 dev = dev_get_by_index(net, ndm->ndm_ifindex);
1562 if (dev == NULL) {
1563 err = -ENODEV;
1564 goto out;
1565 }
1566
1567 if (tb[NDA_LLADDR] && nla_len(tb[NDA_LLADDR]) < dev->addr_len)
1568 goto out_dev_put;
1569 }
1570
1571 read_lock(&neigh_tbl_lock);
1572 for (tbl = neigh_tables; tbl; tbl = tbl->next) {
1573 int flags = NEIGH_UPDATE_F_ADMIN | NEIGH_UPDATE_F_OVERRIDE;
1574 struct neighbour *neigh;
1575 void *dst, *lladdr;
1576
1577 if (tbl->family != ndm->ndm_family)
1578 continue;
1579 read_unlock(&neigh_tbl_lock);
1580
1581 if (nla_len(tb[NDA_DST]) < tbl->key_len)
1582 goto out_dev_put;
1583 dst = nla_data(tb[NDA_DST]);
1584 lladdr = tb[NDA_LLADDR] ? nla_data(tb[NDA_LLADDR]) : NULL;
1585
1586 if (ndm->ndm_flags & NTF_PROXY) {
1587 struct pneigh_entry *pn;
1588
1589 err = -ENOBUFS;
1590 pn = pneigh_lookup(tbl, net, dst, dev, 1);
1591 if (pn) {
1592 pn->flags = ndm->ndm_flags;
1593 err = 0;
1594 }
1595 goto out_dev_put;
1596 }
1597
1598 if (dev == NULL)
1599 goto out_dev_put;
1600
1601 neigh = neigh_lookup(tbl, dst, dev);
1602 if (neigh == NULL) {
1603 if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
1604 err = -ENOENT;
1605 goto out_dev_put;
1606 }
1607
1608 neigh = __neigh_lookup_errno(tbl, dst, dev);
1609 if (IS_ERR(neigh)) {
1610 err = PTR_ERR(neigh);
1611 goto out_dev_put;
1612 }
1613 } else {
1614 if (nlh->nlmsg_flags & NLM_F_EXCL) {
1615 err = -EEXIST;
1616 neigh_release(neigh);
1617 goto out_dev_put;
1618 }
1619
1620 if (!(nlh->nlmsg_flags & NLM_F_REPLACE))
1621 flags &= ~NEIGH_UPDATE_F_OVERRIDE;
1622 }
1623
1624 err = neigh_update(neigh, lladdr, ndm->ndm_state, flags);
1625 neigh_release(neigh);
1626 goto out_dev_put;
1627 }
1628
1629 read_unlock(&neigh_tbl_lock);
1630 err = -EAFNOSUPPORT;
1631
1632 out_dev_put:
1633 if (dev)
1634 dev_put(dev);
1635 out:
1636 return err;
1637 }
1638
1639 static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
1640 {
1641 struct nlattr *nest;
1642
1643 nest = nla_nest_start(skb, NDTA_PARMS);
1644 if (nest == NULL)
1645 return -ENOBUFS;
1646
1647 if (parms->dev)
1648 NLA_PUT_U32(skb, NDTPA_IFINDEX, parms->dev->ifindex);
1649
1650 NLA_PUT_U32(skb, NDTPA_REFCNT, atomic_read(&parms->refcnt));
1651 NLA_PUT_U32(skb, NDTPA_QUEUE_LEN, parms->queue_len);
1652 NLA_PUT_U32(skb, NDTPA_PROXY_QLEN, parms->proxy_qlen);
1653 NLA_PUT_U32(skb, NDTPA_APP_PROBES, parms->app_probes);
1654 NLA_PUT_U32(skb, NDTPA_UCAST_PROBES, parms->ucast_probes);
1655 NLA_PUT_U32(skb, NDTPA_MCAST_PROBES, parms->mcast_probes);
1656 NLA_PUT_MSECS(skb, NDTPA_REACHABLE_TIME, parms->reachable_time);
1657 NLA_PUT_MSECS(skb, NDTPA_BASE_REACHABLE_TIME,
1658 parms->base_reachable_time);
1659 NLA_PUT_MSECS(skb, NDTPA_GC_STALETIME, parms->gc_staletime);
1660 NLA_PUT_MSECS(skb, NDTPA_DELAY_PROBE_TIME, parms->delay_probe_time);
1661 NLA_PUT_MSECS(skb, NDTPA_RETRANS_TIME, parms->retrans_time);
1662 NLA_PUT_MSECS(skb, NDTPA_ANYCAST_DELAY, parms->anycast_delay);
1663 NLA_PUT_MSECS(skb, NDTPA_PROXY_DELAY, parms->proxy_delay);
1664 NLA_PUT_MSECS(skb, NDTPA_LOCKTIME, parms->locktime);
1665
1666 return nla_nest_end(skb, nest);
1667
1668 nla_put_failure:
1669 return nla_nest_cancel(skb, nest);
1670 }
1671
1672 static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
1673 u32 pid, u32 seq, int type, int flags)
1674 {
1675 struct nlmsghdr *nlh;
1676 struct ndtmsg *ndtmsg;
1677
1678 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
1679 if (nlh == NULL)
1680 return -EMSGSIZE;
1681
1682 ndtmsg = nlmsg_data(nlh);
1683
1684 read_lock_bh(&tbl->lock);
1685 ndtmsg->ndtm_family = tbl->family;
1686 ndtmsg->ndtm_pad1 = 0;
1687 ndtmsg->ndtm_pad2 = 0;
1688
1689 NLA_PUT_STRING(skb, NDTA_NAME, tbl->id);
1690 NLA_PUT_MSECS(skb, NDTA_GC_INTERVAL, tbl->gc_interval);
1691 NLA_PUT_U32(skb, NDTA_THRESH1, tbl->gc_thresh1);
1692 NLA_PUT_U32(skb, NDTA_THRESH2, tbl->gc_thresh2);
1693 NLA_PUT_U32(skb, NDTA_THRESH3, tbl->gc_thresh3);
1694
1695 {
1696 unsigned long now = jiffies;
1697 unsigned int flush_delta = now - tbl->last_flush;
1698 unsigned int rand_delta = now - tbl->last_rand;
1699
1700 struct ndt_config ndc = {
1701 .ndtc_key_len = tbl->key_len,
1702 .ndtc_entry_size = tbl->entry_size,
1703 .ndtc_entries = atomic_read(&tbl->entries),
1704 .ndtc_last_flush = jiffies_to_msecs(flush_delta),
1705 .ndtc_last_rand = jiffies_to_msecs(rand_delta),
1706 .ndtc_hash_rnd = tbl->hash_rnd,
1707 .ndtc_hash_mask = tbl->hash_mask,
1708 .ndtc_hash_chain_gc = tbl->hash_chain_gc,
1709 .ndtc_proxy_qlen = tbl->proxy_queue.qlen,
1710 };
1711
1712 NLA_PUT(skb, NDTA_CONFIG, sizeof(ndc), &ndc);
1713 }
1714
1715 {
1716 int cpu;
1717 struct ndt_stats ndst;
1718
1719 memset(&ndst, 0, sizeof(ndst));
1720
1721 for_each_possible_cpu(cpu) {
1722 struct neigh_statistics *st;
1723
1724 st = per_cpu_ptr(tbl->stats, cpu);
1725 ndst.ndts_allocs += st->allocs;
1726 ndst.ndts_destroys += st->destroys;
1727 ndst.ndts_hash_grows += st->hash_grows;
1728 ndst.ndts_res_failed += st->res_failed;
1729 ndst.ndts_lookups += st->lookups;
1730 ndst.ndts_hits += st->hits;
1731 ndst.ndts_rcv_probes_mcast += st->rcv_probes_mcast;
1732 ndst.ndts_rcv_probes_ucast += st->rcv_probes_ucast;
1733 ndst.ndts_periodic_gc_runs += st->periodic_gc_runs;
1734 ndst.ndts_forced_gc_runs += st->forced_gc_runs;
1735 }
1736
1737 NLA_PUT(skb, NDTA_STATS, sizeof(ndst), &ndst);
1738 }
1739
1740 BUG_ON(tbl->parms.dev);
1741 if (neightbl_fill_parms(skb, &tbl->parms) < 0)
1742 goto nla_put_failure;
1743
1744 read_unlock_bh(&tbl->lock);
1745 return nlmsg_end(skb, nlh);
1746
1747 nla_put_failure:
1748 read_unlock_bh(&tbl->lock);
1749 nlmsg_cancel(skb, nlh);
1750 return -EMSGSIZE;
1751 }
1752
1753 static int neightbl_fill_param_info(struct sk_buff *skb,
1754 struct neigh_table *tbl,
1755 struct neigh_parms *parms,
1756 u32 pid, u32 seq, int type,
1757 unsigned int flags)
1758 {
1759 struct ndtmsg *ndtmsg;
1760 struct nlmsghdr *nlh;
1761
1762 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
1763 if (nlh == NULL)
1764 return -EMSGSIZE;
1765
1766 ndtmsg = nlmsg_data(nlh);
1767
1768 read_lock_bh(&tbl->lock);
1769 ndtmsg->ndtm_family = tbl->family;
1770 ndtmsg->ndtm_pad1 = 0;
1771 ndtmsg->ndtm_pad2 = 0;
1772
1773 if (nla_put_string(skb, NDTA_NAME, tbl->id) < 0 ||
1774 neightbl_fill_parms(skb, parms) < 0)
1775 goto errout;
1776
1777 read_unlock_bh(&tbl->lock);
1778 return nlmsg_end(skb, nlh);
1779 errout:
1780 read_unlock_bh(&tbl->lock);
1781 nlmsg_cancel(skb, nlh);
1782 return -EMSGSIZE;
1783 }
1784
1785 static const struct nla_policy nl_neightbl_policy[NDTA_MAX+1] = {
1786 [NDTA_NAME] = { .type = NLA_STRING },
1787 [NDTA_THRESH1] = { .type = NLA_U32 },
1788 [NDTA_THRESH2] = { .type = NLA_U32 },
1789 [NDTA_THRESH3] = { .type = NLA_U32 },
1790 [NDTA_GC_INTERVAL] = { .type = NLA_U64 },
1791 [NDTA_PARMS] = { .type = NLA_NESTED },
1792 };
1793
1794 static const struct nla_policy nl_ntbl_parm_policy[NDTPA_MAX+1] = {
1795 [NDTPA_IFINDEX] = { .type = NLA_U32 },
1796 [NDTPA_QUEUE_LEN] = { .type = NLA_U32 },
1797 [NDTPA_PROXY_QLEN] = { .type = NLA_U32 },
1798 [NDTPA_APP_PROBES] = { .type = NLA_U32 },
1799 [NDTPA_UCAST_PROBES] = { .type = NLA_U32 },
1800 [NDTPA_MCAST_PROBES] = { .type = NLA_U32 },
1801 [NDTPA_BASE_REACHABLE_TIME] = { .type = NLA_U64 },
1802 [NDTPA_GC_STALETIME] = { .type = NLA_U64 },
1803 [NDTPA_DELAY_PROBE_TIME] = { .type = NLA_U64 },
1804 [NDTPA_RETRANS_TIME] = { .type = NLA_U64 },
1805 [NDTPA_ANYCAST_DELAY] = { .type = NLA_U64 },
1806 [NDTPA_PROXY_DELAY] = { .type = NLA_U64 },
1807 [NDTPA_LOCKTIME] = { .type = NLA_U64 },
1808 };
1809
1810 static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1811 {
1812 struct net *net = skb->sk->sk_net;
1813 struct neigh_table *tbl;
1814 struct ndtmsg *ndtmsg;
1815 struct nlattr *tb[NDTA_MAX+1];
1816 int err;
1817
1818 err = nlmsg_parse(nlh, sizeof(*ndtmsg), tb, NDTA_MAX,
1819 nl_neightbl_policy);
1820 if (err < 0)
1821 goto errout;
1822
1823 if (tb[NDTA_NAME] == NULL) {
1824 err = -EINVAL;
1825 goto errout;
1826 }
1827
1828 ndtmsg = nlmsg_data(nlh);
1829 read_lock(&neigh_tbl_lock);
1830 for (tbl = neigh_tables; tbl; tbl = tbl->next) {
1831 if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
1832 continue;
1833
1834 if (nla_strcmp(tb[NDTA_NAME], tbl->id) == 0)
1835 break;
1836 }
1837
1838 if (tbl == NULL) {
1839 err = -ENOENT;
1840 goto errout_locked;
1841 }
1842
1843 /*
1844 * We acquire tbl->lock to be nice to the periodic timers and
1845 * make sure they always see a consistent set of values.
1846 */
1847 write_lock_bh(&tbl->lock);
1848
1849 if (tb[NDTA_PARMS]) {
1850 struct nlattr *tbp[NDTPA_MAX+1];
1851 struct neigh_parms *p;
1852 int i, ifindex = 0;
1853
1854 err = nla_parse_nested(tbp, NDTPA_MAX, tb[NDTA_PARMS],
1855 nl_ntbl_parm_policy);
1856 if (err < 0)
1857 goto errout_tbl_lock;
1858
1859 if (tbp[NDTPA_IFINDEX])
1860 ifindex = nla_get_u32(tbp[NDTPA_IFINDEX]);
1861
1862 p = lookup_neigh_params(tbl, net, ifindex);
1863 if (p == NULL) {
1864 err = -ENOENT;
1865 goto errout_tbl_lock;
1866 }
1867
1868 for (i = 1; i <= NDTPA_MAX; i++) {
1869 if (tbp[i] == NULL)
1870 continue;
1871
1872 switch (i) {
1873 case NDTPA_QUEUE_LEN:
1874 p->queue_len = nla_get_u32(tbp[i]);
1875 break;
1876 case NDTPA_PROXY_QLEN:
1877 p->proxy_qlen = nla_get_u32(tbp[i]);
1878 break;
1879 case NDTPA_APP_PROBES:
1880 p->app_probes = nla_get_u32(tbp[i]);
1881 break;
1882 case NDTPA_UCAST_PROBES:
1883 p->ucast_probes = nla_get_u32(tbp[i]);
1884 break;
1885 case NDTPA_MCAST_PROBES:
1886 p->mcast_probes = nla_get_u32(tbp[i]);
1887 break;
1888 case NDTPA_BASE_REACHABLE_TIME:
1889 p->base_reachable_time = nla_get_msecs(tbp[i]);
1890 break;
1891 case NDTPA_GC_STALETIME:
1892 p->gc_staletime = nla_get_msecs(tbp[i]);
1893 break;
1894 case NDTPA_DELAY_PROBE_TIME:
1895 p->delay_probe_time = nla_get_msecs(tbp[i]);
1896 break;
1897 case NDTPA_RETRANS_TIME:
1898 p->retrans_time = nla_get_msecs(tbp[i]);
1899 break;
1900 case NDTPA_ANYCAST_DELAY:
1901 p->anycast_delay = nla_get_msecs(tbp[i]);
1902 break;
1903 case NDTPA_PROXY_DELAY:
1904 p->proxy_delay = nla_get_msecs(tbp[i]);
1905 break;
1906 case NDTPA_LOCKTIME:
1907 p->locktime = nla_get_msecs(tbp[i]);
1908 break;
1909 }
1910 }
1911 }
1912
1913 if (tb[NDTA_THRESH1])
1914 tbl->gc_thresh1 = nla_get_u32(tb[NDTA_THRESH1]);
1915
1916 if (tb[NDTA_THRESH2])
1917 tbl->gc_thresh2 = nla_get_u32(tb[NDTA_THRESH2]);
1918
1919 if (tb[NDTA_THRESH3])
1920 tbl->gc_thresh3 = nla_get_u32(tb[NDTA_THRESH3]);
1921
1922 if (tb[NDTA_GC_INTERVAL])
1923 tbl->gc_interval = nla_get_msecs(tb[NDTA_GC_INTERVAL]);
1924
1925 err = 0;
1926
1927 errout_tbl_lock:
1928 write_unlock_bh(&tbl->lock);
1929 errout_locked:
1930 read_unlock(&neigh_tbl_lock);
1931 errout:
1932 return err;
1933 }
1934
1935 static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
1936 {
1937 struct net *net = skb->sk->sk_net;
1938 int family, tidx, nidx = 0;
1939 int tbl_skip = cb->args[0];
1940 int neigh_skip = cb->args[1];
1941 struct neigh_table *tbl;
1942
1943 family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
1944
1945 read_lock(&neigh_tbl_lock);
1946 for (tbl = neigh_tables, tidx = 0; tbl; tbl = tbl->next, tidx++) {
1947 struct neigh_parms *p;
1948
1949 if (tidx < tbl_skip || (family && tbl->family != family))
1950 continue;
1951
1952 if (neightbl_fill_info(skb, tbl, NETLINK_CB(cb->skb).pid,
1953 cb->nlh->nlmsg_seq, RTM_NEWNEIGHTBL,
1954 NLM_F_MULTI) <= 0)
1955 break;
1956
1957 for (nidx = 0, p = tbl->parms.next; p; p = p->next) {
1958 if (net != p->net)
1959 continue;
1960
1961 if (nidx++ < neigh_skip)
1962 continue;
1963
1964 if (neightbl_fill_param_info(skb, tbl, p,
1965 NETLINK_CB(cb->skb).pid,
1966 cb->nlh->nlmsg_seq,
1967 RTM_NEWNEIGHTBL,
1968 NLM_F_MULTI) <= 0)
1969 goto out;
1970 }
1971
1972 neigh_skip = 0;
1973 }
1974 out:
1975 read_unlock(&neigh_tbl_lock);
1976 cb->args[0] = tidx;
1977 cb->args[1] = nidx;
1978
1979 return skb->len;
1980 }
1981
1982 static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
1983 u32 pid, u32 seq, int type, unsigned int flags)
1984 {
1985 unsigned long now = jiffies;
1986 struct nda_cacheinfo ci;
1987 struct nlmsghdr *nlh;
1988 struct ndmsg *ndm;
1989
1990 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
1991 if (nlh == NULL)
1992 return -EMSGSIZE;
1993
1994 ndm = nlmsg_data(nlh);
1995 ndm->ndm_family = neigh->ops->family;
1996 ndm->ndm_pad1 = 0;
1997 ndm->ndm_pad2 = 0;
1998 ndm->ndm_flags = neigh->flags;
1999 ndm->ndm_type = neigh->type;
2000 ndm->ndm_ifindex = neigh->dev->ifindex;
2001
2002 NLA_PUT(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key);
2003
2004 read_lock_bh(&neigh->lock);
2005 ndm->ndm_state = neigh->nud_state;
2006 if ((neigh->nud_state & NUD_VALID) &&
2007 nla_put(skb, NDA_LLADDR, neigh->dev->addr_len, neigh->ha) < 0) {
2008 read_unlock_bh(&neigh->lock);
2009 goto nla_put_failure;
2010 }
2011
2012 ci.ndm_used = now - neigh->used;
2013 ci.ndm_confirmed = now - neigh->confirmed;
2014 ci.ndm_updated = now - neigh->updated;
2015 ci.ndm_refcnt = atomic_read(&neigh->refcnt) - 1;
2016 read_unlock_bh(&neigh->lock);
2017
2018 NLA_PUT_U32(skb, NDA_PROBES, atomic_read(&neigh->probes));
2019 NLA_PUT(skb, NDA_CACHEINFO, sizeof(ci), &ci);
2020
2021 return nlmsg_end(skb, nlh);
2022
2023 nla_put_failure:
2024 nlmsg_cancel(skb, nlh);
2025 return -EMSGSIZE;
2026 }
2027
2028 static void neigh_update_notify(struct neighbour *neigh)
2029 {
2030 call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
2031 __neigh_notify(neigh, RTM_NEWNEIGH, 0);
2032 }
2033
2034 static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2035 struct netlink_callback *cb)
2036 {
2037 struct net * net = skb->sk->sk_net;
2038 struct neighbour *n;
2039 int rc, h, s_h = cb->args[1];
2040 int idx, s_idx = idx = cb->args[2];
2041
2042 read_lock_bh(&tbl->lock);
2043 for (h = 0; h <= tbl->hash_mask; h++) {
2044 if (h < s_h)
2045 continue;
2046 if (h > s_h)
2047 s_idx = 0;
2048 for (n = tbl->hash_buckets[h], idx = 0; n; n = n->next) {
2049 int lidx;
2050 if (n->dev->nd_net != net)
2051 continue;
2052 lidx = idx++;
2053 if (lidx < s_idx)
2054 continue;
2055 if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).pid,
2056 cb->nlh->nlmsg_seq,
2057 RTM_NEWNEIGH,
2058 NLM_F_MULTI) <= 0) {
2059 read_unlock_bh(&tbl->lock);
2060 rc = -1;
2061 goto out;
2062 }
2063 }
2064 }
2065 read_unlock_bh(&tbl->lock);
2066 rc = skb->len;
2067 out:
2068 cb->args[1] = h;
2069 cb->args[2] = idx;
2070 return rc;
2071 }
2072
2073 static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2074 {
2075 struct neigh_table *tbl;
2076 int t, family, s_t;
2077
2078 read_lock(&neigh_tbl_lock);
2079 family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
2080 s_t = cb->args[0];
2081
2082 for (tbl = neigh_tables, t = 0; tbl; tbl = tbl->next, t++) {
2083 if (t < s_t || (family && tbl->family != family))
2084 continue;
2085 if (t > s_t)
2086 memset(&cb->args[1], 0, sizeof(cb->args) -
2087 sizeof(cb->args[0]));
2088 if (neigh_dump_table(tbl, skb, cb) < 0)
2089 break;
2090 }
2091 read_unlock(&neigh_tbl_lock);
2092
2093 cb->args[0] = t;
2094 return skb->len;
2095 }
2096
2097 void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
2098 {
2099 int chain;
2100
2101 read_lock_bh(&tbl->lock);
2102 for (chain = 0; chain <= tbl->hash_mask; chain++) {
2103 struct neighbour *n;
2104
2105 for (n = tbl->hash_buckets[chain]; n; n = n->next)
2106 cb(n, cookie);
2107 }
2108 read_unlock_bh(&tbl->lock);
2109 }
2110 EXPORT_SYMBOL(neigh_for_each);
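/*
 * A minimal usage sketch (the callback and table are hypothetical):
 * neigh_for_each() holds tbl->lock for the duration, so the callback
 * must stay simple.
 *
 *	static void count_entry(struct neighbour *n, void *cookie)
 *	{
 *		(*(int *)cookie)++;
 *	}
 *
 *	int count = 0;
 *	neigh_for_each(tbl, count_entry, &count);
 */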
2111
2112 /* The tbl->lock must be held as a writer and BH disabled. */
2113 void __neigh_for_each_release(struct neigh_table *tbl,
2114 int (*cb)(struct neighbour *))
2115 {
2116 int chain;
2117
2118 for (chain = 0; chain <= tbl->hash_mask; chain++) {
2119 struct neighbour *n, **np;
2120
2121 np = &tbl->hash_buckets[chain];
2122 while ((n = *np) != NULL) {
2123 int release;
2124
2125 write_lock(&n->lock);
2126 release = cb(n);
2127 if (release) {
2128 *np = n->next;
2129 n->dead = 1;
2130 } else
2131 np = &n->next;
2132 write_unlock(&n->lock);
2133 if (release)
2134 neigh_cleanup_and_release(n);
2135 }
2136 }
2137 }
2138 EXPORT_SYMBOL(__neigh_for_each_release);
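/*
 * Hedged usage sketch for __neigh_for_each_release();
 * release_if_failed is an illustrative name.  The callback is invoked
 * with n->lock write-held; returning nonzero unlinks the entry and
 * hands it to neigh_cleanup_and_release().  A real callback must also
 * take care that no timer or queued skb still needs the entry.
 */
static int release_if_failed(struct neighbour *n)
{
	return n->nud_state & NUD_FAILED;
}

/* per the comment above, the caller holds tbl->lock as a writer with
 * BH disabled:
 *	write_lock_bh(&tbl->lock);
 *	__neigh_for_each_release(tbl, release_if_failed);
 *	write_unlock_bh(&tbl->lock);
 */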
2139
2140 #ifdef CONFIG_PROC_FS
2141
2142 static struct neighbour *neigh_get_first(struct seq_file *seq)
2143 {
2144 struct neigh_seq_state *state = seq->private;
2145 struct net *net = state->net;
2146 struct neigh_table *tbl = state->tbl;
2147 struct neighbour *n = NULL;
2148 	int bucket;
2149
2150 state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
2151 for (bucket = 0; bucket <= tbl->hash_mask; bucket++) {
2152 n = tbl->hash_buckets[bucket];
2153
2154 while (n) {
2155 if (n->dev->nd_net != net)
2156 goto next;
2157 if (state->neigh_sub_iter) {
2158 loff_t fakep = 0;
2159 void *v;
2160
2161 v = state->neigh_sub_iter(state, n, &fakep);
2162 if (!v)
2163 goto next;
2164 }
2165 if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
2166 break;
2167 if (n->nud_state & ~NUD_NOARP)
2168 break;
2169 next:
2170 n = n->next;
2171 }
2172
2173 if (n)
2174 break;
2175 }
2176 state->bucket = bucket;
2177
2178 return n;
2179 }
2180
2181 static struct neighbour *neigh_get_next(struct seq_file *seq,
2182 struct neighbour *n,
2183 loff_t *pos)
2184 {
2185 struct neigh_seq_state *state = seq->private;
2186 struct net *net = state->net;
2187 struct neigh_table *tbl = state->tbl;
2188
2189 if (state->neigh_sub_iter) {
2190 void *v = state->neigh_sub_iter(state, n, pos);
2191 if (v)
2192 return n;
2193 }
2194 n = n->next;
2195
2196 while (1) {
2197 while (n) {
2198 if (n->dev->nd_net != net)
2199 goto next;
2200 if (state->neigh_sub_iter) {
2201 void *v = state->neigh_sub_iter(state, n, pos);
2202 if (v)
2203 return n;
2204 goto next;
2205 }
2206 if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
2207 break;
2208
2209 if (n->nud_state & ~NUD_NOARP)
2210 break;
2211 next:
2212 n = n->next;
2213 }
2214
2215 if (n)
2216 break;
2217
2218 if (++state->bucket > tbl->hash_mask)
2219 break;
2220
2221 n = tbl->hash_buckets[state->bucket];
2222 }
2223
2224 if (n && pos)
2225 --(*pos);
2226 return n;
2227 }
2228
2229 static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
2230 {
2231 struct neighbour *n = neigh_get_first(seq);
2232
2233 if (n) {
2234 while (*pos) {
2235 n = neigh_get_next(seq, n, pos);
2236 if (!n)
2237 break;
2238 }
2239 }
2240 return *pos ? NULL : n;
2241 }
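/*
 * Seek idiom used above: *pos holds how many entries remain to be
 * skipped.  neigh_get_next() decrements it for every entry it consumes
 * (when called with a non-NULL pos), so neigh_get_idx() either lands on
 * the requested entry or returns NULL with *pos still nonzero when the
 * table is too short.
 */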
2242
2243 static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
2244 {
2245 struct neigh_seq_state *state = seq->private;
2246 	struct net *net = state->net;
2247 	struct neigh_table *tbl = state->tbl;
2248 	struct pneigh_entry *pn = NULL;
2249 	int bucket;
2250
2251 state->flags |= NEIGH_SEQ_IS_PNEIGH;
2252 for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
2253 pn = tbl->phash_buckets[bucket];
2254 while (pn && (pn->net != net))
2255 pn = pn->next;
2256 if (pn)
2257 break;
2258 }
2259 state->bucket = bucket;
2260
2261 return pn;
2262 }
2263
2264 static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
2265 struct pneigh_entry *pn,
2266 loff_t *pos)
2267 {
2268 struct neigh_seq_state *state = seq->private;
2269 	struct net *net = state->net;
2270 struct neigh_table *tbl = state->tbl;
2271
2272 pn = pn->next;
2273 while (!pn) {
2274 if (++state->bucket > PNEIGH_HASHMASK)
2275 break;
2276 pn = tbl->phash_buckets[state->bucket];
2277 while (pn && (pn->net != net))
2278 pn = pn->next;
2279 if (pn)
2280 break;
2281 }
2282
2283 if (pn && pos)
2284 --(*pos);
2285
2286 return pn;
2287 }
2288
2289 static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
2290 {
2291 struct pneigh_entry *pn = pneigh_get_first(seq);
2292
2293 if (pn) {
2294 while (*pos) {
2295 pn = pneigh_get_next(seq, pn, pos);
2296 if (!pn)
2297 break;
2298 }
2299 }
2300 return *pos ? NULL : pn;
2301 }
2302
2303 static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
2304 {
2305 struct neigh_seq_state *state = seq->private;
2306 void *rc;
2307
2308 rc = neigh_get_idx(seq, pos);
2309 if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
2310 rc = pneigh_get_idx(seq, pos);
2311
2312 return rc;
2313 }
2314
2315 void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
2316 {
2317 struct neigh_seq_state *state = seq->private;
2318 loff_t pos_minus_one;
2319
2320 state->tbl = tbl;
2321 state->bucket = 0;
2322 state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);
2323
2324 read_lock_bh(&tbl->lock);
2325
2326 pos_minus_one = *pos - 1;
2327 return *pos ? neigh_get_idx_any(seq, &pos_minus_one) : SEQ_START_TOKEN;
2328 }
2329 EXPORT_SYMBOL(neigh_seq_start);
2330
2331 void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2332 {
2333 struct neigh_seq_state *state;
2334 void *rc;
2335
2336 if (v == SEQ_START_TOKEN) {
2337 rc = neigh_get_idx(seq, pos);
2338 goto out;
2339 }
2340
2341 state = seq->private;
2342 if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) {
2343 rc = neigh_get_next(seq, v, NULL);
2344 if (rc)
2345 goto out;
2346 if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY))
2347 rc = pneigh_get_first(seq);
2348 } else {
2349 BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY);
2350 rc = pneigh_get_next(seq, v, NULL);
2351 }
2352 out:
2353 ++(*pos);
2354 return rc;
2355 }
2356 EXPORT_SYMBOL(neigh_seq_next);
2357
2358 void neigh_seq_stop(struct seq_file *seq, void *v)
2359 {
2360 struct neigh_seq_state *state = seq->private;
2361 struct neigh_table *tbl = state->tbl;
2362
2363 read_unlock_bh(&tbl->lock);
2364 }
2365 EXPORT_SYMBOL(neigh_seq_stop);
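/*
 * Hedged example of wiring these helpers into a protocol's /proc/net
 * seq_file, modelled on net/ipv4/arp.c (arp_tbl and arp_seq_show are
 * defined there).  neigh_seq_start() takes tbl->lock read-side and
 * neigh_seq_stop() drops it, bracketing the whole walk:
 */
static void *arp_seq_start(struct seq_file *seq, loff_t *pos)
{
	return neigh_seq_start(seq, pos, &arp_tbl, NEIGH_SEQ_SKIP_NOARP);
}

static const struct seq_operations arp_seq_ops = {
	.start	= arp_seq_start,
	.next	= neigh_seq_next,
	.stop	= neigh_seq_stop,
	.show	= arp_seq_show,
};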
2366
2367 /* statistics via seq_file */
2368
2369 static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
2370 {
2371 struct proc_dir_entry *pde = seq->private;
2372 struct neigh_table *tbl = pde->data;
2373 int cpu;
2374
2375 if (*pos == 0)
2376 return SEQ_START_TOKEN;
2377
2378 for (cpu = *pos-1; cpu < NR_CPUS; ++cpu) {
2379 if (!cpu_possible(cpu))
2380 continue;
2381 *pos = cpu+1;
2382 return per_cpu_ptr(tbl->stats, cpu);
2383 }
2384 return NULL;
2385 }
2386
2387 static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2388 {
2389 struct proc_dir_entry *pde = seq->private;
2390 struct neigh_table *tbl = pde->data;
2391 int cpu;
2392
2393 for (cpu = *pos; cpu < NR_CPUS; ++cpu) {
2394 if (!cpu_possible(cpu))
2395 continue;
2396 *pos = cpu+1;
2397 return per_cpu_ptr(tbl->stats, cpu);
2398 }
2399 return NULL;
2400 }
2401
2402 static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
2403 {
2404
2405 }
2406
2407 static int neigh_stat_seq_show(struct seq_file *seq, void *v)
2408 {
2409 struct proc_dir_entry *pde = seq->private;
2410 struct neigh_table *tbl = pde->data;
2411 struct neigh_statistics *st = v;
2412
2413 if (v == SEQ_START_TOKEN) {
2414 seq_printf(seq, "entries allocs destroys hash_grows lookups hits res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs\n");
2415 return 0;
2416 }
2417
2418 seq_printf(seq, "%08x %08lx %08lx %08lx %08lx %08lx %08lx "
2419 "%08lx %08lx %08lx %08lx\n",
2420 atomic_read(&tbl->entries),
2421
2422 st->allocs,
2423 st->destroys,
2424 st->hash_grows,
2425
2426 st->lookups,
2427 st->hits,
2428
2429 st->res_failed,
2430
2431 st->rcv_probes_mcast,
2432 st->rcv_probes_ucast,
2433
2434 st->periodic_gc_runs,
2435 st->forced_gc_runs
2436 );
2437
2438 return 0;
2439 }
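/*
 * The table's statistics appear under /proc/net/stat/<tbl->id>,
 * e.g. /proc/net/stat/arp_cache, with one hex row per possible CPU in
 * the column order printed above.
 */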
2440
2441 static const struct seq_operations neigh_stat_seq_ops = {
2442 .start = neigh_stat_seq_start,
2443 .next = neigh_stat_seq_next,
2444 .stop = neigh_stat_seq_stop,
2445 .show = neigh_stat_seq_show,
2446 };
2447
2448 static int neigh_stat_seq_open(struct inode *inode, struct file *file)
2449 {
2450 int ret = seq_open(file, &neigh_stat_seq_ops);
2451
2452 if (!ret) {
2453 struct seq_file *sf = file->private_data;
2454 sf->private = PDE(inode);
2455 }
2456 return ret;
2457 }
2458
2459 static const struct file_operations neigh_stat_seq_fops = {
2460 .owner = THIS_MODULE,
2461 .open = neigh_stat_seq_open,
2462 .read = seq_read,
2463 .llseek = seq_lseek,
2464 .release = seq_release,
2465 };
2466
2467 #endif /* CONFIG_PROC_FS */
2468
2469 static inline size_t neigh_nlmsg_size(void)
2470 {
2471 return NLMSG_ALIGN(sizeof(struct ndmsg))
2472 + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
2473 + nla_total_size(MAX_ADDR_LEN) /* NDA_LLADDR */
2474 + nla_total_size(sizeof(struct nda_cacheinfo))
2475 + nla_total_size(4); /* NDA_PROBES */
2476 }
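/*
 * Worked example, assuming MAX_ADDR_LEN == 32 and the standard
 * structure layouts: NLMSG_ALIGN(sizeof(struct ndmsg)) = 12 bytes,
 * each address attribute costs nla_total_size(32) = 36, the cacheinfo
 * attribute nla_total_size(16) = 20 and NDA_PROBES nla_total_size(4)
 * = 8, for a worst-case payload of 12 + 36 + 36 + 20 + 8 = 112 bytes
 * per neighbour message.
 */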
2477
2478 static void __neigh_notify(struct neighbour *n, int type, int flags)
2479 {
2480 struct net *net = n->dev->nd_net;
2481 struct sk_buff *skb;
2482 int err = -ENOBUFS;
2483
2484 skb = nlmsg_new(neigh_nlmsg_size(), GFP_ATOMIC);
2485 if (skb == NULL)
2486 goto errout;
2487
2488 err = neigh_fill_info(skb, n, 0, 0, type, flags);
2489 if (err < 0) {
2490 /* -EMSGSIZE implies BUG in neigh_nlmsg_size() */
2491 WARN_ON(err == -EMSGSIZE);
2492 kfree_skb(skb);
2493 goto errout;
2494 }
2495 err = rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
2496 errout:
2497 if (err < 0)
2498 rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
2499 }
2500
2501 #ifdef CONFIG_ARPD
2502 void neigh_app_ns(struct neighbour *n)
2503 {
2504 __neigh_notify(n, RTM_GETNEIGH, NLM_F_REQUEST);
2505 }
2506 #endif /* CONFIG_ARPD */
2507
2508 #ifdef CONFIG_SYSCTL
2509
2510 static struct neigh_sysctl_table {
2511 struct ctl_table_header *sysctl_header;
2512 struct ctl_table neigh_vars[__NET_NEIGH_MAX];
2513 char *dev_name;
2514 } neigh_sysctl_template __read_mostly = {
2515 .neigh_vars = {
2516 {
2517 .ctl_name = NET_NEIGH_MCAST_SOLICIT,
2518 .procname = "mcast_solicit",
2519 .maxlen = sizeof(int),
2520 .mode = 0644,
2521 .proc_handler = &proc_dointvec,
2522 },
2523 {
2524 .ctl_name = NET_NEIGH_UCAST_SOLICIT,
2525 .procname = "ucast_solicit",
2526 .maxlen = sizeof(int),
2527 .mode = 0644,
2528 .proc_handler = &proc_dointvec,
2529 },
2530 {
2531 .ctl_name = NET_NEIGH_APP_SOLICIT,
2532 .procname = "app_solicit",
2533 .maxlen = sizeof(int),
2534 .mode = 0644,
2535 .proc_handler = &proc_dointvec,
2536 },
2537 {
2538 .procname = "retrans_time",
2539 .maxlen = sizeof(int),
2540 .mode = 0644,
2541 .proc_handler = &proc_dointvec_userhz_jiffies,
2542 },
2543 {
2544 .ctl_name = NET_NEIGH_REACHABLE_TIME,
2545 .procname = "base_reachable_time",
2546 .maxlen = sizeof(int),
2547 .mode = 0644,
2548 .proc_handler = &proc_dointvec_jiffies,
2549 .strategy = &sysctl_jiffies,
2550 },
2551 {
2552 .ctl_name = NET_NEIGH_DELAY_PROBE_TIME,
2553 .procname = "delay_first_probe_time",
2554 .maxlen = sizeof(int),
2555 .mode = 0644,
2556 .proc_handler = &proc_dointvec_jiffies,
2557 .strategy = &sysctl_jiffies,
2558 },
2559 {
2560 .ctl_name = NET_NEIGH_GC_STALE_TIME,
2561 .procname = "gc_stale_time",
2562 .maxlen = sizeof(int),
2563 .mode = 0644,
2564 .proc_handler = &proc_dointvec_jiffies,
2565 .strategy = &sysctl_jiffies,
2566 },
2567 {
2568 .ctl_name = NET_NEIGH_UNRES_QLEN,
2569 .procname = "unres_qlen",
2570 .maxlen = sizeof(int),
2571 .mode = 0644,
2572 .proc_handler = &proc_dointvec,
2573 },
2574 {
2575 .ctl_name = NET_NEIGH_PROXY_QLEN,
2576 .procname = "proxy_qlen",
2577 .maxlen = sizeof(int),
2578 .mode = 0644,
2579 .proc_handler = &proc_dointvec,
2580 },
2581 {
2582 .procname = "anycast_delay",
2583 .maxlen = sizeof(int),
2584 .mode = 0644,
2585 .proc_handler = &proc_dointvec_userhz_jiffies,
2586 },
2587 {
2588 .procname = "proxy_delay",
2589 .maxlen = sizeof(int),
2590 .mode = 0644,
2591 .proc_handler = &proc_dointvec_userhz_jiffies,
2592 },
2593 {
2594 .procname = "locktime",
2595 .maxlen = sizeof(int),
2596 .mode = 0644,
2597 .proc_handler = &proc_dointvec_userhz_jiffies,
2598 },
2599 {
2600 .ctl_name = NET_NEIGH_RETRANS_TIME_MS,
2601 .procname = "retrans_time_ms",
2602 .maxlen = sizeof(int),
2603 .mode = 0644,
2604 .proc_handler = &proc_dointvec_ms_jiffies,
2605 .strategy = &sysctl_ms_jiffies,
2606 },
2607 {
2608 .ctl_name = NET_NEIGH_REACHABLE_TIME_MS,
2609 .procname = "base_reachable_time_ms",
2610 .maxlen = sizeof(int),
2611 .mode = 0644,
2612 .proc_handler = &proc_dointvec_ms_jiffies,
2613 .strategy = &sysctl_ms_jiffies,
2614 },
2615 {
2616 .ctl_name = NET_NEIGH_GC_INTERVAL,
2617 .procname = "gc_interval",
2618 .maxlen = sizeof(int),
2619 .mode = 0644,
2620 .proc_handler = &proc_dointvec_jiffies,
2621 .strategy = &sysctl_jiffies,
2622 },
2623 {
2624 .ctl_name = NET_NEIGH_GC_THRESH1,
2625 .procname = "gc_thresh1",
2626 .maxlen = sizeof(int),
2627 .mode = 0644,
2628 .proc_handler = &proc_dointvec,
2629 },
2630 {
2631 .ctl_name = NET_NEIGH_GC_THRESH2,
2632 .procname = "gc_thresh2",
2633 .maxlen = sizeof(int),
2634 .mode = 0644,
2635 .proc_handler = &proc_dointvec,
2636 },
2637 {
2638 .ctl_name = NET_NEIGH_GC_THRESH3,
2639 .procname = "gc_thresh3",
2640 .maxlen = sizeof(int),
2641 .mode = 0644,
2642 .proc_handler = &proc_dointvec,
2643 },
2644 {},
2645 },
2646 };
2647
2648 int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
2649 int p_id, int pdev_id, char *p_name,
2650 proc_handler *handler, ctl_handler *strategy)
2651 {
2652 struct neigh_sysctl_table *t;
2653 const char *dev_name_source = NULL;
2654
2655 #define NEIGH_CTL_PATH_ROOT 0
2656 #define NEIGH_CTL_PATH_PROTO 1
2657 #define NEIGH_CTL_PATH_NEIGH 2
2658 #define NEIGH_CTL_PATH_DEV 3
2659
2660 struct ctl_path neigh_path[] = {
2661 { .procname = "net", .ctl_name = CTL_NET, },
2662 { .procname = "proto", .ctl_name = 0, },
2663 { .procname = "neigh", .ctl_name = 0, },
2664 { .procname = "default", .ctl_name = NET_PROTO_CONF_DEFAULT, },
2665 { },
2666 };
2667
2668 t = kmemdup(&neigh_sysctl_template, sizeof(*t), GFP_KERNEL);
2669 if (!t)
2670 goto err;
2671
2672 t->neigh_vars[0].data = &p->mcast_probes;
2673 t->neigh_vars[1].data = &p->ucast_probes;
2674 t->neigh_vars[2].data = &p->app_probes;
2675 t->neigh_vars[3].data = &p->retrans_time;
2676 t->neigh_vars[4].data = &p->base_reachable_time;
2677 t->neigh_vars[5].data = &p->delay_probe_time;
2678 t->neigh_vars[6].data = &p->gc_staletime;
2679 t->neigh_vars[7].data = &p->queue_len;
2680 t->neigh_vars[8].data = &p->proxy_qlen;
2681 t->neigh_vars[9].data = &p->anycast_delay;
2682 t->neigh_vars[10].data = &p->proxy_delay;
2683 t->neigh_vars[11].data = &p->locktime;
2684 t->neigh_vars[12].data = &p->retrans_time;
2685 t->neigh_vars[13].data = &p->base_reachable_time;
2686
2687 if (dev) {
2688 dev_name_source = dev->name;
2689 neigh_path[NEIGH_CTL_PATH_DEV].ctl_name = dev->ifindex;
2690 /* Terminate the table early */
2691 memset(&t->neigh_vars[14], 0, sizeof(t->neigh_vars[14]));
2692 } else {
2693 dev_name_source = neigh_path[NEIGH_CTL_PATH_DEV].procname;
		/* relies on gc_interval and gc_thresh[123] being laid
		 * out immediately after the embedded default parms at
		 * the end of struct neigh_table */
2694 		t->neigh_vars[14].data = (int *)(p + 1);
2695 t->neigh_vars[15].data = (int *)(p + 1) + 1;
2696 t->neigh_vars[16].data = (int *)(p + 1) + 2;
2697 t->neigh_vars[17].data = (int *)(p + 1) + 3;
2698 }
2699
2700
2701 if (handler || strategy) {
2702 /* RetransTime */
2703 t->neigh_vars[3].proc_handler = handler;
2704 t->neigh_vars[3].strategy = strategy;
2705 t->neigh_vars[3].extra1 = dev;
2706 if (!strategy)
2707 t->neigh_vars[3].ctl_name = CTL_UNNUMBERED;
2708 /* ReachableTime */
2709 t->neigh_vars[4].proc_handler = handler;
2710 t->neigh_vars[4].strategy = strategy;
2711 t->neigh_vars[4].extra1 = dev;
2712 if (!strategy)
2713 t->neigh_vars[4].ctl_name = CTL_UNNUMBERED;
2714 /* RetransTime (in milliseconds)*/
2715 t->neigh_vars[12].proc_handler = handler;
2716 t->neigh_vars[12].strategy = strategy;
2717 t->neigh_vars[12].extra1 = dev;
2718 if (!strategy)
2719 t->neigh_vars[12].ctl_name = CTL_UNNUMBERED;
2720 /* ReachableTime (in milliseconds) */
2721 t->neigh_vars[13].proc_handler = handler;
2722 t->neigh_vars[13].strategy = strategy;
2723 t->neigh_vars[13].extra1 = dev;
2724 if (!strategy)
2725 t->neigh_vars[13].ctl_name = CTL_UNNUMBERED;
2726 }
2727
2728 t->dev_name = kstrdup(dev_name_source, GFP_KERNEL);
2729 if (!t->dev_name)
2730 goto free;
2731
2732 neigh_path[NEIGH_CTL_PATH_DEV].procname = t->dev_name;
2733 neigh_path[NEIGH_CTL_PATH_NEIGH].ctl_name = pdev_id;
2734 neigh_path[NEIGH_CTL_PATH_PROTO].procname = p_name;
2735 neigh_path[NEIGH_CTL_PATH_PROTO].ctl_name = p_id;
2736
2737 t->sysctl_header = register_sysctl_paths(neigh_path, t->neigh_vars);
2738 if (!t->sysctl_header)
2739 goto free_procname;
2740
2741 p->sysctl_table = t;
2742 return 0;
2743
2744 free_procname:
2745 kfree(t->dev_name);
2746 free:
2747 kfree(t);
2748 err:
2749 return -ENOBUFS;
2750 }
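/*
 * Hedged usage sketch, mirroring how IPv4 registers its per-device ARP
 * knobs (see net/ipv4/devinet.c); the files then show up under
 * /proc/sys/net/ipv4/neigh/<dev>/:
 *
 *	neigh_sysctl_register(dev, p, NET_IPV4, NET_IPV4_NEIGH,
 *			      "ipv4", NULL, NULL);
 */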
2751
2752 void neigh_sysctl_unregister(struct neigh_parms *p)
2753 {
2754 if (p->sysctl_table) {
2755 struct neigh_sysctl_table *t = p->sysctl_table;
2756 p->sysctl_table = NULL;
2757 unregister_sysctl_table(t->sysctl_header);
2758 kfree(t->dev_name);
2759 kfree(t);
2760 }
2761 }
2762
2763 #endif /* CONFIG_SYSCTL */
2764
2765 static int __init neigh_init(void)
2766 {
2767 rtnl_register(PF_UNSPEC, RTM_NEWNEIGH, neigh_add, NULL);
2768 rtnl_register(PF_UNSPEC, RTM_DELNEIGH, neigh_delete, NULL);
2769 rtnl_register(PF_UNSPEC, RTM_GETNEIGH, NULL, neigh_dump_info);
2770
2771 rtnl_register(PF_UNSPEC, RTM_GETNEIGHTBL, NULL, neightbl_dump_info);
2772 rtnl_register(PF_UNSPEC, RTM_SETNEIGHTBL, neightbl_set, NULL);
2773
2774 return 0;
2775 }
2776
2777 subsys_initcall(neigh_init);
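/*
 * Each rtnl_register() call above pairs a message type with either a
 * doit handler (NEW/DEL/SET) or a dumpit handler (GET): RTM_GETNEIGH
 * dumps land in neigh_dump_info() and RTM_GETNEIGHTBL dumps in
 * neightbl_dump_info().
 */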
2778
2779 EXPORT_SYMBOL(__neigh_event_send);
2780 EXPORT_SYMBOL(neigh_changeaddr);
2781 EXPORT_SYMBOL(neigh_compat_output);
2782 EXPORT_SYMBOL(neigh_connected_output);
2783 EXPORT_SYMBOL(neigh_create);
2784 EXPORT_SYMBOL(neigh_destroy);
2785 EXPORT_SYMBOL(neigh_event_ns);
2786 EXPORT_SYMBOL(neigh_ifdown);
2787 EXPORT_SYMBOL(neigh_lookup);
2788 EXPORT_SYMBOL(neigh_lookup_nodev);
2789 EXPORT_SYMBOL(neigh_parms_alloc);
2790 EXPORT_SYMBOL(neigh_parms_release);
2791 EXPORT_SYMBOL(neigh_rand_reach_time);
2792 EXPORT_SYMBOL(neigh_resolve_output);
2793 EXPORT_SYMBOL(neigh_table_clear);
2794 EXPORT_SYMBOL(neigh_table_init);
2795 EXPORT_SYMBOL(neigh_table_init_no_netlink);
2796 EXPORT_SYMBOL(neigh_update);
2797 EXPORT_SYMBOL(pneigh_enqueue);
2798 EXPORT_SYMBOL(pneigh_lookup);
2799
2800 #ifdef CONFIG_ARPD
2801 EXPORT_SYMBOL(neigh_app_ns);
2802 #endif
2803 #ifdef CONFIG_SYSCTL
2804 EXPORT_SYMBOL(neigh_sysctl_register);
2805 EXPORT_SYMBOL(neigh_sysctl_unregister);
2806 #endif