ipv4: Make neigh lookups directly in output packet path.
net/core/neighbour.c
1 /*
2 * Generic address resolution entity
3 *
4 * Authors:
5 * Pedro Roque <roque@di.fc.ul.pt>
6 * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
12 *
13 * Fixes:
14 * Vitaly E. Lavrov releasing NULL neighbor in neigh_add.
15 * Harald Welte Add neighbour cache statistics like rtstat
16 */
17
18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
20 #include <linux/slab.h>
21 #include <linux/types.h>
22 #include <linux/kernel.h>
23 #include <linux/module.h>
24 #include <linux/socket.h>
25 #include <linux/netdevice.h>
26 #include <linux/proc_fs.h>
27 #ifdef CONFIG_SYSCTL
28 #include <linux/sysctl.h>
29 #endif
30 #include <linux/times.h>
31 #include <net/net_namespace.h>
32 #include <net/neighbour.h>
33 #include <net/dst.h>
34 #include <net/sock.h>
35 #include <net/netevent.h>
36 #include <net/netlink.h>
37 #include <linux/rtnetlink.h>
38 #include <linux/random.h>
39 #include <linux/string.h>
40 #include <linux/log2.h>
41
42 #define NEIGH_DEBUG 1
43
44 #define NEIGH_PRINTK(x...) printk(x)
45 #define NEIGH_NOPRINTK(x...) do { ; } while(0)
46 #define NEIGH_PRINTK1 NEIGH_NOPRINTK
47 #define NEIGH_PRINTK2 NEIGH_NOPRINTK
48
49 #if NEIGH_DEBUG >= 1
50 #undef NEIGH_PRINTK1
51 #define NEIGH_PRINTK1 NEIGH_PRINTK
52 #endif
53 #if NEIGH_DEBUG >= 2
54 #undef NEIGH_PRINTK2
55 #define NEIGH_PRINTK2 NEIGH_PRINTK
56 #endif
57
58 #define PNEIGH_HASHMASK 0xF
59
60 static void neigh_timer_handler(unsigned long arg);
61 static void __neigh_notify(struct neighbour *n, int type, int flags);
62 static void neigh_update_notify(struct neighbour *neigh);
63 static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
64
65 static struct neigh_table *neigh_tables;
66 #ifdef CONFIG_PROC_FS
67 static const struct file_operations neigh_stat_seq_fops;
68 #endif
69
70 /*
71 Neighbour hash table buckets are protected by the rwlock tbl->lock.
72
73 - All scans/updates of the hash buckets MUST be made under this lock.
74 - NOTHING clever should be done under this lock: no callbacks
75 into protocol backends, no attempts to send anything to the network.
76 Doing so will result in deadlocks if the backend/driver wants to
77 use the neighbour cache.
78 - If an entry requires some non-trivial action, increase
79 its reference count and release the table lock first.
80
81 Neighbour entries are protected:
82 - by the reference count.
83 - by the rwlock neigh->lock.
84
85 The reference count prevents destruction.
86
87 neigh->lock mainly serializes the ll address data and its validity state.
88 However, the same lock is also used to protect other entry fields:
89 - the timer
90 - the resolution queue
91
92 Again, nothing clever shall be done under neigh->lock;
93 the most complicated procedure we allow is dev->hard_header.
94 dev->hard_header is assumed to be simple and to make no
95 callbacks into the neighbour tables.
96
97 The last lock is neigh_tbl_lock. It is a pure SMP lock protecting
98 the list of neighbour tables. This list is used only in process context.
99 */
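/*
 * Editorial sketch (not part of the original file): the lock discipline
 * described above.  Scans are done under tbl->lock; an entry that needs
 * non-trivial work (callbacks, transmits) is pinned with neigh_hold()
 * and the table lock is dropped before that work runs.
 */
static void example_bucket_scan(struct neigh_table *tbl, unsigned int bucket,
				void (*slow_work)(struct neighbour *n))
{
	struct neigh_hash_table *nht;
	struct neighbour *n;

	write_lock_bh(&tbl->lock);
	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));
	n = rcu_dereference_protected(nht->hash_buckets[bucket],
				      lockdep_is_held(&tbl->lock));
	if (n) {
		neigh_hold(n);			/* pin the entry */
		write_unlock_bh(&tbl->lock);
		slow_work(n);			/* safe: no table lock held */
		neigh_release(n);
		return;
	}
	write_unlock_bh(&tbl->lock);
}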
100
101 static DEFINE_RWLOCK(neigh_tbl_lock);
102
103 static int neigh_blackhole(struct neighbour *neigh, struct sk_buff *skb)
104 {
105 kfree_skb(skb);
106 return -ENETDOWN;
107 }
108
109 static void neigh_cleanup_and_release(struct neighbour *neigh)
110 {
111 if (neigh->parms->neigh_cleanup)
112 neigh->parms->neigh_cleanup(neigh);
113
114 __neigh_notify(neigh, RTM_DELNEIGH, 0);
115 neigh_release(neigh);
116 }
117
118 /*
119 * Returns a value randomly distributed in the interval (1/2)*base...(3/2)*base.
120 * It corresponds to the default IPv6 settings and is not overridable,
121 * because it is a really reasonable choice.
122 */
123
124 unsigned long neigh_rand_reach_time(unsigned long base)
125 {
126 return base ? (net_random() % base) + (base >> 1) : 0;
127 }
128 EXPORT_SYMBOL(neigh_rand_reach_time);
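/*
 * Worked example (editorial, not in the original source): with
 * base = 30 * HZ the expression above evaluates to
 * (net_random() % (30 * HZ)) + (15 * HZ), i.e. a value uniformly
 * distributed in [15 s, 45 s) -- exactly the (1/2)*base ... (3/2)*base
 * interval described in the comment.
 */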
129
130
131 static int neigh_forced_gc(struct neigh_table *tbl)
132 {
133 int shrunk = 0;
134 int i;
135 struct neigh_hash_table *nht;
136
137 NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);
138
139 write_lock_bh(&tbl->lock);
140 nht = rcu_dereference_protected(tbl->nht,
141 lockdep_is_held(&tbl->lock));
142 for (i = 0; i < (1 << nht->hash_shift); i++) {
143 struct neighbour *n;
144 struct neighbour __rcu **np;
145
146 np = &nht->hash_buckets[i];
147 while ((n = rcu_dereference_protected(*np,
148 lockdep_is_held(&tbl->lock))) != NULL) {
149 /* Neighbour record may be discarded if:
150 * - nobody refers to it.
151 * - it is not permanent.
152 */
153 write_lock(&n->lock);
154 if (atomic_read(&n->refcnt) == 1 &&
155 !(n->nud_state & NUD_PERMANENT)) {
156 rcu_assign_pointer(*np,
157 rcu_dereference_protected(n->next,
158 lockdep_is_held(&tbl->lock)));
159 n->dead = 1;
160 shrunk = 1;
161 write_unlock(&n->lock);
162 neigh_cleanup_and_release(n);
163 continue;
164 }
165 write_unlock(&n->lock);
166 np = &n->next;
167 }
168 }
169
170 tbl->last_flush = jiffies;
171
172 write_unlock_bh(&tbl->lock);
173
174 return shrunk;
175 }
176
177 static void neigh_add_timer(struct neighbour *n, unsigned long when)
178 {
179 neigh_hold(n);
180 if (unlikely(mod_timer(&n->timer, when))) {
181 printk("NEIGH: BUG, double timer add, state is %x\n",
182 n->nud_state);
183 dump_stack();
184 }
185 }
186
187 static int neigh_del_timer(struct neighbour *n)
188 {
189 if ((n->nud_state & NUD_IN_TIMER) &&
190 del_timer(&n->timer)) {
191 neigh_release(n);
192 return 1;
193 }
194 return 0;
195 }
196
197 static void pneigh_queue_purge(struct sk_buff_head *list)
198 {
199 struct sk_buff *skb;
200
201 while ((skb = skb_dequeue(list)) != NULL) {
202 dev_put(skb->dev);
203 kfree_skb(skb);
204 }
205 }
206
207 static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev)
208 {
209 int i;
210 struct neigh_hash_table *nht;
211
212 nht = rcu_dereference_protected(tbl->nht,
213 lockdep_is_held(&tbl->lock));
214
215 for (i = 0; i < (1 << nht->hash_shift); i++) {
216 struct neighbour *n;
217 struct neighbour __rcu **np = &nht->hash_buckets[i];
218
219 while ((n = rcu_dereference_protected(*np,
220 lockdep_is_held(&tbl->lock))) != NULL) {
221 if (dev && n->dev != dev) {
222 np = &n->next;
223 continue;
224 }
225 rcu_assign_pointer(*np,
226 rcu_dereference_protected(n->next,
227 lockdep_is_held(&tbl->lock)));
228 write_lock(&n->lock);
229 neigh_del_timer(n);
230 n->dead = 1;
231
232 if (atomic_read(&n->refcnt) != 1) {
233 /* The most unpleasant situation:
234 we must destroy the neighbour entry,
235 but someone still holds a reference to it.
236
237 Destruction will be delayed until
238 the last user releases the entry, but
239 we must kill its timers etc. and move
240 it to a safe state now.
241 */
242 skb_queue_purge(&n->arp_queue);
243 n->arp_queue_len_bytes = 0;
244 n->output = neigh_blackhole;
245 if (n->nud_state & NUD_VALID)
246 n->nud_state = NUD_NOARP;
247 else
248 n->nud_state = NUD_NONE;
249 NEIGH_PRINTK2("neigh %p is stray.\n", n);
250 }
251 write_unlock(&n->lock);
252 neigh_cleanup_and_release(n);
253 }
254 }
255 }
256
257 void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
258 {
259 write_lock_bh(&tbl->lock);
260 neigh_flush_dev(tbl, dev);
261 write_unlock_bh(&tbl->lock);
262 }
263 EXPORT_SYMBOL(neigh_changeaddr);
264
265 int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
266 {
267 write_lock_bh(&tbl->lock);
268 neigh_flush_dev(tbl, dev);
269 pneigh_ifdown(tbl, dev);
270 write_unlock_bh(&tbl->lock);
271
272 del_timer_sync(&tbl->proxy_timer);
273 pneigh_queue_purge(&tbl->proxy_queue);
274 return 0;
275 }
276 EXPORT_SYMBOL(neigh_ifdown);
277
278 static struct neighbour *neigh_alloc(struct neigh_table *tbl, struct net_device *dev)
279 {
280 struct neighbour *n = NULL;
281 unsigned long now = jiffies;
282 int entries;
283
284 entries = atomic_inc_return(&tbl->entries) - 1;
285 if (entries >= tbl->gc_thresh3 ||
286 (entries >= tbl->gc_thresh2 &&
287 time_after(now, tbl->last_flush + 5 * HZ))) {
288 if (!neigh_forced_gc(tbl) &&
289 entries >= tbl->gc_thresh3)
290 goto out_entries;
291 }
292
293 if (tbl->entry_size)
294 n = kzalloc(tbl->entry_size, GFP_ATOMIC);
295 else {
296 int sz = sizeof(*n) + tbl->key_len;
297
298 sz = ALIGN(sz, NEIGH_PRIV_ALIGN);
299 sz += dev->neigh_priv_len;
300 n = kzalloc(sz, GFP_ATOMIC);
301 }
302 if (!n)
303 goto out_entries;
304
305 skb_queue_head_init(&n->arp_queue);
306 rwlock_init(&n->lock);
307 seqlock_init(&n->ha_lock);
308 n->updated = n->used = now;
309 n->nud_state = NUD_NONE;
310 n->output = neigh_blackhole;
311 seqlock_init(&n->hh.hh_lock);
312 n->parms = neigh_parms_clone(&tbl->parms);
313 setup_timer(&n->timer, neigh_timer_handler, (unsigned long)n);
314
315 NEIGH_CACHE_STAT_INC(tbl, allocs);
316 n->tbl = tbl;
317 atomic_set(&n->refcnt, 1);
318 n->dead = 1;
319 out:
320 return n;
321
322 out_entries:
323 atomic_dec(&tbl->entries);
324 goto out;
325 }
326
327 static void neigh_get_hash_rnd(u32 *x)
328 {
329 get_random_bytes(x, sizeof(*x));
330 *x |= 1;
331 }
332
333 static struct neigh_hash_table *neigh_hash_alloc(unsigned int shift)
334 {
335 size_t size = (1 << shift) * sizeof(struct neighbour *);
336 struct neigh_hash_table *ret;
337 struct neighbour __rcu **buckets;
338 int i;
339
340 ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
341 if (!ret)
342 return NULL;
343 if (size <= PAGE_SIZE)
344 buckets = kzalloc(size, GFP_ATOMIC);
345 else
346 buckets = (struct neighbour __rcu **)
347 __get_free_pages(GFP_ATOMIC | __GFP_ZERO,
348 get_order(size));
349 if (!buckets) {
350 kfree(ret);
351 return NULL;
352 }
353 ret->hash_buckets = buckets;
354 ret->hash_shift = shift;
355 for (i = 0; i < NEIGH_NUM_HASH_RND; i++)
356 neigh_get_hash_rnd(&ret->hash_rnd[i]);
357 return ret;
358 }
359
360 static void neigh_hash_free_rcu(struct rcu_head *head)
361 {
362 struct neigh_hash_table *nht = container_of(head,
363 struct neigh_hash_table,
364 rcu);
365 size_t size = (1 << nht->hash_shift) * sizeof(struct neighbour *);
366 struct neighbour __rcu **buckets = nht->hash_buckets;
367
368 if (size <= PAGE_SIZE)
369 kfree(buckets);
370 else
371 free_pages((unsigned long)buckets, get_order(size));
372 kfree(nht);
373 }
374
375 static struct neigh_hash_table *neigh_hash_grow(struct neigh_table *tbl,
376 unsigned long new_shift)
377 {
378 unsigned int i, hash;
379 struct neigh_hash_table *new_nht, *old_nht;
380
381 NEIGH_CACHE_STAT_INC(tbl, hash_grows);
382
383 old_nht = rcu_dereference_protected(tbl->nht,
384 lockdep_is_held(&tbl->lock));
385 new_nht = neigh_hash_alloc(new_shift);
386 if (!new_nht)
387 return old_nht;
388
389 for (i = 0; i < (1 << old_nht->hash_shift); i++) {
390 struct neighbour *n, *next;
391
392 for (n = rcu_dereference_protected(old_nht->hash_buckets[i],
393 lockdep_is_held(&tbl->lock));
394 n != NULL;
395 n = next) {
396 hash = tbl->hash(n->primary_key, n->dev,
397 new_nht->hash_rnd);
398
399 hash >>= (32 - new_nht->hash_shift);
400 next = rcu_dereference_protected(n->next,
401 lockdep_is_held(&tbl->lock));
402
403 rcu_assign_pointer(n->next,
404 rcu_dereference_protected(
405 new_nht->hash_buckets[hash],
406 lockdep_is_held(&tbl->lock)));
407 rcu_assign_pointer(new_nht->hash_buckets[hash], n);
408 }
409 }
410
411 rcu_assign_pointer(tbl->nht, new_nht);
412 call_rcu(&old_nht->rcu, neigh_hash_free_rcu);
413 return new_nht;
414 }
415
416 struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
417 struct net_device *dev)
418 {
419 struct neighbour *n;
420 int key_len = tbl->key_len;
421 u32 hash_val;
422 struct neigh_hash_table *nht;
423
424 NEIGH_CACHE_STAT_INC(tbl, lookups);
425
426 rcu_read_lock_bh();
427 nht = rcu_dereference_bh(tbl->nht);
428 hash_val = tbl->hash(pkey, dev, nht->hash_rnd) >> (32 - nht->hash_shift);
429
430 for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
431 n != NULL;
432 n = rcu_dereference_bh(n->next)) {
433 if (dev == n->dev && !memcmp(n->primary_key, pkey, key_len)) {
434 if (!atomic_inc_not_zero(&n->refcnt))
435 n = NULL;
436 NEIGH_CACHE_STAT_INC(tbl, hits);
437 break;
438 }
439 }
440
441 rcu_read_unlock_bh();
442 return n;
443 }
444 EXPORT_SYMBOL(neigh_lookup);
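/*
 * Usage sketch (editorial, not in the original file): neigh_lookup()
 * returns the entry with a reference held (the atomic_inc_not_zero()
 * above), so the caller must pair it with neigh_release().  'key' is a
 * protocol address of tbl->key_len bytes, e.g. a __be32 for ARP/IPv4.
 */
static void example_lookup_user(struct neigh_table *tbl, const void *key,
				struct net_device *dev)
{
	struct neighbour *n = neigh_lookup(tbl, key, dev);

	if (n) {
		/* n is pinned here; inspect its state, lladdr, etc. */
		neigh_release(n);
	}
}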
445
446 struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
447 const void *pkey)
448 {
449 struct neighbour *n;
450 int key_len = tbl->key_len;
451 u32 hash_val;
452 struct neigh_hash_table *nht;
453
454 NEIGH_CACHE_STAT_INC(tbl, lookups);
455
456 rcu_read_lock_bh();
457 nht = rcu_dereference_bh(tbl->nht);
458 hash_val = tbl->hash(pkey, NULL, nht->hash_rnd) >> (32 - nht->hash_shift);
459
460 for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
461 n != NULL;
462 n = rcu_dereference_bh(n->next)) {
463 if (!memcmp(n->primary_key, pkey, key_len) &&
464 net_eq(dev_net(n->dev), net)) {
465 if (!atomic_inc_not_zero(&n->refcnt))
466 n = NULL;
467 NEIGH_CACHE_STAT_INC(tbl, hits);
468 break;
469 }
470 }
471
472 rcu_read_unlock_bh();
473 return n;
474 }
475 EXPORT_SYMBOL(neigh_lookup_nodev);
476
477 struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
478 struct net_device *dev, bool want_ref)
479 {
480 u32 hash_val;
481 int key_len = tbl->key_len;
482 int error;
483 struct neighbour *n1, *rc, *n = neigh_alloc(tbl, dev);
484 struct neigh_hash_table *nht;
485
486 if (!n) {
487 rc = ERR_PTR(-ENOBUFS);
488 goto out;
489 }
490
491 memcpy(n->primary_key, pkey, key_len);
492 n->dev = dev;
493 dev_hold(dev);
494
495 /* Protocol specific setup. */
496 if (tbl->constructor && (error = tbl->constructor(n)) < 0) {
497 rc = ERR_PTR(error);
498 goto out_neigh_release;
499 }
500
501 if (dev->netdev_ops->ndo_neigh_construct) {
502 error = dev->netdev_ops->ndo_neigh_construct(n);
503 if (error < 0) {
504 rc = ERR_PTR(error);
505 goto out_neigh_release;
506 }
507 }
508
509 /* Device specific setup. */
510 if (n->parms->neigh_setup &&
511 (error = n->parms->neigh_setup(n)) < 0) {
512 rc = ERR_PTR(error);
513 goto out_neigh_release;
514 }
515
516 n->confirmed = jiffies - (n->parms->base_reachable_time << 1);
517
518 write_lock_bh(&tbl->lock);
519 nht = rcu_dereference_protected(tbl->nht,
520 lockdep_is_held(&tbl->lock));
521
522 if (atomic_read(&tbl->entries) > (1 << nht->hash_shift))
523 nht = neigh_hash_grow(tbl, nht->hash_shift + 1);
524
525 hash_val = tbl->hash(pkey, dev, nht->hash_rnd) >> (32 - nht->hash_shift);
526
527 if (n->parms->dead) {
528 rc = ERR_PTR(-EINVAL);
529 goto out_tbl_unlock;
530 }
531
532 for (n1 = rcu_dereference_protected(nht->hash_buckets[hash_val],
533 lockdep_is_held(&tbl->lock));
534 n1 != NULL;
535 n1 = rcu_dereference_protected(n1->next,
536 lockdep_is_held(&tbl->lock))) {
537 if (dev == n1->dev && !memcmp(n1->primary_key, pkey, key_len)) {
538 if (want_ref)
539 neigh_hold(n1);
540 rc = n1;
541 goto out_tbl_unlock;
542 }
543 }
544
545 n->dead = 0;
546 if (want_ref)
547 neigh_hold(n);
548 rcu_assign_pointer(n->next,
549 rcu_dereference_protected(nht->hash_buckets[hash_val],
550 lockdep_is_held(&tbl->lock)));
551 rcu_assign_pointer(nht->hash_buckets[hash_val], n);
552 write_unlock_bh(&tbl->lock);
553 NEIGH_PRINTK2("neigh %p is created.\n", n);
554 rc = n;
555 out:
556 return rc;
557 out_tbl_unlock:
558 write_unlock_bh(&tbl->lock);
559 out_neigh_release:
560 neigh_release(n);
561 goto out;
562 }
563 EXPORT_SYMBOL(__neigh_create);
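/*
 * Editorial sketch: __neigh_create() either hashes the new entry or, if
 * another CPU won the race (the bucket re-scan under tbl->lock above),
 * returns the already-hashed duplicate.  With want_ref == true the
 * result carries a reference, just like neigh_lookup().
 */
static struct neighbour *example_create(struct neigh_table *tbl,
					const void *key,
					struct net_device *dev)
{
	struct neighbour *n = __neigh_create(tbl, key, dev, true);

	return IS_ERR(n) ? NULL : n;	/* caller owns one reference */
}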
564
565 static u32 pneigh_hash(const void *pkey, int key_len)
566 {
567 u32 hash_val = *(u32 *)(pkey + key_len - 4);
568 hash_val ^= (hash_val >> 16);
569 hash_val ^= hash_val >> 8;
570 hash_val ^= hash_val >> 4;
571 hash_val &= PNEIGH_HASHMASK;
572 return hash_val;
573 }
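/*
 * Editorial note: the xor-shift folding above reduces the 32-bit value
 * read from the last four key bytes to the xor of its eight nibbles,
 * selecting one of PNEIGH_HASHMASK + 1 == 16 proxy hash buckets.  For
 * IPv4 (key_len == 4) those four bytes are the address itself.
 */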
574
575 static struct pneigh_entry *__pneigh_lookup_1(struct pneigh_entry *n,
576 struct net *net,
577 const void *pkey,
578 int key_len,
579 struct net_device *dev)
580 {
581 while (n) {
582 if (!memcmp(n->key, pkey, key_len) &&
583 net_eq(pneigh_net(n), net) &&
584 (n->dev == dev || !n->dev))
585 return n;
586 n = n->next;
587 }
588 return NULL;
589 }
590
591 struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl,
592 struct net *net, const void *pkey, struct net_device *dev)
593 {
594 int key_len = tbl->key_len;
595 u32 hash_val = pneigh_hash(pkey, key_len);
596
597 return __pneigh_lookup_1(tbl->phash_buckets[hash_val],
598 net, pkey, key_len, dev);
599 }
600 EXPORT_SYMBOL_GPL(__pneigh_lookup);
601
602 struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl,
603 struct net *net, const void *pkey,
604 struct net_device *dev, int creat)
605 {
606 struct pneigh_entry *n;
607 int key_len = tbl->key_len;
608 u32 hash_val = pneigh_hash(pkey, key_len);
609
610 read_lock_bh(&tbl->lock);
611 n = __pneigh_lookup_1(tbl->phash_buckets[hash_val],
612 net, pkey, key_len, dev);
613 read_unlock_bh(&tbl->lock);
614
615 if (n || !creat)
616 goto out;
617
618 ASSERT_RTNL();
619
620 n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL);
621 if (!n)
622 goto out;
623
624 write_pnet(&n->net, hold_net(net));
625 memcpy(n->key, pkey, key_len);
626 n->dev = dev;
627 if (dev)
628 dev_hold(dev);
629
630 if (tbl->pconstructor && tbl->pconstructor(n)) {
631 if (dev)
632 dev_put(dev);
633 release_net(net);
634 kfree(n);
635 n = NULL;
636 goto out;
637 }
638
639 write_lock_bh(&tbl->lock);
640 n->next = tbl->phash_buckets[hash_val];
641 tbl->phash_buckets[hash_val] = n;
642 write_unlock_bh(&tbl->lock);
643 out:
644 return n;
645 }
646 EXPORT_SYMBOL(pneigh_lookup);
647
648
649 int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey,
650 struct net_device *dev)
651 {
652 struct pneigh_entry *n, **np;
653 int key_len = tbl->key_len;
654 u32 hash_val = pneigh_hash(pkey, key_len);
655
656 write_lock_bh(&tbl->lock);
657 for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
658 np = &n->next) {
659 if (!memcmp(n->key, pkey, key_len) && n->dev == dev &&
660 net_eq(pneigh_net(n), net)) {
661 *np = n->next;
662 write_unlock_bh(&tbl->lock);
663 if (tbl->pdestructor)
664 tbl->pdestructor(n);
665 if (n->dev)
666 dev_put(n->dev);
667 release_net(pneigh_net(n));
668 kfree(n);
669 return 0;
670 }
671 }
672 write_unlock_bh(&tbl->lock);
673 return -ENOENT;
674 }
675
676 static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
677 {
678 struct pneigh_entry *n, **np;
679 u32 h;
680
681 for (h = 0; h <= PNEIGH_HASHMASK; h++) {
682 np = &tbl->phash_buckets[h];
683 while ((n = *np) != NULL) {
684 if (!dev || n->dev == dev) {
685 *np = n->next;
686 if (tbl->pdestructor)
687 tbl->pdestructor(n);
688 if (n->dev)
689 dev_put(n->dev);
690 release_net(pneigh_net(n));
691 kfree(n);
692 continue;
693 }
694 np = &n->next;
695 }
696 }
697 return -ENOENT;
698 }
699
700 static void neigh_parms_destroy(struct neigh_parms *parms);
701
702 static inline void neigh_parms_put(struct neigh_parms *parms)
703 {
704 if (atomic_dec_and_test(&parms->refcnt))
705 neigh_parms_destroy(parms);
706 }
707
708 /*
709 * The neighbour must already be removed from the table;
710 * it is destroyed once the last reference is dropped.
711 */
712 void neigh_destroy(struct neighbour *neigh)
713 {
714 struct net_device *dev = neigh->dev;
715
716 NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);
717
718 if (!neigh->dead) {
719 pr_warn("Destroying alive neighbour %p\n", neigh);
720 dump_stack();
721 return;
722 }
723
724 if (neigh_del_timer(neigh))
725 pr_warn("Impossible event\n");
726
727 skb_queue_purge(&neigh->arp_queue);
728 neigh->arp_queue_len_bytes = 0;
729
730 if (dev->netdev_ops->ndo_neigh_destroy)
731 dev->netdev_ops->ndo_neigh_destroy(neigh);
732
733 dev_put(dev);
734 neigh_parms_put(neigh->parms);
735
736 NEIGH_PRINTK2("neigh %p is destroyed.\n", neigh);
737
738 atomic_dec(&neigh->tbl->entries);
739 kfree_rcu(neigh, rcu);
740 }
741 EXPORT_SYMBOL(neigh_destroy);
742
743 /* Neighbour state is suspicious;
744 disable the fast path.
745
746 Called with the neigh write lock held.
747 */
748 static void neigh_suspect(struct neighbour *neigh)
749 {
750 NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);
751
752 neigh->output = neigh->ops->output;
753 }
754
755 /* Neighbour state is OK;
756 enable the fast path.
757
758 Called with the neigh write lock held.
759 */
760 static void neigh_connect(struct neighbour *neigh)
761 {
762 NEIGH_PRINTK2("neigh %p is connected.\n", neigh);
763
764 neigh->output = neigh->ops->connected_output;
765 }
766
767 static void neigh_periodic_work(struct work_struct *work)
768 {
769 struct neigh_table *tbl = container_of(work, struct neigh_table, gc_work.work);
770 struct neighbour *n;
771 struct neighbour __rcu **np;
772 unsigned int i;
773 struct neigh_hash_table *nht;
774
775 NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);
776
777 write_lock_bh(&tbl->lock);
778 nht = rcu_dereference_protected(tbl->nht,
779 lockdep_is_held(&tbl->lock));
780
781 /*
782 * periodically recompute ReachableTime from random function
783 */
784
785 if (time_after(jiffies, tbl->last_rand + 300 * HZ)) {
786 struct neigh_parms *p;
787 tbl->last_rand = jiffies;
788 for (p = &tbl->parms; p; p = p->next)
789 p->reachable_time =
790 neigh_rand_reach_time(p->base_reachable_time);
791 }
792
793 for (i = 0 ; i < (1 << nht->hash_shift); i++) {
794 np = &nht->hash_buckets[i];
795
796 while ((n = rcu_dereference_protected(*np,
797 lockdep_is_held(&tbl->lock))) != NULL) {
798 unsigned int state;
799
800 write_lock(&n->lock);
801
802 state = n->nud_state;
803 if (state & (NUD_PERMANENT | NUD_IN_TIMER)) {
804 write_unlock(&n->lock);
805 goto next_elt;
806 }
807
808 if (time_before(n->used, n->confirmed))
809 n->used = n->confirmed;
810
811 if (atomic_read(&n->refcnt) == 1 &&
812 (state == NUD_FAILED ||
813 time_after(jiffies, n->used + n->parms->gc_staletime))) {
814 *np = n->next;
815 n->dead = 1;
816 write_unlock(&n->lock);
817 neigh_cleanup_and_release(n);
818 continue;
819 }
820 write_unlock(&n->lock);
821
822 next_elt:
823 np = &n->next;
824 }
825 /*
826 * It's fine to release lock here, even if hash table
827 * grows while we are preempted.
828 */
829 write_unlock_bh(&tbl->lock);
830 cond_resched();
831 write_lock_bh(&tbl->lock);
832 nht = rcu_dereference_protected(tbl->nht,
833 lockdep_is_held(&tbl->lock));
834 }
835 /* Cycle through all hash buckets every base_reachable_time/2 ticks.
836 * ARP entry timeouts range from 1/2 base_reachable_time to 3/2
837 * base_reachable_time.
838 */
839 schedule_delayed_work(&tbl->gc_work,
840 tbl->parms.base_reachable_time >> 1);
841 write_unlock_bh(&tbl->lock);
842 }
843
844 static __inline__ int neigh_max_probes(struct neighbour *n)
845 {
846 struct neigh_parms *p = n->parms;
847 return (n->nud_state & NUD_PROBE) ?
848 p->ucast_probes :
849 p->ucast_probes + p->app_probes + p->mcast_probes;
850 }
851
852 static void neigh_invalidate(struct neighbour *neigh)
853 __releases(neigh->lock)
854 __acquires(neigh->lock)
855 {
856 struct sk_buff *skb;
857
858 NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
859 NEIGH_PRINTK2("neigh %p is failed.\n", neigh);
860 neigh->updated = jiffies;
861
862 /* This is a very delicate spot. error_report is a complicated
863 routine; in particular, it can end up in this same neighbour entry!
864
865 So we are careful here to avoid a dead loop. --ANK
866 */
867 while (neigh->nud_state == NUD_FAILED &&
868 (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
869 write_unlock(&neigh->lock);
870 neigh->ops->error_report(neigh, skb);
871 write_lock(&neigh->lock);
872 }
873 skb_queue_purge(&neigh->arp_queue);
874 neigh->arp_queue_len_bytes = 0;
875 }
876
877 static void neigh_probe(struct neighbour *neigh)
878 __releases(neigh->lock)
879 {
880 struct sk_buff *skb = skb_peek(&neigh->arp_queue);
881 /* keep skb alive even if arp_queue overflows */
882 if (skb)
883 skb = skb_copy(skb, GFP_ATOMIC);
884 write_unlock(&neigh->lock);
885 neigh->ops->solicit(neigh, skb);
886 atomic_inc(&neigh->probes);
887 kfree_skb(skb);
888 }
889
890 /* Called when a timer expires for a neighbour entry. */
891
892 static void neigh_timer_handler(unsigned long arg)
893 {
894 unsigned long now, next;
895 struct neighbour *neigh = (struct neighbour *)arg;
896 unsigned int state;
897 int notify = 0;
898
899 write_lock(&neigh->lock);
900
901 state = neigh->nud_state;
902 now = jiffies;
903 next = now + HZ;
904
905 if (!(state & NUD_IN_TIMER))
906 goto out;
907
908 if (state & NUD_REACHABLE) {
909 if (time_before_eq(now,
910 neigh->confirmed + neigh->parms->reachable_time)) {
911 NEIGH_PRINTK2("neigh %p is still alive.\n", neigh);
912 next = neigh->confirmed + neigh->parms->reachable_time;
913 } else if (time_before_eq(now,
914 neigh->used + neigh->parms->delay_probe_time)) {
915 NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
916 neigh->nud_state = NUD_DELAY;
917 neigh->updated = jiffies;
918 neigh_suspect(neigh);
919 next = now + neigh->parms->delay_probe_time;
920 } else {
921 NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);
922 neigh->nud_state = NUD_STALE;
923 neigh->updated = jiffies;
924 neigh_suspect(neigh);
925 notify = 1;
926 }
927 } else if (state & NUD_DELAY) {
928 if (time_before_eq(now,
929 neigh->confirmed + neigh->parms->delay_probe_time)) {
930 NEIGH_PRINTK2("neigh %p is now reachable.\n", neigh);
931 neigh->nud_state = NUD_REACHABLE;
932 neigh->updated = jiffies;
933 neigh_connect(neigh);
934 notify = 1;
935 next = neigh->confirmed + neigh->parms->reachable_time;
936 } else {
937 NEIGH_PRINTK2("neigh %p is probed.\n", neigh);
938 neigh->nud_state = NUD_PROBE;
939 neigh->updated = jiffies;
940 atomic_set(&neigh->probes, 0);
941 next = now + neigh->parms->retrans_time;
942 }
943 } else {
944 /* NUD_PROBE|NUD_INCOMPLETE */
945 next = now + neigh->parms->retrans_time;
946 }
947
948 if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
949 atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
950 neigh->nud_state = NUD_FAILED;
951 notify = 1;
952 neigh_invalidate(neigh);
953 }
954
955 if (neigh->nud_state & NUD_IN_TIMER) {
956 if (time_before(next, jiffies + HZ/2))
957 next = jiffies + HZ/2;
958 if (!mod_timer(&neigh->timer, next))
959 neigh_hold(neigh);
960 }
961 if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
962 neigh_probe(neigh);
963 } else {
964 out:
965 write_unlock(&neigh->lock);
966 }
967
968 if (notify)
969 neigh_update_notify(neigh);
970
971 neigh_release(neigh);
972 }
973
974 int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
975 {
976 int rc;
977 bool immediate_probe = false;
978
979 write_lock_bh(&neigh->lock);
980
981 rc = 0;
982 if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
983 goto out_unlock_bh;
984
985 if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
986 if (neigh->parms->mcast_probes + neigh->parms->app_probes) {
987 unsigned long next, now = jiffies;
988
989 atomic_set(&neigh->probes, neigh->parms->ucast_probes);
990 neigh->nud_state = NUD_INCOMPLETE;
991 neigh->updated = now;
992 next = now + max(neigh->parms->retrans_time, HZ/2);
993 neigh_add_timer(neigh, next);
994 immediate_probe = true;
995 } else {
996 neigh->nud_state = NUD_FAILED;
997 neigh->updated = jiffies;
998 write_unlock_bh(&neigh->lock);
999
1000 kfree_skb(skb);
1001 return 1;
1002 }
1003 } else if (neigh->nud_state & NUD_STALE) {
1004 NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
1005 neigh->nud_state = NUD_DELAY;
1006 neigh->updated = jiffies;
1007 neigh_add_timer(neigh,
1008 jiffies + neigh->parms->delay_probe_time);
1009 }
1010
1011 if (neigh->nud_state == NUD_INCOMPLETE) {
1012 if (skb) {
1013 while (neigh->arp_queue_len_bytes + skb->truesize >
1014 neigh->parms->queue_len_bytes) {
1015 struct sk_buff *buff;
1016
1017 buff = __skb_dequeue(&neigh->arp_queue);
1018 if (!buff)
1019 break;
1020 neigh->arp_queue_len_bytes -= buff->truesize;
1021 kfree_skb(buff);
1022 NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards);
1023 }
1024 skb_dst_force(skb);
1025 __skb_queue_tail(&neigh->arp_queue, skb);
1026 neigh->arp_queue_len_bytes += skb->truesize;
1027 }
1028 rc = 1;
1029 }
1030 out_unlock_bh:
1031 if (immediate_probe)
1032 neigh_probe(neigh);
1033 else
1034 write_unlock(&neigh->lock);
1035 local_bh_enable();
1036 return rc;
1037 }
1038 EXPORT_SYMBOL(__neigh_event_send);
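/*
 * Editorial note: callers normally use the neigh_event_send() inline,
 * which short-circuits to 0 for NUD_CONNECTED/NUD_DELAY/NUD_PROBE and
 * only falls into __neigh_event_send() above otherwise.  A return of 0
 * means "go ahead and transmit"; 1 means the skb was queued (or dropped)
 * pending resolution -- see neigh_resolve_output() below.
 */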
1039
1040 static void neigh_update_hhs(struct neighbour *neigh)
1041 {
1042 struct hh_cache *hh;
1043 void (*update)(struct hh_cache*, const struct net_device*, const unsigned char *)
1044 = NULL;
1045
1046 if (neigh->dev->header_ops)
1047 update = neigh->dev->header_ops->cache_update;
1048
1049 if (update) {
1050 hh = &neigh->hh;
1051 if (hh->hh_len) {
1052 write_seqlock_bh(&hh->hh_lock);
1053 update(hh, neigh->dev, neigh->ha);
1054 write_sequnlock_bh(&hh->hh_lock);
1055 }
1056 }
1057 }
1058
1059
1060
1061 /* Generic update routine.
1062 -- lladdr is the new lladdr, or NULL if it is not supplied.
1063 -- new is the new state.
1064 -- flags
1065 NEIGH_UPDATE_F_OVERRIDE allows overriding an existing lladdr
1066 if it is different.
1067 NEIGH_UPDATE_F_WEAK_OVERRIDE will suspect an existing "connected"
1068 lladdr instead of overriding it
1069 if it is different.
1070 It also allows retaining the current state
1071 if lladdr is unchanged.
1072 NEIGH_UPDATE_F_ADMIN means that the change is administrative.
1073
1074 NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows overriding an existing
1075 NTF_ROUTER flag.
1076 NEIGH_UPDATE_F_ISROUTER indicates whether the neighbour is known
1077 to be a router.
1078
1079 The caller MUST hold a reference count on the entry.
1080 */
1081
1082 int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
1083 u32 flags)
1084 {
1085 u8 old;
1086 int err;
1087 int notify = 0;
1088 struct net_device *dev;
1089 int update_isrouter = 0;
1090
1091 write_lock_bh(&neigh->lock);
1092
1093 dev = neigh->dev;
1094 old = neigh->nud_state;
1095 err = -EPERM;
1096
1097 if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
1098 (old & (NUD_NOARP | NUD_PERMANENT)))
1099 goto out;
1100
1101 if (!(new & NUD_VALID)) {
1102 neigh_del_timer(neigh);
1103 if (old & NUD_CONNECTED)
1104 neigh_suspect(neigh);
1105 neigh->nud_state = new;
1106 err = 0;
1107 notify = old & NUD_VALID;
1108 if ((old & (NUD_INCOMPLETE | NUD_PROBE)) &&
1109 (new & NUD_FAILED)) {
1110 neigh_invalidate(neigh);
1111 notify = 1;
1112 }
1113 goto out;
1114 }
1115
1116 /* Compare new lladdr with cached one */
1117 if (!dev->addr_len) {
1118 /* First case: device needs no address. */
1119 lladdr = neigh->ha;
1120 } else if (lladdr) {
1121 /* The second case: something is already cached
1122 and a new address is proposed:
1123 - compare the new and old addresses
1124 - if they differ, check the override flag
1125 */
1126 if ((old & NUD_VALID) &&
1127 !memcmp(lladdr, neigh->ha, dev->addr_len))
1128 lladdr = neigh->ha;
1129 } else {
1130 /* No address is supplied; if we know something,
1131 use it, otherwise discard the request.
1132 */
1133 err = -EINVAL;
1134 if (!(old & NUD_VALID))
1135 goto out;
1136 lladdr = neigh->ha;
1137 }
1138
1139 if (new & NUD_CONNECTED)
1140 neigh->confirmed = jiffies;
1141 neigh->updated = jiffies;
1142
1143 /* If the entry was valid and the address has not changed,
1144 do not change the entry state if the new one is STALE.
1145 */
1146 err = 0;
1147 update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
1148 if (old & NUD_VALID) {
1149 if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
1150 update_isrouter = 0;
1151 if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) &&
1152 (old & NUD_CONNECTED)) {
1153 lladdr = neigh->ha;
1154 new = NUD_STALE;
1155 } else
1156 goto out;
1157 } else {
1158 if (lladdr == neigh->ha && new == NUD_STALE &&
1159 ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) ||
1160 (old & NUD_CONNECTED))
1161 )
1162 new = old;
1163 }
1164 }
1165
1166 if (new != old) {
1167 neigh_del_timer(neigh);
1168 if (new & NUD_IN_TIMER)
1169 neigh_add_timer(neigh, (jiffies +
1170 ((new & NUD_REACHABLE) ?
1171 neigh->parms->reachable_time :
1172 0)));
1173 neigh->nud_state = new;
1174 }
1175
1176 if (lladdr != neigh->ha) {
1177 write_seqlock(&neigh->ha_lock);
1178 memcpy(&neigh->ha, lladdr, dev->addr_len);
1179 write_sequnlock(&neigh->ha_lock);
1180 neigh_update_hhs(neigh);
1181 if (!(new & NUD_CONNECTED))
1182 neigh->confirmed = jiffies -
1183 (neigh->parms->base_reachable_time << 1);
1184 notify = 1;
1185 }
1186 if (new == old)
1187 goto out;
1188 if (new & NUD_CONNECTED)
1189 neigh_connect(neigh);
1190 else
1191 neigh_suspect(neigh);
1192 if (!(old & NUD_VALID)) {
1193 struct sk_buff *skb;
1194
1195 /* Again: avoid dead loop if something went wrong */
1196
1197 while (neigh->nud_state & NUD_VALID &&
1198 (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
1199 struct dst_entry *dst = skb_dst(skb);
1200 struct neighbour *n2, *n1 = neigh;
1201 write_unlock_bh(&neigh->lock);
1202
1203 rcu_read_lock();
1204 /* On shaper/eql skb->dst->neighbour != neigh :( */
1205 if (dst && (n2 = dst_get_neighbour_noref(dst)) != NULL)
1206 n1 = n2;
1207 n1->output(n1, skb);
1208 rcu_read_unlock();
1209
1210 write_lock_bh(&neigh->lock);
1211 }
1212 skb_queue_purge(&neigh->arp_queue);
1213 neigh->arp_queue_len_bytes = 0;
1214 }
1215 out:
1216 if (update_isrouter) {
1217 neigh->flags = (flags & NEIGH_UPDATE_F_ISROUTER) ?
1218 (neigh->flags | NTF_ROUTER) :
1219 (neigh->flags & ~NTF_ROUTER);
1220 }
1221 write_unlock_bh(&neigh->lock);
1222
1223 if (notify)
1224 neigh_update_notify(neigh);
1225
1226 return err;
1227 }
1228 EXPORT_SYMBOL(neigh_update);
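/*
 * Editorial sketch of an administrative update in terms of the flags
 * documented above: force a new link-layer address and make the entry
 * permanent.  NEIGH_UPDATE_F_ADMIN permits touching NOARP/PERMANENT
 * entries and NEIGH_UPDATE_F_OVERRIDE allows replacing a differing
 * cached lladdr.  The caller must hold a reference on 'neigh'.
 */
static int example_admin_update(struct neighbour *neigh, const u8 *lladdr)
{
	return neigh_update(neigh, lladdr, NUD_PERMANENT,
			    NEIGH_UPDATE_F_OVERRIDE | NEIGH_UPDATE_F_ADMIN);
}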
1229
1230 struct neighbour *neigh_event_ns(struct neigh_table *tbl,
1231 u8 *lladdr, void *saddr,
1232 struct net_device *dev)
1233 {
1234 struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
1235 lladdr || !dev->addr_len);
1236 if (neigh)
1237 neigh_update(neigh, lladdr, NUD_STALE,
1238 NEIGH_UPDATE_F_OVERRIDE);
1239 return neigh;
1240 }
1241 EXPORT_SYMBOL(neigh_event_ns);
1242
1243 /* Initializes the hh_cache entry; takes n->lock itself below. */
1244 static void neigh_hh_init(struct neighbour *n, struct dst_entry *dst)
1245 {
1246 struct net_device *dev = dst->dev;
1247 __be16 prot = dst->ops->protocol;
1248 struct hh_cache *hh = &n->hh;
1249
1250 write_lock_bh(&n->lock);
1251
1252 /* Only one thread can come in here and initialize the
1253 * hh_cache entry.
1254 */
1255 if (!hh->hh_len)
1256 dev->header_ops->cache(n, hh, prot);
1257
1258 write_unlock_bh(&n->lock);
1259 }
1260
1261 /* This function can be used in contexts where only the old dev_queue_xmit
1262 * worked, e.g. if you want to override the normal output path (eql, shaper)
1263 * but resolution has not been made yet.
1264 */
1265
1266 int neigh_compat_output(struct neighbour *neigh, struct sk_buff *skb)
1267 {
1268 struct net_device *dev = skb->dev;
1269
1270 __skb_pull(skb, skb_network_offset(skb));
1271
1272 if (dev_hard_header(skb, dev, ntohs(skb->protocol), NULL, NULL,
1273 skb->len) < 0 &&
1274 dev->header_ops->rebuild(skb))
1275 return 0;
1276
1277 return dev_queue_xmit(skb);
1278 }
1279 EXPORT_SYMBOL(neigh_compat_output);
1280
1281 /* Slow and careful. */
1282
1283 int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb)
1284 {
1285 struct dst_entry *dst = skb_dst(skb);
1286 int rc = 0;
1287
1288 if (!dst)
1289 goto discard;
1290
1291 __skb_pull(skb, skb_network_offset(skb));
1292
1293 if (!neigh_event_send(neigh, skb)) {
1294 int err;
1295 struct net_device *dev = neigh->dev;
1296 unsigned int seq;
1297
1298 if (dev->header_ops->cache && !neigh->hh.hh_len)
1299 neigh_hh_init(neigh, dst);
1300
1301 do {
1302 seq = read_seqbegin(&neigh->ha_lock);
1303 err = dev_hard_header(skb, dev, ntohs(skb->protocol),
1304 neigh->ha, NULL, skb->len);
1305 } while (read_seqretry(&neigh->ha_lock, seq));
1306
1307 if (err >= 0)
1308 rc = dev_queue_xmit(skb);
1309 else
1310 goto out_kfree_skb;
1311 }
1312 out:
1313 return rc;
1314 discard:
1315 NEIGH_PRINTK1("neigh_resolve_output: dst=%p neigh=%p\n",
1316 dst, neigh);
1317 out_kfree_skb:
1318 rc = -EINVAL;
1319 kfree_skb(skb);
1320 goto out;
1321 }
1322 EXPORT_SYMBOL(neigh_resolve_output);
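/*
 * Editorial sketch, in the spirit of this patch's subject: the output
 * path looks up the neighbour and simply calls n->output.  This file
 * points n->output at ops->output (typically neigh_resolve_output())
 * while the entry is suspect, and at ops->connected_output once it is
 * connected -- see neigh_suspect() and neigh_connect() above.
 */
static int example_neigh_xmit(struct neighbour *n, struct sk_buff *skb)
{
	/* resolves if necessary, builds the header, then dev_queue_xmit() */
	return n->output(n, skb);
}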
1323
1324 /* As fast as possible without hh cache */
1325
1326 int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb)
1327 {
1328 struct net_device *dev = neigh->dev;
1329 unsigned int seq;
1330 int err;
1331
1332 __skb_pull(skb, skb_network_offset(skb));
1333
1334 do {
1335 seq = read_seqbegin(&neigh->ha_lock);
1336 err = dev_hard_header(skb, dev, ntohs(skb->protocol),
1337 neigh->ha, NULL, skb->len);
1338 } while (read_seqretry(&neigh->ha_lock, seq));
1339
1340 if (err >= 0)
1341 err = dev_queue_xmit(skb);
1342 else {
1343 err = -EINVAL;
1344 kfree_skb(skb);
1345 }
1346 return err;
1347 }
1348 EXPORT_SYMBOL(neigh_connected_output);
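/*
 * The ha_lock read pattern used above, isolated as a sketch: retry until
 * a snapshot of neigh->ha is read without racing a write_seqlock()ed
 * update in neigh_update().  This is essentially what the
 * neigh_ha_snapshot() helper does.
 */
static void example_read_ha(struct neighbour *n, u8 *buf, int len)
{
	unsigned int seq;

	do {
		seq = read_seqbegin(&n->ha_lock);
		memcpy(buf, n->ha, len);
	} while (read_seqretry(&n->ha_lock, seq));
}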
1349
1350 int neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb)
1351 {
1352 return dev_queue_xmit(skb);
1353 }
1354 EXPORT_SYMBOL(neigh_direct_output);
1355
1356 static void neigh_proxy_process(unsigned long arg)
1357 {
1358 struct neigh_table *tbl = (struct neigh_table *)arg;
1359 long sched_next = 0;
1360 unsigned long now = jiffies;
1361 struct sk_buff *skb, *n;
1362
1363 spin_lock(&tbl->proxy_queue.lock);
1364
1365 skb_queue_walk_safe(&tbl->proxy_queue, skb, n) {
1366 long tdif = NEIGH_CB(skb)->sched_next - now;
1367
1368 if (tdif <= 0) {
1369 struct net_device *dev = skb->dev;
1370
1371 __skb_unlink(skb, &tbl->proxy_queue);
1372 if (tbl->proxy_redo && netif_running(dev)) {
1373 rcu_read_lock();
1374 tbl->proxy_redo(skb);
1375 rcu_read_unlock();
1376 } else {
1377 kfree_skb(skb);
1378 }
1379
1380 dev_put(dev);
1381 } else if (!sched_next || tdif < sched_next)
1382 sched_next = tdif;
1383 }
1384 del_timer(&tbl->proxy_timer);
1385 if (sched_next)
1386 mod_timer(&tbl->proxy_timer, jiffies + sched_next);
1387 spin_unlock(&tbl->proxy_queue.lock);
1388 }
1389
1390 void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
1391 struct sk_buff *skb)
1392 {
1393 unsigned long now = jiffies;
1394 unsigned long sched_next = now + (net_random() % p->proxy_delay);
1395
1396 if (tbl->proxy_queue.qlen > p->proxy_qlen) {
1397 kfree_skb(skb);
1398 return;
1399 }
1400
1401 NEIGH_CB(skb)->sched_next = sched_next;
1402 NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;
1403
1404 spin_lock(&tbl->proxy_queue.lock);
1405 if (del_timer(&tbl->proxy_timer)) {
1406 if (time_before(tbl->proxy_timer.expires, sched_next))
1407 sched_next = tbl->proxy_timer.expires;
1408 }
1409 skb_dst_drop(skb);
1410 dev_hold(skb->dev);
1411 __skb_queue_tail(&tbl->proxy_queue, skb);
1412 mod_timer(&tbl->proxy_timer, sched_next);
1413 spin_unlock(&tbl->proxy_queue.lock);
1414 }
1415 EXPORT_SYMBOL(pneigh_enqueue);
1416
1417 static inline struct neigh_parms *lookup_neigh_parms(struct neigh_table *tbl,
1418 struct net *net, int ifindex)
1419 {
1420 struct neigh_parms *p;
1421
1422 for (p = &tbl->parms; p; p = p->next) {
1423 if ((p->dev && p->dev->ifindex == ifindex && net_eq(neigh_parms_net(p), net)) ||
1424 (!p->dev && !ifindex))
1425 return p;
1426 }
1427
1428 return NULL;
1429 }
1430
1431 struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
1432 struct neigh_table *tbl)
1433 {
1434 struct neigh_parms *p, *ref;
1435 struct net *net = dev_net(dev);
1436 const struct net_device_ops *ops = dev->netdev_ops;
1437
1438 ref = lookup_neigh_parms(tbl, net, 0);
1439 if (!ref)
1440 return NULL;
1441
1442 p = kmemdup(ref, sizeof(*p), GFP_KERNEL);
1443 if (p) {
1444 p->tbl = tbl;
1445 atomic_set(&p->refcnt, 1);
1446 p->reachable_time =
1447 neigh_rand_reach_time(p->base_reachable_time);
1448
1449 if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) {
1450 kfree(p);
1451 return NULL;
1452 }
1453
1454 dev_hold(dev);
1455 p->dev = dev;
1456 write_pnet(&p->net, hold_net(net));
1457 p->sysctl_table = NULL;
1458 write_lock_bh(&tbl->lock);
1459 p->next = tbl->parms.next;
1460 tbl->parms.next = p;
1461 write_unlock_bh(&tbl->lock);
1462 }
1463 return p;
1464 }
1465 EXPORT_SYMBOL(neigh_parms_alloc);
1466
1467 static void neigh_rcu_free_parms(struct rcu_head *head)
1468 {
1469 struct neigh_parms *parms =
1470 container_of(head, struct neigh_parms, rcu_head);
1471
1472 neigh_parms_put(parms);
1473 }
1474
1475 void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
1476 {
1477 struct neigh_parms **p;
1478
1479 if (!parms || parms == &tbl->parms)
1480 return;
1481 write_lock_bh(&tbl->lock);
1482 for (p = &tbl->parms.next; *p; p = &(*p)->next) {
1483 if (*p == parms) {
1484 *p = parms->next;
1485 parms->dead = 1;
1486 write_unlock_bh(&tbl->lock);
1487 if (parms->dev)
1488 dev_put(parms->dev);
1489 call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
1490 return;
1491 }
1492 }
1493 write_unlock_bh(&tbl->lock);
1494 NEIGH_PRINTK1("neigh_parms_release: not found\n");
1495 }
1496 EXPORT_SYMBOL(neigh_parms_release);
1497
1498 static void neigh_parms_destroy(struct neigh_parms *parms)
1499 {
1500 release_net(neigh_parms_net(parms));
1501 kfree(parms);
1502 }
1503
1504 static struct lock_class_key neigh_table_proxy_queue_class;
1505
1506 static void neigh_table_init_no_netlink(struct neigh_table *tbl)
1507 {
1508 unsigned long now = jiffies;
1509 unsigned long phsize;
1510
1511 write_pnet(&tbl->parms.net, &init_net);
1512 atomic_set(&tbl->parms.refcnt, 1);
1513 tbl->parms.reachable_time =
1514 neigh_rand_reach_time(tbl->parms.base_reachable_time);
1515
1516 tbl->stats = alloc_percpu(struct neigh_statistics);
1517 if (!tbl->stats)
1518 panic("cannot create neighbour cache statistics");
1519
1520 #ifdef CONFIG_PROC_FS
1521 if (!proc_create_data(tbl->id, 0, init_net.proc_net_stat,
1522 &neigh_stat_seq_fops, tbl))
1523 panic("cannot create neighbour proc dir entry");
1524 #endif
1525
1526 RCU_INIT_POINTER(tbl->nht, neigh_hash_alloc(3));
1527
1528 phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
1529 tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);
1530
1531 if (!tbl->nht || !tbl->phash_buckets)
1532 panic("cannot allocate neighbour cache hashes");
1533
1534 rwlock_init(&tbl->lock);
1535 INIT_DELAYED_WORK_DEFERRABLE(&tbl->gc_work, neigh_periodic_work);
1536 schedule_delayed_work(&tbl->gc_work, tbl->parms.reachable_time);
1537 setup_timer(&tbl->proxy_timer, neigh_proxy_process, (unsigned long)tbl);
1538 skb_queue_head_init_class(&tbl->proxy_queue,
1539 &neigh_table_proxy_queue_class);
1540
1541 tbl->last_flush = now;
1542 tbl->last_rand = now + tbl->parms.reachable_time * 20;
1543 }
1544
1545 void neigh_table_init(struct neigh_table *tbl)
1546 {
1547 struct neigh_table *tmp;
1548
1549 neigh_table_init_no_netlink(tbl);
1550 write_lock(&neigh_tbl_lock);
1551 for (tmp = neigh_tables; tmp; tmp = tmp->next) {
1552 if (tmp->family == tbl->family)
1553 break;
1554 }
1555 tbl->next = neigh_tables;
1556 neigh_tables = tbl;
1557 write_unlock(&neigh_tbl_lock);
1558
1559 if (unlikely(tmp)) {
1560 pr_err("Registering multiple tables for family %d\n",
1561 tbl->family);
1562 dump_stack();
1563 }
1564 }
1565 EXPORT_SYMBOL(neigh_table_init);
1566
1567 int neigh_table_clear(struct neigh_table *tbl)
1568 {
1569 struct neigh_table **tp;
1570
1571 /* This is not clean... Fix it so the IPv6 module can be unloaded safely */
1572 cancel_delayed_work_sync(&tbl->gc_work);
1573 del_timer_sync(&tbl->proxy_timer);
1574 pneigh_queue_purge(&tbl->proxy_queue);
1575 neigh_ifdown(tbl, NULL);
1576 if (atomic_read(&tbl->entries))
1577 pr_crit("neighbour leakage\n");
1578 write_lock(&neigh_tbl_lock);
1579 for (tp = &neigh_tables; *tp; tp = &(*tp)->next) {
1580 if (*tp == tbl) {
1581 *tp = tbl->next;
1582 break;
1583 }
1584 }
1585 write_unlock(&neigh_tbl_lock);
1586
1587 call_rcu(&rcu_dereference_protected(tbl->nht, 1)->rcu,
1588 neigh_hash_free_rcu);
1589 tbl->nht = NULL;
1590
1591 kfree(tbl->phash_buckets);
1592 tbl->phash_buckets = NULL;
1593
1594 remove_proc_entry(tbl->id, init_net.proc_net_stat);
1595
1596 free_percpu(tbl->stats);
1597 tbl->stats = NULL;
1598
1599 return 0;
1600 }
1601 EXPORT_SYMBOL(neigh_table_clear);
1602
1603 static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1604 {
1605 struct net *net = sock_net(skb->sk);
1606 struct ndmsg *ndm;
1607 struct nlattr *dst_attr;
1608 struct neigh_table *tbl;
1609 struct net_device *dev = NULL;
1610 int err = -EINVAL;
1611
1612 ASSERT_RTNL();
1613 if (nlmsg_len(nlh) < sizeof(*ndm))
1614 goto out;
1615
1616 dst_attr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_DST);
1617 if (dst_attr == NULL)
1618 goto out;
1619
1620 ndm = nlmsg_data(nlh);
1621 if (ndm->ndm_ifindex) {
1622 dev = __dev_get_by_index(net, ndm->ndm_ifindex);
1623 if (dev == NULL) {
1624 err = -ENODEV;
1625 goto out;
1626 }
1627 }
1628
1629 read_lock(&neigh_tbl_lock);
1630 for (tbl = neigh_tables; tbl; tbl = tbl->next) {
1631 struct neighbour *neigh;
1632
1633 if (tbl->family != ndm->ndm_family)
1634 continue;
1635 read_unlock(&neigh_tbl_lock);
1636
1637 if (nla_len(dst_attr) < tbl->key_len)
1638 goto out;
1639
1640 if (ndm->ndm_flags & NTF_PROXY) {
1641 err = pneigh_delete(tbl, net, nla_data(dst_attr), dev);
1642 goto out;
1643 }
1644
1645 if (dev == NULL)
1646 goto out;
1647
1648 neigh = neigh_lookup(tbl, nla_data(dst_attr), dev);
1649 if (neigh == NULL) {
1650 err = -ENOENT;
1651 goto out;
1652 }
1653
1654 err = neigh_update(neigh, NULL, NUD_FAILED,
1655 NEIGH_UPDATE_F_OVERRIDE |
1656 NEIGH_UPDATE_F_ADMIN);
1657 neigh_release(neigh);
1658 goto out;
1659 }
1660 read_unlock(&neigh_tbl_lock);
1661 err = -EAFNOSUPPORT;
1662
1663 out:
1664 return err;
1665 }
1666
1667 static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1668 {
1669 struct net *net = sock_net(skb->sk);
1670 struct ndmsg *ndm;
1671 struct nlattr *tb[NDA_MAX+1];
1672 struct neigh_table *tbl;
1673 struct net_device *dev = NULL;
1674 int err;
1675
1676 ASSERT_RTNL();
1677 err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL);
1678 if (err < 0)
1679 goto out;
1680
1681 err = -EINVAL;
1682 if (tb[NDA_DST] == NULL)
1683 goto out;
1684
1685 ndm = nlmsg_data(nlh);
1686 if (ndm->ndm_ifindex) {
1687 dev = __dev_get_by_index(net, ndm->ndm_ifindex);
1688 if (dev == NULL) {
1689 err = -ENODEV;
1690 goto out;
1691 }
1692
1693 if (tb[NDA_LLADDR] && nla_len(tb[NDA_LLADDR]) < dev->addr_len)
1694 goto out;
1695 }
1696
1697 read_lock(&neigh_tbl_lock);
1698 for (tbl = neigh_tables; tbl; tbl = tbl->next) {
1699 int flags = NEIGH_UPDATE_F_ADMIN | NEIGH_UPDATE_F_OVERRIDE;
1700 struct neighbour *neigh;
1701 void *dst, *lladdr;
1702
1703 if (tbl->family != ndm->ndm_family)
1704 continue;
1705 read_unlock(&neigh_tbl_lock);
1706
1707 if (nla_len(tb[NDA_DST]) < tbl->key_len)
1708 goto out;
1709 dst = nla_data(tb[NDA_DST]);
1710 lladdr = tb[NDA_LLADDR] ? nla_data(tb[NDA_LLADDR]) : NULL;
1711
1712 if (ndm->ndm_flags & NTF_PROXY) {
1713 struct pneigh_entry *pn;
1714
1715 err = -ENOBUFS;
1716 pn = pneigh_lookup(tbl, net, dst, dev, 1);
1717 if (pn) {
1718 pn->flags = ndm->ndm_flags;
1719 err = 0;
1720 }
1721 goto out;
1722 }
1723
1724 if (dev == NULL)
1725 goto out;
1726
1727 neigh = neigh_lookup(tbl, dst, dev);
1728 if (neigh == NULL) {
1729 if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
1730 err = -ENOENT;
1731 goto out;
1732 }
1733
1734 neigh = __neigh_lookup_errno(tbl, dst, dev);
1735 if (IS_ERR(neigh)) {
1736 err = PTR_ERR(neigh);
1737 goto out;
1738 }
1739 } else {
1740 if (nlh->nlmsg_flags & NLM_F_EXCL) {
1741 err = -EEXIST;
1742 neigh_release(neigh);
1743 goto out;
1744 }
1745
1746 if (!(nlh->nlmsg_flags & NLM_F_REPLACE))
1747 flags &= ~NEIGH_UPDATE_F_OVERRIDE;
1748 }
1749
1750 if (ndm->ndm_flags & NTF_USE) {
1751 neigh_event_send(neigh, NULL);
1752 err = 0;
1753 } else
1754 err = neigh_update(neigh, lladdr, ndm->ndm_state, flags);
1755 neigh_release(neigh);
1756 goto out;
1757 }
1758
1759 read_unlock(&neigh_tbl_lock);
1760 err = -EAFNOSUPPORT;
1761 out:
1762 return err;
1763 }
1764
1765 static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
1766 {
1767 struct nlattr *nest;
1768
1769 nest = nla_nest_start(skb, NDTA_PARMS);
1770 if (nest == NULL)
1771 return -ENOBUFS;
1772
1773 if ((parms->dev &&
1774 nla_put_u32(skb, NDTPA_IFINDEX, parms->dev->ifindex)) ||
1775 nla_put_u32(skb, NDTPA_REFCNT, atomic_read(&parms->refcnt)) ||
1776 nla_put_u32(skb, NDTPA_QUEUE_LENBYTES, parms->queue_len_bytes) ||
1777 /* approximate value for the deprecated QUEUE_LEN (in packets) */
1778 nla_put_u32(skb, NDTPA_QUEUE_LEN,
1779 DIV_ROUND_UP(parms->queue_len_bytes,
1780 SKB_TRUESIZE(ETH_FRAME_LEN))) ||
1781 nla_put_u32(skb, NDTPA_PROXY_QLEN, parms->proxy_qlen) ||
1782 nla_put_u32(skb, NDTPA_APP_PROBES, parms->app_probes) ||
1783 nla_put_u32(skb, NDTPA_UCAST_PROBES, parms->ucast_probes) ||
1784 nla_put_u32(skb, NDTPA_MCAST_PROBES, parms->mcast_probes) ||
1785 nla_put_msecs(skb, NDTPA_REACHABLE_TIME, parms->reachable_time) ||
1786 nla_put_msecs(skb, NDTPA_BASE_REACHABLE_TIME,
1787 parms->base_reachable_time) ||
1788 nla_put_msecs(skb, NDTPA_GC_STALETIME, parms->gc_staletime) ||
1789 nla_put_msecs(skb, NDTPA_DELAY_PROBE_TIME,
1790 parms->delay_probe_time) ||
1791 nla_put_msecs(skb, NDTPA_RETRANS_TIME, parms->retrans_time) ||
1792 nla_put_msecs(skb, NDTPA_ANYCAST_DELAY, parms->anycast_delay) ||
1793 nla_put_msecs(skb, NDTPA_PROXY_DELAY, parms->proxy_delay) ||
1794 nla_put_msecs(skb, NDTPA_LOCKTIME, parms->locktime))
1795 goto nla_put_failure;
1796 return nla_nest_end(skb, nest);
1797
1798 nla_put_failure:
1799 nla_nest_cancel(skb, nest);
1800 return -EMSGSIZE;
1801 }
1802
1803 static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
1804 u32 pid, u32 seq, int type, int flags)
1805 {
1806 struct nlmsghdr *nlh;
1807 struct ndtmsg *ndtmsg;
1808
1809 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
1810 if (nlh == NULL)
1811 return -EMSGSIZE;
1812
1813 ndtmsg = nlmsg_data(nlh);
1814
1815 read_lock_bh(&tbl->lock);
1816 ndtmsg->ndtm_family = tbl->family;
1817 ndtmsg->ndtm_pad1 = 0;
1818 ndtmsg->ndtm_pad2 = 0;
1819
1820 if (nla_put_string(skb, NDTA_NAME, tbl->id) ||
1821 nla_put_msecs(skb, NDTA_GC_INTERVAL, tbl->gc_interval) ||
1822 nla_put_u32(skb, NDTA_THRESH1, tbl->gc_thresh1) ||
1823 nla_put_u32(skb, NDTA_THRESH2, tbl->gc_thresh2) ||
1824 nla_put_u32(skb, NDTA_THRESH3, tbl->gc_thresh3))
1825 goto nla_put_failure;
1826 {
1827 unsigned long now = jiffies;
1828 unsigned int flush_delta = now - tbl->last_flush;
1829 unsigned int rand_delta = now - tbl->last_rand;
1830 struct neigh_hash_table *nht;
1831 struct ndt_config ndc = {
1832 .ndtc_key_len = tbl->key_len,
1833 .ndtc_entry_size = tbl->entry_size,
1834 .ndtc_entries = atomic_read(&tbl->entries),
1835 .ndtc_last_flush = jiffies_to_msecs(flush_delta),
1836 .ndtc_last_rand = jiffies_to_msecs(rand_delta),
1837 .ndtc_proxy_qlen = tbl->proxy_queue.qlen,
1838 };
1839
1840 rcu_read_lock_bh();
1841 nht = rcu_dereference_bh(tbl->nht);
1842 ndc.ndtc_hash_rnd = nht->hash_rnd[0];
1843 ndc.ndtc_hash_mask = ((1 << nht->hash_shift) - 1);
1844 rcu_read_unlock_bh();
1845
1846 if (nla_put(skb, NDTA_CONFIG, sizeof(ndc), &ndc))
1847 goto nla_put_failure;
1848 }
1849
1850 {
1851 int cpu;
1852 struct ndt_stats ndst;
1853
1854 memset(&ndst, 0, sizeof(ndst));
1855
1856 for_each_possible_cpu(cpu) {
1857 struct neigh_statistics *st;
1858
1859 st = per_cpu_ptr(tbl->stats, cpu);
1860 ndst.ndts_allocs += st->allocs;
1861 ndst.ndts_destroys += st->destroys;
1862 ndst.ndts_hash_grows += st->hash_grows;
1863 ndst.ndts_res_failed += st->res_failed;
1864 ndst.ndts_lookups += st->lookups;
1865 ndst.ndts_hits += st->hits;
1866 ndst.ndts_rcv_probes_mcast += st->rcv_probes_mcast;
1867 ndst.ndts_rcv_probes_ucast += st->rcv_probes_ucast;
1868 ndst.ndts_periodic_gc_runs += st->periodic_gc_runs;
1869 ndst.ndts_forced_gc_runs += st->forced_gc_runs;
1870 }
1871
1872 if (nla_put(skb, NDTA_STATS, sizeof(ndst), &ndst))
1873 goto nla_put_failure;
1874 }
1875
1876 BUG_ON(tbl->parms.dev);
1877 if (neightbl_fill_parms(skb, &tbl->parms) < 0)
1878 goto nla_put_failure;
1879
1880 read_unlock_bh(&tbl->lock);
1881 return nlmsg_end(skb, nlh);
1882
1883 nla_put_failure:
1884 read_unlock_bh(&tbl->lock);
1885 nlmsg_cancel(skb, nlh);
1886 return -EMSGSIZE;
1887 }
1888
1889 static int neightbl_fill_param_info(struct sk_buff *skb,
1890 struct neigh_table *tbl,
1891 struct neigh_parms *parms,
1892 u32 pid, u32 seq, int type,
1893 unsigned int flags)
1894 {
1895 struct ndtmsg *ndtmsg;
1896 struct nlmsghdr *nlh;
1897
1898 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
1899 if (nlh == NULL)
1900 return -EMSGSIZE;
1901
1902 ndtmsg = nlmsg_data(nlh);
1903
1904 read_lock_bh(&tbl->lock);
1905 ndtmsg->ndtm_family = tbl->family;
1906 ndtmsg->ndtm_pad1 = 0;
1907 ndtmsg->ndtm_pad2 = 0;
1908
1909 if (nla_put_string(skb, NDTA_NAME, tbl->id) < 0 ||
1910 neightbl_fill_parms(skb, parms) < 0)
1911 goto errout;
1912
1913 read_unlock_bh(&tbl->lock);
1914 return nlmsg_end(skb, nlh);
1915 errout:
1916 read_unlock_bh(&tbl->lock);
1917 nlmsg_cancel(skb, nlh);
1918 return -EMSGSIZE;
1919 }
1920
1921 static const struct nla_policy nl_neightbl_policy[NDTA_MAX+1] = {
1922 [NDTA_NAME] = { .type = NLA_STRING },
1923 [NDTA_THRESH1] = { .type = NLA_U32 },
1924 [NDTA_THRESH2] = { .type = NLA_U32 },
1925 [NDTA_THRESH3] = { .type = NLA_U32 },
1926 [NDTA_GC_INTERVAL] = { .type = NLA_U64 },
1927 [NDTA_PARMS] = { .type = NLA_NESTED },
1928 };
1929
1930 static const struct nla_policy nl_ntbl_parm_policy[NDTPA_MAX+1] = {
1931 [NDTPA_IFINDEX] = { .type = NLA_U32 },
1932 [NDTPA_QUEUE_LEN] = { .type = NLA_U32 },
1933 [NDTPA_PROXY_QLEN] = { .type = NLA_U32 },
1934 [NDTPA_APP_PROBES] = { .type = NLA_U32 },
1935 [NDTPA_UCAST_PROBES] = { .type = NLA_U32 },
1936 [NDTPA_MCAST_PROBES] = { .type = NLA_U32 },
1937 [NDTPA_BASE_REACHABLE_TIME] = { .type = NLA_U64 },
1938 [NDTPA_GC_STALETIME] = { .type = NLA_U64 },
1939 [NDTPA_DELAY_PROBE_TIME] = { .type = NLA_U64 },
1940 [NDTPA_RETRANS_TIME] = { .type = NLA_U64 },
1941 [NDTPA_ANYCAST_DELAY] = { .type = NLA_U64 },
1942 [NDTPA_PROXY_DELAY] = { .type = NLA_U64 },
1943 [NDTPA_LOCKTIME] = { .type = NLA_U64 },
1944 };
1945
1946 static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1947 {
1948 struct net *net = sock_net(skb->sk);
1949 struct neigh_table *tbl;
1950 struct ndtmsg *ndtmsg;
1951 struct nlattr *tb[NDTA_MAX+1];
1952 int err;
1953
1954 err = nlmsg_parse(nlh, sizeof(*ndtmsg), tb, NDTA_MAX,
1955 nl_neightbl_policy);
1956 if (err < 0)
1957 goto errout;
1958
1959 if (tb[NDTA_NAME] == NULL) {
1960 err = -EINVAL;
1961 goto errout;
1962 }
1963
1964 ndtmsg = nlmsg_data(nlh);
1965 read_lock(&neigh_tbl_lock);
1966 for (tbl = neigh_tables; tbl; tbl = tbl->next) {
1967 if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
1968 continue;
1969
1970 if (nla_strcmp(tb[NDTA_NAME], tbl->id) == 0)
1971 break;
1972 }
1973
1974 if (tbl == NULL) {
1975 err = -ENOENT;
1976 goto errout_locked;
1977 }
1978
1979 /*
1980 * We acquire tbl->lock to be nice to the periodic timers and
1981 * make sure they always see a consistent set of values.
1982 */
1983 write_lock_bh(&tbl->lock);
1984
1985 if (tb[NDTA_PARMS]) {
1986 struct nlattr *tbp[NDTPA_MAX+1];
1987 struct neigh_parms *p;
1988 int i, ifindex = 0;
1989
1990 err = nla_parse_nested(tbp, NDTPA_MAX, tb[NDTA_PARMS],
1991 nl_ntbl_parm_policy);
1992 if (err < 0)
1993 goto errout_tbl_lock;
1994
1995 if (tbp[NDTPA_IFINDEX])
1996 ifindex = nla_get_u32(tbp[NDTPA_IFINDEX]);
1997
1998 p = lookup_neigh_parms(tbl, net, ifindex);
1999 if (p == NULL) {
2000 err = -ENOENT;
2001 goto errout_tbl_lock;
2002 }
2003
2004 for (i = 1; i <= NDTPA_MAX; i++) {
2005 if (tbp[i] == NULL)
2006 continue;
2007
2008 switch (i) {
2009 case NDTPA_QUEUE_LEN:
2010 p->queue_len_bytes = nla_get_u32(tbp[i]) *
2011 SKB_TRUESIZE(ETH_FRAME_LEN);
2012 break;
2013 case NDTPA_QUEUE_LENBYTES:
2014 p->queue_len_bytes = nla_get_u32(tbp[i]);
2015 break;
2016 case NDTPA_PROXY_QLEN:
2017 p->proxy_qlen = nla_get_u32(tbp[i]);
2018 break;
2019 case NDTPA_APP_PROBES:
2020 p->app_probes = nla_get_u32(tbp[i]);
2021 break;
2022 case NDTPA_UCAST_PROBES:
2023 p->ucast_probes = nla_get_u32(tbp[i]);
2024 break;
2025 case NDTPA_MCAST_PROBES:
2026 p->mcast_probes = nla_get_u32(tbp[i]);
2027 break;
2028 case NDTPA_BASE_REACHABLE_TIME:
2029 p->base_reachable_time = nla_get_msecs(tbp[i]);
2030 break;
2031 case NDTPA_GC_STALETIME:
2032 p->gc_staletime = nla_get_msecs(tbp[i]);
2033 break;
2034 case NDTPA_DELAY_PROBE_TIME:
2035 p->delay_probe_time = nla_get_msecs(tbp[i]);
2036 break;
2037 case NDTPA_RETRANS_TIME:
2038 p->retrans_time = nla_get_msecs(tbp[i]);
2039 break;
2040 case NDTPA_ANYCAST_DELAY:
2041 p->anycast_delay = nla_get_msecs(tbp[i]);
2042 break;
2043 case NDTPA_PROXY_DELAY:
2044 p->proxy_delay = nla_get_msecs(tbp[i]);
2045 break;
2046 case NDTPA_LOCKTIME:
2047 p->locktime = nla_get_msecs(tbp[i]);
2048 break;
2049 }
2050 }
2051 }
2052
2053 if (tb[NDTA_THRESH1])
2054 tbl->gc_thresh1 = nla_get_u32(tb[NDTA_THRESH1]);
2055
2056 if (tb[NDTA_THRESH2])
2057 tbl->gc_thresh2 = nla_get_u32(tb[NDTA_THRESH2]);
2058
2059 if (tb[NDTA_THRESH3])
2060 tbl->gc_thresh3 = nla_get_u32(tb[NDTA_THRESH3]);
2061
2062 if (tb[NDTA_GC_INTERVAL])
2063 tbl->gc_interval = nla_get_msecs(tb[NDTA_GC_INTERVAL]);
2064
2065 err = 0;
2066
2067 errout_tbl_lock:
2068 write_unlock_bh(&tbl->lock);
2069 errout_locked:
2070 read_unlock(&neigh_tbl_lock);
2071 errout:
2072 return err;
2073 }
2074
2075 static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2076 {
2077 struct net *net = sock_net(skb->sk);
2078 int family, tidx, nidx = 0;
2079 int tbl_skip = cb->args[0];
2080 int neigh_skip = cb->args[1];
2081 struct neigh_table *tbl;
2082
2083 family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
2084
2085 read_lock(&neigh_tbl_lock);
2086 for (tbl = neigh_tables, tidx = 0; tbl; tbl = tbl->next, tidx++) {
2087 struct neigh_parms *p;
2088
2089 if (tidx < tbl_skip || (family && tbl->family != family))
2090 continue;
2091
2092 if (neightbl_fill_info(skb, tbl, NETLINK_CB(cb->skb).pid,
2093 cb->nlh->nlmsg_seq, RTM_NEWNEIGHTBL,
2094 NLM_F_MULTI) <= 0)
2095 break;
2096
2097 for (nidx = 0, p = tbl->parms.next; p; p = p->next) {
2098 if (!net_eq(neigh_parms_net(p), net))
2099 continue;
2100
2101 if (nidx < neigh_skip)
2102 goto next;
2103
2104 if (neightbl_fill_param_info(skb, tbl, p,
2105 NETLINK_CB(cb->skb).pid,
2106 cb->nlh->nlmsg_seq,
2107 RTM_NEWNEIGHTBL,
2108 NLM_F_MULTI) <= 0)
2109 goto out;
2110 next:
2111 nidx++;
2112 }
2113
2114 neigh_skip = 0;
2115 }
2116 out:
2117 read_unlock(&neigh_tbl_lock);
2118 cb->args[0] = tidx;
2119 cb->args[1] = nidx;
2120
2121 return skb->len;
2122 }
2123
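/* Note: on success this returns the message length as reported by
 * nlmsg_end(); on overflow the partially built message is cancelled and
 * -EMSGSIZE is returned, which the dump loops below treat as "skb full,
 * stop and resume later" (their <= 0 checks).
 */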
2124 static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
2125 u32 pid, u32 seq, int type, unsigned int flags)
2126 {
2127 unsigned long now = jiffies;
2128 struct nda_cacheinfo ci;
2129 struct nlmsghdr *nlh;
2130 struct ndmsg *ndm;
2131
2132 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
2133 if (nlh == NULL)
2134 return -EMSGSIZE;
2135
2136 ndm = nlmsg_data(nlh);
2137 ndm->ndm_family = neigh->ops->family;
2138 ndm->ndm_pad1 = 0;
2139 ndm->ndm_pad2 = 0;
2140 ndm->ndm_flags = neigh->flags;
2141 ndm->ndm_type = neigh->type;
2142 ndm->ndm_ifindex = neigh->dev->ifindex;
2143
2144 if (nla_put(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key))
2145 goto nla_put_failure;
2146
2147 read_lock_bh(&neigh->lock);
2148 ndm->ndm_state = neigh->nud_state;
2149 if (neigh->nud_state & NUD_VALID) {
2150 char haddr[MAX_ADDR_LEN];
2151
2152 neigh_ha_snapshot(haddr, neigh, neigh->dev);
2153 if (nla_put(skb, NDA_LLADDR, neigh->dev->addr_len, haddr) < 0) {
2154 read_unlock_bh(&neigh->lock);
2155 goto nla_put_failure;
2156 }
2157 }
2158
2159 ci.ndm_used = jiffies_to_clock_t(now - neigh->used);
2160 ci.ndm_confirmed = jiffies_to_clock_t(now - neigh->confirmed);
2161 ci.ndm_updated = jiffies_to_clock_t(now - neigh->updated);
2162 ci.ndm_refcnt = atomic_read(&neigh->refcnt) - 1;
2163 read_unlock_bh(&neigh->lock);
2164
2165 if (nla_put_u32(skb, NDA_PROBES, atomic_read(&neigh->probes)) ||
2166 nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
2167 goto nla_put_failure;
2168
2169 return nlmsg_end(skb, nlh);
2170
2171 nla_put_failure:
2172 nlmsg_cancel(skb, nlh);
2173 return -EMSGSIZE;
2174 }
2175
2176 static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn,
2177 u32 pid, u32 seq, int type, unsigned int flags,
2178 struct neigh_table *tbl)
2179 {
2180 struct nlmsghdr *nlh;
2181 struct ndmsg *ndm;
2182
2183 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
2184 if (nlh == NULL)
2185 return -EMSGSIZE;
2186
2187 ndm = nlmsg_data(nlh);
2188 ndm->ndm_family = tbl->family;
2189 ndm->ndm_pad1 = 0;
2190 ndm->ndm_pad2 = 0;
2191 ndm->ndm_flags = pn->flags | NTF_PROXY;
2192 ndm->ndm_type = NDA_DST;
2193 ndm->ndm_ifindex = pn->dev->ifindex;
2194 ndm->ndm_state = NUD_NONE;
2195
2196 if (nla_put(skb, NDA_DST, tbl->key_len, pn->key))
2197 goto nla_put_failure;
2198
2199 return nlmsg_end(skb, nlh);
2200
2201 nla_put_failure:
2202 nlmsg_cancel(skb, nlh);
2203 return -EMSGSIZE;
2204 }
2205
2206 static void neigh_update_notify(struct neighbour *neigh)
2207 {
2208 call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
2209 __neigh_notify(neigh, RTM_NEWNEIGH, 0);
2210 }
2211
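/* Resume-state note: neighbour dumps park their progress in cb->args[]:
 * args[0] holds the table index (see neigh_dump_info() below), args[1]
 * and args[2] the hash bucket and chain position for neighbour entries,
 * and args[3]/args[4] the same for proxy entries, so the two walks can
 * resume independently across netlink recvmsg calls.
 */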
2212 static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2213 struct netlink_callback *cb)
2214 {
2215 struct net *net = sock_net(skb->sk);
2216 struct neighbour *n;
2217 int rc, h, s_h = cb->args[1];
2218 int idx, s_idx = idx = cb->args[2];
2219 struct neigh_hash_table *nht;
2220
2221 rcu_read_lock_bh();
2222 nht = rcu_dereference_bh(tbl->nht);
2223
2224 for (h = s_h; h < (1 << nht->hash_shift); h++) {
2225 if (h > s_h)
2226 s_idx = 0;
2227 for (n = rcu_dereference_bh(nht->hash_buckets[h]), idx = 0;
2228 n != NULL;
2229 n = rcu_dereference_bh(n->next)) {
2230 if (!net_eq(dev_net(n->dev), net))
2231 continue;
2232 if (idx < s_idx)
2233 goto next;
2234 if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).pid,
2235 cb->nlh->nlmsg_seq,
2236 RTM_NEWNEIGH,
2237 NLM_F_MULTI) <= 0) {
2238 rc = -1;
2239 goto out;
2240 }
2241 next:
2242 idx++;
2243 }
2244 }
2245 rc = skb->len;
2246 out:
2247 rcu_read_unlock_bh();
2248 cb->args[1] = h;
2249 cb->args[2] = idx;
2250 return rc;
2251 }
2252
2253 static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2254 struct netlink_callback *cb)
2255 {
2256 struct pneigh_entry *n;
2257 struct net *net = sock_net(skb->sk);
2258 int rc, h, s_h = cb->args[3];
2259 int idx, s_idx = idx = cb->args[4];
2260
2261 read_lock_bh(&tbl->lock);
2262
2263 for (h = s_h; h <= PNEIGH_HASHMASK; h++) {
2264 if (h > s_h)
2265 s_idx = 0;
2266 for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) {
2267 			if (!net_eq(dev_net(n->dev), net))
2268 continue;
2269 if (idx < s_idx)
2270 goto next;
2271 if (pneigh_fill_info(skb, n, NETLINK_CB(cb->skb).pid,
2272 cb->nlh->nlmsg_seq,
2273 RTM_NEWNEIGH,
2274 NLM_F_MULTI, tbl) <= 0) {
2275 read_unlock_bh(&tbl->lock);
2276 rc = -1;
2277 goto out;
2278 }
2279 next:
2280 idx++;
2281 }
2282 }
2283
2284 read_unlock_bh(&tbl->lock);
2285 rc = skb->len;
2286 out:
2287 cb->args[3] = h;
2288 cb->args[4] = idx;
2289 return rc;
2291 }
2292
2293 static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2294 {
2295 struct neigh_table *tbl;
2296 int t, family, s_t;
2297 int proxy = 0;
2298 int err;
2299
2300 read_lock(&neigh_tbl_lock);
2301 family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
2302
2303 	/* Check whether a full ndmsg structure is present; the family
2304 	 * member sits at the same offset in both rtgenmsg and ndmsg.
2305 	 */
2306 if (nlmsg_len(cb->nlh) >= sizeof(struct ndmsg) &&
2307 ((struct ndmsg *) nlmsg_data(cb->nlh))->ndm_flags == NTF_PROXY)
2308 proxy = 1;
2309
2310 s_t = cb->args[0];
2311
2312 for (tbl = neigh_tables, t = 0; tbl;
2313 tbl = tbl->next, t++) {
2314 if (t < s_t || (family && tbl->family != family))
2315 continue;
2316 if (t > s_t)
2317 memset(&cb->args[1], 0, sizeof(cb->args) -
2318 sizeof(cb->args[0]));
2319 if (proxy)
2320 err = pneigh_dump_table(tbl, skb, cb);
2321 else
2322 err = neigh_dump_table(tbl, skb, cb);
2323 if (err < 0)
2324 break;
2325 }
2326 read_unlock(&neigh_tbl_lock);
2327
2328 cb->args[0] = t;
2329 return skb->len;
2330 }
2331
2332 void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
2333 {
2334 int chain;
2335 struct neigh_hash_table *nht;
2336
2337 rcu_read_lock_bh();
2338 nht = rcu_dereference_bh(tbl->nht);
2339
2340 read_lock(&tbl->lock); /* avoid resizes */
2341 for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
2342 struct neighbour *n;
2343
2344 for (n = rcu_dereference_bh(nht->hash_buckets[chain]);
2345 n != NULL;
2346 n = rcu_dereference_bh(n->next))
2347 cb(n, cookie);
2348 }
2349 read_unlock(&tbl->lock);
2350 rcu_read_unlock_bh();
2351 }
2352 EXPORT_SYMBOL(neigh_for_each);
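/* Usage sketch, with a hypothetical callback: count the entries of a
 * table that are currently valid, e.g.
 *
 *	static void count_valid(struct neighbour *n, void *cookie)
 *	{
 *		int *count = cookie;
 *
 *		if (n->nud_state & NUD_VALID)
 *			(*count)++;
 *	}
 *
 *	int count = 0;
 *	neigh_for_each(&arp_tbl, count_valid, &count);
 *
 * The callback runs under tbl->lock with BHs disabled, so it must not
 * sleep and should stay cheap.
 */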
2353
2354 /* The tbl->lock must be held as a writer and BH disabled. */
2355 void __neigh_for_each_release(struct neigh_table *tbl,
2356 int (*cb)(struct neighbour *))
2357 {
2358 int chain;
2359 struct neigh_hash_table *nht;
2360
2361 nht = rcu_dereference_protected(tbl->nht,
2362 lockdep_is_held(&tbl->lock));
2363 for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
2364 struct neighbour *n;
2365 struct neighbour __rcu **np;
2366
2367 np = &nht->hash_buckets[chain];
2368 while ((n = rcu_dereference_protected(*np,
2369 lockdep_is_held(&tbl->lock))) != NULL) {
2370 int release;
2371
2372 write_lock(&n->lock);
2373 release = cb(n);
2374 if (release) {
2375 rcu_assign_pointer(*np,
2376 rcu_dereference_protected(n->next,
2377 lockdep_is_held(&tbl->lock)));
2378 n->dead = 1;
2379 } else
2380 np = &n->next;
2381 write_unlock(&n->lock);
2382 if (release)
2383 neigh_cleanup_and_release(n);
2384 }
2385 }
2386 }
2387 EXPORT_SYMBOL(__neigh_for_each_release);
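/* Usage sketch, with a hypothetical policy callback deciding, under
 * n->lock held for writing, whether an entry should be unlinked:
 *
 *	static int release_unused(struct neighbour *n)
 *	{
 *		return atomic_read(&n->refcnt) == 1;
 *	}
 *
 *	write_lock_bh(&tbl->lock);
 *	__neigh_for_each_release(tbl, release_unused);
 *	write_unlock_bh(&tbl->lock);
 *
 * refcnt == 1 means only the table still holds a reference. A nonzero
 * return unlinks the entry, marks it dead, and releases it through
 * neigh_cleanup_and_release() once n->lock has been dropped.
 */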
2388
2389 #ifdef CONFIG_PROC_FS
2390
2391 static struct neighbour *neigh_get_first(struct seq_file *seq)
2392 {
2393 struct neigh_seq_state *state = seq->private;
2394 struct net *net = seq_file_net(seq);
2395 struct neigh_hash_table *nht = state->nht;
2396 struct neighbour *n = NULL;
2397 int bucket = state->bucket;
2398
2399 state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
2400 for (bucket = 0; bucket < (1 << nht->hash_shift); bucket++) {
2401 n = rcu_dereference_bh(nht->hash_buckets[bucket]);
2402
2403 while (n) {
2404 if (!net_eq(dev_net(n->dev), net))
2405 goto next;
2406 if (state->neigh_sub_iter) {
2407 loff_t fakep = 0;
2408 void *v;
2409
2410 v = state->neigh_sub_iter(state, n, &fakep);
2411 if (!v)
2412 goto next;
2413 }
2414 if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
2415 break;
2416 if (n->nud_state & ~NUD_NOARP)
2417 break;
2418 next:
2419 n = rcu_dereference_bh(n->next);
2420 }
2421
2422 if (n)
2423 break;
2424 }
2425 state->bucket = bucket;
2426
2427 return n;
2428 }
2429
2430 static struct neighbour *neigh_get_next(struct seq_file *seq,
2431 struct neighbour *n,
2432 loff_t *pos)
2433 {
2434 struct neigh_seq_state *state = seq->private;
2435 struct net *net = seq_file_net(seq);
2436 struct neigh_hash_table *nht = state->nht;
2437
2438 if (state->neigh_sub_iter) {
2439 void *v = state->neigh_sub_iter(state, n, pos);
2440 if (v)
2441 return n;
2442 }
2443 n = rcu_dereference_bh(n->next);
2444
2445 while (1) {
2446 while (n) {
2447 if (!net_eq(dev_net(n->dev), net))
2448 goto next;
2449 if (state->neigh_sub_iter) {
2450 void *v = state->neigh_sub_iter(state, n, pos);
2451 if (v)
2452 return n;
2453 goto next;
2454 }
2455 if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
2456 break;
2457
2458 if (n->nud_state & ~NUD_NOARP)
2459 break;
2460 next:
2461 n = rcu_dereference_bh(n->next);
2462 }
2463
2464 if (n)
2465 break;
2466
2467 if (++state->bucket >= (1 << nht->hash_shift))
2468 break;
2469
2470 n = rcu_dereference_bh(nht->hash_buckets[state->bucket]);
2471 }
2472
2473 if (n && pos)
2474 --(*pos);
2475 return n;
2476 }
2477
2478 static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
2479 {
2480 struct neighbour *n = neigh_get_first(seq);
2481
2482 if (n) {
2483 --(*pos);
2484 while (*pos) {
2485 n = neigh_get_next(seq, n, pos);
2486 if (!n)
2487 break;
2488 }
2489 }
2490 return *pos ? NULL : n;
2491 }
2492
2493 static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
2494 {
2495 struct neigh_seq_state *state = seq->private;
2496 struct net *net = seq_file_net(seq);
2497 struct neigh_table *tbl = state->tbl;
2498 struct pneigh_entry *pn = NULL;
2499 int bucket = state->bucket;
2500
2501 state->flags |= NEIGH_SEQ_IS_PNEIGH;
2502 for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
2503 pn = tbl->phash_buckets[bucket];
2504 while (pn && !net_eq(pneigh_net(pn), net))
2505 pn = pn->next;
2506 if (pn)
2507 break;
2508 }
2509 state->bucket = bucket;
2510
2511 return pn;
2512 }
2513
2514 static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
2515 struct pneigh_entry *pn,
2516 loff_t *pos)
2517 {
2518 struct neigh_seq_state *state = seq->private;
2519 struct net *net = seq_file_net(seq);
2520 struct neigh_table *tbl = state->tbl;
2521
2522 do {
2523 pn = pn->next;
2524 } while (pn && !net_eq(pneigh_net(pn), net));
2525
2526 while (!pn) {
2527 if (++state->bucket > PNEIGH_HASHMASK)
2528 break;
2529 pn = tbl->phash_buckets[state->bucket];
2530 while (pn && !net_eq(pneigh_net(pn), net))
2531 pn = pn->next;
2532 if (pn)
2533 break;
2534 }
2535
2536 if (pn && pos)
2537 --(*pos);
2538
2539 return pn;
2540 }
2541
2542 static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
2543 {
2544 struct pneigh_entry *pn = pneigh_get_first(seq);
2545
2546 if (pn) {
2547 --(*pos);
2548 while (*pos) {
2549 pn = pneigh_get_next(seq, pn, pos);
2550 if (!pn)
2551 break;
2552 }
2553 }
2554 return *pos ? NULL : pn;
2555 }
2556
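/* Note: neigh_get_idx() decrements *pos once per entry it walks past,
 * so any count left over when the neighbour hash is exhausted simply
 * carries into pneigh_get_idx(). A single seq_file position thereby
 * indexes the neighbour and proxy hashes as one continuous sequence.
 */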
2557 static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
2558 {
2559 struct neigh_seq_state *state = seq->private;
2560 void *rc;
2561 loff_t idxpos = *pos;
2562
2563 rc = neigh_get_idx(seq, &idxpos);
2564 if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
2565 rc = pneigh_get_idx(seq, &idxpos);
2566
2567 return rc;
2568 }
2569
2570 void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
2571 __acquires(rcu_bh)
2572 {
2573 struct neigh_seq_state *state = seq->private;
2574
2575 state->tbl = tbl;
2576 state->bucket = 0;
2577 state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);
2578
2579 rcu_read_lock_bh();
2580 state->nht = rcu_dereference_bh(tbl->nht);
2581
2582 return *pos ? neigh_get_idx_any(seq, pos) : SEQ_START_TOKEN;
2583 }
2584 EXPORT_SYMBOL(neigh_seq_start);
2585
2586 void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2587 {
2588 struct neigh_seq_state *state;
2589 void *rc;
2590
2591 if (v == SEQ_START_TOKEN) {
2592 rc = neigh_get_first(seq);
2593 goto out;
2594 }
2595
2596 state = seq->private;
2597 if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) {
2598 rc = neigh_get_next(seq, v, NULL);
2599 if (rc)
2600 goto out;
2601 if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY))
2602 rc = pneigh_get_first(seq);
2603 } else {
2604 BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY);
2605 rc = pneigh_get_next(seq, v, NULL);
2606 }
2607 out:
2608 ++(*pos);
2609 return rc;
2610 }
2611 EXPORT_SYMBOL(neigh_seq_next);
2612
2613 void neigh_seq_stop(struct seq_file *seq, void *v)
2614 __releases(rcu_bh)
2615 {
2616 rcu_read_unlock_bh();
2617 }
2618 EXPORT_SYMBOL(neigh_seq_stop);
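/* Usage sketch: a protocol wires these helpers into its own
 * seq_operations and supplies only .show itself; ARP, for instance,
 * does roughly
 *
 *	static void *arp_seq_start(struct seq_file *seq, loff_t *pos)
 *	{
 *		return neigh_seq_start(seq, pos, &arp_tbl,
 *				       NEIGH_SEQ_SKIP_NOARP);
 *	}
 *
 * with neigh_seq_next() and neigh_seq_stop() used directly as the
 * .next and .stop callbacks.
 */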
2619
2620 /* statistics via seq_file */
2621
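/* Note: the iterator below encodes its position as cpu + 1, reserving
 * *pos == 0 for the SEQ_START_TOKEN header line; each step yields the
 * per-cpu counters of the next possible CPU.
 */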
2622 static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
2623 {
2624 struct neigh_table *tbl = seq->private;
2625 int cpu;
2626
2627 if (*pos == 0)
2628 return SEQ_START_TOKEN;
2629
2630 for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
2631 if (!cpu_possible(cpu))
2632 continue;
2633 *pos = cpu+1;
2634 return per_cpu_ptr(tbl->stats, cpu);
2635 }
2636 return NULL;
2637 }
2638
2639 static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2640 {
2641 struct neigh_table *tbl = seq->private;
2642 int cpu;
2643
2644 for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
2645 if (!cpu_possible(cpu))
2646 continue;
2647 *pos = cpu+1;
2648 return per_cpu_ptr(tbl->stats, cpu);
2649 }
2650 return NULL;
2651 }
2652
2653 static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
2654 {
2655
2656 }
2657
2658 static int neigh_stat_seq_show(struct seq_file *seq, void *v)
2659 {
2660 struct neigh_table *tbl = seq->private;
2661 struct neigh_statistics *st = v;
2662
2663 if (v == SEQ_START_TOKEN) {
2664 seq_printf(seq, "entries allocs destroys hash_grows lookups hits res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs unresolved_discards\n");
2665 return 0;
2666 }
2667
2668 seq_printf(seq, "%08x %08lx %08lx %08lx %08lx %08lx %08lx "
2669 "%08lx %08lx %08lx %08lx %08lx\n",
2670 atomic_read(&tbl->entries),
2671
2672 st->allocs,
2673 st->destroys,
2674 st->hash_grows,
2675
2676 st->lookups,
2677 st->hits,
2678
2679 st->res_failed,
2680
2681 st->rcv_probes_mcast,
2682 st->rcv_probes_ucast,
2683
2684 st->periodic_gc_runs,
2685 st->forced_gc_runs,
2686 st->unres_discards
2687 );
2688
2689 return 0;
2690 }
2691
2692 static const struct seq_operations neigh_stat_seq_ops = {
2693 .start = neigh_stat_seq_start,
2694 .next = neigh_stat_seq_next,
2695 .stop = neigh_stat_seq_stop,
2696 .show = neigh_stat_seq_show,
2697 };
2698
2699 static int neigh_stat_seq_open(struct inode *inode, struct file *file)
2700 {
2701 int ret = seq_open(file, &neigh_stat_seq_ops);
2702
2703 if (!ret) {
2704 struct seq_file *sf = file->private_data;
2705 sf->private = PDE(inode)->data;
2706 }
2707 return ret;
2708 }
2709
2710 static const struct file_operations neigh_stat_seq_fops = {
2711 .owner = THIS_MODULE,
2712 .open = neigh_stat_seq_open,
2713 .read = seq_read,
2714 .llseek = seq_lseek,
2715 .release = seq_release,
2716 };
2717
2718 #endif /* CONFIG_PROC_FS */
2719
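/* Note: this estimate is a worst case. MAX_ADDR_LEN is used for both
 * NDA_DST and NDA_LLADDR because tbl->key_len and dev->addr_len are not
 * known here; overestimating only costs a little skb room, while
 * underestimating would trip the -EMSGSIZE WARN_ON in __neigh_notify().
 */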
2720 static inline size_t neigh_nlmsg_size(void)
2721 {
2722 return NLMSG_ALIGN(sizeof(struct ndmsg))
2723 + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
2724 + nla_total_size(MAX_ADDR_LEN) /* NDA_LLADDR */
2725 + nla_total_size(sizeof(struct nda_cacheinfo))
2726 + nla_total_size(4); /* NDA_PROBES */
2727 }
2728
2729 static void __neigh_notify(struct neighbour *n, int type, int flags)
2730 {
2731 struct net *net = dev_net(n->dev);
2732 struct sk_buff *skb;
2733 int err = -ENOBUFS;
2734
2735 skb = nlmsg_new(neigh_nlmsg_size(), GFP_ATOMIC);
2736 if (skb == NULL)
2737 goto errout;
2738
2739 err = neigh_fill_info(skb, n, 0, 0, type, flags);
2740 if (err < 0) {
2741 /* -EMSGSIZE implies BUG in neigh_nlmsg_size() */
2742 WARN_ON(err == -EMSGSIZE);
2743 kfree_skb(skb);
2744 goto errout;
2745 }
2746 rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
2747 return;
2748 errout:
2749 if (err < 0)
2750 rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
2751 }
2752
2753 #ifdef CONFIG_ARPD
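/* Notifies a userspace resolver (e.g. arpd): the RTM_GETNEIGH +
 * NLM_F_REQUEST message asks the daemon to perform resolution on the
 * kernel's behalf; protocols call this from their solicit path when the
 * app_probes budget permits.
 */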
2754 void neigh_app_ns(struct neighbour *n)
2755 {
2756 __neigh_notify(n, RTM_GETNEIGH, NLM_F_REQUEST);
2757 }
2758 EXPORT_SYMBOL(neigh_app_ns);
2759 #endif /* CONFIG_ARPD */
2760
2761 #ifdef CONFIG_SYSCTL
2762
2763 static int proc_unres_qlen(ctl_table *ctl, int write, void __user *buffer,
2764 size_t *lenp, loff_t *ppos)
2765 {
2766 int size, ret;
2767 ctl_table tmp = *ctl;
2768
2769 tmp.data = &size;
2770 size = DIV_ROUND_UP(*(int *)ctl->data, SKB_TRUESIZE(ETH_FRAME_LEN));
2771 ret = proc_dointvec(&tmp, write, buffer, lenp, ppos);
2772 if (write && !ret)
2773 *(int *)ctl->data = size * SKB_TRUESIZE(ETH_FRAME_LEN);
2774 return ret;
2775 }
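/* Conversion sketch: writing unres_qlen = 3 stores
 * 3 * SKB_TRUESIZE(ETH_FRAME_LEN) in queue_len_bytes; reading divides
 * back with DIV_ROUND_UP, so a byte value set through unres_qlen_bytes
 * that is not a whole multiple reads back as the next larger packet
 * count.
 */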
2776
2777 enum {
2778 NEIGH_VAR_MCAST_PROBE,
2779 NEIGH_VAR_UCAST_PROBE,
2780 NEIGH_VAR_APP_PROBE,
2781 NEIGH_VAR_RETRANS_TIME,
2782 NEIGH_VAR_BASE_REACHABLE_TIME,
2783 NEIGH_VAR_DELAY_PROBE_TIME,
2784 NEIGH_VAR_GC_STALETIME,
2785 NEIGH_VAR_QUEUE_LEN,
2786 NEIGH_VAR_QUEUE_LEN_BYTES,
2787 NEIGH_VAR_PROXY_QLEN,
2788 NEIGH_VAR_ANYCAST_DELAY,
2789 NEIGH_VAR_PROXY_DELAY,
2790 NEIGH_VAR_LOCKTIME,
2791 NEIGH_VAR_RETRANS_TIME_MS,
2792 NEIGH_VAR_BASE_REACHABLE_TIME_MS,
2793 NEIGH_VAR_GC_INTERVAL,
2794 NEIGH_VAR_GC_THRESH1,
2795 NEIGH_VAR_GC_THRESH2,
2796 NEIGH_VAR_GC_THRESH3,
2797 NEIGH_VAR_MAX
2798 };
2799
2800 static struct neigh_sysctl_table {
2801 struct ctl_table_header *sysctl_header;
2802 struct ctl_table neigh_vars[NEIGH_VAR_MAX + 1];
2803 } neigh_sysctl_template __read_mostly = {
2804 .neigh_vars = {
2805 [NEIGH_VAR_MCAST_PROBE] = {
2806 .procname = "mcast_solicit",
2807 .maxlen = sizeof(int),
2808 .mode = 0644,
2809 .proc_handler = proc_dointvec,
2810 },
2811 [NEIGH_VAR_UCAST_PROBE] = {
2812 .procname = "ucast_solicit",
2813 .maxlen = sizeof(int),
2814 .mode = 0644,
2815 .proc_handler = proc_dointvec,
2816 },
2817 [NEIGH_VAR_APP_PROBE] = {
2818 .procname = "app_solicit",
2819 .maxlen = sizeof(int),
2820 .mode = 0644,
2821 .proc_handler = proc_dointvec,
2822 },
2823 [NEIGH_VAR_RETRANS_TIME] = {
2824 .procname = "retrans_time",
2825 .maxlen = sizeof(int),
2826 .mode = 0644,
2827 .proc_handler = proc_dointvec_userhz_jiffies,
2828 },
2829 [NEIGH_VAR_BASE_REACHABLE_TIME] = {
2830 .procname = "base_reachable_time",
2831 .maxlen = sizeof(int),
2832 .mode = 0644,
2833 .proc_handler = proc_dointvec_jiffies,
2834 },
2835 [NEIGH_VAR_DELAY_PROBE_TIME] = {
2836 .procname = "delay_first_probe_time",
2837 .maxlen = sizeof(int),
2838 .mode = 0644,
2839 .proc_handler = proc_dointvec_jiffies,
2840 },
2841 [NEIGH_VAR_GC_STALETIME] = {
2842 .procname = "gc_stale_time",
2843 .maxlen = sizeof(int),
2844 .mode = 0644,
2845 .proc_handler = proc_dointvec_jiffies,
2846 },
2847 [NEIGH_VAR_QUEUE_LEN] = {
2848 .procname = "unres_qlen",
2849 .maxlen = sizeof(int),
2850 .mode = 0644,
2851 .proc_handler = proc_unres_qlen,
2852 },
2853 [NEIGH_VAR_QUEUE_LEN_BYTES] = {
2854 .procname = "unres_qlen_bytes",
2855 .maxlen = sizeof(int),
2856 .mode = 0644,
2857 .proc_handler = proc_dointvec,
2858 },
2859 [NEIGH_VAR_PROXY_QLEN] = {
2860 .procname = "proxy_qlen",
2861 .maxlen = sizeof(int),
2862 .mode = 0644,
2863 .proc_handler = proc_dointvec,
2864 },
2865 [NEIGH_VAR_ANYCAST_DELAY] = {
2866 .procname = "anycast_delay",
2867 .maxlen = sizeof(int),
2868 .mode = 0644,
2869 .proc_handler = proc_dointvec_userhz_jiffies,
2870 },
2871 [NEIGH_VAR_PROXY_DELAY] = {
2872 .procname = "proxy_delay",
2873 .maxlen = sizeof(int),
2874 .mode = 0644,
2875 .proc_handler = proc_dointvec_userhz_jiffies,
2876 },
2877 [NEIGH_VAR_LOCKTIME] = {
2878 .procname = "locktime",
2879 .maxlen = sizeof(int),
2880 .mode = 0644,
2881 .proc_handler = proc_dointvec_userhz_jiffies,
2882 },
2883 [NEIGH_VAR_RETRANS_TIME_MS] = {
2884 .procname = "retrans_time_ms",
2885 .maxlen = sizeof(int),
2886 .mode = 0644,
2887 .proc_handler = proc_dointvec_ms_jiffies,
2888 },
2889 [NEIGH_VAR_BASE_REACHABLE_TIME_MS] = {
2890 .procname = "base_reachable_time_ms",
2891 .maxlen = sizeof(int),
2892 .mode = 0644,
2893 .proc_handler = proc_dointvec_ms_jiffies,
2894 },
2895 [NEIGH_VAR_GC_INTERVAL] = {
2896 .procname = "gc_interval",
2897 .maxlen = sizeof(int),
2898 .mode = 0644,
2899 .proc_handler = proc_dointvec_jiffies,
2900 },
2901 [NEIGH_VAR_GC_THRESH1] = {
2902 .procname = "gc_thresh1",
2903 .maxlen = sizeof(int),
2904 .mode = 0644,
2905 .proc_handler = proc_dointvec,
2906 },
2907 [NEIGH_VAR_GC_THRESH2] = {
2908 .procname = "gc_thresh2",
2909 .maxlen = sizeof(int),
2910 .mode = 0644,
2911 .proc_handler = proc_dointvec,
2912 },
2913 [NEIGH_VAR_GC_THRESH3] = {
2914 .procname = "gc_thresh3",
2915 .maxlen = sizeof(int),
2916 .mode = 0644,
2917 .proc_handler = proc_dointvec,
2918 },
2919 {},
2920 },
2921 };
2922
2923 int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
2924 char *p_name, proc_handler *handler)
2925 {
2926 struct neigh_sysctl_table *t;
2927 const char *dev_name_source = NULL;
2928 char neigh_path[ sizeof("net//neigh/") + IFNAMSIZ + IFNAMSIZ ];
2929
2930 t = kmemdup(&neigh_sysctl_template, sizeof(*t), GFP_KERNEL);
2931 if (!t)
2932 goto err;
2933
2934 t->neigh_vars[NEIGH_VAR_MCAST_PROBE].data = &p->mcast_probes;
2935 t->neigh_vars[NEIGH_VAR_UCAST_PROBE].data = &p->ucast_probes;
2936 t->neigh_vars[NEIGH_VAR_APP_PROBE].data = &p->app_probes;
2937 t->neigh_vars[NEIGH_VAR_RETRANS_TIME].data = &p->retrans_time;
2938 t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].data = &p->base_reachable_time;
2939 t->neigh_vars[NEIGH_VAR_DELAY_PROBE_TIME].data = &p->delay_probe_time;
2940 t->neigh_vars[NEIGH_VAR_GC_STALETIME].data = &p->gc_staletime;
2941 t->neigh_vars[NEIGH_VAR_QUEUE_LEN].data = &p->queue_len_bytes;
2942 t->neigh_vars[NEIGH_VAR_QUEUE_LEN_BYTES].data = &p->queue_len_bytes;
2943 t->neigh_vars[NEIGH_VAR_PROXY_QLEN].data = &p->proxy_qlen;
2944 t->neigh_vars[NEIGH_VAR_ANYCAST_DELAY].data = &p->anycast_delay;
2945 t->neigh_vars[NEIGH_VAR_PROXY_DELAY].data = &p->proxy_delay;
2946 t->neigh_vars[NEIGH_VAR_LOCKTIME].data = &p->locktime;
2947 t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].data = &p->retrans_time;
2948 t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].data = &p->base_reachable_time;
2949
2950 if (dev) {
2951 dev_name_source = dev->name;
2952 /* Terminate the table early */
2953 memset(&t->neigh_vars[NEIGH_VAR_GC_INTERVAL], 0,
2954 sizeof(t->neigh_vars[NEIGH_VAR_GC_INTERVAL]));
2955 } else {
2956 dev_name_source = "default";
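		/* Relies on struct neigh_table placing gc_interval and
		 * gc_thresh1..3 directly after the embedded default parms,
		 * so they can be addressed as an int array just past p.
		 */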
2957 t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = (int *)(p + 1);
2958 t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = (int *)(p + 1) + 1;
2959 t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = (int *)(p + 1) + 2;
2960 t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = (int *)(p + 1) + 3;
2961 }
2962
2963
2964 if (handler) {
2965 /* RetransTime */
2966 t->neigh_vars[NEIGH_VAR_RETRANS_TIME].proc_handler = handler;
2967 t->neigh_vars[NEIGH_VAR_RETRANS_TIME].extra1 = dev;
2968 /* ReachableTime */
2969 t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler = handler;
2970 t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].extra1 = dev;
2971 /* RetransTime (in milliseconds)*/
2972 t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].proc_handler = handler;
2973 t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].extra1 = dev;
2974 /* ReachableTime (in milliseconds) */
2975 t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler = handler;
2976 t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].extra1 = dev;
2977 }
2978
2979 snprintf(neigh_path, sizeof(neigh_path), "net/%s/neigh/%s",
2980 p_name, dev_name_source);
2981 t->sysctl_header =
2982 register_net_sysctl(neigh_parms_net(p), neigh_path, t->neigh_vars);
2983 if (!t->sysctl_header)
2984 goto free;
2985
2986 p->sysctl_table = t;
2987 return 0;
2988
2989 free:
2990 kfree(t);
2991 err:
2992 return -ENOBUFS;
2993 }
2994 EXPORT_SYMBOL(neigh_sysctl_register);
2995
2996 void neigh_sysctl_unregister(struct neigh_parms *p)
2997 {
2998 if (p->sysctl_table) {
2999 struct neigh_sysctl_table *t = p->sysctl_table;
3000 p->sysctl_table = NULL;
3001 unregister_net_sysctl_table(t->sysctl_header);
3002 kfree(t);
3003 }
3004 }
3005 EXPORT_SYMBOL(neigh_sysctl_unregister);
3006
3007 #endif /* CONFIG_SYSCTL */
3008
3009 static int __init neigh_init(void)
3010 {
3011 rtnl_register(PF_UNSPEC, RTM_NEWNEIGH, neigh_add, NULL, NULL);
3012 rtnl_register(PF_UNSPEC, RTM_DELNEIGH, neigh_delete, NULL, NULL);
3013 rtnl_register(PF_UNSPEC, RTM_GETNEIGH, NULL, neigh_dump_info, NULL);
3014
3015 rtnl_register(PF_UNSPEC, RTM_GETNEIGHTBL, NULL, neightbl_dump_info,
3016 NULL);
3017 rtnl_register(PF_UNSPEC, RTM_SETNEIGHTBL, neightbl_set, NULL, NULL);
3018
3019 return 0;
3020 }
3021
3022 subsys_initcall(neigh_init);
3023