net: avoid hanging on send due to sysctl configuration overflow.
net/core/neighbour.c
/*
 *	Generic address resolution entity
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Fixes:
 *	Vitaly E. Lavrov	releasing NULL neighbor in neigh_add.
 *	Harald Welte		Add neighbour cache statistics like rtstat
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif
#include <linux/times.h>
#include <net/net_namespace.h>
#include <net/neighbour.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/netevent.h>
#include <net/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/log2.h>

#define NEIGH_DEBUG 1

#define NEIGH_PRINTK(x...) printk(x)
#define NEIGH_NOPRINTK(x...) do { ; } while(0)
#define NEIGH_PRINTK1 NEIGH_NOPRINTK
#define NEIGH_PRINTK2 NEIGH_NOPRINTK

#if NEIGH_DEBUG >= 1
#undef NEIGH_PRINTK1
#define NEIGH_PRINTK1 NEIGH_PRINTK
#endif
#if NEIGH_DEBUG >= 2
#undef NEIGH_PRINTK2
#define NEIGH_PRINTK2 NEIGH_PRINTK
#endif

#define PNEIGH_HASHMASK		0xF

static void neigh_timer_handler(unsigned long arg);
static void __neigh_notify(struct neighbour *n, int type, int flags);
static void neigh_update_notify(struct neighbour *neigh);
static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev);

static struct neigh_table *neigh_tables;
#ifdef CONFIG_PROC_FS
static const struct file_operations neigh_stat_seq_fops;
#endif

/*
   Neighbour hash table buckets are protected with rwlock tbl->lock.

   - All the scans/updates to hash buckets MUST be made under this lock.
   - NOTHING clever should be done under this lock: no callbacks
     to protocol backends, no attempts to send something to the network.
     It will result in deadlocks if a backend/driver wants to use the
     neighbour cache.
   - If the entry requires some non-trivial actions, increase
     its reference count and release the table lock.

   Neighbour entries are protected:
   - with reference count.
   - with rwlock neigh->lock

   The reference count prevents destruction.

   neigh->lock mainly serializes ll address data and its validity state.
   However, the same lock is used to protect other entry fields:
   - timer
   - resolution queue

   Again, nothing clever shall be done under neigh->lock;
   the most complicated procedure we allow is dev->hard_header.
   It is supposed that dev->hard_header is simplistic and does
   not make callbacks to neighbour tables.

   The last lock is neigh_tbl_lock. It is a pure SMP lock, protecting
   the list of neighbour tables. This list is used only in process context.
 */
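
/*
 * A minimal sketch (not part of the original file) of the pattern the
 * rules above prescribe: pin an entry found under tbl->lock with a
 * reference, drop the lock, and only then do the non-trivial work. The
 * helper name example_act_on_entry is hypothetical; the calls it makes
 * are the real ones used throughout this file.
 */
#if 0	/* illustrative only */
static void example_act_on_entry(struct neigh_table *tbl, struct neighbour *n)
{
	write_lock_bh(&tbl->lock);
	neigh_hold(n);			/* refcount prevents destruction */
	write_unlock_bh(&tbl->lock);	/* never send/call back under it */

	/* Safe here: the reference keeps n alive while we talk to the
	 * protocol backend or transmit.
	 */

	neigh_release(n);
}
#endif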

static DEFINE_RWLOCK(neigh_tbl_lock);

static int neigh_blackhole(struct neighbour *neigh, struct sk_buff *skb)
{
	kfree_skb(skb);
	return -ENETDOWN;
}

static void neigh_cleanup_and_release(struct neighbour *neigh)
{
	if (neigh->parms->neigh_cleanup)
		neigh->parms->neigh_cleanup(neigh);

	__neigh_notify(neigh, RTM_DELNEIGH, 0);
	neigh_release(neigh);
}

/*
 * It is a random distribution in the interval (1/2)*base...(3/2)*base.
 * It corresponds to the default IPv6 settings and is not overridable,
 * because it is a really reasonable choice.
 */

unsigned long neigh_rand_reach_time(unsigned long base)
{
	return base ? (net_random() % base) + (base >> 1) : 0;
}
EXPORT_SYMBOL(neigh_rand_reach_time);
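
/*
 * Worked example (illustrative, not in the original source): with
 * base = 30 * HZ, net_random() % base is uniform over [0, 30*HZ) and
 * base >> 1 adds 15*HZ, so the result is uniform over [15*HZ, 45*HZ),
 * i.e. (1/2)*base ... (3/2)*base as the comment above says.
 */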

static int neigh_forced_gc(struct neigh_table *tbl)
{
	int shrunk = 0;
	int i;
	struct neigh_hash_table *nht;

	NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);

	write_lock_bh(&tbl->lock);
	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));
	for (i = 0; i < (1 << nht->hash_shift); i++) {
		struct neighbour *n;
		struct neighbour __rcu **np;

		np = &nht->hash_buckets[i];
		while ((n = rcu_dereference_protected(*np,
					lockdep_is_held(&tbl->lock))) != NULL) {
			/* Neighbour record may be discarded if:
			 * - nobody refers to it.
			 * - it is not permanent
			 */
			write_lock(&n->lock);
			if (atomic_read(&n->refcnt) == 1 &&
			    !(n->nud_state & NUD_PERMANENT)) {
				rcu_assign_pointer(*np,
					rcu_dereference_protected(n->next,
						lockdep_is_held(&tbl->lock)));
				n->dead = 1;
				shrunk = 1;
				write_unlock(&n->lock);
				neigh_cleanup_and_release(n);
				continue;
			}
			write_unlock(&n->lock);
			np = &n->next;
		}
	}

	tbl->last_flush = jiffies;

	write_unlock_bh(&tbl->lock);

	return shrunk;
}

static void neigh_add_timer(struct neighbour *n, unsigned long when)
{
	neigh_hold(n);
	if (unlikely(mod_timer(&n->timer, when))) {
		printk("NEIGH: BUG, double timer add, state is %x\n",
		       n->nud_state);
		dump_stack();
	}
}

static int neigh_del_timer(struct neighbour *n)
{
	if ((n->nud_state & NUD_IN_TIMER) &&
	    del_timer(&n->timer)) {
		neigh_release(n);
		return 1;
	}
	return 0;
}

static void pneigh_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(list)) != NULL) {
		dev_put(skb->dev);
		kfree_skb(skb);
	}
}

static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev)
{
	int i;
	struct neigh_hash_table *nht;

	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));

	for (i = 0; i < (1 << nht->hash_shift); i++) {
		struct neighbour *n;
		struct neighbour __rcu **np = &nht->hash_buckets[i];

		while ((n = rcu_dereference_protected(*np,
					lockdep_is_held(&tbl->lock))) != NULL) {
			if (dev && n->dev != dev) {
				np = &n->next;
				continue;
			}
			rcu_assign_pointer(*np,
				rcu_dereference_protected(n->next,
					lockdep_is_held(&tbl->lock)));
			write_lock(&n->lock);
			neigh_del_timer(n);
			n->dead = 1;

			if (atomic_read(&n->refcnt) != 1) {
				/* The most unpleasant situation.
				   We must destroy the neighbour entry,
				   but someone still uses it.

				   The destroy will be delayed until
				   the last user releases us, but
				   we must kill timers etc. and move
				   it to a safe state.
				 */
				skb_queue_purge(&n->arp_queue);
				n->arp_queue_len_bytes = 0;
				n->output = neigh_blackhole;
				if (n->nud_state & NUD_VALID)
					n->nud_state = NUD_NOARP;
				else
					n->nud_state = NUD_NONE;
				NEIGH_PRINTK2("neigh %p is stray.\n", n);
			}
			write_unlock(&n->lock);
			neigh_cleanup_and_release(n);
		}
	}
}

void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
{
	write_lock_bh(&tbl->lock);
	neigh_flush_dev(tbl, dev);
	write_unlock_bh(&tbl->lock);
}
EXPORT_SYMBOL(neigh_changeaddr);

int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
	write_lock_bh(&tbl->lock);
	neigh_flush_dev(tbl, dev);
	pneigh_ifdown(tbl, dev);
	write_unlock_bh(&tbl->lock);

	del_timer_sync(&tbl->proxy_timer);
	pneigh_queue_purge(&tbl->proxy_queue);
	return 0;
}
EXPORT_SYMBOL(neigh_ifdown);

static struct neighbour *neigh_alloc(struct neigh_table *tbl, struct net_device *dev)
{
	struct neighbour *n = NULL;
	unsigned long now = jiffies;
	int entries;

	entries = atomic_inc_return(&tbl->entries) - 1;
	if (entries >= tbl->gc_thresh3 ||
	    (entries >= tbl->gc_thresh2 &&
	     time_after(now, tbl->last_flush + 5 * HZ))) {
		if (!neigh_forced_gc(tbl) &&
		    entries >= tbl->gc_thresh3)
			goto out_entries;
	}

	if (tbl->entry_size)
		n = kzalloc(tbl->entry_size, GFP_ATOMIC);
	else {
		int sz = sizeof(*n) + tbl->key_len;

		sz = ALIGN(sz, NEIGH_PRIV_ALIGN);
		sz += dev->neigh_priv_len;
		n = kzalloc(sz, GFP_ATOMIC);
	}
	if (!n)
		goto out_entries;

	skb_queue_head_init(&n->arp_queue);
	rwlock_init(&n->lock);
	seqlock_init(&n->ha_lock);
	n->updated   = n->used = now;
	n->nud_state = NUD_NONE;
	n->output    = neigh_blackhole;
	seqlock_init(&n->hh.hh_lock);
	n->parms     = neigh_parms_clone(&tbl->parms);
	setup_timer(&n->timer, neigh_timer_handler, (unsigned long)n);

	NEIGH_CACHE_STAT_INC(tbl, allocs);
	n->tbl = tbl;
	atomic_set(&n->refcnt, 1);
	n->dead = 1;
out:
	return n;

out_entries:
	atomic_dec(&tbl->entries);
	goto out;
}

static void neigh_get_hash_rnd(u32 *x)
{
	get_random_bytes(x, sizeof(*x));
	*x |= 1;
}

static struct neigh_hash_table *neigh_hash_alloc(unsigned int shift)
{
	size_t size = (1 << shift) * sizeof(struct neighbour *);
	struct neigh_hash_table *ret;
	struct neighbour __rcu **buckets;
	int i;

	ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
	if (!ret)
		return NULL;
	if (size <= PAGE_SIZE)
		buckets = kzalloc(size, GFP_ATOMIC);
	else
		buckets = (struct neighbour __rcu **)
			  __get_free_pages(GFP_ATOMIC | __GFP_ZERO,
					   get_order(size));
	if (!buckets) {
		kfree(ret);
		return NULL;
	}
	ret->hash_buckets = buckets;
	ret->hash_shift = shift;
	for (i = 0; i < NEIGH_NUM_HASH_RND; i++)
		neigh_get_hash_rnd(&ret->hash_rnd[i]);
	return ret;
}
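
/*
 * Worked example (illustrative, not in the original source): on a 64-bit
 * build, shift = 3 gives 8 buckets * 8 bytes = 64 bytes, served by
 * kzalloc(); shift = 10 gives 8 KB, which exceeds a typical 4 KB
 * PAGE_SIZE and takes the __get_free_pages() path instead.
 */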

static void neigh_hash_free_rcu(struct rcu_head *head)
{
	struct neigh_hash_table *nht = container_of(head,
						    struct neigh_hash_table,
						    rcu);
	size_t size = (1 << nht->hash_shift) * sizeof(struct neighbour *);
	struct neighbour __rcu **buckets = nht->hash_buckets;

	if (size <= PAGE_SIZE)
		kfree(buckets);
	else
		free_pages((unsigned long)buckets, get_order(size));
	kfree(nht);
}

static struct neigh_hash_table *neigh_hash_grow(struct neigh_table *tbl,
						unsigned long new_shift)
{
	unsigned int i, hash;
	struct neigh_hash_table *new_nht, *old_nht;

	NEIGH_CACHE_STAT_INC(tbl, hash_grows);

	old_nht = rcu_dereference_protected(tbl->nht,
					    lockdep_is_held(&tbl->lock));
	new_nht = neigh_hash_alloc(new_shift);
	if (!new_nht)
		return old_nht;

	for (i = 0; i < (1 << old_nht->hash_shift); i++) {
		struct neighbour *n, *next;

		for (n = rcu_dereference_protected(old_nht->hash_buckets[i],
						   lockdep_is_held(&tbl->lock));
		     n != NULL;
		     n = next) {
			hash = tbl->hash(n->primary_key, n->dev,
					 new_nht->hash_rnd);

			hash >>= (32 - new_nht->hash_shift);
			next = rcu_dereference_protected(n->next,
						lockdep_is_held(&tbl->lock));

			rcu_assign_pointer(n->next,
					   rcu_dereference_protected(
						new_nht->hash_buckets[hash],
						lockdep_is_held(&tbl->lock)));
			rcu_assign_pointer(new_nht->hash_buckets[hash], n);
		}
	}

	rcu_assign_pointer(tbl->nht, new_nht);
	call_rcu(&old_nht->rcu, neigh_hash_free_rcu);
	return new_nht;
}

struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
			       struct net_device *dev)
{
	struct neighbour *n;
	int key_len = tbl->key_len;
	u32 hash_val;
	struct neigh_hash_table *nht;

	NEIGH_CACHE_STAT_INC(tbl, lookups);

	rcu_read_lock_bh();
	nht = rcu_dereference_bh(tbl->nht);
	hash_val = tbl->hash(pkey, dev, nht->hash_rnd) >> (32 - nht->hash_shift);

	for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
	     n != NULL;
	     n = rcu_dereference_bh(n->next)) {
		if (dev == n->dev && !memcmp(n->primary_key, pkey, key_len)) {
			if (!atomic_inc_not_zero(&n->refcnt))
				n = NULL;
			NEIGH_CACHE_STAT_INC(tbl, hits);
			break;
		}
	}

	rcu_read_unlock_bh();
	return n;
}
EXPORT_SYMBOL(neigh_lookup);
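
/*
 * Usage sketch (illustrative, not part of the original file): on a hit
 * neigh_lookup() returns the entry with its reference count raised, so
 * it must be paired with neigh_release(). The arp_tbl/__be32 key shown
 * here is what the IPv4 ARP code would pass; other families use their
 * own table and key type.
 */
#if 0
static void example_lookup(struct net_device *dev, __be32 ip)
{
	struct neighbour *n = neigh_lookup(&arp_tbl, &ip, dev);

	if (n) {
		/* ... inspect n->nud_state, n->ha, ... */
		neigh_release(n);	/* pair with the lookup's hold */
	}
}
#endif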

struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
				     const void *pkey)
{
	struct neighbour *n;
	int key_len = tbl->key_len;
	u32 hash_val;
	struct neigh_hash_table *nht;

	NEIGH_CACHE_STAT_INC(tbl, lookups);

	rcu_read_lock_bh();
	nht = rcu_dereference_bh(tbl->nht);
	hash_val = tbl->hash(pkey, NULL, nht->hash_rnd) >> (32 - nht->hash_shift);

	for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
	     n != NULL;
	     n = rcu_dereference_bh(n->next)) {
		if (!memcmp(n->primary_key, pkey, key_len) &&
		    net_eq(dev_net(n->dev), net)) {
			if (!atomic_inc_not_zero(&n->refcnt))
				n = NULL;
			NEIGH_CACHE_STAT_INC(tbl, hits);
			break;
		}
	}

	rcu_read_unlock_bh();
	return n;
}
EXPORT_SYMBOL(neigh_lookup_nodev);

struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
				 struct net_device *dev, bool want_ref)
{
	u32 hash_val;
	int key_len = tbl->key_len;
	int error;
	struct neighbour *n1, *rc, *n = neigh_alloc(tbl, dev);
	struct neigh_hash_table *nht;

	if (!n) {
		rc = ERR_PTR(-ENOBUFS);
		goto out;
	}

	memcpy(n->primary_key, pkey, key_len);
	n->dev = dev;
	dev_hold(dev);

	/* Protocol specific setup. */
	if (tbl->constructor && (error = tbl->constructor(n)) < 0) {
		rc = ERR_PTR(error);
		goto out_neigh_release;
	}

	if (dev->netdev_ops->ndo_neigh_construct) {
		error = dev->netdev_ops->ndo_neigh_construct(n);
		if (error < 0) {
			rc = ERR_PTR(error);
			goto out_neigh_release;
		}
	}

	/* Device specific setup. */
	if (n->parms->neigh_setup &&
	    (error = n->parms->neigh_setup(n)) < 0) {
		rc = ERR_PTR(error);
		goto out_neigh_release;
	}

	n->confirmed = jiffies - (n->parms->base_reachable_time << 1);

	write_lock_bh(&tbl->lock);
	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));

	if (atomic_read(&tbl->entries) > (1 << nht->hash_shift))
		nht = neigh_hash_grow(tbl, nht->hash_shift + 1);

	hash_val = tbl->hash(pkey, dev, nht->hash_rnd) >> (32 - nht->hash_shift);

	if (n->parms->dead) {
		rc = ERR_PTR(-EINVAL);
		goto out_tbl_unlock;
	}

	for (n1 = rcu_dereference_protected(nht->hash_buckets[hash_val],
					    lockdep_is_held(&tbl->lock));
	     n1 != NULL;
	     n1 = rcu_dereference_protected(n1->next,
					    lockdep_is_held(&tbl->lock))) {
		if (dev == n1->dev && !memcmp(n1->primary_key, pkey, key_len)) {
			if (want_ref)
				neigh_hold(n1);
			rc = n1;
			goto out_tbl_unlock;
		}
	}

	n->dead = 0;
	if (want_ref)
		neigh_hold(n);
	rcu_assign_pointer(n->next,
			   rcu_dereference_protected(nht->hash_buckets[hash_val],
						     lockdep_is_held(&tbl->lock)));
	rcu_assign_pointer(nht->hash_buckets[hash_val], n);
	write_unlock_bh(&tbl->lock);
	NEIGH_PRINTK2("neigh %p is created.\n", n);
	rc = n;
out:
	return rc;
out_tbl_unlock:
	write_unlock_bh(&tbl->lock);
out_neigh_release:
	neigh_release(n);
	goto out;
}
EXPORT_SYMBOL(__neigh_create);

static u32 pneigh_hash(const void *pkey, int key_len)
{
	u32 hash_val = *(u32 *)(pkey + key_len - 4);
	hash_val ^= (hash_val >> 16);
	hash_val ^= hash_val >> 8;
	hash_val ^= hash_val >> 4;
	hash_val &= PNEIGH_HASHMASK;
	return hash_val;
}
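
/*
 * Worked example (illustrative, not in the original source): for an IPv4
 * key the last four bytes are the address itself. XOR-folding the word
 * by 16, 8 and 4 bits and masking with PNEIGH_HASHMASK (0xF) yields a
 * bucket index in [0, 15] to which every nibble of the key contributes.
 */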

static struct pneigh_entry *__pneigh_lookup_1(struct pneigh_entry *n,
					      struct net *net,
					      const void *pkey,
					      int key_len,
					      struct net_device *dev)
{
	while (n) {
		if (!memcmp(n->key, pkey, key_len) &&
		    net_eq(pneigh_net(n), net) &&
		    (n->dev == dev || !n->dev))
			return n;
		n = n->next;
	}
	return NULL;
}

struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl,
		struct net *net, const void *pkey, struct net_device *dev)
{
	int key_len = tbl->key_len;
	u32 hash_val = pneigh_hash(pkey, key_len);

	return __pneigh_lookup_1(tbl->phash_buckets[hash_val],
				 net, pkey, key_len, dev);
}
EXPORT_SYMBOL_GPL(__pneigh_lookup);

struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl,
				    struct net *net, const void *pkey,
				    struct net_device *dev, int creat)
{
	struct pneigh_entry *n;
	int key_len = tbl->key_len;
	u32 hash_val = pneigh_hash(pkey, key_len);

	read_lock_bh(&tbl->lock);
	n = __pneigh_lookup_1(tbl->phash_buckets[hash_val],
			      net, pkey, key_len, dev);
	read_unlock_bh(&tbl->lock);

	if (n || !creat)
		goto out;

	ASSERT_RTNL();

	n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL);
	if (!n)
		goto out;

	write_pnet(&n->net, hold_net(net));
	memcpy(n->key, pkey, key_len);
	n->dev = dev;
	if (dev)
		dev_hold(dev);

	if (tbl->pconstructor && tbl->pconstructor(n)) {
		if (dev)
			dev_put(dev);
		release_net(net);
		kfree(n);
		n = NULL;
		goto out;
	}

	write_lock_bh(&tbl->lock);
	n->next = tbl->phash_buckets[hash_val];
	tbl->phash_buckets[hash_val] = n;
	write_unlock_bh(&tbl->lock);
out:
	return n;
}
EXPORT_SYMBOL(pneigh_lookup);


int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey,
		  struct net_device *dev)
{
	struct pneigh_entry *n, **np;
	int key_len = tbl->key_len;
	u32 hash_val = pneigh_hash(pkey, key_len);

	write_lock_bh(&tbl->lock);
	for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
	     np = &n->next) {
		if (!memcmp(n->key, pkey, key_len) && n->dev == dev &&
		    net_eq(pneigh_net(n), net)) {
			*np = n->next;
			write_unlock_bh(&tbl->lock);
			if (tbl->pdestructor)
				tbl->pdestructor(n);
			if (n->dev)
				dev_put(n->dev);
			release_net(pneigh_net(n));
			kfree(n);
			return 0;
		}
	}
	write_unlock_bh(&tbl->lock);
	return -ENOENT;
}

static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
	struct pneigh_entry *n, **np;
	u32 h;

	for (h = 0; h <= PNEIGH_HASHMASK; h++) {
		np = &tbl->phash_buckets[h];
		while ((n = *np) != NULL) {
			if (!dev || n->dev == dev) {
				*np = n->next;
				if (tbl->pdestructor)
					tbl->pdestructor(n);
				if (n->dev)
					dev_put(n->dev);
				release_net(pneigh_net(n));
				kfree(n);
				continue;
			}
			np = &n->next;
		}
	}
	return -ENOENT;
}

static void neigh_parms_destroy(struct neigh_parms *parms);

static inline void neigh_parms_put(struct neigh_parms *parms)
{
	if (atomic_dec_and_test(&parms->refcnt))
		neigh_parms_destroy(parms);
}

/*
 *	The neighbour must already be out of the table.
 */
void neigh_destroy(struct neighbour *neigh)
{
	struct net_device *dev = neigh->dev;

	NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);

	if (!neigh->dead) {
		pr_warn("Destroying alive neighbour %p\n", neigh);
		dump_stack();
		return;
	}

	if (neigh_del_timer(neigh))
		pr_warn("Impossible event\n");

	skb_queue_purge(&neigh->arp_queue);
	neigh->arp_queue_len_bytes = 0;

	if (dev->netdev_ops->ndo_neigh_destroy)
		dev->netdev_ops->ndo_neigh_destroy(neigh);

	dev_put(dev);
	neigh_parms_put(neigh->parms);

	NEIGH_PRINTK2("neigh %p is destroyed.\n", neigh);

	atomic_dec(&neigh->tbl->entries);
	kfree_rcu(neigh, rcu);
}
EXPORT_SYMBOL(neigh_destroy);

/* Neighbour state is suspicious;
   disable fast path.

   Called with write_locked neigh.
 */
static void neigh_suspect(struct neighbour *neigh)
{
	NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);

	neigh->output = neigh->ops->output;
}

/* Neighbour state is OK;
   enable fast path.

   Called with write_locked neigh.
 */
static void neigh_connect(struct neighbour *neigh)
{
	NEIGH_PRINTK2("neigh %p is connected.\n", neigh);

	neigh->output = neigh->ops->connected_output;
}

static void neigh_periodic_work(struct work_struct *work)
{
	struct neigh_table *tbl = container_of(work, struct neigh_table, gc_work.work);
	struct neighbour *n;
	struct neighbour __rcu **np;
	unsigned int i;
	struct neigh_hash_table *nht;

	NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);

	write_lock_bh(&tbl->lock);
	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));

	if (atomic_read(&tbl->entries) < tbl->gc_thresh1)
		goto out;

	/*
	 *	periodically recompute ReachableTime from random function
	 */

	if (time_after(jiffies, tbl->last_rand + 300 * HZ)) {
		struct neigh_parms *p;
		tbl->last_rand = jiffies;
		for (p = &tbl->parms; p; p = p->next)
			p->reachable_time =
				neigh_rand_reach_time(p->base_reachable_time);
	}

	for (i = 0 ; i < (1 << nht->hash_shift); i++) {
		np = &nht->hash_buckets[i];

		while ((n = rcu_dereference_protected(*np,
				lockdep_is_held(&tbl->lock))) != NULL) {
			unsigned int state;

			write_lock(&n->lock);

			state = n->nud_state;
			if (state & (NUD_PERMANENT | NUD_IN_TIMER)) {
				write_unlock(&n->lock);
				goto next_elt;
			}

			if (time_before(n->used, n->confirmed))
				n->used = n->confirmed;

			if (atomic_read(&n->refcnt) == 1 &&
			    (state == NUD_FAILED ||
			     time_after(jiffies, n->used + n->parms->gc_staletime))) {
				*np = n->next;
				n->dead = 1;
				write_unlock(&n->lock);
				neigh_cleanup_and_release(n);
				continue;
			}
			write_unlock(&n->lock);

next_elt:
			np = &n->next;
		}
		/*
		 * It's fine to release lock here, even if hash table
		 * grows while we are preempted.
		 */
		write_unlock_bh(&tbl->lock);
		cond_resched();
		write_lock_bh(&tbl->lock);
		nht = rcu_dereference_protected(tbl->nht,
						lockdep_is_held(&tbl->lock));
	}
out:
	/* Cycle through all hash buckets every base_reachable_time/2 ticks.
	 * ARP entry timeouts range from 1/2 base_reachable_time to 3/2
	 * base_reachable_time.
	 */
	schedule_delayed_work(&tbl->gc_work,
			      tbl->parms.base_reachable_time >> 1);
	write_unlock_bh(&tbl->lock);
}

static __inline__ int neigh_max_probes(struct neighbour *n)
{
	struct neigh_parms *p = n->parms;
	return (n->nud_state & NUD_PROBE) ?
		p->ucast_probes :
		p->ucast_probes + p->app_probes + p->mcast_probes;
}
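
/*
 * Worked example (illustrative, assuming the usual ARP defaults of
 * ucast_probes = 3, app_probes = 0, mcast_probes = 3): an entry in
 * NUD_PROBE gives up after 3 unicast probes, while one still in
 * NUD_INCOMPLETE may send up to 3 + 0 + 3 = 6 solicitations before
 * neigh_timer_handler() moves it to NUD_FAILED.
 */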

static void neigh_invalidate(struct neighbour *neigh)
	__releases(neigh->lock)
	__acquires(neigh->lock)
{
	struct sk_buff *skb;

	NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
	NEIGH_PRINTK2("neigh %p is failed.\n", neigh);
	neigh->updated = jiffies;

	/* This is a very thin place. report_unreachable is a very
	   complicated routine. Particularly, it can hit the same
	   neighbour entry!

	   So we try to be accurate and avoid a dead loop. --ANK
	 */
	while (neigh->nud_state == NUD_FAILED &&
	       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
		write_unlock(&neigh->lock);
		neigh->ops->error_report(neigh, skb);
		write_lock(&neigh->lock);
	}
	skb_queue_purge(&neigh->arp_queue);
	neigh->arp_queue_len_bytes = 0;
}

static void neigh_probe(struct neighbour *neigh)
	__releases(neigh->lock)
{
	struct sk_buff *skb = skb_peek(&neigh->arp_queue);
	/* keep skb alive even if arp_queue overflows */
	if (skb)
		skb = skb_copy(skb, GFP_ATOMIC);
	write_unlock(&neigh->lock);
	neigh->ops->solicit(neigh, skb);
	atomic_inc(&neigh->probes);
	kfree_skb(skb);
}

/* Called when a timer expires for a neighbour entry. */

static void neigh_timer_handler(unsigned long arg)
{
	unsigned long now, next;
	struct neighbour *neigh = (struct neighbour *)arg;
	unsigned int state;
	int notify = 0;

	write_lock(&neigh->lock);

	state = neigh->nud_state;
	now = jiffies;
	next = now + HZ;

	if (!(state & NUD_IN_TIMER))
		goto out;

	if (state & NUD_REACHABLE) {
		if (time_before_eq(now,
				   neigh->confirmed + neigh->parms->reachable_time)) {
			NEIGH_PRINTK2("neigh %p is still alive.\n", neigh);
			next = neigh->confirmed + neigh->parms->reachable_time;
		} else if (time_before_eq(now,
					  neigh->used + neigh->parms->delay_probe_time)) {
			NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
			neigh->nud_state = NUD_DELAY;
			neigh->updated = jiffies;
			neigh_suspect(neigh);
			next = now + neigh->parms->delay_probe_time;
		} else {
			NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);
			neigh->nud_state = NUD_STALE;
			neigh->updated = jiffies;
			neigh_suspect(neigh);
			notify = 1;
		}
	} else if (state & NUD_DELAY) {
		if (time_before_eq(now,
				   neigh->confirmed + neigh->parms->delay_probe_time)) {
			NEIGH_PRINTK2("neigh %p is now reachable.\n", neigh);
			neigh->nud_state = NUD_REACHABLE;
			neigh->updated = jiffies;
			neigh_connect(neigh);
			notify = 1;
			next = neigh->confirmed + neigh->parms->reachable_time;
		} else {
			NEIGH_PRINTK2("neigh %p is probed.\n", neigh);
			neigh->nud_state = NUD_PROBE;
			neigh->updated = jiffies;
			atomic_set(&neigh->probes, 0);
			next = now + neigh->parms->retrans_time;
		}
	} else {
		/* NUD_PROBE|NUD_INCOMPLETE */
		next = now + neigh->parms->retrans_time;
	}

	if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
	    atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
		neigh->nud_state = NUD_FAILED;
		notify = 1;
		neigh_invalidate(neigh);
	}

	if (neigh->nud_state & NUD_IN_TIMER) {
		if (time_before(next, jiffies + HZ/2))
			next = jiffies + HZ/2;
		if (!mod_timer(&neigh->timer, next))
			neigh_hold(neigh);
	}
	if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
		neigh_probe(neigh);
	} else {
out:
		write_unlock(&neigh->lock);
	}

	if (notify)
		neigh_update_notify(neigh);

	neigh_release(neigh);
}

int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
{
	int rc;
	bool immediate_probe = false;

	write_lock_bh(&neigh->lock);

	rc = 0;
	if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
		goto out_unlock_bh;

	if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
		if (neigh->parms->mcast_probes + neigh->parms->app_probes) {
			unsigned long next, now = jiffies;

			atomic_set(&neigh->probes, neigh->parms->ucast_probes);
			neigh->nud_state = NUD_INCOMPLETE;
			neigh->updated = now;
			next = now + max(neigh->parms->retrans_time, HZ/2);
			neigh_add_timer(neigh, next);
			immediate_probe = true;
		} else {
			neigh->nud_state = NUD_FAILED;
			neigh->updated = jiffies;
			write_unlock_bh(&neigh->lock);

			kfree_skb(skb);
			return 1;
		}
	} else if (neigh->nud_state & NUD_STALE) {
		NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
		neigh->nud_state = NUD_DELAY;
		neigh->updated = jiffies;
		neigh_add_timer(neigh,
				jiffies + neigh->parms->delay_probe_time);
	}

	if (neigh->nud_state == NUD_INCOMPLETE) {
		if (skb) {
			while (neigh->arp_queue_len_bytes + skb->truesize >
			       neigh->parms->queue_len_bytes) {
				struct sk_buff *buff;

				buff = __skb_dequeue(&neigh->arp_queue);
				if (!buff)
					break;
				neigh->arp_queue_len_bytes -= buff->truesize;
				kfree_skb(buff);
				NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards);
			}
			skb_dst_force(skb);
			__skb_queue_tail(&neigh->arp_queue, skb);
			neigh->arp_queue_len_bytes += skb->truesize;
		}
		rc = 1;
	}
out_unlock_bh:
	if (immediate_probe)
		neigh_probe(neigh);
	else
		write_unlock(&neigh->lock);
	local_bh_enable();
	return rc;
}
EXPORT_SYMBOL(__neigh_event_send);
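
/*
 * Illustrative note (not from the original file): callers read the return
 * value as "0 means the entry is usable now, nonzero means the skb was
 * queued or dropped pending resolution", so a typical output path --
 * compare neigh_resolve_output() below -- is shaped like this:
 */
#if 0
	if (!neigh_event_send(neigh, skb)) {
		/* resolved: build the hardware header and transmit */
	}
	/* else the skb is now owned by the arp_queue (or already freed) */
#endif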

static void neigh_update_hhs(struct neighbour *neigh)
{
	struct hh_cache *hh;
	void (*update)(struct hh_cache*, const struct net_device*, const unsigned char *)
		= NULL;

	if (neigh->dev->header_ops)
		update = neigh->dev->header_ops->cache_update;

	if (update) {
		hh = &neigh->hh;
		if (hh->hh_len) {
			write_seqlock_bh(&hh->hh_lock);
			update(hh, neigh->dev, neigh->ha);
			write_sequnlock_bh(&hh->hh_lock);
		}
	}
}


/* Generic update routine.
   -- lladdr is the new lladdr, or NULL if it is not supplied.
   -- new    is the new state.
   -- flags
	NEIGH_UPDATE_F_OVERRIDE allows overriding the existing lladdr,
				if it is different.
	NEIGH_UPDATE_F_WEAK_OVERRIDE will suspect an existing "connected"
				lladdr instead of overriding it
				if it is different.
				It also allows retaining the current state
				if the lladdr is unchanged.
	NEIGH_UPDATE_F_ADMIN	means that the change is administrative.

	NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows overriding the existing
				NTF_ROUTER flag.
	NEIGH_UPDATE_F_ISROUTER	indicates whether the neighbour is known as
				a router.

   Caller MUST hold a reference count on the entry.
 */

int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
		 u32 flags)
{
	u8 old;
	int err;
	int notify = 0;
	struct net_device *dev;
	int update_isrouter = 0;

	write_lock_bh(&neigh->lock);

	dev = neigh->dev;
	old = neigh->nud_state;
	err = -EPERM;

	if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
	    (old & (NUD_NOARP | NUD_PERMANENT)))
		goto out;

	if (!(new & NUD_VALID)) {
		neigh_del_timer(neigh);
		if (old & NUD_CONNECTED)
			neigh_suspect(neigh);
		neigh->nud_state = new;
		err = 0;
		notify = old & NUD_VALID;
		if ((old & (NUD_INCOMPLETE | NUD_PROBE)) &&
		    (new & NUD_FAILED)) {
			neigh_invalidate(neigh);
			notify = 1;
		}
		goto out;
	}

	/* Compare new lladdr with cached one */
	if (!dev->addr_len) {
		/* First case: device needs no address. */
		lladdr = neigh->ha;
	} else if (lladdr) {
		/* The second case: if something is already cached
		   and a new address is proposed:
		   - compare new & old
		   - if they are different, check override flag
		 */
		if ((old & NUD_VALID) &&
		    !memcmp(lladdr, neigh->ha, dev->addr_len))
			lladdr = neigh->ha;
	} else {
		/* No address is supplied; if we know something,
		   use it, otherwise discard the request.
		 */
		err = -EINVAL;
		if (!(old & NUD_VALID))
			goto out;
		lladdr = neigh->ha;
	}

	if (new & NUD_CONNECTED)
		neigh->confirmed = jiffies;
	neigh->updated = jiffies;

	/* If the entry was valid and the address has not changed,
	   do not change the entry state if the new one is STALE.
	 */
	err = 0;
	update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
	if (old & NUD_VALID) {
		if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
			update_isrouter = 0;
			if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) &&
			    (old & NUD_CONNECTED)) {
				lladdr = neigh->ha;
				new = NUD_STALE;
			} else
				goto out;
		} else {
			if (lladdr == neigh->ha && new == NUD_STALE &&
			    ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) ||
			     (old & NUD_CONNECTED))
			    )
				new = old;
		}
	}

	if (new != old) {
		neigh_del_timer(neigh);
		if (new & NUD_IN_TIMER)
			neigh_add_timer(neigh, (jiffies +
						((new & NUD_REACHABLE) ?
						 neigh->parms->reachable_time :
						 0)));
		neigh->nud_state = new;
	}

	if (lladdr != neigh->ha) {
		write_seqlock(&neigh->ha_lock);
		memcpy(&neigh->ha, lladdr, dev->addr_len);
		write_sequnlock(&neigh->ha_lock);
		neigh_update_hhs(neigh);
		if (!(new & NUD_CONNECTED))
			neigh->confirmed = jiffies -
				      (neigh->parms->base_reachable_time << 1);
		notify = 1;
	}
	if (new == old)
		goto out;
	if (new & NUD_CONNECTED)
		neigh_connect(neigh);
	else
		neigh_suspect(neigh);
	if (!(old & NUD_VALID)) {
		struct sk_buff *skb;

		/* Again: avoid dead loop if something went wrong */

		while (neigh->nud_state & NUD_VALID &&
		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
			struct dst_entry *dst = skb_dst(skb);
			struct neighbour *n2, *n1 = neigh;
			write_unlock_bh(&neigh->lock);

			rcu_read_lock();

			/* Why not just use 'neigh' as-is? The problem is that
			 * things such as shaper, eql, and sch_teql can end up
			 * using alternative, different, neigh objects to output
			 * the packet in the output path. So what we need to do
			 * here is re-lookup the top-level neigh in the path so
			 * we can reinject the packet there.
			 */
			n2 = NULL;
			if (dst) {
				n2 = dst_neigh_lookup_skb(dst, skb);
				if (n2)
					n1 = n2;
			}
			n1->output(n1, skb);
			if (n2)
				neigh_release(n2);
			rcu_read_unlock();

			write_lock_bh(&neigh->lock);
		}
		skb_queue_purge(&neigh->arp_queue);
		neigh->arp_queue_len_bytes = 0;
	}
out:
	if (update_isrouter) {
		neigh->flags = (flags & NEIGH_UPDATE_F_ISROUTER) ?
			(neigh->flags | NTF_ROUTER) :
			(neigh->flags & ~NTF_ROUTER);
	}
	write_unlock_bh(&neigh->lock);

	if (notify)
		neigh_update_notify(neigh);

	return err;
}
EXPORT_SYMBOL(neigh_update);
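
/*
 * Usage sketch (taken from the neigh_delete() netlink handler further
 * down in this file): an administrative override that forces an entry
 * to NUD_FAILED. The caller holds a reference, as the comment above
 * requires.
 */
#if 0
	err = neigh_update(neigh, NULL, NUD_FAILED,
			   NEIGH_UPDATE_F_OVERRIDE |
			   NEIGH_UPDATE_F_ADMIN);
	neigh_release(neigh);
#endif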

struct neighbour *neigh_event_ns(struct neigh_table *tbl,
				 u8 *lladdr, void *saddr,
				 struct net_device *dev)
{
	struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
						 lladdr || !dev->addr_len);
	if (neigh)
		neigh_update(neigh, lladdr, NUD_STALE,
			     NEIGH_UPDATE_F_OVERRIDE);
	return neigh;
}
EXPORT_SYMBOL(neigh_event_ns);

/* called with read_lock_bh(&n->lock); */
static void neigh_hh_init(struct neighbour *n, struct dst_entry *dst)
{
	struct net_device *dev = dst->dev;
	__be16 prot = dst->ops->protocol;
	struct hh_cache	*hh = &n->hh;

	write_lock_bh(&n->lock);

	/* Only one thread can come in here and initialize the
	 * hh_cache entry.
	 */
	if (!hh->hh_len)
		dev->header_ops->cache(n, hh, prot);

	write_unlock_bh(&n->lock);
}

/* This function can be used in contexts where only the old dev_queue_xmit
 * worked, e.g. if you want to override the normal output path (eql, shaper),
 * but resolution is not done yet.
 */

int neigh_compat_output(struct neighbour *neigh, struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;

	__skb_pull(skb, skb_network_offset(skb));

	if (dev_hard_header(skb, dev, ntohs(skb->protocol), NULL, NULL,
			    skb->len) < 0 &&
	    dev->header_ops->rebuild(skb))
		return 0;

	return dev_queue_xmit(skb);
}
EXPORT_SYMBOL(neigh_compat_output);

/* Slow and careful. */

int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	int rc = 0;

	if (!dst)
		goto discard;

	if (!neigh_event_send(neigh, skb)) {
		int err;
		struct net_device *dev = neigh->dev;
		unsigned int seq;

		if (dev->header_ops->cache && !neigh->hh.hh_len)
			neigh_hh_init(neigh, dst);

		do {
			__skb_pull(skb, skb_network_offset(skb));
			seq = read_seqbegin(&neigh->ha_lock);
			err = dev_hard_header(skb, dev, ntohs(skb->protocol),
					      neigh->ha, NULL, skb->len);
		} while (read_seqretry(&neigh->ha_lock, seq));

		if (err >= 0)
			rc = dev_queue_xmit(skb);
		else
			goto out_kfree_skb;
	}
out:
	return rc;
discard:
	NEIGH_PRINTK1("neigh_resolve_output: dst=%p neigh=%p\n",
		      dst, neigh);
out_kfree_skb:
	rc = -EINVAL;
	kfree_skb(skb);
	goto out;
}
EXPORT_SYMBOL(neigh_resolve_output);

/* As fast as possible without hh cache */

int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb)
{
	struct net_device *dev = neigh->dev;
	unsigned int seq;
	int err;

	do {
		__skb_pull(skb, skb_network_offset(skb));
		seq = read_seqbegin(&neigh->ha_lock);
		err = dev_hard_header(skb, dev, ntohs(skb->protocol),
				      neigh->ha, NULL, skb->len);
	} while (read_seqretry(&neigh->ha_lock, seq));

	if (err >= 0)
		err = dev_queue_xmit(skb);
	else {
		err = -EINVAL;
		kfree_skb(skb);
	}
	return err;
}
EXPORT_SYMBOL(neigh_connected_output);
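
/*
 * Illustrative note (not from the original file): the do/while loop above
 * is the standard seqlock read side. If neigh_update() rewrites neigh->ha
 * under write_seqlock(&neigh->ha_lock) between read_seqbegin() and
 * read_seqretry(), the retry rebuilds the header, so it never carries a
 * torn hardware address.
 */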

int neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb)
{
	return dev_queue_xmit(skb);
}
EXPORT_SYMBOL(neigh_direct_output);

static void neigh_proxy_process(unsigned long arg)
{
	struct neigh_table *tbl = (struct neigh_table *)arg;
	long sched_next = 0;
	unsigned long now = jiffies;
	struct sk_buff *skb, *n;

	spin_lock(&tbl->proxy_queue.lock);

	skb_queue_walk_safe(&tbl->proxy_queue, skb, n) {
		long tdif = NEIGH_CB(skb)->sched_next - now;

		if (tdif <= 0) {
			struct net_device *dev = skb->dev;

			__skb_unlink(skb, &tbl->proxy_queue);
			if (tbl->proxy_redo && netif_running(dev)) {
				rcu_read_lock();
				tbl->proxy_redo(skb);
				rcu_read_unlock();
			} else {
				kfree_skb(skb);
			}

			dev_put(dev);
		} else if (!sched_next || tdif < sched_next)
			sched_next = tdif;
	}
	del_timer(&tbl->proxy_timer);
	if (sched_next)
		mod_timer(&tbl->proxy_timer, jiffies + sched_next);
	spin_unlock(&tbl->proxy_queue.lock);
}

void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
		    struct sk_buff *skb)
{
	unsigned long now = jiffies;
	unsigned long sched_next = now + (net_random() % p->proxy_delay);

	if (tbl->proxy_queue.qlen > p->proxy_qlen) {
		kfree_skb(skb);
		return;
	}

	NEIGH_CB(skb)->sched_next = sched_next;
	NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;

	spin_lock(&tbl->proxy_queue.lock);
	if (del_timer(&tbl->proxy_timer)) {
		if (time_before(tbl->proxy_timer.expires, sched_next))
			sched_next = tbl->proxy_timer.expires;
	}
	skb_dst_drop(skb);
	dev_hold(skb->dev);
	__skb_queue_tail(&tbl->proxy_queue, skb);
	mod_timer(&tbl->proxy_timer, sched_next);
	spin_unlock(&tbl->proxy_queue.lock);
}
EXPORT_SYMBOL(pneigh_enqueue);

static inline struct neigh_parms *lookup_neigh_parms(struct neigh_table *tbl,
						     struct net *net, int ifindex)
{
	struct neigh_parms *p;

	for (p = &tbl->parms; p; p = p->next) {
		if ((p->dev && p->dev->ifindex == ifindex && net_eq(neigh_parms_net(p), net)) ||
		    (!p->dev && !ifindex))
			return p;
	}

	return NULL;
}

struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
				      struct neigh_table *tbl)
{
	struct neigh_parms *p, *ref;
	struct net *net = dev_net(dev);
	const struct net_device_ops *ops = dev->netdev_ops;

	ref = lookup_neigh_parms(tbl, net, 0);
	if (!ref)
		return NULL;

	p = kmemdup(ref, sizeof(*p), GFP_KERNEL);
	if (p) {
		p->tbl = tbl;
		atomic_set(&p->refcnt, 1);
		p->reachable_time =
				neigh_rand_reach_time(p->base_reachable_time);

		if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) {
			kfree(p);
			return NULL;
		}

		dev_hold(dev);
		p->dev = dev;
		write_pnet(&p->net, hold_net(net));
		p->sysctl_table = NULL;
		write_lock_bh(&tbl->lock);
		p->next		= tbl->parms.next;
		tbl->parms.next = p;
		write_unlock_bh(&tbl->lock);
	}
	return p;
}
EXPORT_SYMBOL(neigh_parms_alloc);

static void neigh_rcu_free_parms(struct rcu_head *head)
{
	struct neigh_parms *parms =
		container_of(head, struct neigh_parms, rcu_head);

	neigh_parms_put(parms);
}

void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
{
	struct neigh_parms **p;

	if (!parms || parms == &tbl->parms)
		return;
	write_lock_bh(&tbl->lock);
	for (p = &tbl->parms.next; *p; p = &(*p)->next) {
		if (*p == parms) {
			*p = parms->next;
			parms->dead = 1;
			write_unlock_bh(&tbl->lock);
			if (parms->dev)
				dev_put(parms->dev);
			call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
			return;
		}
	}
	write_unlock_bh(&tbl->lock);
	NEIGH_PRINTK1("neigh_parms_release: not found\n");
}
EXPORT_SYMBOL(neigh_parms_release);

static void neigh_parms_destroy(struct neigh_parms *parms)
{
	release_net(neigh_parms_net(parms));
	kfree(parms);
}

static struct lock_class_key neigh_table_proxy_queue_class;

static void neigh_table_init_no_netlink(struct neigh_table *tbl)
{
	unsigned long now = jiffies;
	unsigned long phsize;

	write_pnet(&tbl->parms.net, &init_net);
	atomic_set(&tbl->parms.refcnt, 1);
	tbl->parms.reachable_time =
			  neigh_rand_reach_time(tbl->parms.base_reachable_time);

	tbl->stats = alloc_percpu(struct neigh_statistics);
	if (!tbl->stats)
		panic("cannot create neighbour cache statistics");

#ifdef CONFIG_PROC_FS
	if (!proc_create_data(tbl->id, 0, init_net.proc_net_stat,
			      &neigh_stat_seq_fops, tbl))
		panic("cannot create neighbour proc dir entry");
#endif

	RCU_INIT_POINTER(tbl->nht, neigh_hash_alloc(3));

	phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
	tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);

	if (!tbl->nht || !tbl->phash_buckets)
		panic("cannot allocate neighbour cache hashes");

	rwlock_init(&tbl->lock);
	INIT_DEFERRABLE_WORK(&tbl->gc_work, neigh_periodic_work);
	schedule_delayed_work(&tbl->gc_work, tbl->parms.reachable_time);
	setup_timer(&tbl->proxy_timer, neigh_proxy_process, (unsigned long)tbl);
	skb_queue_head_init_class(&tbl->proxy_queue,
			&neigh_table_proxy_queue_class);

	tbl->last_flush = now;
	tbl->last_rand	= now + tbl->parms.reachable_time * 20;
}

void neigh_table_init(struct neigh_table *tbl)
{
	struct neigh_table *tmp;

	neigh_table_init_no_netlink(tbl);
	write_lock(&neigh_tbl_lock);
	for (tmp = neigh_tables; tmp; tmp = tmp->next) {
		if (tmp->family == tbl->family)
			break;
	}
	tbl->next	= neigh_tables;
	neigh_tables	= tbl;
	write_unlock(&neigh_tbl_lock);

	if (unlikely(tmp)) {
		pr_err("Registering multiple tables for family %d\n",
		       tbl->family);
		dump_stack();
	}
}
EXPORT_SYMBOL(neigh_table_init);

int neigh_table_clear(struct neigh_table *tbl)
{
	struct neigh_table **tp;

	/* It is not clean... Fix it to unload IPv6 module safely */
	cancel_delayed_work_sync(&tbl->gc_work);
	del_timer_sync(&tbl->proxy_timer);
	pneigh_queue_purge(&tbl->proxy_queue);
	neigh_ifdown(tbl, NULL);
	if (atomic_read(&tbl->entries))
		pr_crit("neighbour leakage\n");
	write_lock(&neigh_tbl_lock);
	for (tp = &neigh_tables; *tp; tp = &(*tp)->next) {
		if (*tp == tbl) {
			*tp = tbl->next;
			break;
		}
	}
	write_unlock(&neigh_tbl_lock);

	call_rcu(&rcu_dereference_protected(tbl->nht, 1)->rcu,
		 neigh_hash_free_rcu);
	tbl->nht = NULL;

	kfree(tbl->phash_buckets);
	tbl->phash_buckets = NULL;

	remove_proc_entry(tbl->id, init_net.proc_net_stat);

	free_percpu(tbl->stats);
	tbl->stats = NULL;

	return 0;
}
EXPORT_SYMBOL(neigh_table_clear);

static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct net *net = sock_net(skb->sk);
	struct ndmsg *ndm;
	struct nlattr *dst_attr;
	struct neigh_table *tbl;
	struct net_device *dev = NULL;
	int err = -EINVAL;

	ASSERT_RTNL();
	if (nlmsg_len(nlh) < sizeof(*ndm))
		goto out;

	dst_attr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_DST);
	if (dst_attr == NULL)
		goto out;

	ndm = nlmsg_data(nlh);
	if (ndm->ndm_ifindex) {
		dev = __dev_get_by_index(net, ndm->ndm_ifindex);
		if (dev == NULL) {
			err = -ENODEV;
			goto out;
		}
	}

	read_lock(&neigh_tbl_lock);
	for (tbl = neigh_tables; tbl; tbl = tbl->next) {
		struct neighbour *neigh;

		if (tbl->family != ndm->ndm_family)
			continue;
		read_unlock(&neigh_tbl_lock);

		if (nla_len(dst_attr) < tbl->key_len)
			goto out;

		if (ndm->ndm_flags & NTF_PROXY) {
			err = pneigh_delete(tbl, net, nla_data(dst_attr), dev);
			goto out;
		}

		if (dev == NULL)
			goto out;

		neigh = neigh_lookup(tbl, nla_data(dst_attr), dev);
		if (neigh == NULL) {
			err = -ENOENT;
			goto out;
		}

		err = neigh_update(neigh, NULL, NUD_FAILED,
				   NEIGH_UPDATE_F_OVERRIDE |
				   NEIGH_UPDATE_F_ADMIN);
		neigh_release(neigh);
		goto out;
	}
	read_unlock(&neigh_tbl_lock);
	err = -EAFNOSUPPORT;

out:
	return err;
}

static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct net *net = sock_net(skb->sk);
	struct ndmsg *ndm;
	struct nlattr *tb[NDA_MAX+1];
	struct neigh_table *tbl;
	struct net_device *dev = NULL;
	int err;

	ASSERT_RTNL();
	err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL);
	if (err < 0)
		goto out;

	err = -EINVAL;
	if (tb[NDA_DST] == NULL)
		goto out;

	ndm = nlmsg_data(nlh);
	if (ndm->ndm_ifindex) {
		dev = __dev_get_by_index(net, ndm->ndm_ifindex);
		if (dev == NULL) {
			err = -ENODEV;
			goto out;
		}

		if (tb[NDA_LLADDR] && nla_len(tb[NDA_LLADDR]) < dev->addr_len)
			goto out;
	}

	read_lock(&neigh_tbl_lock);
	for (tbl = neigh_tables; tbl; tbl = tbl->next) {
		int flags = NEIGH_UPDATE_F_ADMIN | NEIGH_UPDATE_F_OVERRIDE;
		struct neighbour *neigh;
		void *dst, *lladdr;

		if (tbl->family != ndm->ndm_family)
			continue;
		read_unlock(&neigh_tbl_lock);

		if (nla_len(tb[NDA_DST]) < tbl->key_len)
			goto out;
		dst = nla_data(tb[NDA_DST]);
		lladdr = tb[NDA_LLADDR] ? nla_data(tb[NDA_LLADDR]) : NULL;

		if (ndm->ndm_flags & NTF_PROXY) {
			struct pneigh_entry *pn;

			err = -ENOBUFS;
			pn = pneigh_lookup(tbl, net, dst, dev, 1);
			if (pn) {
				pn->flags = ndm->ndm_flags;
				err = 0;
			}
			goto out;
		}

		if (dev == NULL)
			goto out;

		neigh = neigh_lookup(tbl, dst, dev);
		if (neigh == NULL) {
			if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
				err = -ENOENT;
				goto out;
			}

			neigh = __neigh_lookup_errno(tbl, dst, dev);
			if (IS_ERR(neigh)) {
				err = PTR_ERR(neigh);
				goto out;
			}
		} else {
			if (nlh->nlmsg_flags & NLM_F_EXCL) {
				err = -EEXIST;
				neigh_release(neigh);
				goto out;
			}

			if (!(nlh->nlmsg_flags & NLM_F_REPLACE))
				flags &= ~NEIGH_UPDATE_F_OVERRIDE;
		}

		if (ndm->ndm_flags & NTF_USE) {
			neigh_event_send(neigh, NULL);
			err = 0;
		} else
			err = neigh_update(neigh, lladdr, ndm->ndm_state, flags);
		neigh_release(neigh);
		goto out;
	}

	read_unlock(&neigh_tbl_lock);
	err = -EAFNOSUPPORT;
out:
	return err;
}

static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
{
	struct nlattr *nest;

	nest = nla_nest_start(skb, NDTA_PARMS);
	if (nest == NULL)
		return -ENOBUFS;

	if ((parms->dev &&
	     nla_put_u32(skb, NDTPA_IFINDEX, parms->dev->ifindex)) ||
	    nla_put_u32(skb, NDTPA_REFCNT, atomic_read(&parms->refcnt)) ||
	    nla_put_u32(skb, NDTPA_QUEUE_LENBYTES, parms->queue_len_bytes) ||
	    /* approximate value for deprecated QUEUE_LEN (in packets) */
	    nla_put_u32(skb, NDTPA_QUEUE_LEN,
			parms->queue_len_bytes / SKB_TRUESIZE(ETH_FRAME_LEN)) ||
	    nla_put_u32(skb, NDTPA_PROXY_QLEN, parms->proxy_qlen) ||
	    nla_put_u32(skb, NDTPA_APP_PROBES, parms->app_probes) ||
	    nla_put_u32(skb, NDTPA_UCAST_PROBES, parms->ucast_probes) ||
	    nla_put_u32(skb, NDTPA_MCAST_PROBES, parms->mcast_probes) ||
	    nla_put_msecs(skb, NDTPA_REACHABLE_TIME, parms->reachable_time) ||
	    nla_put_msecs(skb, NDTPA_BASE_REACHABLE_TIME,
			  parms->base_reachable_time) ||
	    nla_put_msecs(skb, NDTPA_GC_STALETIME, parms->gc_staletime) ||
	    nla_put_msecs(skb, NDTPA_DELAY_PROBE_TIME,
			  parms->delay_probe_time) ||
	    nla_put_msecs(skb, NDTPA_RETRANS_TIME, parms->retrans_time) ||
	    nla_put_msecs(skb, NDTPA_ANYCAST_DELAY, parms->anycast_delay) ||
	    nla_put_msecs(skb, NDTPA_PROXY_DELAY, parms->proxy_delay) ||
	    nla_put_msecs(skb, NDTPA_LOCKTIME, parms->locktime))
		goto nla_put_failure;
	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
			      u32 pid, u32 seq, int type, int flags)
{
	struct nlmsghdr *nlh;
	struct ndtmsg *ndtmsg;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ndtmsg = nlmsg_data(nlh);

	read_lock_bh(&tbl->lock);
	ndtmsg->ndtm_family = tbl->family;
	ndtmsg->ndtm_pad1   = 0;
	ndtmsg->ndtm_pad2   = 0;

	if (nla_put_string(skb, NDTA_NAME, tbl->id) ||
	    nla_put_msecs(skb, NDTA_GC_INTERVAL, tbl->gc_interval) ||
	    nla_put_u32(skb, NDTA_THRESH1, tbl->gc_thresh1) ||
	    nla_put_u32(skb, NDTA_THRESH2, tbl->gc_thresh2) ||
	    nla_put_u32(skb, NDTA_THRESH3, tbl->gc_thresh3))
		goto nla_put_failure;
	{
		unsigned long now = jiffies;
		unsigned int flush_delta = now - tbl->last_flush;
		unsigned int rand_delta = now - tbl->last_rand;
		struct neigh_hash_table *nht;
		struct ndt_config ndc = {
			.ndtc_key_len		= tbl->key_len,
			.ndtc_entry_size	= tbl->entry_size,
			.ndtc_entries		= atomic_read(&tbl->entries),
			.ndtc_last_flush	= jiffies_to_msecs(flush_delta),
			.ndtc_last_rand		= jiffies_to_msecs(rand_delta),
			.ndtc_proxy_qlen	= tbl->proxy_queue.qlen,
		};

		rcu_read_lock_bh();
		nht = rcu_dereference_bh(tbl->nht);
		ndc.ndtc_hash_rnd = nht->hash_rnd[0];
		ndc.ndtc_hash_mask = ((1 << nht->hash_shift) - 1);
		rcu_read_unlock_bh();

		if (nla_put(skb, NDTA_CONFIG, sizeof(ndc), &ndc))
			goto nla_put_failure;
	}

	{
		int cpu;
		struct ndt_stats ndst;

		memset(&ndst, 0, sizeof(ndst));

		for_each_possible_cpu(cpu) {
			struct neigh_statistics	*st;

			st = per_cpu_ptr(tbl->stats, cpu);
			ndst.ndts_allocs		+= st->allocs;
			ndst.ndts_destroys		+= st->destroys;
			ndst.ndts_hash_grows		+= st->hash_grows;
			ndst.ndts_res_failed		+= st->res_failed;
			ndst.ndts_lookups		+= st->lookups;
			ndst.ndts_hits			+= st->hits;
			ndst.ndts_rcv_probes_mcast	+= st->rcv_probes_mcast;
			ndst.ndts_rcv_probes_ucast	+= st->rcv_probes_ucast;
			ndst.ndts_periodic_gc_runs	+= st->periodic_gc_runs;
			ndst.ndts_forced_gc_runs	+= st->forced_gc_runs;
		}

		if (nla_put(skb, NDTA_STATS, sizeof(ndst), &ndst))
			goto nla_put_failure;
	}

	BUG_ON(tbl->parms.dev);
	if (neightbl_fill_parms(skb, &tbl->parms) < 0)
		goto nla_put_failure;

	read_unlock_bh(&tbl->lock);
	return nlmsg_end(skb, nlh);

nla_put_failure:
	read_unlock_bh(&tbl->lock);
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static int neightbl_fill_param_info(struct sk_buff *skb,
				    struct neigh_table *tbl,
				    struct neigh_parms *parms,
				    u32 pid, u32 seq, int type,
				    unsigned int flags)
{
	struct ndtmsg *ndtmsg;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ndtmsg = nlmsg_data(nlh);

	read_lock_bh(&tbl->lock);
	ndtmsg->ndtm_family = tbl->family;
	ndtmsg->ndtm_pad1   = 0;
	ndtmsg->ndtm_pad2   = 0;

	if (nla_put_string(skb, NDTA_NAME, tbl->id) < 0 ||
	    neightbl_fill_parms(skb, parms) < 0)
		goto errout;

	read_unlock_bh(&tbl->lock);
	return nlmsg_end(skb, nlh);
errout:
	read_unlock_bh(&tbl->lock);
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static const struct nla_policy nl_neightbl_policy[NDTA_MAX+1] = {
	[NDTA_NAME]		= { .type = NLA_STRING },
	[NDTA_THRESH1]		= { .type = NLA_U32 },
	[NDTA_THRESH2]		= { .type = NLA_U32 },
	[NDTA_THRESH3]		= { .type = NLA_U32 },
	[NDTA_GC_INTERVAL]	= { .type = NLA_U64 },
	[NDTA_PARMS]		= { .type = NLA_NESTED },
};

static const struct nla_policy nl_ntbl_parm_policy[NDTPA_MAX+1] = {
	[NDTPA_IFINDEX]			= { .type = NLA_U32 },
	[NDTPA_QUEUE_LEN]		= { .type = NLA_U32 },
	[NDTPA_PROXY_QLEN]		= { .type = NLA_U32 },
	[NDTPA_APP_PROBES]		= { .type = NLA_U32 },
	[NDTPA_UCAST_PROBES]		= { .type = NLA_U32 },
	[NDTPA_MCAST_PROBES]		= { .type = NLA_U32 },
	[NDTPA_BASE_REACHABLE_TIME]	= { .type = NLA_U64 },
	[NDTPA_GC_STALETIME]		= { .type = NLA_U64 },
	[NDTPA_DELAY_PROBE_TIME]	= { .type = NLA_U64 },
	[NDTPA_RETRANS_TIME]		= { .type = NLA_U64 },
	[NDTPA_ANYCAST_DELAY]		= { .type = NLA_U64 },
	[NDTPA_PROXY_DELAY]		= { .type = NLA_U64 },
	[NDTPA_LOCKTIME]		= { .type = NLA_U64 },
};
1959
1960 static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1961 {
1962 struct net *net = sock_net(skb->sk);
1963 struct neigh_table *tbl;
1964 struct ndtmsg *ndtmsg;
1965 struct nlattr *tb[NDTA_MAX+1];
1966 int err;
1967
1968 err = nlmsg_parse(nlh, sizeof(*ndtmsg), tb, NDTA_MAX,
1969 nl_neightbl_policy);
1970 if (err < 0)
1971 goto errout;
1972
1973 if (tb[NDTA_NAME] == NULL) {
1974 err = -EINVAL;
1975 goto errout;
1976 }
1977
1978 ndtmsg = nlmsg_data(nlh);
1979 read_lock(&neigh_tbl_lock);
1980 for (tbl = neigh_tables; tbl; tbl = tbl->next) {
1981 if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
1982 continue;
1983
1984 if (nla_strcmp(tb[NDTA_NAME], tbl->id) == 0)
1985 break;
1986 }
1987
1988 if (tbl == NULL) {
1989 err = -ENOENT;
1990 goto errout_locked;
1991 }
1992
1993 /*
1994 * We acquire tbl->lock to be nice to the periodic timers and
1995 * make sure they always see a consistent set of values.
1996 */
1997 write_lock_bh(&tbl->lock);
1998
1999 if (tb[NDTA_PARMS]) {
2000 struct nlattr *tbp[NDTPA_MAX+1];
2001 struct neigh_parms *p;
2002 int i, ifindex = 0;
2003
2004 err = nla_parse_nested(tbp, NDTPA_MAX, tb[NDTA_PARMS],
2005 nl_ntbl_parm_policy);
2006 if (err < 0)
2007 goto errout_tbl_lock;
2008
2009 if (tbp[NDTPA_IFINDEX])
2010 ifindex = nla_get_u32(tbp[NDTPA_IFINDEX]);
2011
2012 p = lookup_neigh_parms(tbl, net, ifindex);
2013 if (p == NULL) {
2014 err = -ENOENT;
2015 goto errout_tbl_lock;
2016 }
2017
2018 for (i = 1; i <= NDTPA_MAX; i++) {
2019 if (tbp[i] == NULL)
2020 continue;
2021
2022 switch (i) {
2023 case NDTPA_QUEUE_LEN:
2024 p->queue_len_bytes = nla_get_u32(tbp[i]) *
2025 SKB_TRUESIZE(ETH_FRAME_LEN);
2026 break;
2027 case NDTPA_QUEUE_LENBYTES:
2028 p->queue_len_bytes = nla_get_u32(tbp[i]);
2029 break;
2030 case NDTPA_PROXY_QLEN:
2031 p->proxy_qlen = nla_get_u32(tbp[i]);
2032 break;
2033 case NDTPA_APP_PROBES:
2034 p->app_probes = nla_get_u32(tbp[i]);
2035 break;
2036 case NDTPA_UCAST_PROBES:
2037 p->ucast_probes = nla_get_u32(tbp[i]);
2038 break;
2039 case NDTPA_MCAST_PROBES:
2040 p->mcast_probes = nla_get_u32(tbp[i]);
2041 break;
2042 case NDTPA_BASE_REACHABLE_TIME:
2043 p->base_reachable_time = nla_get_msecs(tbp[i]);
2044 break;
2045 case NDTPA_GC_STALETIME:
2046 p->gc_staletime = nla_get_msecs(tbp[i]);
2047 break;
2048 case NDTPA_DELAY_PROBE_TIME:
2049 p->delay_probe_time = nla_get_msecs(tbp[i]);
2050 break;
2051 case NDTPA_RETRANS_TIME:
2052 p->retrans_time = nla_get_msecs(tbp[i]);
2053 break;
2054 case NDTPA_ANYCAST_DELAY:
2055 p->anycast_delay = nla_get_msecs(tbp[i]);
2056 break;
2057 case NDTPA_PROXY_DELAY:
2058 p->proxy_delay = nla_get_msecs(tbp[i]);
2059 break;
2060 case NDTPA_LOCKTIME:
2061 p->locktime = nla_get_msecs(tbp[i]);
2062 break;
2063 }
2064 }
2065 }
2066
2067 if (tb[NDTA_THRESH1])
2068 tbl->gc_thresh1 = nla_get_u32(tb[NDTA_THRESH1]);
2069
2070 if (tb[NDTA_THRESH2])
2071 tbl->gc_thresh2 = nla_get_u32(tb[NDTA_THRESH2]);
2072
2073 if (tb[NDTA_THRESH3])
2074 tbl->gc_thresh3 = nla_get_u32(tb[NDTA_THRESH3]);
2075
2076 if (tb[NDTA_GC_INTERVAL])
2077 tbl->gc_interval = nla_get_msecs(tb[NDTA_GC_INTERVAL]);
2078
2079 err = 0;
2080
2081 errout_tbl_lock:
2082 write_unlock_bh(&tbl->lock);
2083 errout_locked:
2084 read_unlock(&neigh_tbl_lock);
2085 errout:
2086 return err;
2087 }
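/*
 * Usage sketch (illustrative, via iproute2): a command such as
 *
 *	ip ntable change name arp_cache thresh2 1024
 *
 * sends RTM_SETNEIGHTBL carrying NDTA_NAME "arp_cache" and NDTA_THRESH2,
 * which the loop above matches against tbl->id before updating.
 */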
2088
2089 static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2090 {
2091 struct net *net = sock_net(skb->sk);
2092 int family, tidx, nidx = 0;
2093 int tbl_skip = cb->args[0];
2094 int neigh_skip = cb->args[1];
2095 struct neigh_table *tbl;
2096
2097 family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
2098
2099 read_lock(&neigh_tbl_lock);
2100 for (tbl = neigh_tables, tidx = 0; tbl; tbl = tbl->next, tidx++) {
2101 struct neigh_parms *p;
2102
2103 if (tidx < tbl_skip || (family && tbl->family != family))
2104 continue;
2105
2106 if (neightbl_fill_info(skb, tbl, NETLINK_CB(cb->skb).portid,
2107 cb->nlh->nlmsg_seq, RTM_NEWNEIGHTBL,
2108 NLM_F_MULTI) <= 0)
2109 break;
2110
2111 for (nidx = 0, p = tbl->parms.next; p; p = p->next) {
2112 if (!net_eq(neigh_parms_net(p), net))
2113 continue;
2114
2115 if (nidx < neigh_skip)
2116 goto next;
2117
2118 if (neightbl_fill_param_info(skb, tbl, p,
2119 NETLINK_CB(cb->skb).portid,
2120 cb->nlh->nlmsg_seq,
2121 RTM_NEWNEIGHTBL,
2122 NLM_F_MULTI) <= 0)
2123 goto out;
2124 next:
2125 nidx++;
2126 }
2127
2128 neigh_skip = 0;
2129 }
2130 out:
2131 read_unlock(&neigh_tbl_lock);
2132 cb->args[0] = tidx;
2133 cb->args[1] = nidx;
2134
2135 return skb->len;
2136 }
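/*
 * Dump resume protocol: cb->args[0] holds the index of the next table and
 * cb->args[1] the next parms entry within it, so a dump that fills the
 * skb can restart where it stopped on the following recvmsg().
 */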
2137
2138 static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
2139 u32 pid, u32 seq, int type, unsigned int flags)
2140 {
2141 unsigned long now = jiffies;
2142 struct nda_cacheinfo ci;
2143 struct nlmsghdr *nlh;
2144 struct ndmsg *ndm;
2145
2146 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
2147 if (nlh == NULL)
2148 return -EMSGSIZE;
2149
2150 ndm = nlmsg_data(nlh);
2151 ndm->ndm_family = neigh->ops->family;
2152 ndm->ndm_pad1 = 0;
2153 ndm->ndm_pad2 = 0;
2154 ndm->ndm_flags = neigh->flags;
2155 ndm->ndm_type = neigh->type;
2156 ndm->ndm_ifindex = neigh->dev->ifindex;
2157
2158 if (nla_put(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key))
2159 goto nla_put_failure;
2160
2161 read_lock_bh(&neigh->lock);
2162 ndm->ndm_state = neigh->nud_state;
2163 if (neigh->nud_state & NUD_VALID) {
2164 char haddr[MAX_ADDR_LEN];
2165
2166 neigh_ha_snapshot(haddr, neigh, neigh->dev);
2167 if (nla_put(skb, NDA_LLADDR, neigh->dev->addr_len, haddr) < 0) {
2168 read_unlock_bh(&neigh->lock);
2169 goto nla_put_failure;
2170 }
2171 }
2172
2173 ci.ndm_used = jiffies_to_clock_t(now - neigh->used);
2174 ci.ndm_confirmed = jiffies_to_clock_t(now - neigh->confirmed);
2175 ci.ndm_updated = jiffies_to_clock_t(now - neigh->updated);
2176 ci.ndm_refcnt = atomic_read(&neigh->refcnt) - 1;
2177 read_unlock_bh(&neigh->lock);
2178
2179 if (nla_put_u32(skb, NDA_PROBES, atomic_read(&neigh->probes)) ||
2180 nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
2181 goto nla_put_failure;
2182
2183 return nlmsg_end(skb, nlh);
2184
2185 nla_put_failure:
2186 nlmsg_cancel(skb, nlh);
2187 return -EMSGSIZE;
2188 }
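/*
 * The nda_cacheinfo ages above are exported in clock_t units (USER_HZ
 * ticks); userspace can recover seconds by dividing by sysconf(_SC_CLK_TCK).
 */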
2189
2190 static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn,
2191 u32 pid, u32 seq, int type, unsigned int flags,
2192 struct neigh_table *tbl)
2193 {
2194 struct nlmsghdr *nlh;
2195 struct ndmsg *ndm;
2196
2197 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
2198 if (nlh == NULL)
2199 return -EMSGSIZE;
2200
2201 ndm = nlmsg_data(nlh);
2202 ndm->ndm_family = tbl->family;
2203 ndm->ndm_pad1 = 0;
2204 ndm->ndm_pad2 = 0;
2205 ndm->ndm_flags = pn->flags | NTF_PROXY;
2206 ndm->ndm_type = NDA_DST;
2207 ndm->ndm_ifindex = pn->dev->ifindex;
2208 ndm->ndm_state = NUD_NONE;
2209
2210 if (nla_put(skb, NDA_DST, tbl->key_len, pn->key))
2211 goto nla_put_failure;
2212
2213 return nlmsg_end(skb, nlh);
2214
2215 nla_put_failure:
2216 nlmsg_cancel(skb, nlh);
2217 return -EMSGSIZE;
2218 }
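/*
 * Note: ndm_type above carries NDA_DST, an attribute constant rather than
 * an RTN_* route type; later kernels appear to use RTN_UNICAST here
 * instead.
 */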
2219
2220 static void neigh_update_notify(struct neighbour *neigh)
2221 {
2222 call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
2223 __neigh_notify(neigh, RTM_NEWNEIGH, 0);
2224 }
2225
2226 static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2227 struct netlink_callback *cb)
2228 {
2229 struct net *net = sock_net(skb->sk);
2230 struct neighbour *n;
2231 int rc, h, s_h = cb->args[1];
2232 int idx, s_idx = idx = cb->args[2];
2233 struct neigh_hash_table *nht;
2234
2235 rcu_read_lock_bh();
2236 nht = rcu_dereference_bh(tbl->nht);
2237
2238 for (h = s_h; h < (1 << nht->hash_shift); h++) {
2239 if (h > s_h)
2240 s_idx = 0;
2241 for (n = rcu_dereference_bh(nht->hash_buckets[h]), idx = 0;
2242 n != NULL;
2243 n = rcu_dereference_bh(n->next)) {
2244 if (!net_eq(dev_net(n->dev), net))
2245 continue;
2246 if (idx < s_idx)
2247 goto next;
2248 if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
2249 cb->nlh->nlmsg_seq,
2250 RTM_NEWNEIGH,
2251 NLM_F_MULTI) <= 0) {
2252 rc = -1;
2253 goto out;
2254 }
2255 next:
2256 idx++;
2257 }
2258 }
2259 rc = skb->len;
2260 out:
2261 rcu_read_unlock_bh();
2262 cb->args[1] = h;
2263 cb->args[2] = idx;
2264 return rc;
2265 }
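/*
 * This dump is restartable like neightbl_dump_info() above: cb->args[1]
 * saves the hash bucket and cb->args[2] the position within its chain,
 * while rcu_read_lock_bh() keeps the hash table and chains safe to walk
 * as the skb is filled.
 */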
2266
2267 static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2268 struct netlink_callback *cb)
2269 {
2270 struct pneigh_entry *n;
2271 struct net *net = sock_net(skb->sk);
2272 int rc, h, s_h = cb->args[3];
2273 int idx, s_idx = idx = cb->args[4];
2274
2275 read_lock_bh(&tbl->lock);
2276
2277 for (h = s_h; h <= PNEIGH_HASHMASK; h++) {
2278 if (h > s_h)
2279 s_idx = 0;
2280 for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) {
2281 if (dev_net(n->dev) != net)
2282 continue;
2283 if (idx < s_idx)
2284 goto next;
2285 if (pneigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
2286 cb->nlh->nlmsg_seq,
2287 RTM_NEWNEIGH,
2288 NLM_F_MULTI, tbl) <= 0) {
2289 read_unlock_bh(&tbl->lock);
2290 rc = -1;
2291 goto out;
2292 }
2293 next:
2294 idx++;
2295 }
2296 }
2297
2298 read_unlock_bh(&tbl->lock);
2299 rc = skb->len;
2300 out:
2301 cb->args[3] = h;
2302 cb->args[4] = idx;
2303 return rc;
2304
2305 }
2306
2307 static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2308 {
2309 struct neigh_table *tbl;
2310 int t, family, s_t;
2311 int proxy = 0;
2312 int err;
2313
2314 read_lock(&neigh_tbl_lock);
2315 family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
2316
2317 	/* check whether a full ndmsg structure was sent; the family
2318 	 * field sits at the same offset in both rtgenmsg and ndmsg
2319 */
2320 if (nlmsg_len(cb->nlh) >= sizeof(struct ndmsg) &&
2321 ((struct ndmsg *) nlmsg_data(cb->nlh))->ndm_flags == NTF_PROXY)
2322 proxy = 1;
2323
2324 s_t = cb->args[0];
2325
2326 for (tbl = neigh_tables, t = 0; tbl;
2327 tbl = tbl->next, t++) {
2328 if (t < s_t || (family && tbl->family != family))
2329 continue;
2330 if (t > s_t)
2331 memset(&cb->args[1], 0, sizeof(cb->args) -
2332 sizeof(cb->args[0]));
2333 if (proxy)
2334 err = pneigh_dump_table(tbl, skb, cb);
2335 else
2336 err = neigh_dump_table(tbl, skb, cb);
2337 if (err < 0)
2338 break;
2339 }
2340 read_unlock(&neigh_tbl_lock);
2341
2342 cb->args[0] = t;
2343 return skb->len;
2344 }
2345
2346 void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
2347 {
2348 int chain;
2349 struct neigh_hash_table *nht;
2350
2351 rcu_read_lock_bh();
2352 nht = rcu_dereference_bh(tbl->nht);
2353
2354 read_lock(&tbl->lock); /* avoid resizes */
2355 for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
2356 struct neighbour *n;
2357
2358 for (n = rcu_dereference_bh(nht->hash_buckets[chain]);
2359 n != NULL;
2360 n = rcu_dereference_bh(n->next))
2361 cb(n, cookie);
2362 }
2363 read_unlock(&tbl->lock);
2364 rcu_read_unlock_bh();
2365 }
2366 EXPORT_SYMBOL(neigh_for_each);
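/*
 * Example callback (illustrative only, not kernel API): counting the
 * entries that resolve over one device, matching the signature above:
 *
 *	struct count_cookie { struct net_device *dev; int count; };
 *
 *	static void count_on_dev(struct neighbour *n, void *cookie)
 *	{
 *		struct count_cookie *c = cookie;
 *
 *		if (n->dev == c->dev)
 *			c->count++;
 *	}
 *
 * invoked as neigh_for_each(&arp_tbl, count_on_dev, &cookie).
 */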
2367
2368 /* The tbl->lock must be held as a writer and BH disabled. */
2369 void __neigh_for_each_release(struct neigh_table *tbl,
2370 int (*cb)(struct neighbour *))
2371 {
2372 int chain;
2373 struct neigh_hash_table *nht;
2374
2375 nht = rcu_dereference_protected(tbl->nht,
2376 lockdep_is_held(&tbl->lock));
2377 for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
2378 struct neighbour *n;
2379 struct neighbour __rcu **np;
2380
2381 np = &nht->hash_buckets[chain];
2382 while ((n = rcu_dereference_protected(*np,
2383 lockdep_is_held(&tbl->lock))) != NULL) {
2384 int release;
2385
2386 write_lock(&n->lock);
2387 release = cb(n);
2388 if (release) {
2389 rcu_assign_pointer(*np,
2390 rcu_dereference_protected(n->next,
2391 lockdep_is_held(&tbl->lock)));
2392 n->dead = 1;
2393 } else
2394 np = &n->next;
2395 write_unlock(&n->lock);
2396 if (release)
2397 neigh_cleanup_and_release(n);
2398 }
2399 }
2400 }
2401 EXPORT_SYMBOL(__neigh_for_each_release);
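/*
 * Sketch of a release callback (illustrative, not from this file):
 * returning nonzero tells __neigh_for_each_release() to unlink the entry,
 * mark it dead and hand it to neigh_cleanup_and_release(); returning 0
 * keeps it on its chain.
 *
 *	static int release_if_unreferenced(struct neighbour *n)
 *	{
 *		return atomic_read(&n->refcnt) == 1;
 *	}
 */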
2402
2403 #ifdef CONFIG_PROC_FS
2404
2405 static struct neighbour *neigh_get_first(struct seq_file *seq)
2406 {
2407 struct neigh_seq_state *state = seq->private;
2408 struct net *net = seq_file_net(seq);
2409 struct neigh_hash_table *nht = state->nht;
2410 struct neighbour *n = NULL;
2411 int bucket = state->bucket;
2412
2413 state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
2414 for (bucket = 0; bucket < (1 << nht->hash_shift); bucket++) {
2415 n = rcu_dereference_bh(nht->hash_buckets[bucket]);
2416
2417 while (n) {
2418 if (!net_eq(dev_net(n->dev), net))
2419 goto next;
2420 if (state->neigh_sub_iter) {
2421 loff_t fakep = 0;
2422 void *v;
2423
2424 v = state->neigh_sub_iter(state, n, &fakep);
2425 if (!v)
2426 goto next;
2427 }
2428 if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
2429 break;
2430 if (n->nud_state & ~NUD_NOARP)
2431 break;
2432 next:
2433 n = rcu_dereference_bh(n->next);
2434 }
2435
2436 if (n)
2437 break;
2438 }
2439 state->bucket = bucket;
2440
2441 return n;
2442 }
2443
2444 static struct neighbour *neigh_get_next(struct seq_file *seq,
2445 struct neighbour *n,
2446 loff_t *pos)
2447 {
2448 struct neigh_seq_state *state = seq->private;
2449 struct net *net = seq_file_net(seq);
2450 struct neigh_hash_table *nht = state->nht;
2451
2452 if (state->neigh_sub_iter) {
2453 void *v = state->neigh_sub_iter(state, n, pos);
2454 if (v)
2455 return n;
2456 }
2457 n = rcu_dereference_bh(n->next);
2458
2459 while (1) {
2460 while (n) {
2461 if (!net_eq(dev_net(n->dev), net))
2462 goto next;
2463 if (state->neigh_sub_iter) {
2464 void *v = state->neigh_sub_iter(state, n, pos);
2465 if (v)
2466 return n;
2467 goto next;
2468 }
2469 if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
2470 break;
2471
2472 if (n->nud_state & ~NUD_NOARP)
2473 break;
2474 next:
2475 n = rcu_dereference_bh(n->next);
2476 }
2477
2478 if (n)
2479 break;
2480
2481 if (++state->bucket >= (1 << nht->hash_shift))
2482 break;
2483
2484 n = rcu_dereference_bh(nht->hash_buckets[state->bucket]);
2485 }
2486
2487 if (n && pos)
2488 --(*pos);
2489 return n;
2490 }
2491
2492 static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
2493 {
2494 struct neighbour *n = neigh_get_first(seq);
2495
2496 if (n) {
2497 --(*pos);
2498 while (*pos) {
2499 n = neigh_get_next(seq, n, pos);
2500 if (!n)
2501 break;
2502 }
2503 }
2504 return *pos ? NULL : n;
2505 }
2506
2507 static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
2508 {
2509 struct neigh_seq_state *state = seq->private;
2510 struct net *net = seq_file_net(seq);
2511 struct neigh_table *tbl = state->tbl;
2512 struct pneigh_entry *pn = NULL;
2513 int bucket = state->bucket;
2514
2515 state->flags |= NEIGH_SEQ_IS_PNEIGH;
2516 for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
2517 pn = tbl->phash_buckets[bucket];
2518 while (pn && !net_eq(pneigh_net(pn), net))
2519 pn = pn->next;
2520 if (pn)
2521 break;
2522 }
2523 state->bucket = bucket;
2524
2525 return pn;
2526 }
2527
2528 static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
2529 struct pneigh_entry *pn,
2530 loff_t *pos)
2531 {
2532 struct neigh_seq_state *state = seq->private;
2533 struct net *net = seq_file_net(seq);
2534 struct neigh_table *tbl = state->tbl;
2535
2536 do {
2537 pn = pn->next;
2538 } while (pn && !net_eq(pneigh_net(pn), net));
2539
2540 while (!pn) {
2541 if (++state->bucket > PNEIGH_HASHMASK)
2542 break;
2543 pn = tbl->phash_buckets[state->bucket];
2544 while (pn && !net_eq(pneigh_net(pn), net))
2545 pn = pn->next;
2546 if (pn)
2547 break;
2548 }
2549
2550 if (pn && pos)
2551 --(*pos);
2552
2553 return pn;
2554 }
2555
2556 static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
2557 {
2558 struct pneigh_entry *pn = pneigh_get_first(seq);
2559
2560 if (pn) {
2561 --(*pos);
2562 while (*pos) {
2563 pn = pneigh_get_next(seq, pn, pos);
2564 if (!pn)
2565 break;
2566 }
2567 }
2568 return *pos ? NULL : pn;
2569 }
2570
2571 static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
2572 {
2573 struct neigh_seq_state *state = seq->private;
2574 void *rc;
2575 loff_t idxpos = *pos;
2576
2577 rc = neigh_get_idx(seq, &idxpos);
2578 if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
2579 rc = pneigh_get_idx(seq, &idxpos);
2580
2581 return rc;
2582 }
2583
2584 void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
2585 __acquires(rcu_bh)
2586 {
2587 struct neigh_seq_state *state = seq->private;
2588
2589 state->tbl = tbl;
2590 state->bucket = 0;
2591 state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);
2592
2593 rcu_read_lock_bh();
2594 state->nht = rcu_dereference_bh(tbl->nht);
2595
2596 return *pos ? neigh_get_idx_any(seq, pos) : SEQ_START_TOKEN;
2597 }
2598 EXPORT_SYMBOL(neigh_seq_start);
2599
2600 void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2601 {
2602 struct neigh_seq_state *state;
2603 void *rc;
2604
2605 if (v == SEQ_START_TOKEN) {
2606 rc = neigh_get_first(seq);
2607 goto out;
2608 }
2609
2610 state = seq->private;
2611 if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) {
2612 rc = neigh_get_next(seq, v, NULL);
2613 if (rc)
2614 goto out;
2615 if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY))
2616 rc = pneigh_get_first(seq);
2617 } else {
2618 BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY);
2619 rc = pneigh_get_next(seq, v, NULL);
2620 }
2621 out:
2622 ++(*pos);
2623 return rc;
2624 }
2625 EXPORT_SYMBOL(neigh_seq_next);
2626
2627 void neigh_seq_stop(struct seq_file *seq, void *v)
2628 __releases(rcu_bh)
2629 {
2630 rcu_read_unlock_bh();
2631 }
2632 EXPORT_SYMBOL(neigh_seq_stop);
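/*
 * Wiring sketch: a protocol exposes its table through these helpers by
 * using struct neigh_seq_state as the seq_file private data, roughly
 * (modeled on ARP's /proc/net/arp code; names here are illustrative):
 *
 *	static void *example_seq_start(struct seq_file *seq, loff_t *pos)
 *	{
 *		return neigh_seq_start(seq, pos, &arp_tbl, 0);
 *	}
 *
 *	static const struct seq_operations example_seq_ops = {
 *		.start	= example_seq_start,
 *		.next	= neigh_seq_next,
 *		.stop	= neigh_seq_stop,
 *		.show	= example_seq_show,	/* protocol-specific */
 *	};
 */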
2633
2634 /* statistics via seq_file */
2635
2636 static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
2637 {
2638 struct neigh_table *tbl = seq->private;
2639 int cpu;
2640
2641 if (*pos == 0)
2642 return SEQ_START_TOKEN;
2643
2644 for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
2645 if (!cpu_possible(cpu))
2646 continue;
2647 *pos = cpu+1;
2648 return per_cpu_ptr(tbl->stats, cpu);
2649 }
2650 return NULL;
2651 }
2652
2653 static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2654 {
2655 struct neigh_table *tbl = seq->private;
2656 int cpu;
2657
2658 for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
2659 if (!cpu_possible(cpu))
2660 continue;
2661 *pos = cpu+1;
2662 return per_cpu_ptr(tbl->stats, cpu);
2663 }
2664 return NULL;
2665 }
2666
2667 static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
2668 {
2669
2670 }
2671
2672 static int neigh_stat_seq_show(struct seq_file *seq, void *v)
2673 {
2674 struct neigh_table *tbl = seq->private;
2675 struct neigh_statistics *st = v;
2676
2677 if (v == SEQ_START_TOKEN) {
2678 seq_printf(seq, "entries allocs destroys hash_grows lookups hits res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs unresolved_discards\n");
2679 return 0;
2680 }
2681
2682 seq_printf(seq, "%08x %08lx %08lx %08lx %08lx %08lx %08lx "
2683 "%08lx %08lx %08lx %08lx %08lx\n",
2684 atomic_read(&tbl->entries),
2685
2686 st->allocs,
2687 st->destroys,
2688 st->hash_grows,
2689
2690 st->lookups,
2691 st->hits,
2692
2693 st->res_failed,
2694
2695 st->rcv_probes_mcast,
2696 st->rcv_probes_ucast,
2697
2698 st->periodic_gc_runs,
2699 st->forced_gc_runs,
2700 st->unres_discards
2701 );
2702
2703 return 0;
2704 }
2705
2706 static const struct seq_operations neigh_stat_seq_ops = {
2707 .start = neigh_stat_seq_start,
2708 .next = neigh_stat_seq_next,
2709 .stop = neigh_stat_seq_stop,
2710 .show = neigh_stat_seq_show,
2711 };
2712
2713 static int neigh_stat_seq_open(struct inode *inode, struct file *file)
2714 {
2715 int ret = seq_open(file, &neigh_stat_seq_ops);
2716
2717 if (!ret) {
2718 struct seq_file *sf = file->private_data;
2719 sf->private = PDE(inode)->data;
2720 }
2721 return ret;
2722 };
2723
2724 static const struct file_operations neigh_stat_seq_fops = {
2725 .owner = THIS_MODULE,
2726 .open = neigh_stat_seq_open,
2727 .read = seq_read,
2728 .llseek = seq_lseek,
2729 .release = seq_release,
2730 };
2731
2732 #endif /* CONFIG_PROC_FS */
2733
2734 static inline size_t neigh_nlmsg_size(void)
2735 {
2736 return NLMSG_ALIGN(sizeof(struct ndmsg))
2737 + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
2738 + nla_total_size(MAX_ADDR_LEN) /* NDA_LLADDR */
2739 + nla_total_size(sizeof(struct nda_cacheinfo))
2740 + nla_total_size(4); /* NDA_PROBES */
2741 }
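/*
 * MAX_ADDR_LEN is a worst case for both NDA_DST and NDA_LLADDR; the
 * message actually built in neigh_fill_info() uses tbl->key_len and
 * dev->addr_len, so this size only ever overestimates.
 */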
2742
2743 static void __neigh_notify(struct neighbour *n, int type, int flags)
2744 {
2745 struct net *net = dev_net(n->dev);
2746 struct sk_buff *skb;
2747 int err = -ENOBUFS;
2748
2749 skb = nlmsg_new(neigh_nlmsg_size(), GFP_ATOMIC);
2750 if (skb == NULL)
2751 goto errout;
2752
2753 err = neigh_fill_info(skb, n, 0, 0, type, flags);
2754 if (err < 0) {
2755 /* -EMSGSIZE implies BUG in neigh_nlmsg_size() */
2756 WARN_ON(err == -EMSGSIZE);
2757 kfree_skb(skb);
2758 goto errout;
2759 }
2760 rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
2761 return;
2762 errout:
2763 if (err < 0)
2764 rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
2765 }
2766
2767 #ifdef CONFIG_ARPD
2768 void neigh_app_ns(struct neighbour *n)
2769 {
2770 __neigh_notify(n, RTM_GETNEIGH, NLM_F_REQUEST);
2771 }
2772 EXPORT_SYMBOL(neigh_app_ns);
2773 #endif /* CONFIG_ARPD */
2774
2775 #ifdef CONFIG_SYSCTL
2776 static int zero;
2777 static int unres_qlen_max = INT_MAX / SKB_TRUESIZE(ETH_FRAME_LEN);
2778
2779 static int proc_unres_qlen(ctl_table *ctl, int write, void __user *buffer,
2780 size_t *lenp, loff_t *ppos)
2781 {
2782 int size, ret;
2783 ctl_table tmp = *ctl;
2784
2785 tmp.extra1 = &zero;
2786 tmp.extra2 = &unres_qlen_max;
2787 tmp.data = &size;
2788
2789 size = *(int *)ctl->data / SKB_TRUESIZE(ETH_FRAME_LEN);
2790 ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
2791
2792 if (write && !ret)
2793 *(int *)ctl->data = size * SKB_TRUESIZE(ETH_FRAME_LEN);
2794 return ret;
2795 }
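/*
 * proc_unres_qlen() presents the byte-based queue_len_bytes as a packet
 * count, with SKB_TRUESIZE(ETH_FRAME_LEN) as the per-packet cost.
 * Worked example (illustrative numbers): if that truesize is roughly 2 KB,
 * a write of unres_qlen = 3,000,000 packets would correspond to ~6e9
 * bytes and overflow the int multiplication; rejecting writes above
 * unres_qlen_max = INT_MAX / SKB_TRUESIZE(ETH_FRAME_LEN) guarantees that
 * size * SKB_TRUESIZE(ETH_FRAME_LEN) fits in an int.
 */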
2796
2797 enum {
2798 NEIGH_VAR_MCAST_PROBE,
2799 NEIGH_VAR_UCAST_PROBE,
2800 NEIGH_VAR_APP_PROBE,
2801 NEIGH_VAR_RETRANS_TIME,
2802 NEIGH_VAR_BASE_REACHABLE_TIME,
2803 NEIGH_VAR_DELAY_PROBE_TIME,
2804 NEIGH_VAR_GC_STALETIME,
2805 NEIGH_VAR_QUEUE_LEN,
2806 NEIGH_VAR_QUEUE_LEN_BYTES,
2807 NEIGH_VAR_PROXY_QLEN,
2808 NEIGH_VAR_ANYCAST_DELAY,
2809 NEIGH_VAR_PROXY_DELAY,
2810 NEIGH_VAR_LOCKTIME,
2811 NEIGH_VAR_RETRANS_TIME_MS,
2812 NEIGH_VAR_BASE_REACHABLE_TIME_MS,
2813 NEIGH_VAR_GC_INTERVAL,
2814 NEIGH_VAR_GC_THRESH1,
2815 NEIGH_VAR_GC_THRESH2,
2816 NEIGH_VAR_GC_THRESH3,
2817 NEIGH_VAR_MAX
2818 };
2819
2820 static struct neigh_sysctl_table {
2821 struct ctl_table_header *sysctl_header;
2822 struct ctl_table neigh_vars[NEIGH_VAR_MAX + 1];
2823 } neigh_sysctl_template __read_mostly = {
2824 .neigh_vars = {
2825 [NEIGH_VAR_MCAST_PROBE] = {
2826 .procname = "mcast_solicit",
2827 .maxlen = sizeof(int),
2828 .mode = 0644,
2829 .proc_handler = proc_dointvec,
2830 },
2831 [NEIGH_VAR_UCAST_PROBE] = {
2832 .procname = "ucast_solicit",
2833 .maxlen = sizeof(int),
2834 .mode = 0644,
2835 .proc_handler = proc_dointvec,
2836 },
2837 [NEIGH_VAR_APP_PROBE] = {
2838 .procname = "app_solicit",
2839 .maxlen = sizeof(int),
2840 .mode = 0644,
2841 .proc_handler = proc_dointvec,
2842 },
2843 [NEIGH_VAR_RETRANS_TIME] = {
2844 .procname = "retrans_time",
2845 .maxlen = sizeof(int),
2846 .mode = 0644,
2847 .proc_handler = proc_dointvec_userhz_jiffies,
2848 },
2849 [NEIGH_VAR_BASE_REACHABLE_TIME] = {
2850 .procname = "base_reachable_time",
2851 .maxlen = sizeof(int),
2852 .mode = 0644,
2853 .proc_handler = proc_dointvec_jiffies,
2854 },
2855 [NEIGH_VAR_DELAY_PROBE_TIME] = {
2856 .procname = "delay_first_probe_time",
2857 .maxlen = sizeof(int),
2858 .mode = 0644,
2859 .proc_handler = proc_dointvec_jiffies,
2860 },
2861 [NEIGH_VAR_GC_STALETIME] = {
2862 .procname = "gc_stale_time",
2863 .maxlen = sizeof(int),
2864 .mode = 0644,
2865 .proc_handler = proc_dointvec_jiffies,
2866 },
2867 [NEIGH_VAR_QUEUE_LEN] = {
2868 .procname = "unres_qlen",
2869 .maxlen = sizeof(int),
2870 .mode = 0644,
2871 .proc_handler = proc_unres_qlen,
2872 },
2873 [NEIGH_VAR_QUEUE_LEN_BYTES] = {
2874 .procname = "unres_qlen_bytes",
2875 .maxlen = sizeof(int),
2876 .mode = 0644,
2877 .extra1 = &zero,
2878 .proc_handler = proc_dointvec_minmax,
2879 },
2880 [NEIGH_VAR_PROXY_QLEN] = {
2881 .procname = "proxy_qlen",
2882 .maxlen = sizeof(int),
2883 .mode = 0644,
2884 .proc_handler = proc_dointvec,
2885 },
2886 [NEIGH_VAR_ANYCAST_DELAY] = {
2887 .procname = "anycast_delay",
2888 .maxlen = sizeof(int),
2889 .mode = 0644,
2890 .proc_handler = proc_dointvec_userhz_jiffies,
2891 },
2892 [NEIGH_VAR_PROXY_DELAY] = {
2893 .procname = "proxy_delay",
2894 .maxlen = sizeof(int),
2895 .mode = 0644,
2896 .proc_handler = proc_dointvec_userhz_jiffies,
2897 },
2898 [NEIGH_VAR_LOCKTIME] = {
2899 .procname = "locktime",
2900 .maxlen = sizeof(int),
2901 .mode = 0644,
2902 .proc_handler = proc_dointvec_userhz_jiffies,
2903 },
2904 [NEIGH_VAR_RETRANS_TIME_MS] = {
2905 .procname = "retrans_time_ms",
2906 .maxlen = sizeof(int),
2907 .mode = 0644,
2908 .proc_handler = proc_dointvec_ms_jiffies,
2909 },
2910 [NEIGH_VAR_BASE_REACHABLE_TIME_MS] = {
2911 .procname = "base_reachable_time_ms",
2912 .maxlen = sizeof(int),
2913 .mode = 0644,
2914 .proc_handler = proc_dointvec_ms_jiffies,
2915 },
2916 [NEIGH_VAR_GC_INTERVAL] = {
2917 .procname = "gc_interval",
2918 .maxlen = sizeof(int),
2919 .mode = 0644,
2920 .proc_handler = proc_dointvec_jiffies,
2921 },
2922 [NEIGH_VAR_GC_THRESH1] = {
2923 .procname = "gc_thresh1",
2924 .maxlen = sizeof(int),
2925 .mode = 0644,
2926 .proc_handler = proc_dointvec,
2927 },
2928 [NEIGH_VAR_GC_THRESH2] = {
2929 .procname = "gc_thresh2",
2930 .maxlen = sizeof(int),
2931 .mode = 0644,
2932 .proc_handler = proc_dointvec,
2933 },
2934 [NEIGH_VAR_GC_THRESH3] = {
2935 .procname = "gc_thresh3",
2936 .maxlen = sizeof(int),
2937 .mode = 0644,
2938 .proc_handler = proc_dointvec,
2939 },
2940 {},
2941 },
2942 };
2943
2944 int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
2945 char *p_name, proc_handler *handler)
2946 {
2947 struct neigh_sysctl_table *t;
2948 const char *dev_name_source = NULL;
2949 char neigh_path[ sizeof("net//neigh/") + IFNAMSIZ + IFNAMSIZ ];
2950
2951 t = kmemdup(&neigh_sysctl_template, sizeof(*t), GFP_KERNEL);
2952 if (!t)
2953 goto err;
2954
2955 t->neigh_vars[NEIGH_VAR_MCAST_PROBE].data = &p->mcast_probes;
2956 t->neigh_vars[NEIGH_VAR_UCAST_PROBE].data = &p->ucast_probes;
2957 t->neigh_vars[NEIGH_VAR_APP_PROBE].data = &p->app_probes;
2958 t->neigh_vars[NEIGH_VAR_RETRANS_TIME].data = &p->retrans_time;
2959 t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].data = &p->base_reachable_time;
2960 t->neigh_vars[NEIGH_VAR_DELAY_PROBE_TIME].data = &p->delay_probe_time;
2961 t->neigh_vars[NEIGH_VAR_GC_STALETIME].data = &p->gc_staletime;
2962 t->neigh_vars[NEIGH_VAR_QUEUE_LEN].data = &p->queue_len_bytes;
2963 t->neigh_vars[NEIGH_VAR_QUEUE_LEN_BYTES].data = &p->queue_len_bytes;
2964 t->neigh_vars[NEIGH_VAR_PROXY_QLEN].data = &p->proxy_qlen;
2965 t->neigh_vars[NEIGH_VAR_ANYCAST_DELAY].data = &p->anycast_delay;
2966 t->neigh_vars[NEIGH_VAR_PROXY_DELAY].data = &p->proxy_delay;
2967 t->neigh_vars[NEIGH_VAR_LOCKTIME].data = &p->locktime;
2968 t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].data = &p->retrans_time;
2969 t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].data = &p->base_reachable_time;
2970
2971 if (dev) {
2972 dev_name_source = dev->name;
2973 /* Terminate the table early */
2974 memset(&t->neigh_vars[NEIGH_VAR_GC_INTERVAL], 0,
2975 sizeof(t->neigh_vars[NEIGH_VAR_GC_INTERVAL]));
2976 } else {
2977 dev_name_source = "default";
2978 t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = (int *)(p + 1);
2979 t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = (int *)(p + 1) + 1;
2980 t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = (int *)(p + 1) + 2;
2981 t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = (int *)(p + 1) + 3;
2982 }
2983
2984
2985 if (handler) {
2986 /* RetransTime */
2987 t->neigh_vars[NEIGH_VAR_RETRANS_TIME].proc_handler = handler;
2988 t->neigh_vars[NEIGH_VAR_RETRANS_TIME].extra1 = dev;
2989 /* ReachableTime */
2990 t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler = handler;
2991 t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].extra1 = dev;
2992 		/* RetransTime (in milliseconds) */
2993 t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].proc_handler = handler;
2994 t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].extra1 = dev;
2995 /* ReachableTime (in milliseconds) */
2996 t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler = handler;
2997 t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].extra1 = dev;
2998 }
2999
3000 /* Don't export sysctls to unprivileged users */
3001 if (neigh_parms_net(p)->user_ns != &init_user_ns)
3002 t->neigh_vars[0].procname = NULL;
3003
3004 snprintf(neigh_path, sizeof(neigh_path), "net/%s/neigh/%s",
3005 p_name, dev_name_source);
3006 t->sysctl_header =
3007 register_net_sysctl(neigh_parms_net(p), neigh_path, t->neigh_vars);
3008 if (!t->sysctl_header)
3009 goto free;
3010
3011 p->sysctl_table = t;
3012 return 0;
3013
3014 free:
3015 kfree(t);
3016 err:
3017 return -ENOBUFS;
3018 }
3019 EXPORT_SYMBOL(neigh_sysctl_register);
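/*
 * Caller sketch (illustrative): a protocol registers its per-device knobs
 * under /proc/sys/net/<proto>/neigh/<dev>, e.g. roughly what IPv6
 * neighbour discovery does when a device comes up:
 *
 *	neigh_sysctl_register(dev, idev->nd_parms, "ipv6",
 *			      &ndisc_ifinfo_sysctl_change);
 */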
3020
3021 void neigh_sysctl_unregister(struct neigh_parms *p)
3022 {
3023 if (p->sysctl_table) {
3024 struct neigh_sysctl_table *t = p->sysctl_table;
3025 p->sysctl_table = NULL;
3026 unregister_net_sysctl_table(t->sysctl_header);
3027 kfree(t);
3028 }
3029 }
3030 EXPORT_SYMBOL(neigh_sysctl_unregister);
3031
3032 #endif /* CONFIG_SYSCTL */
3033
3034 static int __init neigh_init(void)
3035 {
3036 rtnl_register(PF_UNSPEC, RTM_NEWNEIGH, neigh_add, NULL, NULL);
3037 rtnl_register(PF_UNSPEC, RTM_DELNEIGH, neigh_delete, NULL, NULL);
3038 rtnl_register(PF_UNSPEC, RTM_GETNEIGH, NULL, neigh_dump_info, NULL);
3039
3040 rtnl_register(PF_UNSPEC, RTM_GETNEIGHTBL, NULL, neightbl_dump_info,
3041 NULL);
3042 rtnl_register(PF_UNSPEC, RTM_SETNEIGHTBL, neightbl_set, NULL, NULL);
3043
3044 return 0;
3045 }
3046
3047 subsys_initcall(neigh_init);
3048