batman-adv: remove unused BATADV_BONDING_TQ_THRESHOLD constant
[deliverable/linux.git] / net / batman-adv / originator.c
1/* Copyright (C) 2009-2016 B.A.T.M.A.N. contributors:
2 *
3 * Marek Lindner, Simon Wunderlich
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of version 2 of the GNU General Public
7 * License as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include "originator.h"
19#include "main.h"
20
21#include <linux/atomic.h>
22#include <linux/errno.h>
23#include <linux/etherdevice.h>
24#include <linux/fs.h>
25#include <linux/jiffies.h>
26#include <linux/kernel.h>
27#include <linux/kref.h>
28#include <linux/list.h>
29#include <linux/lockdep.h>
30#include <linux/netdevice.h>
31#include <linux/rculist.h>
32#include <linux/seq_file.h>
33#include <linux/slab.h>
34#include <linux/spinlock.h>
35#include <linux/workqueue.h>
36
37#include "distributed-arp-table.h"
38#include "fragmentation.h"
39#include "gateway_client.h"
40#include "hard-interface.h"
41#include "hash.h"
42#include "multicast.h"
43#include "network-coding.h"
44#include "routing.h"
45#include "translation-table.h"
46
47/* hash class keys */
48static struct lock_class_key batadv_orig_hash_lock_class_key;
49
50static void batadv_purge_orig(struct work_struct *work);
51
52/**
53 * batadv_compare_orig - comparison function used in the originator hash table
54 * @node: node in the local table
55 * @data2: second object to compare the node to
56 *
57 * Return: 1 if they are the same originator
58 */
59int batadv_compare_orig(const struct hlist_node *node, const void *data2)
60{
61 const void *data1 = container_of(node, struct batadv_orig_node,
62 hash_entry);
63
64 return batadv_compare_eth(data1, data2);
65}
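/* Hypothetical usage sketch (not part of the original file): the comparison
 * callback above is normally handed to the generic hash helpers together
 * with batadv_choose_orig() from originator.h when a routing algorithm
 * inserts an orig_node into bat_priv->orig_hash. The exact batadv_hash_add()
 * signature is an assumption of this sketch and the function name below is
 * made up for illustration.
 */
static int batadv_example_hash_orig_node(struct batadv_priv *bat_priv,
					 struct batadv_orig_node *orig_node)
{
	/* returns 0 on success; see hash.h for the other return codes */
	return batadv_hash_add(bat_priv->orig_hash, batadv_compare_orig,
			       batadv_choose_orig, orig_node,
			       &orig_node->hash_entry);
}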
66
67/**
68 * batadv_orig_node_vlan_get - get an orig_node_vlan object
69 * @orig_node: the originator serving the VLAN
70 * @vid: the VLAN identifier
71 *
72 * Return: the vlan object identified by vid and belonging to orig_node or NULL
73 * if it does not exist.
74 */
75struct batadv_orig_node_vlan *
76batadv_orig_node_vlan_get(struct batadv_orig_node *orig_node,
77 unsigned short vid)
78{
79 struct batadv_orig_node_vlan *vlan = NULL, *tmp;
80
81 rcu_read_lock();
82 hlist_for_each_entry_rcu(tmp, &orig_node->vlan_list, list) {
83 if (tmp->vid != vid)
84 continue;
85
86 if (!kref_get_unless_zero(&tmp->refcount))
87 continue;
88
89 vlan = tmp;
90
91 break;
92 }
93 rcu_read_unlock();
94
95 return vlan;
96}
97
98/**
99 * batadv_orig_node_vlan_new - search and possibly create an orig_node_vlan
100 * object
101 * @orig_node: the originator serving the VLAN
102 * @vid: the VLAN identifier
103 *
104 * Return: NULL in case of failure or the vlan object identified by vid and
105 * belonging to orig_node otherwise. The object is created and added to the list
106 * if it does not exist.
107 *
108 * The object is returned with refcounter increased by 1.
109 */
110struct batadv_orig_node_vlan *
111batadv_orig_node_vlan_new(struct batadv_orig_node *orig_node,
112 unsigned short vid)
113{
114 struct batadv_orig_node_vlan *vlan;
115
116 spin_lock_bh(&orig_node->vlan_list_lock);
117
118 /* first check whether an object for this vid already exists */
119 vlan = batadv_orig_node_vlan_get(orig_node, vid);
120 if (vlan)
121 goto out;
122
123 vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC);
124 if (!vlan)
125 goto out;
126
127 kref_init(&vlan->refcount);
128 kref_get(&vlan->refcount);
129 vlan->vid = vid;
130
131 hlist_add_head_rcu(&vlan->list, &orig_node->vlan_list);
132
133out:
134 spin_unlock_bh(&orig_node->vlan_list_lock);
135
136 return vlan;
137}
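/* Hypothetical usage sketch (not part of the original file): a caller that
 * needs the per-VLAN state of an originator would typically combine the two
 * helpers above as shown below and drop the reference once it is done. The
 * example function name is made up for illustration.
 */
static int batadv_example_touch_orig_vlan(struct batadv_orig_node *orig_node,
					  unsigned short vid)
{
	struct batadv_orig_node_vlan *vlan;

	/* returned with its refcount already increased by one */
	vlan = batadv_orig_node_vlan_new(orig_node, vid);
	if (!vlan)
		return -ENOMEM;

	/* ... read or update the per-VLAN data here ... */

	batadv_orig_node_vlan_free_ref(vlan);
	return 0;
}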
138
139/**
140 * batadv_orig_node_vlan_release - release originator-vlan object from lists
141 * and queue for free after rcu grace period
142 * @ref: kref pointer of the originator-vlan object
143 */
144static void batadv_orig_node_vlan_release(struct kref *ref)
145{
146 struct batadv_orig_node_vlan *orig_vlan;
147
148 orig_vlan = container_of(ref, struct batadv_orig_node_vlan, refcount);
149
150 kfree_rcu(orig_vlan, rcu);
151}
152
153/**
154 * batadv_orig_node_vlan_free_ref - decrement the refcounter and possibly
155 * release the originator-vlan object
156 * @orig_vlan: the originator-vlan object to release
157 */
158void batadv_orig_node_vlan_free_ref(struct batadv_orig_node_vlan *orig_vlan)
159{
160 kref_put(&orig_vlan->refcount, batadv_orig_node_vlan_release);
161}
162
163int batadv_originator_init(struct batadv_priv *bat_priv)
164{
165 if (bat_priv->orig_hash)
166 return 0;
167
168 bat_priv->orig_hash = batadv_hash_new(1024);
169
170 if (!bat_priv->orig_hash)
171 goto err;
172
173 batadv_hash_set_lock_class(bat_priv->orig_hash,
174 &batadv_orig_hash_lock_class_key);
175
176 INIT_DELAYED_WORK(&bat_priv->orig_work, batadv_purge_orig);
177 queue_delayed_work(batadv_event_workqueue,
178 &bat_priv->orig_work,
179 msecs_to_jiffies(BATADV_ORIG_WORK_PERIOD));
180
181 return 0;
182
183err:
184 return -ENOMEM;
185}
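/* Hypothetical sketch of the expected pairing: batadv_originator_init()
 * allocates the originator hash and arms the periodic purge work, while
 * batadv_originator_free() (further down in this file) tears both down
 * again. The wrapper below and its name are purely illustrative.
 */
static int batadv_example_orig_bringup(struct batadv_priv *bat_priv)
{
	int ret;

	ret = batadv_originator_init(bat_priv);
	if (ret < 0)
		return ret;

	/* ... initialise further subsystems here; if one of them fails,
	 * this step has to be undone with batadv_originator_free(bat_priv)
	 */
	return 0;
}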
186
187/**
188 * batadv_neigh_ifinfo_release - release neigh_ifinfo from lists and queue for
189 * free after rcu grace period
190 * @ref: kref pointer of the neigh_ifinfo
191 */
192static void batadv_neigh_ifinfo_release(struct kref *ref)
193{
194 struct batadv_neigh_ifinfo *neigh_ifinfo;
195
196 neigh_ifinfo = container_of(ref, struct batadv_neigh_ifinfo, refcount);
197
198 if (neigh_ifinfo->if_outgoing != BATADV_IF_DEFAULT)
199 batadv_hardif_free_ref(neigh_ifinfo->if_outgoing);
200
201 kfree_rcu(neigh_ifinfo, rcu);
202}
203
204/**
205 * batadv_neigh_ifinfo_free_ref - decrement the refcounter and possibly release
206 * the neigh_ifinfo
207 * @neigh_ifinfo: the neigh_ifinfo object to release
208 */
209void batadv_neigh_ifinfo_free_ref(struct batadv_neigh_ifinfo *neigh_ifinfo)
210{
211 kref_put(&neigh_ifinfo->refcount, batadv_neigh_ifinfo_release);
212}
213
214/**
215 * batadv_hardif_neigh_release - release hardif neigh node from lists and
216 * queue for free after rcu grace period
217 * @ref: kref pointer of the neigh_node
218 */
219static void batadv_hardif_neigh_release(struct kref *ref)
220{
221 struct batadv_hardif_neigh_node *hardif_neigh;
222
223 hardif_neigh = container_of(ref, struct batadv_hardif_neigh_node,
224 refcount);
225
226 spin_lock_bh(&hardif_neigh->if_incoming->neigh_list_lock);
227 hlist_del_init_rcu(&hardif_neigh->list);
228 spin_unlock_bh(&hardif_neigh->if_incoming->neigh_list_lock);
229
230 batadv_hardif_free_ref(hardif_neigh->if_incoming);
231 kfree_rcu(hardif_neigh, rcu);
232}
233
234/**
235 * batadv_hardif_neigh_free_ref - decrement the hardif neighbors refcounter
236 * and possibly release it
237 * @hardif_neigh: hardif neigh neighbor to free
238 */
239void batadv_hardif_neigh_free_ref(struct batadv_hardif_neigh_node *hardif_neigh)
240{
241 kref_put(&hardif_neigh->refcount, batadv_hardif_neigh_release);
242}
243
244/**
245 * batadv_neigh_node_release - release neigh_node from lists and queue for
246 * free after rcu grace period
247 * @ref: kref pointer of the neigh_node
248 */
249static void batadv_neigh_node_release(struct kref *ref)
250{
251 struct hlist_node *node_tmp;
252 struct batadv_neigh_node *neigh_node;
253 struct batadv_hardif_neigh_node *hardif_neigh;
254 struct batadv_neigh_ifinfo *neigh_ifinfo;
255 struct batadv_algo_ops *bao;
256
257 neigh_node = container_of(ref, struct batadv_neigh_node, refcount);
258 bao = neigh_node->orig_node->bat_priv->bat_algo_ops;
259
260 hlist_for_each_entry_safe(neigh_ifinfo, node_tmp,
261 &neigh_node->ifinfo_list, list) {
262 batadv_neigh_ifinfo_free_ref(neigh_ifinfo);
263 }
264
265 hardif_neigh = batadv_hardif_neigh_get(neigh_node->if_incoming,
266 neigh_node->addr);
267 if (hardif_neigh) {
268 /* batadv_hardif_neigh_get() increases refcount too */
269 batadv_hardif_neigh_free_ref(hardif_neigh);
270 batadv_hardif_neigh_free_ref(hardif_neigh);
271 }
272
273 if (bao->bat_neigh_free)
274 bao->bat_neigh_free(neigh_node);
275
276 batadv_hardif_free_ref(neigh_node->if_incoming);
277
278 kfree_rcu(neigh_node, rcu);
279}
280
281/**
282 * batadv_neigh_node_free_ref - decrement the neighbors refcounter and possibly
283 * release it
284 * @neigh_node: neigh neighbor to free
285 */
286void batadv_neigh_node_free_ref(struct batadv_neigh_node *neigh_node)
287{
288 kref_put(&neigh_node->refcount, batadv_neigh_node_release);
289}
290
291/**
292 * batadv_orig_router_get - router to the originator depending on iface
293 * @orig_node: the orig node for the router
294 * @if_outgoing: the interface where the payload packet has been received or
295 * the OGM should be sent to
296 *
297 * Return: the neighbor which should be router for this orig_node/iface.
298 *
299 * The object is returned with refcounter increased by 1.
300 */
301struct batadv_neigh_node *
302batadv_orig_router_get(struct batadv_orig_node *orig_node,
303 const struct batadv_hard_iface *if_outgoing)
304{
305 struct batadv_orig_ifinfo *orig_ifinfo;
306 struct batadv_neigh_node *router = NULL;
307
308 rcu_read_lock();
309 hlist_for_each_entry_rcu(orig_ifinfo, &orig_node->ifinfo_list, list) {
310 if (orig_ifinfo->if_outgoing != if_outgoing)
311 continue;
312
313 router = rcu_dereference(orig_ifinfo->router);
314 break;
315 }
316
317 if (router && !kref_get_unless_zero(&router->refcount))
318 router = NULL;
319
320 rcu_read_unlock();
321 return router;
322}
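/* Hypothetical usage sketch (not part of the original file): a forwarding
 * path would look up the router for the default interface and release the
 * reference taken above with batadv_neigh_node_free_ref() once it is done.
 * The example function name is made up.
 */
static bool batadv_example_orig_has_router(struct batadv_orig_node *orig_node)
{
	struct batadv_neigh_node *router;

	router = batadv_orig_router_get(orig_node, BATADV_IF_DEFAULT);
	if (!router)
		return false;

	/* ... e.g. pick router->if_incoming as the outgoing interface ... */

	batadv_neigh_node_free_ref(router);
	return true;
}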
323
324/**
325 * batadv_orig_ifinfo_get - find the ifinfo from an orig_node
326 * @orig_node: the orig node to be queried
327 * @if_outgoing: the interface for which the ifinfo should be acquired
328 *
329 * Return: the requested orig_ifinfo or NULL if not found.
330 *
331 * The object is returned with refcounter increased by 1.
332 */
333struct batadv_orig_ifinfo *
334batadv_orig_ifinfo_get(struct batadv_orig_node *orig_node,
335 struct batadv_hard_iface *if_outgoing)
336{
337 struct batadv_orig_ifinfo *tmp, *orig_ifinfo = NULL;
338
339 rcu_read_lock();
340 hlist_for_each_entry_rcu(tmp, &orig_node->ifinfo_list,
341 list) {
342 if (tmp->if_outgoing != if_outgoing)
343 continue;
344
345 if (!kref_get_unless_zero(&tmp->refcount))
346 continue;
347
348 orig_ifinfo = tmp;
349 break;
350 }
351 rcu_read_unlock();
352
353 return orig_ifinfo;
354}
355
356/**
357 * batadv_orig_ifinfo_new - search and possibly create an orig_ifinfo object
358 * @orig_node: the orig node to be queried
359 * @if_outgoing: the interface for which the ifinfo should be acquired
360 *
361 * Return: NULL in case of failure or the orig_ifinfo object for the if_outgoing
362 * interface otherwise. The object is created and added to the list
363 * if it does not exist.
364 *
365 * The object is returned with refcounter increased by 1.
366 */
367struct batadv_orig_ifinfo *
368batadv_orig_ifinfo_new(struct batadv_orig_node *orig_node,
369 struct batadv_hard_iface *if_outgoing)
370{
371 struct batadv_orig_ifinfo *orig_ifinfo = NULL;
372 unsigned long reset_time;
373
374 spin_lock_bh(&orig_node->neigh_list_lock);
375
376 orig_ifinfo = batadv_orig_ifinfo_get(orig_node, if_outgoing);
377 if (orig_ifinfo)
378 goto out;
379
380 orig_ifinfo = kzalloc(sizeof(*orig_ifinfo), GFP_ATOMIC);
381 if (!orig_ifinfo)
382 goto out;
383
384 if (if_outgoing != BATADV_IF_DEFAULT &&
385 !kref_get_unless_zero(&if_outgoing->refcount)) {
386 kfree(orig_ifinfo);
387 orig_ifinfo = NULL;
388 goto out;
389 }
390
391 reset_time = jiffies - 1;
392 reset_time -= msecs_to_jiffies(BATADV_RESET_PROTECTION_MS);
393 orig_ifinfo->batman_seqno_reset = reset_time;
394 orig_ifinfo->if_outgoing = if_outgoing;
395 INIT_HLIST_NODE(&orig_ifinfo->list);
396 kref_init(&orig_ifinfo->refcount);
397 kref_get(&orig_ifinfo->refcount);
398 hlist_add_head_rcu(&orig_ifinfo->list,
399 &orig_node->ifinfo_list);
400out:
401 spin_unlock_bh(&orig_node->neigh_list_lock);
402 return orig_ifinfo;
403}
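/* Hypothetical usage sketch: an OGM receive path would fetch (or create)
 * the per-outgoing-interface state of an originator, update it and release
 * the reference again right away. The example function name is made up.
 */
static void batadv_example_touch_orig_ifinfo(struct batadv_orig_node *orig_node,
					     struct batadv_hard_iface *if_outgoing)
{
	struct batadv_orig_ifinfo *orig_ifinfo;

	orig_ifinfo = batadv_orig_ifinfo_new(orig_node, if_outgoing);
	if (!orig_ifinfo)
		return;

	/* ... update the per-interface sequence number state here ... */

	batadv_orig_ifinfo_free_ref(orig_ifinfo);
}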
404
405/**
406 * batadv_neigh_ifinfo_get - find the ifinfo from a neigh_node
407 * @neigh: the neigh node to be queried
408 * @if_outgoing: the interface for which the ifinfo should be acquired
409 *
410 * The object is returned with refcounter increased by 1.
411 *
412 * Return: the requested neigh_ifinfo or NULL if not found
413 */
414struct batadv_neigh_ifinfo *
415batadv_neigh_ifinfo_get(struct batadv_neigh_node *neigh,
416 struct batadv_hard_iface *if_outgoing)
417{
418 struct batadv_neigh_ifinfo *neigh_ifinfo = NULL,
419 *tmp_neigh_ifinfo;
420
421 rcu_read_lock();
422 hlist_for_each_entry_rcu(tmp_neigh_ifinfo, &neigh->ifinfo_list,
423 list) {
424 if (tmp_neigh_ifinfo->if_outgoing != if_outgoing)
425 continue;
426
427 if (!kref_get_unless_zero(&tmp_neigh_ifinfo->refcount))
428 continue;
429
430 neigh_ifinfo = tmp_neigh_ifinfo;
431 break;
432 }
433 rcu_read_unlock();
434
435 return neigh_ifinfo;
436}
437
438/**
439 * batadv_neigh_ifinfo_new - search and possibly create a neigh_ifinfo object
440 * @neigh: the neigh node to be queried
441 * @if_outgoing: the interface for which the ifinfo should be acquired
442 *
443 * Return: NULL in case of failure or the neigh_ifinfo object for the
444 * if_outgoing interface otherwise. The object is created and added to the list
445 * if it does not exist.
446 *
447 * The object is returned with refcounter increased by 1.
448 */
449struct batadv_neigh_ifinfo *
450batadv_neigh_ifinfo_new(struct batadv_neigh_node *neigh,
451 struct batadv_hard_iface *if_outgoing)
452{
453 struct batadv_neigh_ifinfo *neigh_ifinfo;
454
455 spin_lock_bh(&neigh->ifinfo_lock);
456
457 neigh_ifinfo = batadv_neigh_ifinfo_get(neigh, if_outgoing);
458 if (neigh_ifinfo)
459 goto out;
460
461 neigh_ifinfo = kzalloc(sizeof(*neigh_ifinfo), GFP_ATOMIC);
462 if (!neigh_ifinfo)
463 goto out;
464
465 if (if_outgoing && !kref_get_unless_zero(&if_outgoing->refcount)) {
466 kfree(neigh_ifinfo);
467 neigh_ifinfo = NULL;
468 goto out;
469 }
470
471 INIT_HLIST_NODE(&neigh_ifinfo->list);
472 kref_init(&neigh_ifinfo->refcount);
473 kref_get(&neigh_ifinfo->refcount);
474 neigh_ifinfo->if_outgoing = if_outgoing;
475
476 hlist_add_head_rcu(&neigh_ifinfo->list, &neigh->ifinfo_list);
477
478out:
479 spin_unlock_bh(&neigh->ifinfo_lock);
480
481 return neigh_ifinfo;
482}
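/* Hypothetical usage sketch: metric updates are stored per neighbour and
 * per outgoing interface, so a routing algorithm would typically obtain the
 * matching neigh_ifinfo, modify it and drop the reference afterwards. The
 * example function name is made up.
 */
static void batadv_example_touch_neigh_ifinfo(struct batadv_neigh_node *neigh,
					      struct batadv_hard_iface *if_outgoing)
{
	struct batadv_neigh_ifinfo *neigh_ifinfo;

	neigh_ifinfo = batadv_neigh_ifinfo_new(neigh, if_outgoing);
	if (!neigh_ifinfo)
		return;

	/* ... update the algorithm specific metric data here ... */

	batadv_neigh_ifinfo_free_ref(neigh_ifinfo);
}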
483
484/**
485 * batadv_neigh_node_get - retrieve a neighbour from the list
486 * @orig_node: originator which the neighbour belongs to
487 * @hard_iface: the interface where this neighbour is connected to
488 * @addr: the address of the neighbour
489 *
490 * Looks for and possibly returns a neighbour belonging to this originator
491 * which is connected through the provided hard interface.
492 *
493 * Return: the neighbour if found, NULL otherwise
494 */
495static struct batadv_neigh_node *
496batadv_neigh_node_get(const struct batadv_orig_node *orig_node,
497 const struct batadv_hard_iface *hard_iface,
498 const u8 *addr)
499{
500 struct batadv_neigh_node *tmp_neigh_node, *res = NULL;
501
502 rcu_read_lock();
503 hlist_for_each_entry_rcu(tmp_neigh_node, &orig_node->neigh_list, list) {
504 if (!batadv_compare_eth(tmp_neigh_node->addr, addr))
505 continue;
506
507 if (tmp_neigh_node->if_incoming != hard_iface)
508 continue;
509
510 if (!kref_get_unless_zero(&tmp_neigh_node->refcount))
511 continue;
512
513 res = tmp_neigh_node;
514 break;
515 }
516 rcu_read_unlock();
517
518 return res;
519}
520
521/**
522 * batadv_hardif_neigh_create - create a hardif neighbour node
523 * @hard_iface: the interface this neighbour is connected to
524 * @neigh_addr: the interface address of the neighbour to create
525 *
526 * Return: the hardif neighbour node if found or created or NULL otherwise.
527 */
528static struct batadv_hardif_neigh_node *
529batadv_hardif_neigh_create(struct batadv_hard_iface *hard_iface,
530 const u8 *neigh_addr)
531{
532 struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
533 struct batadv_hardif_neigh_node *hardif_neigh = NULL;
534
535 spin_lock_bh(&hard_iface->neigh_list_lock);
536
537 /* check if neighbor hasn't been added in the meantime */
538 hardif_neigh = batadv_hardif_neigh_get(hard_iface, neigh_addr);
539 if (hardif_neigh)
540 goto out;
541
542 if (!kref_get_unless_zero(&hard_iface->refcount))
543 goto out;
544
545 hardif_neigh = kzalloc(sizeof(*hardif_neigh), GFP_ATOMIC);
546 if (!hardif_neigh) {
547 batadv_hardif_free_ref(hard_iface);
548 goto out;
549 }
550
551 INIT_HLIST_NODE(&hardif_neigh->list);
552 ether_addr_copy(hardif_neigh->addr, neigh_addr);
553 hardif_neigh->if_incoming = hard_iface;
554 hardif_neigh->last_seen = jiffies;
555
556 kref_init(&hardif_neigh->refcount);
557
558 if (bat_priv->bat_algo_ops->bat_hardif_neigh_init)
559 bat_priv->bat_algo_ops->bat_hardif_neigh_init(hardif_neigh);
560
561 hlist_add_head(&hardif_neigh->list, &hard_iface->neigh_list);
562
563out:
564 spin_unlock_bh(&hard_iface->neigh_list_lock);
565 return hardif_neigh;
566}
567
568/**
569 * batadv_hardif_neigh_get_or_create - retrieve or create a hardif neighbour
570 * node
571 * @hard_iface: the interface this neighbour is connected to
572 * @neigh_addr: the interface address of the neighbour to retrieve
573 *
574 * Return: the hardif neighbour node if found or created or NULL otherwise.
575 */
576static struct batadv_hardif_neigh_node *
577batadv_hardif_neigh_get_or_create(struct batadv_hard_iface *hard_iface,
578 const u8 *neigh_addr)
579{
580 struct batadv_hardif_neigh_node *hardif_neigh = NULL;
581
582 /* first check without locking to avoid the overhead */
583 hardif_neigh = batadv_hardif_neigh_get(hard_iface, neigh_addr);
584 if (hardif_neigh)
585 return hardif_neigh;
586
587 return batadv_hardif_neigh_create(hard_iface, neigh_addr);
588}
589
590/**
591 * batadv_hardif_neigh_get - retrieve a hardif neighbour from the list
592 * @hard_iface: the interface where this neighbour is connected to
593 * @neigh_addr: the address of the neighbour
594 *
595 * Looks for and possibly returns a neighbour belonging to this hard interface.
596 *
597 * Return: the neighbour if found, NULL otherwise
598 */
599struct batadv_hardif_neigh_node *
600batadv_hardif_neigh_get(const struct batadv_hard_iface *hard_iface,
601 const u8 *neigh_addr)
602{
603 struct batadv_hardif_neigh_node *tmp_hardif_neigh, *hardif_neigh = NULL;
604
605 rcu_read_lock();
606 hlist_for_each_entry_rcu(tmp_hardif_neigh,
607 &hard_iface->neigh_list, list) {
608 if (!batadv_compare_eth(tmp_hardif_neigh->addr, neigh_addr))
609 continue;
610
611 if (!kref_get_unless_zero(&tmp_hardif_neigh->refcount))
612 continue;
613
614 hardif_neigh = tmp_hardif_neigh;
615 break;
616 }
617 rcu_read_unlock();
618
619 return hardif_neigh;
620}
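/* Hypothetical usage sketch: interface-level code can check whether a
 * single-hop neighbour is already known on a hard interface; the reference
 * taken by batadv_hardif_neigh_get() has to be dropped again with
 * batadv_hardif_neigh_free_ref(). The example function name is made up.
 */
static bool batadv_example_hardif_neigh_known(struct batadv_hard_iface *hard_iface,
					      const u8 *neigh_addr)
{
	struct batadv_hardif_neigh_node *hardif_neigh;

	hardif_neigh = batadv_hardif_neigh_get(hard_iface, neigh_addr);
	if (!hardif_neigh)
		return false;

	batadv_hardif_neigh_free_ref(hardif_neigh);
	return true;
}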
621
622/**
623 * batadv_neigh_node_new - create and init a new neigh_node object
624 * @orig_node: originator object representing the neighbour
625 * @hard_iface: the interface where the neighbour is connected to
626 * @neigh_addr: the mac address of the neighbour interface
627 *
628 * Allocates a new neigh_node object and initialises all the generic fields.
629 *
630 * Return: the neighbour node if found or created, NULL otherwise
631 */
632struct batadv_neigh_node *
633batadv_neigh_node_new(struct batadv_orig_node *orig_node,
634 struct batadv_hard_iface *hard_iface,
635 const u8 *neigh_addr)
636{
637 struct batadv_neigh_node *neigh_node;
638 struct batadv_hardif_neigh_node *hardif_neigh = NULL;
639
640 neigh_node = batadv_neigh_node_get(orig_node, hard_iface, neigh_addr);
641 if (neigh_node)
642 goto out;
643
644 hardif_neigh = batadv_hardif_neigh_get_or_create(hard_iface,
645 neigh_addr);
646 if (!hardif_neigh)
647 goto out;
648
649 neigh_node = kzalloc(sizeof(*neigh_node), GFP_ATOMIC);
650 if (!neigh_node)
651 goto out;
652
653 if (!kref_get_unless_zero(&hard_iface->refcount)) {
654 kfree(neigh_node);
655 neigh_node = NULL;
656 goto out;
657 }
658
659 INIT_HLIST_NODE(&neigh_node->list);
660 INIT_HLIST_HEAD(&neigh_node->ifinfo_list);
661 spin_lock_init(&neigh_node->ifinfo_lock);
662
663 ether_addr_copy(neigh_node->addr, neigh_addr);
664 neigh_node->if_incoming = hard_iface;
665 neigh_node->orig_node = orig_node;
666
667 /* extra reference for return */
668 kref_init(&neigh_node->refcount);
669 kref_get(&neigh_node->refcount);
670
671 spin_lock_bh(&orig_node->neigh_list_lock);
672 hlist_add_head_rcu(&neigh_node->list, &orig_node->neigh_list);
673 spin_unlock_bh(&orig_node->neigh_list_lock);
674
675 /* increment unique neighbor refcount */
676 kref_get(&hardif_neigh->refcount);
677
678 batadv_dbg(BATADV_DBG_BATMAN, orig_node->bat_priv,
679 "Creating new neighbor %pM for orig_node %pM on interface %s\n",
680 neigh_addr, orig_node->orig, hard_iface->net_dev->name);
681
682out:
683 if (hardif_neigh)
684 batadv_hardif_neigh_free_ref(hardif_neigh);
685 return neigh_node;
686}
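/* Hypothetical usage sketch (not part of the original file): the receive
 * path of a routing algorithm creates (or re-uses) the neigh_node for the
 * sender of an incoming packet, refreshes it and balances the extra
 * reference returned above. The example function name is made up.
 */
static void batadv_example_note_neighbor(struct batadv_orig_node *orig_node,
					 struct batadv_hard_iface *hard_iface,
					 const u8 *neigh_addr)
{
	struct batadv_neigh_node *neigh_node;

	neigh_node = batadv_neigh_node_new(orig_node, hard_iface, neigh_addr);
	if (!neigh_node)
		return;

	/* ... e.g. refresh neigh_node->last_seen with jiffies here ... */

	batadv_neigh_node_free_ref(neigh_node);
}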
687
688/**
689 * batadv_hardif_neigh_seq_print_text - print the single hop neighbour list
690 * @seq: neighbour table seq_file struct
691 * @offset: not used
692 *
693 * Return: always 0
694 */
695int batadv_hardif_neigh_seq_print_text(struct seq_file *seq, void *offset)
696{
697 struct net_device *net_dev = (struct net_device *)seq->private;
698 struct batadv_priv *bat_priv = netdev_priv(net_dev);
699 struct batadv_hard_iface *primary_if;
700
701 primary_if = batadv_seq_print_text_primary_if_get(seq);
702 if (!primary_if)
703 return 0;
704
705 seq_printf(seq, "[B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s %s)]\n",
706 BATADV_SOURCE_VERSION, primary_if->net_dev->name,
707 primary_if->net_dev->dev_addr, net_dev->name,
708 bat_priv->bat_algo_ops->name);
709
710 batadv_hardif_free_ref(primary_if);
711
712 if (!bat_priv->bat_algo_ops->bat_neigh_print) {
713 seq_puts(seq,
714 "No printing function for this routing protocol\n");
715 return 0;
716 }
717
718 bat_priv->bat_algo_ops->bat_neigh_print(bat_priv, seq);
719 return 0;
720}
721
722/**
723 * batadv_orig_ifinfo_release - release orig_ifinfo from lists and queue for
724 * free after rcu grace period
725 * @ref: kref pointer of the orig_ifinfo
726 */
727static void batadv_orig_ifinfo_release(struct kref *ref)
728{
729 struct batadv_orig_ifinfo *orig_ifinfo;
730 struct batadv_neigh_node *router;
731
732 orig_ifinfo = container_of(ref, struct batadv_orig_ifinfo, refcount);
733
734 if (orig_ifinfo->if_outgoing != BATADV_IF_DEFAULT)
735 batadv_hardif_free_ref(orig_ifinfo->if_outgoing);
736
737 /* this is the last reference to this object */
738 router = rcu_dereference_protected(orig_ifinfo->router, true);
739 if (router)
740 batadv_neigh_node_free_ref(router);
741
742 kfree_rcu(orig_ifinfo, rcu);
743}
744
745/**
746 * batadv_orig_ifinfo_free_ref - decrement the refcounter and possibly release
747 * the orig_ifinfo
748 * @orig_ifinfo: the orig_ifinfo object to release
749 */
750void batadv_orig_ifinfo_free_ref(struct batadv_orig_ifinfo *orig_ifinfo)
751{
752 kref_put(&orig_ifinfo->refcount, batadv_orig_ifinfo_release);
753}
754
755/**
756 * batadv_orig_node_free_rcu - free the orig_node
757 * @rcu: rcu pointer of the orig_node
758 */
759static void batadv_orig_node_free_rcu(struct rcu_head *rcu)
760{
761 struct batadv_orig_node *orig_node;
762
763 orig_node = container_of(rcu, struct batadv_orig_node, rcu);
764
765 batadv_mcast_purge_orig(orig_node);
766
767 batadv_frag_purge_orig(orig_node, NULL);
768
769 if (orig_node->bat_priv->bat_algo_ops->bat_orig_free)
770 orig_node->bat_priv->bat_algo_ops->bat_orig_free(orig_node);
771
772 kfree(orig_node->tt_buff);
773 kfree(orig_node);
774}
775
776/**
777 * batadv_orig_node_release - release orig_node from lists and queue for
778 * free after rcu grace period
779 * @ref: kref pointer of the orig_node
780 */
781static void batadv_orig_node_release(struct kref *ref)
782{
783 struct hlist_node *node_tmp;
784 struct batadv_neigh_node *neigh_node;
785 struct batadv_orig_node *orig_node;
786 struct batadv_orig_ifinfo *orig_ifinfo;
787
788 orig_node = container_of(ref, struct batadv_orig_node, refcount);
789
790 spin_lock_bh(&orig_node->neigh_list_lock);
791
792 /* for all neighbors towards this originator ... */
793 hlist_for_each_entry_safe(neigh_node, node_tmp,
794 &orig_node->neigh_list, list) {
795 hlist_del_rcu(&neigh_node->list);
796 batadv_neigh_node_free_ref(neigh_node);
797 }
798
799 hlist_for_each_entry_safe(orig_ifinfo, node_tmp,
800 &orig_node->ifinfo_list, list) {
801 hlist_del_rcu(&orig_ifinfo->list);
802 batadv_orig_ifinfo_free_ref(orig_ifinfo);
803 }
804 spin_unlock_bh(&orig_node->neigh_list_lock);
805
806 /* Free nc_nodes */
807 batadv_nc_purge_orig(orig_node->bat_priv, orig_node, NULL);
808
809 call_rcu(&orig_node->rcu, batadv_orig_node_free_rcu);
810}
811
812/**
813 * batadv_orig_node_free_ref - decrement the orig node refcounter and possibly
814 * release it
815 * @orig_node: the orig node to free
816 */
817void batadv_orig_node_free_ref(struct batadv_orig_node *orig_node)
818{
819 kref_put(&orig_node->refcount, batadv_orig_node_release);
820}
821
822void batadv_originator_free(struct batadv_priv *bat_priv)
823{
824 struct batadv_hashtable *hash = bat_priv->orig_hash;
825 struct hlist_node *node_tmp;
826 struct hlist_head *head;
827 spinlock_t *list_lock; /* spinlock to protect write access */
828 struct batadv_orig_node *orig_node;
829 u32 i;
830
831 if (!hash)
832 return;
833
834 cancel_delayed_work_sync(&bat_priv->orig_work);
835
836 bat_priv->orig_hash = NULL;
837
838 for (i = 0; i < hash->size; i++) {
839 head = &hash->table[i];
840 list_lock = &hash->list_locks[i];
841
842 spin_lock_bh(list_lock);
843 hlist_for_each_entry_safe(orig_node, node_tmp,
844 head, hash_entry) {
845 hlist_del_rcu(&orig_node->hash_entry);
846 batadv_orig_node_free_ref(orig_node);
847 }
848 spin_unlock_bh(list_lock);
849 }
850
851 batadv_hash_destroy(hash);
852}
853
854/**
855 * batadv_orig_node_new - creates a new orig_node
856 * @bat_priv: the bat priv with all the soft interface information
857 * @addr: the mac address of the originator
858 *
859 * Creates a new originator object and initialises all the generic fields.
860 * The new object is not added to the originator list.
861 *
862 * Return: the newly created object or NULL on failure.
863 */
864struct batadv_orig_node *batadv_orig_node_new(struct batadv_priv *bat_priv,
865 const u8 *addr)
866{
867 struct batadv_orig_node *orig_node;
868 struct batadv_orig_node_vlan *vlan;
869 unsigned long reset_time;
870 int i;
871
872 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
873 "Creating new originator: %pM\n", addr);
874
875 orig_node = kzalloc(sizeof(*orig_node), GFP_ATOMIC);
876 if (!orig_node)
877 return NULL;
878
879 INIT_HLIST_HEAD(&orig_node->neigh_list);
880 INIT_HLIST_HEAD(&orig_node->vlan_list);
881 INIT_HLIST_HEAD(&orig_node->ifinfo_list);
882 spin_lock_init(&orig_node->bcast_seqno_lock);
883 spin_lock_init(&orig_node->neigh_list_lock);
884 spin_lock_init(&orig_node->tt_buff_lock);
885 spin_lock_init(&orig_node->tt_lock);
886 spin_lock_init(&orig_node->vlan_list_lock);
887
888 batadv_nc_init_orig(orig_node);
889
890 /* extra reference for return */
891 kref_init(&orig_node->refcount);
892 kref_get(&orig_node->refcount);
893
894 orig_node->bat_priv = bat_priv;
895 ether_addr_copy(orig_node->orig, addr);
896 batadv_dat_init_orig_node_addr(orig_node);
897 atomic_set(&orig_node->last_ttvn, 0);
898 orig_node->tt_buff = NULL;
899 orig_node->tt_buff_len = 0;
900 orig_node->last_seen = jiffies;
901 reset_time = jiffies - 1 - msecs_to_jiffies(BATADV_RESET_PROTECTION_MS);
902 orig_node->bcast_seqno_reset = reset_time;
903
904#ifdef CONFIG_BATMAN_ADV_MCAST
905 orig_node->mcast_flags = BATADV_NO_FLAGS;
906 INIT_HLIST_NODE(&orig_node->mcast_want_all_unsnoopables_node);
907 INIT_HLIST_NODE(&orig_node->mcast_want_all_ipv4_node);
908 INIT_HLIST_NODE(&orig_node->mcast_want_all_ipv6_node);
909 spin_lock_init(&orig_node->mcast_handler_lock);
910#endif
911
912 /* create a vlan object for the "untagged" LAN */
913 vlan = batadv_orig_node_vlan_new(orig_node, BATADV_NO_FLAGS);
914 if (!vlan)
915 goto free_orig_node;
916 /* batadv_orig_node_vlan_new() increases the refcounter.
917 * Immediately release vlan since it is not needed anymore in this
918 * context
919 */
920 batadv_orig_node_vlan_free_ref(vlan);
921
922 for (i = 0; i < BATADV_FRAG_BUFFER_COUNT; i++) {
923 INIT_HLIST_HEAD(&orig_node->fragments[i].head);
924 spin_lock_init(&orig_node->fragments[i].lock);
925 orig_node->fragments[i].size = 0;
926 }
927
928 return orig_node;
929free_orig_node:
930 kfree(orig_node);
931 return NULL;
932}
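/* Hypothetical usage sketch: batadv_orig_node_new() only allocates and
 * initialises the object; the calling routing algorithm is expected to
 * insert it into bat_priv->orig_hash itself and to drop the extra reference
 * with batadv_orig_node_free_ref() once its own pointer is no longer
 * needed. The example function name is made up.
 */
static struct batadv_orig_node *
batadv_example_orig_alloc(struct batadv_priv *bat_priv, const u8 *addr)
{
	struct batadv_orig_node *orig_node;

	orig_node = batadv_orig_node_new(bat_priv, addr);
	if (!orig_node)
		return NULL;

	/* if adding the node to the hash fails, the caller has to release
	 * it again with batadv_orig_node_free_ref(orig_node)
	 */
	return orig_node;
}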
933
934/**
935 * batadv_purge_neigh_ifinfo - purge obsolete ifinfo entries from neighbor
936 * @bat_priv: the bat priv with all the soft interface information
937 * @neigh: the neighbour node whose ifinfo entries are to be checked
938 */
939static void
940batadv_purge_neigh_ifinfo(struct batadv_priv *bat_priv,
941 struct batadv_neigh_node *neigh)
942{
943 struct batadv_neigh_ifinfo *neigh_ifinfo;
944 struct batadv_hard_iface *if_outgoing;
945 struct hlist_node *node_tmp;
946
947 spin_lock_bh(&neigh->ifinfo_lock);
948
949 /* for all ifinfo objects of this neighbour */
950 hlist_for_each_entry_safe(neigh_ifinfo, node_tmp,
951 &neigh->ifinfo_list, list) {
952 if_outgoing = neigh_ifinfo->if_outgoing;
953
954 /* always keep the default interface */
955 if (if_outgoing == BATADV_IF_DEFAULT)
956 continue;
957
958 /* don't purge if the interface is not (going) down */
959 if ((if_outgoing->if_status != BATADV_IF_INACTIVE) &&
960 (if_outgoing->if_status != BATADV_IF_NOT_IN_USE) &&
961 (if_outgoing->if_status != BATADV_IF_TO_BE_REMOVED))
962 continue;
963
964 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
965 "neighbor/ifinfo purge: neighbor %pM, iface: %s\n",
966 neigh->addr, if_outgoing->net_dev->name);
967
968 hlist_del_rcu(&neigh_ifinfo->list);
969 batadv_neigh_ifinfo_free_ref(neigh_ifinfo);
970 }
971
972 spin_unlock_bh(&neigh->ifinfo_lock);
973}
974
975/**
976 * batadv_purge_orig_ifinfo - purge obsolete ifinfo entries from originator
977 * @bat_priv: the bat priv with all the soft interface information
978 * @orig_node: orig node which is to be checked
979 *
980 * Return: true if any ifinfo entry was purged, false otherwise.
981 */
982static bool
983batadv_purge_orig_ifinfo(struct batadv_priv *bat_priv,
984 struct batadv_orig_node *orig_node)
985{
986 struct batadv_orig_ifinfo *orig_ifinfo;
987 struct batadv_hard_iface *if_outgoing;
988 struct hlist_node *node_tmp;
989 bool ifinfo_purged = false;
990
991 spin_lock_bh(&orig_node->neigh_list_lock);
992
993 /* for all ifinfo objects for this originator */
994 hlist_for_each_entry_safe(orig_ifinfo, node_tmp,
995 &orig_node->ifinfo_list, list) {
996 if_outgoing = orig_ifinfo->if_outgoing;
997
998 /* always keep the default interface */
999 if (if_outgoing == BATADV_IF_DEFAULT)
1000 continue;
1001
1002 /* don't purge if the interface is not (going) down */
1003 if ((if_outgoing->if_status != BATADV_IF_INACTIVE) &&
1004 (if_outgoing->if_status != BATADV_IF_NOT_IN_USE) &&
1005 (if_outgoing->if_status != BATADV_IF_TO_BE_REMOVED))
1006 continue;
1007
1008 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
1009 "router/ifinfo purge: originator %pM, iface: %s\n",
1010 orig_node->orig, if_outgoing->net_dev->name);
1011
1012 ifinfo_purged = true;
1013
1014 hlist_del_rcu(&orig_ifinfo->list);
1015 batadv_orig_ifinfo_free_ref(orig_ifinfo);
1016 if (orig_node->last_bonding_candidate == orig_ifinfo) {
1017 orig_node->last_bonding_candidate = NULL;
1018 batadv_orig_ifinfo_free_ref(orig_ifinfo);
1019 }
1020 }
1021
1022 spin_unlock_bh(&orig_node->neigh_list_lock);
1023
1024 return ifinfo_purged;
1025}
1026
1027/**
1028 * batadv_purge_orig_neighbors - purges neighbors from originator
1029 * @bat_priv: the bat priv with all the soft interface information
1030 * @orig_node: orig node which is to be checked
1031 *
1032 * Return: true if any neighbor was purged, false otherwise
1033 */
1034static bool
1035batadv_purge_orig_neighbors(struct batadv_priv *bat_priv,
1036 struct batadv_orig_node *orig_node)
1037{
1038 struct hlist_node *node_tmp;
1039 struct batadv_neigh_node *neigh_node;
1040 bool neigh_purged = false;
1041 unsigned long last_seen;
1042 struct batadv_hard_iface *if_incoming;
1043
1044 spin_lock_bh(&orig_node->neigh_list_lock);
1045
1046 /* for all neighbors towards this originator ... */
1047 hlist_for_each_entry_safe(neigh_node, node_tmp,
1048 &orig_node->neigh_list, list) {
1049 last_seen = neigh_node->last_seen;
1050 if_incoming = neigh_node->if_incoming;
1051
1052 if ((batadv_has_timed_out(last_seen, BATADV_PURGE_TIMEOUT)) ||
1053 (if_incoming->if_status == BATADV_IF_INACTIVE) ||
1054 (if_incoming->if_status == BATADV_IF_NOT_IN_USE) ||
1055 (if_incoming->if_status == BATADV_IF_TO_BE_REMOVED)) {
1056 if ((if_incoming->if_status == BATADV_IF_INACTIVE) ||
1057 (if_incoming->if_status == BATADV_IF_NOT_IN_USE) ||
1058 (if_incoming->if_status == BATADV_IF_TO_BE_REMOVED))
1059 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
1060 "neighbor purge: originator %pM, neighbor: %pM, iface: %s\n",
1061 orig_node->orig, neigh_node->addr,
1062 if_incoming->net_dev->name);
1063 else
1064 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
1065 "neighbor timeout: originator %pM, neighbor: %pM, last_seen: %u\n",
1066 orig_node->orig, neigh_node->addr,
1067 jiffies_to_msecs(last_seen));
1068
1069 neigh_purged = true;
1070
1071 hlist_del_rcu(&neigh_node->list);
1072 batadv_neigh_node_free_ref(neigh_node);
1073 } else {
1074 /* only necessary if not the whole neighbor is to be
1075 * deleted, but some interface has been removed.
1076 */
1077 batadv_purge_neigh_ifinfo(bat_priv, neigh_node);
1078 }
1079 }
1080
1081 spin_unlock_bh(&orig_node->neigh_list_lock);
1082 return neigh_purged;
1083}
1084
1085/**
1086 * batadv_find_best_neighbor - finds the best neighbor after purging
1087 * @bat_priv: the bat priv with all the soft interface information
1088 * @orig_node: orig node which is to be checked
1089 * @if_outgoing: the interface for which the metric should be compared
1090 *
1091 * Return: the current best neighbor, with refcount increased.
1092 */
1093static struct batadv_neigh_node *
1094batadv_find_best_neighbor(struct batadv_priv *bat_priv,
1095 struct batadv_orig_node *orig_node,
1096 struct batadv_hard_iface *if_outgoing)
1097{
1098 struct batadv_neigh_node *best = NULL, *neigh;
1099 struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;
1100
1101 rcu_read_lock();
1102 hlist_for_each_entry_rcu(neigh, &orig_node->neigh_list, list) {
1103 if (best && (bao->bat_neigh_cmp(neigh, if_outgoing,
1104 best, if_outgoing) <= 0))
1105 continue;
1106
1107 if (!kref_get_unless_zero(&neigh->refcount))
1108 continue;
1109
1110 if (best)
1111 batadv_neigh_node_free_ref(best);
1112
1113 best = neigh;
1114 }
1115 rcu_read_unlock();
1116
1117 return best;
1118}
1119
1120/**
1121 * batadv_purge_orig_node - purges obsolete information from an orig_node
1122 * @bat_priv: the bat priv with all the soft interface information
1123 * @orig_node: orig node which is to be checked
1124 *
1125 * This function checks if the orig_node or substructures of it have become
1126 * obsolete, and purges this information if that's the case.
1127 *
1128 * Return: true if the orig_node is to be removed, false otherwise.
1129 */
1130static bool batadv_purge_orig_node(struct batadv_priv *bat_priv,
1131 struct batadv_orig_node *orig_node)
1132{
1133 struct batadv_neigh_node *best_neigh_node;
1134 struct batadv_hard_iface *hard_iface;
1135 bool changed_ifinfo, changed_neigh;
1136
1137 if (batadv_has_timed_out(orig_node->last_seen,
1138 2 * BATADV_PURGE_TIMEOUT)) {
1139 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
1140 "Originator timeout: originator %pM, last_seen %u\n",
1141 orig_node->orig,
1142 jiffies_to_msecs(orig_node->last_seen));
1143 return true;
1144 }
1145 changed_ifinfo = batadv_purge_orig_ifinfo(bat_priv, orig_node);
1146 changed_neigh = batadv_purge_orig_neighbors(bat_priv, orig_node);
1147
1148 if (!changed_ifinfo && !changed_neigh)
1149 return false;
1150
1151 /* first for the default interface (BATADV_IF_DEFAULT) ... */
1152 best_neigh_node = batadv_find_best_neighbor(bat_priv, orig_node,
1153 BATADV_IF_DEFAULT);
1154 batadv_update_route(bat_priv, orig_node, BATADV_IF_DEFAULT,
1155 best_neigh_node);
1156 if (best_neigh_node)
1157 batadv_neigh_node_free_ref(best_neigh_node);
1158
1159 /* ... then for all other interfaces. */
1160 rcu_read_lock();
1161 list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
1162 if (hard_iface->if_status != BATADV_IF_ACTIVE)
1163 continue;
1164
1165 if (hard_iface->soft_iface != bat_priv->soft_iface)
1166 continue;
1167
1168 best_neigh_node = batadv_find_best_neighbor(bat_priv,
1169 orig_node,
1170 hard_iface);
1171 batadv_update_route(bat_priv, orig_node, hard_iface,
1172 best_neigh_node);
1173 if (best_neigh_node)
1174 batadv_neigh_node_free_ref(best_neigh_node);
1175 }
1176 rcu_read_unlock();
1177
1178 return false;
1179}
1180
1181static void _batadv_purge_orig(struct batadv_priv *bat_priv)
1182{
1183 struct batadv_hashtable *hash = bat_priv->orig_hash;
1184 struct hlist_node *node_tmp;
1185 struct hlist_head *head;
1186 spinlock_t *list_lock; /* spinlock to protect write access */
1187 struct batadv_orig_node *orig_node;
1188 u32 i;
1189
1190 if (!hash)
1191 return;
1192
1193 /* for all origins... */
1194 for (i = 0; i < hash->size; i++) {
1195 head = &hash->table[i];
1196 list_lock = &hash->list_locks[i];
1197
1198 spin_lock_bh(list_lock);
1199 hlist_for_each_entry_safe(orig_node, node_tmp,
1200 head, hash_entry) {
1201 if (batadv_purge_orig_node(bat_priv, orig_node)) {
1202 batadv_gw_node_delete(bat_priv, orig_node);
1203 hlist_del_rcu(&orig_node->hash_entry);
1204 batadv_tt_global_del_orig(orig_node->bat_priv,
1205 orig_node, -1,
1206 "originator timed out");
1207 batadv_orig_node_free_ref(orig_node);
1208 continue;
1209 }
1210
1211 batadv_frag_purge_orig(orig_node,
1212 batadv_frag_check_entry);
1213 }
1214 spin_unlock_bh(list_lock);
1215 }
1216
1217 batadv_gw_election(bat_priv);
1218}
1219
1220static void batadv_purge_orig(struct work_struct *work)
1221{
1222 struct delayed_work *delayed_work;
1223 struct batadv_priv *bat_priv;
1224
1225 delayed_work = container_of(work, struct delayed_work, work);
1226 bat_priv = container_of(delayed_work, struct batadv_priv, orig_work);
1227 _batadv_purge_orig(bat_priv);
1228 queue_delayed_work(batadv_event_workqueue,
1229 &bat_priv->orig_work,
1230 msecs_to_jiffies(BATADV_ORIG_WORK_PERIOD));
1231}
1232
1233void batadv_purge_orig_ref(struct batadv_priv *bat_priv)
1234{
1235 _batadv_purge_orig(bat_priv);
1236}
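/* Hypothetical usage sketch: paths that invalidate many neighbours at once
 * (for instance when a hard interface is disabled) can trigger a purge run
 * immediately via the helper above instead of waiting up to
 * BATADV_ORIG_WORK_PERIOD ms for the delayed work. The example function
 * name is made up.
 */
static void batadv_example_purge_now(struct batadv_priv *bat_priv)
{
	batadv_purge_orig_ref(bat_priv);
}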
1237
1238int batadv_orig_seq_print_text(struct seq_file *seq, void *offset)
1239{
1240 struct net_device *net_dev = (struct net_device *)seq->private;
1241 struct batadv_priv *bat_priv = netdev_priv(net_dev);
1242 struct batadv_hard_iface *primary_if;
1243
1244 primary_if = batadv_seq_print_text_primary_if_get(seq);
1245 if (!primary_if)
1246 return 0;
1247
1248 seq_printf(seq, "[B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s %s)]\n",
1249 BATADV_SOURCE_VERSION, primary_if->net_dev->name,
1250 primary_if->net_dev->dev_addr, net_dev->name,
1251 bat_priv->bat_algo_ops->name);
1252
1253 batadv_hardif_free_ref(primary_if);
1254
1255 if (!bat_priv->bat_algo_ops->bat_orig_print) {
1256 seq_puts(seq,
1257 "No printing function for this routing protocol\n");
1258 return 0;
1259 }
1260
1261 bat_priv->bat_algo_ops->bat_orig_print(bat_priv, seq,
1262 BATADV_IF_DEFAULT);
1263
1264 return 0;
1265}
1266
1267/**
1268 * batadv_orig_hardif_seq_print_text - writes originator information for a
1269 * specific outgoing interface
1270 * @seq: debugfs table seq_file struct
1271 * @offset: not used
1272 *
1273 * Return: 0
1274 */
1275int batadv_orig_hardif_seq_print_text(struct seq_file *seq, void *offset)
1276{
1277 struct net_device *net_dev = (struct net_device *)seq->private;
1278 struct batadv_hard_iface *hard_iface;
1279 struct batadv_priv *bat_priv;
1280
1281 hard_iface = batadv_hardif_get_by_netdev(net_dev);
1282
1283 if (!hard_iface || !hard_iface->soft_iface) {
1284 seq_puts(seq, "Interface not known to B.A.T.M.A.N.\n");
1285 goto out;
1286 }
1287
1288 bat_priv = netdev_priv(hard_iface->soft_iface);
1289 if (!bat_priv->bat_algo_ops->bat_orig_print) {
1290 seq_puts(seq,
1291 "No printing function for this routing protocol\n");
1292 goto out;
1293 }
1294
1295 if (hard_iface->if_status != BATADV_IF_ACTIVE) {
1296 seq_puts(seq, "Interface not active\n");
1297 goto out;
1298 }
1299
1300 seq_printf(seq, "[B.A.T.M.A.N. adv %s, IF/MAC: %s/%pM (%s %s)]\n",
1301 BATADV_SOURCE_VERSION, hard_iface->net_dev->name,
1302 hard_iface->net_dev->dev_addr,
1303 hard_iface->soft_iface->name, bat_priv->bat_algo_ops->name);
1304
1305 bat_priv->bat_algo_ops->bat_orig_print(bat_priv, seq, hard_iface);
1306
1307out:
1308 if (hard_iface)
1309 batadv_hardif_free_ref(hard_iface);
1310 return 0;
1311}
1312
1313int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface,
1314 int max_if_num)
1315{
1316 struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
1317 struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;
1318 struct batadv_hashtable *hash = bat_priv->orig_hash;
1319 struct hlist_head *head;
1320 struct batadv_orig_node *orig_node;
1321 u32 i;
1322 int ret;
1323
1324 /* resize all orig nodes because orig_node->bcast_own(_sum) depend on
1325 * if_num
1326 */
1327 for (i = 0; i < hash->size; i++) {
1328 head = &hash->table[i];
1329
1330 rcu_read_lock();
1331 hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
1332 ret = 0;
1333 if (bao->bat_orig_add_if)
1334 ret = bao->bat_orig_add_if(orig_node,
1335 max_if_num);
1336 if (ret == -ENOMEM)
1337 goto err;
1338 }
1339 rcu_read_unlock();
1340 }
1341
1342 return 0;
1343
1344err:
1345 rcu_read_unlock();
1346 return -ENOMEM;
1347}
1348
1349int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface,
1350 int max_if_num)
1351{
1352 struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
1353 struct batadv_hashtable *hash = bat_priv->orig_hash;
1354 struct hlist_head *head;
1355 struct batadv_hard_iface *hard_iface_tmp;
1356 struct batadv_orig_node *orig_node;
1357 struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;
1358 u32 i;
1359 int ret;
1360
1361 /* resize all orig nodes because orig_node->bcast_own(_sum) depend on
1362 * if_num
1363 */
1364 for (i = 0; i < hash->size; i++) {
1365 head = &hash->table[i];
1366
1367 rcu_read_lock();
1368 hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
1369 ret = 0;
1370 if (bao->bat_orig_del_if)
1371 ret = bao->bat_orig_del_if(orig_node,
1372 max_if_num,
1373 hard_iface->if_num);
1374 if (ret == -ENOMEM)
1375 goto err;
1376 }
1377 rcu_read_unlock();
1378 }
1379
1380 /* renumber remaining batman interfaces _inside_ of orig_hash_lock */
1381 rcu_read_lock();
1382 list_for_each_entry_rcu(hard_iface_tmp, &batadv_hardif_list, list) {
1383 if (hard_iface_tmp->if_status == BATADV_IF_NOT_IN_USE)
1384 continue;
1385
1386 if (hard_iface == hard_iface_tmp)
1387 continue;
1388
1389 if (hard_iface->soft_iface != hard_iface_tmp->soft_iface)
1390 continue;
1391
1392 if (hard_iface_tmp->if_num > hard_iface->if_num)
1393 hard_iface_tmp->if_num--;
1394 }
1395 rcu_read_unlock();
1396
1397 hard_iface->if_num = -1;
1398 return 0;
1399
1400err:
1401 rcu_read_unlock();
1402 return -ENOMEM;
1403}