batman-adv: Drop immediate batadv_hardif_neigh_node free function
net/batman-adv/originator.c
/* Copyright (C) 2009-2015 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "originator.h"
#include "main.h"

#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/fs.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/rculist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

#include "distributed-arp-table.h"
#include "fragmentation.h"
#include "gateway_client.h"
#include "hard-interface.h"
#include "hash.h"
#include "multicast.h"
#include "network-coding.h"
#include "routing.h"
#include "translation-table.h"

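/* This file manages the originator, neighbor and per-interface info
 * objects of the mesh: their creation, RCU protected lookup, reference
 * counting and the periodic purging of stale entries.
 */
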
/* hash class keys */
static struct lock_class_key batadv_orig_hash_lock_class_key;

static void batadv_purge_orig(struct work_struct *work);

/* returns 1 if they are the same originator */
int batadv_compare_orig(const struct hlist_node *node, const void *data2)
{
	const void *data1 = container_of(node, struct batadv_orig_node,
					 hash_entry);

	return batadv_compare_eth(data1, data2);
}

/**
 * batadv_orig_node_vlan_get - get an orig_node_vlan object
 * @orig_node: the originator serving the VLAN
 * @vid: the VLAN identifier
 *
 * Returns the vlan object identified by vid and belonging to orig_node or NULL
 * if it does not exist.
 */
struct batadv_orig_node_vlan *
batadv_orig_node_vlan_get(struct batadv_orig_node *orig_node,
			  unsigned short vid)
{
	struct batadv_orig_node_vlan *vlan = NULL, *tmp;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp, &orig_node->vlan_list, list) {
		if (tmp->vid != vid)
			continue;

		if (!atomic_inc_not_zero(&tmp->refcount))
			continue;

		vlan = tmp;

		break;
	}
	rcu_read_unlock();

	return vlan;
}

/**
 * batadv_orig_node_vlan_new - search and possibly create an orig_node_vlan
 *  object
 * @orig_node: the originator serving the VLAN
 * @vid: the VLAN identifier
 *
 * Returns NULL in case of failure or the vlan object identified by vid and
 * belonging to orig_node otherwise. The object is created and added to the list
 * if it does not exist.
 *
 * The object is returned with refcounter increased by 1.
 */
struct batadv_orig_node_vlan *
batadv_orig_node_vlan_new(struct batadv_orig_node *orig_node,
			  unsigned short vid)
{
	struct batadv_orig_node_vlan *vlan;

	spin_lock_bh(&orig_node->vlan_list_lock);

	/* first look if an object for this vid already exists */
	vlan = batadv_orig_node_vlan_get(orig_node, vid);
	if (vlan)
		goto out;

	vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC);
	if (!vlan)
		goto out;

	atomic_set(&vlan->refcount, 2);
	vlan->vid = vid;

	hlist_add_head_rcu(&vlan->list, &orig_node->vlan_list);

out:
	spin_unlock_bh(&orig_node->vlan_list_lock);

	return vlan;
}

/**
 * batadv_orig_node_vlan_free_ref - decrement the refcounter and possibly free
 *  the originator-vlan object
 * @orig_vlan: the originator-vlan object to release
 */
void batadv_orig_node_vlan_free_ref(struct batadv_orig_node_vlan *orig_vlan)
{
	if (atomic_dec_and_test(&orig_vlan->refcount))
		kfree_rcu(orig_vlan, rcu);
}
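
/* Note: batadv_orig_node_vlan_new() hands the vlan back with two
 * references - one kept via orig_node->vlan_list and one for the caller,
 * which is dropped again with batadv_orig_node_vlan_free_ref() once the
 * caller is done (see batadv_orig_node_new() below for an example).
 */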

int batadv_originator_init(struct batadv_priv *bat_priv)
{
	if (bat_priv->orig_hash)
		return 0;

	bat_priv->orig_hash = batadv_hash_new(1024);

	if (!bat_priv->orig_hash)
		goto err;

	batadv_hash_set_lock_class(bat_priv->orig_hash,
				   &batadv_orig_hash_lock_class_key);

	INIT_DELAYED_WORK(&bat_priv->orig_work, batadv_purge_orig);
	queue_delayed_work(batadv_event_workqueue,
			   &bat_priv->orig_work,
			   msecs_to_jiffies(BATADV_ORIG_WORK_PERIOD));

	return 0;

err:
	return -ENOMEM;
}

/**
 * batadv_neigh_ifinfo_free_rcu - free the neigh_ifinfo object
 * @rcu: rcu pointer of the neigh_ifinfo object
 */
static void batadv_neigh_ifinfo_free_rcu(struct rcu_head *rcu)
{
	struct batadv_neigh_ifinfo *neigh_ifinfo;

	neigh_ifinfo = container_of(rcu, struct batadv_neigh_ifinfo, rcu);

	if (neigh_ifinfo->if_outgoing != BATADV_IF_DEFAULT)
		batadv_hardif_free_ref_now(neigh_ifinfo->if_outgoing);

	kfree(neigh_ifinfo);
}

/**
 * batadv_neigh_ifinfo_free_ref_now - decrement the refcounter and possibly
 *  free the neigh_ifinfo (without rcu callback)
 * @neigh_ifinfo: the neigh_ifinfo object to release
 */
static void
batadv_neigh_ifinfo_free_ref_now(struct batadv_neigh_ifinfo *neigh_ifinfo)
{
	if (atomic_dec_and_test(&neigh_ifinfo->refcount))
		batadv_neigh_ifinfo_free_rcu(&neigh_ifinfo->rcu);
}

/**
 * batadv_neigh_ifinfo_free_ref - decrement the refcounter and possibly free
 *  the neigh_ifinfo
 * @neigh_ifinfo: the neigh_ifinfo object to release
 */
void batadv_neigh_ifinfo_free_ref(struct batadv_neigh_ifinfo *neigh_ifinfo)
{
	if (atomic_dec_and_test(&neigh_ifinfo->refcount))
		call_rcu(&neigh_ifinfo->rcu, batadv_neigh_ifinfo_free_rcu);
}
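
/* Both helpers above drop one reference; the difference is that
 * batadv_neigh_ifinfo_free_ref() defers the actual free to an RCU
 * callback, while batadv_neigh_ifinfo_free_ref_now() frees right away.
 * The _now variant is only used from batadv_neigh_node_free_rcu(),
 * which already runs after the grace period of the parent neigh_node.
 */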

/**
 * batadv_hardif_neigh_release - release hardif neigh node from lists and
 *  queue for free after rcu grace period
 * @hardif_neigh: hardif neighbor to free
 */
static void
batadv_hardif_neigh_release(struct batadv_hardif_neigh_node *hardif_neigh)
{
	spin_lock_bh(&hardif_neigh->if_incoming->neigh_list_lock);
	hlist_del_init_rcu(&hardif_neigh->list);
	spin_unlock_bh(&hardif_neigh->if_incoming->neigh_list_lock);

	batadv_hardif_free_ref(hardif_neigh->if_incoming);
	kfree_rcu(hardif_neigh, rcu);
}

/**
 * batadv_hardif_neigh_free_ref - decrement the hardif neighbors refcounter
 *  and possibly release it
 * @hardif_neigh: hardif neighbor to free
 */
void batadv_hardif_neigh_free_ref(struct batadv_hardif_neigh_node *hardif_neigh)
{
	if (atomic_dec_and_test(&hardif_neigh->refcount))
		batadv_hardif_neigh_release(hardif_neigh);
}
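
/* Unlike the neigh_ifinfo helpers, hardif_neigh objects have no
 * immediate free variant (it was dropped, per the subject of this
 * change): batadv_hardif_neigh_release() unlinks the entry under
 * neigh_list_lock and always defers the free with kfree_rcu().
 */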

/**
 * batadv_neigh_node_free_rcu - free the neigh_node
 * @rcu: rcu pointer of the neigh_node
 */
static void batadv_neigh_node_free_rcu(struct rcu_head *rcu)
{
	struct hlist_node *node_tmp;
	struct batadv_neigh_node *neigh_node;
	struct batadv_hardif_neigh_node *hardif_neigh;
	struct batadv_neigh_ifinfo *neigh_ifinfo;
	struct batadv_algo_ops *bao;

	neigh_node = container_of(rcu, struct batadv_neigh_node, rcu);
	bao = neigh_node->orig_node->bat_priv->bat_algo_ops;

	hlist_for_each_entry_safe(neigh_ifinfo, node_tmp,
				  &neigh_node->ifinfo_list, list) {
		batadv_neigh_ifinfo_free_ref_now(neigh_ifinfo);
	}

	hardif_neigh = batadv_hardif_neigh_get(neigh_node->if_incoming,
					       neigh_node->addr);
	if (hardif_neigh) {
		/* batadv_hardif_neigh_get() increased the refcount for this
		 * lookup; drop that reference and the long-term reference
		 * taken for this neigh_node in batadv_neigh_node_new()
		 */
		batadv_hardif_neigh_free_ref(hardif_neigh);
		batadv_hardif_neigh_free_ref(hardif_neigh);
	}

	if (bao->bat_neigh_free)
		bao->bat_neigh_free(neigh_node);

	batadv_hardif_free_ref_now(neigh_node->if_incoming);

	kfree(neigh_node);
}

/**
 * batadv_neigh_node_free_ref - decrement the neighbors refcounter
 *  and possibly release it
 * @neigh_node: neighbor to free
 */
void batadv_neigh_node_free_ref(struct batadv_neigh_node *neigh_node)
{
	if (atomic_dec_and_test(&neigh_node->refcount))
		call_rcu(&neigh_node->rcu, batadv_neigh_node_free_rcu);
}

/**
 * batadv_orig_router_get - router to the originator depending on iface
 * @orig_node: the orig node for the router
 * @if_outgoing: the interface where the payload packet has been received or
 *  the OGM should be sent to
 *
 * Returns the neighbor which should be router for this orig_node/iface.
 *
 * The object is returned with refcounter increased by 1.
 */
struct batadv_neigh_node *
batadv_orig_router_get(struct batadv_orig_node *orig_node,
		       const struct batadv_hard_iface *if_outgoing)
{
	struct batadv_orig_ifinfo *orig_ifinfo;
	struct batadv_neigh_node *router = NULL;

	rcu_read_lock();
	hlist_for_each_entry_rcu(orig_ifinfo, &orig_node->ifinfo_list, list) {
		if (orig_ifinfo->if_outgoing != if_outgoing)
			continue;

		router = rcu_dereference(orig_ifinfo->router);
		break;
	}

	if (router && !atomic_inc_not_zero(&router->refcount))
		router = NULL;

	rcu_read_unlock();
	return router;
}
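
/* Lookup helpers like the one above follow a common pattern: walk the
 * list under rcu_read_lock() and only hand the object out if
 * atomic_inc_not_zero() succeeds, so no new reference can be taken on
 * an object whose refcount already hit zero and which is merely
 * waiting out its RCU grace period before being freed.
 */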

/**
 * batadv_orig_ifinfo_get - find the ifinfo from an orig_node
 * @orig_node: the orig node to be queried
 * @if_outgoing: the interface for which the ifinfo should be acquired
 *
 * Returns the requested orig_ifinfo or NULL if not found.
 *
 * The object is returned with refcounter increased by 1.
 */
struct batadv_orig_ifinfo *
batadv_orig_ifinfo_get(struct batadv_orig_node *orig_node,
		       struct batadv_hard_iface *if_outgoing)
{
	struct batadv_orig_ifinfo *tmp, *orig_ifinfo = NULL;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp, &orig_node->ifinfo_list,
				 list) {
		if (tmp->if_outgoing != if_outgoing)
			continue;

		if (!atomic_inc_not_zero(&tmp->refcount))
			continue;

		orig_ifinfo = tmp;
		break;
	}
	rcu_read_unlock();

	return orig_ifinfo;
}

/**
 * batadv_orig_ifinfo_new - search and possibly create an orig_ifinfo object
 * @orig_node: the orig node to be queried
 * @if_outgoing: the interface for which the ifinfo should be acquired
 *
 * Returns NULL in case of failure or the orig_ifinfo object for the if_outgoing
 * interface otherwise. The object is created and added to the list
 * if it does not exist.
 *
 * The object is returned with refcounter increased by 1.
 */
struct batadv_orig_ifinfo *
batadv_orig_ifinfo_new(struct batadv_orig_node *orig_node,
		       struct batadv_hard_iface *if_outgoing)
{
	struct batadv_orig_ifinfo *orig_ifinfo = NULL;
	unsigned long reset_time;

	spin_lock_bh(&orig_node->neigh_list_lock);

	orig_ifinfo = batadv_orig_ifinfo_get(orig_node, if_outgoing);
	if (orig_ifinfo)
		goto out;

	orig_ifinfo = kzalloc(sizeof(*orig_ifinfo), GFP_ATOMIC);
	if (!orig_ifinfo)
		goto out;

	if (if_outgoing != BATADV_IF_DEFAULT &&
	    !atomic_inc_not_zero(&if_outgoing->refcount)) {
		kfree(orig_ifinfo);
		orig_ifinfo = NULL;
		goto out;
	}

	reset_time = jiffies - 1;
	reset_time -= msecs_to_jiffies(BATADV_RESET_PROTECTION_MS);
	orig_ifinfo->batman_seqno_reset = reset_time;
	orig_ifinfo->if_outgoing = if_outgoing;
	INIT_HLIST_NODE(&orig_ifinfo->list);
	atomic_set(&orig_ifinfo->refcount, 2);
	hlist_add_head_rcu(&orig_ifinfo->list,
			   &orig_node->ifinfo_list);
out:
	spin_unlock_bh(&orig_node->neigh_list_lock);
	return orig_ifinfo;
}

/**
 * batadv_neigh_ifinfo_get - find the ifinfo from a neigh_node
 * @neigh: the neigh node to be queried
 * @if_outgoing: the interface for which the ifinfo should be acquired
 *
 * The object is returned with refcounter increased by 1.
 *
 * Returns the requested neigh_ifinfo or NULL if not found
 */
struct batadv_neigh_ifinfo *
batadv_neigh_ifinfo_get(struct batadv_neigh_node *neigh,
			struct batadv_hard_iface *if_outgoing)
{
	struct batadv_neigh_ifinfo *neigh_ifinfo = NULL,
				   *tmp_neigh_ifinfo;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp_neigh_ifinfo, &neigh->ifinfo_list,
				 list) {
		if (tmp_neigh_ifinfo->if_outgoing != if_outgoing)
			continue;

		if (!atomic_inc_not_zero(&tmp_neigh_ifinfo->refcount))
			continue;

		neigh_ifinfo = tmp_neigh_ifinfo;
		break;
	}
	rcu_read_unlock();

	return neigh_ifinfo;
}

/**
 * batadv_neigh_ifinfo_new - search and possibly create a neigh_ifinfo object
 * @neigh: the neigh node to be queried
 * @if_outgoing: the interface for which the ifinfo should be acquired
 *
 * Returns NULL in case of failure or the neigh_ifinfo object for the
 * if_outgoing interface otherwise. The object is created and added to the list
 * if it does not exist.
 *
 * The object is returned with refcounter increased by 1.
 */
struct batadv_neigh_ifinfo *
batadv_neigh_ifinfo_new(struct batadv_neigh_node *neigh,
			struct batadv_hard_iface *if_outgoing)
{
	struct batadv_neigh_ifinfo *neigh_ifinfo;

	spin_lock_bh(&neigh->ifinfo_lock);

	neigh_ifinfo = batadv_neigh_ifinfo_get(neigh, if_outgoing);
	if (neigh_ifinfo)
		goto out;

	neigh_ifinfo = kzalloc(sizeof(*neigh_ifinfo), GFP_ATOMIC);
	if (!neigh_ifinfo)
		goto out;

	if (if_outgoing && !atomic_inc_not_zero(&if_outgoing->refcount)) {
		kfree(neigh_ifinfo);
		neigh_ifinfo = NULL;
		goto out;
	}

	INIT_HLIST_NODE(&neigh_ifinfo->list);
	atomic_set(&neigh_ifinfo->refcount, 2);
	neigh_ifinfo->if_outgoing = if_outgoing;

	hlist_add_head_rcu(&neigh_ifinfo->list, &neigh->ifinfo_list);

out:
	spin_unlock_bh(&neigh->ifinfo_lock);

	return neigh_ifinfo;
}

/**
 * batadv_neigh_node_get - retrieve a neighbour from the list
 * @orig_node: originator which the neighbour belongs to
 * @hard_iface: the interface where this neighbour is connected to
 * @addr: the address of the neighbour
 *
 * Looks for and possibly returns a neighbour belonging to this originator list
 * which is connected through the provided hard interface.
 * Returns NULL if the neighbour is not found.
 */
static struct batadv_neigh_node *
batadv_neigh_node_get(const struct batadv_orig_node *orig_node,
		      const struct batadv_hard_iface *hard_iface,
		      const u8 *addr)
{
	struct batadv_neigh_node *tmp_neigh_node, *res = NULL;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp_neigh_node, &orig_node->neigh_list, list) {
		if (!batadv_compare_eth(tmp_neigh_node->addr, addr))
			continue;

		if (tmp_neigh_node->if_incoming != hard_iface)
			continue;

		if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
			continue;

		res = tmp_neigh_node;
		break;
	}
	rcu_read_unlock();

	return res;
}

/**
 * batadv_hardif_neigh_create - create a hardif neighbour node
 * @hard_iface: the interface this neighbour is connected to
 * @neigh_addr: the interface address of the neighbour to retrieve
 *
 * Returns the hardif neighbour node if found or created or NULL otherwise.
 */
static struct batadv_hardif_neigh_node *
batadv_hardif_neigh_create(struct batadv_hard_iface *hard_iface,
			   const u8 *neigh_addr)
{
	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct batadv_hardif_neigh_node *hardif_neigh = NULL;

	spin_lock_bh(&hard_iface->neigh_list_lock);

	/* check if neighbor hasn't been added in the meantime */
	hardif_neigh = batadv_hardif_neigh_get(hard_iface, neigh_addr);
	if (hardif_neigh)
		goto out;

	if (!atomic_inc_not_zero(&hard_iface->refcount))
		goto out;

	hardif_neigh = kzalloc(sizeof(*hardif_neigh), GFP_ATOMIC);
	if (!hardif_neigh) {
		batadv_hardif_free_ref(hard_iface);
		goto out;
	}

	INIT_HLIST_NODE(&hardif_neigh->list);
	ether_addr_copy(hardif_neigh->addr, neigh_addr);
	hardif_neigh->if_incoming = hard_iface;
	hardif_neigh->last_seen = jiffies;

	atomic_set(&hardif_neigh->refcount, 1);

	if (bat_priv->bat_algo_ops->bat_hardif_neigh_init)
		bat_priv->bat_algo_ops->bat_hardif_neigh_init(hardif_neigh);

	hlist_add_head(&hardif_neigh->list, &hard_iface->neigh_list);

out:
	spin_unlock_bh(&hard_iface->neigh_list_lock);
	return hardif_neigh;
}
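
/* batadv_hardif_neigh_create() re-checks for an existing entry after
 * taking neigh_list_lock because batadv_hardif_neigh_get_or_create()
 * below does its first lookup lock-free: two callers may both miss and
 * race into the create path, and the loser must find the entry the
 * winner just added instead of adding a duplicate.
 */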

/**
 * batadv_hardif_neigh_get_or_create - retrieve or create a hardif neighbour
 *  node
 * @hard_iface: the interface this neighbour is connected to
 * @neigh_addr: the interface address of the neighbour to retrieve
 *
 * Returns the hardif neighbour node if found or created or NULL otherwise.
 */
static struct batadv_hardif_neigh_node *
batadv_hardif_neigh_get_or_create(struct batadv_hard_iface *hard_iface,
				  const u8 *neigh_addr)
{
	struct batadv_hardif_neigh_node *hardif_neigh = NULL;

	/* first check without locking to avoid the overhead */
	hardif_neigh = batadv_hardif_neigh_get(hard_iface, neigh_addr);
	if (hardif_neigh)
		return hardif_neigh;

	return batadv_hardif_neigh_create(hard_iface, neigh_addr);
}

/**
 * batadv_hardif_neigh_get - retrieve a hardif neighbour from the list
 * @hard_iface: the interface where this neighbour is connected to
 * @neigh_addr: the address of the neighbour
 *
 * Looks for and possibly returns a neighbour belonging to this hard interface.
 * Returns NULL if the neighbour is not found.
 */
struct batadv_hardif_neigh_node *
batadv_hardif_neigh_get(const struct batadv_hard_iface *hard_iface,
			const u8 *neigh_addr)
{
	struct batadv_hardif_neigh_node *tmp_hardif_neigh, *hardif_neigh = NULL;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp_hardif_neigh,
				 &hard_iface->neigh_list, list) {
		if (!batadv_compare_eth(tmp_hardif_neigh->addr, neigh_addr))
			continue;

		if (!atomic_inc_not_zero(&tmp_hardif_neigh->refcount))
			continue;

		hardif_neigh = tmp_hardif_neigh;
		break;
	}
	rcu_read_unlock();

	return hardif_neigh;
}

/**
 * batadv_neigh_node_new - create and init a new neigh_node object
 * @orig_node: originator object representing the neighbour
 * @hard_iface: the interface where the neighbour is connected to
 * @neigh_addr: the mac address of the neighbour interface
 *
 * Allocates a new neigh_node object and initialises all the generic fields.
 * Returns the new object or NULL on failure.
 */
struct batadv_neigh_node *
batadv_neigh_node_new(struct batadv_orig_node *orig_node,
		      struct batadv_hard_iface *hard_iface,
		      const u8 *neigh_addr)
{
	struct batadv_neigh_node *neigh_node;
	struct batadv_hardif_neigh_node *hardif_neigh = NULL;

	neigh_node = batadv_neigh_node_get(orig_node, hard_iface, neigh_addr);
	if (neigh_node)
		goto out;

	hardif_neigh = batadv_hardif_neigh_get_or_create(hard_iface,
							 neigh_addr);
	if (!hardif_neigh)
		goto out;

	neigh_node = kzalloc(sizeof(*neigh_node), GFP_ATOMIC);
	if (!neigh_node)
		goto out;

	if (!atomic_inc_not_zero(&hard_iface->refcount)) {
		kfree(neigh_node);
		neigh_node = NULL;
		goto out;
	}

	INIT_HLIST_NODE(&neigh_node->list);
	INIT_HLIST_HEAD(&neigh_node->ifinfo_list);
	spin_lock_init(&neigh_node->ifinfo_lock);

	ether_addr_copy(neigh_node->addr, neigh_addr);
	neigh_node->if_incoming = hard_iface;
	neigh_node->orig_node = orig_node;

	/* extra reference for return */
	atomic_set(&neigh_node->refcount, 2);

	spin_lock_bh(&orig_node->neigh_list_lock);
	hlist_add_head_rcu(&neigh_node->list, &orig_node->neigh_list);
	spin_unlock_bh(&orig_node->neigh_list_lock);

	/* increment unique neighbor refcount */
	atomic_inc(&hardif_neigh->refcount);

	batadv_dbg(BATADV_DBG_BATMAN, orig_node->bat_priv,
		   "Creating new neighbor %pM for orig_node %pM on interface %s\n",
		   neigh_addr, orig_node->orig, hard_iface->net_dev->name);

out:
	if (hardif_neigh)
		batadv_hardif_neigh_free_ref(hardif_neigh);
	return neigh_node;
}
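
/* Reference flow in batadv_neigh_node_new(): the returned neigh_node
 * carries the extra reference from atomic_set(..., 2), hard_iface gets
 * an extra reference for neigh_node->if_incoming, and the hardif_neigh
 * reference taken via _get_or_create() is only held temporarily and
 * dropped again at "out" - the atomic_inc() above is what accounts for
 * the neigh_node's long-term hold on the hardif_neigh (released in
 * batadv_neigh_node_free_rcu()).
 */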

/**
 * batadv_hardif_neigh_seq_print_text - print the single hop neighbour list
 * @seq: neighbour table seq_file struct
 * @offset: not used
 *
 * Always returns 0.
 */
int batadv_hardif_neigh_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct batadv_priv *bat_priv = netdev_priv(net_dev);
	struct batadv_hard_iface *primary_if;

	primary_if = batadv_seq_print_text_primary_if_get(seq);
	if (!primary_if)
		return 0;

	seq_printf(seq, "[B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s %s)]\n",
		   BATADV_SOURCE_VERSION, primary_if->net_dev->name,
		   primary_if->net_dev->dev_addr, net_dev->name,
		   bat_priv->bat_algo_ops->name);

	batadv_hardif_free_ref(primary_if);

	if (!bat_priv->bat_algo_ops->bat_neigh_print) {
		seq_puts(seq,
			 "No printing function for this routing protocol\n");
		return 0;
	}

	bat_priv->bat_algo_ops->bat_neigh_print(bat_priv, seq);
	return 0;
}

/**
 * batadv_orig_ifinfo_release - release orig_ifinfo from lists and queue for
 *  free after rcu grace period
 * @orig_ifinfo: the orig_ifinfo object to release
 */
static void batadv_orig_ifinfo_release(struct batadv_orig_ifinfo *orig_ifinfo)
{
	struct batadv_neigh_node *router;

	if (orig_ifinfo->if_outgoing != BATADV_IF_DEFAULT)
		batadv_hardif_free_ref(orig_ifinfo->if_outgoing);

	/* this is the last reference to this object */
	router = rcu_dereference_protected(orig_ifinfo->router, true);
	if (router)
		batadv_neigh_node_free_ref(router);

	kfree_rcu(orig_ifinfo, rcu);
}

/**
 * batadv_orig_ifinfo_free_ref - decrement the refcounter and possibly release
 *  the orig_ifinfo
 * @orig_ifinfo: the orig_ifinfo object to release
 */
void batadv_orig_ifinfo_free_ref(struct batadv_orig_ifinfo *orig_ifinfo)
{
	if (atomic_dec_and_test(&orig_ifinfo->refcount))
		batadv_orig_ifinfo_release(orig_ifinfo);
}

/**
 * batadv_orig_node_free_rcu - free the orig_node
 * @rcu: rcu pointer of the orig_node
 */
static void batadv_orig_node_free_rcu(struct rcu_head *rcu)
{
	struct batadv_orig_node *orig_node;

	orig_node = container_of(rcu, struct batadv_orig_node, rcu);

	batadv_mcast_purge_orig(orig_node);

	batadv_frag_purge_orig(orig_node, NULL);

	if (orig_node->bat_priv->bat_algo_ops->bat_orig_free)
		orig_node->bat_priv->bat_algo_ops->bat_orig_free(orig_node);

	kfree(orig_node->tt_buff);
	kfree(orig_node);
}

/**
 * batadv_orig_node_release - release orig_node from lists and queue for
 *  free after rcu grace period
 * @orig_node: the orig node to free
 */
static void batadv_orig_node_release(struct batadv_orig_node *orig_node)
{
	struct hlist_node *node_tmp;
	struct batadv_neigh_node *neigh_node;
	struct batadv_orig_ifinfo *orig_ifinfo;

	spin_lock_bh(&orig_node->neigh_list_lock);

	/* for all neighbors towards this originator ... */
	hlist_for_each_entry_safe(neigh_node, node_tmp,
				  &orig_node->neigh_list, list) {
		hlist_del_rcu(&neigh_node->list);
		batadv_neigh_node_free_ref(neigh_node);
	}

	hlist_for_each_entry_safe(orig_ifinfo, node_tmp,
				  &orig_node->ifinfo_list, list) {
		hlist_del_rcu(&orig_ifinfo->list);
		batadv_orig_ifinfo_free_ref(orig_ifinfo);
	}
	spin_unlock_bh(&orig_node->neigh_list_lock);

	/* Free nc_nodes */
	batadv_nc_purge_orig(orig_node->bat_priv, orig_node, NULL);

	call_rcu(&orig_node->rcu, batadv_orig_node_free_rcu);
}

/**
 * batadv_orig_node_free_ref - decrement the orig node refcounter and possibly
 *  release it
 * @orig_node: the orig node to free
 */
void batadv_orig_node_free_ref(struct batadv_orig_node *orig_node)
{
	if (atomic_dec_and_test(&orig_node->refcount))
		batadv_orig_node_release(orig_node);
}
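
/* orig_node teardown happens in two stages: batadv_orig_node_release()
 * unlinks the neighbor and ifinfo entries under neigh_list_lock and
 * only schedules the actual free via call_rcu(), so readers still
 * walking those lists keep seeing valid memory until
 * batadv_orig_node_free_rcu() runs after the grace period.
 */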

/**
 * batadv_orig_node_free_ref_now - decrement the orig node refcounter and
 *  possibly free it (without rcu callback)
 * @orig_node: the orig node to free
 */
void batadv_orig_node_free_ref_now(struct batadv_orig_node *orig_node)
{
	if (atomic_dec_and_test(&orig_node->refcount))
		batadv_orig_node_free_rcu(&orig_node->rcu);
}

void batadv_originator_free(struct batadv_priv *bat_priv)
{
	struct batadv_hashtable *hash = bat_priv->orig_hash;
	struct hlist_node *node_tmp;
	struct hlist_head *head;
	spinlock_t *list_lock; /* spinlock to protect write access */
	struct batadv_orig_node *orig_node;
	u32 i;

	if (!hash)
		return;

	cancel_delayed_work_sync(&bat_priv->orig_work);

	bat_priv->orig_hash = NULL;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(orig_node, node_tmp,
					  head, hash_entry) {
			hlist_del_rcu(&orig_node->hash_entry);
			batadv_orig_node_free_ref(orig_node);
		}
		spin_unlock_bh(list_lock);
	}

	batadv_hash_destroy(hash);
}

/**
 * batadv_orig_node_new - creates a new orig_node
 * @bat_priv: the bat priv with all the soft interface information
 * @addr: the mac address of the originator
 *
 * Creates a new originator object and initialises all the generic fields.
 * The new object is not added to the originator list.
 * Returns the newly created object or NULL on failure.
 */
struct batadv_orig_node *batadv_orig_node_new(struct batadv_priv *bat_priv,
					      const u8 *addr)
{
	struct batadv_orig_node *orig_node;
	struct batadv_orig_node_vlan *vlan;
	unsigned long reset_time;
	int i;

	batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
		   "Creating new originator: %pM\n", addr);

	orig_node = kzalloc(sizeof(*orig_node), GFP_ATOMIC);
	if (!orig_node)
		return NULL;

	INIT_HLIST_HEAD(&orig_node->neigh_list);
	INIT_HLIST_HEAD(&orig_node->vlan_list);
	INIT_HLIST_HEAD(&orig_node->ifinfo_list);
	spin_lock_init(&orig_node->bcast_seqno_lock);
	spin_lock_init(&orig_node->neigh_list_lock);
	spin_lock_init(&orig_node->tt_buff_lock);
	spin_lock_init(&orig_node->tt_lock);
	spin_lock_init(&orig_node->vlan_list_lock);

	batadv_nc_init_orig(orig_node);

	/* extra reference for return */
	atomic_set(&orig_node->refcount, 2);

	orig_node->bat_priv = bat_priv;
	ether_addr_copy(orig_node->orig, addr);
	batadv_dat_init_orig_node_addr(orig_node);
	atomic_set(&orig_node->last_ttvn, 0);
	orig_node->tt_buff = NULL;
	orig_node->tt_buff_len = 0;
	orig_node->last_seen = jiffies;
	reset_time = jiffies - 1 - msecs_to_jiffies(BATADV_RESET_PROTECTION_MS);
	orig_node->bcast_seqno_reset = reset_time;

#ifdef CONFIG_BATMAN_ADV_MCAST
	orig_node->mcast_flags = BATADV_NO_FLAGS;
	INIT_HLIST_NODE(&orig_node->mcast_want_all_unsnoopables_node);
	INIT_HLIST_NODE(&orig_node->mcast_want_all_ipv4_node);
	INIT_HLIST_NODE(&orig_node->mcast_want_all_ipv6_node);
	spin_lock_init(&orig_node->mcast_handler_lock);
#endif

	/* create a vlan object for the "untagged" LAN */
	vlan = batadv_orig_node_vlan_new(orig_node, BATADV_NO_FLAGS);
	if (!vlan)
		goto free_orig_node;
	/* batadv_orig_node_vlan_new() increases the refcounter.
	 * Immediately release vlan since it is not needed anymore in this
	 * context
	 */
	batadv_orig_node_vlan_free_ref(vlan);

	for (i = 0; i < BATADV_FRAG_BUFFER_COUNT; i++) {
		INIT_HLIST_HEAD(&orig_node->fragments[i].head);
		spin_lock_init(&orig_node->fragments[i].lock);
		orig_node->fragments[i].size = 0;
	}

	return orig_node;
free_orig_node:
	kfree(orig_node);
	return NULL;
}
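
/* orig_node->fragments[] holds BATADV_FRAG_BUFFER_COUNT per-originator
 * reassembly buffers; each gets its own list head, lock and size
 * counter above and is flushed again via batadv_frag_purge_orig() when
 * the originator is purged or finally freed.
 */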

/**
 * batadv_purge_neigh_ifinfo - purge obsolete ifinfo entries from neighbor
 * @bat_priv: the bat priv with all the soft interface information
 * @neigh: neigh node which is to be checked
 */
static void
batadv_purge_neigh_ifinfo(struct batadv_priv *bat_priv,
			  struct batadv_neigh_node *neigh)
{
	struct batadv_neigh_ifinfo *neigh_ifinfo;
	struct batadv_hard_iface *if_outgoing;
	struct hlist_node *node_tmp;

	spin_lock_bh(&neigh->ifinfo_lock);

	/* for all ifinfo objects for this neighbor */
	hlist_for_each_entry_safe(neigh_ifinfo, node_tmp,
				  &neigh->ifinfo_list, list) {
		if_outgoing = neigh_ifinfo->if_outgoing;

		/* always keep the default interface */
		if (if_outgoing == BATADV_IF_DEFAULT)
			continue;

		/* don't purge if the interface is not (going) down */
		if ((if_outgoing->if_status != BATADV_IF_INACTIVE) &&
		    (if_outgoing->if_status != BATADV_IF_NOT_IN_USE) &&
		    (if_outgoing->if_status != BATADV_IF_TO_BE_REMOVED))
			continue;

		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "neighbor/ifinfo purge: neighbor %pM, iface: %s\n",
			   neigh->addr, if_outgoing->net_dev->name);

		hlist_del_rcu(&neigh_ifinfo->list);
		batadv_neigh_ifinfo_free_ref(neigh_ifinfo);
	}

	spin_unlock_bh(&neigh->ifinfo_lock);
}

/**
 * batadv_purge_orig_ifinfo - purge obsolete ifinfo entries from originator
 * @bat_priv: the bat priv with all the soft interface information
 * @orig_node: orig node which is to be checked
 *
 * Returns true if any ifinfo entry was purged, false otherwise.
 */
static bool
batadv_purge_orig_ifinfo(struct batadv_priv *bat_priv,
			 struct batadv_orig_node *orig_node)
{
	struct batadv_orig_ifinfo *orig_ifinfo;
	struct batadv_hard_iface *if_outgoing;
	struct hlist_node *node_tmp;
	bool ifinfo_purged = false;

	spin_lock_bh(&orig_node->neigh_list_lock);

	/* for all ifinfo objects for this originator */
	hlist_for_each_entry_safe(orig_ifinfo, node_tmp,
				  &orig_node->ifinfo_list, list) {
		if_outgoing = orig_ifinfo->if_outgoing;

		/* always keep the default interface */
		if (if_outgoing == BATADV_IF_DEFAULT)
			continue;

		/* don't purge if the interface is not (going) down */
		if ((if_outgoing->if_status != BATADV_IF_INACTIVE) &&
		    (if_outgoing->if_status != BATADV_IF_NOT_IN_USE) &&
		    (if_outgoing->if_status != BATADV_IF_TO_BE_REMOVED))
			continue;

		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "router/ifinfo purge: originator %pM, iface: %s\n",
			   orig_node->orig, if_outgoing->net_dev->name);

		ifinfo_purged = true;

		hlist_del_rcu(&orig_ifinfo->list);
		batadv_orig_ifinfo_free_ref(orig_ifinfo);
		if (orig_node->last_bonding_candidate == orig_ifinfo) {
			orig_node->last_bonding_candidate = NULL;
			batadv_orig_ifinfo_free_ref(orig_ifinfo);
		}
	}

	spin_unlock_bh(&orig_node->neigh_list_lock);

	return ifinfo_purged;
}

/**
 * batadv_purge_orig_neighbors - purges neighbors from originator
 * @bat_priv: the bat priv with all the soft interface information
 * @orig_node: orig node which is to be checked
 *
 * Returns true if any neighbor was purged, false otherwise
 */
static bool
batadv_purge_orig_neighbors(struct batadv_priv *bat_priv,
			    struct batadv_orig_node *orig_node)
{
	struct hlist_node *node_tmp;
	struct batadv_neigh_node *neigh_node;
	bool neigh_purged = false;
	unsigned long last_seen;
	struct batadv_hard_iface *if_incoming;

	spin_lock_bh(&orig_node->neigh_list_lock);

	/* for all neighbors towards this originator ... */
	hlist_for_each_entry_safe(neigh_node, node_tmp,
				  &orig_node->neigh_list, list) {
		last_seen = neigh_node->last_seen;
		if_incoming = neigh_node->if_incoming;

		if ((batadv_has_timed_out(last_seen, BATADV_PURGE_TIMEOUT)) ||
		    (if_incoming->if_status == BATADV_IF_INACTIVE) ||
		    (if_incoming->if_status == BATADV_IF_NOT_IN_USE) ||
		    (if_incoming->if_status == BATADV_IF_TO_BE_REMOVED)) {
			if ((if_incoming->if_status == BATADV_IF_INACTIVE) ||
			    (if_incoming->if_status == BATADV_IF_NOT_IN_USE) ||
			    (if_incoming->if_status == BATADV_IF_TO_BE_REMOVED))
				batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
					   "neighbor purge: originator %pM, neighbor: %pM, iface: %s\n",
					   orig_node->orig, neigh_node->addr,
					   if_incoming->net_dev->name);
			else
				batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
					   "neighbor timeout: originator %pM, neighbor: %pM, last_seen: %u\n",
					   orig_node->orig, neigh_node->addr,
					   jiffies_to_msecs(last_seen));

			neigh_purged = true;

			hlist_del_rcu(&neigh_node->list);
			batadv_neigh_node_free_ref(neigh_node);
		} else {
			/* only necessary if not the whole neighbor is to be
			 * deleted, but some interface has been removed.
			 */
			batadv_purge_neigh_ifinfo(bat_priv, neigh_node);
		}
	}

	spin_unlock_bh(&orig_node->neigh_list_lock);
	return neigh_purged;
}

/**
 * batadv_find_best_neighbor - finds the best neighbor after purging
 * @bat_priv: the bat priv with all the soft interface information
 * @orig_node: orig node which is to be checked
 * @if_outgoing: the interface for which the metric should be compared
 *
 * Returns the current best neighbor, with refcount increased.
 */
static struct batadv_neigh_node *
batadv_find_best_neighbor(struct batadv_priv *bat_priv,
			  struct batadv_orig_node *orig_node,
			  struct batadv_hard_iface *if_outgoing)
{
	struct batadv_neigh_node *best = NULL, *neigh;
	struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;

	rcu_read_lock();
	hlist_for_each_entry_rcu(neigh, &orig_node->neigh_list, list) {
		if (best && (bao->bat_neigh_cmp(neigh, if_outgoing,
						best, if_outgoing) <= 0))
			continue;

		if (!atomic_inc_not_zero(&neigh->refcount))
			continue;

		if (best)
			batadv_neigh_node_free_ref(best);

		best = neigh;
	}
	rcu_read_unlock();

	return best;
}
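
/* batadv_find_best_neighbor() leans on the routing algorithm's
 * bat_neigh_cmp() callback (a positive return means the first neighbor
 * is better) and hands back the winner with its refcount raised,
 * dropping the reference on the previously best candidate whenever a
 * better one is found.
 */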

/**
 * batadv_purge_orig_node - purges obsolete information from an orig_node
 * @bat_priv: the bat priv with all the soft interface information
 * @orig_node: orig node which is to be checked
 *
 * This function checks if the orig_node or substructures of it have become
 * obsolete, and purges this information if that's the case.
 *
 * Returns true if the orig_node is to be removed, false otherwise.
 */
static bool batadv_purge_orig_node(struct batadv_priv *bat_priv,
				   struct batadv_orig_node *orig_node)
{
	struct batadv_neigh_node *best_neigh_node;
	struct batadv_hard_iface *hard_iface;
	bool changed_ifinfo, changed_neigh;

	if (batadv_has_timed_out(orig_node->last_seen,
				 2 * BATADV_PURGE_TIMEOUT)) {
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "Originator timeout: originator %pM, last_seen %u\n",
			   orig_node->orig,
			   jiffies_to_msecs(orig_node->last_seen));
		return true;
	}
	changed_ifinfo = batadv_purge_orig_ifinfo(bat_priv, orig_node);
	changed_neigh = batadv_purge_orig_neighbors(bat_priv, orig_node);

	if (!changed_ifinfo && !changed_neigh)
		return false;

	/* first for the default interface (BATADV_IF_DEFAULT, i.e. NULL) ... */
	best_neigh_node = batadv_find_best_neighbor(bat_priv, orig_node,
						    BATADV_IF_DEFAULT);
	batadv_update_route(bat_priv, orig_node, BATADV_IF_DEFAULT,
			    best_neigh_node);
	if (best_neigh_node)
		batadv_neigh_node_free_ref(best_neigh_node);

	/* ... then for all other interfaces. */
	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
		if (hard_iface->if_status != BATADV_IF_ACTIVE)
			continue;

		if (hard_iface->soft_iface != bat_priv->soft_iface)
			continue;

		best_neigh_node = batadv_find_best_neighbor(bat_priv,
							    orig_node,
							    hard_iface);
		batadv_update_route(bat_priv, orig_node, hard_iface,
				    best_neigh_node);
		if (best_neigh_node)
			batadv_neigh_node_free_ref(best_neigh_node);
	}
	rcu_read_unlock();

	return false;
}

static void _batadv_purge_orig(struct batadv_priv *bat_priv)
{
	struct batadv_hashtable *hash = bat_priv->orig_hash;
	struct hlist_node *node_tmp;
	struct hlist_head *head;
	spinlock_t *list_lock; /* spinlock to protect write access */
	struct batadv_orig_node *orig_node;
	u32 i;

	if (!hash)
		return;

	/* for all origins... */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(orig_node, node_tmp,
					  head, hash_entry) {
			if (batadv_purge_orig_node(bat_priv, orig_node)) {
				batadv_gw_node_delete(bat_priv, orig_node);
				hlist_del_rcu(&orig_node->hash_entry);
				batadv_tt_global_del_orig(orig_node->bat_priv,
							  orig_node, -1,
							  "originator timed out");
				batadv_orig_node_free_ref(orig_node);
				continue;
			}

			batadv_frag_purge_orig(orig_node,
					       batadv_frag_check_entry);
		}
		spin_unlock_bh(list_lock);
	}

	batadv_gw_election(bat_priv);
}

static void batadv_purge_orig(struct work_struct *work)
{
	struct delayed_work *delayed_work;
	struct batadv_priv *bat_priv;

	delayed_work = container_of(work, struct delayed_work, work);
	bat_priv = container_of(delayed_work, struct batadv_priv, orig_work);
	_batadv_purge_orig(bat_priv);
	queue_delayed_work(batadv_event_workqueue,
			   &bat_priv->orig_work,
			   msecs_to_jiffies(BATADV_ORIG_WORK_PERIOD));
}
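
/* batadv_purge_orig() is a self-rearming delayed work item: after each
 * run of _batadv_purge_orig() it queues itself again with a delay of
 * BATADV_ORIG_WORK_PERIOD. The work is first scheduled in
 * batadv_originator_init() and stopped via cancel_delayed_work_sync()
 * in batadv_originator_free().
 */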

void batadv_purge_orig_ref(struct batadv_priv *bat_priv)
{
	_batadv_purge_orig(bat_priv);
}

int batadv_orig_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct batadv_priv *bat_priv = netdev_priv(net_dev);
	struct batadv_hard_iface *primary_if;

	primary_if = batadv_seq_print_text_primary_if_get(seq);
	if (!primary_if)
		return 0;

	seq_printf(seq, "[B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s %s)]\n",
		   BATADV_SOURCE_VERSION, primary_if->net_dev->name,
		   primary_if->net_dev->dev_addr, net_dev->name,
		   bat_priv->bat_algo_ops->name);

	batadv_hardif_free_ref(primary_if);

	if (!bat_priv->bat_algo_ops->bat_orig_print) {
		seq_puts(seq,
			 "No printing function for this routing protocol\n");
		return 0;
	}

	bat_priv->bat_algo_ops->bat_orig_print(bat_priv, seq,
					       BATADV_IF_DEFAULT);

	return 0;
}

/**
 * batadv_orig_hardif_seq_print_text - writes originator infos for a specific
 *  outgoing interface
 * @seq: debugfs table seq_file struct
 * @offset: not used
 *
 * Returns 0
 */
int batadv_orig_hardif_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct batadv_hard_iface *hard_iface;
	struct batadv_priv *bat_priv;

	hard_iface = batadv_hardif_get_by_netdev(net_dev);

	if (!hard_iface || !hard_iface->soft_iface) {
		seq_puts(seq, "Interface not known to B.A.T.M.A.N.\n");
		goto out;
	}

	bat_priv = netdev_priv(hard_iface->soft_iface);
	if (!bat_priv->bat_algo_ops->bat_orig_print) {
		seq_puts(seq,
			 "No printing function for this routing protocol\n");
		goto out;
	}

	if (hard_iface->if_status != BATADV_IF_ACTIVE) {
		seq_puts(seq, "Interface not active\n");
		goto out;
	}

	seq_printf(seq, "[B.A.T.M.A.N. adv %s, IF/MAC: %s/%pM (%s %s)]\n",
		   BATADV_SOURCE_VERSION, hard_iface->net_dev->name,
		   hard_iface->net_dev->dev_addr,
		   hard_iface->soft_iface->name, bat_priv->bat_algo_ops->name);

	bat_priv->bat_algo_ops->bat_orig_print(bat_priv, seq, hard_iface);

out:
	if (hard_iface)
		batadv_hardif_free_ref(hard_iface);
	return 0;
}

int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface,
			    int max_if_num)
{
	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;
	struct batadv_hashtable *hash = bat_priv->orig_hash;
	struct hlist_head *head;
	struct batadv_orig_node *orig_node;
	u32 i;
	int ret;

	/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
	 * if_num
	 */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
			ret = 0;
			if (bao->bat_orig_add_if)
				ret = bao->bat_orig_add_if(orig_node,
							   max_if_num);
			if (ret == -ENOMEM)
				goto err;
		}
		rcu_read_unlock();
	}

	return 0;

err:
	rcu_read_unlock();
	return -ENOMEM;
}

int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface,
			    int max_if_num)
{
	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct batadv_hashtable *hash = bat_priv->orig_hash;
	struct hlist_head *head;
	struct batadv_hard_iface *hard_iface_tmp;
	struct batadv_orig_node *orig_node;
	struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;
	u32 i;
	int ret;

	/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
	 * if_num
	 */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
			ret = 0;
			if (bao->bat_orig_del_if)
				ret = bao->bat_orig_del_if(orig_node,
							   max_if_num,
							   hard_iface->if_num);
			if (ret == -ENOMEM)
				goto err;
		}
		rcu_read_unlock();
	}

	/* renumber remaining batman interfaces _inside_ of orig_hash_lock */
	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface_tmp, &batadv_hardif_list, list) {
		if (hard_iface_tmp->if_status == BATADV_IF_NOT_IN_USE)
			continue;

		if (hard_iface == hard_iface_tmp)
			continue;

		if (hard_iface->soft_iface != hard_iface_tmp->soft_iface)
			continue;

		if (hard_iface_tmp->if_num > hard_iface->if_num)
			hard_iface_tmp->if_num--;
	}
	rcu_read_unlock();

	hard_iface->if_num = -1;
	return 0;

err:
	rcu_read_unlock();
	return -ENOMEM;
}