batman-adv: remove obsolete deleted attribute for gateway node
net/batman-adv/originator.c
1 /* Copyright (C) 2009-2015 B.A.T.M.A.N. contributors:
2 *
3 * Marek Lindner, Simon Wunderlich
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of version 2 of the GNU General Public
7 * License as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, see <http://www.gnu.org/licenses/>.
16 */
17
18 #include "originator.h"
19 #include "main.h"
20
21 #include <linux/errno.h>
22 #include <linux/etherdevice.h>
23 #include <linux/fs.h>
24 #include <linux/jiffies.h>
25 #include <linux/kernel.h>
26 #include <linux/list.h>
27 #include <linux/lockdep.h>
28 #include <linux/netdevice.h>
29 #include <linux/rculist.h>
30 #include <linux/seq_file.h>
31 #include <linux/slab.h>
32 #include <linux/spinlock.h>
33 #include <linux/workqueue.h>
34
35 #include "distributed-arp-table.h"
36 #include "fragmentation.h"
37 #include "gateway_client.h"
38 #include "hard-interface.h"
39 #include "hash.h"
40 #include "multicast.h"
41 #include "network-coding.h"
42 #include "routing.h"
43 #include "translation-table.h"
44
45 /* hash class keys */
46 static struct lock_class_key batadv_orig_hash_lock_class_key;
47
48 static void batadv_purge_orig(struct work_struct *work);
49
50 /* returns 1 if they are the same originator */
51 int batadv_compare_orig(const struct hlist_node *node, const void *data2)
52 {
53 const void *data1 = container_of(node, struct batadv_orig_node,
54 hash_entry);
55
56 return batadv_compare_eth(data1, data2);
57 }
58
59 /**
60 * batadv_orig_node_vlan_get - get an orig_node_vlan object
61 * @orig_node: the originator serving the VLAN
62 * @vid: the VLAN identifier
63 *
64 * Returns the vlan object identified by vid and belonging to orig_node or NULL
65 * if it does not exist. The object is returned with refcounter increased by 1.
66 */
67 struct batadv_orig_node_vlan *
68 batadv_orig_node_vlan_get(struct batadv_orig_node *orig_node,
69 unsigned short vid)
70 {
71 struct batadv_orig_node_vlan *vlan = NULL, *tmp;
72
73 rcu_read_lock();
74 hlist_for_each_entry_rcu(tmp, &orig_node->vlan_list, list) {
75 if (tmp->vid != vid)
76 continue;
77
78 if (!atomic_inc_not_zero(&tmp->refcount))
79 continue;
80
81 vlan = tmp;
82
83 break;
84 }
85 rcu_read_unlock();
86
87 return vlan;
88 }
89
90 /**
91 * batadv_orig_node_vlan_new - search and possibly create an orig_node_vlan
92 * object
93 * @orig_node: the originator serving the VLAN
94 * @vid: the VLAN identifier
95 *
96 * Returns NULL in case of failure or the vlan object identified by vid and
97 * belonging to orig_node otherwise. The object is created and added to the list
98 * if it does not exist.
99 *
100 * The object is returned with refcounter increased by 1.
101 */
102 struct batadv_orig_node_vlan *
103 batadv_orig_node_vlan_new(struct batadv_orig_node *orig_node,
104 unsigned short vid)
105 {
106 struct batadv_orig_node_vlan *vlan;
107
108 spin_lock_bh(&orig_node->vlan_list_lock);
109
110 /* first look if an object for this vid already exists */
111 vlan = batadv_orig_node_vlan_get(orig_node, vid);
112 if (vlan)
113 goto out;
114
115 vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC);
116 if (!vlan)
117 goto out;
118
119 atomic_set(&vlan->refcount, 2);
120 vlan->vid = vid;
121
122 hlist_add_head_rcu(&vlan->list, &orig_node->vlan_list);
123
124 out:
125 spin_unlock_bh(&orig_node->vlan_list_lock);
126
127 return vlan;
128 }
129
130 /**
131 * batadv_orig_node_vlan_free_ref - decrement the refcounter and possibly free
132 * the originator-vlan object
133 * @orig_vlan: the originator-vlan object to release
134 */
135 void batadv_orig_node_vlan_free_ref(struct batadv_orig_node_vlan *orig_vlan)
136 {
137 if (atomic_dec_and_test(&orig_vlan->refcount))
138 kfree_rcu(orig_vlan, rcu);
139 }
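/* Usage sketch (illustrative only, not part of the original source): the
 * three helpers above follow the usual batman-adv refcounting pattern.
 * batadv_orig_node_vlan_get() and batadv_orig_node_vlan_new() both return
 * the vlan object with its refcount already incremented, so every
 * successful call must be paired with batadv_orig_node_vlan_free_ref():
 *
 *	vlan = batadv_orig_node_vlan_get(orig_node, vid);
 *	if (vlan) {
 *		... read or update the per-VLAN state ...
 *		batadv_orig_node_vlan_free_ref(vlan);
 *	}
 */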
140
141 int batadv_originator_init(struct batadv_priv *bat_priv)
142 {
143 if (bat_priv->orig_hash)
144 return 0;
145
146 bat_priv->orig_hash = batadv_hash_new(1024);
147
148 if (!bat_priv->orig_hash)
149 goto err;
150
151 batadv_hash_set_lock_class(bat_priv->orig_hash,
152 &batadv_orig_hash_lock_class_key);
153
154 INIT_DELAYED_WORK(&bat_priv->orig_work, batadv_purge_orig);
155 queue_delayed_work(batadv_event_workqueue,
156 &bat_priv->orig_work,
157 msecs_to_jiffies(BATADV_ORIG_WORK_PERIOD));
158
159 return 0;
160
161 err:
162 return -ENOMEM;
163 }
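/* Note: batadv_originator_init() is safe to call more than once - it bails
 * out early when the originator hash already exists. On first use it
 * allocates a 1024-bucket hash and arms the delayed work that runs
 * batadv_purge_orig() every BATADV_ORIG_WORK_PERIOD milliseconds (see the
 * purge functions at the end of this file).
 */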
164
165 /**
166 * batadv_neigh_ifinfo_free_rcu - free the neigh_ifinfo object
167 * @rcu: rcu pointer of the neigh_ifinfo object
168 */
169 static void batadv_neigh_ifinfo_free_rcu(struct rcu_head *rcu)
170 {
171 struct batadv_neigh_ifinfo *neigh_ifinfo;
172
173 neigh_ifinfo = container_of(rcu, struct batadv_neigh_ifinfo, rcu);
174
175 if (neigh_ifinfo->if_outgoing != BATADV_IF_DEFAULT)
176 batadv_hardif_free_ref_now(neigh_ifinfo->if_outgoing);
177
178 kfree(neigh_ifinfo);
179 }
180
181 /**
182 * batadv_neigh_ifinfo_free_ref_now - decrement the refcounter and possibly free
183 * the neigh_ifinfo (without rcu callback)
184 * @neigh_ifinfo: the neigh_ifinfo object to release
185 */
186 static void
187 batadv_neigh_ifinfo_free_ref_now(struct batadv_neigh_ifinfo *neigh_ifinfo)
188 {
189 if (atomic_dec_and_test(&neigh_ifinfo->refcount))
190 batadv_neigh_ifinfo_free_rcu(&neigh_ifinfo->rcu);
191 }
192
193 /**
194 * batadv_neigh_ifinfo_free_ref - decrement the refcounter and possibly free
195 * the neigh_ifinfo
196 * @neigh_ifinfo: the neigh_ifinfo object to release
197 */
198 void batadv_neigh_ifinfo_free_ref(struct batadv_neigh_ifinfo *neigh_ifinfo)
199 {
200 if (atomic_dec_and_test(&neigh_ifinfo->refcount))
201 call_rcu(&neigh_ifinfo->rcu, batadv_neigh_ifinfo_free_rcu);
202 }
203
204 /**
205 * batadv_neigh_node_free_rcu - free the neigh_node
206 * @rcu: rcu pointer of the neigh_node
207 */
208 static void batadv_neigh_node_free_rcu(struct rcu_head *rcu)
209 {
210 struct hlist_node *node_tmp;
211 struct batadv_neigh_node *neigh_node;
212 struct batadv_neigh_ifinfo *neigh_ifinfo;
213 struct batadv_algo_ops *bao;
214
215 neigh_node = container_of(rcu, struct batadv_neigh_node, rcu);
216 bao = neigh_node->orig_node->bat_priv->bat_algo_ops;
217
218 hlist_for_each_entry_safe(neigh_ifinfo, node_tmp,
219 &neigh_node->ifinfo_list, list) {
220 batadv_neigh_ifinfo_free_ref_now(neigh_ifinfo);
221 }
222
223 if (bao->bat_neigh_free)
224 bao->bat_neigh_free(neigh_node);
225
226 batadv_hardif_free_ref_now(neigh_node->if_incoming);
227
228 kfree(neigh_node);
229 }
230
231 /**
232 * batadv_neigh_node_free_ref_now - decrement the neighbor's refcounter
233 * and possibly free it (without rcu callback)
234 * @neigh_node: the neighbor node to free
235 */
236 static void
237 batadv_neigh_node_free_ref_now(struct batadv_neigh_node *neigh_node)
238 {
239 if (atomic_dec_and_test(&neigh_node->refcount))
240 batadv_neigh_node_free_rcu(&neigh_node->rcu);
241 }
242
243 /**
244 * batadv_neigh_node_free_ref - decrement the neighbor's refcounter
245 * and possibly free it
246 * @neigh_node: the neighbor node to free
247 */
248 void batadv_neigh_node_free_ref(struct batadv_neigh_node *neigh_node)
249 {
250 if (atomic_dec_and_test(&neigh_node->refcount))
251 call_rcu(&neigh_node->rcu, batadv_neigh_node_free_rcu);
252 }
253
254 /**
255 * batadv_orig_router_get - router to the originator depending on iface
256 * @orig_node: the orig node for the router
257 * @if_outgoing: the interface where the payload packet has been received or
258 * the OGM should be sent to
259 *
260 * Returns the neighbor which should be router for this orig_node/iface.
261 *
262 * The object is returned with refcounter increased by 1.
263 */
264 struct batadv_neigh_node *
265 batadv_orig_router_get(struct batadv_orig_node *orig_node,
266 const struct batadv_hard_iface *if_outgoing)
267 {
268 struct batadv_orig_ifinfo *orig_ifinfo;
269 struct batadv_neigh_node *router = NULL;
270
271 rcu_read_lock();
272 hlist_for_each_entry_rcu(orig_ifinfo, &orig_node->ifinfo_list, list) {
273 if (orig_ifinfo->if_outgoing != if_outgoing)
274 continue;
275
276 router = rcu_dereference(orig_ifinfo->router);
277 break;
278 }
279
280 if (router && !atomic_inc_not_zero(&router->refcount))
281 router = NULL;
282
283 rcu_read_unlock();
284 return router;
285 }
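/* Usage sketch (illustrative only, not code from this file): a caller that
 * needs the current route towards orig_node on the default interface would
 * do something like
 *
 *	router = batadv_orig_router_get(orig_node, BATADV_IF_DEFAULT);
 *	if (router) {
 *		... forward towards router->addr via router->if_incoming ...
 *		batadv_neigh_node_free_ref(router);
 *	}
 *
 * and must always drop the reference taken by this helper.
 */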
286
287 /**
288 * batadv_orig_ifinfo_get - find the ifinfo from an orig_node
289 * @orig_node: the orig node to be queried
290 * @if_outgoing: the interface for which the ifinfo should be acquired
291 *
292 * Returns the requested orig_ifinfo or NULL if not found.
293 *
294 * The object is returned with refcounter increased by 1.
295 */
296 struct batadv_orig_ifinfo *
297 batadv_orig_ifinfo_get(struct batadv_orig_node *orig_node,
298 struct batadv_hard_iface *if_outgoing)
299 {
300 struct batadv_orig_ifinfo *tmp, *orig_ifinfo = NULL;
301
302 rcu_read_lock();
303 hlist_for_each_entry_rcu(tmp, &orig_node->ifinfo_list,
304 list) {
305 if (tmp->if_outgoing != if_outgoing)
306 continue;
307
308 if (!atomic_inc_not_zero(&tmp->refcount))
309 continue;
310
311 orig_ifinfo = tmp;
312 break;
313 }
314 rcu_read_unlock();
315
316 return orig_ifinfo;
317 }
318
319 /**
320 * batadv_orig_ifinfo_new - search and possibly create an orig_ifinfo object
321 * @orig_node: the orig node to be queried
322 * @if_outgoing: the interface for which the ifinfo should be acquired
323 *
324 * Returns NULL in case of failure or the orig_ifinfo object for the if_outgoing
325 * interface otherwise. The object is created and added to the list
326 * if it does not exist.
327 *
328 * The object is returned with refcounter increased by 1.
329 */
330 struct batadv_orig_ifinfo *
331 batadv_orig_ifinfo_new(struct batadv_orig_node *orig_node,
332 struct batadv_hard_iface *if_outgoing)
333 {
334 struct batadv_orig_ifinfo *orig_ifinfo = NULL;
335 unsigned long reset_time;
336
337 spin_lock_bh(&orig_node->neigh_list_lock);
338
339 orig_ifinfo = batadv_orig_ifinfo_get(orig_node, if_outgoing);
340 if (orig_ifinfo)
341 goto out;
342
343 orig_ifinfo = kzalloc(sizeof(*orig_ifinfo), GFP_ATOMIC);
344 if (!orig_ifinfo)
345 goto out;
346
347 if (if_outgoing != BATADV_IF_DEFAULT &&
348 !atomic_inc_not_zero(&if_outgoing->refcount)) {
349 kfree(orig_ifinfo);
350 orig_ifinfo = NULL;
351 goto out;
352 }
353
354 reset_time = jiffies - 1;
355 reset_time -= msecs_to_jiffies(BATADV_RESET_PROTECTION_MS);
356 orig_ifinfo->batman_seqno_reset = reset_time;
357 orig_ifinfo->if_outgoing = if_outgoing;
358 INIT_HLIST_NODE(&orig_ifinfo->list);
359 atomic_set(&orig_ifinfo->refcount, 2);
360 hlist_add_head_rcu(&orig_ifinfo->list,
361 &orig_node->ifinfo_list);
362 out:
363 spin_unlock_bh(&orig_node->neigh_list_lock);
364 return orig_ifinfo;
365 }
366
367 /**
368 * batadv_neigh_ifinfo_get - find the ifinfo from a neigh_node
369 * @neigh: the neigh node to be queried
370 * @if_outgoing: the interface for which the ifinfo should be acquired
371 *
372 * The object is returned with refcounter increased by 1.
373 *
374 * Returns the requested neigh_ifinfo or NULL if not found
375 */
376 struct batadv_neigh_ifinfo *
377 batadv_neigh_ifinfo_get(struct batadv_neigh_node *neigh,
378 struct batadv_hard_iface *if_outgoing)
379 {
380 struct batadv_neigh_ifinfo *neigh_ifinfo = NULL,
381 *tmp_neigh_ifinfo;
382
383 rcu_read_lock();
384 hlist_for_each_entry_rcu(tmp_neigh_ifinfo, &neigh->ifinfo_list,
385 list) {
386 if (tmp_neigh_ifinfo->if_outgoing != if_outgoing)
387 continue;
388
389 if (!atomic_inc_not_zero(&tmp_neigh_ifinfo->refcount))
390 continue;
391
392 neigh_ifinfo = tmp_neigh_ifinfo;
393 break;
394 }
395 rcu_read_unlock();
396
397 return neigh_ifinfo;
398 }
399
400 /**
401 * batadv_neigh_ifinfo_new - search and possibly create a neigh_ifinfo object
402 * @neigh: the neigh node to be queried
403 * @if_outgoing: the interface for which the ifinfo should be acquired
404 *
405 * Returns NULL in case of failure or the neigh_ifinfo object for the
406 * if_outgoing interface otherwise. The object is created and added to the list
407 * if it does not exist.
408 *
409 * The object is returned with refcounter increased by 1.
410 */
411 struct batadv_neigh_ifinfo *
412 batadv_neigh_ifinfo_new(struct batadv_neigh_node *neigh,
413 struct batadv_hard_iface *if_outgoing)
414 {
415 struct batadv_neigh_ifinfo *neigh_ifinfo;
416
417 spin_lock_bh(&neigh->ifinfo_lock);
418
419 neigh_ifinfo = batadv_neigh_ifinfo_get(neigh, if_outgoing);
420 if (neigh_ifinfo)
421 goto out;
422
423 neigh_ifinfo = kzalloc(sizeof(*neigh_ifinfo), GFP_ATOMIC);
424 if (!neigh_ifinfo)
425 goto out;
426
427 if (if_outgoing && !atomic_inc_not_zero(&if_outgoing->refcount)) {
428 kfree(neigh_ifinfo);
429 neigh_ifinfo = NULL;
430 goto out;
431 }
432
433 INIT_HLIST_NODE(&neigh_ifinfo->list);
434 atomic_set(&neigh_ifinfo->refcount, 2);
435 neigh_ifinfo->if_outgoing = if_outgoing;
436
437 hlist_add_head_rcu(&neigh_ifinfo->list, &neigh->ifinfo_list);
438
439 out:
440 spin_unlock_bh(&neigh->ifinfo_lock);
441
442 return neigh_ifinfo;
443 }
444
445 /**
446 * batadv_neigh_node_new - create and init a new neigh_node object
447 * @hard_iface: the interface to which the neighbour is connected
448 * @neigh_addr: the mac address of the neighbour interface
449 * @orig_node: originator object representing the neighbour
450 *
451 * Allocates a new neigh_node object and initialises all the generic fields.
452 * Returns the new object or NULL on failure.
453 */
454 struct batadv_neigh_node *
455 batadv_neigh_node_new(struct batadv_hard_iface *hard_iface,
456 const u8 *neigh_addr, struct batadv_orig_node *orig_node)
457 {
458 struct batadv_neigh_node *neigh_node;
459
460 neigh_node = batadv_neigh_node_get(orig_node, hard_iface, neigh_addr);
461 if (neigh_node)
462 goto out;
463
464 neigh_node = kzalloc(sizeof(*neigh_node), GFP_ATOMIC);
465 if (!neigh_node)
466 goto out;
467
468 if (!atomic_inc_not_zero(&hard_iface->refcount)) {
469 kfree(neigh_node);
470 neigh_node = NULL;
471 goto out;
472 }
473
474 INIT_HLIST_NODE(&neigh_node->list);
475 INIT_HLIST_HEAD(&neigh_node->ifinfo_list);
476 spin_lock_init(&neigh_node->ifinfo_lock);
477
478 ether_addr_copy(neigh_node->addr, neigh_addr);
479 neigh_node->if_incoming = hard_iface;
480 neigh_node->orig_node = orig_node;
481
482 /* extra reference for return */
483 atomic_set(&neigh_node->refcount, 2);
484
485 spin_lock_bh(&orig_node->neigh_list_lock);
486 hlist_add_head_rcu(&neigh_node->list, &orig_node->neigh_list);
487 spin_unlock_bh(&orig_node->neigh_list_lock);
488
489 batadv_dbg(BATADV_DBG_BATMAN, orig_node->bat_priv,
490 "Creating new neighbor %pM for orig_node %pM on interface %s\n",
491 neigh_addr, orig_node->orig, hard_iface->net_dev->name);
492
493 out:
494 return neigh_node;
495 }
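/* Note: batadv_neigh_node_new() re-uses an already known neighbor when
 * batadv_neigh_node_get() finds one; only otherwise is a fresh object
 * allocated with a refcount of 2 (one for the neigh_list, one for the
 * returned pointer). Either way the caller owns one reference and has to
 * release it with batadv_neigh_node_free_ref().
 */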
496
497 /**
498 * batadv_neigh_node_get - retrieve a neighbour from the list
499 * @orig_node: originator which the neighbour belongs to
500 * @hard_iface: the interface to which this neighbour is connected
501 * @addr: the address of the neighbour
502 *
503 * Looks for and possibly returns a neighbour belonging to this originator list
504 * which is connected through the provided hard interface.
505 * Returns NULL if the neighbour is not found.
506 */
507 struct batadv_neigh_node *
508 batadv_neigh_node_get(const struct batadv_orig_node *orig_node,
509 const struct batadv_hard_iface *hard_iface,
510 const u8 *addr)
511 {
512 struct batadv_neigh_node *tmp_neigh_node, *res = NULL;
513
514 rcu_read_lock();
515 hlist_for_each_entry_rcu(tmp_neigh_node, &orig_node->neigh_list, list) {
516 if (!batadv_compare_eth(tmp_neigh_node->addr, addr))
517 continue;
518
519 if (tmp_neigh_node->if_incoming != hard_iface)
520 continue;
521
522 if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
523 continue;
524
525 res = tmp_neigh_node;
526 break;
527 }
528 rcu_read_unlock();
529
530 return res;
531 }
532
533 /**
534 * batadv_orig_ifinfo_free_rcu - free the orig_ifinfo object
535 * @rcu: rcu pointer of the orig_ifinfo object
536 */
537 static void batadv_orig_ifinfo_free_rcu(struct rcu_head *rcu)
538 {
539 struct batadv_orig_ifinfo *orig_ifinfo;
540 struct batadv_neigh_node *router;
541
542 orig_ifinfo = container_of(rcu, struct batadv_orig_ifinfo, rcu);
543
544 if (orig_ifinfo->if_outgoing != BATADV_IF_DEFAULT)
545 batadv_hardif_free_ref_now(orig_ifinfo->if_outgoing);
546
547 /* this is the last reference to this object */
548 router = rcu_dereference_protected(orig_ifinfo->router, true);
549 if (router)
550 batadv_neigh_node_free_ref_now(router);
551 kfree(orig_ifinfo);
552 }
553
554 /**
555 * batadv_orig_ifinfo_free_ref_now - decrement the refcounter and possibly free
556 * the orig_ifinfo (without rcu callback)
557 * @orig_ifinfo: the orig_ifinfo object to release
558 */
559 static void
560 batadv_orig_ifinfo_free_ref_now(struct batadv_orig_ifinfo *orig_ifinfo)
561 {
562 if (atomic_dec_and_test(&orig_ifinfo->refcount))
563 batadv_orig_ifinfo_free_rcu(&orig_ifinfo->rcu);
564 }
565
566 /**
567 * batadv_orig_ifinfo_free_ref - decrement the refcounter and possibly free
568 * the orig_ifinfo
569 * @orig_ifinfo: the orig_ifinfo object to release
570 */
571 void batadv_orig_ifinfo_free_ref(struct batadv_orig_ifinfo *orig_ifinfo)
572 {
573 if (atomic_dec_and_test(&orig_ifinfo->refcount))
574 call_rcu(&orig_ifinfo->rcu, batadv_orig_ifinfo_free_rcu);
575 }
576
577 static void batadv_orig_node_free_rcu(struct rcu_head *rcu)
578 {
579 struct hlist_node *node_tmp;
580 struct batadv_neigh_node *neigh_node;
581 struct batadv_orig_node *orig_node;
582 struct batadv_orig_ifinfo *orig_ifinfo;
583
584 orig_node = container_of(rcu, struct batadv_orig_node, rcu);
585
586 spin_lock_bh(&orig_node->neigh_list_lock);
587
588 /* for all neighbors towards this originator ... */
589 hlist_for_each_entry_safe(neigh_node, node_tmp,
590 &orig_node->neigh_list, list) {
591 hlist_del_rcu(&neigh_node->list);
592 batadv_neigh_node_free_ref_now(neigh_node);
593 }
594
595 hlist_for_each_entry_safe(orig_ifinfo, node_tmp,
596 &orig_node->ifinfo_list, list) {
597 hlist_del_rcu(&orig_ifinfo->list);
598 batadv_orig_ifinfo_free_ref_now(orig_ifinfo);
599 }
600 spin_unlock_bh(&orig_node->neigh_list_lock);
601
602 batadv_mcast_purge_orig(orig_node);
603
604 /* Free nc_nodes */
605 batadv_nc_purge_orig(orig_node->bat_priv, orig_node, NULL);
606
607 batadv_frag_purge_orig(orig_node, NULL);
608
609 if (orig_node->bat_priv->bat_algo_ops->bat_orig_free)
610 orig_node->bat_priv->bat_algo_ops->bat_orig_free(orig_node);
611
612 kfree(orig_node->tt_buff);
613 kfree(orig_node);
614 }
615
616 /**
617 * batadv_orig_node_free_ref - decrement the orig node refcounter and possibly
618 * schedule an rcu callback for freeing it
619 * @orig_node: the orig node to free
620 */
621 void batadv_orig_node_free_ref(struct batadv_orig_node *orig_node)
622 {
623 if (atomic_dec_and_test(&orig_node->refcount))
624 call_rcu(&orig_node->rcu, batadv_orig_node_free_rcu);
625 }
626
627 /**
628 * batadv_orig_node_free_ref_now - decrement the orig node refcounter and
629 * possibly free it (without rcu callback)
630 * @orig_node: the orig node to free
631 */
632 void batadv_orig_node_free_ref_now(struct batadv_orig_node *orig_node)
633 {
634 if (atomic_dec_and_test(&orig_node->refcount))
635 batadv_orig_node_free_rcu(&orig_node->rcu);
636 }
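/* Note on the *_free_ref() vs. *_free_ref_now() pairs in this file: the
 * plain variants defer the actual freeing through call_rcu(), while the
 * _now variants free immediately. The _now variants are typically used
 * where the object is already unreachable for RCU readers, e.g. when
 * batadv_orig_node_free_rcu() above releases its neigh_node and
 * orig_ifinfo children from within an RCU callback.
 */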
637
638 void batadv_originator_free(struct batadv_priv *bat_priv)
639 {
640 struct batadv_hashtable *hash = bat_priv->orig_hash;
641 struct hlist_node *node_tmp;
642 struct hlist_head *head;
643 spinlock_t *list_lock; /* spinlock to protect write access */
644 struct batadv_orig_node *orig_node;
645 u32 i;
646
647 if (!hash)
648 return;
649
650 cancel_delayed_work_sync(&bat_priv->orig_work);
651
652 bat_priv->orig_hash = NULL;
653
654 for (i = 0; i < hash->size; i++) {
655 head = &hash->table[i];
656 list_lock = &hash->list_locks[i];
657
658 spin_lock_bh(list_lock);
659 hlist_for_each_entry_safe(orig_node, node_tmp,
660 head, hash_entry) {
661 hlist_del_rcu(&orig_node->hash_entry);
662 batadv_orig_node_free_ref(orig_node);
663 }
664 spin_unlock_bh(list_lock);
665 }
666
667 batadv_hash_destroy(hash);
668 }
669
670 /**
671 * batadv_orig_node_new - creates a new orig_node
672 * @bat_priv: the bat priv with all the soft interface information
673 * @addr: the mac address of the originator
674 *
675 * Creates a new originator object and initialise all the generic fields.
676 * The new object is not added to the originator list.
677 * Returns the newly created object or NULL on failure.
678 */
679 struct batadv_orig_node *batadv_orig_node_new(struct batadv_priv *bat_priv,
680 const u8 *addr)
681 {
682 struct batadv_orig_node *orig_node;
683 struct batadv_orig_node_vlan *vlan;
684 unsigned long reset_time;
685 int i;
686
687 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
688 "Creating new originator: %pM\n", addr);
689
690 orig_node = kzalloc(sizeof(*orig_node), GFP_ATOMIC);
691 if (!orig_node)
692 return NULL;
693
694 INIT_HLIST_HEAD(&orig_node->neigh_list);
695 INIT_HLIST_HEAD(&orig_node->vlan_list);
696 INIT_HLIST_HEAD(&orig_node->ifinfo_list);
697 spin_lock_init(&orig_node->bcast_seqno_lock);
698 spin_lock_init(&orig_node->neigh_list_lock);
699 spin_lock_init(&orig_node->tt_buff_lock);
700 spin_lock_init(&orig_node->tt_lock);
701 spin_lock_init(&orig_node->vlan_list_lock);
702
703 batadv_nc_init_orig(orig_node);
704
705 /* extra reference for return */
706 atomic_set(&orig_node->refcount, 2);
707
708 orig_node->bat_priv = bat_priv;
709 ether_addr_copy(orig_node->orig, addr);
710 batadv_dat_init_orig_node_addr(orig_node);
711 atomic_set(&orig_node->last_ttvn, 0);
712 orig_node->tt_buff = NULL;
713 orig_node->tt_buff_len = 0;
714 orig_node->last_seen = jiffies;
715 reset_time = jiffies - 1 - msecs_to_jiffies(BATADV_RESET_PROTECTION_MS);
716 orig_node->bcast_seqno_reset = reset_time;
717
718 #ifdef CONFIG_BATMAN_ADV_MCAST
719 orig_node->mcast_flags = BATADV_NO_FLAGS;
720 INIT_HLIST_NODE(&orig_node->mcast_want_all_unsnoopables_node);
721 INIT_HLIST_NODE(&orig_node->mcast_want_all_ipv4_node);
722 INIT_HLIST_NODE(&orig_node->mcast_want_all_ipv6_node);
723 spin_lock_init(&orig_node->mcast_handler_lock);
724 #endif
725
726 /* create a vlan object for the "untagged" LAN */
727 vlan = batadv_orig_node_vlan_new(orig_node, BATADV_NO_FLAGS);
728 if (!vlan)
729 goto free_orig_node;
730 /* batadv_orig_node_vlan_new() increases the refcounter.
731 * Immediately release vlan since it is not needed anymore in this
732 * context
733 */
734 batadv_orig_node_vlan_free_ref(vlan);
735
736 for (i = 0; i < BATADV_FRAG_BUFFER_COUNT; i++) {
737 INIT_HLIST_HEAD(&orig_node->fragments[i].head);
738 spin_lock_init(&orig_node->fragments[i].lock);
739 orig_node->fragments[i].size = 0;
740 }
741
742 return orig_node;
743 free_orig_node:
744 kfree(orig_node);
745 return NULL;
746 }
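/* Note: as the kernel-doc above states, the new orig_node is not added to
 * the originator hash here; that is left to the calling routing algorithm.
 * The refcount starts at 2 and the reference handed back to the caller has
 * to be dropped with batadv_orig_node_free_ref() once it is no longer
 * needed.
 */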
747
748 /**
749 * batadv_purge_neigh_ifinfo - purge obsolete ifinfo entries from neighbor
750 * @bat_priv: the bat priv with all the soft interface information
751 * @neigh: the neighbor node whose ifinfo entries are to be checked
752 */
753 static void
754 batadv_purge_neigh_ifinfo(struct batadv_priv *bat_priv,
755 struct batadv_neigh_node *neigh)
756 {
757 struct batadv_neigh_ifinfo *neigh_ifinfo;
758 struct batadv_hard_iface *if_outgoing;
759 struct hlist_node *node_tmp;
760
761 spin_lock_bh(&neigh->ifinfo_lock);
762
763 /* for all ifinfo objects for this neighbor */
764 hlist_for_each_entry_safe(neigh_ifinfo, node_tmp,
765 &neigh->ifinfo_list, list) {
766 if_outgoing = neigh_ifinfo->if_outgoing;
767
768 /* always keep the default interface */
769 if (if_outgoing == BATADV_IF_DEFAULT)
770 continue;
771
772 /* don't purge if the interface is not (going) down */
773 if ((if_outgoing->if_status != BATADV_IF_INACTIVE) &&
774 (if_outgoing->if_status != BATADV_IF_NOT_IN_USE) &&
775 (if_outgoing->if_status != BATADV_IF_TO_BE_REMOVED))
776 continue;
777
778 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
779 "neighbor/ifinfo purge: neighbor %pM, iface: %s\n",
780 neigh->addr, if_outgoing->net_dev->name);
781
782 hlist_del_rcu(&neigh_ifinfo->list);
783 batadv_neigh_ifinfo_free_ref(neigh_ifinfo);
784 }
785
786 spin_unlock_bh(&neigh->ifinfo_lock);
787 }
788
789 /**
790 * batadv_purge_orig_ifinfo - purge obsolete ifinfo entries from originator
791 * @bat_priv: the bat priv with all the soft interface information
792 * @orig_node: orig node which is to be checked
793 *
794 * Returns true if any ifinfo entry was purged, false otherwise.
795 */
796 static bool
797 batadv_purge_orig_ifinfo(struct batadv_priv *bat_priv,
798 struct batadv_orig_node *orig_node)
799 {
800 struct batadv_orig_ifinfo *orig_ifinfo;
801 struct batadv_hard_iface *if_outgoing;
802 struct hlist_node *node_tmp;
803 bool ifinfo_purged = false;
804
805 spin_lock_bh(&orig_node->neigh_list_lock);
806
807 /* for all ifinfo objects for this originator */
808 hlist_for_each_entry_safe(orig_ifinfo, node_tmp,
809 &orig_node->ifinfo_list, list) {
810 if_outgoing = orig_ifinfo->if_outgoing;
811
812 /* always keep the default interface */
813 if (if_outgoing == BATADV_IF_DEFAULT)
814 continue;
815
816 /* don't purge if the interface is not (going) down */
817 if ((if_outgoing->if_status != BATADV_IF_INACTIVE) &&
818 (if_outgoing->if_status != BATADV_IF_NOT_IN_USE) &&
819 (if_outgoing->if_status != BATADV_IF_TO_BE_REMOVED))
820 continue;
821
822 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
823 "router/ifinfo purge: originator %pM, iface: %s\n",
824 orig_node->orig, if_outgoing->net_dev->name);
825
826 ifinfo_purged = true;
827
828 hlist_del_rcu(&orig_ifinfo->list);
829 batadv_orig_ifinfo_free_ref(orig_ifinfo);
830 if (orig_node->last_bonding_candidate == orig_ifinfo) {
831 orig_node->last_bonding_candidate = NULL;
832 batadv_orig_ifinfo_free_ref(orig_ifinfo);
833 }
834 }
835
836 spin_unlock_bh(&orig_node->neigh_list_lock);
837
838 return ifinfo_purged;
839 }
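/* Note: the double batadv_orig_ifinfo_free_ref() above is intentional -
 * the first call drops the reference held by orig_node->ifinfo_list, the
 * second one drops the extra reference held via
 * orig_node->last_bonding_candidate when the purged entry happened to be
 * that candidate.
 */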
840
841 /**
842 * batadv_purge_orig_neighbors - purges neighbors from originator
843 * @bat_priv: the bat priv with all the soft interface information
844 * @orig_node: orig node which is to be checked
845 *
846 * Returns true if any neighbor was purged, false otherwise
847 */
848 static bool
849 batadv_purge_orig_neighbors(struct batadv_priv *bat_priv,
850 struct batadv_orig_node *orig_node)
851 {
852 struct hlist_node *node_tmp;
853 struct batadv_neigh_node *neigh_node;
854 bool neigh_purged = false;
855 unsigned long last_seen;
856 struct batadv_hard_iface *if_incoming;
857
858 spin_lock_bh(&orig_node->neigh_list_lock);
859
860 /* for all neighbors towards this originator ... */
861 hlist_for_each_entry_safe(neigh_node, node_tmp,
862 &orig_node->neigh_list, list) {
863 last_seen = neigh_node->last_seen;
864 if_incoming = neigh_node->if_incoming;
865
866 if ((batadv_has_timed_out(last_seen, BATADV_PURGE_TIMEOUT)) ||
867 (if_incoming->if_status == BATADV_IF_INACTIVE) ||
868 (if_incoming->if_status == BATADV_IF_NOT_IN_USE) ||
869 (if_incoming->if_status == BATADV_IF_TO_BE_REMOVED)) {
870 if ((if_incoming->if_status == BATADV_IF_INACTIVE) ||
871 (if_incoming->if_status == BATADV_IF_NOT_IN_USE) ||
872 (if_incoming->if_status == BATADV_IF_TO_BE_REMOVED))
873 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
874 "neighbor purge: originator %pM, neighbor: %pM, iface: %s\n",
875 orig_node->orig, neigh_node->addr,
876 if_incoming->net_dev->name);
877 else
878 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
879 "neighbor timeout: originator %pM, neighbor: %pM, last_seen: %u\n",
880 orig_node->orig, neigh_node->addr,
881 jiffies_to_msecs(last_seen));
882
883 neigh_purged = true;
884
885 hlist_del_rcu(&neigh_node->list);
886 batadv_neigh_node_free_ref(neigh_node);
887 } else {
888 /* only necessary if the whole neighbor is not to be
889 * deleted, but only some interface has been removed.
890 */
891 batadv_purge_neigh_ifinfo(bat_priv, neigh_node);
892 }
893 }
894
895 spin_unlock_bh(&orig_node->neigh_list_lock);
896 return neigh_purged;
897 }
898
899 /**
900 * batadv_find_best_neighbor - finds the best neighbor after purging
901 * @bat_priv: the bat priv with all the soft interface information
902 * @orig_node: orig node which is to be checked
903 * @if_outgoing: the interface for which the metric should be compared
904 *
905 * Returns the current best neighbor, with refcount increased.
906 */
907 static struct batadv_neigh_node *
908 batadv_find_best_neighbor(struct batadv_priv *bat_priv,
909 struct batadv_orig_node *orig_node,
910 struct batadv_hard_iface *if_outgoing)
911 {
912 struct batadv_neigh_node *best = NULL, *neigh;
913 struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;
914
915 rcu_read_lock();
916 hlist_for_each_entry_rcu(neigh, &orig_node->neigh_list, list) {
917 if (best && (bao->bat_neigh_cmp(neigh, if_outgoing,
918 best, if_outgoing) <= 0))
919 continue;
920
921 if (!atomic_inc_not_zero(&neigh->refcount))
922 continue;
923
924 if (best)
925 batadv_neigh_node_free_ref(best);
926
927 best = neigh;
928 }
929 rcu_read_unlock();
930
931 return best;
932 }
933
934 /**
935 * batadv_purge_orig_node - purges obsolete information from an orig_node
936 * @bat_priv: the bat priv with all the soft interface information
937 * @orig_node: orig node which is to be checked
938 *
939 * This function checks if the orig_node or substructures of it have become
940 * obsolete, and purges this information if that's the case.
941 *
942 * Returns true if the orig_node is to be removed, false otherwise.
943 */
944 static bool batadv_purge_orig_node(struct batadv_priv *bat_priv,
945 struct batadv_orig_node *orig_node)
946 {
947 struct batadv_neigh_node *best_neigh_node;
948 struct batadv_hard_iface *hard_iface;
949 bool changed_ifinfo, changed_neigh;
950
951 if (batadv_has_timed_out(orig_node->last_seen,
952 2 * BATADV_PURGE_TIMEOUT)) {
953 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
954 "Originator timeout: originator %pM, last_seen %u\n",
955 orig_node->orig,
956 jiffies_to_msecs(orig_node->last_seen));
957 return true;
958 }
959 changed_ifinfo = batadv_purge_orig_ifinfo(bat_priv, orig_node);
960 changed_neigh = batadv_purge_orig_neighbors(bat_priv, orig_node);
961
962 if (!changed_ifinfo && !changed_neigh)
963 return false;
964
965 /* first for the default interface (BATADV_IF_DEFAULT) ... */
966 best_neigh_node = batadv_find_best_neighbor(bat_priv, orig_node,
967 BATADV_IF_DEFAULT);
968 batadv_update_route(bat_priv, orig_node, BATADV_IF_DEFAULT,
969 best_neigh_node);
970 if (best_neigh_node)
971 batadv_neigh_node_free_ref(best_neigh_node);
972
973 /* ... then for all other interfaces. */
974 rcu_read_lock();
975 list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
976 if (hard_iface->if_status != BATADV_IF_ACTIVE)
977 continue;
978
979 if (hard_iface->soft_iface != bat_priv->soft_iface)
980 continue;
981
982 best_neigh_node = batadv_find_best_neighbor(bat_priv,
983 orig_node,
984 hard_iface);
985 batadv_update_route(bat_priv, orig_node, hard_iface,
986 best_neigh_node);
987 if (best_neigh_node)
988 batadv_neigh_node_free_ref(best_neigh_node);
989 }
990 rcu_read_unlock();
991
992 return false;
993 }
994
995 static void _batadv_purge_orig(struct batadv_priv *bat_priv)
996 {
997 struct batadv_hashtable *hash = bat_priv->orig_hash;
998 struct hlist_node *node_tmp;
999 struct hlist_head *head;
1000 spinlock_t *list_lock; /* spinlock to protect write access */
1001 struct batadv_orig_node *orig_node;
1002 u32 i;
1003
1004 if (!hash)
1005 return;
1006
1007 /* for all origins... */
1008 for (i = 0; i < hash->size; i++) {
1009 head = &hash->table[i];
1010 list_lock = &hash->list_locks[i];
1011
1012 spin_lock_bh(list_lock);
1013 hlist_for_each_entry_safe(orig_node, node_tmp,
1014 head, hash_entry) {
1015 if (batadv_purge_orig_node(bat_priv, orig_node)) {
1016 batadv_gw_node_delete(bat_priv, orig_node);
1017 hlist_del_rcu(&orig_node->hash_entry);
1018 batadv_tt_global_del_orig(orig_node->bat_priv,
1019 orig_node, -1,
1020 "originator timed out");
1021 batadv_orig_node_free_ref(orig_node);
1022 continue;
1023 }
1024
1025 batadv_frag_purge_orig(orig_node,
1026 batadv_frag_check_entry);
1027 }
1028 spin_unlock_bh(list_lock);
1029 }
1030
1031 batadv_gw_election(bat_priv);
1032 }
1033
1034 static void batadv_purge_orig(struct work_struct *work)
1035 {
1036 struct delayed_work *delayed_work;
1037 struct batadv_priv *bat_priv;
1038
1039 delayed_work = container_of(work, struct delayed_work, work);
1040 bat_priv = container_of(delayed_work, struct batadv_priv, orig_work);
1041 _batadv_purge_orig(bat_priv);
1042 queue_delayed_work(batadv_event_workqueue,
1043 &bat_priv->orig_work,
1044 msecs_to_jiffies(BATADV_ORIG_WORK_PERIOD));
1045 }
1046
1047 void batadv_purge_orig_ref(struct batadv_priv *bat_priv)
1048 {
1049 _batadv_purge_orig(bat_priv);
1050 }
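/* Note: batadv_purge_orig() above re-queues itself, so once
 * batadv_originator_init() has armed the work item the purge runs every
 * BATADV_ORIG_WORK_PERIOD milliseconds until batadv_originator_free()
 * cancels it. batadv_purge_orig_ref() simply exposes the same purge to
 * callers that want to trigger it synchronously.
 */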
1051
1052 int batadv_orig_seq_print_text(struct seq_file *seq, void *offset)
1053 {
1054 struct net_device *net_dev = (struct net_device *)seq->private;
1055 struct batadv_priv *bat_priv = netdev_priv(net_dev);
1056 struct batadv_hard_iface *primary_if;
1057
1058 primary_if = batadv_seq_print_text_primary_if_get(seq);
1059 if (!primary_if)
1060 return 0;
1061
1062 seq_printf(seq, "[B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s %s)]\n",
1063 BATADV_SOURCE_VERSION, primary_if->net_dev->name,
1064 primary_if->net_dev->dev_addr, net_dev->name,
1065 bat_priv->bat_algo_ops->name);
1066
1067 batadv_hardif_free_ref(primary_if);
1068
1069 if (!bat_priv->bat_algo_ops->bat_orig_print) {
1070 seq_puts(seq,
1071 "No printing function for this routing protocol\n");
1072 return 0;
1073 }
1074
1075 bat_priv->bat_algo_ops->bat_orig_print(bat_priv, seq,
1076 BATADV_IF_DEFAULT);
1077
1078 return 0;
1079 }
1080
1081 /**
1082 * batadv_orig_hardif_seq_print_text - writes originator infos for a specific
1083 * outgoing interface
1084 * @seq: debugfs table seq_file struct
1085 * @offset: not used
1086 *
1087 * Returns 0
1088 */
1089 int batadv_orig_hardif_seq_print_text(struct seq_file *seq, void *offset)
1090 {
1091 struct net_device *net_dev = (struct net_device *)seq->private;
1092 struct batadv_hard_iface *hard_iface;
1093 struct batadv_priv *bat_priv;
1094
1095 hard_iface = batadv_hardif_get_by_netdev(net_dev);
1096
1097 if (!hard_iface || !hard_iface->soft_iface) {
1098 seq_puts(seq, "Interface not known to B.A.T.M.A.N.\n");
1099 goto out;
1100 }
1101
1102 bat_priv = netdev_priv(hard_iface->soft_iface);
1103 if (!bat_priv->bat_algo_ops->bat_orig_print) {
1104 seq_puts(seq,
1105 "No printing function for this routing protocol\n");
1106 goto out;
1107 }
1108
1109 if (hard_iface->if_status != BATADV_IF_ACTIVE) {
1110 seq_puts(seq, "Interface not active\n");
1111 goto out;
1112 }
1113
1114 seq_printf(seq, "[B.A.T.M.A.N. adv %s, IF/MAC: %s/%pM (%s %s)]\n",
1115 BATADV_SOURCE_VERSION, hard_iface->net_dev->name,
1116 hard_iface->net_dev->dev_addr,
1117 hard_iface->soft_iface->name, bat_priv->bat_algo_ops->name);
1118
1119 bat_priv->bat_algo_ops->bat_orig_print(bat_priv, seq, hard_iface);
1120
1121 out:
1122 if (hard_iface)
1123 batadv_hardif_free_ref(hard_iface);
1124 return 0;
1125 }
1126
1127 int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface,
1128 int max_if_num)
1129 {
1130 struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
1131 struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;
1132 struct batadv_hashtable *hash = bat_priv->orig_hash;
1133 struct hlist_head *head;
1134 struct batadv_orig_node *orig_node;
1135 u32 i;
1136 int ret;
1137
1138 /* resize all orig nodes because orig_node->bcast_own(_sum) depend on
1139 * if_num
1140 */
1141 for (i = 0; i < hash->size; i++) {
1142 head = &hash->table[i];
1143
1144 rcu_read_lock();
1145 hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
1146 ret = 0;
1147 if (bao->bat_orig_add_if)
1148 ret = bao->bat_orig_add_if(orig_node,
1149 max_if_num);
1150 if (ret == -ENOMEM)
1151 goto err;
1152 }
1153 rcu_read_unlock();
1154 }
1155
1156 return 0;
1157
1158 err:
1159 rcu_read_unlock();
1160 return -ENOMEM;
1161 }
1162
1163 int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface,
1164 int max_if_num)
1165 {
1166 struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
1167 struct batadv_hashtable *hash = bat_priv->orig_hash;
1168 struct hlist_head *head;
1169 struct batadv_hard_iface *hard_iface_tmp;
1170 struct batadv_orig_node *orig_node;
1171 struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;
1172 u32 i;
1173 int ret;
1174
1175 /* resize all orig nodes because orig_node->bcast_own(_sum) depend on
1176 * if_num
1177 */
1178 for (i = 0; i < hash->size; i++) {
1179 head = &hash->table[i];
1180
1181 rcu_read_lock();
1182 hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
1183 ret = 0;
1184 if (bao->bat_orig_del_if)
1185 ret = bao->bat_orig_del_if(orig_node,
1186 max_if_num,
1187 hard_iface->if_num);
1188 if (ret == -ENOMEM)
1189 goto err;
1190 }
1191 rcu_read_unlock();
1192 }
1193
1194 /* renumber remaining batman interfaces _inside_ of orig_hash_lock */
1195 rcu_read_lock();
1196 list_for_each_entry_rcu(hard_iface_tmp, &batadv_hardif_list, list) {
1197 if (hard_iface_tmp->if_status == BATADV_IF_NOT_IN_USE)
1198 continue;
1199
1200 if (hard_iface == hard_iface_tmp)
1201 continue;
1202
1203 if (hard_iface->soft_iface != hard_iface_tmp->soft_iface)
1204 continue;
1205
1206 if (hard_iface_tmp->if_num > hard_iface->if_num)
1207 hard_iface_tmp->if_num--;
1208 }
1209 rcu_read_unlock();
1210
1211 hard_iface->if_num = -1;
1212 return 0;
1213
1214 err:
1215 rcu_read_unlock();
1216 return -ENOMEM;
1217 }