/* Merge tag 'dmaengine-fixes-3.14-rc4' of git://git.kernel.org/pub/scm/linux/kernel...
 * [deliverable/linux.git] / net / batman-adv / originator.c
 */
1 /* Copyright (C) 2009-2014 B.A.T.M.A.N. contributors:
2 *
3 * Marek Lindner, Simon Wunderlich
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of version 2 of the GNU General Public
7 * License as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, see <http://www.gnu.org/licenses/>.
16 */
17
18 #include "main.h"
19 #include "distributed-arp-table.h"
20 #include "originator.h"
21 #include "hash.h"
22 #include "translation-table.h"
23 #include "routing.h"
24 #include "gateway_client.h"
25 #include "hard-interface.h"
26 #include "soft-interface.h"
27 #include "bridge_loop_avoidance.h"
28 #include "network-coding.h"
29 #include "fragmentation.h"
30
31 /* hash class keys */
32 static struct lock_class_key batadv_orig_hash_lock_class_key;
33
34 static void batadv_purge_orig(struct work_struct *work);
35
36 /* returns 1 if they are the same originator */
37 int batadv_compare_orig(const struct hlist_node *node, const void *data2)
38 {
39 const void *data1 = container_of(node, struct batadv_orig_node,
40 hash_entry);
41
42 return batadv_compare_eth(data1, data2);
43 }
44
/**
 * batadv_orig_node_vlan_get - get an orig_node_vlan object
 * @orig_node: the originator serving the VLAN
 * @vid: the VLAN identifier
 *
 * Returns the vlan object identified by vid and belonging to orig_node or NULL
 * if it does not exist.
 *
 * The object is returned with its refcounter increased by 1; the caller has to
 * release it with batadv_orig_node_vlan_free_ref() when done.
 */
struct batadv_orig_node_vlan *
batadv_orig_node_vlan_get(struct batadv_orig_node *orig_node,
			  unsigned short vid)
{
	struct batadv_orig_node_vlan *vlan = NULL, *tmp;

	rcu_read_lock();
	list_for_each_entry_rcu(tmp, &orig_node->vlan_list, list) {
		if (tmp->vid != vid)
			continue;

		/* skip the entry if its refcounter already dropped to zero:
		 * somebody else is about to free it
		 */
		if (!atomic_inc_not_zero(&tmp->refcount))
			continue;

		vlan = tmp;

		break;
	}
	rcu_read_unlock();

	return vlan;
}
75
/**
 * batadv_orig_node_vlan_new - search and possibly create an orig_node_vlan
 *  object
 * @orig_node: the originator serving the VLAN
 * @vid: the VLAN identifier
 *
 * Returns NULL in case of failure or the vlan object identified by vid and
 * belonging to orig_node otherwise. The object is created and added to the list
 * if it does not exist.
 *
 * The object is returned with refcounter increased by 1.
 */
struct batadv_orig_node_vlan *
batadv_orig_node_vlan_new(struct batadv_orig_node *orig_node,
			  unsigned short vid)
{
	struct batadv_orig_node_vlan *vlan;

	/* the lock serializes lookup and insertion so that no duplicate vlan
	 * object can be created for the same vid
	 */
	spin_lock_bh(&orig_node->vlan_list_lock);

	/* first look if an object for this vid already exists */
	vlan = batadv_orig_node_vlan_get(orig_node, vid);
	if (vlan)
		goto out;

	/* GFP_ATOMIC: this path may run in softirq context (lock held, BH off) */
	vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC);
	if (!vlan)
		goto out;

	/* refcount 2: one reference for the list, one for the caller */
	atomic_set(&vlan->refcount, 2);
	vlan->vid = vid;

	list_add_rcu(&vlan->list, &orig_node->vlan_list);

out:
	spin_unlock_bh(&orig_node->vlan_list_lock);

	return vlan;
}
115
116 /**
117 * batadv_orig_node_vlan_free_ref - decrement the refcounter and possibly free
118 * the originator-vlan object
119 * @orig_vlan: the originator-vlan object to release
120 */
121 void batadv_orig_node_vlan_free_ref(struct batadv_orig_node_vlan *orig_vlan)
122 {
123 if (atomic_dec_and_test(&orig_vlan->refcount))
124 kfree_rcu(orig_vlan, rcu);
125 }
126
127 int batadv_originator_init(struct batadv_priv *bat_priv)
128 {
129 if (bat_priv->orig_hash)
130 return 0;
131
132 bat_priv->orig_hash = batadv_hash_new(1024);
133
134 if (!bat_priv->orig_hash)
135 goto err;
136
137 batadv_hash_set_lock_class(bat_priv->orig_hash,
138 &batadv_orig_hash_lock_class_key);
139
140 INIT_DELAYED_WORK(&bat_priv->orig_work, batadv_purge_orig);
141 queue_delayed_work(batadv_event_workqueue,
142 &bat_priv->orig_work,
143 msecs_to_jiffies(BATADV_ORIG_WORK_PERIOD));
144
145 return 0;
146
147 err:
148 return -ENOMEM;
149 }
150
/**
 * batadv_neigh_ifinfo_free_rcu - free the neigh_ifinfo object
 * @rcu: rcu pointer of the neigh_ifinfo object
 *
 * Releases the reference held on the outgoing interface (the default
 * interface is a sentinel and carries no reference) and frees the object.
 */
static void batadv_neigh_ifinfo_free_rcu(struct rcu_head *rcu)
{
	struct batadv_neigh_ifinfo *neigh_ifinfo;

	neigh_ifinfo = container_of(rcu, struct batadv_neigh_ifinfo, rcu);

	if (neigh_ifinfo->if_outgoing != BATADV_IF_DEFAULT)
		batadv_hardif_free_ref_now(neigh_ifinfo->if_outgoing);

	kfree(neigh_ifinfo);
}
166
/**
 * batadv_neigh_ifinfo_free_ref_now - decrement the refcounter and possibly
 *  free the neigh_ifinfo (without rcu callback)
 * @neigh_ifinfo: the neigh_ifinfo object to release
 *
 * Frees synchronously instead of via call_rcu(); only safe when the caller
 * already runs in a context where no RCU readers can hold the object.
 */
static void
batadv_neigh_ifinfo_free_ref_now(struct batadv_neigh_ifinfo *neigh_ifinfo)
{
	if (atomic_dec_and_test(&neigh_ifinfo->refcount))
		batadv_neigh_ifinfo_free_rcu(&neigh_ifinfo->rcu);
}
178
179 /**
180 * batadv_neigh_ifinfo_free_ref - decrement the refcounter and possibly free
181 * the neigh_ifinfo
182 * @neigh_ifinfo: the neigh_ifinfo object to release
183 */
184 void batadv_neigh_ifinfo_free_ref(struct batadv_neigh_ifinfo *neigh_ifinfo)
185 {
186 if (atomic_dec_and_test(&neigh_ifinfo->refcount))
187 call_rcu(&neigh_ifinfo->rcu, batadv_neigh_ifinfo_free_rcu);
188 }
189
/**
 * batadv_neigh_node_free_rcu - free the neigh_node
 * @rcu: rcu pointer of the neigh_node
 *
 * Drops every per-interface ifinfo object still attached to the neighbor,
 * releases the incoming interface reference and frees the object itself.
 */
static void batadv_neigh_node_free_rcu(struct rcu_head *rcu)
{
	struct hlist_node *node_tmp;
	struct batadv_neigh_node *neigh_node;
	struct batadv_neigh_ifinfo *neigh_ifinfo;

	neigh_node = container_of(rcu, struct batadv_neigh_node, rcu);

	/* _safe variant: the free below removes entries while iterating */
	hlist_for_each_entry_safe(neigh_ifinfo, node_tmp,
				  &neigh_node->ifinfo_list, list) {
		batadv_neigh_ifinfo_free_ref_now(neigh_ifinfo);
	}
	batadv_hardif_free_ref_now(neigh_node->if_incoming);

	kfree(neigh_node);
}
210
211 /**
212 * batadv_neigh_node_free_ref_now - decrement the neighbors refcounter
213 * and possibly free it (without rcu callback)
214 * @neigh_node: neigh neighbor to free
215 */
216 static void
217 batadv_neigh_node_free_ref_now(struct batadv_neigh_node *neigh_node)
218 {
219 if (atomic_dec_and_test(&neigh_node->refcount))
220 batadv_neigh_node_free_rcu(&neigh_node->rcu);
221 }
222
223 /**
224 * batadv_neigh_node_free_ref - decrement the neighbors refcounter
225 * and possibly free it
226 * @neigh_node: neigh neighbor to free
227 */
228 void batadv_neigh_node_free_ref(struct batadv_neigh_node *neigh_node)
229 {
230 if (atomic_dec_and_test(&neigh_node->refcount))
231 call_rcu(&neigh_node->rcu, batadv_neigh_node_free_rcu);
232 }
233
/**
 * batadv_orig_router_get - router to the originator depending on iface
 * @orig_node: the orig node for the router
 * @if_outgoing: the interface where the payload packet has been received or
 *  the OGM should be sent to
 *
 * Returns the neighbor which should be router for this orig_node/iface.
 *
 * The object is returned with refcounter increased by 1.
 */
struct batadv_neigh_node *
batadv_orig_router_get(struct batadv_orig_node *orig_node,
		       const struct batadv_hard_iface *if_outgoing)
{
	struct batadv_orig_ifinfo *orig_ifinfo;
	struct batadv_neigh_node *router = NULL;

	rcu_read_lock();
	/* find the ifinfo entry for the requested outgoing interface; its
	 * router pointer is the candidate to return
	 */
	hlist_for_each_entry_rcu(orig_ifinfo, &orig_node->ifinfo_list, list) {
		if (orig_ifinfo->if_outgoing != if_outgoing)
			continue;

		router = rcu_dereference(orig_ifinfo->router);
		break;
	}

	/* only hand the router out if a reference could still be taken */
	if (router && !atomic_inc_not_zero(&router->refcount))
		router = NULL;

	rcu_read_unlock();
	return router;
}
266
/**
 * batadv_orig_ifinfo_get - find the ifinfo from an orig_node
 * @orig_node: the orig node to be queried
 * @if_outgoing: the interface for which the ifinfo should be acquired
 *
 * Returns the requested orig_ifinfo or NULL if not found.
 *
 * The object is returned with refcounter increased by 1.
 */
struct batadv_orig_ifinfo *
batadv_orig_ifinfo_get(struct batadv_orig_node *orig_node,
		       struct batadv_hard_iface *if_outgoing)
{
	struct batadv_orig_ifinfo *tmp, *orig_ifinfo = NULL;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp, &orig_node->ifinfo_list,
				 list) {
		if (tmp->if_outgoing != if_outgoing)
			continue;

		/* skip entries whose refcounter already hit zero: they are
		 * about to be freed
		 */
		if (!atomic_inc_not_zero(&tmp->refcount))
			continue;

		orig_ifinfo = tmp;
		break;
	}
	rcu_read_unlock();

	return orig_ifinfo;
}
298
/**
 * batadv_orig_ifinfo_new - search and possibly create an orig_ifinfo object
 * @orig_node: the orig node to be queried
 * @if_outgoing: the interface for which the ifinfo should be acquired
 *
 * Returns NULL in case of failure or the orig_ifinfo object for the if_outgoing
 * interface otherwise. The object is created and added to the list
 * if it does not exist.
 *
 * The object is returned with refcounter increased by 1.
 */
struct batadv_orig_ifinfo *
batadv_orig_ifinfo_new(struct batadv_orig_node *orig_node,
		       struct batadv_hard_iface *if_outgoing)
{
	struct batadv_orig_ifinfo *orig_ifinfo = NULL;
	unsigned long reset_time;

	/* serialize lookup and insertion to avoid duplicate entries */
	spin_lock_bh(&orig_node->neigh_list_lock);

	orig_ifinfo = batadv_orig_ifinfo_get(orig_node, if_outgoing);
	if (orig_ifinfo)
		goto out;

	orig_ifinfo = kzalloc(sizeof(*orig_ifinfo), GFP_ATOMIC);
	if (!orig_ifinfo)
		goto out;

	/* hold a reference on the real outgoing interface; BATADV_IF_DEFAULT
	 * is a sentinel and must not be refcounted
	 */
	if (if_outgoing != BATADV_IF_DEFAULT &&
	    !atomic_inc_not_zero(&if_outgoing->refcount)) {
		kfree(orig_ifinfo);
		orig_ifinfo = NULL;
		goto out;
	}

	/* initialise the seqno-reset timestamp far enough in the past that
	 * the protection window is already expired for a fresh entry
	 */
	reset_time = jiffies - 1;
	reset_time -= msecs_to_jiffies(BATADV_RESET_PROTECTION_MS);
	orig_ifinfo->batman_seqno_reset = reset_time;
	orig_ifinfo->if_outgoing = if_outgoing;
	INIT_HLIST_NODE(&orig_ifinfo->list);
	/* refcount 2: one reference for the list, one for the caller */
	atomic_set(&orig_ifinfo->refcount, 2);
	hlist_add_head_rcu(&orig_ifinfo->list,
			   &orig_node->ifinfo_list);
out:
	spin_unlock_bh(&orig_node->neigh_list_lock);
	return orig_ifinfo;
}
346
/**
 * batadv_neigh_ifinfo_get - find the ifinfo from an neigh_node
 * @neigh: the neigh node to be queried
 * @if_outgoing: the interface for which the ifinfo should be acquired
 *
 * The object is returned with refcounter increased by 1.
 *
 * Returns the requested neigh_ifinfo or NULL if not found
 */
struct batadv_neigh_ifinfo *
batadv_neigh_ifinfo_get(struct batadv_neigh_node *neigh,
			struct batadv_hard_iface *if_outgoing)
{
	struct batadv_neigh_ifinfo *neigh_ifinfo = NULL,
				   *tmp_neigh_ifinfo;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp_neigh_ifinfo, &neigh->ifinfo_list,
				 list) {
		if (tmp_neigh_ifinfo->if_outgoing != if_outgoing)
			continue;

		/* skip entries whose refcounter already hit zero */
		if (!atomic_inc_not_zero(&tmp_neigh_ifinfo->refcount))
			continue;

		neigh_ifinfo = tmp_neigh_ifinfo;
		break;
	}
	rcu_read_unlock();

	return neigh_ifinfo;
}
379
/**
 * batadv_neigh_ifinfo_new - search and possibly create an neigh_ifinfo object
 * @neigh: the neigh node to be queried
 * @if_outgoing: the interface for which the ifinfo should be acquired
 *
 * Returns NULL in case of failure or the neigh_ifinfo object for the
 * if_outgoing interface otherwise. The object is created and added to the list
 * if it does not exist.
 *
 * The object is returned with refcounter increased by 1.
 */
struct batadv_neigh_ifinfo *
batadv_neigh_ifinfo_new(struct batadv_neigh_node *neigh,
			struct batadv_hard_iface *if_outgoing)
{
	struct batadv_neigh_ifinfo *neigh_ifinfo;

	/* serialize lookup and insertion to avoid duplicate entries */
	spin_lock_bh(&neigh->ifinfo_lock);

	neigh_ifinfo = batadv_neigh_ifinfo_get(neigh, if_outgoing);
	if (neigh_ifinfo)
		goto out;

	neigh_ifinfo = kzalloc(sizeof(*neigh_ifinfo), GFP_ATOMIC);
	if (!neigh_ifinfo)
		goto out;

	/* a non-NULL outgoing interface must stay alive as long as this
	 * ifinfo references it; bail out if it is already being freed
	 */
	if (if_outgoing && !atomic_inc_not_zero(&if_outgoing->refcount)) {
		kfree(neigh_ifinfo);
		neigh_ifinfo = NULL;
		goto out;
	}

	INIT_HLIST_NODE(&neigh_ifinfo->list);
	/* refcount 2: one reference for the list, one for the caller */
	atomic_set(&neigh_ifinfo->refcount, 2);
	neigh_ifinfo->if_outgoing = if_outgoing;

	hlist_add_head_rcu(&neigh_ifinfo->list, &neigh->ifinfo_list);

out:
	spin_unlock_bh(&neigh->ifinfo_lock);

	return neigh_ifinfo;
}
424
425 /**
426 * batadv_neigh_node_new - create and init a new neigh_node object
427 * @hard_iface: the interface where the neighbour is connected to
428 * @neigh_addr: the mac address of the neighbour interface
429 * @orig_node: originator object representing the neighbour
430 *
431 * Allocates a new neigh_node object and initialises all the generic fields.
432 * Returns the new object or NULL on failure.
433 */
434 struct batadv_neigh_node *
435 batadv_neigh_node_new(struct batadv_hard_iface *hard_iface,
436 const uint8_t *neigh_addr,
437 struct batadv_orig_node *orig_node)
438 {
439 struct batadv_neigh_node *neigh_node;
440
441 neigh_node = kzalloc(sizeof(*neigh_node), GFP_ATOMIC);
442 if (!neigh_node)
443 goto out;
444
445 INIT_HLIST_NODE(&neigh_node->list);
446 INIT_HLIST_HEAD(&neigh_node->ifinfo_list);
447 spin_lock_init(&neigh_node->ifinfo_lock);
448
449 memcpy(neigh_node->addr, neigh_addr, ETH_ALEN);
450 neigh_node->if_incoming = hard_iface;
451 neigh_node->orig_node = orig_node;
452
453 /* extra reference for return */
454 atomic_set(&neigh_node->refcount, 2);
455
456 out:
457 return neigh_node;
458 }
459
460 /**
461 * batadv_neigh_node_get - retrieve a neighbour from the list
462 * @orig_node: originator which the neighbour belongs to
463 * @hard_iface: the interface where this neighbour is connected to
464 * @addr: the address of the neighbour
465 *
466 * Looks for and possibly returns a neighbour belonging to this originator list
467 * which is connected through the provided hard interface.
468 * Returns NULL if the neighbour is not found.
469 */
470 struct batadv_neigh_node *
471 batadv_neigh_node_get(const struct batadv_orig_node *orig_node,
472 const struct batadv_hard_iface *hard_iface,
473 const uint8_t *addr)
474 {
475 struct batadv_neigh_node *tmp_neigh_node, *res = NULL;
476
477 rcu_read_lock();
478 hlist_for_each_entry_rcu(tmp_neigh_node, &orig_node->neigh_list, list) {
479 if (!batadv_compare_eth(tmp_neigh_node->addr, addr))
480 continue;
481
482 if (tmp_neigh_node->if_incoming != hard_iface)
483 continue;
484
485 if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
486 continue;
487
488 res = tmp_neigh_node;
489 break;
490 }
491 rcu_read_unlock();
492
493 return res;
494 }
495
/**
 * batadv_orig_ifinfo_free_rcu - free the orig_ifinfo object
 * @rcu: rcu pointer of the orig_ifinfo object
 *
 * Drops the reference held on the outgoing interface (the default interface
 * is a sentinel and carries no reference) and frees the object.
 */
static void batadv_orig_ifinfo_free_rcu(struct rcu_head *rcu)
{
	struct batadv_orig_ifinfo *orig_ifinfo;

	orig_ifinfo = container_of(rcu, struct batadv_orig_ifinfo, rcu);

	if (orig_ifinfo->if_outgoing != BATADV_IF_DEFAULT)
		batadv_hardif_free_ref_now(orig_ifinfo->if_outgoing);

	kfree(orig_ifinfo);
}
511
/**
 * batadv_orig_ifinfo_free_ref_now - decrement the refcounter and possibly
 *  free the orig_ifinfo (without rcu callback)
 * @orig_ifinfo: the orig_ifinfo object to release
 *
 * Frees synchronously instead of via call_rcu(); only safe when no RCU
 * readers can still hold the object.
 */
static void
batadv_orig_ifinfo_free_ref_now(struct batadv_orig_ifinfo *orig_ifinfo)
{
	if (atomic_dec_and_test(&orig_ifinfo->refcount))
		batadv_orig_ifinfo_free_rcu(&orig_ifinfo->rcu);
}
523
524 /**
525 * batadv_orig_ifinfo_free_ref - decrement the refcounter and possibly free
526 * the orig_ifinfo
527 * @orig_ifinfo: the orig_ifinfo object to release
528 */
529 void batadv_orig_ifinfo_free_ref(struct batadv_orig_ifinfo *orig_ifinfo)
530 {
531 if (atomic_dec_and_test(&orig_ifinfo->refcount))
532 call_rcu(&orig_ifinfo->rcu, batadv_orig_ifinfo_free_rcu);
533 }
534
/* batadv_orig_node_free_rcu - release every structure still attached to an
 * originator and free it. Runs after the RCU grace period, so readers can no
 * longer reach the node; teardown order: neighbors/ifinfos, network-coding
 * state, fragment buffers, global TT entries, algorithm private data.
 */
static void batadv_orig_node_free_rcu(struct rcu_head *rcu)
{
	struct hlist_node *node_tmp;
	struct batadv_neigh_node *neigh_node;
	struct batadv_orig_node *orig_node;
	struct batadv_orig_ifinfo *orig_ifinfo;

	orig_node = container_of(rcu, struct batadv_orig_node, rcu);

	spin_lock_bh(&orig_node->neigh_list_lock);

	/* for all neighbors towards this originator ... */
	hlist_for_each_entry_safe(neigh_node, node_tmp,
				  &orig_node->neigh_list, list) {
		hlist_del_rcu(&neigh_node->list);
		batadv_neigh_node_free_ref_now(neigh_node);
	}

	hlist_for_each_entry_safe(orig_ifinfo, node_tmp,
				  &orig_node->ifinfo_list, list) {
		hlist_del_rcu(&orig_ifinfo->list);
		batadv_orig_ifinfo_free_ref_now(orig_ifinfo);
	}
	spin_unlock_bh(&orig_node->neigh_list_lock);

	/* Free nc_nodes */
	batadv_nc_purge_orig(orig_node->bat_priv, orig_node, NULL);

	batadv_frag_purge_orig(orig_node, NULL);

	batadv_tt_global_del_orig(orig_node->bat_priv, orig_node, -1,
				  "originator timed out");

	/* let the routing algorithm drop its private per-originator data */
	if (orig_node->bat_priv->bat_algo_ops->bat_orig_free)
		orig_node->bat_priv->bat_algo_ops->bat_orig_free(orig_node);

	kfree(orig_node->tt_buff);
	kfree(orig_node);
}
574
575 /**
576 * batadv_orig_node_free_ref - decrement the orig node refcounter and possibly
577 * schedule an rcu callback for freeing it
578 * @orig_node: the orig node to free
579 */
580 void batadv_orig_node_free_ref(struct batadv_orig_node *orig_node)
581 {
582 if (atomic_dec_and_test(&orig_node->refcount))
583 call_rcu(&orig_node->rcu, batadv_orig_node_free_rcu);
584 }
585
586 /**
587 * batadv_orig_node_free_ref_now - decrement the orig node refcounter and
588 * possibly free it (without rcu callback)
589 * @orig_node: the orig node to free
590 */
591 void batadv_orig_node_free_ref_now(struct batadv_orig_node *orig_node)
592 {
593 if (atomic_dec_and_test(&orig_node->refcount))
594 batadv_orig_node_free_rcu(&orig_node->rcu);
595 }
596
/* batadv_originator_free - tear down the originator hash on interface
 * shutdown: stop the periodic purge work, drop every originator's hash
 * reference and destroy the table.
 */
void batadv_originator_free(struct batadv_priv *bat_priv)
{
	struct batadv_hashtable *hash = bat_priv->orig_hash;
	struct hlist_node *node_tmp;
	struct hlist_head *head;
	spinlock_t *list_lock; /* spinlock to protect write access */
	struct batadv_orig_node *orig_node;
	uint32_t i;

	if (!hash)
		return;

	/* make sure no purge round is running concurrently */
	cancel_delayed_work_sync(&bat_priv->orig_work);

	bat_priv->orig_hash = NULL;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(orig_node, node_tmp,
					  head, hash_entry) {
			hlist_del_rcu(&orig_node->hash_entry);
			/* drop the reference held by the hash table */
			batadv_orig_node_free_ref(orig_node);
		}
		spin_unlock_bh(list_lock);
	}

	batadv_hash_destroy(hash);
}
628
/**
 * batadv_orig_node_new - creates a new orig_node
 * @bat_priv: the bat priv with all the soft interface information
 * @addr: the mac address of the originator
 *
 * Creates a new originator object and initialise all the generic fields.
 * The new object is not added to the originator list.
 * Returns the newly created object or NULL on failure.
 */
struct batadv_orig_node *batadv_orig_node_new(struct batadv_priv *bat_priv,
					      const uint8_t *addr)
{
	struct batadv_orig_node *orig_node;
	struct batadv_orig_node_vlan *vlan;
	unsigned long reset_time;
	int i;

	batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
		   "Creating new originator: %pM\n", addr);

	orig_node = kzalloc(sizeof(*orig_node), GFP_ATOMIC);
	if (!orig_node)
		return NULL;

	INIT_HLIST_HEAD(&orig_node->neigh_list);
	INIT_LIST_HEAD(&orig_node->vlan_list);
	INIT_HLIST_HEAD(&orig_node->ifinfo_list);
	spin_lock_init(&orig_node->bcast_seqno_lock);
	spin_lock_init(&orig_node->neigh_list_lock);
	spin_lock_init(&orig_node->tt_buff_lock);
	spin_lock_init(&orig_node->tt_lock);
	spin_lock_init(&orig_node->vlan_list_lock);

	batadv_nc_init_orig(orig_node);

	/* extra reference for return */
	atomic_set(&orig_node->refcount, 2);

	orig_node->tt_initialised = false;
	orig_node->bat_priv = bat_priv;
	memcpy(orig_node->orig, addr, ETH_ALEN);
	batadv_dat_init_orig_node_addr(orig_node);
	atomic_set(&orig_node->last_ttvn, 0);
	orig_node->tt_buff = NULL;
	orig_node->tt_buff_len = 0;
	/* start with the seqno-reset protection window already expired */
	reset_time = jiffies - 1 - msecs_to_jiffies(BATADV_RESET_PROTECTION_MS);
	orig_node->bcast_seqno_reset = reset_time;

	/* create a vlan object for the "untagged" LAN */
	vlan = batadv_orig_node_vlan_new(orig_node, BATADV_NO_FLAGS);
	if (!vlan)
		goto free_orig_node;
	/* batadv_orig_node_vlan_new() increases the refcounter.
	 * Immediately release vlan since it is not needed anymore in this
	 * context
	 */
	batadv_orig_node_vlan_free_ref(vlan);

	/* prepare the per-slot fragment reassembly buffers */
	for (i = 0; i < BATADV_FRAG_BUFFER_COUNT; i++) {
		INIT_HLIST_HEAD(&orig_node->fragments[i].head);
		spin_lock_init(&orig_node->fragments[i].lock);
		orig_node->fragments[i].size = 0;
	}

	return orig_node;
free_orig_node:
	kfree(orig_node);
	return NULL;
}
698
/**
 * batadv_purge_orig_ifinfo - purge obsolete ifinfo entries from originator
 * @bat_priv: the bat priv with all the soft interface information
 * @orig_node: orig node which is to be checked
 *
 * Returns true if any ifinfo entry was purged, false otherwise.
 */
static bool
batadv_purge_orig_ifinfo(struct batadv_priv *bat_priv,
			 struct batadv_orig_node *orig_node)
{
	struct batadv_orig_ifinfo *orig_ifinfo;
	struct batadv_hard_iface *if_outgoing;
	struct hlist_node *node_tmp;
	bool ifinfo_purged = false;

	spin_lock_bh(&orig_node->neigh_list_lock);

	/* for all ifinfo objects for this originator */
	hlist_for_each_entry_safe(orig_ifinfo, node_tmp,
				  &orig_node->ifinfo_list, list) {
		if_outgoing = orig_ifinfo->if_outgoing;

		/* always keep the default interface */
		if (if_outgoing == BATADV_IF_DEFAULT)
			continue;

		/* don't purge if the interface is not (going) down */
		if ((if_outgoing->if_status != BATADV_IF_INACTIVE) &&
		    (if_outgoing->if_status != BATADV_IF_NOT_IN_USE) &&
		    (if_outgoing->if_status != BATADV_IF_TO_BE_REMOVED))
			continue;

		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "router/ifinfo purge: originator %pM, iface: %s\n",
			   orig_node->orig, if_outgoing->net_dev->name);

		ifinfo_purged = true;

		hlist_del_rcu(&orig_ifinfo->list);
		/* drop the list reference */
		batadv_orig_ifinfo_free_ref(orig_ifinfo);
		/* the bonding candidate pointer holds its own reference;
		 * drop that one too when it pointed at this entry
		 */
		if (orig_node->last_bonding_candidate == orig_ifinfo) {
			orig_node->last_bonding_candidate = NULL;
			batadv_orig_ifinfo_free_ref(orig_ifinfo);
		}
	}

	spin_unlock_bh(&orig_node->neigh_list_lock);

	return ifinfo_purged;
}
750
751
/**
 * batadv_purge_orig_neighbors - purges neighbors from originator
 * @bat_priv: the bat priv with all the soft interface information
 * @orig_node: orig node which is to be checked
 *
 * A neighbor is purged when it has timed out or when its incoming interface
 * is inactive, unused or about to be removed.
 *
 * Returns true if any neighbor was purged, false otherwise
 */
static bool
batadv_purge_orig_neighbors(struct batadv_priv *bat_priv,
			    struct batadv_orig_node *orig_node)
{
	struct hlist_node *node_tmp;
	struct batadv_neigh_node *neigh_node;
	bool neigh_purged = false;
	unsigned long last_seen;
	struct batadv_hard_iface *if_incoming;

	spin_lock_bh(&orig_node->neigh_list_lock);

	/* for all neighbors towards this originator ... */
	hlist_for_each_entry_safe(neigh_node, node_tmp,
				  &orig_node->neigh_list, list) {
		last_seen = neigh_node->last_seen;
		if_incoming = neigh_node->if_incoming;

		if ((batadv_has_timed_out(last_seen, BATADV_PURGE_TIMEOUT)) ||
		    (if_incoming->if_status == BATADV_IF_INACTIVE) ||
		    (if_incoming->if_status == BATADV_IF_NOT_IN_USE) ||
		    (if_incoming->if_status == BATADV_IF_TO_BE_REMOVED)) {
			/* log the actual purge reason: interface state has
			 * priority over the plain timeout message
			 */
			if ((if_incoming->if_status == BATADV_IF_INACTIVE) ||
			    (if_incoming->if_status == BATADV_IF_NOT_IN_USE) ||
			    (if_incoming->if_status == BATADV_IF_TO_BE_REMOVED))
				batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
					   "neighbor purge: originator %pM, neighbor: %pM, iface: %s\n",
					   orig_node->orig, neigh_node->addr,
					   if_incoming->net_dev->name);
			else
				batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
					   "neighbor timeout: originator %pM, neighbor: %pM, last_seen: %u\n",
					   orig_node->orig, neigh_node->addr,
					   jiffies_to_msecs(last_seen));

			neigh_purged = true;

			hlist_del_rcu(&neigh_node->list);
			batadv_neigh_node_free_ref(neigh_node);
		}
	}

	spin_unlock_bh(&orig_node->neigh_list_lock);
	return neigh_purged;
}
804
/**
 * batadv_find_best_neighbor - finds the best neighbor after purging
 * @bat_priv: the bat priv with all the soft interface information
 * @orig_node: orig node which is to be checked
 * @if_outgoing: the interface for which the metric should be compared
 *
 * The comparison is delegated to the routing algorithm's bat_neigh_cmp()
 * callback.
 *
 * Returns the current best neighbor, with refcount increased.
 */
static struct batadv_neigh_node *
batadv_find_best_neighbor(struct batadv_priv *bat_priv,
			  struct batadv_orig_node *orig_node,
			  struct batadv_hard_iface *if_outgoing)
{
	struct batadv_neigh_node *best = NULL, *neigh;
	struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;

	rcu_read_lock();
	hlist_for_each_entry_rcu(neigh, &orig_node->neigh_list, list) {
		if (best && (bao->bat_neigh_cmp(neigh, if_outgoing,
						best, if_outgoing) <= 0))
			continue;

		/* skip candidates that are about to be freed */
		if (!atomic_inc_not_zero(&neigh->refcount))
			continue;

		/* release the previous best before replacing it */
		if (best)
			batadv_neigh_node_free_ref(best);

		best = neigh;
	}
	rcu_read_unlock();

	return best;
}
839
/**
 * batadv_purge_orig_node - purges obsolete information from an orig_node
 * @bat_priv: the bat priv with all the soft interface information
 * @orig_node: orig node which is to be checked
 *
 * This function checks if the orig_node or substructures of it have become
 * obsolete, and purges this information if that's the case. When ifinfo or
 * neighbor entries were removed, the best route is re-elected for the
 * default interface and for every active hard interface.
 *
 * Returns true if the orig_node is to be removed, false otherwise.
 */
static bool batadv_purge_orig_node(struct batadv_priv *bat_priv,
				   struct batadv_orig_node *orig_node)
{
	struct batadv_neigh_node *best_neigh_node;
	struct batadv_hard_iface *hard_iface;
	bool changed;

	/* a node unseen for twice the purge timeout is removed entirely */
	if (batadv_has_timed_out(orig_node->last_seen,
				 2 * BATADV_PURGE_TIMEOUT)) {
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "Originator timeout: originator %pM, last_seen %u\n",
			   orig_node->orig,
			   jiffies_to_msecs(orig_node->last_seen));
		return true;
	}
	changed = batadv_purge_orig_ifinfo(bat_priv, orig_node);
	changed = changed || batadv_purge_orig_neighbors(bat_priv, orig_node);

	/* routes only need re-election if something was purged */
	if (!changed)
		return false;

	/* first for NULL ... */
	best_neigh_node = batadv_find_best_neighbor(bat_priv, orig_node,
						    BATADV_IF_DEFAULT);
	batadv_update_route(bat_priv, orig_node, BATADV_IF_DEFAULT,
			    best_neigh_node);
	if (best_neigh_node)
		batadv_neigh_node_free_ref(best_neigh_node);

	/* ... then for all other interfaces. */
	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
		if (hard_iface->if_status != BATADV_IF_ACTIVE)
			continue;

		if (hard_iface->soft_iface != bat_priv->soft_iface)
			continue;

		best_neigh_node = batadv_find_best_neighbor(bat_priv,
							    orig_node,
							    hard_iface);
		batadv_update_route(bat_priv, orig_node, hard_iface,
				    best_neigh_node);
		if (best_neigh_node)
			batadv_neigh_node_free_ref(best_neigh_node);
	}
	rcu_read_unlock();

	return false;
}
900
/* _batadv_purge_orig - run one purge round over the whole originator hash:
 * delete timed-out originators, expire stale fragment chains and refresh
 * gateway state afterwards.
 */
static void _batadv_purge_orig(struct batadv_priv *bat_priv)
{
	struct batadv_hashtable *hash = bat_priv->orig_hash;
	struct hlist_node *node_tmp;
	struct hlist_head *head;
	spinlock_t *list_lock; /* spinlock to protect write access */
	struct batadv_orig_node *orig_node;
	uint32_t i;

	if (!hash)
		return;

	/* for all origins... */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(orig_node, node_tmp,
					  head, hash_entry) {
			if (batadv_purge_orig_node(bat_priv, orig_node)) {
				batadv_gw_node_delete(bat_priv, orig_node);
				hlist_del_rcu(&orig_node->hash_entry);
				/* drop the reference held by the hash */
				batadv_orig_node_free_ref(orig_node);
				continue;
			}

			/* node stays: only expire stale fragment chains */
			batadv_frag_purge_orig(orig_node,
					       batadv_frag_check_entry);
		}
		spin_unlock_bh(list_lock);
	}

	batadv_gw_node_purge(bat_priv);
	batadv_gw_election(bat_priv);
}
937
/* batadv_purge_orig - periodic work callback: run a purge round and re-arm
 * itself to fire again after BATADV_ORIG_WORK_PERIOD.
 */
static void batadv_purge_orig(struct work_struct *work)
{
	struct delayed_work *delayed_work;
	struct batadv_priv *bat_priv;

	delayed_work = container_of(work, struct delayed_work, work);
	bat_priv = container_of(delayed_work, struct batadv_priv, orig_work);
	_batadv_purge_orig(bat_priv);
	/* re-schedule the next purge round */
	queue_delayed_work(batadv_event_workqueue,
			   &bat_priv->orig_work,
			   msecs_to_jiffies(BATADV_ORIG_WORK_PERIOD));
}
950
/* batadv_purge_orig_ref - trigger an immediate purge round on demand,
 * independent of the periodic work item.
 */
void batadv_purge_orig_ref(struct batadv_priv *bat_priv)
{
	_batadv_purge_orig(bat_priv);
}
955
/* batadv_orig_seq_print_text - debugfs seq_file handler printing the
 * originator table for the default interface via the routing algorithm's
 * bat_orig_print() callback. Always returns 0.
 */
int batadv_orig_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct batadv_priv *bat_priv = netdev_priv(net_dev);
	struct batadv_hard_iface *primary_if;

	primary_if = batadv_seq_print_text_primary_if_get(seq);
	if (!primary_if)
		return 0;

	seq_printf(seq, "[B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s %s)]\n",
		   BATADV_SOURCE_VERSION, primary_if->net_dev->name,
		   primary_if->net_dev->dev_addr, net_dev->name,
		   bat_priv->bat_algo_ops->name);

	/* the header only needed the primary interface; release it early */
	batadv_hardif_free_ref(primary_if);

	if (!bat_priv->bat_algo_ops->bat_orig_print) {
		seq_puts(seq,
			 "No printing function for this routing protocol\n");
		return 0;
	}

	bat_priv->bat_algo_ops->bat_orig_print(bat_priv, seq,
					       BATADV_IF_DEFAULT);

	return 0;
}
984
985 /**
986 * batadv_orig_hardif_seq_print_text - writes originator infos for a specific
987 * outgoing interface
988 * @seq: debugfs table seq_file struct
989 * @offset: not used
990 *
991 * Returns 0
992 */
993 int batadv_orig_hardif_seq_print_text(struct seq_file *seq, void *offset)
994 {
995 struct net_device *net_dev = (struct net_device *)seq->private;
996 struct batadv_hard_iface *hard_iface;
997 struct batadv_priv *bat_priv;
998
999 hard_iface = batadv_hardif_get_by_netdev(net_dev);
1000
1001 if (!hard_iface || !hard_iface->soft_iface) {
1002 seq_puts(seq, "Interface not known to B.A.T.M.A.N.\n");
1003 goto out;
1004 }
1005
1006 bat_priv = netdev_priv(hard_iface->soft_iface);
1007 if (!bat_priv->bat_algo_ops->bat_orig_print) {
1008 seq_puts(seq,
1009 "No printing function for this routing protocol\n");
1010 goto out;
1011 }
1012
1013 if (hard_iface->if_status != BATADV_IF_ACTIVE) {
1014 seq_puts(seq, "Interface not active\n");
1015 goto out;
1016 }
1017
1018 seq_printf(seq, "[B.A.T.M.A.N. adv %s, IF/MAC: %s/%pM (%s %s)]\n",
1019 BATADV_SOURCE_VERSION, hard_iface->net_dev->name,
1020 hard_iface->net_dev->dev_addr,
1021 hard_iface->soft_iface->name, bat_priv->bat_algo_ops->name);
1022
1023 bat_priv->bat_algo_ops->bat_orig_print(bat_priv, seq, hard_iface);
1024
1025 out:
1026 batadv_hardif_free_ref(hard_iface);
1027 return 0;
1028 }
1029
/* batadv_orig_hash_add_if - notify every originator that a new hard
 * interface was added so the algorithm can resize its per-interface data.
 * Returns 0 on success, -ENOMEM if any per-node resize failed.
 */
int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface,
			    int max_if_num)
{
	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;
	struct batadv_hashtable *hash = bat_priv->orig_hash;
	struct hlist_head *head;
	struct batadv_orig_node *orig_node;
	uint32_t i;
	int ret;

	/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
	 * if_num
	 */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
			ret = 0;
			/* the resize hook is optional per algorithm */
			if (bao->bat_orig_add_if)
				ret = bao->bat_orig_add_if(orig_node,
							   max_if_num);
			if (ret == -ENOMEM)
				goto err;
		}
		rcu_read_unlock();
	}

	return 0;

err:
	rcu_read_unlock();
	return -ENOMEM;
}
1065
/* batadv_orig_hash_del_if - notify every originator that a hard interface is
 * going away, shrink the algorithm's per-interface data and renumber the
 * remaining interfaces of the same soft interface.
 * Returns 0 on success, -ENOMEM if any per-node resize failed.
 */
int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface,
			    int max_if_num)
{
	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct batadv_hashtable *hash = bat_priv->orig_hash;
	struct hlist_head *head;
	struct batadv_hard_iface *hard_iface_tmp;
	struct batadv_orig_node *orig_node;
	struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;
	uint32_t i;
	int ret;

	/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
	 * if_num
	 */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
			ret = 0;
			/* the resize hook is optional per algorithm */
			if (bao->bat_orig_del_if)
				ret = bao->bat_orig_del_if(orig_node,
							   max_if_num,
							   hard_iface->if_num);
			if (ret == -ENOMEM)
				goto err;
		}
		rcu_read_unlock();
	}

	/* renumber remaining batman interfaces _inside_ of orig_hash_lock */
	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface_tmp, &batadv_hardif_list, list) {
		if (hard_iface_tmp->if_status == BATADV_IF_NOT_IN_USE)
			continue;

		if (hard_iface == hard_iface_tmp)
			continue;

		if (hard_iface->soft_iface != hard_iface_tmp->soft_iface)
			continue;

		/* close the numbering gap left by the removed interface */
		if (hard_iface_tmp->if_num > hard_iface->if_num)
			hard_iface_tmp->if_num--;
	}
	rcu_read_unlock();

	/* mark the removed interface as unnumbered */
	hard_iface->if_num = -1;
	return 0;

err:
	rcu_read_unlock();
	return -ENOMEM;
}
/* This page took 0.057297 seconds and 5 git commands to generate. */