net/batman-adv/originator.c
/* Copyright (C) 2009-2012 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#include "main.h"
#include "distributed-arp-table.h"
#include "originator.h"
#include "hash.h"
#include "translation-table.h"
#include "routing.h"
#include "gateway_client.h"
#include "hard-interface.h"
#include "unicast.h"
#include "soft-interface.h"
#include "bridge_loop_avoidance.h"

static void batadv_purge_orig(struct work_struct *work);

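/* (re)schedule the periodic originator purge to run in about one second */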
static void batadv_start_purge_timer(struct batadv_priv *bat_priv)
{
	INIT_DELAYED_WORK(&bat_priv->orig_work, batadv_purge_orig);
	queue_delayed_work(batadv_event_workqueue,
			   &bat_priv->orig_work, msecs_to_jiffies(1000));
}

/* returns 1 if they are the same originator */
static int batadv_compare_orig(const struct hlist_node *node, const void *data2)
{
	const void *data1 = container_of(node, struct batadv_orig_node,
					 hash_entry);

	return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
}

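/* allocate the originator hash table and start the purge timer;
 * returns 0 on success or -ENOMEM if the hash could not be created
 */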
int batadv_originator_init(struct batadv_priv *bat_priv)
{
	if (bat_priv->orig_hash)
		return 0;

	bat_priv->orig_hash = batadv_hash_new(1024);

	if (!bat_priv->orig_hash)
		goto err;

	batadv_start_purge_timer(bat_priv);
	return 0;

err:
	return -ENOMEM;
}

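/* drop a neighbor reference and free the entry via RCU once the
 * refcounter reaches zero
 */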
void batadv_neigh_node_free_ref(struct batadv_neigh_node *neigh_node)
{
	if (atomic_dec_and_test(&neigh_node->refcount))
		kfree_rcu(neigh_node, rcu);
}

/* increases the refcounter of a found router */
struct batadv_neigh_node *
batadv_orig_node_get_router(struct batadv_orig_node *orig_node)
{
	struct batadv_neigh_node *router;

	rcu_read_lock();
	router = rcu_dereference(orig_node->router);

	if (router && !atomic_inc_not_zero(&router->refcount))
		router = NULL;

	rcu_read_unlock();
	return router;
}

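/* allocate a new neighbor entry for the given address and return it with
 * an extra reference held for the caller (NULL on allocation failure)
 */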
struct batadv_neigh_node *
batadv_neigh_node_new(struct batadv_hard_iface *hard_iface,
		      const uint8_t *neigh_addr, uint32_t seqno)
{
	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct batadv_neigh_node *neigh_node;

	neigh_node = kzalloc(sizeof(*neigh_node), GFP_ATOMIC);
	if (!neigh_node)
		goto out;

	INIT_HLIST_NODE(&neigh_node->list);

	memcpy(neigh_node->addr, neigh_addr, ETH_ALEN);
	spin_lock_init(&neigh_node->lq_update_lock);

	/* extra reference for return */
	atomic_set(&neigh_node->refcount, 2);

	batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
		   "Creating new neighbor %pM, initial seqno %d\n",
		   neigh_addr, seqno);

out:
	return neigh_node;
}

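/* RCU callback: release all neighbor references, pending fragments and
 * global translation table entries of this originator before freeing it
 */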
static void batadv_orig_node_free_rcu(struct rcu_head *rcu)
{
	struct hlist_node *node, *node_tmp;
	struct batadv_neigh_node *neigh_node, *tmp_neigh_node;
	struct batadv_orig_node *orig_node;

	orig_node = container_of(rcu, struct batadv_orig_node, rcu);

	spin_lock_bh(&orig_node->neigh_list_lock);

	/* for all bonding members ... */
	list_for_each_entry_safe(neigh_node, tmp_neigh_node,
				 &orig_node->bond_list, bonding_list) {
		list_del_rcu(&neigh_node->bonding_list);
		batadv_neigh_node_free_ref(neigh_node);
	}

	/* for all neighbors towards this originator ... */
	hlist_for_each_entry_safe(neigh_node, node, node_tmp,
				  &orig_node->neigh_list, list) {
		hlist_del_rcu(&neigh_node->list);
		batadv_neigh_node_free_ref(neigh_node);
	}

	spin_unlock_bh(&orig_node->neigh_list_lock);

	batadv_frag_list_free(&orig_node->frag_list);
	batadv_tt_global_del_orig(orig_node->bat_priv, orig_node,
				  "originator timed out");

	kfree(orig_node->tt_buff);
	kfree(orig_node->bcast_own);
	kfree(orig_node->bcast_own_sum);
	kfree(orig_node);
}

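/* drop an originator reference; schedule the RCU cleanup when the last
 * reference is gone
 */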
void batadv_orig_node_free_ref(struct batadv_orig_node *orig_node)
{
	if (atomic_dec_and_test(&orig_node->refcount))
		call_rcu(&orig_node->rcu, batadv_orig_node_free_rcu);
}

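/* stop the purge timer and release every originator still stored in the
 * hash table before destroying the table itself
 */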
void batadv_originator_free(struct batadv_priv *bat_priv)
{
	struct batadv_hashtable *hash = bat_priv->orig_hash;
	struct hlist_node *node, *node_tmp;
	struct hlist_head *head;
	spinlock_t *list_lock; /* spinlock to protect write access */
	struct batadv_orig_node *orig_node;
	uint32_t i;

	if (!hash)
		return;

	cancel_delayed_work_sync(&bat_priv->orig_work);

	bat_priv->orig_hash = NULL;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(orig_node, node, node_tmp,
					  head, hash_entry) {

			hlist_del_rcu(node);
			batadv_orig_node_free_ref(orig_node);
		}
		spin_unlock_bh(list_lock);
	}

	batadv_hash_destroy(hash);
}

/* this function returns the existing originator entry for the given
 * address or creates a new one if it does not exist yet
 */
struct batadv_orig_node *batadv_get_orig_node(struct batadv_priv *bat_priv,
					      const uint8_t *addr)
{
	struct batadv_orig_node *orig_node;
	int size;
	int hash_added;
	unsigned long reset_time;

	orig_node = batadv_orig_hash_find(bat_priv, addr);
	if (orig_node)
		return orig_node;

	batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
		   "Creating new originator: %pM\n", addr);

	orig_node = kzalloc(sizeof(*orig_node), GFP_ATOMIC);
	if (!orig_node)
		return NULL;

	INIT_HLIST_HEAD(&orig_node->neigh_list);
	INIT_LIST_HEAD(&orig_node->bond_list);
	spin_lock_init(&orig_node->ogm_cnt_lock);
	spin_lock_init(&orig_node->bcast_seqno_lock);
	spin_lock_init(&orig_node->neigh_list_lock);
	spin_lock_init(&orig_node->tt_buff_lock);

	/* extra reference for return */
	atomic_set(&orig_node->refcount, 2);

	orig_node->tt_initialised = false;
	orig_node->bat_priv = bat_priv;
	memcpy(orig_node->orig, addr, ETH_ALEN);
	batadv_dat_init_orig_node_addr(orig_node);
	orig_node->router = NULL;
	orig_node->tt_crc = 0;
	atomic_set(&orig_node->last_ttvn, 0);
	orig_node->tt_buff = NULL;
	orig_node->tt_buff_len = 0;
	atomic_set(&orig_node->tt_size, 0);
	reset_time = jiffies - 1 - msecs_to_jiffies(BATADV_RESET_PROTECTION_MS);
	orig_node->bcast_seqno_reset = reset_time;
	orig_node->batman_seqno_reset = reset_time;

	atomic_set(&orig_node->bond_candidates, 0);

	size = bat_priv->num_ifaces * sizeof(unsigned long) * BATADV_NUM_WORDS;

	orig_node->bcast_own = kzalloc(size, GFP_ATOMIC);
	if (!orig_node->bcast_own)
		goto free_orig_node;

	size = bat_priv->num_ifaces * sizeof(uint8_t);
	orig_node->bcast_own_sum = kzalloc(size, GFP_ATOMIC);

	INIT_LIST_HEAD(&orig_node->frag_list);
	orig_node->last_frag_packet = 0;

	if (!orig_node->bcast_own_sum)
		goto free_bcast_own;

	hash_added = batadv_hash_add(bat_priv->orig_hash, batadv_compare_orig,
				     batadv_choose_orig, orig_node,
				     &orig_node->hash_entry);
	if (hash_added != 0)
		goto free_bcast_own_sum;

	return orig_node;
free_bcast_own_sum:
	kfree(orig_node->bcast_own_sum);
free_bcast_own:
	kfree(orig_node->bcast_own);
free_orig_node:
	kfree(orig_node);
	return NULL;
}

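/* remove neighbors which timed out or whose incoming interface went away
 * and remember the best remaining neighbor; returns true if at least one
 * neighbor was purged
 */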
static bool
batadv_purge_orig_neighbors(struct batadv_priv *bat_priv,
			    struct batadv_orig_node *orig_node,
			    struct batadv_neigh_node **best_neigh_node)
{
	struct hlist_node *node, *node_tmp;
	struct batadv_neigh_node *neigh_node;
	bool neigh_purged = false;
	unsigned long last_seen;
	struct batadv_hard_iface *if_incoming;

	*best_neigh_node = NULL;

	spin_lock_bh(&orig_node->neigh_list_lock);

	/* for all neighbors towards this originator ... */
	hlist_for_each_entry_safe(neigh_node, node, node_tmp,
				  &orig_node->neigh_list, list) {

		last_seen = neigh_node->last_seen;
		if_incoming = neigh_node->if_incoming;

		if ((batadv_has_timed_out(last_seen, BATADV_PURGE_TIMEOUT)) ||
		    (if_incoming->if_status == BATADV_IF_INACTIVE) ||
		    (if_incoming->if_status == BATADV_IF_NOT_IN_USE) ||
		    (if_incoming->if_status == BATADV_IF_TO_BE_REMOVED)) {

			if ((if_incoming->if_status == BATADV_IF_INACTIVE) ||
			    (if_incoming->if_status == BATADV_IF_NOT_IN_USE) ||
			    (if_incoming->if_status == BATADV_IF_TO_BE_REMOVED))
				batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
					   "neighbor purge: originator %pM, neighbor: %pM, iface: %s\n",
					   orig_node->orig, neigh_node->addr,
					   if_incoming->net_dev->name);
			else
				batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
					   "neighbor timeout: originator %pM, neighbor: %pM, last_seen: %u\n",
					   orig_node->orig, neigh_node->addr,
					   jiffies_to_msecs(last_seen));

			neigh_purged = true;

			hlist_del_rcu(&neigh_node->list);
			batadv_bonding_candidate_del(orig_node, neigh_node);
			batadv_neigh_node_free_ref(neigh_node);
		} else {
			if ((!*best_neigh_node) ||
			    (neigh_node->tq_avg > (*best_neigh_node)->tq_avg))
				*best_neigh_node = neigh_node;
		}
	}

	spin_unlock_bh(&orig_node->neigh_list_lock);
	return neigh_purged;
}

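/* returns true if the whole originator timed out and should be removed;
 * otherwise purge its stale neighbors and, if any were dropped, switch
 * the route to the best remaining neighbor
 */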
static bool batadv_purge_orig_node(struct batadv_priv *bat_priv,
				   struct batadv_orig_node *orig_node)
{
	struct batadv_neigh_node *best_neigh_node;

	if (batadv_has_timed_out(orig_node->last_seen,
				 2 * BATADV_PURGE_TIMEOUT)) {
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "Originator timeout: originator %pM, last_seen %u\n",
			   orig_node->orig,
			   jiffies_to_msecs(orig_node->last_seen));
		return true;
	} else {
		if (batadv_purge_orig_neighbors(bat_priv, orig_node,
						&best_neigh_node))
			batadv_update_route(bat_priv, orig_node,
					    best_neigh_node);
	}

	return false;
}

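/* walk the whole originator hash, drop timed out originators and expired
 * fragment lists, then let the gateway code purge its nodes and re-elect
 */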
static void _batadv_purge_orig(struct batadv_priv *bat_priv)
{
	struct batadv_hashtable *hash = bat_priv->orig_hash;
	struct hlist_node *node, *node_tmp;
	struct hlist_head *head;
	spinlock_t *list_lock; /* spinlock to protect write access */
	struct batadv_orig_node *orig_node;
	uint32_t i;

	if (!hash)
		return;

	/* for all origins... */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(orig_node, node, node_tmp,
					  head, hash_entry) {
			if (batadv_purge_orig_node(bat_priv, orig_node)) {
				if (orig_node->gw_flags)
					batadv_gw_node_delete(bat_priv,
							      orig_node);
				hlist_del_rcu(node);
				batadv_orig_node_free_ref(orig_node);
				continue;
			}

			if (batadv_has_timed_out(orig_node->last_frag_packet,
						 BATADV_FRAG_TIMEOUT))
				batadv_frag_list_free(&orig_node->frag_list);
		}
		spin_unlock_bh(list_lock);
	}

	batadv_gw_node_purge(bat_priv);
	batadv_gw_election(bat_priv);
}

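/* delayed work callback: run the purge and re-arm the purge timer */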
static void batadv_purge_orig(struct work_struct *work)
{
	struct delayed_work *delayed_work;
	struct batadv_priv *bat_priv;

	delayed_work = container_of(work, struct delayed_work, work);
	bat_priv = container_of(delayed_work, struct batadv_priv, orig_work);
	_batadv_purge_orig(bat_priv);
	batadv_start_purge_timer(bat_priv);
}

void batadv_purge_orig_ref(struct batadv_priv *bat_priv)
{
	_batadv_purge_orig(bat_priv);
}

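/* seq_file output: print one line per originator with its last-seen time,
 * currently selected router and all potential next hops
 */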
int batadv_orig_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct batadv_priv *bat_priv = netdev_priv(net_dev);
	struct batadv_hashtable *hash = bat_priv->orig_hash;
	struct hlist_node *node, *node_tmp;
	struct hlist_head *head;
	struct batadv_hard_iface *primary_if;
	struct batadv_orig_node *orig_node;
	struct batadv_neigh_node *neigh_node, *neigh_node_tmp;
	int batman_count = 0;
	int last_seen_secs;
	int last_seen_msecs;
	unsigned long last_seen_jiffies;
	uint32_t i;

	primary_if = batadv_seq_print_text_primary_if_get(seq);
	if (!primary_if)
		goto out;

	seq_printf(seq, "[B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s)]\n",
		   BATADV_SOURCE_VERSION, primary_if->net_dev->name,
		   primary_if->net_dev->dev_addr, net_dev->name);
	seq_printf(seq, " %-15s %s (%s/%i) %17s [%10s]: %20s ...\n",
		   "Originator", "last-seen", "#", BATADV_TQ_MAX_VALUE,
		   "Nexthop", "outgoingIF", "Potential nexthops");

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
			neigh_node = batadv_orig_node_get_router(orig_node);
			if (!neigh_node)
				continue;

			if (neigh_node->tq_avg == 0)
				goto next;

			last_seen_jiffies = jiffies - orig_node->last_seen;
			last_seen_msecs = jiffies_to_msecs(last_seen_jiffies);
			last_seen_secs = last_seen_msecs / 1000;
			last_seen_msecs = last_seen_msecs % 1000;

			seq_printf(seq, "%pM %4i.%03is (%3i) %pM [%10s]:",
				   orig_node->orig, last_seen_secs,
				   last_seen_msecs, neigh_node->tq_avg,
				   neigh_node->addr,
				   neigh_node->if_incoming->net_dev->name);

			hlist_for_each_entry_rcu(neigh_node_tmp, node_tmp,
						 &orig_node->neigh_list, list) {
				seq_printf(seq, " %pM (%3i)",
					   neigh_node_tmp->addr,
					   neigh_node_tmp->tq_avg);
			}

			seq_printf(seq, "\n");
			batman_count++;

next:
			batadv_neigh_node_free_ref(neigh_node);
		}
		rcu_read_unlock();
	}

	if (batman_count == 0)
		seq_printf(seq, "No batman nodes in range ...\n");

out:
	if (primary_if)
		batadv_hardif_free_ref(primary_if);
	return 0;
}

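/* grow the per-interface bcast_own and bcast_own_sum arrays of a single
 * originator after a new hard interface has been added
 */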
static int batadv_orig_node_add_if(struct batadv_orig_node *orig_node,
				   int max_if_num)
{
	void *data_ptr;
	size_t data_size, old_size;

	data_size = max_if_num * sizeof(unsigned long) * BATADV_NUM_WORDS;
	old_size = (max_if_num - 1) * sizeof(unsigned long) * BATADV_NUM_WORDS;
	data_ptr = kmalloc(data_size, GFP_ATOMIC);
	if (!data_ptr)
		return -ENOMEM;

	memcpy(data_ptr, orig_node->bcast_own, old_size);
	kfree(orig_node->bcast_own);
	orig_node->bcast_own = data_ptr;

	data_ptr = kmalloc(max_if_num * sizeof(uint8_t), GFP_ATOMIC);
	if (!data_ptr)
		return -ENOMEM;

	memcpy(data_ptr, orig_node->bcast_own_sum,
	       (max_if_num - 1) * sizeof(uint8_t));
	kfree(orig_node->bcast_own_sum);
	orig_node->bcast_own_sum = data_ptr;

	return 0;
}

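/* resize the per-interface counters of all originators after a hard
 * interface was added; returns 0 on success or -ENOMEM on failure
 */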
int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface,
			    int max_if_num)
{
	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct batadv_hashtable *hash = bat_priv->orig_hash;
	struct hlist_node *node;
	struct hlist_head *head;
	struct batadv_orig_node *orig_node;
	uint32_t i;
	int ret;

	/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
	 * if_num
	 */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
			spin_lock_bh(&orig_node->ogm_cnt_lock);
			ret = batadv_orig_node_add_if(orig_node, max_if_num);
			spin_unlock_bh(&orig_node->ogm_cnt_lock);

			if (ret == -ENOMEM)
				goto err;
		}
		rcu_read_unlock();
	}

	return 0;

err:
	rcu_read_unlock();
	return -ENOMEM;
}

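/* shrink the bcast_own and bcast_own_sum arrays of a single originator by
 * cutting out the slot that belonged to the removed interface
 */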
static int batadv_orig_node_del_if(struct batadv_orig_node *orig_node,
				   int max_if_num, int del_if_num)
{
	void *data_ptr = NULL;
	int chunk_size;

	/* last interface was removed */
	if (max_if_num == 0)
		goto free_bcast_own;

	chunk_size = sizeof(unsigned long) * BATADV_NUM_WORDS;
	data_ptr = kmalloc(max_if_num * chunk_size, GFP_ATOMIC);
	if (!data_ptr)
		return -ENOMEM;

	/* copy first part */
	memcpy(data_ptr, orig_node->bcast_own, del_if_num * chunk_size);

	/* copy second part */
	memcpy((char *)data_ptr + del_if_num * chunk_size,
	       orig_node->bcast_own + ((del_if_num + 1) * chunk_size),
	       (max_if_num - del_if_num) * chunk_size);

free_bcast_own:
	kfree(orig_node->bcast_own);
	orig_node->bcast_own = data_ptr;

	if (max_if_num == 0)
		goto free_own_sum;

	data_ptr = kmalloc(max_if_num * sizeof(uint8_t), GFP_ATOMIC);
	if (!data_ptr)
		return -ENOMEM;

	memcpy(data_ptr, orig_node->bcast_own_sum,
	       del_if_num * sizeof(uint8_t));

	memcpy((char *)data_ptr + del_if_num * sizeof(uint8_t),
	       orig_node->bcast_own_sum + ((del_if_num + 1) * sizeof(uint8_t)),
	       (max_if_num - del_if_num) * sizeof(uint8_t));

free_own_sum:
	kfree(orig_node->bcast_own_sum);
	orig_node->bcast_own_sum = data_ptr;

	return 0;
}

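/* resize the per-interface counters of all originators after a hard
 * interface was removed and renumber the remaining interfaces
 */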
int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface,
			    int max_if_num)
{
	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct batadv_hashtable *hash = bat_priv->orig_hash;
	struct hlist_node *node;
	struct hlist_head *head;
	struct batadv_hard_iface *hard_iface_tmp;
	struct batadv_orig_node *orig_node;
	uint32_t i;
	int ret;

	/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
	 * if_num
	 */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
			spin_lock_bh(&orig_node->ogm_cnt_lock);
			ret = batadv_orig_node_del_if(orig_node, max_if_num,
						      hard_iface->if_num);
			spin_unlock_bh(&orig_node->ogm_cnt_lock);

			if (ret == -ENOMEM)
				goto err;
		}
		rcu_read_unlock();
	}

	/* renumber remaining batman interfaces _inside_ of orig_hash_lock */
	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface_tmp, &batadv_hardif_list, list) {
		if (hard_iface_tmp->if_status == BATADV_IF_NOT_IN_USE)
			continue;

		if (hard_iface == hard_iface_tmp)
			continue;

		if (hard_iface->soft_iface != hard_iface_tmp->soft_iface)
			continue;

		if (hard_iface_tmp->if_num > hard_iface->if_num)
			hard_iface_tmp->if_num--;
	}
	rcu_read_unlock();

	hard_iface->if_num = -1;
	return 0;

err:
	rcu_read_unlock();
	return -ENOMEM;
}