batman-adv: beautify tt_global_add() argument list
[deliverable/linux.git] / net / batman-adv / translation-table.c
1 /* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
2 *
3 * Marek Lindner, Simon Wunderlich, Antonio Quartulli
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of version 2 of the GNU General Public
7 * License as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
17 * 02110-1301, USA
18 */
19
20 #include "main.h"
21 #include "translation-table.h"
22 #include "soft-interface.h"
23 #include "hard-interface.h"
24 #include "send.h"
25 #include "hash.h"
26 #include "originator.h"
27 #include "routing.h"
28 #include "bridge_loop_avoidance.h"
29
30 #include <linux/crc16.h>
31
32 static void batadv_send_roam_adv(struct bat_priv *bat_priv, uint8_t *client,
33 struct orig_node *orig_node);
34 static void batadv_tt_purge(struct work_struct *work);
35 static void
36 batadv_tt_global_del_orig_list(struct tt_global_entry *tt_global_entry);
37
38 /* returns 1 if they are the same mac addr */
39 static int batadv_compare_tt(const struct hlist_node *node, const void *data2)
40 {
41 const void *data1 = container_of(node, struct tt_common_entry,
42 hash_entry);
43
44 return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
45 }
46
/* (re)arm the periodic translation table purge work; it will fire on the
 * batman event workqueue after 5000 ms
 */
static void batadv_tt_start_timer(struct bat_priv *bat_priv)
{
	INIT_DELAYED_WORK(&bat_priv->tt_work, batadv_tt_purge);
	queue_delayed_work(batadv_event_workqueue, &bat_priv->tt_work,
			   msecs_to_jiffies(5000));
}
53
/* look up @data (compared as an ETH_ALEN address) in @hash and return the
 * matching tt_common_entry with its refcount incremented, or NULL when no
 * entry matches or @hash does not exist. The caller must drop the acquired
 * reference.
 */
static struct tt_common_entry *batadv_tt_hash_find(struct hashtable_t *hash,
						   const void *data)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct tt_common_entry *tt_common_entry, *tt_common_entry_tmp = NULL;
	uint32_t index;

	if (!hash)
		return NULL;

	index = batadv_choose_orig(data, hash->size);
	head = &hash->table[index];

	rcu_read_lock();
	hlist_for_each_entry_rcu(tt_common_entry, node, head, hash_entry) {
		if (!batadv_compare_eth(tt_common_entry, data))
			continue;

		/* refcount already 0: entry is on its way out, skip it */
		if (!atomic_inc_not_zero(&tt_common_entry->refcount))
			continue;

		tt_common_entry_tmp = tt_common_entry;
		break;
	}
	rcu_read_unlock();

	return tt_common_entry_tmp;
}
83
84 static struct tt_local_entry *
85 batadv_tt_local_hash_find(struct bat_priv *bat_priv, const void *data)
86 {
87 struct tt_common_entry *tt_common_entry;
88 struct tt_local_entry *tt_local_entry = NULL;
89
90 tt_common_entry = batadv_tt_hash_find(bat_priv->tt_local_hash, data);
91 if (tt_common_entry)
92 tt_local_entry = container_of(tt_common_entry,
93 struct tt_local_entry, common);
94 return tt_local_entry;
95 }
96
97 static struct tt_global_entry *
98 batadv_tt_global_hash_find(struct bat_priv *bat_priv, const void *data)
99 {
100 struct tt_common_entry *tt_common_entry;
101 struct tt_global_entry *tt_global_entry = NULL;
102
103 tt_common_entry = batadv_tt_hash_find(bat_priv->tt_global_hash, data);
104 if (tt_common_entry)
105 tt_global_entry = container_of(tt_common_entry,
106 struct tt_global_entry, common);
107 return tt_global_entry;
108
109 }
110
/* drop a reference to a local tt entry; the entry is freed (RCU-delayed)
 * once the last reference is gone
 */
static void
batadv_tt_local_entry_free_ref(struct tt_local_entry *tt_local_entry)
{
	if (atomic_dec_and_test(&tt_local_entry->common.refcount))
		kfree_rcu(tt_local_entry, common.rcu);
}
117
118 static void batadv_tt_global_entry_free_rcu(struct rcu_head *rcu)
119 {
120 struct tt_common_entry *tt_common_entry;
121 struct tt_global_entry *tt_global_entry;
122
123 tt_common_entry = container_of(rcu, struct tt_common_entry, rcu);
124 tt_global_entry = container_of(tt_common_entry, struct tt_global_entry,
125 common);
126
127 kfree(tt_global_entry);
128 }
129
/* drop a reference to a global tt entry; on the last reference the attached
 * originator list is released and the entry itself is freed after an RCU
 * grace period
 */
static void
batadv_tt_global_entry_free_ref(struct tt_global_entry *tt_global_entry)
{
	if (atomic_dec_and_test(&tt_global_entry->common.refcount)) {
		batadv_tt_global_del_orig_list(tt_global_entry);
		call_rcu(&tt_global_entry->common.rcu,
			 batadv_tt_global_entry_free_rcu);
	}
}
139
/* RCU callback: release the orig_node reference held by the list entry and
 * free the entry itself
 */
static void batadv_tt_orig_list_entry_free_rcu(struct rcu_head *rcu)
{
	struct tt_orig_list_entry *orig_entry;

	orig_entry = container_of(rcu, struct tt_orig_list_entry, rcu);
	batadv_orig_node_free_ref(orig_entry->orig_node);
	kfree(orig_entry);
}
148
/* schedule an orig list entry for RCU-delayed release */
static void
batadv_tt_orig_list_entry_free_ref(struct tt_orig_list_entry *orig_entry)
{
	/* to avoid race conditions, immediately decrease the tt counter */
	atomic_dec(&orig_entry->orig_node->tt_size);
	call_rcu(&orig_entry->rcu, batadv_tt_orig_list_entry_free_rcu);
}
156
157 static void batadv_tt_local_event(struct bat_priv *bat_priv,
158 const uint8_t *addr, uint8_t flags)
159 {
160 struct tt_change_node *tt_change_node, *entry, *safe;
161 bool event_removed = false;
162 bool del_op_requested, del_op_entry;
163
164 tt_change_node = kmalloc(sizeof(*tt_change_node), GFP_ATOMIC);
165
166 if (!tt_change_node)
167 return;
168
169 tt_change_node->change.flags = flags;
170 memcpy(tt_change_node->change.addr, addr, ETH_ALEN);
171
172 del_op_requested = flags & TT_CLIENT_DEL;
173
174 /* check for ADD+DEL or DEL+ADD events */
175 spin_lock_bh(&bat_priv->tt_changes_list_lock);
176 list_for_each_entry_safe(entry, safe, &bat_priv->tt_changes_list,
177 list) {
178 if (!batadv_compare_eth(entry->change.addr, addr))
179 continue;
180
181 /* DEL+ADD in the same orig interval have no effect and can be
182 * removed to avoid silly behaviour on the receiver side. The
183 * other way around (ADD+DEL) can happen in case of roaming of
184 * a client still in the NEW state. Roaming of NEW clients is
185 * now possible due to automatically recognition of "temporary"
186 * clients
187 */
188 del_op_entry = entry->change.flags & TT_CLIENT_DEL;
189 if (!del_op_requested && del_op_entry)
190 goto del;
191 if (del_op_requested && !del_op_entry)
192 goto del;
193 continue;
194 del:
195 list_del(&entry->list);
196 kfree(entry);
197 event_removed = true;
198 goto unlock;
199 }
200
201 /* track the change in the OGMinterval list */
202 list_add_tail(&tt_change_node->list, &bat_priv->tt_changes_list);
203
204 unlock:
205 spin_unlock_bh(&bat_priv->tt_changes_list_lock);
206
207 if (event_removed)
208 atomic_dec(&bat_priv->tt_local_changes);
209 else
210 atomic_inc(&bat_priv->tt_local_changes);
211 }
212
213 int batadv_tt_len(int changes_num)
214 {
215 return changes_num * sizeof(struct tt_change);
216 }
217
218 static int batadv_tt_local_init(struct bat_priv *bat_priv)
219 {
220 if (bat_priv->tt_local_hash)
221 return 0;
222
223 bat_priv->tt_local_hash = batadv_hash_new(1024);
224
225 if (!bat_priv->tt_local_hash)
226 return -ENOMEM;
227
228 return 0;
229 }
230
/* add (or refresh) a local translation table entry for @addr learned on
 * interface @ifindex. If the client was previously announced by other
 * originators (present in the global table), roaming advertisements are
 * sent to them and the global entry is marked as roaming.
 */
void batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
			 int ifindex)
{
	struct bat_priv *bat_priv = netdev_priv(soft_iface);
	struct tt_local_entry *tt_local_entry = NULL;
	struct tt_global_entry *tt_global_entry = NULL;
	struct hlist_head *head;
	struct hlist_node *node;
	struct tt_orig_list_entry *orig_entry;
	int hash_added;

	tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr);

	if (tt_local_entry) {
		/* already known: just refresh the timestamp */
		tt_local_entry->last_seen = jiffies;
		/* possibly unset the TT_CLIENT_PENDING flag */
		tt_local_entry->common.flags &= ~TT_CLIENT_PENDING;
		goto out;
	}

	tt_local_entry = kmalloc(sizeof(*tt_local_entry), GFP_ATOMIC);
	if (!tt_local_entry)
		goto out;

	batadv_dbg(DBG_TT, bat_priv,
		   "Creating new local tt entry: %pM (ttvn: %d)\n", addr,
		   (uint8_t)atomic_read(&bat_priv->ttvn));

	memcpy(tt_local_entry->common.addr, addr, ETH_ALEN);
	tt_local_entry->common.flags = NO_FLAGS;
	if (batadv_is_wifi_iface(ifindex))
		tt_local_entry->common.flags |= TT_CLIENT_WIFI;
	/* refcount 2: one for the hash table, one for this function */
	atomic_set(&tt_local_entry->common.refcount, 2);
	tt_local_entry->last_seen = jiffies;

	/* the batman interface mac address should never be purged */
	if (batadv_compare_eth(addr, soft_iface->dev_addr))
		tt_local_entry->common.flags |= TT_CLIENT_NOPURGE;

	/* The local entry has to be marked as NEW to avoid to send it in
	 * a full table response going out before the next ttvn increment
	 * (consistency check)
	 */
	tt_local_entry->common.flags |= TT_CLIENT_NEW;

	hash_added = batadv_hash_add(bat_priv->tt_local_hash, batadv_compare_tt,
				     batadv_choose_orig,
				     &tt_local_entry->common,
				     &tt_local_entry->common.hash_entry);

	if (unlikely(hash_added != 0)) {
		/* remove the reference for the hash */
		batadv_tt_local_entry_free_ref(tt_local_entry);
		goto out;
	}

	batadv_tt_local_event(bat_priv, addr, tt_local_entry->common.flags);

	/* remove address from global hash if present */
	tt_global_entry = batadv_tt_global_hash_find(bat_priv, addr);

	/* Check whether it is a roaming! */
	if (tt_global_entry) {
		/* These node are probably going to update their tt table */
		head = &tt_global_entry->orig_list;
		rcu_read_lock();
		hlist_for_each_entry_rcu(orig_entry, node, head, list) {
			orig_entry->orig_node->tt_poss_change = true;

			batadv_send_roam_adv(bat_priv,
					     tt_global_entry->common.addr,
					     orig_entry->orig_node);
		}
		rcu_read_unlock();
		/* The global entry has to be marked as ROAMING and
		 * has to be kept for consistency purpose
		 */
		tt_global_entry->common.flags |= TT_CLIENT_ROAM;
		tt_global_entry->roam_at = jiffies;
	}
out:
	if (tt_local_entry)
		batadv_tt_local_entry_free_ref(tt_local_entry);
	if (tt_global_entry)
		batadv_tt_global_entry_free_ref(tt_global_entry);
}
317
318 static void batadv_tt_realloc_packet_buff(unsigned char **packet_buff,
319 int *packet_buff_len,
320 int min_packet_len,
321 int new_packet_len)
322 {
323 unsigned char *new_buff;
324
325 new_buff = kmalloc(new_packet_len, GFP_ATOMIC);
326
327 /* keep old buffer if kmalloc should fail */
328 if (new_buff) {
329 memcpy(new_buff, *packet_buff, min_packet_len);
330 kfree(*packet_buff);
331 *packet_buff = new_buff;
332 *packet_buff_len = new_packet_len;
333 }
334 }
335
/* resize *packet_buff so that it can additionally hold all currently
 * pending local tt changes. If the result would exceed the primary
 * interface MTU (or no primary interface exists), the buffer is kept at
 * @min_packet_len and the changes will be requested via a fragmented tt
 * table request instead.
 */
static void batadv_tt_prepare_packet_buff(struct bat_priv *bat_priv,
					  unsigned char **packet_buff,
					  int *packet_buff_len,
					  int min_packet_len)
{
	struct hard_iface *primary_if;
	int req_len;

	primary_if = batadv_primary_if_get_selected(bat_priv);

	req_len = min_packet_len;
	req_len += batadv_tt_len(atomic_read(&bat_priv->tt_local_changes));

	/* if we have too many changes for one packet don't send any
	 * and wait for the tt table request which will be fragmented
	 */
	if ((!primary_if) || (req_len > primary_if->soft_iface->mtu))
		req_len = min_packet_len;

	batadv_tt_realloc_packet_buff(packet_buff, packet_buff_len,
				      min_packet_len, req_len);

	if (primary_if)
		batadv_hardif_free_ref(primary_if);
}
361
/* drain the pending local tt change list into *packet_buff (after the
 * first @min_packet_len bytes) and keep a private copy in bat_priv->tt_buff
 * for answering later tt requests. Returns the number of changes copied;
 * changes that do not fit are silently dropped from the list.
 */
static int batadv_tt_changes_fill_buff(struct bat_priv *bat_priv,
				       unsigned char **packet_buff,
				       int *packet_buff_len,
				       int min_packet_len)
{
	struct tt_change_node *entry, *safe;
	int count = 0, tot_changes = 0, new_len;
	unsigned char *tt_buff;

	batadv_tt_prepare_packet_buff(bat_priv, packet_buff,
				      packet_buff_len, min_packet_len);

	/* room actually granted for tt changes (may be 0 on MTU overflow) */
	new_len = *packet_buff_len - min_packet_len;
	tt_buff = *packet_buff + min_packet_len;

	if (new_len > 0)
		tot_changes = new_len / batadv_tt_len(1);

	spin_lock_bh(&bat_priv->tt_changes_list_lock);
	atomic_set(&bat_priv->tt_local_changes, 0);

	list_for_each_entry_safe(entry, safe, &bat_priv->tt_changes_list,
				 list) {
		if (count < tot_changes) {
			memcpy(tt_buff + batadv_tt_len(count),
			       &entry->change, sizeof(struct tt_change));
			count++;
		}
		/* entries are consumed even when they did not fit */
		list_del(&entry->list);
		kfree(entry);
	}
	spin_unlock_bh(&bat_priv->tt_changes_list_lock);

	/* Keep the buffer for possible tt_request */
	spin_lock_bh(&bat_priv->tt_buff_lock);
	kfree(bat_priv->tt_buff);
	bat_priv->tt_buff_len = 0;
	bat_priv->tt_buff = NULL;
	/* check whether this new OGM has no changes due to size problems */
	if (new_len > 0) {
		/* if kmalloc() fails we will reply with the full table
		 * instead of providing the diff
		 */
		bat_priv->tt_buff = kmalloc(new_len, GFP_ATOMIC);
		if (bat_priv->tt_buff) {
			memcpy(bat_priv->tt_buff, tt_buff, new_len);
			bat_priv->tt_buff_len = new_len;
		}
	}
	spin_unlock_bh(&bat_priv->tt_buff_lock);

	return count;
}
415
/* debugfs seq_file handler: dump the local translation table, one line per
 * client with its flag characters (R/P/N/X/W)
 */
int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct bat_priv *bat_priv = netdev_priv(net_dev);
	struct hashtable_t *hash = bat_priv->tt_local_hash;
	struct tt_common_entry *tt_common_entry;
	struct hard_iface *primary_if;
	struct hlist_node *node;
	struct hlist_head *head;
	uint32_t i;
	int ret = 0;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if) {
		ret = seq_printf(seq,
				 "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
				 net_dev->name);
		goto out;
	}

	if (primary_if->if_status != IF_ACTIVE) {
		ret = seq_printf(seq,
				 "BATMAN mesh %s disabled - primary interface not active\n",
				 net_dev->name);
		goto out;
	}

	seq_printf(seq,
		   "Locally retrieved addresses (from %s) announced via TT (TTVN: %u):\n",
		   net_dev->name, (uint8_t)atomic_read(&bat_priv->ttvn));

	/* walk every hash bucket under RCU */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(tt_common_entry, node,
					 head, hash_entry) {
			seq_printf(seq, " * %pM [%c%c%c%c%c]\n",
				   tt_common_entry->addr,
				   (tt_common_entry->flags &
				    TT_CLIENT_ROAM ? 'R' : '.'),
				   (tt_common_entry->flags &
				    TT_CLIENT_NOPURGE ? 'P' : '.'),
				   (tt_common_entry->flags &
				    TT_CLIENT_NEW ? 'N' : '.'),
				   (tt_common_entry->flags &
				    TT_CLIENT_PENDING ? 'X' : '.'),
				   (tt_common_entry->flags &
				    TT_CLIENT_WIFI ? 'W' : '.'));
		}
		rcu_read_unlock();
	}
out:
	if (primary_if)
		batadv_hardif_free_ref(primary_if);
	return ret;
}
473
/* mark a local tt entry as pending removal (with the extra @flags, e.g.
 * TT_CLIENT_ROAM) and queue the corresponding tt change event
 */
static void batadv_tt_local_set_pending(struct bat_priv *bat_priv,
					struct tt_local_entry *tt_local_entry,
					uint16_t flags, const char *message)
{
	batadv_tt_local_event(bat_priv, tt_local_entry->common.addr,
			      tt_local_entry->common.flags | flags);

	/* The local client has to be marked as "pending to be removed" but has
	 * to be kept in the table in order to send it in a full table
	 * response issued before the net ttvn increment (consistency check)
	 */
	tt_local_entry->common.flags |= TT_CLIENT_PENDING;

	batadv_dbg(DBG_TT, bat_priv,
		   "Local tt entry (%pM) pending to be removed: %s\n",
		   tt_local_entry->common.addr, message);
}
491
492 void batadv_tt_local_remove(struct bat_priv *bat_priv, const uint8_t *addr,
493 const char *message, bool roaming)
494 {
495 struct tt_local_entry *tt_local_entry = NULL;
496
497 tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr);
498 if (!tt_local_entry)
499 goto out;
500
501 batadv_tt_local_set_pending(bat_priv, tt_local_entry, TT_CLIENT_DEL |
502 (roaming ? TT_CLIENT_ROAM : NO_FLAGS),
503 message);
504 out:
505 if (tt_local_entry)
506 batadv_tt_local_entry_free_ref(tt_local_entry);
507 }
508
/* walk the local table and flag every timed-out client as pending removal;
 * NOPURGE clients and clients already pending are skipped
 */
static void batadv_tt_local_purge(struct bat_priv *bat_priv)
{
	struct hashtable_t *hash = bat_priv->tt_local_hash;
	struct tt_local_entry *tt_local_entry;
	struct tt_common_entry *tt_common_entry;
	struct hlist_node *node, *node_tmp;
	struct hlist_head *head;
	spinlock_t *list_lock; /* protects write access to the hash lists */
	uint32_t i;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
					  head, hash_entry) {
			tt_local_entry = container_of(tt_common_entry,
						      struct tt_local_entry,
						      common);
			if (tt_local_entry->common.flags & TT_CLIENT_NOPURGE)
				continue;

			/* entry already marked for deletion */
			if (tt_local_entry->common.flags & TT_CLIENT_PENDING)
				continue;

			if (!batadv_has_timed_out(tt_local_entry->last_seen,
						  TT_LOCAL_TIMEOUT))
				continue;

			batadv_tt_local_set_pending(bat_priv, tt_local_entry,
						    TT_CLIENT_DEL, "timed out");
		}
		spin_unlock_bh(list_lock);
	}

}
547
/* tear down the local translation table: unlink and release every entry,
 * then destroy the hash itself
 */
static void batadv_tt_local_table_free(struct bat_priv *bat_priv)
{
	struct hashtable_t *hash;
	spinlock_t *list_lock; /* protects write access to the hash lists */
	struct tt_common_entry *tt_common_entry;
	struct tt_local_entry *tt_local_entry;
	struct hlist_node *node, *node_tmp;
	struct hlist_head *head;
	uint32_t i;

	if (!bat_priv->tt_local_hash)
		return;

	hash = bat_priv->tt_local_hash;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
					  head, hash_entry) {
			hlist_del_rcu(node);
			tt_local_entry = container_of(tt_common_entry,
						      struct tt_local_entry,
						      common);
			/* drops the hash's reference */
			batadv_tt_local_entry_free_ref(tt_local_entry);
		}
		spin_unlock_bh(list_lock);
	}

	batadv_hash_destroy(hash);

	bat_priv->tt_local_hash = NULL;
}
583
584 static int batadv_tt_global_init(struct bat_priv *bat_priv)
585 {
586 if (bat_priv->tt_global_hash)
587 return 0;
588
589 bat_priv->tt_global_hash = batadv_hash_new(1024);
590
591 if (!bat_priv->tt_global_hash)
592 return -ENOMEM;
593
594 return 0;
595 }
596
/* discard all queued local tt changes and reset the pending-change
 * counter
 */
static void batadv_tt_changes_list_free(struct bat_priv *bat_priv)
{
	struct tt_change_node *entry, *safe;

	spin_lock_bh(&bat_priv->tt_changes_list_lock);

	list_for_each_entry_safe(entry, safe, &bat_priv->tt_changes_list,
				 list) {
		list_del(&entry->list);
		kfree(entry);
	}

	atomic_set(&bat_priv->tt_local_changes, 0);
	spin_unlock_bh(&bat_priv->tt_changes_list_lock);
}
612
613 /* find out if an orig_node is already in the list of a tt_global_entry.
614 * returns 1 if found, 0 otherwise
615 */
616 static bool batadv_tt_global_entry_has_orig(const struct tt_global_entry *entry,
617 const struct orig_node *orig_node)
618 {
619 struct tt_orig_list_entry *tmp_orig_entry;
620 const struct hlist_head *head;
621 struct hlist_node *node;
622 bool found = false;
623
624 rcu_read_lock();
625 head = &entry->orig_list;
626 hlist_for_each_entry_rcu(tmp_orig_entry, node, head, list) {
627 if (tmp_orig_entry->orig_node == orig_node) {
628 found = true;
629 break;
630 }
631 }
632 rcu_read_unlock();
633 return found;
634 }
635
/* attach @orig_node (announcing the client with @ttvn) to the originator
 * list of @tt_global_entry; silently does nothing on allocation failure.
 * Takes a reference on @orig_node and bumps its tt_size counter.
 */
static void
batadv_tt_global_add_orig_entry(struct tt_global_entry *tt_global_entry,
				struct orig_node *orig_node, int ttvn)
{
	struct tt_orig_list_entry *orig_entry;

	orig_entry = kzalloc(sizeof(*orig_entry), GFP_ATOMIC);
	if (!orig_entry)
		return;

	INIT_HLIST_NODE(&orig_entry->list);
	atomic_inc(&orig_node->refcount);
	atomic_inc(&orig_node->tt_size);
	orig_entry->orig_node = orig_node;
	orig_entry->ttvn = ttvn;

	spin_lock_bh(&tt_global_entry->list_lock);
	hlist_add_head_rcu(&orig_entry->list,
			   &tt_global_entry->orig_list);
	spin_unlock_bh(&tt_global_entry->list_lock);
}
657
658 /* caller must hold orig_node refcount */
659 int batadv_tt_global_add(struct bat_priv *bat_priv, struct orig_node *orig_node,
660 const unsigned char *tt_addr, uint8_t flags,
661 uint8_t ttvn)
662 {
663 struct tt_global_entry *tt_global_entry = NULL;
664 int ret = 0;
665 int hash_added;
666 struct tt_common_entry *common;
667
668 tt_global_entry = batadv_tt_global_hash_find(bat_priv, tt_addr);
669
670 if (!tt_global_entry) {
671 tt_global_entry = kzalloc(sizeof(*tt_global_entry), GFP_ATOMIC);
672 if (!tt_global_entry)
673 goto out;
674
675 common = &tt_global_entry->common;
676 memcpy(common->addr, tt_addr, ETH_ALEN);
677
678 common->flags = flags;
679 tt_global_entry->roam_at = 0;
680 atomic_set(&common->refcount, 2);
681
682 INIT_HLIST_HEAD(&tt_global_entry->orig_list);
683 spin_lock_init(&tt_global_entry->list_lock);
684
685 hash_added = batadv_hash_add(bat_priv->tt_global_hash,
686 batadv_compare_tt,
687 batadv_choose_orig, common,
688 &common->hash_entry);
689
690 if (unlikely(hash_added != 0)) {
691 /* remove the reference for the hash */
692 batadv_tt_global_entry_free_ref(tt_global_entry);
693 goto out_remove;
694 }
695
696 batadv_tt_global_add_orig_entry(tt_global_entry, orig_node,
697 ttvn);
698 } else {
699 /* there is already a global entry, use this one. */
700
701 /* If there is the TT_CLIENT_ROAM flag set, there is only one
702 * originator left in the list and we previously received a
703 * delete + roaming change for this originator.
704 *
705 * We should first delete the old originator before adding the
706 * new one.
707 */
708 if (tt_global_entry->common.flags & TT_CLIENT_ROAM) {
709 batadv_tt_global_del_orig_list(tt_global_entry);
710 tt_global_entry->common.flags &= ~TT_CLIENT_ROAM;
711 tt_global_entry->roam_at = 0;
712 }
713
714 if (!batadv_tt_global_entry_has_orig(tt_global_entry,
715 orig_node))
716 batadv_tt_global_add_orig_entry(tt_global_entry,
717 orig_node, ttvn);
718 }
719
720 batadv_dbg(DBG_TT, bat_priv,
721 "Creating new global tt entry: %pM (via %pM)\n",
722 tt_global_entry->common.addr, orig_node->orig);
723
724 out_remove:
725 /* remove address from local hash if present */
726 batadv_tt_local_remove(bat_priv, tt_global_entry->common.addr,
727 "global tt received", flags & TT_CLIENT_ROAM);
728 ret = 1;
729 out:
730 if (tt_global_entry)
731 batadv_tt_global_entry_free_ref(tt_global_entry);
732 return ret;
733 }
734
/* print all orig nodes who announce the address for this global entry.
 * it is assumed that the caller holds rcu_read_lock();
 */
static void
batadv_tt_global_print_entry(struct tt_global_entry *tt_global_entry,
			     struct seq_file *seq)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct tt_orig_list_entry *orig_entry;
	struct tt_common_entry *tt_common_entry;
	uint16_t flags;
	uint8_t last_ttvn;

	tt_common_entry = &tt_global_entry->common;

	head = &tt_global_entry->orig_list;

	/* one line per announcing originator, with its entry ttvn and the
	 * originator's current ttvn plus the R/W flag characters
	 */
	hlist_for_each_entry_rcu(orig_entry, node, head, list) {
		flags = tt_common_entry->flags;
		last_ttvn = atomic_read(&orig_entry->orig_node->last_ttvn);
		seq_printf(seq, " * %pM  (%3u) via %pM     (%3u)   [%c%c]\n",
			   tt_global_entry->common.addr, orig_entry->ttvn,
			   orig_entry->orig_node->orig, last_ttvn,
			   (flags & TT_CLIENT_ROAM ? 'R' : '.'),
			   (flags & TT_CLIENT_WIFI ? 'W' : '.'));
	}
}
763
/* debugfs seq_file handler: dump the global translation table, delegating
 * the per-entry originator lines to batadv_tt_global_print_entry()
 */
int batadv_tt_global_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct bat_priv *bat_priv = netdev_priv(net_dev);
	struct hashtable_t *hash = bat_priv->tt_global_hash;
	struct tt_common_entry *tt_common_entry;
	struct tt_global_entry *tt_global_entry;
	struct hard_iface *primary_if;
	struct hlist_node *node;
	struct hlist_head *head;
	uint32_t i;
	int ret = 0;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if) {
		ret = seq_printf(seq,
				 "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
				 net_dev->name);
		goto out;
	}

	if (primary_if->if_status != IF_ACTIVE) {
		ret = seq_printf(seq,
				 "BATMAN mesh %s disabled - primary interface not active\n",
				 net_dev->name);
		goto out;
	}

	seq_printf(seq,
		   "Globally announced TT entries received via the mesh %s\n",
		   net_dev->name);
	seq_printf(seq, "       %-13s %s       %-15s %s %s\n",
		   "Client", "(TTVN)", "Originator", "(Curr TTVN)", "Flags");

	/* walk every hash bucket under RCU */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(tt_common_entry, node,
					 head, hash_entry) {
			tt_global_entry = container_of(tt_common_entry,
						       struct tt_global_entry,
						       common);
			batadv_tt_global_print_entry(tt_global_entry, seq);
		}
		rcu_read_unlock();
	}
out:
	if (primary_if)
		batadv_hardif_free_ref(primary_if);
	return ret;
}
816
/* deletes the orig list of a tt_global_entry */
static void
batadv_tt_global_del_orig_list(struct tt_global_entry *tt_global_entry)
{
	struct hlist_head *head;
	struct hlist_node *node, *safe;
	struct tt_orig_list_entry *orig_entry;

	spin_lock_bh(&tt_global_entry->list_lock);
	head = &tt_global_entry->orig_list;
	hlist_for_each_entry_safe(orig_entry, node, safe, head, list) {
		hlist_del_rcu(node);
		/* actual free is RCU-deferred */
		batadv_tt_orig_list_entry_free_ref(orig_entry);
	}
	spin_unlock_bh(&tt_global_entry->list_lock);

}
834
/* remove @orig_node from the originator list of @tt_global_entry (if
 * present), logging @message as the reason
 */
static void
batadv_tt_global_del_orig_entry(struct bat_priv *bat_priv,
				struct tt_global_entry *tt_global_entry,
				struct orig_node *orig_node,
				const char *message)
{
	struct hlist_head *head;
	struct hlist_node *node, *safe;
	struct tt_orig_list_entry *orig_entry;

	spin_lock_bh(&tt_global_entry->list_lock);
	head = &tt_global_entry->orig_list;
	hlist_for_each_entry_safe(orig_entry, node, safe, head, list) {
		if (orig_entry->orig_node == orig_node) {
			batadv_dbg(DBG_TT, bat_priv,
				   "Deleting %pM from global tt entry %pM: %s\n",
				   orig_node->orig,
				   tt_global_entry->common.addr, message);
			hlist_del_rcu(node);
			batadv_tt_orig_list_entry_free_ref(orig_entry);
		}
	}
	spin_unlock_bh(&tt_global_entry->list_lock);
}
859
/* unlink @tt_global_entry from the global hash and drop the hash's
 * reference on it
 */
static void batadv_tt_global_del_struct(struct bat_priv *bat_priv,
					struct tt_global_entry *tt_global_entry,
					const char *message)
{
	batadv_dbg(DBG_TT, bat_priv, "Deleting global tt entry %pM: %s\n",
		   tt_global_entry->common.addr, message);

	batadv_hash_remove(bat_priv->tt_global_hash, batadv_compare_tt,
			   batadv_choose_orig, tt_global_entry->common.addr);
	batadv_tt_global_entry_free_ref(tt_global_entry);

}
872
/* If the client is to be deleted, we check if it is the last originator
 * entry within tt_global entry. If yes, we set the TT_CLIENT_ROAM flag and
 * the timer, otherwise we simply remove the originator scheduled for
 * deletion.
 */
static void
batadv_tt_global_del_roaming(struct bat_priv *bat_priv,
			     struct tt_global_entry *tt_global_entry,
			     struct orig_node *orig_node, const char *message)
{
	bool last_entry = true;
	struct hlist_head *head;
	struct hlist_node *node;
	struct tt_orig_list_entry *orig_entry;

	/* no local entry exists, case 1:
	 * Check if this is the last one or if other entries exist.
	 */

	rcu_read_lock();
	head = &tt_global_entry->orig_list;
	hlist_for_each_entry_rcu(orig_entry, node, head, list) {
		if (orig_entry->orig_node != orig_node) {
			last_entry = false;
			break;
		}
	}
	rcu_read_unlock();

	if (last_entry) {
		/* its the last one, mark for roaming. */
		tt_global_entry->common.flags |= TT_CLIENT_ROAM;
		tt_global_entry->roam_at = jiffies;
	} else
		/* there is another entry, we can simply delete this
		 * one and can still use the other one.
		 */
		batadv_tt_global_del_orig_entry(bat_priv, tt_global_entry,
						orig_node, message);
}
912
913
914
/* delete the announcement of @addr by @orig_node from the global table.
 * For non-roaming deletes the originator is removed outright (and the
 * whole entry, when no originator is left); roaming deletes are handled
 * according to the two cases documented inline below.
 */
static void batadv_tt_global_del(struct bat_priv *bat_priv,
				 struct orig_node *orig_node,
				 const unsigned char *addr,
				 const char *message, bool roaming)
{
	struct tt_global_entry *tt_global_entry = NULL;
	struct tt_local_entry *local_entry = NULL;

	tt_global_entry = batadv_tt_global_hash_find(bat_priv, addr);
	if (!tt_global_entry)
		goto out;

	if (!roaming) {
		batadv_tt_global_del_orig_entry(bat_priv, tt_global_entry,
						orig_node, message);

		if (hlist_empty(&tt_global_entry->orig_list))
			batadv_tt_global_del_struct(bat_priv, tt_global_entry,
						    message);

		goto out;
	}

	/* if we are deleting a global entry due to a roam
	 * event, there are two possibilities:
	 * 1) the client roamed from node A to node B => if there
	 *    is only one originator left for this client, we mark
	 *    it with TT_CLIENT_ROAM, we start a timer and we
	 *    wait for node B to claim it. In case of timeout
	 *    the entry is purged.
	 *
	 *    If there are other originators left, we directly delete
	 *    the originator.
	 * 2) the client roamed to us => we can directly delete
	 *    the global entry, since it is useless now.
	 */
	local_entry = batadv_tt_local_hash_find(bat_priv,
						tt_global_entry->common.addr);
	if (local_entry) {
		/* local entry exists, case 2: client roamed to us. */
		batadv_tt_global_del_orig_list(tt_global_entry);
		batadv_tt_global_del_struct(bat_priv, tt_global_entry, message);
	} else
		/* no local entry exists, case 1: check for roaming */
		batadv_tt_global_del_roaming(bat_priv, tt_global_entry,
					     orig_node, message);


out:
	if (tt_global_entry)
		batadv_tt_global_entry_free_ref(tt_global_entry);
	if (local_entry)
		batadv_tt_local_entry_free_ref(local_entry);
}
969
/* purge @orig_node from every global entry's originator list, removing
 * entries that end up with no announcing originator, and mark the
 * originator's tt state as uninitialised
 */
void batadv_tt_global_del_orig(struct bat_priv *bat_priv,
			       struct orig_node *orig_node, const char *message)
{
	struct tt_global_entry *global_entry;
	struct tt_common_entry *tt_common_entry;
	uint32_t i;
	struct hashtable_t *hash = bat_priv->tt_global_hash;
	struct hlist_node *node, *safe;
	struct hlist_head *head;
	spinlock_t *list_lock; /* protects write access to the hash lists */

	if (!hash)
		return;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(tt_common_entry, node, safe,
					  head, hash_entry) {
			global_entry = container_of(tt_common_entry,
						    struct tt_global_entry,
						    common);

			batadv_tt_global_del_orig_entry(bat_priv, global_entry,
							orig_node, message);

			if (hlist_empty(&global_entry->orig_list)) {
				batadv_dbg(DBG_TT, bat_priv,
					   "Deleting global tt entry %pM: %s\n",
					   global_entry->common.addr, message);
				hlist_del_rcu(node);
				batadv_tt_global_entry_free_ref(global_entry);
			}
		}
		spin_unlock_bh(list_lock);
	}
	orig_node->tt_initialised = false;
}
1010
/* remove global entries whose roaming grace period (TT_CLIENT_ROAM_TIMEOUT
 * since roam_at) has expired without being claimed by another originator
 */
static void batadv_tt_global_roam_purge(struct bat_priv *bat_priv)
{
	struct hashtable_t *hash = bat_priv->tt_global_hash;
	struct tt_common_entry *tt_common_entry;
	struct tt_global_entry *tt_global_entry;
	struct hlist_node *node, *node_tmp;
	struct hlist_head *head;
	spinlock_t *list_lock; /* protects write access to the hash lists */
	uint32_t i;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
					  head, hash_entry) {
			tt_global_entry = container_of(tt_common_entry,
						       struct tt_global_entry,
						       common);
			if (!(tt_global_entry->common.flags & TT_CLIENT_ROAM))
				continue;
			if (!batadv_has_timed_out(tt_global_entry->roam_at,
						  TT_CLIENT_ROAM_TIMEOUT))
				continue;

			batadv_dbg(DBG_TT, bat_priv,
				   "Deleting global tt entry (%pM): Roaming timeout\n",
				   tt_global_entry->common.addr);

			hlist_del_rcu(node);
			batadv_tt_global_entry_free_ref(tt_global_entry);
		}
		spin_unlock_bh(list_lock);
	}

}
1048
1049 static void batadv_tt_global_table_free(struct bat_priv *bat_priv)
1050 {
1051 struct hashtable_t *hash;
1052 spinlock_t *list_lock; /* protects write access to the hash lists */
1053 struct tt_common_entry *tt_common_entry;
1054 struct tt_global_entry *tt_global_entry;
1055 struct hlist_node *node, *node_tmp;
1056 struct hlist_head *head;
1057 uint32_t i;
1058
1059 if (!bat_priv->tt_global_hash)
1060 return;
1061
1062 hash = bat_priv->tt_global_hash;
1063
1064 for (i = 0; i < hash->size; i++) {
1065 head = &hash->table[i];
1066 list_lock = &hash->list_locks[i];
1067
1068 spin_lock_bh(list_lock);
1069 hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
1070 head, hash_entry) {
1071 hlist_del_rcu(node);
1072 tt_global_entry = container_of(tt_common_entry,
1073 struct tt_global_entry,
1074 common);
1075 batadv_tt_global_entry_free_ref(tt_global_entry);
1076 }
1077 spin_unlock_bh(list_lock);
1078 }
1079
1080 batadv_hash_destroy(hash);
1081
1082 bat_priv->tt_global_hash = NULL;
1083 }
1084
1085 static bool _batadv_is_ap_isolated(struct tt_local_entry *tt_local_entry,
1086 struct tt_global_entry *tt_global_entry)
1087 {
1088 bool ret = false;
1089
1090 if (tt_local_entry->common.flags & TT_CLIENT_WIFI &&
1091 tt_global_entry->common.flags & TT_CLIENT_WIFI)
1092 ret = true;
1093
1094 return ret;
1095 }
1096
/* Look up the originator currently serving the global client @addr.
 *
 * If @src is given and AP isolation is enabled, the lookup fails when
 * both the source and destination client are marked as WIFI clients.
 * Among all originators announcing @addr the one whose router has the
 * highest tq_avg is selected.
 *
 * Returns the chosen orig_node with its refcount increased (caller must
 * release it with batadv_orig_node_free_ref()), or NULL.
 */
struct orig_node *batadv_transtable_search(struct bat_priv *bat_priv,
					   const uint8_t *src,
					   const uint8_t *addr)
{
	struct tt_local_entry *tt_local_entry = NULL;
	struct tt_global_entry *tt_global_entry = NULL;
	struct orig_node *orig_node = NULL;
	struct neigh_node *router = NULL;
	struct hlist_head *head;
	struct hlist_node *node;
	struct tt_orig_list_entry *orig_entry;
	int best_tq;

	/* the local source entry is only needed for the AP isolation check */
	if (src && atomic_read(&bat_priv->ap_isolation)) {
		tt_local_entry = batadv_tt_local_hash_find(bat_priv, src);
		if (!tt_local_entry)
			goto out;
	}

	tt_global_entry = batadv_tt_global_hash_find(bat_priv, addr);
	if (!tt_global_entry)
		goto out;

	/* check whether the clients should not communicate due to AP
	 * isolation
	 */
	if (tt_local_entry &&
	    _batadv_is_ap_isolated(tt_local_entry, tt_global_entry))
		goto out;

	best_tq = 0;

	/* walk all originators announcing this client and keep the one
	 * reachable via the best link quality
	 */
	rcu_read_lock();
	head = &tt_global_entry->orig_list;
	hlist_for_each_entry_rcu(orig_entry, node, head, list) {
		router = batadv_orig_node_get_router(orig_entry->orig_node);
		if (!router)
			continue;

		if (router->tq_avg > best_tq) {
			orig_node = orig_entry->orig_node;
			best_tq = router->tq_avg;
		}
		batadv_neigh_node_free_ref(router);
	}
	/* found anything? take a reference before leaving the RCU section;
	 * a zero refcount means the node is already being freed
	 */
	if (orig_node && !atomic_inc_not_zero(&orig_node->refcount))
		orig_node = NULL;
	rcu_read_unlock();
out:
	if (tt_global_entry)
		batadv_tt_global_entry_free_ref(tt_global_entry);
	if (tt_local_entry)
		batadv_tt_local_entry_free_ref(tt_local_entry);

	return orig_node;
}
1154
1155 /* Calculates the checksum of the local table of a given orig_node */
1156 static uint16_t batadv_tt_global_crc(struct bat_priv *bat_priv,
1157 struct orig_node *orig_node)
1158 {
1159 uint16_t total = 0, total_one;
1160 struct hashtable_t *hash = bat_priv->tt_global_hash;
1161 struct tt_common_entry *tt_common_entry;
1162 struct tt_global_entry *tt_global_entry;
1163 struct hlist_node *node;
1164 struct hlist_head *head;
1165 uint32_t i;
1166 int j;
1167
1168 for (i = 0; i < hash->size; i++) {
1169 head = &hash->table[i];
1170
1171 rcu_read_lock();
1172 hlist_for_each_entry_rcu(tt_common_entry, node,
1173 head, hash_entry) {
1174 tt_global_entry = container_of(tt_common_entry,
1175 struct tt_global_entry,
1176 common);
1177 /* Roaming clients are in the global table for
1178 * consistency only. They don't have to be
1179 * taken into account while computing the
1180 * global crc
1181 */
1182 if (tt_global_entry->common.flags & TT_CLIENT_ROAM)
1183 continue;
1184
1185 /* find out if this global entry is announced by this
1186 * originator
1187 */
1188 if (!batadv_tt_global_entry_has_orig(tt_global_entry,
1189 orig_node))
1190 continue;
1191
1192 total_one = 0;
1193 for (j = 0; j < ETH_ALEN; j++)
1194 total_one = crc16_byte(total_one,
1195 tt_global_entry->common.addr[j]);
1196 total ^= total_one;
1197 }
1198 rcu_read_unlock();
1199 }
1200
1201 return total;
1202 }
1203
1204 /* Calculates the checksum of the local table */
1205 static uint16_t batadv_tt_local_crc(struct bat_priv *bat_priv)
1206 {
1207 uint16_t total = 0, total_one;
1208 struct hashtable_t *hash = bat_priv->tt_local_hash;
1209 struct tt_common_entry *tt_common_entry;
1210 struct hlist_node *node;
1211 struct hlist_head *head;
1212 uint32_t i;
1213 int j;
1214
1215 for (i = 0; i < hash->size; i++) {
1216 head = &hash->table[i];
1217
1218 rcu_read_lock();
1219 hlist_for_each_entry_rcu(tt_common_entry, node,
1220 head, hash_entry) {
1221 /* not yet committed clients have not to be taken into
1222 * account while computing the CRC
1223 */
1224 if (tt_common_entry->flags & TT_CLIENT_NEW)
1225 continue;
1226 total_one = 0;
1227 for (j = 0; j < ETH_ALEN; j++)
1228 total_one = crc16_byte(total_one,
1229 tt_common_entry->addr[j]);
1230 total ^= total_one;
1231 }
1232 rcu_read_unlock();
1233 }
1234
1235 return total;
1236 }
1237
1238 static void batadv_tt_req_list_free(struct bat_priv *bat_priv)
1239 {
1240 struct tt_req_node *node, *safe;
1241
1242 spin_lock_bh(&bat_priv->tt_req_list_lock);
1243
1244 list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) {
1245 list_del(&node->list);
1246 kfree(node);
1247 }
1248
1249 spin_unlock_bh(&bat_priv->tt_req_list_lock);
1250 }
1251
1252 static void batadv_tt_save_orig_buffer(struct bat_priv *bat_priv,
1253 struct orig_node *orig_node,
1254 const unsigned char *tt_buff,
1255 uint8_t tt_num_changes)
1256 {
1257 uint16_t tt_buff_len = batadv_tt_len(tt_num_changes);
1258
1259 /* Replace the old buffer only if I received something in the
1260 * last OGM (the OGM could carry no changes)
1261 */
1262 spin_lock_bh(&orig_node->tt_buff_lock);
1263 if (tt_buff_len > 0) {
1264 kfree(orig_node->tt_buff);
1265 orig_node->tt_buff_len = 0;
1266 orig_node->tt_buff = kmalloc(tt_buff_len, GFP_ATOMIC);
1267 if (orig_node->tt_buff) {
1268 memcpy(orig_node->tt_buff, tt_buff, tt_buff_len);
1269 orig_node->tt_buff_len = tt_buff_len;
1270 }
1271 }
1272 spin_unlock_bh(&orig_node->tt_buff_lock);
1273 }
1274
1275 static void batadv_tt_req_purge(struct bat_priv *bat_priv)
1276 {
1277 struct tt_req_node *node, *safe;
1278
1279 spin_lock_bh(&bat_priv->tt_req_list_lock);
1280 list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) {
1281 if (batadv_has_timed_out(node->issued_at, TT_REQUEST_TIMEOUT)) {
1282 list_del(&node->list);
1283 kfree(node);
1284 }
1285 }
1286 spin_unlock_bh(&bat_priv->tt_req_list_lock);
1287 }
1288
1289 /* returns the pointer to the new tt_req_node struct if no request
1290 * has already been issued for this orig_node, NULL otherwise
1291 */
1292 static struct tt_req_node *batadv_new_tt_req_node(struct bat_priv *bat_priv,
1293 struct orig_node *orig_node)
1294 {
1295 struct tt_req_node *tt_req_node_tmp, *tt_req_node = NULL;
1296
1297 spin_lock_bh(&bat_priv->tt_req_list_lock);
1298 list_for_each_entry(tt_req_node_tmp, &bat_priv->tt_req_list, list) {
1299 if (batadv_compare_eth(tt_req_node_tmp, orig_node) &&
1300 !batadv_has_timed_out(tt_req_node_tmp->issued_at,
1301 TT_REQUEST_TIMEOUT))
1302 goto unlock;
1303 }
1304
1305 tt_req_node = kmalloc(sizeof(*tt_req_node), GFP_ATOMIC);
1306 if (!tt_req_node)
1307 goto unlock;
1308
1309 memcpy(tt_req_node->addr, orig_node->orig, ETH_ALEN);
1310 tt_req_node->issued_at = jiffies;
1311
1312 list_add(&tt_req_node->list, &bat_priv->tt_req_list);
1313 unlock:
1314 spin_unlock_bh(&bat_priv->tt_req_list_lock);
1315 return tt_req_node;
1316 }
1317
1318 /* data_ptr is useless here, but has to be kept to respect the prototype */
1319 static int batadv_tt_local_valid_entry(const void *entry_ptr,
1320 const void *data_ptr)
1321 {
1322 const struct tt_common_entry *tt_common_entry = entry_ptr;
1323
1324 if (tt_common_entry->flags & TT_CLIENT_NEW)
1325 return 0;
1326 return 1;
1327 }
1328
1329 static int batadv_tt_global_valid(const void *entry_ptr,
1330 const void *data_ptr)
1331 {
1332 const struct tt_common_entry *tt_common_entry = entry_ptr;
1333 const struct tt_global_entry *tt_global_entry;
1334 const struct orig_node *orig_node = data_ptr;
1335
1336 if (tt_common_entry->flags & TT_CLIENT_ROAM)
1337 return 0;
1338
1339 tt_global_entry = container_of(tt_common_entry, struct tt_global_entry,
1340 common);
1341
1342 return batadv_tt_global_entry_has_orig(tt_global_entry, orig_node);
1343 }
1344
/* Allocate and fill a TT_RESPONSE skb with entries taken from @hash.
 *
 * @tt_len: requested payload size in bytes; clamped to the soft interface
 *	    MTU and rounded down to a whole number of tt_change records
 * @ttvn: translation table version number to advertise in the response
 * @valid_cb: optional filter - entries for which it returns 0 are skipped
 * @cb_data: opaque argument forwarded to @valid_cb
 *
 * Returns the skb (ownership passes to the caller) with tt_data set to the
 * number of entries actually copied, or NULL on allocation failure. Header
 * fields other than ttvn/tt_data are left for the caller to fill in.
 */
static struct sk_buff *
batadv_tt_response_fill_table(uint16_t tt_len, uint8_t ttvn,
			      struct hashtable_t *hash,
			      struct hard_iface *primary_if,
			      int (*valid_cb)(const void *, const void *),
			      void *cb_data)
{
	struct tt_common_entry *tt_common_entry;
	struct tt_query_packet *tt_response;
	struct tt_change *tt_change;
	struct hlist_node *node;
	struct hlist_head *head;
	struct sk_buff *skb = NULL;
	uint16_t tt_tot, tt_count;
	ssize_t tt_query_size = sizeof(struct tt_query_packet);
	uint32_t i;

	/* fragmentation is not implemented: never exceed the MTU and keep
	 * the payload a multiple of the record size
	 */
	if (tt_query_size + tt_len > primary_if->soft_iface->mtu) {
		tt_len = primary_if->soft_iface->mtu - tt_query_size;
		tt_len -= tt_len % sizeof(struct tt_change);
	}
	tt_tot = tt_len / sizeof(struct tt_change);

	skb = dev_alloc_skb(tt_query_size + tt_len + ETH_HLEN);
	if (!skb)
		goto out;

	skb_reserve(skb, ETH_HLEN);
	tt_response = (struct tt_query_packet *)skb_put(skb,
						     tt_query_size + tt_len);
	tt_response->ttvn = ttvn;

	/* the change records start right after the query header */
	tt_change = (struct tt_change *)(skb->data + tt_query_size);
	tt_count = 0;

	rcu_read_lock();
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		hlist_for_each_entry_rcu(tt_common_entry, node,
					 head, hash_entry) {
			if (tt_count == tt_tot)
				break;

			if ((valid_cb) && (!valid_cb(tt_common_entry, cb_data)))
				continue;

			memcpy(tt_change->addr, tt_common_entry->addr,
			       ETH_ALEN);
			tt_change->flags = NO_FLAGS;

			tt_count++;
			tt_change++;
		}
	}
	rcu_read_unlock();

	/* store in the message the number of entries we have successfully
	 * copied
	 */
	tt_response->tt_data = htons(tt_count);

out:
	return skb;
}
1410
/* Issue a TT_REQUEST towards @dst_orig_node asking for table version
 * @ttvn (with @tt_crc as the expected checksum); @full_table requests the
 * complete table instead of a diff.
 *
 * Returns 0 on success, 1 on failure. On failure the skb and the pending
 * tt_req_node (if created) are cleaned up here.
 */
static int batadv_send_tt_request(struct bat_priv *bat_priv,
				  struct orig_node *dst_orig_node,
				  uint8_t ttvn, uint16_t tt_crc,
				  bool full_table)
{
	struct sk_buff *skb = NULL;
	struct tt_query_packet *tt_request;
	struct neigh_node *neigh_node = NULL;
	struct hard_iface *primary_if;
	struct tt_req_node *tt_req_node = NULL;
	int ret = 1;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	/* The new tt_req will be issued only if I'm not waiting for a
	 * reply from the same orig_node yet
	 */
	tt_req_node = batadv_new_tt_req_node(bat_priv, dst_orig_node);
	if (!tt_req_node)
		goto out;

	skb = dev_alloc_skb(sizeof(struct tt_query_packet) + ETH_HLEN);
	if (!skb)
		goto out;

	skb_reserve(skb, ETH_HLEN);

	tt_request = (struct tt_query_packet *)skb_put(skb,
				sizeof(struct tt_query_packet));

	tt_request->header.packet_type = BAT_TT_QUERY;
	tt_request->header.version = COMPAT_VERSION;
	memcpy(tt_request->src, primary_if->net_dev->dev_addr, ETH_ALEN);
	memcpy(tt_request->dst, dst_orig_node->orig, ETH_ALEN);
	tt_request->header.ttl = TTL;
	tt_request->ttvn = ttvn;
	/* tt_data carries the CRC we expect the response to match */
	tt_request->tt_data = htons(tt_crc);
	tt_request->flags = TT_REQUEST;

	if (full_table)
		tt_request->flags |= TT_FULL_TABLE;

	neigh_node = batadv_orig_node_get_router(dst_orig_node);
	if (!neigh_node)
		goto out;

	batadv_dbg(DBG_TT, bat_priv,
		   "Sending TT_REQUEST to %pM via %pM [%c]\n",
		   dst_orig_node->orig, neigh_node->addr,
		   (full_table ? 'F' : '.'));

	batadv_inc_counter(bat_priv, BAT_CNT_TT_REQUEST_TX);

	batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
	ret = 0;

out:
	if (neigh_node)
		batadv_neigh_node_free_ref(neigh_node);
	if (primary_if)
		batadv_hardif_free_ref(primary_if);
	/* on failure the skb was never handed to the send path */
	if (ret)
		kfree_skb(skb);
	/* drop the pending-request marker so a new attempt is possible */
	if (ret && tt_req_node) {
		spin_lock_bh(&bat_priv->tt_req_list_lock);
		list_del(&tt_req_node->list);
		spin_unlock_bh(&bat_priv->tt_req_list_lock);
		kfree(tt_req_node);
	}
	return ret;
}
1484
/* Answer a TT_REQUEST whose destination is another node: serve the
 * requested table on that node's behalf from our cached knowledge.
 *
 * The reply is either the cached OGM diff buffer (when the requester did
 * not ask for the full table and a buffer is available) or a full table
 * built from the global hash, filtered to entries announced by the
 * requested originator.
 *
 * Returns true when a response was sent, false otherwise.
 */
static bool batadv_send_other_tt_response(struct bat_priv *bat_priv,
					  struct tt_query_packet *tt_request)
{
	struct orig_node *req_dst_orig_node = NULL, *res_dst_orig_node = NULL;
	struct neigh_node *neigh_node = NULL;
	struct hard_iface *primary_if = NULL;
	uint8_t orig_ttvn, req_ttvn, ttvn;
	int ret = false;
	unsigned char *tt_buff;
	bool full_table;
	uint16_t tt_len, tt_tot;
	struct sk_buff *skb = NULL;
	struct tt_query_packet *tt_response;

	batadv_dbg(DBG_TT, bat_priv,
		   "Received TT_REQUEST from %pM for ttvn: %u (%pM) [%c]\n",
		   tt_request->src, tt_request->ttvn, tt_request->dst,
		   (tt_request->flags & TT_FULL_TABLE ? 'F' : '.'));

	/* Let's get the orig node of the REAL destination */
	req_dst_orig_node = batadv_orig_hash_find(bat_priv, tt_request->dst);
	if (!req_dst_orig_node)
		goto out;

	res_dst_orig_node = batadv_orig_hash_find(bat_priv, tt_request->src);
	if (!res_dst_orig_node)
		goto out;

	neigh_node = batadv_orig_node_get_router(res_dst_orig_node);
	if (!neigh_node)
		goto out;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	orig_ttvn = (uint8_t)atomic_read(&req_dst_orig_node->last_ttvn);
	req_ttvn = tt_request->ttvn;

	/* I don't have the requested data */
	if (orig_ttvn != req_ttvn ||
	    tt_request->tt_data != htons(req_dst_orig_node->tt_crc))
		goto out;

	/* If the full table has been explicitly requested */
	if (tt_request->flags & TT_FULL_TABLE ||
	    !req_dst_orig_node->tt_buff)
		full_table = true;
	else
		full_table = false;

	/* In this version, fragmentation is not implemented, then
	 * I'll send only one packet with as much TT entries as I can
	 */
	if (!full_table) {
		/* serve the cached diff buffer of the requested node */
		spin_lock_bh(&req_dst_orig_node->tt_buff_lock);
		tt_len = req_dst_orig_node->tt_buff_len;
		tt_tot = tt_len / sizeof(struct tt_change);

		skb = dev_alloc_skb(sizeof(struct tt_query_packet) +
				    tt_len + ETH_HLEN);
		if (!skb)
			goto unlock;

		skb_reserve(skb, ETH_HLEN);
		tt_response = (struct tt_query_packet *)skb_put(skb,
				sizeof(struct tt_query_packet) + tt_len);
		tt_response->ttvn = req_ttvn;
		tt_response->tt_data = htons(tt_tot);

		tt_buff = skb->data + sizeof(struct tt_query_packet);
		/* Copy the last orig_node's OGM buffer */
		memcpy(tt_buff, req_dst_orig_node->tt_buff,
		       req_dst_orig_node->tt_buff_len);

		spin_unlock_bh(&req_dst_orig_node->tt_buff_lock);
	} else {
		/* build the full table from the global hash */
		tt_len = (uint16_t)atomic_read(&req_dst_orig_node->tt_size) *
			 sizeof(struct tt_change);
		ttvn = (uint8_t)atomic_read(&req_dst_orig_node->last_ttvn);

		skb = batadv_tt_response_fill_table(tt_len, ttvn,
						    bat_priv->tt_global_hash,
						    primary_if,
						    batadv_tt_global_valid,
						    req_dst_orig_node);
		if (!skb)
			goto out;

		tt_response = (struct tt_query_packet *)skb->data;
	}

	tt_response->header.packet_type = BAT_TT_QUERY;
	tt_response->header.version = COMPAT_VERSION;
	tt_response->header.ttl = TTL;
	memcpy(tt_response->src, req_dst_orig_node->orig, ETH_ALEN);
	memcpy(tt_response->dst, tt_request->src, ETH_ALEN);
	tt_response->flags = TT_RESPONSE;

	if (full_table)
		tt_response->flags |= TT_FULL_TABLE;

	batadv_dbg(DBG_TT, bat_priv,
		   "Sending TT_RESPONSE %pM via %pM for %pM (ttvn: %u)\n",
		   res_dst_orig_node->orig, neigh_node->addr,
		   req_dst_orig_node->orig, req_ttvn);

	batadv_inc_counter(bat_priv, BAT_CNT_TT_RESPONSE_TX);

	batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
	ret = true;
	goto out;

unlock:
	spin_unlock_bh(&req_dst_orig_node->tt_buff_lock);

out:
	if (res_dst_orig_node)
		batadv_orig_node_free_ref(res_dst_orig_node);
	if (req_dst_orig_node)
		batadv_orig_node_free_ref(req_dst_orig_node);
	if (neigh_node)
		batadv_neigh_node_free_ref(neigh_node);
	if (primary_if)
		batadv_hardif_free_ref(primary_if);
	/* the skb is only owned by us when it was not sent */
	if (!ret)
		kfree_skb(skb);
	return ret;

}
/* Answer a TT_REQUEST addressed to this node: send our own local table
 * (full or as cached diff buffer) back to the requester.
 *
 * Always returns true - the request was for us, so it must not be
 * re-routed regardless of whether the response could be sent.
 */
static bool batadv_send_my_tt_response(struct bat_priv *bat_priv,
				       struct tt_query_packet *tt_request)
{
	struct orig_node *orig_node = NULL;
	struct neigh_node *neigh_node = NULL;
	struct hard_iface *primary_if = NULL;
	uint8_t my_ttvn, req_ttvn, ttvn;
	int ret = false;
	unsigned char *tt_buff;
	bool full_table;
	uint16_t tt_len, tt_tot;
	struct sk_buff *skb = NULL;
	struct tt_query_packet *tt_response;

	batadv_dbg(DBG_TT, bat_priv,
		   "Received TT_REQUEST from %pM for ttvn: %u (me) [%c]\n",
		   tt_request->src, tt_request->ttvn,
		   (tt_request->flags & TT_FULL_TABLE ? 'F' : '.'));


	my_ttvn = (uint8_t)atomic_read(&bat_priv->ttvn);
	req_ttvn = tt_request->ttvn;

	orig_node = batadv_orig_hash_find(bat_priv, tt_request->src);
	if (!orig_node)
		goto out;

	neigh_node = batadv_orig_node_get_router(orig_node);
	if (!neigh_node)
		goto out;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	/* If the full table has been explicitly requested or the gap
	 * is too big send the whole local translation table
	 */
	if (tt_request->flags & TT_FULL_TABLE || my_ttvn != req_ttvn ||
	    !bat_priv->tt_buff)
		full_table = true;
	else
		full_table = false;

	/* In this version, fragmentation is not implemented, then
	 * I'll send only one packet with as much TT entries as I can
	 */
	if (!full_table) {
		/* serve the cached diff buffer of our own last changes */
		spin_lock_bh(&bat_priv->tt_buff_lock);
		tt_len = bat_priv->tt_buff_len;
		tt_tot = tt_len / sizeof(struct tt_change);

		skb = dev_alloc_skb(sizeof(struct tt_query_packet) +
				    tt_len + ETH_HLEN);
		if (!skb)
			goto unlock;

		skb_reserve(skb, ETH_HLEN);
		tt_response = (struct tt_query_packet *)skb_put(skb,
				sizeof(struct tt_query_packet) + tt_len);
		tt_response->ttvn = req_ttvn;
		tt_response->tt_data = htons(tt_tot);

		tt_buff = skb->data + sizeof(struct tt_query_packet);
		memcpy(tt_buff, bat_priv->tt_buff,
		       bat_priv->tt_buff_len);
		spin_unlock_bh(&bat_priv->tt_buff_lock);
	} else {
		/* build the full local table from the local hash */
		tt_len = (uint16_t)atomic_read(&bat_priv->num_local_tt) *
			 sizeof(struct tt_change);
		ttvn = (uint8_t)atomic_read(&bat_priv->ttvn);

		skb = batadv_tt_response_fill_table(tt_len, ttvn,
						    bat_priv->tt_local_hash,
						    primary_if,
						    batadv_tt_local_valid_entry,
						    NULL);
		if (!skb)
			goto out;

		tt_response = (struct tt_query_packet *)skb->data;
	}

	tt_response->header.packet_type = BAT_TT_QUERY;
	tt_response->header.version = COMPAT_VERSION;
	tt_response->header.ttl = TTL;
	memcpy(tt_response->src, primary_if->net_dev->dev_addr, ETH_ALEN);
	memcpy(tt_response->dst, tt_request->src, ETH_ALEN);
	tt_response->flags = TT_RESPONSE;

	if (full_table)
		tt_response->flags |= TT_FULL_TABLE;

	batadv_dbg(DBG_TT, bat_priv,
		   "Sending TT_RESPONSE to %pM via %pM [%c]\n",
		   orig_node->orig, neigh_node->addr,
		   (tt_response->flags & TT_FULL_TABLE ? 'F' : '.'));

	batadv_inc_counter(bat_priv, BAT_CNT_TT_RESPONSE_TX);

	batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
	ret = true;
	goto out;

unlock:
	spin_unlock_bh(&bat_priv->tt_buff_lock);
out:
	if (orig_node)
		batadv_orig_node_free_ref(orig_node);
	if (neigh_node)
		batadv_neigh_node_free_ref(neigh_node);
	if (primary_if)
		batadv_hardif_free_ref(primary_if);
	/* the skb is only owned by us when it was not sent */
	if (!ret)
		kfree_skb(skb);
	/* This packet was for me, so it doesn't need to be re-routed */
	return true;
}
1733
1734 bool batadv_send_tt_response(struct bat_priv *bat_priv,
1735 struct tt_query_packet *tt_request)
1736 {
1737 if (batadv_is_my_mac(tt_request->dst)) {
1738 /* don't answer backbone gws! */
1739 if (batadv_bla_is_backbone_gw_orig(bat_priv, tt_request->src))
1740 return true;
1741
1742 return batadv_send_my_tt_response(bat_priv, tt_request);
1743 } else {
1744 return batadv_send_other_tt_response(bat_priv, tt_request);
1745 }
1746 }
1747
1748 static void _batadv_tt_update_changes(struct bat_priv *bat_priv,
1749 struct orig_node *orig_node,
1750 struct tt_change *tt_change,
1751 uint16_t tt_num_changes, uint8_t ttvn)
1752 {
1753 int i;
1754 int roams;
1755
1756 for (i = 0; i < tt_num_changes; i++) {
1757 if ((tt_change + i)->flags & TT_CLIENT_DEL) {
1758 roams = (tt_change + i)->flags & TT_CLIENT_ROAM;
1759 batadv_tt_global_del(bat_priv, orig_node,
1760 (tt_change + i)->addr,
1761 "tt removed by changes",
1762 roams);
1763 } else {
1764 if (!batadv_tt_global_add(bat_priv, orig_node,
1765 (tt_change + i)->addr,
1766 (tt_change + i)->flags, ttvn))
1767 /* In case of problem while storing a
1768 * global_entry, we stop the updating
1769 * procedure without committing the
1770 * ttvn change. This will avoid to send
1771 * corrupted data on tt_request
1772 */
1773 return;
1774 }
1775 }
1776 orig_node->tt_initialised = true;
1777 }
1778
1779 static void batadv_tt_fill_gtable(struct bat_priv *bat_priv,
1780 struct tt_query_packet *tt_response)
1781 {
1782 struct orig_node *orig_node = NULL;
1783
1784 orig_node = batadv_orig_hash_find(bat_priv, tt_response->src);
1785 if (!orig_node)
1786 goto out;
1787
1788 /* Purge the old table first.. */
1789 batadv_tt_global_del_orig(bat_priv, orig_node, "Received full table");
1790
1791 _batadv_tt_update_changes(bat_priv, orig_node,
1792 (struct tt_change *)(tt_response + 1),
1793 ntohs(tt_response->tt_data),
1794 tt_response->ttvn);
1795
1796 spin_lock_bh(&orig_node->tt_buff_lock);
1797 kfree(orig_node->tt_buff);
1798 orig_node->tt_buff_len = 0;
1799 orig_node->tt_buff = NULL;
1800 spin_unlock_bh(&orig_node->tt_buff_lock);
1801
1802 atomic_set(&orig_node->last_ttvn, tt_response->ttvn);
1803
1804 out:
1805 if (orig_node)
1806 batadv_orig_node_free_ref(orig_node);
1807 }
1808
/* Apply the change records to the global table, cache them for later
 * TT_RESPONSEs on behalf of @orig_node and commit the new table version.
 */
static void batadv_tt_update_changes(struct bat_priv *bat_priv,
				     struct orig_node *orig_node,
				     uint16_t tt_num_changes, uint8_t ttvn,
				     struct tt_change *tt_change)
{
	_batadv_tt_update_changes(bat_priv, orig_node, tt_change,
				  tt_num_changes, ttvn);

	batadv_tt_save_orig_buffer(bat_priv, orig_node,
				   (unsigned char *)tt_change, tt_num_changes);
	atomic_set(&orig_node->last_ttvn, ttvn);
}
1821
1822 bool batadv_is_my_client(struct bat_priv *bat_priv, const uint8_t *addr)
1823 {
1824 struct tt_local_entry *tt_local_entry = NULL;
1825 bool ret = false;
1826
1827 tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr);
1828 if (!tt_local_entry)
1829 goto out;
1830 /* Check if the client has been logically deleted (but is kept for
1831 * consistency purpose)
1832 */
1833 if (tt_local_entry->common.flags & TT_CLIENT_PENDING)
1834 goto out;
1835 ret = true;
1836 out:
1837 if (tt_local_entry)
1838 batadv_tt_local_entry_free_ref(tt_local_entry);
1839 return ret;
1840 }
1841
/* Process an incoming TT_RESPONSE: apply the carried table (full or
 * diff), drop the matching pending request and refresh the CRC we store
 * for the sending originator.
 */
void batadv_handle_tt_response(struct bat_priv *bat_priv,
			       struct tt_query_packet *tt_response)
{
	struct tt_req_node *node, *safe;
	struct orig_node *orig_node = NULL;

	batadv_dbg(DBG_TT, bat_priv,
		   "Received TT_RESPONSE from %pM for ttvn %d t_size: %d [%c]\n",
		   tt_response->src, tt_response->ttvn,
		   ntohs(tt_response->tt_data),
		   (tt_response->flags & TT_FULL_TABLE ? 'F' : '.'));

	/* we should have never asked a backbone gw */
	if (batadv_bla_is_backbone_gw_orig(bat_priv, tt_response->src))
		goto out;

	orig_node = batadv_orig_hash_find(bat_priv, tt_response->src);
	if (!orig_node)
		goto out;

	/* the change records follow directly after the query header */
	if (tt_response->flags & TT_FULL_TABLE)
		batadv_tt_fill_gtable(bat_priv, tt_response);
	else
		batadv_tt_update_changes(bat_priv, orig_node,
					 ntohs(tt_response->tt_data),
					 tt_response->ttvn,
					 (struct tt_change *)(tt_response + 1));

	/* Delete the tt_req_node from pending tt_requests list */
	spin_lock_bh(&bat_priv->tt_req_list_lock);
	list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) {
		if (!batadv_compare_eth(node->addr, tt_response->src))
			continue;
		list_del(&node->list);
		kfree(node);
	}
	spin_unlock_bh(&bat_priv->tt_req_list_lock);

	/* Recalculate the CRC for this orig_node and store it */
	orig_node->tt_crc = batadv_tt_global_crc(bat_priv, orig_node);
	/* Roaming phase is over: tables are in sync again. I can
	 * unset the flag
	 */
	orig_node->tt_poss_change = false;
out:
	if (orig_node)
		batadv_orig_node_free_ref(orig_node);
}
1890
/* Initialise the local and global translation tables and arm the
 * periodic purge timer. Returns 1 on success, a negative error code
 * otherwise.
 */
int batadv_tt_init(struct bat_priv *bat_priv)
{
	int ret = batadv_tt_local_init(bat_priv);

	if (ret >= 0)
		ret = batadv_tt_global_init(bat_priv);

	if (ret < 0)
		return ret;

	batadv_tt_start_timer(bat_priv);

	return 1;
}
1907
1908 static void batadv_tt_roam_list_free(struct bat_priv *bat_priv)
1909 {
1910 struct tt_roam_node *node, *safe;
1911
1912 spin_lock_bh(&bat_priv->tt_roam_list_lock);
1913
1914 list_for_each_entry_safe(node, safe, &bat_priv->tt_roam_list, list) {
1915 list_del(&node->list);
1916 kfree(node);
1917 }
1918
1919 spin_unlock_bh(&bat_priv->tt_roam_list_lock);
1920 }
1921
1922 static void batadv_tt_roam_purge(struct bat_priv *bat_priv)
1923 {
1924 struct tt_roam_node *node, *safe;
1925
1926 spin_lock_bh(&bat_priv->tt_roam_list_lock);
1927 list_for_each_entry_safe(node, safe, &bat_priv->tt_roam_list, list) {
1928 if (!batadv_has_timed_out(node->first_time, ROAMING_MAX_TIME))
1929 continue;
1930
1931 list_del(&node->list);
1932 kfree(node);
1933 }
1934 spin_unlock_bh(&bat_priv->tt_roam_list_lock);
1935 }
1936
/* This function checks whether the client already reached the
 * maximum number of possible roaming phases. In this case the ROAMING_ADV
 * will not be sent.
 *
 * returns true if the ROAMING_ADV can be sent, false otherwise
 */
static bool batadv_tt_check_roam_count(struct bat_priv *bat_priv,
				       uint8_t *client)
{
	struct tt_roam_node *tt_roam_node;
	bool ret = false;

	spin_lock_bh(&bat_priv->tt_roam_list_lock);
	/* The new tt_req will be issued only if I'm not waiting for a
	 * reply from the same orig_node yet
	 */
	list_for_each_entry(tt_roam_node, &bat_priv->tt_roam_list, list) {
		if (!batadv_compare_eth(tt_roam_node->addr, client))
			continue;

		/* a timed-out record does not count against the client's
		 * budget; a fresh one is added below (ret stays false)
		 */
		if (batadv_has_timed_out(tt_roam_node->first_time,
					 ROAMING_MAX_TIME))
			continue;

		/* each counter decrement consumes one allowed roaming
		 * advertisement within the current window
		 */
		if (!batadv_atomic_dec_not_zero(&tt_roam_node->counter))
			/* Sorry, you roamed too many times! */
			goto unlock;
		ret = true;
		break;
	}

	/* first (valid) roaming event for this client: start a new window
	 * with ROAMING_MAX_COUNT - 1 advertisements left
	 */
	if (!ret) {
		tt_roam_node = kmalloc(sizeof(*tt_roam_node), GFP_ATOMIC);
		if (!tt_roam_node)
			goto unlock;

		tt_roam_node->first_time = jiffies;
		atomic_set(&tt_roam_node->counter, ROAMING_MAX_COUNT - 1);
		memcpy(tt_roam_node->addr, client, ETH_ALEN);

		list_add(&tt_roam_node->list, &bat_priv->tt_roam_list);
		ret = true;
	}

unlock:
	spin_unlock_bh(&bat_priv->tt_roam_list_lock);
	return ret;
}
1985
1986 static void batadv_send_roam_adv(struct bat_priv *bat_priv, uint8_t *client,
1987 struct orig_node *orig_node)
1988 {
1989 struct neigh_node *neigh_node = NULL;
1990 struct sk_buff *skb = NULL;
1991 struct roam_adv_packet *roam_adv_packet;
1992 int ret = 1;
1993 struct hard_iface *primary_if;
1994
1995 /* before going on we have to check whether the client has
1996 * already roamed to us too many times
1997 */
1998 if (!batadv_tt_check_roam_count(bat_priv, client))
1999 goto out;
2000
2001 skb = dev_alloc_skb(sizeof(struct roam_adv_packet) + ETH_HLEN);
2002 if (!skb)
2003 goto out;
2004
2005 skb_reserve(skb, ETH_HLEN);
2006
2007 roam_adv_packet = (struct roam_adv_packet *)skb_put(skb,
2008 sizeof(struct roam_adv_packet));
2009
2010 roam_adv_packet->header.packet_type = BAT_ROAM_ADV;
2011 roam_adv_packet->header.version = COMPAT_VERSION;
2012 roam_adv_packet->header.ttl = TTL;
2013 primary_if = batadv_primary_if_get_selected(bat_priv);
2014 if (!primary_if)
2015 goto out;
2016 memcpy(roam_adv_packet->src, primary_if->net_dev->dev_addr, ETH_ALEN);
2017 batadv_hardif_free_ref(primary_if);
2018 memcpy(roam_adv_packet->dst, orig_node->orig, ETH_ALEN);
2019 memcpy(roam_adv_packet->client, client, ETH_ALEN);
2020
2021 neigh_node = batadv_orig_node_get_router(orig_node);
2022 if (!neigh_node)
2023 goto out;
2024
2025 batadv_dbg(DBG_TT, bat_priv,
2026 "Sending ROAMING_ADV to %pM (client %pM) via %pM\n",
2027 orig_node->orig, client, neigh_node->addr);
2028
2029 batadv_inc_counter(bat_priv, BAT_CNT_TT_ROAM_ADV_TX);
2030
2031 batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
2032 ret = 0;
2033
2034 out:
2035 if (neigh_node)
2036 batadv_neigh_node_free_ref(neigh_node);
2037 if (ret)
2038 kfree_skb(skb);
2039 return;
2040 }
2041
2042 static void batadv_tt_purge(struct work_struct *work)
2043 {
2044 struct delayed_work *delayed_work =
2045 container_of(work, struct delayed_work, work);
2046 struct bat_priv *bat_priv =
2047 container_of(delayed_work, struct bat_priv, tt_work);
2048
2049 batadv_tt_local_purge(bat_priv);
2050 batadv_tt_global_roam_purge(bat_priv);
2051 batadv_tt_req_purge(bat_priv);
2052 batadv_tt_roam_purge(bat_priv);
2053
2054 batadv_tt_start_timer(bat_priv);
2055 }
2056
/* Full translation table shutdown: stop the purge worker, then release
 * every table, list and cached buffer owned by the TT subsystem.
 */
void batadv_tt_free(struct bat_priv *bat_priv)
{
	/* make sure the worker cannot run concurrently with the teardown */
	cancel_delayed_work_sync(&bat_priv->tt_work);

	batadv_tt_local_table_free(bat_priv);
	batadv_tt_global_table_free(bat_priv);
	batadv_tt_req_list_free(bat_priv);
	batadv_tt_changes_list_free(bat_priv);
	batadv_tt_roam_list_free(bat_priv);

	/* cached local diff buffer (kfree(NULL) is a no-op) */
	kfree(bat_priv->tt_buff);
}
2069
2070 /* This function will enable or disable the specified flags for all the entries
2071 * in the given hash table and returns the number of modified entries
2072 */
2073 static uint16_t batadv_tt_set_flags(struct hashtable_t *hash, uint16_t flags,
2074 bool enable)
2075 {
2076 uint32_t i;
2077 uint16_t changed_num = 0;
2078 struct hlist_head *head;
2079 struct hlist_node *node;
2080 struct tt_common_entry *tt_common_entry;
2081
2082 if (!hash)
2083 goto out;
2084
2085 for (i = 0; i < hash->size; i++) {
2086 head = &hash->table[i];
2087
2088 rcu_read_lock();
2089 hlist_for_each_entry_rcu(tt_common_entry, node,
2090 head, hash_entry) {
2091 if (enable) {
2092 if ((tt_common_entry->flags & flags) == flags)
2093 continue;
2094 tt_common_entry->flags |= flags;
2095 } else {
2096 if (!(tt_common_entry->flags & flags))
2097 continue;
2098 tt_common_entry->flags &= ~flags;
2099 }
2100 changed_num++;
2101 }
2102 rcu_read_unlock();
2103 }
2104 out:
2105 return changed_num;
2106 }
2107
2108 /* Purge out all the tt local entries marked with TT_CLIENT_PENDING */
2109 static void batadv_tt_local_purge_pending_clients(struct bat_priv *bat_priv)
2110 {
2111 struct hashtable_t *hash = bat_priv->tt_local_hash;
2112 struct tt_common_entry *tt_common_entry;
2113 struct tt_local_entry *tt_local_entry;
2114 struct hlist_node *node, *node_tmp;
2115 struct hlist_head *head;
2116 spinlock_t *list_lock; /* protects write access to the hash lists */
2117 uint32_t i;
2118
2119 if (!hash)
2120 return;
2121
2122 for (i = 0; i < hash->size; i++) {
2123 head = &hash->table[i];
2124 list_lock = &hash->list_locks[i];
2125
2126 spin_lock_bh(list_lock);
2127 hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
2128 head, hash_entry) {
2129 if (!(tt_common_entry->flags & TT_CLIENT_PENDING))
2130 continue;
2131
2132 batadv_dbg(DBG_TT, bat_priv,
2133 "Deleting local tt entry (%pM): pending\n",
2134 tt_common_entry->addr);
2135
2136 atomic_dec(&bat_priv->num_local_tt);
2137 hlist_del_rcu(node);
2138 tt_local_entry = container_of(tt_common_entry,
2139 struct tt_local_entry,
2140 common);
2141 batadv_tt_local_entry_free_ref(tt_local_entry);
2142 }
2143 spin_unlock_bh(list_lock);
2144 }
2145
2146 }
2147
/* Commit the pending local TT changes: clear the TT_CLIENT_NEW flag on the
 * freshly announced clients, drop the clients pending for deletion,
 * recompute the local CRC and bump the translation table version number
 * (ttvn).
 *
 * Returns -ENOENT when no local change is pending, otherwise the return
 * value of batadv_tt_changes_fill_buff().
 */
static int batadv_tt_commit_changes(struct bat_priv *bat_priv,
				    unsigned char **packet_buff,
				    int *packet_buff_len, int packet_min_len)
{
	uint16_t changed_num = 0;

	/* nothing queued since the last commit -> nothing to do */
	if (atomic_read(&bat_priv->tt_local_changes) < 1)
		return -ENOENT;

	changed_num = batadv_tt_set_flags(bat_priv->tt_local_hash,
					  TT_CLIENT_NEW, false);

	/* all reset entries have to be counted as local entries */
	atomic_add(changed_num, &bat_priv->num_local_tt);
	batadv_tt_local_purge_pending_clients(bat_priv);
	bat_priv->tt_crc = batadv_tt_local_crc(bat_priv);

	/* Increment the TTVN only once per OGM interval */
	atomic_inc(&bat_priv->ttvn);
	batadv_dbg(DBG_TT, bat_priv,
		   "Local changes committed, updating to ttvn %u\n",
		   (uint8_t)atomic_read(&bat_priv->ttvn));
	bat_priv->tt_poss_change = false;

	/* reset the sending counter */
	atomic_set(&bat_priv->tt_ogm_append_cnt, TT_OGM_APPEND_MAX);

	return batadv_tt_changes_fill_buff(bat_priv, packet_buff,
					   packet_buff_len, packet_min_len);
}
2178
2179 /* when calling this function (hard_iface == primary_if) has to be true */
2180 int batadv_tt_append_diff(struct bat_priv *bat_priv,
2181 unsigned char **packet_buff, int *packet_buff_len,
2182 int packet_min_len)
2183 {
2184 int tt_num_changes;
2185
2186 /* if at least one change happened */
2187 tt_num_changes = batadv_tt_commit_changes(bat_priv, packet_buff,
2188 packet_buff_len,
2189 packet_min_len);
2190
2191 /* if the changes have been sent often enough */
2192 if ((tt_num_changes < 0) &&
2193 (!batadv_atomic_dec_not_zero(&bat_priv->tt_ogm_append_cnt))) {
2194 batadv_tt_realloc_packet_buff(packet_buff, packet_buff_len,
2195 packet_min_len, packet_min_len);
2196 tt_num_changes = 0;
2197 }
2198
2199 return tt_num_changes;
2200 }
2201
2202 bool batadv_is_ap_isolated(struct bat_priv *bat_priv, uint8_t *src,
2203 uint8_t *dst)
2204 {
2205 struct tt_local_entry *tt_local_entry = NULL;
2206 struct tt_global_entry *tt_global_entry = NULL;
2207 bool ret = false;
2208
2209 if (!atomic_read(&bat_priv->ap_isolation))
2210 goto out;
2211
2212 tt_local_entry = batadv_tt_local_hash_find(bat_priv, dst);
2213 if (!tt_local_entry)
2214 goto out;
2215
2216 tt_global_entry = batadv_tt_global_hash_find(bat_priv, src);
2217 if (!tt_global_entry)
2218 goto out;
2219
2220 if (!_batadv_is_ap_isolated(tt_local_entry, tt_global_entry))
2221 goto out;
2222
2223 ret = true;
2224
2225 out:
2226 if (tt_global_entry)
2227 batadv_tt_global_entry_free_ref(tt_global_entry);
2228 if (tt_local_entry)
2229 batadv_tt_local_entry_free_ref(tt_local_entry);
2230 return ret;
2231 }
2232
/* Process the translation table information attached to an incoming OGM:
 * apply the announced changes when the local copy of the originator's table
 * can be kept in sync, otherwise request fresh table data from the
 * originator.
 *
 * @tt_buff points at tt_num_changes struct tt_change entries, @ttvn is the
 * table version announced by the originator and @tt_crc the CRC it
 * advertised for its table.
 */
void batadv_tt_update_orig(struct bat_priv *bat_priv,
			   struct orig_node *orig_node,
			   const unsigned char *tt_buff, uint8_t tt_num_changes,
			   uint8_t ttvn, uint16_t tt_crc)
{
	uint8_t orig_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
	bool full_table = true;

	/* don't care about a backbone gateways updates. */
	if (batadv_bla_is_backbone_gw_orig(bat_priv, orig_node->orig))
		return;

	/* orig table not initialised AND first diff is in the OGM OR the ttvn
	 * increased by one -> we can apply the attached changes
	 */
	if ((!orig_node->tt_initialised && ttvn == 1) ||
	    ttvn - orig_ttvn == 1) {
		/* the OGM could not contain the changes due to their size or
		 * because they have already been sent TT_OGM_APPEND_MAX times.
		 * In this case send a tt request
		 */
		if (!tt_num_changes) {
			full_table = false;
			goto request_table;
		}

		batadv_tt_update_changes(bat_priv, orig_node, tt_num_changes,
					 ttvn, (struct tt_change *)tt_buff);

		/* Even if we received the precomputed crc with the OGM, we
		 * prefer to recompute it to spot any possible inconsistency
		 * in the global table
		 */
		orig_node->tt_crc = batadv_tt_global_crc(bat_priv, orig_node);

		/* The ttvn alone is not enough to guarantee consistency
		 * because a single value could represent different states
		 * (due to the wrap around). Thus a node has to check whether
		 * the resulting table (after applying the changes) is still
		 * consistent or not. E.g. a node could disconnect while its
		 * ttvn is X and reconnect on ttvn = X + TTVN_MAX: in this case
		 * checking the CRC value is mandatory to detect the
		 * inconsistency
		 */
		if (orig_node->tt_crc != tt_crc)
			goto request_table;

		/* Roaming phase is over: tables are in sync again. I can
		 * unset the flag
		 */
		orig_node->tt_poss_change = false;
	} else {
		/* if we missed more than one change or our tables are not
		 * in sync anymore -> request fresh tt data
		 */
		if (!orig_node->tt_initialised || ttvn != orig_ttvn ||
		    orig_node->tt_crc != tt_crc) {
			/* also reached from the if-branch above with
			 * full_table == false when the diff was missing
			 */
request_table:
			batadv_dbg(DBG_TT, bat_priv,
				   "TT inconsistency for %pM. Need to retrieve the correct information (ttvn: %u last_ttvn: %u crc: %u last_crc: %u num_changes: %u)\n",
				   orig_node->orig, ttvn, orig_ttvn, tt_crc,
				   orig_node->tt_crc, tt_num_changes);
			batadv_send_tt_request(bat_priv, orig_node, ttvn,
					       tt_crc, full_table);
			return;
		}
	}
}
2301
2302 /* returns true whether we know that the client has moved from its old
2303 * originator to another one. This entry is kept is still kept for consistency
2304 * purposes
2305 */
2306 bool batadv_tt_global_client_is_roaming(struct bat_priv *bat_priv,
2307 uint8_t *addr)
2308 {
2309 struct tt_global_entry *tt_global_entry;
2310 bool ret = false;
2311
2312 tt_global_entry = batadv_tt_global_hash_find(bat_priv, addr);
2313 if (!tt_global_entry)
2314 goto out;
2315
2316 ret = tt_global_entry->common.flags & TT_CLIENT_ROAM;
2317 batadv_tt_global_entry_free_ref(tt_global_entry);
2318 out:
2319 return ret;
2320 }