1 /* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
3 * Marek Lindner, Simon Wunderlich, Antonio Quartulli
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of version 2 of the GNU General Public
7 * License as published by the Free Software Foundation.
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
21 #include "translation-table.h"
22 #include "soft-interface.h"
23 #include "hard-interface.h"
26 #include "originator.h"
28 #include "bridge_loop_avoidance.h"
30 #include <linux/crc16.h>
32 static void batadv_send_roam_adv(struct bat_priv
*bat_priv
, uint8_t *client
,
33 struct orig_node
*orig_node
);
34 static void batadv_tt_purge(struct work_struct
*work
);
36 batadv_tt_global_del_orig_list(struct tt_global_entry
*tt_global_entry
);
38 /* returns 1 if they are the same mac addr */
39 static int batadv_compare_tt(const struct hlist_node
*node
, const void *data2
)
41 const void *data1
= container_of(node
, struct tt_common_entry
,
44 return (memcmp(data1
, data2
, ETH_ALEN
) == 0 ? 1 : 0);
47 static void batadv_tt_start_timer(struct bat_priv
*bat_priv
)
49 INIT_DELAYED_WORK(&bat_priv
->tt_work
, batadv_tt_purge
);
50 queue_delayed_work(batadv_event_workqueue
, &bat_priv
->tt_work
,
51 msecs_to_jiffies(5000));
54 static struct tt_common_entry
*batadv_tt_hash_find(struct hashtable_t
*hash
,
57 struct hlist_head
*head
;
58 struct hlist_node
*node
;
59 struct tt_common_entry
*tt_common_entry
, *tt_common_entry_tmp
= NULL
;
65 index
= batadv_choose_orig(data
, hash
->size
);
66 head
= &hash
->table
[index
];
69 hlist_for_each_entry_rcu(tt_common_entry
, node
, head
, hash_entry
) {
70 if (!batadv_compare_eth(tt_common_entry
, data
))
73 if (!atomic_inc_not_zero(&tt_common_entry
->refcount
))
76 tt_common_entry_tmp
= tt_common_entry
;
81 return tt_common_entry_tmp
;
84 static struct tt_local_entry
*
85 batadv_tt_local_hash_find(struct bat_priv
*bat_priv
, const void *data
)
87 struct tt_common_entry
*tt_common_entry
;
88 struct tt_local_entry
*tt_local_entry
= NULL
;
90 tt_common_entry
= batadv_tt_hash_find(bat_priv
->tt_local_hash
, data
);
92 tt_local_entry
= container_of(tt_common_entry
,
93 struct tt_local_entry
, common
);
94 return tt_local_entry
;
97 static struct tt_global_entry
*
98 batadv_tt_global_hash_find(struct bat_priv
*bat_priv
, const void *data
)
100 struct tt_common_entry
*tt_common_entry
;
101 struct tt_global_entry
*tt_global_entry
= NULL
;
103 tt_common_entry
= batadv_tt_hash_find(bat_priv
->tt_global_hash
, data
);
105 tt_global_entry
= container_of(tt_common_entry
,
106 struct tt_global_entry
, common
);
107 return tt_global_entry
;
112 batadv_tt_local_entry_free_ref(struct tt_local_entry
*tt_local_entry
)
114 if (atomic_dec_and_test(&tt_local_entry
->common
.refcount
))
115 kfree_rcu(tt_local_entry
, common
.rcu
);
118 static void batadv_tt_global_entry_free_rcu(struct rcu_head
*rcu
)
120 struct tt_common_entry
*tt_common_entry
;
121 struct tt_global_entry
*tt_global_entry
;
123 tt_common_entry
= container_of(rcu
, struct tt_common_entry
, rcu
);
124 tt_global_entry
= container_of(tt_common_entry
, struct tt_global_entry
,
127 kfree(tt_global_entry
);
131 batadv_tt_global_entry_free_ref(struct tt_global_entry
*tt_global_entry
)
133 if (atomic_dec_and_test(&tt_global_entry
->common
.refcount
)) {
134 batadv_tt_global_del_orig_list(tt_global_entry
);
135 call_rcu(&tt_global_entry
->common
.rcu
,
136 batadv_tt_global_entry_free_rcu
);
140 static void batadv_tt_orig_list_entry_free_rcu(struct rcu_head
*rcu
)
142 struct tt_orig_list_entry
*orig_entry
;
144 orig_entry
= container_of(rcu
, struct tt_orig_list_entry
, rcu
);
145 batadv_orig_node_free_ref(orig_entry
->orig_node
);
150 batadv_tt_orig_list_entry_free_ref(struct tt_orig_list_entry
*orig_entry
)
152 /* to avoid race conditions, immediately decrease the tt counter */
153 atomic_dec(&orig_entry
->orig_node
->tt_size
);
154 call_rcu(&orig_entry
->rcu
, batadv_tt_orig_list_entry_free_rcu
);
157 static void batadv_tt_local_event(struct bat_priv
*bat_priv
,
158 const uint8_t *addr
, uint8_t flags
)
160 struct tt_change_node
*tt_change_node
, *entry
, *safe
;
161 bool event_removed
= false;
162 bool del_op_requested
, del_op_entry
;
164 tt_change_node
= kmalloc(sizeof(*tt_change_node
), GFP_ATOMIC
);
169 tt_change_node
->change
.flags
= flags
;
170 memcpy(tt_change_node
->change
.addr
, addr
, ETH_ALEN
);
172 del_op_requested
= flags
& TT_CLIENT_DEL
;
174 /* check for ADD+DEL or DEL+ADD events */
175 spin_lock_bh(&bat_priv
->tt_changes_list_lock
);
176 list_for_each_entry_safe(entry
, safe
, &bat_priv
->tt_changes_list
,
178 if (!batadv_compare_eth(entry
->change
.addr
, addr
))
181 /* DEL+ADD in the same orig interval have no effect and can be
182 * removed to avoid silly behaviour on the receiver side. The
183 * other way around (ADD+DEL) can happen in case of roaming of
184 * a client still in the NEW state. Roaming of NEW clients is
185 * now possible due to automatically recognition of "temporary"
188 del_op_entry
= entry
->change
.flags
& TT_CLIENT_DEL
;
189 if (!del_op_requested
&& del_op_entry
)
191 if (del_op_requested
&& !del_op_entry
)
195 list_del(&entry
->list
);
197 event_removed
= true;
201 /* track the change in the OGMinterval list */
202 list_add_tail(&tt_change_node
->list
, &bat_priv
->tt_changes_list
);
205 spin_unlock_bh(&bat_priv
->tt_changes_list_lock
);
208 atomic_dec(&bat_priv
->tt_local_changes
);
210 atomic_inc(&bat_priv
->tt_local_changes
);
213 int batadv_tt_len(int changes_num
)
215 return changes_num
* sizeof(struct tt_change
);
/* Allocate the local translation-table hash (1024 buckets) unless it
 * already exists. NOTE(review): the extraction dropped this function's
 * return statements; only the existence check and the allocation are
 * visible here.
 */
218 static int batadv_tt_local_init(struct bat_priv
*bat_priv
)
220 if (bat_priv
->tt_local_hash
)
223 bat_priv
->tt_local_hash
= batadv_hash_new(1024);
225 if (!bat_priv
->tt_local_hash
)
231 void batadv_tt_local_add(struct net_device
*soft_iface
, const uint8_t *addr
,
234 struct bat_priv
*bat_priv
= netdev_priv(soft_iface
);
235 struct tt_local_entry
*tt_local_entry
= NULL
;
236 struct tt_global_entry
*tt_global_entry
= NULL
;
237 struct hlist_head
*head
;
238 struct hlist_node
*node
;
239 struct tt_orig_list_entry
*orig_entry
;
242 tt_local_entry
= batadv_tt_local_hash_find(bat_priv
, addr
);
244 if (tt_local_entry
) {
245 tt_local_entry
->last_seen
= jiffies
;
246 /* possibly unset the TT_CLIENT_PENDING flag */
247 tt_local_entry
->common
.flags
&= ~TT_CLIENT_PENDING
;
251 tt_local_entry
= kmalloc(sizeof(*tt_local_entry
), GFP_ATOMIC
);
255 batadv_dbg(DBG_TT
, bat_priv
,
256 "Creating new local tt entry: %pM (ttvn: %d)\n", addr
,
257 (uint8_t)atomic_read(&bat_priv
->ttvn
));
259 memcpy(tt_local_entry
->common
.addr
, addr
, ETH_ALEN
);
260 tt_local_entry
->common
.flags
= NO_FLAGS
;
261 if (batadv_is_wifi_iface(ifindex
))
262 tt_local_entry
->common
.flags
|= TT_CLIENT_WIFI
;
263 atomic_set(&tt_local_entry
->common
.refcount
, 2);
264 tt_local_entry
->last_seen
= jiffies
;
266 /* the batman interface mac address should never be purged */
267 if (batadv_compare_eth(addr
, soft_iface
->dev_addr
))
268 tt_local_entry
->common
.flags
|= TT_CLIENT_NOPURGE
;
270 /* The local entry has to be marked as NEW to avoid to send it in
271 * a full table response going out before the next ttvn increment
272 * (consistency check)
274 tt_local_entry
->common
.flags
|= TT_CLIENT_NEW
;
276 hash_added
= batadv_hash_add(bat_priv
->tt_local_hash
, batadv_compare_tt
,
278 &tt_local_entry
->common
,
279 &tt_local_entry
->common
.hash_entry
);
281 if (unlikely(hash_added
!= 0)) {
282 /* remove the reference for the hash */
283 batadv_tt_local_entry_free_ref(tt_local_entry
);
287 batadv_tt_local_event(bat_priv
, addr
, tt_local_entry
->common
.flags
);
289 /* remove address from global hash if present */
290 tt_global_entry
= batadv_tt_global_hash_find(bat_priv
, addr
);
292 /* Check whether it is a roaming! */
293 if (tt_global_entry
) {
294 /* These node are probably going to update their tt table */
295 head
= &tt_global_entry
->orig_list
;
297 hlist_for_each_entry_rcu(orig_entry
, node
, head
, list
) {
298 orig_entry
->orig_node
->tt_poss_change
= true;
300 batadv_send_roam_adv(bat_priv
,
301 tt_global_entry
->common
.addr
,
302 orig_entry
->orig_node
);
305 /* The global entry has to be marked as ROAMING and
306 * has to be kept for consistency purpose
308 tt_global_entry
->common
.flags
|= TT_CLIENT_ROAM
;
309 tt_global_entry
->roam_at
= jiffies
;
313 batadv_tt_local_entry_free_ref(tt_local_entry
);
315 batadv_tt_global_entry_free_ref(tt_global_entry
);
318 static void batadv_tt_realloc_packet_buff(unsigned char **packet_buff
,
319 int *packet_buff_len
,
323 unsigned char *new_buff
;
325 new_buff
= kmalloc(new_packet_len
, GFP_ATOMIC
);
327 /* keep old buffer if kmalloc should fail */
329 memcpy(new_buff
, *packet_buff
, min_packet_len
);
331 *packet_buff
= new_buff
;
332 *packet_buff_len
= new_packet_len
;
336 static void batadv_tt_prepare_packet_buff(struct bat_priv
*bat_priv
,
337 unsigned char **packet_buff
,
338 int *packet_buff_len
,
341 struct hard_iface
*primary_if
;
344 primary_if
= batadv_primary_if_get_selected(bat_priv
);
346 req_len
= min_packet_len
;
347 req_len
+= batadv_tt_len(atomic_read(&bat_priv
->tt_local_changes
));
349 /* if we have too many changes for one packet don't send any
350 * and wait for the tt table request which will be fragmented
352 if ((!primary_if
) || (req_len
> primary_if
->soft_iface
->mtu
))
353 req_len
= min_packet_len
;
355 batadv_tt_realloc_packet_buff(packet_buff
, packet_buff_len
,
356 min_packet_len
, req_len
);
359 batadv_hardif_free_ref(primary_if
);
362 static int batadv_tt_changes_fill_buff(struct bat_priv
*bat_priv
,
363 unsigned char **packet_buff
,
364 int *packet_buff_len
,
367 struct tt_change_node
*entry
, *safe
;
368 int count
= 0, tot_changes
= 0, new_len
;
369 unsigned char *tt_buff
;
371 batadv_tt_prepare_packet_buff(bat_priv
, packet_buff
,
372 packet_buff_len
, min_packet_len
);
374 new_len
= *packet_buff_len
- min_packet_len
;
375 tt_buff
= *packet_buff
+ min_packet_len
;
378 tot_changes
= new_len
/ batadv_tt_len(1);
380 spin_lock_bh(&bat_priv
->tt_changes_list_lock
);
381 atomic_set(&bat_priv
->tt_local_changes
, 0);
383 list_for_each_entry_safe(entry
, safe
, &bat_priv
->tt_changes_list
,
385 if (count
< tot_changes
) {
386 memcpy(tt_buff
+ batadv_tt_len(count
),
387 &entry
->change
, sizeof(struct tt_change
));
390 list_del(&entry
->list
);
393 spin_unlock_bh(&bat_priv
->tt_changes_list_lock
);
395 /* Keep the buffer for possible tt_request */
396 spin_lock_bh(&bat_priv
->tt_buff_lock
);
397 kfree(bat_priv
->tt_buff
);
398 bat_priv
->tt_buff_len
= 0;
399 bat_priv
->tt_buff
= NULL
;
400 /* check whether this new OGM has no changes due to size problems */
402 /* if kmalloc() fails we will reply with the full table
403 * instead of providing the diff
405 bat_priv
->tt_buff
= kmalloc(new_len
, GFP_ATOMIC
);
406 if (bat_priv
->tt_buff
) {
407 memcpy(bat_priv
->tt_buff
, tt_buff
, new_len
);
408 bat_priv
->tt_buff_len
= new_len
;
411 spin_unlock_bh(&bat_priv
->tt_buff_lock
);
416 int batadv_tt_local_seq_print_text(struct seq_file
*seq
, void *offset
)
418 struct net_device
*net_dev
= (struct net_device
*)seq
->private;
419 struct bat_priv
*bat_priv
= netdev_priv(net_dev
);
420 struct hashtable_t
*hash
= bat_priv
->tt_local_hash
;
421 struct tt_common_entry
*tt_common_entry
;
422 struct hard_iface
*primary_if
;
423 struct hlist_node
*node
;
424 struct hlist_head
*head
;
428 primary_if
= batadv_primary_if_get_selected(bat_priv
);
430 ret
= seq_printf(seq
,
431 "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
436 if (primary_if
->if_status
!= IF_ACTIVE
) {
437 ret
= seq_printf(seq
,
438 "BATMAN mesh %s disabled - primary interface not active\n",
444 "Locally retrieved addresses (from %s) announced via TT (TTVN: %u):\n",
445 net_dev
->name
, (uint8_t)atomic_read(&bat_priv
->ttvn
));
447 for (i
= 0; i
< hash
->size
; i
++) {
448 head
= &hash
->table
[i
];
451 hlist_for_each_entry_rcu(tt_common_entry
, node
,
453 seq_printf(seq
, " * %pM [%c%c%c%c%c]\n",
454 tt_common_entry
->addr
,
455 (tt_common_entry
->flags
&
456 TT_CLIENT_ROAM
? 'R' : '.'),
457 (tt_common_entry
->flags
&
458 TT_CLIENT_NOPURGE
? 'P' : '.'),
459 (tt_common_entry
->flags
&
460 TT_CLIENT_NEW
? 'N' : '.'),
461 (tt_common_entry
->flags
&
462 TT_CLIENT_PENDING
? 'X' : '.'),
463 (tt_common_entry
->flags
&
464 TT_CLIENT_WIFI
? 'W' : '.'));
470 batadv_hardif_free_ref(primary_if
);
474 static void batadv_tt_local_set_pending(struct bat_priv
*bat_priv
,
475 struct tt_local_entry
*tt_local_entry
,
476 uint16_t flags
, const char *message
)
478 batadv_tt_local_event(bat_priv
, tt_local_entry
->common
.addr
,
479 tt_local_entry
->common
.flags
| flags
);
481 /* The local client has to be marked as "pending to be removed" but has
482 * to be kept in the table in order to send it in a full table
483 * response issued before the net ttvn increment (consistency check)
485 tt_local_entry
->common
.flags
|= TT_CLIENT_PENDING
;
487 batadv_dbg(DBG_TT
, bat_priv
,
488 "Local tt entry (%pM) pending to be removed: %s\n",
489 tt_local_entry
->common
.addr
, message
);
492 void batadv_tt_local_remove(struct bat_priv
*bat_priv
, const uint8_t *addr
,
493 const char *message
, bool roaming
)
495 struct tt_local_entry
*tt_local_entry
= NULL
;
497 tt_local_entry
= batadv_tt_local_hash_find(bat_priv
, addr
);
501 batadv_tt_local_set_pending(bat_priv
, tt_local_entry
, TT_CLIENT_DEL
|
502 (roaming
? TT_CLIENT_ROAM
: NO_FLAGS
),
506 batadv_tt_local_entry_free_ref(tt_local_entry
);
509 static void batadv_tt_local_purge(struct bat_priv
*bat_priv
)
511 struct hashtable_t
*hash
= bat_priv
->tt_local_hash
;
512 struct tt_local_entry
*tt_local_entry
;
513 struct tt_common_entry
*tt_common_entry
;
514 struct hlist_node
*node
, *node_tmp
;
515 struct hlist_head
*head
;
516 spinlock_t
*list_lock
; /* protects write access to the hash lists */
519 for (i
= 0; i
< hash
->size
; i
++) {
520 head
= &hash
->table
[i
];
521 list_lock
= &hash
->list_locks
[i
];
523 spin_lock_bh(list_lock
);
524 hlist_for_each_entry_safe(tt_common_entry
, node
, node_tmp
,
526 tt_local_entry
= container_of(tt_common_entry
,
527 struct tt_local_entry
,
529 if (tt_local_entry
->common
.flags
& TT_CLIENT_NOPURGE
)
532 /* entry already marked for deletion */
533 if (tt_local_entry
->common
.flags
& TT_CLIENT_PENDING
)
536 if (!batadv_has_timed_out(tt_local_entry
->last_seen
,
540 batadv_tt_local_set_pending(bat_priv
, tt_local_entry
,
541 TT_CLIENT_DEL
, "timed out");
543 spin_unlock_bh(list_lock
);
548 static void batadv_tt_local_table_free(struct bat_priv
*bat_priv
)
550 struct hashtable_t
*hash
;
551 spinlock_t
*list_lock
; /* protects write access to the hash lists */
552 struct tt_common_entry
*tt_common_entry
;
553 struct tt_local_entry
*tt_local_entry
;
554 struct hlist_node
*node
, *node_tmp
;
555 struct hlist_head
*head
;
558 if (!bat_priv
->tt_local_hash
)
561 hash
= bat_priv
->tt_local_hash
;
563 for (i
= 0; i
< hash
->size
; i
++) {
564 head
= &hash
->table
[i
];
565 list_lock
= &hash
->list_locks
[i
];
567 spin_lock_bh(list_lock
);
568 hlist_for_each_entry_safe(tt_common_entry
, node
, node_tmp
,
571 tt_local_entry
= container_of(tt_common_entry
,
572 struct tt_local_entry
,
574 batadv_tt_local_entry_free_ref(tt_local_entry
);
576 spin_unlock_bh(list_lock
);
579 batadv_hash_destroy(hash
);
581 bat_priv
->tt_local_hash
= NULL
;
/* Allocate the global translation-table hash (1024 buckets) unless it
 * already exists. NOTE(review): the extraction dropped this function's
 * return statements; only the existence check and the allocation are
 * visible here.
 */
584 static int batadv_tt_global_init(struct bat_priv
*bat_priv
)
586 if (bat_priv
->tt_global_hash
)
589 bat_priv
->tt_global_hash
= batadv_hash_new(1024);
591 if (!bat_priv
->tt_global_hash
)
597 static void batadv_tt_changes_list_free(struct bat_priv
*bat_priv
)
599 struct tt_change_node
*entry
, *safe
;
601 spin_lock_bh(&bat_priv
->tt_changes_list_lock
);
603 list_for_each_entry_safe(entry
, safe
, &bat_priv
->tt_changes_list
,
605 list_del(&entry
->list
);
609 atomic_set(&bat_priv
->tt_local_changes
, 0);
610 spin_unlock_bh(&bat_priv
->tt_changes_list_lock
);
613 /* find out if an orig_node is already in the list of a tt_global_entry.
614 * returns 1 if found, 0 otherwise
616 static bool batadv_tt_global_entry_has_orig(const struct tt_global_entry
*entry
,
617 const struct orig_node
*orig_node
)
619 struct tt_orig_list_entry
*tmp_orig_entry
;
620 const struct hlist_head
*head
;
621 struct hlist_node
*node
;
625 head
= &entry
->orig_list
;
626 hlist_for_each_entry_rcu(tmp_orig_entry
, node
, head
, list
) {
627 if (tmp_orig_entry
->orig_node
== orig_node
) {
637 batadv_tt_global_add_orig_entry(struct tt_global_entry
*tt_global_entry
,
638 struct orig_node
*orig_node
, int ttvn
)
640 struct tt_orig_list_entry
*orig_entry
;
642 orig_entry
= kzalloc(sizeof(*orig_entry
), GFP_ATOMIC
);
646 INIT_HLIST_NODE(&orig_entry
->list
);
647 atomic_inc(&orig_node
->refcount
);
648 atomic_inc(&orig_node
->tt_size
);
649 orig_entry
->orig_node
= orig_node
;
650 orig_entry
->ttvn
= ttvn
;
652 spin_lock_bh(&tt_global_entry
->list_lock
);
653 hlist_add_head_rcu(&orig_entry
->list
,
654 &tt_global_entry
->orig_list
);
655 spin_unlock_bh(&tt_global_entry
->list_lock
);
658 /* caller must hold orig_node refcount */
659 int batadv_tt_global_add(struct bat_priv
*bat_priv
, struct orig_node
*orig_node
,
660 const unsigned char *tt_addr
, uint8_t flags
,
663 struct tt_global_entry
*tt_global_entry
= NULL
;
666 struct tt_common_entry
*common
;
668 tt_global_entry
= batadv_tt_global_hash_find(bat_priv
, tt_addr
);
670 if (!tt_global_entry
) {
671 tt_global_entry
= kzalloc(sizeof(*tt_global_entry
), GFP_ATOMIC
);
672 if (!tt_global_entry
)
675 common
= &tt_global_entry
->common
;
676 memcpy(common
->addr
, tt_addr
, ETH_ALEN
);
678 common
->flags
= flags
;
679 tt_global_entry
->roam_at
= 0;
680 atomic_set(&common
->refcount
, 2);
682 INIT_HLIST_HEAD(&tt_global_entry
->orig_list
);
683 spin_lock_init(&tt_global_entry
->list_lock
);
685 hash_added
= batadv_hash_add(bat_priv
->tt_global_hash
,
687 batadv_choose_orig
, common
,
688 &common
->hash_entry
);
690 if (unlikely(hash_added
!= 0)) {
691 /* remove the reference for the hash */
692 batadv_tt_global_entry_free_ref(tt_global_entry
);
696 batadv_tt_global_add_orig_entry(tt_global_entry
, orig_node
,
699 /* there is already a global entry, use this one. */
701 /* If there is the TT_CLIENT_ROAM flag set, there is only one
702 * originator left in the list and we previously received a
703 * delete + roaming change for this originator.
705 * We should first delete the old originator before adding the
708 if (tt_global_entry
->common
.flags
& TT_CLIENT_ROAM
) {
709 batadv_tt_global_del_orig_list(tt_global_entry
);
710 tt_global_entry
->common
.flags
&= ~TT_CLIENT_ROAM
;
711 tt_global_entry
->roam_at
= 0;
714 if (!batadv_tt_global_entry_has_orig(tt_global_entry
,
716 batadv_tt_global_add_orig_entry(tt_global_entry
,
720 batadv_dbg(DBG_TT
, bat_priv
,
721 "Creating new global tt entry: %pM (via %pM)\n",
722 tt_global_entry
->common
.addr
, orig_node
->orig
);
725 /* remove address from local hash if present */
726 batadv_tt_local_remove(bat_priv
, tt_global_entry
->common
.addr
,
727 "global tt received", flags
& TT_CLIENT_ROAM
);
731 batadv_tt_global_entry_free_ref(tt_global_entry
);
735 /* print all orig nodes who announce the address for this global entry.
736 * it is assumed that the caller holds rcu_read_lock();
739 batadv_tt_global_print_entry(struct tt_global_entry
*tt_global_entry
,
740 struct seq_file
*seq
)
742 struct hlist_head
*head
;
743 struct hlist_node
*node
;
744 struct tt_orig_list_entry
*orig_entry
;
745 struct tt_common_entry
*tt_common_entry
;
749 tt_common_entry
= &tt_global_entry
->common
;
751 head
= &tt_global_entry
->orig_list
;
753 hlist_for_each_entry_rcu(orig_entry
, node
, head
, list
) {
754 flags
= tt_common_entry
->flags
;
755 last_ttvn
= atomic_read(&orig_entry
->orig_node
->last_ttvn
);
756 seq_printf(seq
, " * %pM (%3u) via %pM (%3u) [%c%c]\n",
757 tt_global_entry
->common
.addr
, orig_entry
->ttvn
,
758 orig_entry
->orig_node
->orig
, last_ttvn
,
759 (flags
& TT_CLIENT_ROAM
? 'R' : '.'),
760 (flags
& TT_CLIENT_WIFI
? 'W' : '.'));
764 int batadv_tt_global_seq_print_text(struct seq_file
*seq
, void *offset
)
766 struct net_device
*net_dev
= (struct net_device
*)seq
->private;
767 struct bat_priv
*bat_priv
= netdev_priv(net_dev
);
768 struct hashtable_t
*hash
= bat_priv
->tt_global_hash
;
769 struct tt_common_entry
*tt_common_entry
;
770 struct tt_global_entry
*tt_global_entry
;
771 struct hard_iface
*primary_if
;
772 struct hlist_node
*node
;
773 struct hlist_head
*head
;
777 primary_if
= batadv_primary_if_get_selected(bat_priv
);
779 ret
= seq_printf(seq
,
780 "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
785 if (primary_if
->if_status
!= IF_ACTIVE
) {
786 ret
= seq_printf(seq
,
787 "BATMAN mesh %s disabled - primary interface not active\n",
793 "Globally announced TT entries received via the mesh %s\n",
795 seq_printf(seq
, " %-13s %s %-15s %s %s\n",
796 "Client", "(TTVN)", "Originator", "(Curr TTVN)", "Flags");
798 for (i
= 0; i
< hash
->size
; i
++) {
799 head
= &hash
->table
[i
];
802 hlist_for_each_entry_rcu(tt_common_entry
, node
,
804 tt_global_entry
= container_of(tt_common_entry
,
805 struct tt_global_entry
,
807 batadv_tt_global_print_entry(tt_global_entry
, seq
);
813 batadv_hardif_free_ref(primary_if
);
817 /* deletes the orig list of a tt_global_entry */
819 batadv_tt_global_del_orig_list(struct tt_global_entry
*tt_global_entry
)
821 struct hlist_head
*head
;
822 struct hlist_node
*node
, *safe
;
823 struct tt_orig_list_entry
*orig_entry
;
825 spin_lock_bh(&tt_global_entry
->list_lock
);
826 head
= &tt_global_entry
->orig_list
;
827 hlist_for_each_entry_safe(orig_entry
, node
, safe
, head
, list
) {
829 batadv_tt_orig_list_entry_free_ref(orig_entry
);
831 spin_unlock_bh(&tt_global_entry
->list_lock
);
836 batadv_tt_global_del_orig_entry(struct bat_priv
*bat_priv
,
837 struct tt_global_entry
*tt_global_entry
,
838 struct orig_node
*orig_node
,
841 struct hlist_head
*head
;
842 struct hlist_node
*node
, *safe
;
843 struct tt_orig_list_entry
*orig_entry
;
845 spin_lock_bh(&tt_global_entry
->list_lock
);
846 head
= &tt_global_entry
->orig_list
;
847 hlist_for_each_entry_safe(orig_entry
, node
, safe
, head
, list
) {
848 if (orig_entry
->orig_node
== orig_node
) {
849 batadv_dbg(DBG_TT
, bat_priv
,
850 "Deleting %pM from global tt entry %pM: %s\n",
852 tt_global_entry
->common
.addr
, message
);
854 batadv_tt_orig_list_entry_free_ref(orig_entry
);
857 spin_unlock_bh(&tt_global_entry
->list_lock
);
860 static void batadv_tt_global_del_struct(struct bat_priv
*bat_priv
,
861 struct tt_global_entry
*tt_global_entry
,
864 batadv_dbg(DBG_TT
, bat_priv
, "Deleting global tt entry %pM: %s\n",
865 tt_global_entry
->common
.addr
, message
);
867 batadv_hash_remove(bat_priv
->tt_global_hash
, batadv_compare_tt
,
868 batadv_choose_orig
, tt_global_entry
->common
.addr
);
869 batadv_tt_global_entry_free_ref(tt_global_entry
);
873 /* If the client is to be deleted, we check if it is the last origantor entry
874 * within tt_global entry. If yes, we set the TT_CLIENT_ROAM flag and the timer,
875 * otherwise we simply remove the originator scheduled for deletion.
878 batadv_tt_global_del_roaming(struct bat_priv
*bat_priv
,
879 struct tt_global_entry
*tt_global_entry
,
880 struct orig_node
*orig_node
, const char *message
)
882 bool last_entry
= true;
883 struct hlist_head
*head
;
884 struct hlist_node
*node
;
885 struct tt_orig_list_entry
*orig_entry
;
887 /* no local entry exists, case 1:
888 * Check if this is the last one or if other entries exist.
892 head
= &tt_global_entry
->orig_list
;
893 hlist_for_each_entry_rcu(orig_entry
, node
, head
, list
) {
894 if (orig_entry
->orig_node
!= orig_node
) {
902 /* its the last one, mark for roaming. */
903 tt_global_entry
->common
.flags
|= TT_CLIENT_ROAM
;
904 tt_global_entry
->roam_at
= jiffies
;
906 /* there is another entry, we can simply delete this
907 * one and can still use the other one.
909 batadv_tt_global_del_orig_entry(bat_priv
, tt_global_entry
,
915 static void batadv_tt_global_del(struct bat_priv
*bat_priv
,
916 struct orig_node
*orig_node
,
917 const unsigned char *addr
,
918 const char *message
, bool roaming
)
920 struct tt_global_entry
*tt_global_entry
= NULL
;
921 struct tt_local_entry
*local_entry
= NULL
;
923 tt_global_entry
= batadv_tt_global_hash_find(bat_priv
, addr
);
924 if (!tt_global_entry
)
928 batadv_tt_global_del_orig_entry(bat_priv
, tt_global_entry
,
931 if (hlist_empty(&tt_global_entry
->orig_list
))
932 batadv_tt_global_del_struct(bat_priv
, tt_global_entry
,
938 /* if we are deleting a global entry due to a roam
939 * event, there are two possibilities:
940 * 1) the client roamed from node A to node B => if there
941 * is only one originator left for this client, we mark
942 * it with TT_CLIENT_ROAM, we start a timer and we
943 * wait for node B to claim it. In case of timeout
944 * the entry is purged.
946 * If there are other originators left, we directly delete
948 * 2) the client roamed to us => we can directly delete
949 * the global entry, since it is useless now.
951 local_entry
= batadv_tt_local_hash_find(bat_priv
,
952 tt_global_entry
->common
.addr
);
954 /* local entry exists, case 2: client roamed to us. */
955 batadv_tt_global_del_orig_list(tt_global_entry
);
956 batadv_tt_global_del_struct(bat_priv
, tt_global_entry
, message
);
958 /* no local entry exists, case 1: check for roaming */
959 batadv_tt_global_del_roaming(bat_priv
, tt_global_entry
,
965 batadv_tt_global_entry_free_ref(tt_global_entry
);
967 batadv_tt_local_entry_free_ref(local_entry
);
970 void batadv_tt_global_del_orig(struct bat_priv
*bat_priv
,
971 struct orig_node
*orig_node
, const char *message
)
973 struct tt_global_entry
*global_entry
;
974 struct tt_common_entry
*tt_common_entry
;
976 struct hashtable_t
*hash
= bat_priv
->tt_global_hash
;
977 struct hlist_node
*node
, *safe
;
978 struct hlist_head
*head
;
979 spinlock_t
*list_lock
; /* protects write access to the hash lists */
984 for (i
= 0; i
< hash
->size
; i
++) {
985 head
= &hash
->table
[i
];
986 list_lock
= &hash
->list_locks
[i
];
988 spin_lock_bh(list_lock
);
989 hlist_for_each_entry_safe(tt_common_entry
, node
, safe
,
991 global_entry
= container_of(tt_common_entry
,
992 struct tt_global_entry
,
995 batadv_tt_global_del_orig_entry(bat_priv
, global_entry
,
998 if (hlist_empty(&global_entry
->orig_list
)) {
999 batadv_dbg(DBG_TT
, bat_priv
,
1000 "Deleting global tt entry %pM: %s\n",
1001 global_entry
->common
.addr
, message
);
1002 hlist_del_rcu(node
);
1003 batadv_tt_global_entry_free_ref(global_entry
);
1006 spin_unlock_bh(list_lock
);
1008 orig_node
->tt_initialised
= false;
1011 static void batadv_tt_global_roam_purge(struct bat_priv
*bat_priv
)
1013 struct hashtable_t
*hash
= bat_priv
->tt_global_hash
;
1014 struct tt_common_entry
*tt_common_entry
;
1015 struct tt_global_entry
*tt_global_entry
;
1016 struct hlist_node
*node
, *node_tmp
;
1017 struct hlist_head
*head
;
1018 spinlock_t
*list_lock
; /* protects write access to the hash lists */
1021 for (i
= 0; i
< hash
->size
; i
++) {
1022 head
= &hash
->table
[i
];
1023 list_lock
= &hash
->list_locks
[i
];
1025 spin_lock_bh(list_lock
);
1026 hlist_for_each_entry_safe(tt_common_entry
, node
, node_tmp
,
1028 tt_global_entry
= container_of(tt_common_entry
,
1029 struct tt_global_entry
,
1031 if (!(tt_global_entry
->common
.flags
& TT_CLIENT_ROAM
))
1033 if (!batadv_has_timed_out(tt_global_entry
->roam_at
,
1034 TT_CLIENT_ROAM_TIMEOUT
))
1037 batadv_dbg(DBG_TT
, bat_priv
,
1038 "Deleting global tt entry (%pM): Roaming timeout\n",
1039 tt_global_entry
->common
.addr
);
1041 hlist_del_rcu(node
);
1042 batadv_tt_global_entry_free_ref(tt_global_entry
);
1044 spin_unlock_bh(list_lock
);
1049 static void batadv_tt_global_table_free(struct bat_priv
*bat_priv
)
1051 struct hashtable_t
*hash
;
1052 spinlock_t
*list_lock
; /* protects write access to the hash lists */
1053 struct tt_common_entry
*tt_common_entry
;
1054 struct tt_global_entry
*tt_global_entry
;
1055 struct hlist_node
*node
, *node_tmp
;
1056 struct hlist_head
*head
;
1059 if (!bat_priv
->tt_global_hash
)
1062 hash
= bat_priv
->tt_global_hash
;
1064 for (i
= 0; i
< hash
->size
; i
++) {
1065 head
= &hash
->table
[i
];
1066 list_lock
= &hash
->list_locks
[i
];
1068 spin_lock_bh(list_lock
);
1069 hlist_for_each_entry_safe(tt_common_entry
, node
, node_tmp
,
1071 hlist_del_rcu(node
);
1072 tt_global_entry
= container_of(tt_common_entry
,
1073 struct tt_global_entry
,
1075 batadv_tt_global_entry_free_ref(tt_global_entry
);
1077 spin_unlock_bh(list_lock
);
1080 batadv_hash_destroy(hash
);
1082 bat_priv
->tt_global_hash
= NULL
;
1085 static bool _batadv_is_ap_isolated(struct tt_local_entry
*tt_local_entry
,
1086 struct tt_global_entry
*tt_global_entry
)
1090 if (tt_local_entry
->common
.flags
& TT_CLIENT_WIFI
&&
1091 tt_global_entry
->common
.flags
& TT_CLIENT_WIFI
)
1097 struct orig_node
*batadv_transtable_search(struct bat_priv
*bat_priv
,
1099 const uint8_t *addr
)
1101 struct tt_local_entry
*tt_local_entry
= NULL
;
1102 struct tt_global_entry
*tt_global_entry
= NULL
;
1103 struct orig_node
*orig_node
= NULL
;
1104 struct neigh_node
*router
= NULL
;
1105 struct hlist_head
*head
;
1106 struct hlist_node
*node
;
1107 struct tt_orig_list_entry
*orig_entry
;
1110 if (src
&& atomic_read(&bat_priv
->ap_isolation
)) {
1111 tt_local_entry
= batadv_tt_local_hash_find(bat_priv
, src
);
1112 if (!tt_local_entry
)
1116 tt_global_entry
= batadv_tt_global_hash_find(bat_priv
, addr
);
1117 if (!tt_global_entry
)
1120 /* check whether the clients should not communicate due to AP
1123 if (tt_local_entry
&&
1124 _batadv_is_ap_isolated(tt_local_entry
, tt_global_entry
))
1130 head
= &tt_global_entry
->orig_list
;
1131 hlist_for_each_entry_rcu(orig_entry
, node
, head
, list
) {
1132 router
= batadv_orig_node_get_router(orig_entry
->orig_node
);
1136 if (router
->tq_avg
> best_tq
) {
1137 orig_node
= orig_entry
->orig_node
;
1138 best_tq
= router
->tq_avg
;
1140 batadv_neigh_node_free_ref(router
);
1142 /* found anything? */
1143 if (orig_node
&& !atomic_inc_not_zero(&orig_node
->refcount
))
1147 if (tt_global_entry
)
1148 batadv_tt_global_entry_free_ref(tt_global_entry
);
1150 batadv_tt_local_entry_free_ref(tt_local_entry
);
1155 /* Calculates the checksum of the local table of a given orig_node */
1156 static uint16_t batadv_tt_global_crc(struct bat_priv
*bat_priv
,
1157 struct orig_node
*orig_node
)
1159 uint16_t total
= 0, total_one
;
1160 struct hashtable_t
*hash
= bat_priv
->tt_global_hash
;
1161 struct tt_common_entry
*tt_common_entry
;
1162 struct tt_global_entry
*tt_global_entry
;
1163 struct hlist_node
*node
;
1164 struct hlist_head
*head
;
1168 for (i
= 0; i
< hash
->size
; i
++) {
1169 head
= &hash
->table
[i
];
1172 hlist_for_each_entry_rcu(tt_common_entry
, node
,
1174 tt_global_entry
= container_of(tt_common_entry
,
1175 struct tt_global_entry
,
1177 /* Roaming clients are in the global table for
1178 * consistency only. They don't have to be
1179 * taken into account while computing the
1182 if (tt_global_entry
->common
.flags
& TT_CLIENT_ROAM
)
1185 /* find out if this global entry is announced by this
1188 if (!batadv_tt_global_entry_has_orig(tt_global_entry
,
1193 for (j
= 0; j
< ETH_ALEN
; j
++)
1194 total_one
= crc16_byte(total_one
,
1195 tt_global_entry
->common
.addr
[j
]);
1204 /* Calculates the checksum of the local table */
1205 static uint16_t batadv_tt_local_crc(struct bat_priv
*bat_priv
)
1207 uint16_t total
= 0, total_one
;
1208 struct hashtable_t
*hash
= bat_priv
->tt_local_hash
;
1209 struct tt_common_entry
*tt_common_entry
;
1210 struct hlist_node
*node
;
1211 struct hlist_head
*head
;
1215 for (i
= 0; i
< hash
->size
; i
++) {
1216 head
= &hash
->table
[i
];
1219 hlist_for_each_entry_rcu(tt_common_entry
, node
,
1221 /* not yet committed clients have not to be taken into
1222 * account while computing the CRC
1224 if (tt_common_entry
->flags
& TT_CLIENT_NEW
)
1227 for (j
= 0; j
< ETH_ALEN
; j
++)
1228 total_one
= crc16_byte(total_one
,
1229 tt_common_entry
->addr
[j
]);
1238 static void batadv_tt_req_list_free(struct bat_priv
*bat_priv
)
1240 struct tt_req_node
*node
, *safe
;
1242 spin_lock_bh(&bat_priv
->tt_req_list_lock
);
1244 list_for_each_entry_safe(node
, safe
, &bat_priv
->tt_req_list
, list
) {
1245 list_del(&node
->list
);
1249 spin_unlock_bh(&bat_priv
->tt_req_list_lock
);
1252 static void batadv_tt_save_orig_buffer(struct bat_priv
*bat_priv
,
1253 struct orig_node
*orig_node
,
1254 const unsigned char *tt_buff
,
1255 uint8_t tt_num_changes
)
1257 uint16_t tt_buff_len
= batadv_tt_len(tt_num_changes
);
1259 /* Replace the old buffer only if I received something in the
1260 * last OGM (the OGM could carry no changes)
1262 spin_lock_bh(&orig_node
->tt_buff_lock
);
1263 if (tt_buff_len
> 0) {
1264 kfree(orig_node
->tt_buff
);
1265 orig_node
->tt_buff_len
= 0;
1266 orig_node
->tt_buff
= kmalloc(tt_buff_len
, GFP_ATOMIC
);
1267 if (orig_node
->tt_buff
) {
1268 memcpy(orig_node
->tt_buff
, tt_buff
, tt_buff_len
);
1269 orig_node
->tt_buff_len
= tt_buff_len
;
1272 spin_unlock_bh(&orig_node
->tt_buff_lock
);
1275 static void batadv_tt_req_purge(struct bat_priv
*bat_priv
)
1277 struct tt_req_node
*node
, *safe
;
1279 spin_lock_bh(&bat_priv
->tt_req_list_lock
);
1280 list_for_each_entry_safe(node
, safe
, &bat_priv
->tt_req_list
, list
) {
1281 if (batadv_has_timed_out(node
->issued_at
, TT_REQUEST_TIMEOUT
)) {
1282 list_del(&node
->list
);
1286 spin_unlock_bh(&bat_priv
->tt_req_list_lock
);
1289 /* returns the pointer to the new tt_req_node struct if no request
1290 * has already been issued for this orig_node, NULL otherwise
1292 static struct tt_req_node
*batadv_new_tt_req_node(struct bat_priv
*bat_priv
,
1293 struct orig_node
*orig_node
)
1295 struct tt_req_node
*tt_req_node_tmp
, *tt_req_node
= NULL
;
1297 spin_lock_bh(&bat_priv
->tt_req_list_lock
);
1298 list_for_each_entry(tt_req_node_tmp
, &bat_priv
->tt_req_list
, list
) {
1299 if (batadv_compare_eth(tt_req_node_tmp
, orig_node
) &&
1300 !batadv_has_timed_out(tt_req_node_tmp
->issued_at
,
1301 TT_REQUEST_TIMEOUT
))
1305 tt_req_node
= kmalloc(sizeof(*tt_req_node
), GFP_ATOMIC
);
1309 memcpy(tt_req_node
->addr
, orig_node
->orig
, ETH_ALEN
);
1310 tt_req_node
->issued_at
= jiffies
;
1312 list_add(&tt_req_node
->list
, &bat_priv
->tt_req_list
);
1314 spin_unlock_bh(&bat_priv
->tt_req_list_lock
);
1318 /* data_ptr is useless here, but has to be kept to respect the prototype */
1319 static int batadv_tt_local_valid_entry(const void *entry_ptr
,
1320 const void *data_ptr
)
1322 const struct tt_common_entry
*tt_common_entry
= entry_ptr
;
1324 if (tt_common_entry
->flags
& TT_CLIENT_NEW
)
1329 static int batadv_tt_global_valid(const void *entry_ptr
,
1330 const void *data_ptr
)
1332 const struct tt_common_entry
*tt_common_entry
= entry_ptr
;
1333 const struct tt_global_entry
*tt_global_entry
;
1334 const struct orig_node
*orig_node
= data_ptr
;
1336 if (tt_common_entry
->flags
& TT_CLIENT_ROAM
)
1339 tt_global_entry
= container_of(tt_common_entry
, struct tt_global_entry
,
1342 return batadv_tt_global_entry_has_orig(tt_global_entry
, orig_node
);
1345 static struct sk_buff
*
1346 batadv_tt_response_fill_table(uint16_t tt_len
, uint8_t ttvn
,
1347 struct hashtable_t
*hash
,
1348 struct hard_iface
*primary_if
,
1349 int (*valid_cb
)(const void *, const void *),
1352 struct tt_common_entry
*tt_common_entry
;
1353 struct tt_query_packet
*tt_response
;
1354 struct tt_change
*tt_change
;
1355 struct hlist_node
*node
;
1356 struct hlist_head
*head
;
1357 struct sk_buff
*skb
= NULL
;
1358 uint16_t tt_tot
, tt_count
;
1359 ssize_t tt_query_size
= sizeof(struct tt_query_packet
);
1362 if (tt_query_size
+ tt_len
> primary_if
->soft_iface
->mtu
) {
1363 tt_len
= primary_if
->soft_iface
->mtu
- tt_query_size
;
1364 tt_len
-= tt_len
% sizeof(struct tt_change
);
1366 tt_tot
= tt_len
/ sizeof(struct tt_change
);
1368 skb
= dev_alloc_skb(tt_query_size
+ tt_len
+ ETH_HLEN
);
1372 skb_reserve(skb
, ETH_HLEN
);
1373 tt_response
= (struct tt_query_packet
*)skb_put(skb
,
1374 tt_query_size
+ tt_len
);
1375 tt_response
->ttvn
= ttvn
;
1377 tt_change
= (struct tt_change
*)(skb
->data
+ tt_query_size
);
1381 for (i
= 0; i
< hash
->size
; i
++) {
1382 head
= &hash
->table
[i
];
1384 hlist_for_each_entry_rcu(tt_common_entry
, node
,
1386 if (tt_count
== tt_tot
)
1389 if ((valid_cb
) && (!valid_cb(tt_common_entry
, cb_data
)))
1392 memcpy(tt_change
->addr
, tt_common_entry
->addr
,
1394 tt_change
->flags
= NO_FLAGS
;
1402 /* store in the message the number of entries we have successfully
1405 tt_response
->tt_data
= htons(tt_count
);
1411 static int batadv_send_tt_request(struct bat_priv
*bat_priv
,
1412 struct orig_node
*dst_orig_node
,
1413 uint8_t ttvn
, uint16_t tt_crc
,
1416 struct sk_buff
*skb
= NULL
;
1417 struct tt_query_packet
*tt_request
;
1418 struct neigh_node
*neigh_node
= NULL
;
1419 struct hard_iface
*primary_if
;
1420 struct tt_req_node
*tt_req_node
= NULL
;
1423 primary_if
= batadv_primary_if_get_selected(bat_priv
);
1427 /* The new tt_req will be issued only if I'm not waiting for a
1428 * reply from the same orig_node yet
1430 tt_req_node
= batadv_new_tt_req_node(bat_priv
, dst_orig_node
);
1434 skb
= dev_alloc_skb(sizeof(struct tt_query_packet
) + ETH_HLEN
);
1438 skb_reserve(skb
, ETH_HLEN
);
1440 tt_request
= (struct tt_query_packet
*)skb_put(skb
,
1441 sizeof(struct tt_query_packet
));
1443 tt_request
->header
.packet_type
= BAT_TT_QUERY
;
1444 tt_request
->header
.version
= COMPAT_VERSION
;
1445 memcpy(tt_request
->src
, primary_if
->net_dev
->dev_addr
, ETH_ALEN
);
1446 memcpy(tt_request
->dst
, dst_orig_node
->orig
, ETH_ALEN
);
1447 tt_request
->header
.ttl
= TTL
;
1448 tt_request
->ttvn
= ttvn
;
1449 tt_request
->tt_data
= htons(tt_crc
);
1450 tt_request
->flags
= TT_REQUEST
;
1453 tt_request
->flags
|= TT_FULL_TABLE
;
1455 neigh_node
= batadv_orig_node_get_router(dst_orig_node
);
1459 batadv_dbg(DBG_TT
, bat_priv
,
1460 "Sending TT_REQUEST to %pM via %pM [%c]\n",
1461 dst_orig_node
->orig
, neigh_node
->addr
,
1462 (full_table
? 'F' : '.'));
1464 batadv_inc_counter(bat_priv
, BAT_CNT_TT_REQUEST_TX
);
1466 batadv_send_skb_packet(skb
, neigh_node
->if_incoming
, neigh_node
->addr
);
1471 batadv_neigh_node_free_ref(neigh_node
);
1473 batadv_hardif_free_ref(primary_if
);
1476 if (ret
&& tt_req_node
) {
1477 spin_lock_bh(&bat_priv
->tt_req_list_lock
);
1478 list_del(&tt_req_node
->list
);
1479 spin_unlock_bh(&bat_priv
->tt_req_list_lock
);
1485 static bool batadv_send_other_tt_response(struct bat_priv
*bat_priv
,
1486 struct tt_query_packet
*tt_request
)
1488 struct orig_node
*req_dst_orig_node
= NULL
, *res_dst_orig_node
= NULL
;
1489 struct neigh_node
*neigh_node
= NULL
;
1490 struct hard_iface
*primary_if
= NULL
;
1491 uint8_t orig_ttvn
, req_ttvn
, ttvn
;
1493 unsigned char *tt_buff
;
1495 uint16_t tt_len
, tt_tot
;
1496 struct sk_buff
*skb
= NULL
;
1497 struct tt_query_packet
*tt_response
;
1499 batadv_dbg(DBG_TT
, bat_priv
,
1500 "Received TT_REQUEST from %pM for ttvn: %u (%pM) [%c]\n",
1501 tt_request
->src
, tt_request
->ttvn
, tt_request
->dst
,
1502 (tt_request
->flags
& TT_FULL_TABLE
? 'F' : '.'));
1504 /* Let's get the orig node of the REAL destination */
1505 req_dst_orig_node
= batadv_orig_hash_find(bat_priv
, tt_request
->dst
);
1506 if (!req_dst_orig_node
)
1509 res_dst_orig_node
= batadv_orig_hash_find(bat_priv
, tt_request
->src
);
1510 if (!res_dst_orig_node
)
1513 neigh_node
= batadv_orig_node_get_router(res_dst_orig_node
);
1517 primary_if
= batadv_primary_if_get_selected(bat_priv
);
1521 orig_ttvn
= (uint8_t)atomic_read(&req_dst_orig_node
->last_ttvn
);
1522 req_ttvn
= tt_request
->ttvn
;
1524 /* I don't have the requested data */
1525 if (orig_ttvn
!= req_ttvn
||
1526 tt_request
->tt_data
!= htons(req_dst_orig_node
->tt_crc
))
1529 /* If the full table has been explicitly requested */
1530 if (tt_request
->flags
& TT_FULL_TABLE
||
1531 !req_dst_orig_node
->tt_buff
)
1536 /* In this version, fragmentation is not implemented, then
1537 * I'll send only one packet with as much TT entries as I can
1540 spin_lock_bh(&req_dst_orig_node
->tt_buff_lock
);
1541 tt_len
= req_dst_orig_node
->tt_buff_len
;
1542 tt_tot
= tt_len
/ sizeof(struct tt_change
);
1544 skb
= dev_alloc_skb(sizeof(struct tt_query_packet
) +
1549 skb_reserve(skb
, ETH_HLEN
);
1550 tt_response
= (struct tt_query_packet
*)skb_put(skb
,
1551 sizeof(struct tt_query_packet
) + tt_len
);
1552 tt_response
->ttvn
= req_ttvn
;
1553 tt_response
->tt_data
= htons(tt_tot
);
1555 tt_buff
= skb
->data
+ sizeof(struct tt_query_packet
);
1556 /* Copy the last orig_node's OGM buffer */
1557 memcpy(tt_buff
, req_dst_orig_node
->tt_buff
,
1558 req_dst_orig_node
->tt_buff_len
);
1560 spin_unlock_bh(&req_dst_orig_node
->tt_buff_lock
);
1562 tt_len
= (uint16_t)atomic_read(&req_dst_orig_node
->tt_size
) *
1563 sizeof(struct tt_change
);
1564 ttvn
= (uint8_t)atomic_read(&req_dst_orig_node
->last_ttvn
);
1566 skb
= batadv_tt_response_fill_table(tt_len
, ttvn
,
1567 bat_priv
->tt_global_hash
,
1569 batadv_tt_global_valid
,
1574 tt_response
= (struct tt_query_packet
*)skb
->data
;
1577 tt_response
->header
.packet_type
= BAT_TT_QUERY
;
1578 tt_response
->header
.version
= COMPAT_VERSION
;
1579 tt_response
->header
.ttl
= TTL
;
1580 memcpy(tt_response
->src
, req_dst_orig_node
->orig
, ETH_ALEN
);
1581 memcpy(tt_response
->dst
, tt_request
->src
, ETH_ALEN
);
1582 tt_response
->flags
= TT_RESPONSE
;
1585 tt_response
->flags
|= TT_FULL_TABLE
;
1587 batadv_dbg(DBG_TT
, bat_priv
,
1588 "Sending TT_RESPONSE %pM via %pM for %pM (ttvn: %u)\n",
1589 res_dst_orig_node
->orig
, neigh_node
->addr
,
1590 req_dst_orig_node
->orig
, req_ttvn
);
1592 batadv_inc_counter(bat_priv
, BAT_CNT_TT_RESPONSE_TX
);
1594 batadv_send_skb_packet(skb
, neigh_node
->if_incoming
, neigh_node
->addr
);
1599 spin_unlock_bh(&req_dst_orig_node
->tt_buff_lock
);
1602 if (res_dst_orig_node
)
1603 batadv_orig_node_free_ref(res_dst_orig_node
);
1604 if (req_dst_orig_node
)
1605 batadv_orig_node_free_ref(req_dst_orig_node
);
1607 batadv_neigh_node_free_ref(neigh_node
);
1609 batadv_hardif_free_ref(primary_if
);
1615 static bool batadv_send_my_tt_response(struct bat_priv
*bat_priv
,
1616 struct tt_query_packet
*tt_request
)
1618 struct orig_node
*orig_node
= NULL
;
1619 struct neigh_node
*neigh_node
= NULL
;
1620 struct hard_iface
*primary_if
= NULL
;
1621 uint8_t my_ttvn
, req_ttvn
, ttvn
;
1623 unsigned char *tt_buff
;
1625 uint16_t tt_len
, tt_tot
;
1626 struct sk_buff
*skb
= NULL
;
1627 struct tt_query_packet
*tt_response
;
1629 batadv_dbg(DBG_TT
, bat_priv
,
1630 "Received TT_REQUEST from %pM for ttvn: %u (me) [%c]\n",
1631 tt_request
->src
, tt_request
->ttvn
,
1632 (tt_request
->flags
& TT_FULL_TABLE
? 'F' : '.'));
1635 my_ttvn
= (uint8_t)atomic_read(&bat_priv
->ttvn
);
1636 req_ttvn
= tt_request
->ttvn
;
1638 orig_node
= batadv_orig_hash_find(bat_priv
, tt_request
->src
);
1642 neigh_node
= batadv_orig_node_get_router(orig_node
);
1646 primary_if
= batadv_primary_if_get_selected(bat_priv
);
1650 /* If the full table has been explicitly requested or the gap
1651 * is too big send the whole local translation table
1653 if (tt_request
->flags
& TT_FULL_TABLE
|| my_ttvn
!= req_ttvn
||
1659 /* In this version, fragmentation is not implemented, then
1660 * I'll send only one packet with as much TT entries as I can
1663 spin_lock_bh(&bat_priv
->tt_buff_lock
);
1664 tt_len
= bat_priv
->tt_buff_len
;
1665 tt_tot
= tt_len
/ sizeof(struct tt_change
);
1667 skb
= dev_alloc_skb(sizeof(struct tt_query_packet
) +
1672 skb_reserve(skb
, ETH_HLEN
);
1673 tt_response
= (struct tt_query_packet
*)skb_put(skb
,
1674 sizeof(struct tt_query_packet
) + tt_len
);
1675 tt_response
->ttvn
= req_ttvn
;
1676 tt_response
->tt_data
= htons(tt_tot
);
1678 tt_buff
= skb
->data
+ sizeof(struct tt_query_packet
);
1679 memcpy(tt_buff
, bat_priv
->tt_buff
,
1680 bat_priv
->tt_buff_len
);
1681 spin_unlock_bh(&bat_priv
->tt_buff_lock
);
1683 tt_len
= (uint16_t)atomic_read(&bat_priv
->num_local_tt
) *
1684 sizeof(struct tt_change
);
1685 ttvn
= (uint8_t)atomic_read(&bat_priv
->ttvn
);
1687 skb
= batadv_tt_response_fill_table(tt_len
, ttvn
,
1688 bat_priv
->tt_local_hash
,
1690 batadv_tt_local_valid_entry
,
1695 tt_response
= (struct tt_query_packet
*)skb
->data
;
1698 tt_response
->header
.packet_type
= BAT_TT_QUERY
;
1699 tt_response
->header
.version
= COMPAT_VERSION
;
1700 tt_response
->header
.ttl
= TTL
;
1701 memcpy(tt_response
->src
, primary_if
->net_dev
->dev_addr
, ETH_ALEN
);
1702 memcpy(tt_response
->dst
, tt_request
->src
, ETH_ALEN
);
1703 tt_response
->flags
= TT_RESPONSE
;
1706 tt_response
->flags
|= TT_FULL_TABLE
;
1708 batadv_dbg(DBG_TT
, bat_priv
,
1709 "Sending TT_RESPONSE to %pM via %pM [%c]\n",
1710 orig_node
->orig
, neigh_node
->addr
,
1711 (tt_response
->flags
& TT_FULL_TABLE
? 'F' : '.'));
1713 batadv_inc_counter(bat_priv
, BAT_CNT_TT_RESPONSE_TX
);
1715 batadv_send_skb_packet(skb
, neigh_node
->if_incoming
, neigh_node
->addr
);
1720 spin_unlock_bh(&bat_priv
->tt_buff_lock
);
1723 batadv_orig_node_free_ref(orig_node
);
1725 batadv_neigh_node_free_ref(neigh_node
);
1727 batadv_hardif_free_ref(primary_if
);
1730 /* This packet was for me, so it doesn't need to be re-routed */
1734 bool batadv_send_tt_response(struct bat_priv
*bat_priv
,
1735 struct tt_query_packet
*tt_request
)
1737 if (batadv_is_my_mac(tt_request
->dst
)) {
1738 /* don't answer backbone gws! */
1739 if (batadv_bla_is_backbone_gw_orig(bat_priv
, tt_request
->src
))
1742 return batadv_send_my_tt_response(bat_priv
, tt_request
);
1744 return batadv_send_other_tt_response(bat_priv
, tt_request
);
1748 static void _batadv_tt_update_changes(struct bat_priv
*bat_priv
,
1749 struct orig_node
*orig_node
,
1750 struct tt_change
*tt_change
,
1751 uint16_t tt_num_changes
, uint8_t ttvn
)
1756 for (i
= 0; i
< tt_num_changes
; i
++) {
1757 if ((tt_change
+ i
)->flags
& TT_CLIENT_DEL
) {
1758 roams
= (tt_change
+ i
)->flags
& TT_CLIENT_ROAM
;
1759 batadv_tt_global_del(bat_priv
, orig_node
,
1760 (tt_change
+ i
)->addr
,
1761 "tt removed by changes",
1764 if (!batadv_tt_global_add(bat_priv
, orig_node
,
1765 (tt_change
+ i
)->addr
,
1766 (tt_change
+ i
)->flags
, ttvn
))
1767 /* In case of problem while storing a
1768 * global_entry, we stop the updating
1769 * procedure without committing the
1770 * ttvn change. This will avoid to send
1771 * corrupted data on tt_request
1776 orig_node
->tt_initialised
= true;
1779 static void batadv_tt_fill_gtable(struct bat_priv
*bat_priv
,
1780 struct tt_query_packet
*tt_response
)
1782 struct orig_node
*orig_node
= NULL
;
1784 orig_node
= batadv_orig_hash_find(bat_priv
, tt_response
->src
);
1788 /* Purge the old table first.. */
1789 batadv_tt_global_del_orig(bat_priv
, orig_node
, "Received full table");
1791 _batadv_tt_update_changes(bat_priv
, orig_node
,
1792 (struct tt_change
*)(tt_response
+ 1),
1793 ntohs(tt_response
->tt_data
),
1796 spin_lock_bh(&orig_node
->tt_buff_lock
);
1797 kfree(orig_node
->tt_buff
);
1798 orig_node
->tt_buff_len
= 0;
1799 orig_node
->tt_buff
= NULL
;
1800 spin_unlock_bh(&orig_node
->tt_buff_lock
);
1802 atomic_set(&orig_node
->last_ttvn
, tt_response
->ttvn
);
1806 batadv_orig_node_free_ref(orig_node
);
1809 static void batadv_tt_update_changes(struct bat_priv
*bat_priv
,
1810 struct orig_node
*orig_node
,
1811 uint16_t tt_num_changes
, uint8_t ttvn
,
1812 struct tt_change
*tt_change
)
1814 _batadv_tt_update_changes(bat_priv
, orig_node
, tt_change
,
1815 tt_num_changes
, ttvn
);
1817 batadv_tt_save_orig_buffer(bat_priv
, orig_node
,
1818 (unsigned char *)tt_change
, tt_num_changes
);
1819 atomic_set(&orig_node
->last_ttvn
, ttvn
);
1822 bool batadv_is_my_client(struct bat_priv
*bat_priv
, const uint8_t *addr
)
1824 struct tt_local_entry
*tt_local_entry
= NULL
;
1827 tt_local_entry
= batadv_tt_local_hash_find(bat_priv
, addr
);
1828 if (!tt_local_entry
)
1830 /* Check if the client has been logically deleted (but is kept for
1831 * consistency purpose)
1833 if (tt_local_entry
->common
.flags
& TT_CLIENT_PENDING
)
1838 batadv_tt_local_entry_free_ref(tt_local_entry
);
1842 void batadv_handle_tt_response(struct bat_priv
*bat_priv
,
1843 struct tt_query_packet
*tt_response
)
1845 struct tt_req_node
*node
, *safe
;
1846 struct orig_node
*orig_node
= NULL
;
1848 batadv_dbg(DBG_TT
, bat_priv
,
1849 "Received TT_RESPONSE from %pM for ttvn %d t_size: %d [%c]\n",
1850 tt_response
->src
, tt_response
->ttvn
,
1851 ntohs(tt_response
->tt_data
),
1852 (tt_response
->flags
& TT_FULL_TABLE
? 'F' : '.'));
1854 /* we should have never asked a backbone gw */
1855 if (batadv_bla_is_backbone_gw_orig(bat_priv
, tt_response
->src
))
1858 orig_node
= batadv_orig_hash_find(bat_priv
, tt_response
->src
);
1862 if (tt_response
->flags
& TT_FULL_TABLE
)
1863 batadv_tt_fill_gtable(bat_priv
, tt_response
);
1865 batadv_tt_update_changes(bat_priv
, orig_node
,
1866 ntohs(tt_response
->tt_data
),
1868 (struct tt_change
*)(tt_response
+ 1));
1870 /* Delete the tt_req_node from pending tt_requests list */
1871 spin_lock_bh(&bat_priv
->tt_req_list_lock
);
1872 list_for_each_entry_safe(node
, safe
, &bat_priv
->tt_req_list
, list
) {
1873 if (!batadv_compare_eth(node
->addr
, tt_response
->src
))
1875 list_del(&node
->list
);
1878 spin_unlock_bh(&bat_priv
->tt_req_list_lock
);
1880 /* Recalculate the CRC for this orig_node and store it */
1881 orig_node
->tt_crc
= batadv_tt_global_crc(bat_priv
, orig_node
);
1882 /* Roaming phase is over: tables are in sync again. I can
1885 orig_node
->tt_poss_change
= false;
1888 batadv_orig_node_free_ref(orig_node
);
/* Initialises the local and global translation tables and arms the
 * periodic purge timer.
 * NOTE(review): return convention reconstructed — confirm against the
 * callers whether failure is signalled by <0 or by 0.
 */
int batadv_tt_init(struct bat_priv *bat_priv)
{
	int ret;

	ret = batadv_tt_local_init(bat_priv);
	if (ret < 0)
		return ret;

	ret = batadv_tt_global_init(bat_priv);
	if (ret < 0)
		return ret;

	batadv_tt_start_timer(bat_priv);

	return 1;
}
1908 static void batadv_tt_roam_list_free(struct bat_priv
*bat_priv
)
1910 struct tt_roam_node
*node
, *safe
;
1912 spin_lock_bh(&bat_priv
->tt_roam_list_lock
);
1914 list_for_each_entry_safe(node
, safe
, &bat_priv
->tt_roam_list
, list
) {
1915 list_del(&node
->list
);
1919 spin_unlock_bh(&bat_priv
->tt_roam_list_lock
);
1922 static void batadv_tt_roam_purge(struct bat_priv
*bat_priv
)
1924 struct tt_roam_node
*node
, *safe
;
1926 spin_lock_bh(&bat_priv
->tt_roam_list_lock
);
1927 list_for_each_entry_safe(node
, safe
, &bat_priv
->tt_roam_list
, list
) {
1928 if (!batadv_has_timed_out(node
->first_time
, ROAMING_MAX_TIME
))
1931 list_del(&node
->list
);
1934 spin_unlock_bh(&bat_priv
->tt_roam_list_lock
);
1937 /* This function checks whether the client already reached the
1938 * maximum number of possible roaming phases. In this case the ROAMING_ADV
1941 * returns true if the ROAMING_ADV can be sent, false otherwise
1943 static bool batadv_tt_check_roam_count(struct bat_priv
*bat_priv
,
1946 struct tt_roam_node
*tt_roam_node
;
1949 spin_lock_bh(&bat_priv
->tt_roam_list_lock
);
1950 /* The new tt_req will be issued only if I'm not waiting for a
1951 * reply from the same orig_node yet
1953 list_for_each_entry(tt_roam_node
, &bat_priv
->tt_roam_list
, list
) {
1954 if (!batadv_compare_eth(tt_roam_node
->addr
, client
))
1957 if (batadv_has_timed_out(tt_roam_node
->first_time
,
1961 if (!batadv_atomic_dec_not_zero(&tt_roam_node
->counter
))
1962 /* Sorry, you roamed too many times! */
1969 tt_roam_node
= kmalloc(sizeof(*tt_roam_node
), GFP_ATOMIC
);
1973 tt_roam_node
->first_time
= jiffies
;
1974 atomic_set(&tt_roam_node
->counter
, ROAMING_MAX_COUNT
- 1);
1975 memcpy(tt_roam_node
->addr
, client
, ETH_ALEN
);
1977 list_add(&tt_roam_node
->list
, &bat_priv
->tt_roam_list
);
1982 spin_unlock_bh(&bat_priv
->tt_roam_list_lock
);
1986 static void batadv_send_roam_adv(struct bat_priv
*bat_priv
, uint8_t *client
,
1987 struct orig_node
*orig_node
)
1989 struct neigh_node
*neigh_node
= NULL
;
1990 struct sk_buff
*skb
= NULL
;
1991 struct roam_adv_packet
*roam_adv_packet
;
1993 struct hard_iface
*primary_if
;
1995 /* before going on we have to check whether the client has
1996 * already roamed to us too many times
1998 if (!batadv_tt_check_roam_count(bat_priv
, client
))
2001 skb
= dev_alloc_skb(sizeof(struct roam_adv_packet
) + ETH_HLEN
);
2005 skb_reserve(skb
, ETH_HLEN
);
2007 roam_adv_packet
= (struct roam_adv_packet
*)skb_put(skb
,
2008 sizeof(struct roam_adv_packet
));
2010 roam_adv_packet
->header
.packet_type
= BAT_ROAM_ADV
;
2011 roam_adv_packet
->header
.version
= COMPAT_VERSION
;
2012 roam_adv_packet
->header
.ttl
= TTL
;
2013 primary_if
= batadv_primary_if_get_selected(bat_priv
);
2016 memcpy(roam_adv_packet
->src
, primary_if
->net_dev
->dev_addr
, ETH_ALEN
);
2017 batadv_hardif_free_ref(primary_if
);
2018 memcpy(roam_adv_packet
->dst
, orig_node
->orig
, ETH_ALEN
);
2019 memcpy(roam_adv_packet
->client
, client
, ETH_ALEN
);
2021 neigh_node
= batadv_orig_node_get_router(orig_node
);
2025 batadv_dbg(DBG_TT
, bat_priv
,
2026 "Sending ROAMING_ADV to %pM (client %pM) via %pM\n",
2027 orig_node
->orig
, client
, neigh_node
->addr
);
2029 batadv_inc_counter(bat_priv
, BAT_CNT_TT_ROAM_ADV_TX
);
2031 batadv_send_skb_packet(skb
, neigh_node
->if_incoming
, neigh_node
->addr
);
2036 batadv_neigh_node_free_ref(neigh_node
);
2042 static void batadv_tt_purge(struct work_struct
*work
)
2044 struct delayed_work
*delayed_work
=
2045 container_of(work
, struct delayed_work
, work
);
2046 struct bat_priv
*bat_priv
=
2047 container_of(delayed_work
, struct bat_priv
, tt_work
);
2049 batadv_tt_local_purge(bat_priv
);
2050 batadv_tt_global_roam_purge(bat_priv
);
2051 batadv_tt_req_purge(bat_priv
);
2052 batadv_tt_roam_purge(bat_priv
);
2054 batadv_tt_start_timer(bat_priv
);
2057 void batadv_tt_free(struct bat_priv
*bat_priv
)
2059 cancel_delayed_work_sync(&bat_priv
->tt_work
);
2061 batadv_tt_local_table_free(bat_priv
);
2062 batadv_tt_global_table_free(bat_priv
);
2063 batadv_tt_req_list_free(bat_priv
);
2064 batadv_tt_changes_list_free(bat_priv
);
2065 batadv_tt_roam_list_free(bat_priv
);
2067 kfree(bat_priv
->tt_buff
);
2070 /* This function will enable or disable the specified flags for all the entries
2071 * in the given hash table and returns the number of modified entries
2073 static uint16_t batadv_tt_set_flags(struct hashtable_t
*hash
, uint16_t flags
,
2077 uint16_t changed_num
= 0;
2078 struct hlist_head
*head
;
2079 struct hlist_node
*node
;
2080 struct tt_common_entry
*tt_common_entry
;
2085 for (i
= 0; i
< hash
->size
; i
++) {
2086 head
= &hash
->table
[i
];
2089 hlist_for_each_entry_rcu(tt_common_entry
, node
,
2092 if ((tt_common_entry
->flags
& flags
) == flags
)
2094 tt_common_entry
->flags
|= flags
;
2096 if (!(tt_common_entry
->flags
& flags
))
2098 tt_common_entry
->flags
&= ~flags
;
2108 /* Purge out all the tt local entries marked with TT_CLIENT_PENDING */
2109 static void batadv_tt_local_purge_pending_clients(struct bat_priv
*bat_priv
)
2111 struct hashtable_t
*hash
= bat_priv
->tt_local_hash
;
2112 struct tt_common_entry
*tt_common_entry
;
2113 struct tt_local_entry
*tt_local_entry
;
2114 struct hlist_node
*node
, *node_tmp
;
2115 struct hlist_head
*head
;
2116 spinlock_t
*list_lock
; /* protects write access to the hash lists */
2122 for (i
= 0; i
< hash
->size
; i
++) {
2123 head
= &hash
->table
[i
];
2124 list_lock
= &hash
->list_locks
[i
];
2126 spin_lock_bh(list_lock
);
2127 hlist_for_each_entry_safe(tt_common_entry
, node
, node_tmp
,
2129 if (!(tt_common_entry
->flags
& TT_CLIENT_PENDING
))
2132 batadv_dbg(DBG_TT
, bat_priv
,
2133 "Deleting local tt entry (%pM): pending\n",
2134 tt_common_entry
->addr
);
2136 atomic_dec(&bat_priv
->num_local_tt
);
2137 hlist_del_rcu(node
);
2138 tt_local_entry
= container_of(tt_common_entry
,
2139 struct tt_local_entry
,
2141 batadv_tt_local_entry_free_ref(tt_local_entry
);
2143 spin_unlock_bh(list_lock
);
2148 static int batadv_tt_commit_changes(struct bat_priv
*bat_priv
,
2149 unsigned char **packet_buff
,
2150 int *packet_buff_len
, int packet_min_len
)
2152 uint16_t changed_num
= 0;
2154 if (atomic_read(&bat_priv
->tt_local_changes
) < 1)
2157 changed_num
= batadv_tt_set_flags(bat_priv
->tt_local_hash
,
2158 TT_CLIENT_NEW
, false);
2160 /* all reset entries have to be counted as local entries */
2161 atomic_add(changed_num
, &bat_priv
->num_local_tt
);
2162 batadv_tt_local_purge_pending_clients(bat_priv
);
2163 bat_priv
->tt_crc
= batadv_tt_local_crc(bat_priv
);
2165 /* Increment the TTVN only once per OGM interval */
2166 atomic_inc(&bat_priv
->ttvn
);
2167 batadv_dbg(DBG_TT
, bat_priv
,
2168 "Local changes committed, updating to ttvn %u\n",
2169 (uint8_t)atomic_read(&bat_priv
->ttvn
));
2170 bat_priv
->tt_poss_change
= false;
2172 /* reset the sending counter */
2173 atomic_set(&bat_priv
->tt_ogm_append_cnt
, TT_OGM_APPEND_MAX
);
2175 return batadv_tt_changes_fill_buff(bat_priv
, packet_buff
,
2176 packet_buff_len
, packet_min_len
);
2179 /* when calling this function (hard_iface == primary_if) has to be true */
2180 int batadv_tt_append_diff(struct bat_priv
*bat_priv
,
2181 unsigned char **packet_buff
, int *packet_buff_len
,
2186 /* if at least one change happened */
2187 tt_num_changes
= batadv_tt_commit_changes(bat_priv
, packet_buff
,
2191 /* if the changes have been sent often enough */
2192 if ((tt_num_changes
< 0) &&
2193 (!batadv_atomic_dec_not_zero(&bat_priv
->tt_ogm_append_cnt
))) {
2194 batadv_tt_realloc_packet_buff(packet_buff
, packet_buff_len
,
2195 packet_min_len
, packet_min_len
);
2199 return tt_num_changes
;
2202 bool batadv_is_ap_isolated(struct bat_priv
*bat_priv
, uint8_t *src
,
2205 struct tt_local_entry
*tt_local_entry
= NULL
;
2206 struct tt_global_entry
*tt_global_entry
= NULL
;
2209 if (!atomic_read(&bat_priv
->ap_isolation
))
2212 tt_local_entry
= batadv_tt_local_hash_find(bat_priv
, dst
);
2213 if (!tt_local_entry
)
2216 tt_global_entry
= batadv_tt_global_hash_find(bat_priv
, src
);
2217 if (!tt_global_entry
)
2220 if (!_batadv_is_ap_isolated(tt_local_entry
, tt_global_entry
))
2226 if (tt_global_entry
)
2227 batadv_tt_global_entry_free_ref(tt_global_entry
);
2229 batadv_tt_local_entry_free_ref(tt_local_entry
);
2233 void batadv_tt_update_orig(struct bat_priv
*bat_priv
,
2234 struct orig_node
*orig_node
,
2235 const unsigned char *tt_buff
, uint8_t tt_num_changes
,
2236 uint8_t ttvn
, uint16_t tt_crc
)
2238 uint8_t orig_ttvn
= (uint8_t)atomic_read(&orig_node
->last_ttvn
);
2239 bool full_table
= true;
2241 /* don't care about a backbone gateways updates. */
2242 if (batadv_bla_is_backbone_gw_orig(bat_priv
, orig_node
->orig
))
2245 /* orig table not initialised AND first diff is in the OGM OR the ttvn
2246 * increased by one -> we can apply the attached changes
2248 if ((!orig_node
->tt_initialised
&& ttvn
== 1) ||
2249 ttvn
- orig_ttvn
== 1) {
2250 /* the OGM could not contain the changes due to their size or
2251 * because they have already been sent TT_OGM_APPEND_MAX times.
2252 * In this case send a tt request
2254 if (!tt_num_changes
) {
2259 batadv_tt_update_changes(bat_priv
, orig_node
, tt_num_changes
,
2260 ttvn
, (struct tt_change
*)tt_buff
);
2262 /* Even if we received the precomputed crc with the OGM, we
2263 * prefer to recompute it to spot any possible inconsistency
2264 * in the global table
2266 orig_node
->tt_crc
= batadv_tt_global_crc(bat_priv
, orig_node
);
2268 /* The ttvn alone is not enough to guarantee consistency
2269 * because a single value could represent different states
2270 * (due to the wrap around). Thus a node has to check whether
2271 * the resulting table (after applying the changes) is still
2272 * consistent or not. E.g. a node could disconnect while its
2273 * ttvn is X and reconnect on ttvn = X + TTVN_MAX: in this case
2274 * checking the CRC value is mandatory to detect the
2277 if (orig_node
->tt_crc
!= tt_crc
)
2280 /* Roaming phase is over: tables are in sync again. I can
2283 orig_node
->tt_poss_change
= false;
2285 /* if we missed more than one change or our tables are not
2286 * in sync anymore -> request fresh tt data
2288 if (!orig_node
->tt_initialised
|| ttvn
!= orig_ttvn
||
2289 orig_node
->tt_crc
!= tt_crc
) {
2291 batadv_dbg(DBG_TT
, bat_priv
,
2292 "TT inconsistency for %pM. Need to retrieve the correct information (ttvn: %u last_ttvn: %u crc: %u last_crc: %u num_changes: %u)\n",
2293 orig_node
->orig
, ttvn
, orig_ttvn
, tt_crc
,
2294 orig_node
->tt_crc
, tt_num_changes
);
2295 batadv_send_tt_request(bat_priv
, orig_node
, ttvn
,
2296 tt_crc
, full_table
);
2302 /* returns true whether we know that the client has moved from its old
2303 * originator to another one. This entry is kept is still kept for consistency
2306 bool batadv_tt_global_client_is_roaming(struct bat_priv
*bat_priv
,
2309 struct tt_global_entry
*tt_global_entry
;
2312 tt_global_entry
= batadv_tt_global_hash_find(bat_priv
, addr
);
2313 if (!tt_global_entry
)
2316 ret
= tt_global_entry
->common
.flags
& TT_CLIENT_ROAM
;
2317 batadv_tt_global_entry_free_ref(tt_global_entry
);