batman-adv: Calculate sizeof using variable instead of types
[deliverable/linux.git] / net / batman-adv / translation-table.c
1 /*
2 * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
3 *
4 * Marek Lindner, Simon Wunderlich
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of version 2 of the GNU General Public
8 * License as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA
19 *
20 */
21
22 #include "main.h"
23 #include "translation-table.h"
24 #include "soft-interface.h"
25 #include "hard-interface.h"
26 #include "hash.h"
27 #include "originator.h"
28
29 static void tt_local_purge(struct work_struct *work);
30 static void _tt_global_del_orig(struct bat_priv *bat_priv,
31 struct tt_global_entry *tt_global_entry,
32 const char *message);
33
34 /* returns 1 if they are the same mac addr */
35 static int compare_ltt(const struct hlist_node *node, const void *data2)
36 {
37 const void *data1 = container_of(node, struct tt_local_entry,
38 hash_entry);
39
40 return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
41 }
42
43 /* returns 1 if they are the same mac addr */
44 static int compare_gtt(const struct hlist_node *node, const void *data2)
45 {
46 const void *data1 = container_of(node, struct tt_global_entry,
47 hash_entry);
48
49 return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
50 }
51
52 static void tt_local_start_timer(struct bat_priv *bat_priv)
53 {
54 INIT_DELAYED_WORK(&bat_priv->tt_work, tt_local_purge);
55 queue_delayed_work(bat_event_workqueue, &bat_priv->tt_work, 10 * HZ);
56 }
57
58 static struct tt_local_entry *tt_local_hash_find(struct bat_priv *bat_priv,
59 const void *data)
60 {
61 struct hashtable_t *hash = bat_priv->tt_local_hash;
62 struct hlist_head *head;
63 struct hlist_node *node;
64 struct tt_local_entry *tt_local_entry, *tt_local_entry_tmp = NULL;
65 int index;
66
67 if (!hash)
68 return NULL;
69
70 index = choose_orig(data, hash->size);
71 head = &hash->table[index];
72
73 rcu_read_lock();
74 hlist_for_each_entry_rcu(tt_local_entry, node, head, hash_entry) {
75 if (!compare_eth(tt_local_entry, data))
76 continue;
77
78 tt_local_entry_tmp = tt_local_entry;
79 break;
80 }
81 rcu_read_unlock();
82
83 return tt_local_entry_tmp;
84 }
85
86 static struct tt_global_entry *tt_global_hash_find(struct bat_priv *bat_priv,
87 const void *data)
88 {
89 struct hashtable_t *hash = bat_priv->tt_global_hash;
90 struct hlist_head *head;
91 struct hlist_node *node;
92 struct tt_global_entry *tt_global_entry;
93 struct tt_global_entry *tt_global_entry_tmp = NULL;
94 int index;
95
96 if (!hash)
97 return NULL;
98
99 index = choose_orig(data, hash->size);
100 head = &hash->table[index];
101
102 rcu_read_lock();
103 hlist_for_each_entry_rcu(tt_global_entry, node, head, hash_entry) {
104 if (!compare_eth(tt_global_entry, data))
105 continue;
106
107 tt_global_entry_tmp = tt_global_entry;
108 break;
109 }
110 rcu_read_unlock();
111
112 return tt_global_entry_tmp;
113 }
114
115 int tt_local_init(struct bat_priv *bat_priv)
116 {
117 if (bat_priv->tt_local_hash)
118 return 1;
119
120 bat_priv->tt_local_hash = hash_new(1024);
121
122 if (!bat_priv->tt_local_hash)
123 return 0;
124
125 atomic_set(&bat_priv->tt_local_changed, 0);
126 tt_local_start_timer(bat_priv);
127
128 return 1;
129 }
130
/* learn a mac address as a locally attached client: refresh it if already
 * known, otherwise create a new local tt entry (subject to packet-size
 * limits) and drop any conflicting global entry.
 *
 * Lock ordering: tt_lhash_lock and tt_ghash_lock are taken one at a time,
 * never nested. */
void tt_local_add(struct net_device *soft_iface, const uint8_t *addr)
{
	struct bat_priv *bat_priv = netdev_priv(soft_iface);
	struct tt_local_entry *tt_local_entry;
	struct tt_global_entry *tt_global_entry;
	int required_bytes;

	/* fast path: address already announced - just refresh its
	 * timestamp so the purge timer does not expire it */
	spin_lock_bh(&bat_priv->tt_lhash_lock);
	tt_local_entry = tt_local_hash_find(bat_priv, addr);
	spin_unlock_bh(&bat_priv->tt_lhash_lock);

	if (tt_local_entry) {
		tt_local_entry->last_seen = jiffies;
		return;
	}

	/* only announce as many hosts as possible in the batman-packet and
	   space in batman_packet->num_tt That also should give a limit to
	   MAC-flooding. */
	required_bytes = (bat_priv->num_local_tt + 1) * ETH_ALEN;
	required_bytes += BAT_PACKET_LEN;

	/* refuse the new entry if the announcement would not fit into an
	 * ethernet frame, would exceed the aggregation buffer (when OGM
	 * aggregation is enabled), or would overflow the 8-bit num_tt
	 * counter (255 entries) */
	if ((required_bytes > ETH_DATA_LEN) ||
	    (atomic_read(&bat_priv->aggregated_ogms) &&
	     required_bytes > MAX_AGGREGATION_BYTES) ||
	    (bat_priv->num_local_tt + 1 > 255)) {
		bat_dbg(DBG_ROUTES, bat_priv,
			"Can't add new local tt entry (%pM): "
			"number of local tt entries exceeds packet size\n",
			addr);
		return;
	}

	bat_dbg(DBG_ROUTES, bat_priv,
		"Creating new local tt entry: %pM\n", addr);

	/* GFP_ATOMIC: this can be called from non-sleepable context */
	tt_local_entry = kmalloc(sizeof(*tt_local_entry), GFP_ATOMIC);
	if (!tt_local_entry)
		return;

	memcpy(tt_local_entry->addr, addr, ETH_ALEN);
	tt_local_entry->last_seen = jiffies;

	/* the batman interface mac address should never be purged */
	if (compare_eth(addr, soft_iface->dev_addr))
		tt_local_entry->never_purge = 1;
	else
		tt_local_entry->never_purge = 0;

	spin_lock_bh(&bat_priv->tt_lhash_lock);

	hash_add(bat_priv->tt_local_hash, compare_ltt, choose_orig,
		 tt_local_entry, &tt_local_entry->hash_entry);
	bat_priv->num_local_tt++;
	/* mark the table dirty so the next OGM carries the new entry */
	atomic_set(&bat_priv->tt_local_changed, 1);

	spin_unlock_bh(&bat_priv->tt_lhash_lock);

	/* remove address from global hash if present */
	spin_lock_bh(&bat_priv->tt_ghash_lock);

	tt_global_entry = tt_global_hash_find(bat_priv, addr);

	if (tt_global_entry)
		_tt_global_del_orig(bat_priv, tt_global_entry,
				    "local tt received");

	spin_unlock_bh(&bat_priv->tt_ghash_lock);
}
200
201 int tt_local_fill_buffer(struct bat_priv *bat_priv,
202 unsigned char *buff, int buff_len)
203 {
204 struct hashtable_t *hash = bat_priv->tt_local_hash;
205 struct tt_local_entry *tt_local_entry;
206 struct hlist_node *node;
207 struct hlist_head *head;
208 int i, count = 0;
209
210 spin_lock_bh(&bat_priv->tt_lhash_lock);
211
212 for (i = 0; i < hash->size; i++) {
213 head = &hash->table[i];
214
215 rcu_read_lock();
216 hlist_for_each_entry_rcu(tt_local_entry, node,
217 head, hash_entry) {
218 if (buff_len < (count + 1) * ETH_ALEN)
219 break;
220
221 memcpy(buff + (count * ETH_ALEN), tt_local_entry->addr,
222 ETH_ALEN);
223
224 count++;
225 }
226 rcu_read_unlock();
227 }
228
229 /* if we did not get all new local tts see you next time ;-) */
230 if (count == bat_priv->num_local_tt)
231 atomic_set(&bat_priv->tt_local_changed, 0);
232
233 spin_unlock_bh(&bat_priv->tt_lhash_lock);
234 return count;
235 }
236
/* debugfs/seq_file dump of the local translation table: two passes under
 * tt_lhash_lock - first estimate the needed buffer size, then format the
 * entries. New entries are only added under the same lock, so the
 * estimate cannot be outgrown between the passes. */
int tt_local_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct bat_priv *bat_priv = netdev_priv(net_dev);
	struct hashtable_t *hash = bat_priv->tt_local_hash;
	struct tt_local_entry *tt_local_entry;
	struct hard_iface *primary_if;
	struct hlist_node *node;
	struct hlist_head *head;
	size_t buf_size, pos;
	char *buff;
	int i, ret = 0;

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if) {
		ret = seq_printf(seq, "BATMAN mesh %s disabled - "
				 "please specify interfaces to enable it\n",
				 net_dev->name);
		goto out;
	}

	if (primary_if->if_status != IF_ACTIVE) {
		ret = seq_printf(seq, "BATMAN mesh %s disabled - "
				 "primary interface not active\n",
				 net_dev->name);
		goto out;
	}

	seq_printf(seq, "Locally retrieved addresses (from %s) "
		   "announced via TT:\n",
		   net_dev->name);

	spin_lock_bh(&bat_priv->tt_lhash_lock);

	buf_size = 1;
	/* Estimate length for: " * xx:xx:xx:xx:xx:xx\n" */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		__hlist_for_each_rcu(node, head)
			buf_size += 21;
		rcu_read_unlock();
	}

	/* GFP_ATOMIC: allocated while holding a spinlock */
	buff = kmalloc(buf_size, GFP_ATOMIC);
	if (!buff) {
		spin_unlock_bh(&bat_priv->tt_lhash_lock);
		ret = -ENOMEM;
		goto out;
	}

	buff[0] = '\0';
	pos = 0;

	/* second pass: format one " * %pM\n" line (21 chars) per entry */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(tt_local_entry, node,
					 head, hash_entry) {
			pos += snprintf(buff + pos, 22, " * %pM\n",
					tt_local_entry->addr);
		}
		rcu_read_unlock();
	}

	spin_unlock_bh(&bat_priv->tt_lhash_lock);

	seq_printf(seq, "%s", buff);
	kfree(buff);
out:
	if (primary_if)
		hardif_free_ref(primary_if);
	return ret;
}
313
314 static void _tt_local_del(struct hlist_node *node, void *arg)
315 {
316 struct bat_priv *bat_priv = arg;
317 void *data = container_of(node, struct tt_local_entry, hash_entry);
318
319 kfree(data);
320 bat_priv->num_local_tt--;
321 atomic_set(&bat_priv->tt_local_changed, 1);
322 }
323
/* unlink a local tt entry from the hash and free it (caller holds
 * tt_lhash_lock). The entry must still be in the hash: hash_remove()
 * unlinks it by address before _tt_local_del() frees the memory. */
static void tt_local_del(struct bat_priv *bat_priv,
			 struct tt_local_entry *tt_local_entry,
			 const char *message)
{
	bat_dbg(DBG_ROUTES, bat_priv, "Deleting local tt entry (%pM): %s\n",
		tt_local_entry->addr, message);

	hash_remove(bat_priv->tt_local_hash, compare_ltt, choose_orig,
		    tt_local_entry->addr);
	_tt_local_del(&tt_local_entry->hash_entry, bat_priv);
}
335
336 void tt_local_remove(struct bat_priv *bat_priv,
337 const uint8_t *addr, const char *message)
338 {
339 struct tt_local_entry *tt_local_entry;
340
341 spin_lock_bh(&bat_priv->tt_lhash_lock);
342
343 tt_local_entry = tt_local_hash_find(bat_priv, addr);
344
345 if (tt_local_entry)
346 tt_local_del(bat_priv, tt_local_entry, message);
347
348 spin_unlock_bh(&bat_priv->tt_lhash_lock);
349 }
350
351 static void tt_local_purge(struct work_struct *work)
352 {
353 struct delayed_work *delayed_work =
354 container_of(work, struct delayed_work, work);
355 struct bat_priv *bat_priv =
356 container_of(delayed_work, struct bat_priv, tt_work);
357 struct hashtable_t *hash = bat_priv->tt_local_hash;
358 struct tt_local_entry *tt_local_entry;
359 struct hlist_node *node, *node_tmp;
360 struct hlist_head *head;
361 unsigned long timeout;
362 int i;
363
364 spin_lock_bh(&bat_priv->tt_lhash_lock);
365
366 for (i = 0; i < hash->size; i++) {
367 head = &hash->table[i];
368
369 hlist_for_each_entry_safe(tt_local_entry, node, node_tmp,
370 head, hash_entry) {
371 if (tt_local_entry->never_purge)
372 continue;
373
374 timeout = tt_local_entry->last_seen;
375 timeout += TT_LOCAL_TIMEOUT * HZ;
376
377 if (time_before(jiffies, timeout))
378 continue;
379
380 tt_local_del(bat_priv, tt_local_entry,
381 "address timed out");
382 }
383 }
384
385 spin_unlock_bh(&bat_priv->tt_lhash_lock);
386 tt_local_start_timer(bat_priv);
387 }
388
389 void tt_local_free(struct bat_priv *bat_priv)
390 {
391 if (!bat_priv->tt_local_hash)
392 return;
393
394 cancel_delayed_work_sync(&bat_priv->tt_work);
395 hash_delete(bat_priv->tt_local_hash, _tt_local_del, bat_priv);
396 bat_priv->tt_local_hash = NULL;
397 }
398
399 int tt_global_init(struct bat_priv *bat_priv)
400 {
401 if (bat_priv->tt_global_hash)
402 return 1;
403
404 bat_priv->tt_global_hash = hash_new(1024);
405
406 if (!bat_priv->tt_global_hash)
407 return 0;
408
409 return 1;
410 }
411
412 void tt_global_add_orig(struct bat_priv *bat_priv,
413 struct orig_node *orig_node,
414 const unsigned char *tt_buff, int tt_buff_len)
415 {
416 struct tt_global_entry *tt_global_entry;
417 struct tt_local_entry *tt_local_entry;
418 int tt_buff_count = 0;
419 const unsigned char *tt_ptr;
420
421 while ((tt_buff_count + 1) * ETH_ALEN <= tt_buff_len) {
422 spin_lock_bh(&bat_priv->tt_ghash_lock);
423
424 tt_ptr = tt_buff + (tt_buff_count * ETH_ALEN);
425 tt_global_entry = tt_global_hash_find(bat_priv, tt_ptr);
426
427 if (!tt_global_entry) {
428 spin_unlock_bh(&bat_priv->tt_ghash_lock);
429
430 tt_global_entry = kmalloc(sizeof(*tt_global_entry),
431 GFP_ATOMIC);
432
433 if (!tt_global_entry)
434 break;
435
436 memcpy(tt_global_entry->addr, tt_ptr, ETH_ALEN);
437
438 bat_dbg(DBG_ROUTES, bat_priv,
439 "Creating new global tt entry: "
440 "%pM (via %pM)\n",
441 tt_global_entry->addr, orig_node->orig);
442
443 spin_lock_bh(&bat_priv->tt_ghash_lock);
444 hash_add(bat_priv->tt_global_hash, compare_gtt,
445 choose_orig, tt_global_entry,
446 &tt_global_entry->hash_entry);
447
448 }
449
450 tt_global_entry->orig_node = orig_node;
451 spin_unlock_bh(&bat_priv->tt_ghash_lock);
452
453 /* remove address from local hash if present */
454 spin_lock_bh(&bat_priv->tt_lhash_lock);
455
456 tt_ptr = tt_buff + (tt_buff_count * ETH_ALEN);
457 tt_local_entry = tt_local_hash_find(bat_priv, tt_ptr);
458
459 if (tt_local_entry)
460 tt_local_del(bat_priv, tt_local_entry,
461 "global tt received");
462
463 spin_unlock_bh(&bat_priv->tt_lhash_lock);
464
465 tt_buff_count++;
466 }
467
468 /* initialize, and overwrite if malloc succeeds */
469 orig_node->tt_buff = NULL;
470 orig_node->tt_buff_len = 0;
471
472 if (tt_buff_len > 0) {
473 orig_node->tt_buff = kmalloc(tt_buff_len, GFP_ATOMIC);
474 if (orig_node->tt_buff) {
475 memcpy(orig_node->tt_buff, tt_buff, tt_buff_len);
476 orig_node->tt_buff_len = tt_buff_len;
477 }
478 }
479 }
480
/* debugfs/seq_file dump of the global translation table: two passes under
 * tt_ghash_lock - first estimate the needed buffer size, then format the
 * entries. Entries are only added under the same lock, so the estimate
 * cannot be outgrown between the passes. */
int tt_global_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct bat_priv *bat_priv = netdev_priv(net_dev);
	struct hashtable_t *hash = bat_priv->tt_global_hash;
	struct tt_global_entry *tt_global_entry;
	struct hard_iface *primary_if;
	struct hlist_node *node;
	struct hlist_head *head;
	size_t buf_size, pos;
	char *buff;
	int i, ret = 0;

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if) {
		ret = seq_printf(seq, "BATMAN mesh %s disabled - please "
				 "specify interfaces to enable it\n",
				 net_dev->name);
		goto out;
	}

	if (primary_if->if_status != IF_ACTIVE) {
		ret = seq_printf(seq, "BATMAN mesh %s disabled - "
				 "primary interface not active\n",
				 net_dev->name);
		goto out;
	}

	seq_printf(seq,
		   "Globally announced TT entries received via the mesh %s\n",
		   net_dev->name);

	spin_lock_bh(&bat_priv->tt_ghash_lock);

	buf_size = 1;
	/* Estimate length for: " * xx:xx:xx:xx:xx:xx via xx:xx:xx:xx:xx:xx\n"*/
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		__hlist_for_each_rcu(node, head)
			buf_size += 43;
		rcu_read_unlock();
	}

	/* GFP_ATOMIC: allocated while holding a spinlock */
	buff = kmalloc(buf_size, GFP_ATOMIC);
	if (!buff) {
		spin_unlock_bh(&bat_priv->tt_ghash_lock);
		ret = -ENOMEM;
		goto out;
	}
	buff[0] = '\0';
	pos = 0;

	/* second pass: format one " * %pM via %pM\n" line (43 chars) per
	 * entry */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(tt_global_entry, node,
					 head, hash_entry) {
			pos += snprintf(buff + pos, 44,
					" * %pM via %pM\n",
					tt_global_entry->addr,
					tt_global_entry->orig_node->orig);
		}
		rcu_read_unlock();
	}

	spin_unlock_bh(&bat_priv->tt_ghash_lock);

	seq_printf(seq, "%s", buff);
	kfree(buff);
out:
	if (primary_if)
		hardif_free_ref(primary_if);
	return ret;
}
558
/* unlink a global tt entry from the hash and free it (caller holds
 * tt_ghash_lock). The entry must still be in the hash: hash_remove()
 * unlinks it by address before the memory is freed. */
static void _tt_global_del_orig(struct bat_priv *bat_priv,
				struct tt_global_entry *tt_global_entry,
				const char *message)
{
	bat_dbg(DBG_ROUTES, bat_priv,
		"Deleting global tt entry %pM (via %pM): %s\n",
		tt_global_entry->addr, tt_global_entry->orig_node->orig,
		message);

	hash_remove(bat_priv->tt_global_hash, compare_gtt, choose_orig,
		    tt_global_entry->addr);
	kfree(tt_global_entry);
}
572
573 void tt_global_del_orig(struct bat_priv *bat_priv,
574 struct orig_node *orig_node, const char *message)
575 {
576 struct tt_global_entry *tt_global_entry;
577 int tt_buff_count = 0;
578 unsigned char *tt_ptr;
579
580 if (orig_node->tt_buff_len == 0)
581 return;
582
583 spin_lock_bh(&bat_priv->tt_ghash_lock);
584
585 while ((tt_buff_count + 1) * ETH_ALEN <= orig_node->tt_buff_len) {
586 tt_ptr = orig_node->tt_buff + (tt_buff_count * ETH_ALEN);
587 tt_global_entry = tt_global_hash_find(bat_priv, tt_ptr);
588
589 if ((tt_global_entry) &&
590 (tt_global_entry->orig_node == orig_node))
591 _tt_global_del_orig(bat_priv, tt_global_entry,
592 message);
593
594 tt_buff_count++;
595 }
596
597 spin_unlock_bh(&bat_priv->tt_ghash_lock);
598
599 orig_node->tt_buff_len = 0;
600 kfree(orig_node->tt_buff);
601 orig_node->tt_buff = NULL;
602 }
603
604 static void tt_global_del(struct hlist_node *node, void *arg)
605 {
606 void *data = container_of(node, struct tt_global_entry, hash_entry);
607
608 kfree(data);
609 }
610
611 void tt_global_free(struct bat_priv *bat_priv)
612 {
613 if (!bat_priv->tt_global_hash)
614 return;
615
616 hash_delete(bat_priv->tt_global_hash, tt_global_del, NULL);
617 bat_priv->tt_global_hash = NULL;
618 }
619
620 struct orig_node *transtable_search(struct bat_priv *bat_priv,
621 const uint8_t *addr)
622 {
623 struct tt_global_entry *tt_global_entry;
624 struct orig_node *orig_node = NULL;
625
626 spin_lock_bh(&bat_priv->tt_ghash_lock);
627 tt_global_entry = tt_global_hash_find(bat_priv, addr);
628
629 if (!tt_global_entry)
630 goto out;
631
632 if (!atomic_inc_not_zero(&tt_global_entry->orig_node->refcount))
633 goto out;
634
635 orig_node = tt_global_entry->orig_node;
636
637 out:
638 spin_unlock_bh(&bat_priv->tt_ghash_lock);
639 return orig_node;
640 }
This page took 0.054886 seconds and 5 git commands to generate.