batman-adv: protect each hash row with rcu locks
net/batman-adv/routing.c
1 /*
2 * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
3 *
4 * Marek Lindner, Simon Wunderlich
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of version 2 of the GNU General Public
8 * License as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA
19 *
20 */
21
22 #include "main.h"
23 #include "routing.h"
24 #include "send.h"
25 #include "hash.h"
26 #include "soft-interface.h"
27 #include "hard-interface.h"
28 #include "icmp_socket.h"
29 #include "translation-table.h"
30 #include "originator.h"
31 #include "ring_buffer.h"
32 #include "vis.h"
33 #include "aggregation.h"
34 #include "gateway_common.h"
35 #include "gateway_client.h"
36 #include "unicast.h"
37
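/* Slide this interface's own broadcast window by one position for every
 * originator and refresh the bcast_own_sum counters. The hash buckets are
 * walked under rcu_read_lock() while orig_hash_lock is held for the whole
 * update. */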
38 void slide_own_bcast_window(struct batman_if *batman_if)
39 {
40 struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
41 struct hashtable_t *hash = bat_priv->orig_hash;
42 struct hlist_node *walk;
43 struct hlist_head *head;
44 struct element_t *bucket;
45 struct orig_node *orig_node;
46 unsigned long *word;
47 int i;
48 size_t word_index;
49
50 spin_lock_bh(&bat_priv->orig_hash_lock);
51
52 for (i = 0; i < hash->size; i++) {
53 head = &hash->table[i];
54
55 rcu_read_lock();
56 hlist_for_each_entry_rcu(bucket, walk, head, hlist) {
57 orig_node = bucket->data;
58 word_index = batman_if->if_num * NUM_WORDS;
59 word = &(orig_node->bcast_own[word_index]);
60
61 bit_get_packet(bat_priv, word, 1, 0);
62 orig_node->bcast_own_sum[batman_if->if_num] =
63 bit_packet_count(word);
64 }
65 rcu_read_unlock();
66 }
67
68 spin_unlock_bh(&bat_priv->orig_hash_lock);
69 }
70
71 static void update_HNA(struct bat_priv *bat_priv, struct orig_node *orig_node,
72 unsigned char *hna_buff, int hna_buff_len)
73 {
74 if ((hna_buff_len != orig_node->hna_buff_len) ||
75 ((hna_buff_len > 0) &&
76 (orig_node->hna_buff_len > 0) &&
77 (memcmp(orig_node->hna_buff, hna_buff, hna_buff_len) != 0))) {
78
79 if (orig_node->hna_buff_len > 0)
80 hna_global_del_orig(bat_priv, orig_node,
81 "originator changed hna");
82
83 if ((hna_buff_len > 0) && (hna_buff))
84 hna_global_add_orig(bat_priv, orig_node,
85 hna_buff, hna_buff_len);
86 }
87 }
88
89 static void update_route(struct bat_priv *bat_priv,
90 struct orig_node *orig_node,
91 struct neigh_node *neigh_node,
92 unsigned char *hna_buff, int hna_buff_len)
93 {
94 struct neigh_node *neigh_node_tmp;
95
96 /* route deleted */
97 if ((orig_node->router) && (!neigh_node)) {
98
99 bat_dbg(DBG_ROUTES, bat_priv, "Deleting route towards: %pM\n",
100 orig_node->orig);
101 hna_global_del_orig(bat_priv, orig_node,
102 "originator timed out");
103
104 /* route added */
105 } else if ((!orig_node->router) && (neigh_node)) {
106
107 bat_dbg(DBG_ROUTES, bat_priv,
108 "Adding route towards: %pM (via %pM)\n",
109 orig_node->orig, neigh_node->addr);
110 hna_global_add_orig(bat_priv, orig_node,
111 hna_buff, hna_buff_len);
112
113 /* route changed */
114 } else {
115 bat_dbg(DBG_ROUTES, bat_priv,
116 "Changing route towards: %pM "
117 "(now via %pM - was via %pM)\n",
118 orig_node->orig, neigh_node->addr,
119 orig_node->router->addr);
120 }
121
122 if (neigh_node)
123 kref_get(&neigh_node->refcount);
124 neigh_node_tmp = orig_node->router;
125 orig_node->router = neigh_node;
126 if (neigh_node_tmp)
127 kref_put(&neigh_node_tmp->refcount, neigh_node_free_ref);
128 }
129
130
131 void update_routes(struct bat_priv *bat_priv, struct orig_node *orig_node,
132 struct neigh_node *neigh_node, unsigned char *hna_buff,
133 int hna_buff_len)
134 {
135
136 if (!orig_node)
137 return;
138
139 if (orig_node->router != neigh_node)
140 update_route(bat_priv, orig_node, neigh_node,
141 hna_buff, hna_buff_len);
142 /* maybe only the HNA changed */
143 else
144 update_HNA(bat_priv, orig_node, hna_buff, hna_buff_len);
145 }
146
147 static int is_bidirectional_neigh(struct orig_node *orig_node,
148 struct orig_node *orig_neigh_node,
149 struct batman_packet *batman_packet,
150 struct batman_if *if_incoming)
151 {
152 struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
153 struct neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL;
154 struct hlist_node *node;
155 unsigned char total_count;
156 int ret = 0;
157
158 if (orig_node == orig_neigh_node) {
159 rcu_read_lock();
160 hlist_for_each_entry_rcu(tmp_neigh_node, node,
161 &orig_node->neigh_list, list) {
162
163 if (compare_orig(tmp_neigh_node->addr,
164 orig_neigh_node->orig) &&
165 (tmp_neigh_node->if_incoming == if_incoming))
166 neigh_node = tmp_neigh_node;
167 }
168
169 if (!neigh_node)
170 neigh_node = create_neighbor(orig_node,
171 orig_neigh_node,
172 orig_neigh_node->orig,
173 if_incoming);
174 /* create_neighbor failed, return 0 */
175 if (!neigh_node)
176 goto unlock;
177
178 kref_get(&neigh_node->refcount);
179 rcu_read_unlock();
180
181 neigh_node->last_valid = jiffies;
182 } else {
183 /* find packet count of corresponding one hop neighbor */
184 rcu_read_lock();
185 hlist_for_each_entry_rcu(tmp_neigh_node, node,
186 &orig_neigh_node->neigh_list, list) {
187
188 if (compare_orig(tmp_neigh_node->addr,
189 orig_neigh_node->orig) &&
190 (tmp_neigh_node->if_incoming == if_incoming))
191 neigh_node = tmp_neigh_node;
192 }
193
194 if (!neigh_node)
195 neigh_node = create_neighbor(orig_neigh_node,
196 orig_neigh_node,
197 orig_neigh_node->orig,
198 if_incoming);
199 /* create_neighbor failed, return 0 */
200 if (!neigh_node)
201 goto unlock;
202
203 kref_get(&neigh_node->refcount);
204 rcu_read_unlock();
205 }
206
207 orig_node->last_valid = jiffies;
208
209 /* pay attention not to get a value bigger than 100% */
210 total_count = (orig_neigh_node->bcast_own_sum[if_incoming->if_num] >
211 neigh_node->real_packet_count ?
212 neigh_node->real_packet_count :
213 orig_neigh_node->bcast_own_sum[if_incoming->if_num]);
214
215 /* if we have too few packets (too little data) we set tq_own to zero */
216 /* if we receive too few packets it is not considered bidirectional */
217 if ((total_count < TQ_LOCAL_BIDRECT_SEND_MINIMUM) ||
218 (neigh_node->real_packet_count < TQ_LOCAL_BIDRECT_RECV_MINIMUM))
219 orig_neigh_node->tq_own = 0;
220 else
221 /* neigh_node->real_packet_count is never zero as we
222 * only purge old information when getting new
223 * information */
224 orig_neigh_node->tq_own = (TQ_MAX_VALUE * total_count) /
225 neigh_node->real_packet_count;
226
227 /*
228 * 1 - ((1-x) ** 3), normalized to TQ_MAX_VALUE. This only
229 * slightly affects nearly-symmetric links, but punishes
230 * asymmetric links more heavily. The result is a value
231 * between 0 and TQ_MAX_VALUE.
232 */
233 orig_neigh_node->tq_asym_penalty =
234 TQ_MAX_VALUE -
235 (TQ_MAX_VALUE *
236 (TQ_LOCAL_WINDOW_SIZE - neigh_node->real_packet_count) *
237 (TQ_LOCAL_WINDOW_SIZE - neigh_node->real_packet_count) *
238 (TQ_LOCAL_WINDOW_SIZE - neigh_node->real_packet_count)) /
239 (TQ_LOCAL_WINDOW_SIZE *
240 TQ_LOCAL_WINDOW_SIZE *
241 TQ_LOCAL_WINDOW_SIZE);
242
243 batman_packet->tq = ((batman_packet->tq *
244 orig_neigh_node->tq_own *
245 orig_neigh_node->tq_asym_penalty) /
246 (TQ_MAX_VALUE * TQ_MAX_VALUE));
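/* Worked example (illustrative only, assuming the usual defaults of
 * TQ_MAX_VALUE = 255 and TQ_LOCAL_WINDOW_SIZE = 64): if only half the
 * window was received (real_packet_count = 32), the asymmetry penalty is
 * 255 - (255 * 32 * 32 * 32) / (64 * 64 * 64) = 255 - 31 = 224, which
 * scales the forwarded tq by a factor of 224/255 (about 88%), while a
 * nearly full window loses almost nothing due to the cubic term. */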
247
248 bat_dbg(DBG_BATMAN, bat_priv,
249 "bidirectional: "
250 "orig = %-15pM neigh = %-15pM => own_bcast = %2i, "
251 "real recv = %2i, local tq: %3i, asym_penalty: %3i, "
252 "total tq: %3i\n",
253 orig_node->orig, orig_neigh_node->orig, total_count,
254 neigh_node->real_packet_count, orig_neigh_node->tq_own,
255 orig_neigh_node->tq_asym_penalty, batman_packet->tq);
256
257 /* if link has the minimum required transmission quality
258 * consider it bidirectional */
259 if (batman_packet->tq >= TQ_TOTAL_BIDRECT_LIMIT)
260 ret = 1;
261
262 goto out;
263
264 unlock:
265 rcu_read_unlock();
266 out:
267 if (neigh_node)
268 kref_put(&neigh_node->refcount, neigh_node_free_ref);
269 return ret;
270 }
271
272 static void update_orig(struct bat_priv *bat_priv,
273 struct orig_node *orig_node,
274 struct ethhdr *ethhdr,
275 struct batman_packet *batman_packet,
276 struct batman_if *if_incoming,
277 unsigned char *hna_buff, int hna_buff_len,
278 char is_duplicate)
279 {
280 struct neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL;
281 struct hlist_node *node;
282 int tmp_hna_buff_len;
283
284 bat_dbg(DBG_BATMAN, bat_priv, "update_originator(): "
285 "Searching and updating originator entry of received packet\n");
286
287 rcu_read_lock();
288 hlist_for_each_entry_rcu(tmp_neigh_node, node,
289 &orig_node->neigh_list, list) {
290 if (compare_orig(tmp_neigh_node->addr, ethhdr->h_source) &&
291 (tmp_neigh_node->if_incoming == if_incoming)) {
292 neigh_node = tmp_neigh_node;
293 continue;
294 }
295
296 if (is_duplicate)
297 continue;
298
299 ring_buffer_set(tmp_neigh_node->tq_recv,
300 &tmp_neigh_node->tq_index, 0);
301 tmp_neigh_node->tq_avg =
302 ring_buffer_avg(tmp_neigh_node->tq_recv);
303 }
304
305 if (!neigh_node) {
306 struct orig_node *orig_tmp;
307
308 orig_tmp = get_orig_node(bat_priv, ethhdr->h_source);
309 if (!orig_tmp)
310 goto unlock;
311
312 neigh_node = create_neighbor(orig_node, orig_tmp,
313 ethhdr->h_source, if_incoming);
314 if (!neigh_node)
315 goto unlock;
316 } else
317 bat_dbg(DBG_BATMAN, bat_priv,
318 "Updating existing last-hop neighbor of originator\n");
319
320 kref_get(&neigh_node->refcount);
321 rcu_read_unlock();
322
323 orig_node->flags = batman_packet->flags;
324 neigh_node->last_valid = jiffies;
325
326 ring_buffer_set(neigh_node->tq_recv,
327 &neigh_node->tq_index,
328 batman_packet->tq);
329 neigh_node->tq_avg = ring_buffer_avg(neigh_node->tq_recv);
330
331 if (!is_duplicate) {
332 orig_node->last_ttl = batman_packet->ttl;
333 neigh_node->last_ttl = batman_packet->ttl;
334 }
335
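/* the HNA buffer handed in may be longer than what this OGM actually
 * announces - use at most num_hna * ETH_ALEN bytes of it */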
336 tmp_hna_buff_len = (hna_buff_len > batman_packet->num_hna * ETH_ALEN ?
337 batman_packet->num_hna * ETH_ALEN : hna_buff_len);
338
339 /* if this neighbor already is our next hop there is nothing
340 * to change */
341 if (orig_node->router == neigh_node)
342 goto update_hna;
343
344 /* if this neighbor does not offer a better TQ we won't consider it */
345 if ((orig_node->router) &&
346 (orig_node->router->tq_avg > neigh_node->tq_avg))
347 goto update_hna;
348
349 /* if the TQ is the same and the link is not more symmetric we
350 * won't consider it either */
351 if ((orig_node->router) &&
352 ((neigh_node->tq_avg == orig_node->router->tq_avg) &&
353 (orig_node->router->orig_node->bcast_own_sum[if_incoming->if_num]
354 >= neigh_node->orig_node->bcast_own_sum[if_incoming->if_num])))
355 goto update_hna;
356
357 update_routes(bat_priv, orig_node, neigh_node,
358 hna_buff, tmp_hna_buff_len);
359 goto update_gw;
360
361 update_hna:
362 update_routes(bat_priv, orig_node, orig_node->router,
363 hna_buff, tmp_hna_buff_len);
364
365 update_gw:
366 if (orig_node->gw_flags != batman_packet->gw_flags)
367 gw_node_update(bat_priv, orig_node, batman_packet->gw_flags);
368
369 orig_node->gw_flags = batman_packet->gw_flags;
370
371 /* restart gateway selection if fast or late switching was enabled */
372 if ((orig_node->gw_flags) &&
373 (atomic_read(&bat_priv->gw_mode) == GW_MODE_CLIENT) &&
374 (atomic_read(&bat_priv->gw_sel_class) > 2))
375 gw_check_election(bat_priv, orig_node);
376
377 goto out;
378
379 unlock:
380 rcu_read_unlock();
381 out:
382 if (neigh_node)
383 kref_put(&neigh_node->refcount, neigh_node_free_ref);
384 }
385
386 /* checks whether the host restarted and is still within the protection time.
387 * returns:
388 * 0 if the packet is to be accepted
389 * 1 if the packet is to be ignored.
390 */
391 static int window_protected(struct bat_priv *bat_priv,
392 int32_t seq_num_diff,
393 unsigned long *last_reset)
394 {
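/* Example with the values assumed from main.h at the time
 * (TQ_LOCAL_WINDOW_SIZE = 64, EXPECTED_SEQNO_RANGE = 65536,
 * RESET_PROTECTION_MS = 30000): a seqno that jumps back by 64 or more,
 * or forward by 65536 or more, is ignored while the protection is
 * active; once 30 seconds have passed since the last reset, the
 * protection restarts and the packet is accepted. */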
395 if ((seq_num_diff <= -TQ_LOCAL_WINDOW_SIZE)
396 || (seq_num_diff >= EXPECTED_SEQNO_RANGE)) {
397 if (time_after(jiffies, *last_reset +
398 msecs_to_jiffies(RESET_PROTECTION_MS))) {
399
400 *last_reset = jiffies;
401 bat_dbg(DBG_BATMAN, bat_priv,
402 "old packet received, start protection\n");
403
404 return 0;
405 } else
406 return 1;
407 }
408 return 0;
409 }
410
411 /* processes a batman packet for all interfaces, adjusts the sequence number and
412 * finds out whether it is a duplicate.
413 * returns:
414 * 1 the packet is a duplicate
415 * 0 the packet has not yet been received
416 * -1 the packet is old and has been received while the seqno window
417 * was protected. Caller should drop it.
418 */
419 static char count_real_packets(struct ethhdr *ethhdr,
420 struct batman_packet *batman_packet,
421 struct batman_if *if_incoming)
422 {
423 struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
424 struct orig_node *orig_node;
425 struct neigh_node *tmp_neigh_node;
426 struct hlist_node *node;
427 char is_duplicate = 0;
428 int32_t seq_diff;
429 int need_update = 0;
430 int set_mark;
431
432 orig_node = get_orig_node(bat_priv, batman_packet->orig);
433 if (!orig_node)
434 return 0;
435
436 seq_diff = batman_packet->seqno - orig_node->last_real_seqno;
437
438 /* signal to the caller that the packet is to be dropped. */
439 if (window_protected(bat_priv, seq_diff,
440 &orig_node->batman_seqno_reset))
441 return -1;
442
443 rcu_read_lock();
444 hlist_for_each_entry_rcu(tmp_neigh_node, node,
445 &orig_node->neigh_list, list) {
446
447 is_duplicate |= get_bit_status(tmp_neigh_node->real_bits,
448 orig_node->last_real_seqno,
449 batman_packet->seqno);
450
451 if (compare_orig(tmp_neigh_node->addr, ethhdr->h_source) &&
452 (tmp_neigh_node->if_incoming == if_incoming))
453 set_mark = 1;
454 else
455 set_mark = 0;
456
457 /* if the window moved, set the update flag. */
458 need_update |= bit_get_packet(bat_priv,
459 tmp_neigh_node->real_bits,
460 seq_diff, set_mark);
461
462 tmp_neigh_node->real_packet_count =
463 bit_packet_count(tmp_neigh_node->real_bits);
464 }
465 rcu_read_unlock();
466
467 if (need_update) {
468 bat_dbg(DBG_BATMAN, bat_priv,
469 "updating last_seqno: old %d, new %d\n",
470 orig_node->last_real_seqno, batman_packet->seqno);
471 orig_node->last_real_seqno = batman_packet->seqno;
472 }
473
474 return is_duplicate;
475 }
476
477 /* copy primary address for bonding */
478 static void mark_bonding_address(struct orig_node *orig_node,
479 struct orig_node *orig_neigh_node,
480 struct batman_packet *batman_packet)
481
482 {
483 if (batman_packet->flags & PRIMARIES_FIRST_HOP)
484 memcpy(orig_neigh_node->primary_addr,
485 orig_node->orig, ETH_ALEN);
486
487 return;
488 }
489
490 /* mark possible bond.candidates in the neighbor list */
491 void update_bonding_candidates(struct orig_node *orig_node)
492 {
493 int candidates;
494 int interference_candidate;
495 int best_tq;
496 struct hlist_node *node, *node2;
497 struct neigh_node *tmp_neigh_node, *tmp_neigh_node2;
498 struct neigh_node *first_candidate, *last_candidate;
499
500 /* update the candidates for this originator */
501 if (!orig_node->router) {
502 orig_node->bond.candidates = 0;
503 return;
504 }
505
506 best_tq = orig_node->router->tq_avg;
507
508 /* update bond.candidates */
509
510 candidates = 0;
511
512 /* mark other nodes which also received "PRIMARIES FIRST HOP" packets
513 * as "bonding partner" */
514
515 /* first, zero the list */
516 rcu_read_lock();
517 hlist_for_each_entry_rcu(tmp_neigh_node, node,
518 &orig_node->neigh_list, list) {
519 tmp_neigh_node->next_bond_candidate = NULL;
520 }
521 rcu_read_unlock();
522
523 first_candidate = NULL;
524 last_candidate = NULL;
525
526 rcu_read_lock();
527 hlist_for_each_entry_rcu(tmp_neigh_node, node,
528 &orig_node->neigh_list, list) {
529
530 /* only consider if it has the same primary address ... */
531 if (memcmp(orig_node->orig,
532 tmp_neigh_node->orig_node->primary_addr,
533 ETH_ALEN) != 0)
534 continue;
535
536 /* ... and is good enough to be considered */
537 if (tmp_neigh_node->tq_avg < best_tq - BONDING_TQ_THRESHOLD)
538 continue;
539
540 /* check if we have another candidate with the same
541 * mac address or interface. If we do, we won't
542 * select this candidate because of possible interference. */
543
544 interference_candidate = 0;
545 hlist_for_each_entry_rcu(tmp_neigh_node2, node2,
546 &orig_node->neigh_list, list) {
547
548 if (tmp_neigh_node2 == tmp_neigh_node)
549 continue;
550
551 /* we only care if the other candidate is even
552 * considered as a candidate. */
553 if (!tmp_neigh_node2->next_bond_candidate)
554 continue;
555
556
557 if ((tmp_neigh_node->if_incoming ==
558 tmp_neigh_node2->if_incoming)
559 || (memcmp(tmp_neigh_node->addr,
560 tmp_neigh_node2->addr, ETH_ALEN) == 0)) {
561
562 interference_candidate = 1;
563 break;
564 }
565 }
566 /* don't consider it any further if it is an interference candidate */
567 if (interference_candidate)
568 continue;
569
570 if (!first_candidate) {
571 first_candidate = tmp_neigh_node;
572 tmp_neigh_node->next_bond_candidate = first_candidate;
573 } else
574 tmp_neigh_node->next_bond_candidate = last_candidate;
575
576 last_candidate = tmp_neigh_node;
577
578 candidates++;
579 }
580 rcu_read_unlock();
581
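/* close the ring: next_bond_candidate now links all candidates into a
 * circular list starting at first_candidate, which find_router() walks
 * in a round robin fashion */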
582 if (candidates > 0) {
583 first_candidate->next_bond_candidate = last_candidate;
584 orig_node->bond.selected = first_candidate;
585 }
586
587 orig_node->bond.candidates = candidates;
588 }
589
590 void receive_bat_packet(struct ethhdr *ethhdr,
591 struct batman_packet *batman_packet,
592 unsigned char *hna_buff, int hna_buff_len,
593 struct batman_if *if_incoming)
594 {
595 struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
596 struct batman_if *batman_if;
597 struct orig_node *orig_neigh_node, *orig_node;
598 char has_directlink_flag;
599 char is_my_addr = 0, is_my_orig = 0, is_my_oldorig = 0;
600 char is_broadcast = 0, is_bidirectional, is_single_hop_neigh;
601 char is_duplicate;
602 uint32_t if_incoming_seqno;
603
604 /* Silently drop when the batman packet is actually not a
605 * correct packet.
606 *
607 * This might happen if a packet is padded (e.g. Ethernet has a
608 * minimum frame length of 64 bytes) and the aggregation interprets
609 * it as an additional length.
610 *
611 * TODO: A more sane solution would be to have a bit in the
612 * batman_packet to detect whether the packet is the last
613 * packet in an aggregation. Here we expect that the padding
614 * is always zero (or not 0x01)
615 */
616 if (batman_packet->packet_type != BAT_PACKET)
617 return;
618
619 /* could be changed by schedule_own_packet() */
620 if_incoming_seqno = atomic_read(&if_incoming->seqno);
621
622 has_directlink_flag = (batman_packet->flags & DIRECTLINK ? 1 : 0);
623
624 is_single_hop_neigh = (compare_orig(ethhdr->h_source,
625 batman_packet->orig) ? 1 : 0);
626
627 bat_dbg(DBG_BATMAN, bat_priv,
628 "Received BATMAN packet via NB: %pM, IF: %s [%pM] "
629 "(from OG: %pM, via prev OG: %pM, seqno %d, tq %d, "
630 "TTL %d, V %d, IDF %d)\n",
631 ethhdr->h_source, if_incoming->net_dev->name,
632 if_incoming->net_dev->dev_addr, batman_packet->orig,
633 batman_packet->prev_sender, batman_packet->seqno,
634 batman_packet->tq, batman_packet->ttl, batman_packet->version,
635 has_directlink_flag);
636
637 rcu_read_lock();
638 list_for_each_entry_rcu(batman_if, &if_list, list) {
639 if (batman_if->if_status != IF_ACTIVE)
640 continue;
641
642 if (batman_if->soft_iface != if_incoming->soft_iface)
643 continue;
644
645 if (compare_orig(ethhdr->h_source,
646 batman_if->net_dev->dev_addr))
647 is_my_addr = 1;
648
649 if (compare_orig(batman_packet->orig,
650 batman_if->net_dev->dev_addr))
651 is_my_orig = 1;
652
653 if (compare_orig(batman_packet->prev_sender,
654 batman_if->net_dev->dev_addr))
655 is_my_oldorig = 1;
656
657 if (compare_orig(ethhdr->h_source, broadcast_addr))
658 is_broadcast = 1;
659 }
660 rcu_read_unlock();
661
662 if (batman_packet->version != COMPAT_VERSION) {
663 bat_dbg(DBG_BATMAN, bat_priv,
664 "Drop packet: incompatible batman version (%i)\n",
665 batman_packet->version);
666 return;
667 }
668
669 if (is_my_addr) {
670 bat_dbg(DBG_BATMAN, bat_priv,
671 "Drop packet: received my own broadcast (sender: %pM"
672 ")\n",
673 ethhdr->h_source);
674 return;
675 }
676
677 if (is_broadcast) {
678 bat_dbg(DBG_BATMAN, bat_priv, "Drop packet: "
679 "ignoring all packets with broadcast source addr (sender: %pM"
680 ")\n", ethhdr->h_source);
681 return;
682 }
683
684 if (is_my_orig) {
685 unsigned long *word;
686 int offset;
687
688 orig_neigh_node = get_orig_node(bat_priv, ethhdr->h_source);
689
690 if (!orig_neigh_node)
691 return;
692
693 /* neighbor has to indicate direct link and it has to
694 * come via the corresponding interface */
695 /* if the received seqno equals the last sent seqno, save the new
696 * seqno for bidirectional check */
697 if (has_directlink_flag &&
698 compare_orig(if_incoming->net_dev->dev_addr,
699 batman_packet->orig) &&
700 (batman_packet->seqno - if_incoming_seqno + 2 == 0)) {
701 offset = if_incoming->if_num * NUM_WORDS;
702 word = &(orig_neigh_node->bcast_own[offset]);
703 bit_mark(word, 0);
704 orig_neigh_node->bcast_own_sum[if_incoming->if_num] =
705 bit_packet_count(word);
706 }
707
708 bat_dbg(DBG_BATMAN, bat_priv, "Drop packet: "
709 "originator packet from myself (via neighbor)\n");
710 return;
711 }
712
713 if (is_my_oldorig) {
714 bat_dbg(DBG_BATMAN, bat_priv,
715 "Drop packet: ignoring all rebroadcast echos (sender: "
716 "%pM)\n", ethhdr->h_source);
717 return;
718 }
719
720 orig_node = get_orig_node(bat_priv, batman_packet->orig);
721 if (!orig_node)
722 return;
723
724 is_duplicate = count_real_packets(ethhdr, batman_packet, if_incoming);
725
726 if (is_duplicate == -1) {
727 bat_dbg(DBG_BATMAN, bat_priv,
728 "Drop packet: packet within seqno protection time "
729 "(sender: %pM)\n", ethhdr->h_source);
730 return;
731 }
732
733 if (batman_packet->tq == 0) {
734 bat_dbg(DBG_BATMAN, bat_priv,
735 "Drop packet: originator packet with tq equal 0\n");
736 return;
737 }
738
739 /* avoid temporary routing loops */
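/* i.e. drop the OGM if it was last rebroadcast (prev_sender) by the
 * neighbor we already use as next hop towards this originator, it is
 * not a first-hop OGM, and our route towards that neighbor points at
 * the neighbor itself - re-learning the route from such an echo could
 * create a short-lived loop */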
740 if ((orig_node->router) &&
741 (orig_node->router->orig_node->router) &&
742 (compare_orig(orig_node->router->addr,
743 batman_packet->prev_sender)) &&
744 !(compare_orig(batman_packet->orig, batman_packet->prev_sender)) &&
745 (compare_orig(orig_node->router->addr,
746 orig_node->router->orig_node->router->addr))) {
747 bat_dbg(DBG_BATMAN, bat_priv,
748 "Drop packet: ignoring all rebroadcast packets that "
749 "may make me loop (sender: %pM)\n", ethhdr->h_source);
750 return;
751 }
752
753 /* if sender is a direct neighbor the sender mac equals
754 * originator mac */
755 orig_neigh_node = (is_single_hop_neigh ?
756 orig_node :
757 get_orig_node(bat_priv, ethhdr->h_source));
758 if (!orig_neigh_node)
759 return;
760
761 /* drop packet if sender is not a direct neighbor and if we
762 * don't route towards it */
763 if (!is_single_hop_neigh && (!orig_neigh_node->router)) {
764 bat_dbg(DBG_BATMAN, bat_priv,
765 "Drop packet: OGM via unknown neighbor!\n");
766 return;
767 }
768
769 is_bidirectional = is_bidirectional_neigh(orig_node, orig_neigh_node,
770 batman_packet, if_incoming);
771
772 /* update ranking if it is not a duplicate or has the same
773 * seqno and similar ttl as the non-duplicate */
774 if (is_bidirectional &&
775 (!is_duplicate ||
776 ((orig_node->last_real_seqno == batman_packet->seqno) &&
777 (orig_node->last_ttl - 3 <= batman_packet->ttl))))
778 update_orig(bat_priv, orig_node, ethhdr, batman_packet,
779 if_incoming, hna_buff, hna_buff_len, is_duplicate);
780
781 mark_bonding_address(orig_node, orig_neigh_node, batman_packet);
782 update_bonding_candidates(orig_node);
783
784 /* is single hop (direct) neighbor */
785 if (is_single_hop_neigh) {
786
787 /* mark direct link on incoming interface */
788 schedule_forward_packet(orig_node, ethhdr, batman_packet,
789 1, hna_buff_len, if_incoming);
790
791 bat_dbg(DBG_BATMAN, bat_priv, "Forwarding packet: "
792 "rebroadcast neighbor packet with direct link flag\n");
793 return;
794 }
795
796 /* multihop originator */
797 if (!is_bidirectional) {
798 bat_dbg(DBG_BATMAN, bat_priv,
799 "Drop packet: not received via bidirectional link\n");
800 return;
801 }
802
803 if (is_duplicate) {
804 bat_dbg(DBG_BATMAN, bat_priv,
805 "Drop packet: duplicate packet received\n");
806 return;
807 }
808
809 bat_dbg(DBG_BATMAN, bat_priv,
810 "Forwarding packet: rebroadcast originator packet\n");
811 schedule_forward_packet(orig_node, ethhdr, batman_packet,
812 0, hna_buff_len, if_incoming);
813 }
814
815 int recv_bat_packet(struct sk_buff *skb, struct batman_if *batman_if)
816 {
817 struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
818 struct ethhdr *ethhdr;
819
820 /* drop packet if it doesn't have the necessary minimum size */
821 if (unlikely(!pskb_may_pull(skb, sizeof(struct batman_packet))))
822 return NET_RX_DROP;
823
824 ethhdr = (struct ethhdr *)skb_mac_header(skb);
825
826 /* packet with broadcast indication but unicast recipient */
827 if (!is_broadcast_ether_addr(ethhdr->h_dest))
828 return NET_RX_DROP;
829
830 /* packet with broadcast sender address */
831 if (is_broadcast_ether_addr(ethhdr->h_source))
832 return NET_RX_DROP;
833
834 /* create a copy of the skb, if needed, to modify it. */
835 if (skb_cow(skb, 0) < 0)
836 return NET_RX_DROP;
837
838 /* keep skb linear */
839 if (skb_linearize(skb) < 0)
840 return NET_RX_DROP;
841
842 ethhdr = (struct ethhdr *)skb_mac_header(skb);
843
844 spin_lock_bh(&bat_priv->orig_hash_lock);
845 receive_aggr_bat_packet(ethhdr,
846 skb->data,
847 skb_headlen(skb),
848 batman_if);
849 spin_unlock_bh(&bat_priv->orig_hash_lock);
850
851 kfree_skb(skb);
852 return NET_RX_SUCCESS;
853 }
854
855 static int recv_my_icmp_packet(struct bat_priv *bat_priv,
856 struct sk_buff *skb, size_t icmp_len)
857 {
858 struct orig_node *orig_node;
859 struct icmp_packet_rr *icmp_packet;
860 struct batman_if *batman_if;
861 int ret;
862 uint8_t dstaddr[ETH_ALEN];
863
864 icmp_packet = (struct icmp_packet_rr *)skb->data;
865
866 /* add data to device queue */
867 if (icmp_packet->msg_type != ECHO_REQUEST) {
868 bat_socket_receive_packet(icmp_packet, icmp_len);
869 return NET_RX_DROP;
870 }
871
872 if (!bat_priv->primary_if)
873 return NET_RX_DROP;
874
875 /* answer echo request (ping) */
876 /* get routing information */
877 spin_lock_bh(&bat_priv->orig_hash_lock);
878 rcu_read_lock();
879 orig_node = ((struct orig_node *)hash_find(bat_priv->orig_hash,
880 compare_orig, choose_orig,
881 icmp_packet->orig));
882 rcu_read_unlock();
883 ret = NET_RX_DROP;
884
885 if ((orig_node) && (orig_node->router)) {
886
887 /* don't lock while sending the packets ... we therefore
888 * copy the required data before sending */
889 batman_if = orig_node->router->if_incoming;
890 memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
891 spin_unlock_bh(&bat_priv->orig_hash_lock);
892
893 /* create a copy of the skb, if needed, to modify it. */
894 if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
895 return NET_RX_DROP;
896
897 icmp_packet = (struct icmp_packet_rr *)skb->data;
898
899 memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
900 memcpy(icmp_packet->orig,
901 bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
902 icmp_packet->msg_type = ECHO_REPLY;
903 icmp_packet->ttl = TTL;
904
905 send_skb_packet(skb, batman_if, dstaddr);
906 ret = NET_RX_SUCCESS;
907
908 } else
909 spin_unlock_bh(&bat_priv->orig_hash_lock);
910
911 return ret;
912 }
913
914 static int recv_icmp_ttl_exceeded(struct bat_priv *bat_priv,
915 struct sk_buff *skb)
916 {
917 struct orig_node *orig_node;
918 struct icmp_packet *icmp_packet;
919 struct batman_if *batman_if;
920 int ret;
921 uint8_t dstaddr[ETH_ALEN];
922
923 icmp_packet = (struct icmp_packet *)skb->data;
924
925 /* send TTL exceeded if packet is an echo request (traceroute) */
926 if (icmp_packet->msg_type != ECHO_REQUEST) {
927 pr_debug("Warning - can't forward icmp packet from %pM to "
928 "%pM: ttl exceeded\n", icmp_packet->orig,
929 icmp_packet->dst);
930 return NET_RX_DROP;
931 }
932
933 if (!bat_priv->primary_if)
934 return NET_RX_DROP;
935
936 /* get routing information */
937 spin_lock_bh(&bat_priv->orig_hash_lock);
938 rcu_read_lock();
939 orig_node = ((struct orig_node *)
940 hash_find(bat_priv->orig_hash, compare_orig, choose_orig,
941 icmp_packet->orig));
942 rcu_read_unlock();
943 ret = NET_RX_DROP;
944
945 if ((orig_node) && (orig_node->router)) {
946
947 /* don't lock while sending the packets ... we therefore
948 * copy the required data before sending */
949 batman_if = orig_node->router->if_incoming;
950 memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
951 spin_unlock_bh(&bat_priv->orig_hash_lock);
952
953 /* create a copy of the skb, if needed, to modify it. */
954 if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
955 return NET_RX_DROP;
956
957 icmp_packet = (struct icmp_packet *) skb->data;
958
959 memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
960 memcpy(icmp_packet->orig,
961 bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
962 icmp_packet->msg_type = TTL_EXCEEDED;
963 icmp_packet->ttl = TTL;
964
965 send_skb_packet(skb, batman_if, dstaddr);
966 ret = NET_RX_SUCCESS;
967
968 } else
969 spin_unlock_bh(&bat_priv->orig_hash_lock);
970
971 return ret;
972 }
973
974
975 int recv_icmp_packet(struct sk_buff *skb, struct batman_if *recv_if)
976 {
977 struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
978 struct icmp_packet_rr *icmp_packet;
979 struct ethhdr *ethhdr;
980 struct orig_node *orig_node;
981 struct batman_if *batman_if;
982 int hdr_size = sizeof(struct icmp_packet);
983 int ret;
984 uint8_t dstaddr[ETH_ALEN];
985
986 /**
987 * we truncate all incoming icmp packets if they don't match our size
988 */
989 if (skb->len >= sizeof(struct icmp_packet_rr))
990 hdr_size = sizeof(struct icmp_packet_rr);
991
992 /* drop packet if it doesn't have the necessary minimum size */
993 if (unlikely(!pskb_may_pull(skb, hdr_size)))
994 return NET_RX_DROP;
995
996 ethhdr = (struct ethhdr *)skb_mac_header(skb);
997
998 /* packet with unicast indication but broadcast recipient */
999 if (is_broadcast_ether_addr(ethhdr->h_dest))
1000 return NET_RX_DROP;
1001
1002 /* packet with broadcast sender address */
1003 if (is_broadcast_ether_addr(ethhdr->h_source))
1004 return NET_RX_DROP;
1005
1006 /* not for me */
1007 if (!is_my_mac(ethhdr->h_dest))
1008 return NET_RX_DROP;
1009
1010 icmp_packet = (struct icmp_packet_rr *)skb->data;
1011
1012 /* add record route information if not full */
1013 if ((hdr_size == sizeof(struct icmp_packet_rr)) &&
1014 (icmp_packet->rr_cur < BAT_RR_LEN)) {
1015 memcpy(&(icmp_packet->rr[icmp_packet->rr_cur]),
1016 ethhdr->h_dest, ETH_ALEN);
1017 icmp_packet->rr_cur++;
1018 }
1019
1020 /* packet for me */
1021 if (is_my_mac(icmp_packet->dst))
1022 return recv_my_icmp_packet(bat_priv, skb, hdr_size);
1023
1024 /* TTL exceeded */
1025 if (icmp_packet->ttl < 2)
1026 return recv_icmp_ttl_exceeded(bat_priv, skb);
1027
1028 ret = NET_RX_DROP;
1029
1030 /* get routing information */
1031 spin_lock_bh(&bat_priv->orig_hash_lock);
1032 rcu_read_lock();
1033 orig_node = ((struct orig_node *)
1034 hash_find(bat_priv->orig_hash, compare_orig, choose_orig,
1035 icmp_packet->dst));
1036 rcu_read_unlock();
1037
1038 if ((orig_node) && (orig_node->router)) {
1039
1040 /* don't lock while sending the packets ... we therefore
1041 * copy the required data before sending */
1042 batman_if = orig_node->router->if_incoming;
1043 memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
1044 spin_unlock_bh(&bat_priv->orig_hash_lock);
1045
1046 /* create a copy of the skb, if needed, to modify it. */
1047 if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
1048 return NET_RX_DROP;
1049
1050 icmp_packet = (struct icmp_packet_rr *)skb->data;
1051
1052 /* decrement ttl */
1053 icmp_packet->ttl--;
1054
1055 /* route it */
1056 send_skb_packet(skb, batman_if, dstaddr);
1057 ret = NET_RX_SUCCESS;
1058
1059 } else
1060 spin_unlock_bh(&bat_priv->orig_hash_lock);
1061
1062 return ret;
1063 }
1064
1065 /* find a suitable router for this originator, and use
1066 * bonding if possible. */
1067 struct neigh_node *find_router(struct bat_priv *bat_priv,
1068 struct orig_node *orig_node,
1069 struct batman_if *recv_if)
1070 {
1071 struct orig_node *primary_orig_node;
1072 struct orig_node *router_orig;
1073 struct neigh_node *router, *first_candidate, *best_router;
1074 static uint8_t zero_mac[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
1075 int bonding_enabled;
1076
1077 if (!orig_node)
1078 return NULL;
1079
1080 if (!orig_node->router)
1081 return NULL;
1082
1083 /* without bonding, the first node should
1084 * always choose the default router. */
1085
1086 bonding_enabled = atomic_read(&bat_priv->bonding);
1087
1088 if ((!recv_if) && (!bonding_enabled))
1089 return orig_node->router;
1090
1091 router_orig = orig_node->router->orig_node;
1092
1093 /* if we have something in the primary_addr, we can search
1094 * for a potential bonding candidate. */
1095 if (memcmp(router_orig->primary_addr, zero_mac, ETH_ALEN) == 0)
1096 return orig_node->router;
1097
1098 /* find the orig_node which has the primary interface. might
1099 * even be the same as our router_orig in many cases */
1100
1101 if (memcmp(router_orig->primary_addr,
1102 router_orig->orig, ETH_ALEN) == 0) {
1103 primary_orig_node = router_orig;
1104 } else {
1105 rcu_read_lock();
1106 primary_orig_node = hash_find(bat_priv->orig_hash, compare_orig,
1107 choose_orig,
1108 router_orig->primary_addr);
1109 rcu_read_unlock();
1110
1111 if (!primary_orig_node)
1112 return orig_node->router;
1113 }
1114
1115 /* with less than 2 candidates, we can't do any
1116 * bonding and prefer the original router. */
1117
1118 if (primary_orig_node->bond.candidates < 2)
1119 return orig_node->router;
1120
1121
1122 /* all nodes in between should choose a candidate which
1123 * is not on the interface where the packet came
1124 * in. */
1125 first_candidate = primary_orig_node->bond.selected;
1126 router = first_candidate;
1127
1128 if (bonding_enabled) {
1129 /* in the bonding case, send the packets in a round
1130 * robin fashion over the remaining interfaces. */
1131 do {
1132 /* recv_if == NULL on the first node. */
1133 if (router->if_incoming != recv_if)
1134 break;
1135
1136 router = router->next_bond_candidate;
1137 } while (router != first_candidate);
1138
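/* remember the candidate after the one we just picked so that the
 * next packet towards this originator starts from a different
 * bonding partner */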
1139 primary_orig_node->bond.selected = router->next_bond_candidate;
1140
1141 } else {
1142 /* if bonding is disabled, use the best of the
1143 * remaining candidates which are not using
1144 * this interface. */
1145 best_router = first_candidate;
1146
1147 do {
1148 /* recv_if == NULL on the first node. */
1149 if ((router->if_incoming != recv_if) &&
1150 (router->tq_avg > best_router->tq_avg))
1151 best_router = router;
1152
1153 router = router->next_bond_candidate;
1154 } while (router != first_candidate);
1155
1156 router = best_router;
1157 }
1158
1159 return router;
1160 }
1161
1162 static int check_unicast_packet(struct sk_buff *skb, int hdr_size)
1163 {
1164 struct ethhdr *ethhdr;
1165
1166 /* drop packet if it doesn't have the necessary minimum size */
1167 if (unlikely(!pskb_may_pull(skb, hdr_size)))
1168 return -1;
1169
1170 ethhdr = (struct ethhdr *)skb_mac_header(skb);
1171
1172 /* packet with unicast indication but broadcast recipient */
1173 if (is_broadcast_ether_addr(ethhdr->h_dest))
1174 return -1;
1175
1176 /* packet with broadcast sender address */
1177 if (is_broadcast_ether_addr(ethhdr->h_source))
1178 return -1;
1179
1180 /* not for me */
1181 if (!is_my_mac(ethhdr->h_dest))
1182 return -1;
1183
1184 return 0;
1185 }
1186
1187 int route_unicast_packet(struct sk_buff *skb, struct batman_if *recv_if,
1188 int hdr_size)
1189 {
1190 struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
1191 struct orig_node *orig_node;
1192 struct neigh_node *router;
1193 struct batman_if *batman_if;
1194 uint8_t dstaddr[ETH_ALEN];
1195 struct unicast_packet *unicast_packet;
1196 struct ethhdr *ethhdr = (struct ethhdr *)skb_mac_header(skb);
1197 int ret;
1198 struct sk_buff *new_skb;
1199
1200 unicast_packet = (struct unicast_packet *)skb->data;
1201
1202 /* TTL exceeded */
1203 if (unicast_packet->ttl < 2) {
1204 pr_debug("Warning - can't forward unicast packet from %pM to "
1205 "%pM: ttl exceeded\n", ethhdr->h_source,
1206 unicast_packet->dest);
1207 return NET_RX_DROP;
1208 }
1209
1210 /* get routing information */
1211 spin_lock_bh(&bat_priv->orig_hash_lock);
1212 rcu_read_lock();
1213 orig_node = ((struct orig_node *)
1214 hash_find(bat_priv->orig_hash, compare_orig, choose_orig,
1215 unicast_packet->dest));
1216 rcu_read_unlock();
1217
1218 router = find_router(bat_priv, orig_node, recv_if);
1219
1220 if (!router) {
1221 spin_unlock_bh(&bat_priv->orig_hash_lock);
1222 return NET_RX_DROP;
1223 }
1224
1225 /* don't lock while sending the packets ... we therefore
1226 * copy the required data before sending */
1227
1228 batman_if = router->if_incoming;
1229 memcpy(dstaddr, router->addr, ETH_ALEN);
1230
1231 spin_unlock_bh(&bat_priv->orig_hash_lock);
1232
1233 /* create a copy of the skb, if needed, to modify it. */
1234 if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
1235 return NET_RX_DROP;
1236
1237 unicast_packet = (struct unicast_packet *)skb->data;
1238
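/* fragment the frame if fragmentation is enabled and a plain unicast
 * packet would not fit into the MTU of the outgoing interface */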
1239 if (unicast_packet->packet_type == BAT_UNICAST &&
1240 atomic_read(&bat_priv->fragmentation) &&
1241 skb->len > batman_if->net_dev->mtu)
1242 return frag_send_skb(skb, bat_priv, batman_if,
1243 dstaddr);
1244
1245 if (unicast_packet->packet_type == BAT_UNICAST_FRAG &&
1246 frag_can_reassemble(skb, batman_if->net_dev->mtu)) {
1247
1248 ret = frag_reassemble_skb(skb, bat_priv, &new_skb);
1249
1250 if (ret == NET_RX_DROP)
1251 return NET_RX_DROP;
1252
1253 /* packet was buffered for late merge */
1254 if (!new_skb)
1255 return NET_RX_SUCCESS;
1256
1257 skb = new_skb;
1258 unicast_packet = (struct unicast_packet *)skb->data;
1259 }
1260
1261 /* decrement ttl */
1262 unicast_packet->ttl--;
1263
1264 /* route it */
1265 send_skb_packet(skb, batman_if, dstaddr);
1266
1267 return NET_RX_SUCCESS;
1268 }
1269
1270 int recv_unicast_packet(struct sk_buff *skb, struct batman_if *recv_if)
1271 {
1272 struct unicast_packet *unicast_packet;
1273 int hdr_size = sizeof(struct unicast_packet);
1274
1275 if (check_unicast_packet(skb, hdr_size) < 0)
1276 return NET_RX_DROP;
1277
1278 unicast_packet = (struct unicast_packet *)skb->data;
1279
1280 /* packet for me */
1281 if (is_my_mac(unicast_packet->dest)) {
1282 interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size);
1283 return NET_RX_SUCCESS;
1284 }
1285
1286 return route_unicast_packet(skb, recv_if, hdr_size);
1287 }
1288
1289 int recv_ucast_frag_packet(struct sk_buff *skb, struct batman_if *recv_if)
1290 {
1291 struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
1292 struct unicast_frag_packet *unicast_packet;
1293 int hdr_size = sizeof(struct unicast_frag_packet);
1294 struct sk_buff *new_skb = NULL;
1295 int ret;
1296
1297 if (check_unicast_packet(skb, hdr_size) < 0)
1298 return NET_RX_DROP;
1299
1300 unicast_packet = (struct unicast_frag_packet *)skb->data;
1301
1302 /* packet for me */
1303 if (is_my_mac(unicast_packet->dest)) {
1304
1305 ret = frag_reassemble_skb(skb, bat_priv, &new_skb);
1306
1307 if (ret == NET_RX_DROP)
1308 return NET_RX_DROP;
1309
1310 /* packet was buffered for late merge */
1311 if (!new_skb)
1312 return NET_RX_SUCCESS;
1313
1314 interface_rx(recv_if->soft_iface, new_skb, recv_if,
1315 sizeof(struct unicast_packet));
1316 return NET_RX_SUCCESS;
1317 }
1318
1319 return route_unicast_packet(skb, recv_if, hdr_size);
1320 }
1321
1322
1323 int recv_bcast_packet(struct sk_buff *skb, struct batman_if *recv_if)
1324 {
1325 struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
1326 struct orig_node *orig_node;
1327 struct bcast_packet *bcast_packet;
1328 struct ethhdr *ethhdr;
1329 int hdr_size = sizeof(struct bcast_packet);
1330 int32_t seq_diff;
1331
1332 /* drop packet if it doesn't have the necessary minimum size */
1333 if (unlikely(!pskb_may_pull(skb, hdr_size)))
1334 return NET_RX_DROP;
1335
1336 ethhdr = (struct ethhdr *)skb_mac_header(skb);
1337
1338 /* packet with broadcast indication but unicast recipient */
1339 if (!is_broadcast_ether_addr(ethhdr->h_dest))
1340 return NET_RX_DROP;
1341
1342 /* packet with broadcast sender address */
1343 if (is_broadcast_ether_addr(ethhdr->h_source))
1344 return NET_RX_DROP;
1345
1346 /* ignore broadcasts sent by myself */
1347 if (is_my_mac(ethhdr->h_source))
1348 return NET_RX_DROP;
1349
1350 bcast_packet = (struct bcast_packet *)skb->data;
1351
1352 /* ignore broadcasts originated by myself */
1353 if (is_my_mac(bcast_packet->orig))
1354 return NET_RX_DROP;
1355
1356 if (bcast_packet->ttl < 2)
1357 return NET_RX_DROP;
1358
1359 spin_lock_bh(&bat_priv->orig_hash_lock);
1360 rcu_read_lock();
1361 orig_node = ((struct orig_node *)
1362 hash_find(bat_priv->orig_hash, compare_orig, choose_orig,
1363 bcast_packet->orig));
1364 rcu_read_unlock();
1365
1366 if (!orig_node) {
1367 spin_unlock_bh(&bat_priv->orig_hash_lock);
1368 return NET_RX_DROP;
1369 }
1370
1371 /* check whether the packet is a duplicate */
1372 if (get_bit_status(orig_node->bcast_bits,
1373 orig_node->last_bcast_seqno,
1374 ntohl(bcast_packet->seqno))) {
1375 spin_unlock_bh(&bat_priv->orig_hash_lock);
1376 return NET_RX_DROP;
1377 }
1378
1379 seq_diff = ntohl(bcast_packet->seqno) - orig_node->last_bcast_seqno;
1380
1381 /* check whether the packet is old and the host just restarted. */
1382 if (window_protected(bat_priv, seq_diff,
1383 &orig_node->bcast_seqno_reset)) {
1384 spin_unlock_bh(&bat_priv->orig_hash_lock);
1385 return NET_RX_DROP;
1386 }
1387
1388 /* mark broadcast in flood history, update window position
1389 * if required. */
1390 if (bit_get_packet(bat_priv, orig_node->bcast_bits, seq_diff, 1))
1391 orig_node->last_bcast_seqno = ntohl(bcast_packet->seqno);
1392
1393 spin_unlock_bh(&bat_priv->orig_hash_lock);
1394 /* rebroadcast packet */
1395 add_bcast_packet_to_list(bat_priv, skb);
1396
1397 /* broadcast for me */
1398 interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size);
1399
1400 return NET_RX_SUCCESS;
1401 }
1402
1403 int recv_vis_packet(struct sk_buff *skb, struct batman_if *recv_if)
1404 {
1405 struct vis_packet *vis_packet;
1406 struct ethhdr *ethhdr;
1407 struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
1408 int hdr_size = sizeof(struct vis_packet);
1409
1410 /* keep skb linear */
1411 if (skb_linearize(skb) < 0)
1412 return NET_RX_DROP;
1413
1414 if (unlikely(!pskb_may_pull(skb, hdr_size)))
1415 return NET_RX_DROP;
1416
1417 vis_packet = (struct vis_packet *)skb->data;
1418 ethhdr = (struct ethhdr *)skb_mac_header(skb);
1419
1420 /* not for me */
1421 if (!is_my_mac(ethhdr->h_dest))
1422 return NET_RX_DROP;
1423
1424 /* ignore own packets */
1425 if (is_my_mac(vis_packet->vis_orig))
1426 return NET_RX_DROP;
1427
1428 if (is_my_mac(vis_packet->sender_orig))
1429 return NET_RX_DROP;
1430
1431 switch (vis_packet->vis_type) {
1432 case VIS_TYPE_SERVER_SYNC:
1433 receive_server_sync_packet(bat_priv, vis_packet,
1434 skb_headlen(skb));
1435 break;
1436
1437 case VIS_TYPE_CLIENT_UPDATE:
1438 receive_client_update_packet(bat_priv, vis_packet,
1439 skb_headlen(skb));
1440 break;
1441
1442 default: /* ignore unknown packet */
1443 break;
1444 }
1445
1446 /* We take a copy of the data in the packet, so we should
1447 * always free the skb. */
1448 return NET_RX_DROP;
1449 }