/* Copyright (C) 2007-2013 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#include "main.h"
#include "distributed-arp-table.h"
#include "send.h"
#include "routing.h"
#include "translation-table.h"
#include "soft-interface.h"
#include "hard-interface.h"
#include "gateway_common.h"
#include "gateway_client.h"
#include "originator.h"
#include "network-coding.h"

#include <linux/if_ether.h>
static void batadv_send_outstanding_bcast_packet(struct work_struct *work);
/* send out an already prepared packet to the given address via the
 * specified batman interface
 */
int batadv_send_skb_packet(struct sk_buff *skb,
			   struct batadv_hard_iface *hard_iface,
			   const uint8_t *dst_addr)
{
	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct ethhdr *ethhdr;

	if (hard_iface->if_status != BATADV_IF_ACTIVE)
		goto send_skb_err;

	if (unlikely(!hard_iface->net_dev))
		goto send_skb_err;

	if (!(hard_iface->net_dev->flags & IFF_UP)) {
		pr_warn("Interface %s is not up - can't send packet via that interface!\n",
			hard_iface->net_dev->name);
		goto send_skb_err;
	}

	/* push to the ethernet header. */
	if (batadv_skb_head_push(skb, ETH_HLEN) < 0)
		goto send_skb_err;

	skb_reset_mac_header(skb);

	ethhdr = eth_hdr(skb);
	memcpy(ethhdr->h_source, hard_iface->net_dev->dev_addr, ETH_ALEN);
	memcpy(ethhdr->h_dest, dst_addr, ETH_ALEN);
	ethhdr->h_proto = __constant_htons(ETH_P_BATMAN);

	skb_set_network_header(skb, ETH_HLEN);
	skb->protocol = __constant_htons(ETH_P_BATMAN);

	skb->dev = hard_iface->net_dev;

	/* Save a clone of the skb to use when decoding coded packets */
	batadv_nc_skb_store_for_decoding(bat_priv, skb);

	/* dev_queue_xmit() returns a negative result on error. However on
	 * congestion and traffic shaping, it drops and returns NET_XMIT_DROP
	 * (which is > 0). This will not be treated as an error.
	 */
	return dev_queue_xmit(skb);
send_skb_err:
	kfree_skb(skb);
	return NET_XMIT_DROP;
}
/**
 * batadv_send_skb_to_orig - Lookup next-hop and transmit skb.
 * @skb: Packet to be transmitted.
 * @orig_node: Final destination of the packet.
 * @recv_if: Interface used when receiving the packet (can be NULL).
 *
 * Looks up the best next-hop towards the passed originator and passes the
 * skb on for preparation of MAC header. If the packet originated from this
 * host, NULL can be passed as recv_if and no interface alternating is
 * attempted.
 *
 * Returns NET_XMIT_SUCCESS on success, NET_XMIT_DROP on failure, or
 * NET_XMIT_POLICED if the skb is buffered for later transmit.
 */
int batadv_send_skb_to_orig(struct sk_buff *skb,
			    struct batadv_orig_node *orig_node,
			    struct batadv_hard_iface *recv_if)
{
	struct batadv_priv *bat_priv = orig_node->bat_priv;
	struct batadv_neigh_node *neigh_node;
	int ret = NET_XMIT_DROP;

	/* batadv_find_router() increases neigh_nodes refcount if found. */
	neigh_node = batadv_find_router(bat_priv, orig_node, recv_if);
	if (!neigh_node)
		return ret;

	/* try to network code the packet, if it is received on an interface
	 * (i.e. being forwarded). If the packet originates from this node or if
	 * network coding fails, then send the packet as usual.
	 */
	if (recv_if && batadv_nc_skb_forward(skb, neigh_node)) {
		ret = NET_XMIT_POLICED;
	} else {
		batadv_send_skb_packet(skb, neigh_node->if_incoming,
				       neigh_node->addr);
		ret = NET_XMIT_SUCCESS;
	}

	batadv_neigh_node_free_ref(neigh_node);

	return ret;
}
/**
 * batadv_send_skb_push_fill_unicast - extend the buffer and initialize the
 *  common fields for unicast packets
 * @skb: the skb carrying the unicast header to initialize
 * @hdr_size: amount of bytes to push at the beginning of the skb
 * @orig_node: the destination node
 *
 * Returns false if the buffer extension was not possible or true otherwise.
 */
static bool
batadv_send_skb_push_fill_unicast(struct sk_buff *skb, int hdr_size,
				  struct batadv_orig_node *orig_node)
{
	struct batadv_unicast_packet *unicast_packet;
	uint8_t ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);

	if (batadv_skb_head_push(skb, hdr_size) < 0)
		return false;

	unicast_packet = (struct batadv_unicast_packet *)skb->data;
	unicast_packet->header.version = BATADV_COMPAT_VERSION;
	/* batman packet type: unicast */
	unicast_packet->header.packet_type = BATADV_UNICAST;
	/* set unicast ttl */
	unicast_packet->header.ttl = BATADV_TTL;
	/* copy the destination for faster routing */
	memcpy(unicast_packet->dest, orig_node->orig, ETH_ALEN);
	/* set the destination tt version number */
	unicast_packet->ttvn = ttvn;

	return true;
}
/**
 * batadv_send_skb_prepare_unicast - encapsulate an skb with a unicast header
 * @skb: the skb containing the payload to encapsulate
 * @orig_node: the destination node
 *
 * Returns false if the payload could not be encapsulated or true otherwise.
 */
static bool batadv_send_skb_prepare_unicast(struct sk_buff *skb,
					    struct batadv_orig_node *orig_node)
{
	size_t uni_size = sizeof(struct batadv_unicast_packet);

	return batadv_send_skb_push_fill_unicast(skb, uni_size, orig_node);
}
/**
 * batadv_send_skb_prepare_unicast_4addr - encapsulate an skb with a
 *  unicast 4addr header
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the skb containing the payload to encapsulate
 * @orig: the destination node
 * @packet_subtype: the unicast 4addr packet subtype to use
 *
 * Returns false if the payload could not be encapsulated or true otherwise.
 */
bool batadv_send_skb_prepare_unicast_4addr(struct batadv_priv *bat_priv,
					   struct sk_buff *skb,
					   struct batadv_orig_node *orig,
					   int packet_subtype)
{
	struct batadv_hard_iface *primary_if;
	struct batadv_unicast_4addr_packet *uc_4addr_packet;
	bool ret = false;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	/* Pull the header space and fill the unicast_packet substructure.
	 * We can do that because the first member of the uc_4addr_packet
	 * is of type struct unicast_packet
	 */
	if (!batadv_send_skb_push_fill_unicast(skb, sizeof(*uc_4addr_packet),
					       orig))
		goto out;

	uc_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data;
	uc_4addr_packet->u.header.packet_type = BATADV_UNICAST_4ADDR;
	memcpy(uc_4addr_packet->src, primary_if->net_dev->dev_addr, ETH_ALEN);
	uc_4addr_packet->subtype = packet_subtype;
	uc_4addr_packet->reserved = 0;

	ret = true;
out:
	if (primary_if)
		batadv_hardif_free_ref(primary_if);
	return ret;
}
/**
 * batadv_send_skb_generic_unicast - send an skb as unicast
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: payload to send
 * @packet_type: the batman unicast packet type to use
 * @packet_subtype: the unicast 4addr packet subtype (only relevant for unicast
 *  4addr packets)
 *
 * Returns 1 in case of error or 0 otherwise.
 */
int batadv_send_skb_generic_unicast(struct batadv_priv *bat_priv,
				    struct sk_buff *skb, int packet_type,
				    int packet_subtype)
{
	struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
	struct batadv_unicast_packet *unicast_packet;
	struct batadv_orig_node *orig_node;
	struct batadv_neigh_node *neigh_node;
	int ret = NET_RX_DROP;

	/* get routing information */
	if (is_multicast_ether_addr(ethhdr->h_dest)) {
		orig_node = batadv_gw_get_selected_orig(bat_priv);
		if (orig_node)
			goto find_router;
	}

	/* check for tt host - increases orig_node refcount.
	 * returns NULL in case of AP isolation
	 */
	orig_node = batadv_transtable_search(bat_priv, ethhdr->h_source,
					     ethhdr->h_dest);

find_router:
	/* find_router():
	 *  - if orig_node is NULL it returns NULL
	 *  - increases neigh_nodes refcount if found.
	 */
	neigh_node = batadv_find_router(bat_priv, orig_node, NULL);
	if (!neigh_node)
		goto out;

	switch (packet_type) {
	case BATADV_UNICAST:
		batadv_send_skb_prepare_unicast(skb, orig_node);
		break;
	case BATADV_UNICAST_4ADDR:
		batadv_send_skb_prepare_unicast_4addr(bat_priv, skb, orig_node,
						      packet_subtype);
		break;
	default:
		/* this function supports UNICAST and UNICAST_4ADDR only. It
		 * should never be invoked with any other packet type
		 */
		goto out;
	}

	unicast_packet = (struct batadv_unicast_packet *)skb->data;

	/* inform the destination node that we are still missing a correct route
	 * for this client. The destination will receive this packet and will
	 * try to reroute it because the ttvn contained in the header is less
	 * than the current one
	 */
	if (batadv_tt_global_client_is_roaming(bat_priv, ethhdr->h_dest))
		unicast_packet->ttvn = unicast_packet->ttvn - 1;

	if (batadv_send_skb_to_orig(skb, orig_node, NULL) != NET_XMIT_DROP)
		ret = 0;

out:
	if (neigh_node)
		batadv_neigh_node_free_ref(neigh_node);
	if (orig_node)
		batadv_orig_node_free_ref(orig_node);
	if (ret == NET_RX_DROP)
		kfree_skb(skb);
	return ret;
}
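/**
 * batadv_schedule_bat_ogm - schedule an originator message (OGM) transmission
 * @hard_iface: the interface to send the OGM on
 *
 * Interfaces that are still waiting to be activated are switched to active
 * here (see the comment in the function body); the actual scheduling is
 * delegated to the routing algorithm via its bat_ogm_schedule() callback.
 */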
void batadv_schedule_bat_ogm(struct batadv_hard_iface *hard_iface)
{
	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);

	if ((hard_iface->if_status == BATADV_IF_NOT_IN_USE) ||
	    (hard_iface->if_status == BATADV_IF_TO_BE_REMOVED))
		return;

	/* the interface gets activated here to avoid race conditions between
	 * the moment of activating the interface in
	 * hardif_activate_interface() where the originator mac is set and
	 * outdated packets (especially uninitialized mac addresses) in the
	 * packet queue
	 */
	if (hard_iface->if_status == BATADV_IF_TO_BE_ACTIVATED)
		hard_iface->if_status = BATADV_IF_ACTIVE;

	bat_priv->bat_algo_ops->bat_ogm_schedule(hard_iface);
}
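/**
 * batadv_forw_packet_free - free a forwarding packet
 * @forw_packet: the packet to free
 *
 * Frees the packet's skb (if any), releases the reference held on the
 * incoming hard interface and frees the structure itself.
 */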
static void batadv_forw_packet_free(struct batadv_forw_packet *forw_packet)
{
	if (forw_packet->skb)
		kfree_skb(forw_packet->skb);
	if (forw_packet->if_incoming)
		batadv_hardif_free_ref(forw_packet->if_incoming);
	kfree(forw_packet);
}
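/**
 * _batadv_add_bcast_packet_to_list - queue a broadcast packet for (re)sending
 * @bat_priv: the bat priv with all the soft interface information
 * @forw_packet: the pending broadcast packet to queue
 * @send_time: delay in jiffies until the packet is sent
 *
 * The caller is expected to have already claimed a slot in the broadcast
 * queue (bcast_queue_left).
 */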
static void
_batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
				 struct batadv_forw_packet *forw_packet,
				 unsigned long send_time)
{
	/* add new packet to packet list */
	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_add_head(&forw_packet->list, &bat_priv->forw_bcast_list);
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	/* start timer for this packet */
	queue_delayed_work(batadv_event_workqueue, &forw_packet->delayed_work,
			   send_time);
}
/* add a broadcast packet to the queue and setup timers. broadcast packets
 * are sent multiple times to increase probability for being received.
 *
 * This function returns NETDEV_TX_OK on success and NETDEV_TX_BUSY on
 * errors.
 *
 * The skb is not consumed, so the caller should make sure that the
 * skb is freed.
 */
int batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
				    const struct sk_buff *skb,
				    unsigned long delay)
{
	struct batadv_hard_iface *primary_if = NULL;
	struct batadv_forw_packet *forw_packet;
	struct batadv_bcast_packet *bcast_packet;
	struct sk_buff *newskb;

	if (!batadv_atomic_dec_not_zero(&bat_priv->bcast_queue_left)) {
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "bcast packet queue full\n");
		goto out;
	}

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out_and_inc;

	forw_packet = kmalloc(sizeof(*forw_packet), GFP_ATOMIC);
	if (!forw_packet)
		goto out_and_inc;

	newskb = skb_copy(skb, GFP_ATOMIC);
	if (!newskb)
		goto packet_free;

	/* as we have a copy now, it is safe to decrease the TTL */
	bcast_packet = (struct batadv_bcast_packet *)newskb->data;
	bcast_packet->header.ttl--;

	skb_reset_mac_header(newskb);

	forw_packet->skb = newskb;
	forw_packet->if_incoming = primary_if;

	/* how often did we send the bcast packet ? */
	forw_packet->num_packets = 0;

	INIT_DELAYED_WORK(&forw_packet->delayed_work,
			  batadv_send_outstanding_bcast_packet);

	_batadv_add_bcast_packet_to_list(bat_priv, forw_packet, delay);
	return NETDEV_TX_OK;

packet_free:
	kfree(forw_packet);
out_and_inc:
	atomic_inc(&bat_priv->bcast_queue_left);
out:
	if (primary_if)
		batadv_hardif_free_ref(primary_if);
	return NETDEV_TX_BUSY;
}
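/**
 * batadv_send_outstanding_bcast_packet - work callback rebroadcasting a
 *  queued packet on all hard interfaces attached to its soft interface
 * @work: the delayed work embedded in the pending batadv_forw_packet
 *
 * Re-arms itself with a 5ms delay until the packet has been rebroadcast
 * BATADV_NUM_BCASTS_MAX times; interfaces requesting fewer rebroadcasts
 * (num_bcasts) are skipped in the later rounds.
 */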
static void batadv_send_outstanding_bcast_packet(struct work_struct *work)
{
	struct batadv_hard_iface *hard_iface;
	struct delayed_work *delayed_work;
	struct batadv_forw_packet *forw_packet;
	struct sk_buff *skb1;
	struct net_device *soft_iface;
	struct batadv_priv *bat_priv;

	delayed_work = container_of(work, struct delayed_work, work);
	forw_packet = container_of(delayed_work, struct batadv_forw_packet,
				   delayed_work);
	soft_iface = forw_packet->if_incoming->soft_iface;
	bat_priv = netdev_priv(soft_iface);

	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_del(&forw_packet->list);
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING)
		goto out;

	if (batadv_dat_drop_broadcast_packet(bat_priv, forw_packet))
		goto out;

	/* rebroadcast packet */
	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
		if (hard_iface->soft_iface != soft_iface)
			continue;

		if (forw_packet->num_packets >= hard_iface->num_bcasts)
			continue;

		/* send a copy of the saved skb */
		skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);
		if (skb1)
			batadv_send_skb_packet(skb1, hard_iface,
					       batadv_broadcast_addr);
	}
	rcu_read_unlock();

	forw_packet->num_packets++;

	/* if we still have some more bcasts to send */
	if (forw_packet->num_packets < BATADV_NUM_BCASTS_MAX) {
		_batadv_add_bcast_packet_to_list(bat_priv, forw_packet,
						 msecs_to_jiffies(5));
		return;
	}

out:
	batadv_forw_packet_free(forw_packet);
	atomic_inc(&bat_priv->bcast_queue_left);
}
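/**
 * batadv_send_outstanding_bat_ogm_packet - work callback emitting a queued
 *  OGM via the active routing algorithm
 * @work: the delayed work embedded in the pending batadv_forw_packet
 */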
void batadv_send_outstanding_bat_ogm_packet(struct work_struct *work)
{
	struct delayed_work *delayed_work;
	struct batadv_forw_packet *forw_packet;
	struct batadv_priv *bat_priv;

	delayed_work = container_of(work, struct delayed_work, work);
	forw_packet = container_of(delayed_work, struct batadv_forw_packet,
				   delayed_work);
	bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface);
	spin_lock_bh(&bat_priv->forw_bat_list_lock);
	hlist_del(&forw_packet->list);
	spin_unlock_bh(&bat_priv->forw_bat_list_lock);

	if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING)
		goto out;

	bat_priv->bat_algo_ops->bat_ogm_emit(forw_packet);

	/* we have to have at least one packet in the queue
	 * to determine the queues wake up time unless we are
	 * shutting down
	 */
	if (forw_packet->own)
		batadv_schedule_bat_ogm(forw_packet->if_incoming);

out:
	/* don't count own packet */
	if (!forw_packet->own)
		atomic_inc(&bat_priv->batman_queue_left);

	batadv_forw_packet_free(forw_packet);
}
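/**
 * batadv_purge_outstanding_packets - cancel queued broadcast and OGM packets
 * @bat_priv: the bat priv with all the soft interface information
 * @hard_iface: the interface to purge packets for, or NULL to purge all
 *
 * Cancels the delayed work of every matching packet on the broadcast and
 * batman packet lists and frees the packets whose work was still pending.
 */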
void
batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
				 const struct batadv_hard_iface *hard_iface)
{
	struct batadv_forw_packet *forw_packet;
	struct hlist_node *safe_tmp_node;
	bool pending;

	if (hard_iface)
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "purge_outstanding_packets(): %s\n",
			   hard_iface->net_dev->name);
	else
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "purge_outstanding_packets()\n");

	/* free bcast list */
	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_for_each_entry_safe(forw_packet, safe_tmp_node,
				  &bat_priv->forw_bcast_list, list) {
		/* if purge_outstanding_packets() was called with an argument
		 * we delete only packets belonging to the given interface
		 */
		if ((hard_iface) &&
		    (forw_packet->if_incoming != hard_iface))
			continue;

		spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

		/* batadv_send_outstanding_bcast_packet() will lock the list to
		 * delete the item from the list
		 */
		pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
		spin_lock_bh(&bat_priv->forw_bcast_list_lock);

		if (pending) {
			hlist_del(&forw_packet->list);
			batadv_forw_packet_free(forw_packet);
		}
	}
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	/* free batman packet list */
	spin_lock_bh(&bat_priv->forw_bat_list_lock);
	hlist_for_each_entry_safe(forw_packet, safe_tmp_node,
				  &bat_priv->forw_bat_list, list) {
		/* if purge_outstanding_packets() was called with an argument
		 * we delete only packets belonging to the given interface
		 */
		if ((hard_iface) &&
		    (forw_packet->if_incoming != hard_iface))
			continue;

		spin_unlock_bh(&bat_priv->forw_bat_list_lock);

		/* send_outstanding_bat_packet() will lock the list to
		 * delete the item from the list
		 */
		pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
		spin_lock_bh(&bat_priv->forw_bat_list_lock);

		if (pending) {
			hlist_del(&forw_packet->list);
			batadv_forw_packet_free(forw_packet);
		}
	}
	spin_unlock_bh(&bat_priv->forw_bat_list_lock);
}