/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#include "main.h"
#include "distributed-arp-table.h"
#include "send.h"
#include "routing.h"
#include "translation-table.h"
#include "soft-interface.h"
#include "hard-interface.h"
#include "vis.h"
#include "gateway_common.h"
#include "originator.h"

#include <linux/if_ether.h>

static void batadv_send_outstanding_bcast_packet(struct work_struct *work);

/**
 * batadv_send_skb_packet - send an already prepared packet
 * @skb: the packet to send
 * @hard_iface: the batman interface to send the packet on
 * @dst_addr: the destination MAC address for the ethernet header
 *
 * Send out an already prepared packet to the given address via the
 * specified batman interface.
 */
int batadv_send_skb_packet(struct sk_buff *skb,
			   struct batadv_hard_iface *hard_iface,
			   const uint8_t *dst_addr)
{
	struct ethhdr *ethhdr;

	if (hard_iface->if_status != BATADV_IF_ACTIVE)
		goto send_skb_err;

	if (unlikely(!hard_iface->net_dev))
		goto send_skb_err;

	if (!(hard_iface->net_dev->flags & IFF_UP)) {
		pr_warn("Interface %s is not up - can't send packet via that interface!\n",
			hard_iface->net_dev->name);
		goto send_skb_err;
	}

	/* push to the ethernet header */
	if (batadv_skb_head_push(skb, ETH_HLEN) < 0)
		goto send_skb_err;

	skb_reset_mac_header(skb);

	ethhdr = (struct ethhdr *)skb_mac_header(skb);
	memcpy(ethhdr->h_source, hard_iface->net_dev->dev_addr, ETH_ALEN);
	memcpy(ethhdr->h_dest, dst_addr, ETH_ALEN);
	ethhdr->h_proto = htons(ETH_P_BATMAN);

	skb_set_network_header(skb, ETH_HLEN);
	skb->priority = TC_PRIO_CONTROL;
	skb->protocol = htons(ETH_P_BATMAN);

	skb->dev = hard_iface->net_dev;

	/* dev_queue_xmit() returns a negative result on error. However on
	 * congestion and traffic shaping, it drops and returns NET_XMIT_DROP
	 * (which is > 0). This will not be treated as an error.
	 */
	return dev_queue_xmit(skb);
send_skb_err:
	kfree_skb(skb);
	return NET_XMIT_DROP;
}
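
/* Illustrative usage sketch (not part of the original file; 'skb',
 * 'hard_iface' and 'dst_addr' are placeholders the caller must provide):
 * the result is classified as described in the dev_queue_xmit() note
 * above - negative values are errors, positive NET_XMIT_* values only
 * signal congestion and are not treated as failures:
 *
 *	int ret = batadv_send_skb_packet(skb, hard_iface, dst_addr);
 *	if (ret < 0)
 *		pr_debug("batman-adv: hard xmit failed: %d\n", ret);
 */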

/**
 * batadv_send_skb_to_orig - Lookup next-hop and transmit skb.
 * @skb: Packet to be transmitted.
 * @orig_node: Final destination of the packet.
 * @recv_if: Interface used when receiving the packet (can be NULL).
 *
 * Looks up the best next-hop towards the passed originator and passes the
 * skb on for preparation of the MAC header. If the packet originated from
 * this host, NULL can be passed as recv_if and no interface alternating is
 * attempted.
 *
 * Returns true on success, false otherwise.
 */
bool batadv_send_skb_to_orig(struct sk_buff *skb,
			     struct batadv_orig_node *orig_node,
			     struct batadv_hard_iface *recv_if)
{
	struct batadv_priv *bat_priv = orig_node->bat_priv;
	struct batadv_neigh_node *neigh_node;

	/* batadv_find_router() increases neigh_node's refcount if found */
	neigh_node = batadv_find_router(bat_priv, orig_node, recv_if);
	if (!neigh_node)
		return false;

	/* route it */
	batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);

	batadv_neigh_node_free_ref(neigh_node);

	return true;
}
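
/* Sketch of a typical caller (an assumption, not from this file): the
 * originator is looked up first and its reference dropped afterwards,
 * mirroring how the neigh_node refcount is handled above:
 *
 *	orig_node = batadv_orig_hash_find(bat_priv, dst);
 *	if (orig_node) {
 *		ret = batadv_send_skb_to_orig(skb, orig_node, NULL);
 *		batadv_orig_node_free_ref(orig_node);
 *	}
 */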

void batadv_schedule_bat_ogm(struct batadv_hard_iface *hard_iface)
{
	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);

	if ((hard_iface->if_status == BATADV_IF_NOT_IN_USE) ||
	    (hard_iface->if_status == BATADV_IF_TO_BE_REMOVED))
		return;

	/* the interface gets activated here to avoid a race between the
	 * activation in batadv_hardif_activate_interface() (where the
	 * originator mac is set) and outdated packets (especially ones
	 * carrying an uninitialized mac address) sitting in the packet queue
	 */
	if (hard_iface->if_status == BATADV_IF_TO_BE_ACTIVATED)
		hard_iface->if_status = BATADV_IF_ACTIVE;

	bat_priv->bat_algo_ops->bat_ogm_schedule(hard_iface);
}
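
/* Editorial note (hedged): bat_ogm_schedule is a routing-algorithm hook;
 * with the default B.A.T.M.A.N. IV algorithm it is expected to resolve to
 * batadv_iv_ogm_schedule() from bat_iv_ogm.c, registered roughly like:
 *
 *	static struct batadv_algo_ops batadv_batman_iv __read_mostly = {
 *		.name = "BATMAN_IV",
 *		...
 *		.bat_ogm_schedule = batadv_iv_ogm_schedule,
 *		.bat_ogm_emit = batadv_iv_ogm_emit,
 *	};
 */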

static void batadv_forw_packet_free(struct batadv_forw_packet *forw_packet)
{
	if (forw_packet->skb)
		kfree_skb(forw_packet->skb);
	if (forw_packet->if_incoming)
		batadv_hardif_free_ref(forw_packet->if_incoming);
	kfree(forw_packet);
}

static void
_batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
				 struct batadv_forw_packet *forw_packet,
				 unsigned long send_time)
{
	INIT_HLIST_NODE(&forw_packet->list);

	/* add new packet to packet list */
	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_add_head(&forw_packet->list, &bat_priv->forw_bcast_list);
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	/* start timer for this packet */
	INIT_DELAYED_WORK(&forw_packet->delayed_work,
			  batadv_send_outstanding_bcast_packet);
	queue_delayed_work(batadv_event_workqueue, &forw_packet->delayed_work,
			   send_time);
}
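
/* Usage note (sketch): send_time is a relative delay in jiffies, so
 * callers working in milliseconds convert first, exactly as the
 * rebroadcast path further below does:
 *
 *	_batadv_add_bcast_packet_to_list(bat_priv, forw_packet,
 *					 msecs_to_jiffies(5));
 */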

/**
 * batadv_add_bcast_packet_to_list - queue a broadcast packet for sending
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the broadcast packet to add
 * @delay: number of jiffies to wait before the first transmission
 *
 * Add a broadcast packet to the queue and set up timers. Broadcast packets
 * are sent multiple times to increase the probability of reception.
 *
 * Returns NETDEV_TX_OK on success and NETDEV_TX_BUSY on errors.
 *
 * The skb is not consumed, so the caller should make sure that the
 * skb is freed.
 */
int batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
				    const struct sk_buff *skb,
				    unsigned long delay)
{
	struct batadv_hard_iface *primary_if = NULL;
	struct batadv_forw_packet *forw_packet;
	struct batadv_bcast_packet *bcast_packet;
	struct sk_buff *newskb;

	if (!batadv_atomic_dec_not_zero(&bat_priv->bcast_queue_left)) {
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "bcast packet queue full\n");
		goto out;
	}

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out_and_inc;

	forw_packet = kmalloc(sizeof(*forw_packet), GFP_ATOMIC);
	if (!forw_packet)
		goto out_and_inc;

	newskb = skb_copy(skb, GFP_ATOMIC);
	if (!newskb)
		goto packet_free;

	/* as we have a copy now, it is safe to decrease the TTL */
	bcast_packet = (struct batadv_bcast_packet *)newskb->data;
	bcast_packet->header.ttl--;

	skb_reset_mac_header(newskb);

	forw_packet->skb = newskb;
	forw_packet->if_incoming = primary_if;

	/* counts how often the bcast packet has been sent */
	forw_packet->num_packets = 0;

	_batadv_add_bcast_packet_to_list(bat_priv, forw_packet, delay);
	return NETDEV_TX_OK;

packet_free:
	kfree(forw_packet);
out_and_inc:
	atomic_inc(&bat_priv->bcast_queue_left);
out:
	if (primary_if)
		batadv_hardif_free_ref(primary_if);
	return NETDEV_TX_BUSY;
}
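
/* Note on the queue accounting above (hedged, from main.h of this era):
 * batadv_atomic_dec_not_zero() wraps atomic_add_unless(),
 *
 *	#define batadv_atomic_dec_not_zero(v)	atomic_add_unless((v), -1, 0)
 *
 * so bcast_queue_left is only decremented while it is non-zero, which is
 * why every failure path past that check re-increments it via out_and_inc.
 */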

static void batadv_send_outstanding_bcast_packet(struct work_struct *work)
{
	struct batadv_hard_iface *hard_iface;
	struct delayed_work *delayed_work;
	struct batadv_forw_packet *forw_packet;
	struct sk_buff *skb1;
	struct net_device *soft_iface;
	struct batadv_priv *bat_priv;

	delayed_work = container_of(work, struct delayed_work, work);
	forw_packet = container_of(delayed_work, struct batadv_forw_packet,
				   delayed_work);
	soft_iface = forw_packet->if_incoming->soft_iface;
	bat_priv = netdev_priv(soft_iface);

	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_del(&forw_packet->list);
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING)
		goto out;

	if (batadv_dat_drop_broadcast_packet(bat_priv, forw_packet))
		goto out;

	/* rebroadcast packet */
	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
		if (hard_iface->soft_iface != soft_iface)
			continue;

		/* send a copy of the saved skb */
		skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);
		if (skb1)
			batadv_send_skb_packet(skb1, hard_iface,
					       batadv_broadcast_addr);
	}
	rcu_read_unlock();

	forw_packet->num_packets++;

	/* if we still have some more bcasts to send */
	if (forw_packet->num_packets < 3) {
		_batadv_add_bcast_packet_to_list(bat_priv, forw_packet,
						 msecs_to_jiffies(5));
		return;
	}

out:
	batadv_forw_packet_free(forw_packet);
	atomic_inc(&bat_priv->bcast_queue_left);
}
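
/* Resulting schedule (sketch derived from the code above):
 * batadv_add_bcast_packet_to_list() queues the first transmission after
 * 'delay' jiffies; the work function then requeues itself with
 * msecs_to_jiffies(5) until num_packets reaches 3, so each broadcast
 * leaves every matching hard interface three times in total before the
 * slot is returned to bcast_queue_left.
 */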

void batadv_send_outstanding_bat_ogm_packet(struct work_struct *work)
{
	struct delayed_work *delayed_work;
	struct batadv_forw_packet *forw_packet;
	struct batadv_priv *bat_priv;

	delayed_work = container_of(work, struct delayed_work, work);
	forw_packet = container_of(delayed_work, struct batadv_forw_packet,
				   delayed_work);
	bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface);
	spin_lock_bh(&bat_priv->forw_bat_list_lock);
	hlist_del(&forw_packet->list);
	spin_unlock_bh(&bat_priv->forw_bat_list_lock);

	if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING)
		goto out;

	bat_priv->bat_algo_ops->bat_ogm_emit(forw_packet);

	/* we have to have at least one packet in the queue to determine the
	 * queue's wake-up time, unless we are shutting down
	 */
	if (forw_packet->own)
		batadv_schedule_bat_ogm(forw_packet->if_incoming);

out:
	/* don't count own packet */
	if (!forw_packet->own)
		atomic_inc(&bat_priv->batman_queue_left);

	batadv_forw_packet_free(forw_packet);
}

void
batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
				 const struct batadv_hard_iface *hard_iface)
{
	struct batadv_forw_packet *forw_packet;
	struct hlist_node *tmp_node, *safe_tmp_node;
	bool pending;

	if (hard_iface)
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "purge_outstanding_packets(): %s\n",
			   hard_iface->net_dev->name);
	else
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "purge_outstanding_packets()\n");

	/* free bcast list */
	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
				  &bat_priv->forw_bcast_list, list) {
		/* if purge_outstanding_packets() was called with an argument
		 * we delete only packets belonging to the given interface
		 */
		if ((hard_iface) &&
		    (forw_packet->if_incoming != hard_iface))
			continue;

		spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

		/* batadv_send_outstanding_bcast_packet() will lock the list to
		 * delete the item from the list
		 */
		pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
		spin_lock_bh(&bat_priv->forw_bcast_list_lock);

		if (pending) {
			hlist_del(&forw_packet->list);
			batadv_forw_packet_free(forw_packet);
		}
	}
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	/* free batman packet list */
	spin_lock_bh(&bat_priv->forw_bat_list_lock);
	hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
				  &bat_priv->forw_bat_list, list) {
		/* if purge_outstanding_packets() was called with an argument
		 * we delete only packets belonging to the given interface
		 */
		if ((hard_iface) &&
		    (forw_packet->if_incoming != hard_iface))
			continue;

		spin_unlock_bh(&bat_priv->forw_bat_list_lock);

		/* batadv_send_outstanding_bat_ogm_packet() will lock the list
		 * to delete the item from the list
		 */
		pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
		spin_lock_bh(&bat_priv->forw_bat_list_lock);

		if (pending) {
			hlist_del(&forw_packet->list);
			batadv_forw_packet_free(forw_packet);
		}
	}
	spin_unlock_bh(&bat_priv->forw_bat_list_lock);
}
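
/* Usage sketch (derived from the branches above): passing a hard_iface
 * restricts the purge to packets scheduled on that interface, while
 * passing NULL flushes both forwarding queues completely:
 *
 *	batadv_purge_outstanding_packets(bat_priv, hard_iface);
 *	batadv_purge_outstanding_packets(bat_priv, NULL);
 */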