batman-adv: Increase orig_node refcount before releasing rcu read lock
[deliverable/linux.git] / net / batman-adv / gateway_client.c
1 /*
2 * Copyright (C) 2009-2011 B.A.T.M.A.N. contributors:
3 *
4 * Marek Lindner
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of version 2 of the GNU General Public
8 * License as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA
19 *
20 */
21
22 #include "main.h"
23 #include "gateway_client.h"
24 #include "gateway_common.h"
25 #include "hard-interface.h"
26 #include <linux/ip.h>
27 #include <linux/ipv6.h>
28 #include <linux/udp.h>
29 #include <linux/if_vlan.h>
30
31 static void gw_node_free_rcu(struct rcu_head *rcu)
32 {
33 struct gw_node *gw_node;
34
35 gw_node = container_of(rcu, struct gw_node, rcu);
36 kfree(gw_node);
37 }
38
39 static void gw_node_free_ref(struct gw_node *gw_node)
40 {
41 if (atomic_dec_and_test(&gw_node->refcount))
42 call_rcu(&gw_node->rcu, gw_node_free_rcu);
43 }
44
45 void *gw_get_selected(struct bat_priv *bat_priv)
46 {
47 struct gw_node *curr_gateway_tmp;
48 struct orig_node *orig_node = NULL;
49
50 rcu_read_lock();
51 curr_gateway_tmp = rcu_dereference(bat_priv->curr_gw);
52 if (!curr_gateway_tmp)
53 goto out;
54
55 orig_node = curr_gateway_tmp->orig_node;
56
57 if (orig_node)
58 kref_get(&orig_node->refcount);
59
60 out:
61 rcu_read_unlock();
62 return orig_node;
63 }
64
65 void gw_deselect(struct bat_priv *bat_priv)
66 {
67 struct gw_node *gw_node;
68
69 spin_lock_bh(&bat_priv->gw_list_lock);
70 gw_node = rcu_dereference(bat_priv->curr_gw);
71 rcu_assign_pointer(bat_priv->curr_gw, NULL);
72 spin_unlock_bh(&bat_priv->gw_list_lock);
73
74 if (gw_node)
75 gw_node_free_ref(gw_node);
76 }
77
78 static void gw_select(struct bat_priv *bat_priv, struct gw_node *new_gw_node)
79 {
80 struct gw_node *curr_gw_node;
81
82 if (new_gw_node && !atomic_inc_not_zero(&new_gw_node->refcount))
83 new_gw_node = NULL;
84
85 spin_lock_bh(&bat_priv->gw_list_lock);
86 curr_gw_node = rcu_dereference(bat_priv->curr_gw);
87 rcu_assign_pointer(bat_priv->curr_gw, new_gw_node);
88 spin_unlock_bh(&bat_priv->gw_list_lock);
89
90 if (curr_gw_node)
91 gw_node_free_ref(curr_gw_node);
92 }
93
94 void gw_election(struct bat_priv *bat_priv)
95 {
96 struct hlist_node *node;
97 struct gw_node *gw_node, *curr_gw, *curr_gw_tmp = NULL;
98 uint8_t max_tq = 0;
99 uint32_t max_gw_factor = 0, tmp_gw_factor = 0;
100 int down, up;
101
102 /**
103 * The batman daemon checks here if we already passed a full originator
104 * cycle in order to make sure we don't choose the first gateway we
105 * hear about. This check is based on the daemon's uptime which we
106 * don't have.
107 **/
108 if (atomic_read(&bat_priv->gw_mode) != GW_MODE_CLIENT)
109 return;
110
111 rcu_read_lock();
112 curr_gw = rcu_dereference(bat_priv->curr_gw);
113 if (curr_gw) {
114 rcu_read_unlock();
115 return;
116 }
117
118 if (hlist_empty(&bat_priv->gw_list)) {
119
120 if (curr_gw) {
121 rcu_read_unlock();
122 bat_dbg(DBG_BATMAN, bat_priv,
123 "Removing selected gateway - "
124 "no gateway in range\n");
125 gw_deselect(bat_priv);
126 } else
127 rcu_read_unlock();
128
129 return;
130 }
131
132 hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw_list, list) {
133 if (!gw_node->orig_node->router)
134 continue;
135
136 if (gw_node->deleted)
137 continue;
138
139 switch (atomic_read(&bat_priv->gw_sel_class)) {
140 case 1: /* fast connection */
141 gw_bandwidth_to_kbit(gw_node->orig_node->gw_flags,
142 &down, &up);
143
144 tmp_gw_factor = (gw_node->orig_node->router->tq_avg *
145 gw_node->orig_node->router->tq_avg *
146 down * 100 * 100) /
147 (TQ_LOCAL_WINDOW_SIZE *
148 TQ_LOCAL_WINDOW_SIZE * 64);
149
150 if ((tmp_gw_factor > max_gw_factor) ||
151 ((tmp_gw_factor == max_gw_factor) &&
152 (gw_node->orig_node->router->tq_avg > max_tq)))
153 curr_gw_tmp = gw_node;
154 break;
155
156 default: /**
157 * 2: stable connection (use best statistic)
158 * 3: fast-switch (use best statistic but change as
159 * soon as a better gateway appears)
160 * XX: late-switch (use best statistic but change as
161 * soon as a better gateway appears which has
162 * $routing_class more tq points)
163 **/
164 if (gw_node->orig_node->router->tq_avg > max_tq)
165 curr_gw_tmp = gw_node;
166 break;
167 }
168
169 if (gw_node->orig_node->router->tq_avg > max_tq)
170 max_tq = gw_node->orig_node->router->tq_avg;
171
172 if (tmp_gw_factor > max_gw_factor)
173 max_gw_factor = tmp_gw_factor;
174 }
175
176 if (curr_gw != curr_gw_tmp) {
177 if ((curr_gw) && (!curr_gw_tmp))
178 bat_dbg(DBG_BATMAN, bat_priv,
179 "Removing selected gateway - "
180 "no gateway in range\n");
181 else if ((!curr_gw) && (curr_gw_tmp))
182 bat_dbg(DBG_BATMAN, bat_priv,
183 "Adding route to gateway %pM "
184 "(gw_flags: %i, tq: %i)\n",
185 curr_gw_tmp->orig_node->orig,
186 curr_gw_tmp->orig_node->gw_flags,
187 curr_gw_tmp->orig_node->router->tq_avg);
188 else
189 bat_dbg(DBG_BATMAN, bat_priv,
190 "Changing route to gateway %pM "
191 "(gw_flags: %i, tq: %i)\n",
192 curr_gw_tmp->orig_node->orig,
193 curr_gw_tmp->orig_node->gw_flags,
194 curr_gw_tmp->orig_node->router->tq_avg);
195
196 gw_select(bat_priv, curr_gw_tmp);
197 }
198
199 rcu_read_unlock();
200 }
201
202 void gw_check_election(struct bat_priv *bat_priv, struct orig_node *orig_node)
203 {
204 struct gw_node *curr_gateway_tmp;
205 uint8_t gw_tq_avg, orig_tq_avg;
206
207 rcu_read_lock();
208 curr_gateway_tmp = rcu_dereference(bat_priv->curr_gw);
209 if (!curr_gateway_tmp)
210 goto out_rcu;
211
212 if (!curr_gateway_tmp->orig_node)
213 goto deselect_rcu;
214
215 if (!curr_gateway_tmp->orig_node->router)
216 goto deselect_rcu;
217
218 /* this node already is the gateway */
219 if (curr_gateway_tmp->orig_node == orig_node)
220 goto out_rcu;
221
222 if (!orig_node->router)
223 goto out_rcu;
224
225 gw_tq_avg = curr_gateway_tmp->orig_node->router->tq_avg;
226 rcu_read_unlock();
227
228 orig_tq_avg = orig_node->router->tq_avg;
229
230 /* the TQ value has to be better */
231 if (orig_tq_avg < gw_tq_avg)
232 goto out;
233
234 /**
235 * if the routing class is greater than 3 the value tells us how much
236 * greater the TQ value of the new gateway must be
237 **/
238 if ((atomic_read(&bat_priv->gw_sel_class) > 3) &&
239 (orig_tq_avg - gw_tq_avg < atomic_read(&bat_priv->gw_sel_class)))
240 goto out;
241
242 bat_dbg(DBG_BATMAN, bat_priv,
243 "Restarting gateway selection: better gateway found (tq curr: "
244 "%i, tq new: %i)\n",
245 gw_tq_avg, orig_tq_avg);
246 goto deselect;
247
248 out_rcu:
249 rcu_read_unlock();
250 goto out;
251 deselect_rcu:
252 rcu_read_unlock();
253 deselect:
254 gw_deselect(bat_priv);
255 out:
256 return;
257 }
258
259 static void gw_node_add(struct bat_priv *bat_priv,
260 struct orig_node *orig_node, uint8_t new_gwflags)
261 {
262 struct gw_node *gw_node;
263 int down, up;
264
265 gw_node = kmalloc(sizeof(struct gw_node), GFP_ATOMIC);
266 if (!gw_node)
267 return;
268
269 memset(gw_node, 0, sizeof(struct gw_node));
270 INIT_HLIST_NODE(&gw_node->list);
271 gw_node->orig_node = orig_node;
272 atomic_set(&gw_node->refcount, 1);
273
274 spin_lock_bh(&bat_priv->gw_list_lock);
275 hlist_add_head_rcu(&gw_node->list, &bat_priv->gw_list);
276 spin_unlock_bh(&bat_priv->gw_list_lock);
277
278 gw_bandwidth_to_kbit(new_gwflags, &down, &up);
279 bat_dbg(DBG_BATMAN, bat_priv,
280 "Found new gateway %pM -> gw_class: %i - %i%s/%i%s\n",
281 orig_node->orig, new_gwflags,
282 (down > 2048 ? down / 1024 : down),
283 (down > 2048 ? "MBit" : "KBit"),
284 (up > 2048 ? up / 1024 : up),
285 (up > 2048 ? "MBit" : "KBit"));
286 }
287
/* Update (or create) the gw_node belonging to orig_node after a new gateway
 * announcement. new_gwflags == 0 means the originator stopped announcing
 * itself as gateway: the node is then marked deleted (purged later by
 * gw_node_purge()) and, if it was the selected gateway, deselected. */
void gw_node_update(struct bat_priv *bat_priv,
		    struct orig_node *orig_node, uint8_t new_gwflags)
{
	struct hlist_node *node;
	struct gw_node *gw_node;

	rcu_read_lock();
	hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw_list, list) {
		if (gw_node->orig_node != orig_node)
			continue;

		/* note: printed even when the class did not actually change */
		bat_dbg(DBG_BATMAN, bat_priv,
			"Gateway class of originator %pM changed from "
			"%i to %i\n",
			orig_node->orig, gw_node->orig_node->gw_flags,
			new_gwflags);

		/* re-announcement revives a previously deleted entry */
		gw_node->deleted = 0;

		if (new_gwflags == 0) {
			/* deleted doubles as "time of deletion" timestamp;
			 * NOTE(review): a jiffies value of 0 would read as
			 * "not deleted" - presumably acceptable, confirm */
			gw_node->deleted = jiffies;
			bat_dbg(DBG_BATMAN, bat_priv,
				"Gateway %pM removed from gateway list\n",
				orig_node->orig);

			/* deselect outside the RCU read section since
			 * gw_deselect() takes gw_list_lock */
			if (gw_node == rcu_dereference(bat_priv->curr_gw)) {
				rcu_read_unlock();
				gw_deselect(bat_priv);
				return;
			}
		}

		rcu_read_unlock();
		return;
	}
	rcu_read_unlock();

	/* unknown originator that is not announcing: nothing to add */
	if (new_gwflags == 0)
		return;

	gw_node_add(bat_priv, orig_node, new_gwflags);
}
330
/* Remove orig_node from the gateway list: updating with gwflags == 0 marks
 * the matching gw_node as deleted. */
void gw_node_delete(struct bat_priv *bat_priv, struct orig_node *orig_node)
{
	gw_node_update(bat_priv, orig_node, 0);
}
335
336 void gw_node_purge(struct bat_priv *bat_priv)
337 {
338 struct gw_node *gw_node;
339 struct hlist_node *node, *node_tmp;
340 unsigned long timeout = 2 * PURGE_TIMEOUT * HZ;
341
342 spin_lock_bh(&bat_priv->gw_list_lock);
343
344 hlist_for_each_entry_safe(gw_node, node, node_tmp,
345 &bat_priv->gw_list, list) {
346 if (((!gw_node->deleted) ||
347 (time_before(jiffies, gw_node->deleted + timeout))) &&
348 atomic_read(&bat_priv->mesh_state) == MESH_ACTIVE)
349 continue;
350
351 if (rcu_dereference(bat_priv->curr_gw) == gw_node)
352 gw_deselect(bat_priv);
353
354 hlist_del_rcu(&gw_node->list);
355 gw_node_free_ref(gw_node);
356 }
357
358
359 spin_unlock_bh(&bat_priv->gw_list_lock);
360 }
361
362 static int _write_buffer_text(struct bat_priv *bat_priv,
363 struct seq_file *seq, struct gw_node *gw_node)
364 {
365 struct gw_node *curr_gw;
366 int down, up, ret;
367
368 gw_bandwidth_to_kbit(gw_node->orig_node->gw_flags, &down, &up);
369
370 rcu_read_lock();
371 curr_gw = rcu_dereference(bat_priv->curr_gw);
372
373 ret = seq_printf(seq, "%s %pM (%3i) %pM [%10s]: %3i - %i%s/%i%s\n",
374 (curr_gw == gw_node ? "=>" : " "),
375 gw_node->orig_node->orig,
376 gw_node->orig_node->router->tq_avg,
377 gw_node->orig_node->router->addr,
378 gw_node->orig_node->router->if_incoming->net_dev->name,
379 gw_node->orig_node->gw_flags,
380 (down > 2048 ? down / 1024 : down),
381 (down > 2048 ? "MBit" : "KBit"),
382 (up > 2048 ? up / 1024 : up),
383 (up > 2048 ? "MBit" : "KBit"));
384
385 rcu_read_unlock();
386 return ret;
387 }
388
389 int gw_client_seq_print_text(struct seq_file *seq, void *offset)
390 {
391 struct net_device *net_dev = (struct net_device *)seq->private;
392 struct bat_priv *bat_priv = netdev_priv(net_dev);
393 struct gw_node *gw_node;
394 struct hlist_node *node;
395 int gw_count = 0;
396
397 if (!bat_priv->primary_if) {
398
399 return seq_printf(seq, "BATMAN mesh %s disabled - please "
400 "specify interfaces to enable it\n",
401 net_dev->name);
402 }
403
404 if (bat_priv->primary_if->if_status != IF_ACTIVE) {
405
406 return seq_printf(seq, "BATMAN mesh %s disabled - "
407 "primary interface not active\n",
408 net_dev->name);
409 }
410
411 seq_printf(seq, " %-12s (%s/%i) %17s [%10s]: gw_class ... "
412 "[B.A.T.M.A.N. adv %s%s, MainIF/MAC: %s/%pM (%s)]\n",
413 "Gateway", "#", TQ_MAX_VALUE, "Nexthop",
414 "outgoingIF", SOURCE_VERSION, REVISION_VERSION_STR,
415 bat_priv->primary_if->net_dev->name,
416 bat_priv->primary_if->net_dev->dev_addr, net_dev->name);
417
418 rcu_read_lock();
419 hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw_list, list) {
420 if (gw_node->deleted)
421 continue;
422
423 if (!gw_node->orig_node->router)
424 continue;
425
426 _write_buffer_text(bat_priv, seq, gw_node);
427 gw_count++;
428 }
429 rcu_read_unlock();
430
431 if (gw_count == 0)
432 seq_printf(seq, "No gateways in range ...\n");
433
434 return 0;
435 }
436
437 int gw_is_target(struct bat_priv *bat_priv, struct sk_buff *skb)
438 {
439 struct ethhdr *ethhdr;
440 struct iphdr *iphdr;
441 struct ipv6hdr *ipv6hdr;
442 struct udphdr *udphdr;
443 unsigned int header_len = 0;
444
445 if (atomic_read(&bat_priv->gw_mode) == GW_MODE_OFF)
446 return 0;
447
448 /* check for ethernet header */
449 if (!pskb_may_pull(skb, header_len + ETH_HLEN))
450 return 0;
451 ethhdr = (struct ethhdr *)skb->data;
452 header_len += ETH_HLEN;
453
454 /* check for initial vlan header */
455 if (ntohs(ethhdr->h_proto) == ETH_P_8021Q) {
456 if (!pskb_may_pull(skb, header_len + VLAN_HLEN))
457 return 0;
458 ethhdr = (struct ethhdr *)(skb->data + VLAN_HLEN);
459 header_len += VLAN_HLEN;
460 }
461
462 /* check for ip header */
463 switch (ntohs(ethhdr->h_proto)) {
464 case ETH_P_IP:
465 if (!pskb_may_pull(skb, header_len + sizeof(struct iphdr)))
466 return 0;
467 iphdr = (struct iphdr *)(skb->data + header_len);
468 header_len += iphdr->ihl * 4;
469
470 /* check for udp header */
471 if (iphdr->protocol != IPPROTO_UDP)
472 return 0;
473
474 break;
475 case ETH_P_IPV6:
476 if (!pskb_may_pull(skb, header_len + sizeof(struct ipv6hdr)))
477 return 0;
478 ipv6hdr = (struct ipv6hdr *)(skb->data + header_len);
479 header_len += sizeof(struct ipv6hdr);
480
481 /* check for udp header */
482 if (ipv6hdr->nexthdr != IPPROTO_UDP)
483 return 0;
484
485 break;
486 default:
487 return 0;
488 }
489
490 if (!pskb_may_pull(skb, header_len + sizeof(struct udphdr)))
491 return 0;
492 udphdr = (struct udphdr *)(skb->data + header_len);
493 header_len += sizeof(struct udphdr);
494
495 /* check for bootp port */
496 if ((ntohs(ethhdr->h_proto) == ETH_P_IP) &&
497 (ntohs(udphdr->dest) != 67))
498 return 0;
499
500 if ((ntohs(ethhdr->h_proto) == ETH_P_IPV6) &&
501 (ntohs(udphdr->dest) != 547))
502 return 0;
503
504 if (atomic_read(&bat_priv->gw_mode) == GW_MODE_SERVER)
505 return -1;
506
507 rcu_read_lock();
508 if (!rcu_dereference(bat_priv->curr_gw)) {
509 rcu_read_unlock();
510 return 0;
511 }
512 rcu_read_unlock();
513
514 return 1;
515 }
This page took 0.04442 seconds and 5 git commands to generate.