batman-adv: Simplify gw_check_election(), use gw_get_selected()
[deliverable/linux.git] net/batman-adv/gateway_client.c
/*
 * Copyright (C) 2009-2011 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 *
 */

#include "main.h"
#include "gateway_client.h"
#include "gateway_common.h"
#include "hard-interface.h"
#include "originator.h"
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/udp.h>
#include <linux/if_vlan.h>

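/*
 * gw_node objects are reference counted and freed via RCU: the final
 * gw_node_free_ref() schedules gw_node_free_rcu(), so concurrent RCU
 * readers walking bat_priv->gw_list never see freed memory.
 */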
static void gw_node_free_rcu(struct rcu_head *rcu)
{
	struct gw_node *gw_node;

	gw_node = container_of(rcu, struct gw_node, rcu);
	kfree(gw_node);
}

static void gw_node_free_ref(struct gw_node *gw_node)
{
	if (atomic_dec_and_test(&gw_node->refcount))
		call_rcu(&gw_node->rcu, gw_node_free_rcu);
}

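/*
 * gw_get_selected() - return the originator behind the currently
 * selected gateway, with its refcount increased, or NULL if no gateway
 * is selected (or its originator is already being freed). The caller
 * must release the reference with orig_node_free_ref(), as
 * gw_check_election() below does.
 */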
struct orig_node *gw_get_selected(struct bat_priv *bat_priv)
{
	struct gw_node *curr_gateway_tmp;
	struct orig_node *orig_node = NULL;

	rcu_read_lock();
	curr_gateway_tmp = rcu_dereference(bat_priv->curr_gw);
	if (!curr_gateway_tmp)
		goto out;

	orig_node = curr_gateway_tmp->orig_node;
	if (!orig_node)
		goto out;

	if (!atomic_inc_not_zero(&orig_node->refcount))
		orig_node = NULL;

out:
	rcu_read_unlock();
	return orig_node;
}

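/*
 * gw_deselect() - clear the currently selected gateway (if any) and
 * drop the reference that bat_priv->curr_gw held on it.
 */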
void gw_deselect(struct bat_priv *bat_priv)
{
	struct gw_node *gw_node;

	spin_lock_bh(&bat_priv->gw_list_lock);
	gw_node = rcu_dereference(bat_priv->curr_gw);
	rcu_assign_pointer(bat_priv->curr_gw, NULL);
	spin_unlock_bh(&bat_priv->gw_list_lock);

	if (gw_node)
		gw_node_free_ref(gw_node);
}

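/*
 * gw_select() - make new_gw_node the selected gateway: take a
 * reference on the new node (unless it is already being freed) and
 * drop the reference held on the previously selected node, if any.
 */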
static void gw_select(struct bat_priv *bat_priv, struct gw_node *new_gw_node)
{
	struct gw_node *curr_gw_node;

	if (new_gw_node && !atomic_inc_not_zero(&new_gw_node->refcount))
		new_gw_node = NULL;

	spin_lock_bh(&bat_priv->gw_list_lock);
	curr_gw_node = rcu_dereference(bat_priv->curr_gw);
	rcu_assign_pointer(bat_priv->curr_gw, new_gw_node);
	spin_unlock_bh(&bat_priv->gw_list_lock);

	if (curr_gw_node)
		gw_node_free_ref(curr_gw_node);
}

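/*
 * gw_election() - pick a gateway from bat_priv->gw_list according to
 * the configured gw_sel_class: class 1 weighs the (squared) TQ towards
 * the gateway against its advertised downlink bandwidth, all other
 * classes simply take the best TQ. Nothing is done unless gw_mode is
 * GW_MODE_CLIENT and no gateway is currently selected.
 */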
void gw_election(struct bat_priv *bat_priv)
{
	struct hlist_node *node;
	struct gw_node *gw_node, *curr_gw, *curr_gw_tmp = NULL;
	uint8_t max_tq = 0;
	uint32_t max_gw_factor = 0, tmp_gw_factor = 0;
	int down, up;

	/**
	 * The batman daemon checks here if we already passed a full originator
	 * cycle in order to make sure we don't choose the first gateway we
	 * hear about. This check is based on the daemon's uptime which we
	 * don't have.
	 **/
	if (atomic_read(&bat_priv->gw_mode) != GW_MODE_CLIENT)
		return;

	rcu_read_lock();
	curr_gw = rcu_dereference(bat_priv->curr_gw);
	if (curr_gw) {
		rcu_read_unlock();
		return;
	}

	if (hlist_empty(&bat_priv->gw_list)) {

		if (curr_gw) {
			rcu_read_unlock();
			bat_dbg(DBG_BATMAN, bat_priv,
				"Removing selected gateway - "
				"no gateway in range\n");
			gw_deselect(bat_priv);
		} else
			rcu_read_unlock();

		return;
	}

	hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw_list, list) {
		if (!gw_node->orig_node->router)
			continue;

		if (gw_node->deleted)
			continue;

		switch (atomic_read(&bat_priv->gw_sel_class)) {
		case 1: /* fast connection */
			gw_bandwidth_to_kbit(gw_node->orig_node->gw_flags,
					     &down, &up);

			tmp_gw_factor = (gw_node->orig_node->router->tq_avg *
					 gw_node->orig_node->router->tq_avg *
					 down * 100 * 100) /
					 (TQ_LOCAL_WINDOW_SIZE *
					  TQ_LOCAL_WINDOW_SIZE * 64);

			if ((tmp_gw_factor > max_gw_factor) ||
			    ((tmp_gw_factor == max_gw_factor) &&
			     (gw_node->orig_node->router->tq_avg > max_tq)))
				curr_gw_tmp = gw_node;
			break;

		default: /**
			  * 2:  stable connection (use best statistic)
			  * 3:  fast-switch (use best statistic but change as
			  *     soon as a better gateway appears)
			  * XX: late-switch (use best statistic but change as
			  *     soon as a better gateway appears which has
			  *     $routing_class more tq points)
			  **/
			if (gw_node->orig_node->router->tq_avg > max_tq)
				curr_gw_tmp = gw_node;
			break;
		}

		if (gw_node->orig_node->router->tq_avg > max_tq)
			max_tq = gw_node->orig_node->router->tq_avg;

		if (tmp_gw_factor > max_gw_factor)
			max_gw_factor = tmp_gw_factor;
	}

	if (curr_gw != curr_gw_tmp) {
		if ((curr_gw) && (!curr_gw_tmp))
			bat_dbg(DBG_BATMAN, bat_priv,
				"Removing selected gateway - "
				"no gateway in range\n");
		else if ((!curr_gw) && (curr_gw_tmp))
			bat_dbg(DBG_BATMAN, bat_priv,
				"Adding route to gateway %pM "
				"(gw_flags: %i, tq: %i)\n",
				curr_gw_tmp->orig_node->orig,
				curr_gw_tmp->orig_node->gw_flags,
				curr_gw_tmp->orig_node->router->tq_avg);
		else
			bat_dbg(DBG_BATMAN, bat_priv,
				"Changing route to gateway %pM "
				"(gw_flags: %i, tq: %i)\n",
				curr_gw_tmp->orig_node->orig,
				curr_gw_tmp->orig_node->gw_flags,
				curr_gw_tmp->orig_node->router->tq_avg);

		gw_select(bat_priv, curr_gw_tmp);
	}

	rcu_read_unlock();
}

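/*
 * gw_check_election() - re-evaluate the current gateway when an
 * originator message is received. If orig_node advertises a better TQ
 * than the selected gateway (by at least gw_sel_class points when the
 * class is greater than 3), the current gateway is deselected so that
 * the next gw_election() run can pick the better candidate.
 */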
void gw_check_election(struct bat_priv *bat_priv, struct orig_node *orig_node)
{
	struct orig_node *curr_gw_orig;
	uint8_t gw_tq_avg, orig_tq_avg;

	curr_gw_orig = gw_get_selected(bat_priv);
	if (!curr_gw_orig)
		goto deselect;

	rcu_read_lock();
	if (!curr_gw_orig->router)
		goto deselect_rcu;

	/* this node already is the gateway */
	if (curr_gw_orig == orig_node)
		goto out_rcu;

	if (!orig_node->router)
		goto out_rcu;

	gw_tq_avg = curr_gw_orig->router->tq_avg;
	rcu_read_unlock();

	orig_tq_avg = orig_node->router->tq_avg;

	/* the TQ value has to be better */
	if (orig_tq_avg < gw_tq_avg)
		goto out;

	/**
	 * if the routing class is greater than 3 the value tells us how much
	 * greater the TQ value of the new gateway must be
	 **/
	if ((atomic_read(&bat_priv->gw_sel_class) > 3) &&
	    (orig_tq_avg - gw_tq_avg < atomic_read(&bat_priv->gw_sel_class)))
		goto out;

	bat_dbg(DBG_BATMAN, bat_priv,
		"Restarting gateway selection: better gateway found (tq curr: "
		"%i, tq new: %i)\n",
		gw_tq_avg, orig_tq_avg);
	goto deselect;

out_rcu:
	rcu_read_unlock();
	goto out;
deselect_rcu:
	rcu_read_unlock();
deselect:
	gw_deselect(bat_priv);
out:
	if (curr_gw_orig)
		orig_node_free_ref(curr_gw_orig);

	return;
}

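/*
 * gw_node_add() - create a gw_node for orig_node and add it to
 * bat_priv->gw_list. The initial reference is owned by the list and is
 * dropped again when the entry is removed in gw_node_purge(); the
 * advertised bandwidth is only decoded here for the debug message.
 */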
static void gw_node_add(struct bat_priv *bat_priv,
			struct orig_node *orig_node, uint8_t new_gwflags)
{
	struct gw_node *gw_node;
	int down, up;

	gw_node = kmalloc(sizeof(struct gw_node), GFP_ATOMIC);
	if (!gw_node)
		return;

	memset(gw_node, 0, sizeof(struct gw_node));
	INIT_HLIST_NODE(&gw_node->list);
	gw_node->orig_node = orig_node;
	atomic_set(&gw_node->refcount, 1);

	spin_lock_bh(&bat_priv->gw_list_lock);
	hlist_add_head_rcu(&gw_node->list, &bat_priv->gw_list);
	spin_unlock_bh(&bat_priv->gw_list_lock);

	gw_bandwidth_to_kbit(new_gwflags, &down, &up);
	bat_dbg(DBG_BATMAN, bat_priv,
		"Found new gateway %pM -> gw_class: %i - %i%s/%i%s\n",
		orig_node->orig, new_gwflags,
		(down > 2048 ? down / 1024 : down),
		(down > 2048 ? "MBit" : "KBit"),
		(up > 2048 ? up / 1024 : up),
		(up > 2048 ? "MBit" : "KBit"));
}

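/*
 * gw_node_update() - refresh the gateway entry of orig_node after its
 * gateway class changed. A gw_class of 0 marks the entry as deleted
 * (and deselects it if it was the current gateway); an unknown
 * originator with a non-zero class gets a new entry via gw_node_add().
 */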
void gw_node_update(struct bat_priv *bat_priv,
		    struct orig_node *orig_node, uint8_t new_gwflags)
{
	struct hlist_node *node;
	struct gw_node *gw_node;

	rcu_read_lock();
	hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw_list, list) {
		if (gw_node->orig_node != orig_node)
			continue;

		bat_dbg(DBG_BATMAN, bat_priv,
			"Gateway class of originator %pM changed from "
			"%i to %i\n",
			orig_node->orig, gw_node->orig_node->gw_flags,
			new_gwflags);

		gw_node->deleted = 0;

		if (new_gwflags == 0) {
			gw_node->deleted = jiffies;
			bat_dbg(DBG_BATMAN, bat_priv,
				"Gateway %pM removed from gateway list\n",
				orig_node->orig);

			if (gw_node == rcu_dereference(bat_priv->curr_gw)) {
				rcu_read_unlock();
				gw_deselect(bat_priv);
				return;
			}
		}

		rcu_read_unlock();
		return;
	}
	rcu_read_unlock();

	if (new_gwflags == 0)
		return;

	gw_node_add(bat_priv, orig_node, new_gwflags);
}

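/* Deleting a gateway is simply an update to gw_class 0. */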
void gw_node_delete(struct bat_priv *bat_priv, struct orig_node *orig_node)
{
	return gw_node_update(bat_priv, orig_node, 0);
}

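/*
 * gw_node_purge() - remove gw_node entries that have been marked as
 * deleted for longer than twice the purge timeout, or all entries once
 * the mesh is no longer active.
 */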
void gw_node_purge(struct bat_priv *bat_priv)
{
	struct gw_node *gw_node;
	struct hlist_node *node, *node_tmp;
	unsigned long timeout = 2 * PURGE_TIMEOUT * HZ;

	spin_lock_bh(&bat_priv->gw_list_lock);

	hlist_for_each_entry_safe(gw_node, node, node_tmp,
				  &bat_priv->gw_list, list) {
		if (((!gw_node->deleted) ||
		     (time_before(jiffies, gw_node->deleted + timeout))) &&
		    atomic_read(&bat_priv->mesh_state) == MESH_ACTIVE)
			continue;

		if (rcu_dereference(bat_priv->curr_gw) == gw_node)
			gw_deselect(bat_priv);

		hlist_del_rcu(&gw_node->list);
		gw_node_free_ref(gw_node);
	}

	spin_unlock_bh(&bat_priv->gw_list_lock);
}

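/* Print a single gateway entry for the seq_file gateway list output. */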
static int _write_buffer_text(struct bat_priv *bat_priv,
			      struct seq_file *seq, struct gw_node *gw_node)
{
	struct gw_node *curr_gw;
	int down, up, ret;

	gw_bandwidth_to_kbit(gw_node->orig_node->gw_flags, &down, &up);

	rcu_read_lock();
	curr_gw = rcu_dereference(bat_priv->curr_gw);

	ret = seq_printf(seq, "%s %pM (%3i) %pM [%10s]: %3i - %i%s/%i%s\n",
			 (curr_gw == gw_node ? "=>" : "  "),
			 gw_node->orig_node->orig,
			 gw_node->orig_node->router->tq_avg,
			 gw_node->orig_node->router->addr,
			 gw_node->orig_node->router->if_incoming->net_dev->name,
			 gw_node->orig_node->gw_flags,
			 (down > 2048 ? down / 1024 : down),
			 (down > 2048 ? "MBit" : "KBit"),
			 (up > 2048 ? up / 1024 : up),
			 (up > 2048 ? "MBit" : "KBit"));

	rcu_read_unlock();
	return ret;
}

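/*
 * gw_client_seq_print_text() - seq_file handler that dumps all known
 * gateways (skipping deleted entries and entries without a router),
 * marking the currently selected one with "=>".
 */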
int gw_client_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct bat_priv *bat_priv = netdev_priv(net_dev);
	struct gw_node *gw_node;
	struct hlist_node *node;
	int gw_count = 0;

	if (!bat_priv->primary_if) {

		return seq_printf(seq, "BATMAN mesh %s disabled - please "
				  "specify interfaces to enable it\n",
				  net_dev->name);
	}

	if (bat_priv->primary_if->if_status != IF_ACTIVE) {

		return seq_printf(seq, "BATMAN mesh %s disabled - "
				  "primary interface not active\n",
				  net_dev->name);
	}

	seq_printf(seq, " %-12s (%s/%i) %17s [%10s]: gw_class ... "
		   "[B.A.T.M.A.N. adv %s%s, MainIF/MAC: %s/%pM (%s)]\n",
		   "Gateway", "#", TQ_MAX_VALUE, "Nexthop",
		   "outgoingIF", SOURCE_VERSION, REVISION_VERSION_STR,
		   bat_priv->primary_if->net_dev->name,
		   bat_priv->primary_if->net_dev->dev_addr, net_dev->name);

	rcu_read_lock();
	hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw_list, list) {
		if (gw_node->deleted)
			continue;

		if (!gw_node->orig_node->router)
			continue;

		_write_buffer_text(bat_priv, seq, gw_node);
		gw_count++;
	}
	rcu_read_unlock();

	if (gw_count == 0)
		seq_printf(seq, "No gateways in range ...\n");

	return 0;
}

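/*
 * gw_is_target() - decide whether an outgoing packet should be handled
 * by the gateway feature. Only DHCP requests are of interest: IPv4 UDP
 * to port 67 or IPv6 UDP to port 547. Returns 1 if such a packet should
 * go to the selected gateway, -1 if this node is a gateway server
 * itself, and 0 otherwise (non-DHCP traffic, gw_mode off, or no gateway
 * selected).
 */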
int gw_is_target(struct bat_priv *bat_priv, struct sk_buff *skb)
{
	struct ethhdr *ethhdr;
	struct iphdr *iphdr;
	struct ipv6hdr *ipv6hdr;
	struct udphdr *udphdr;
	unsigned int header_len = 0;

	if (atomic_read(&bat_priv->gw_mode) == GW_MODE_OFF)
		return 0;

	/* check for ethernet header */
	if (!pskb_may_pull(skb, header_len + ETH_HLEN))
		return 0;
	ethhdr = (struct ethhdr *)skb->data;
	header_len += ETH_HLEN;

	/* check for initial vlan header */
	if (ntohs(ethhdr->h_proto) == ETH_P_8021Q) {
		if (!pskb_may_pull(skb, header_len + VLAN_HLEN))
			return 0;
		ethhdr = (struct ethhdr *)(skb->data + VLAN_HLEN);
		header_len += VLAN_HLEN;
	}

	/* check for ip header */
	switch (ntohs(ethhdr->h_proto)) {
	case ETH_P_IP:
		if (!pskb_may_pull(skb, header_len + sizeof(struct iphdr)))
			return 0;
		iphdr = (struct iphdr *)(skb->data + header_len);
		header_len += iphdr->ihl * 4;

		/* check for udp header */
		if (iphdr->protocol != IPPROTO_UDP)
			return 0;

		break;
	case ETH_P_IPV6:
		if (!pskb_may_pull(skb, header_len + sizeof(struct ipv6hdr)))
			return 0;
		ipv6hdr = (struct ipv6hdr *)(skb->data + header_len);
		header_len += sizeof(struct ipv6hdr);

		/* check for udp header */
		if (ipv6hdr->nexthdr != IPPROTO_UDP)
			return 0;

		break;
	default:
		return 0;
	}

	if (!pskb_may_pull(skb, header_len + sizeof(struct udphdr)))
		return 0;
	udphdr = (struct udphdr *)(skb->data + header_len);
	header_len += sizeof(struct udphdr);

	/* check for bootp port */
	if ((ntohs(ethhdr->h_proto) == ETH_P_IP) &&
	    (ntohs(udphdr->dest) != 67))
		return 0;

	if ((ntohs(ethhdr->h_proto) == ETH_P_IPV6) &&
	    (ntohs(udphdr->dest) != 547))
		return 0;

	if (atomic_read(&bat_priv->gw_mode) == GW_MODE_SERVER)
		return -1;

	rcu_read_lock();
	if (!rcu_dereference(bat_priv->curr_gw)) {
		rcu_read_unlock();
		return 0;
	}
	rcu_read_unlock();

	return 1;
}