batman-adv: Prefix packet structs with batadv_
net/batman-adv/main.c
/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#include "main.h"
#include "bat_sysfs.h"
#include "bat_debugfs.h"
#include "routing.h"
#include "send.h"
#include "originator.h"
#include "soft-interface.h"
#include "icmp_socket.h"
#include "translation-table.h"
#include "hard-interface.h"
#include "gateway_client.h"
#include "bridge_loop_avoidance.h"
#include "vis.h"
#include "hash.h"
#include "bat_algo.h"


/* List manipulations on hardif_list have to be rtnl_lock()'ed,
 * list traversals just rcu-locked
 */
struct list_head batadv_hardif_list;
static int (*batadv_rx_handler[256])(struct sk_buff *,
                                     struct hard_iface *);
char batadv_routing_algo[20] = "BATMAN_IV";
static struct hlist_head batadv_algo_list;

unsigned char batadv_broadcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};

struct workqueue_struct *batadv_event_workqueue;

static void batadv_recv_handler_init(void);

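/* Illustrative sketch (compiled out via #if 0): how the locking rule
 * documented above is meant to be applied. The real add/remove paths live
 * in hard-interface.c; the helper names below are hypothetical examples.
 */
#if 0
static void batadv_example_hardif_add(struct hard_iface *hard_iface)
{
        /* writers: manipulate the list only while holding rtnl_lock() */
        rtnl_lock();
        list_add_tail_rcu(&hard_iface->list, &batadv_hardif_list);
        rtnl_unlock();
}

static bool batadv_example_hardif_present(const struct net_device *net_dev)
{
        const struct hard_iface *hard_iface;
        bool found = false;

        /* readers: a plain traversal only needs rcu_read_lock() */
        rcu_read_lock();
        list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
                if (hard_iface->net_dev == net_dev) {
                        found = true;
                        break;
                }
        }
        rcu_read_unlock();

        return found;
}
#endif
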
static int __init batadv_init(void)
{
        INIT_LIST_HEAD(&batadv_hardif_list);
        INIT_HLIST_HEAD(&batadv_algo_list);

        batadv_recv_handler_init();

        batadv_iv_init();

        /* the name should not be longer than 10 chars - see
         * http://lwn.net/Articles/23634/
         */
        batadv_event_workqueue = create_singlethread_workqueue("bat_events");

        if (!batadv_event_workqueue)
                return -ENOMEM;

        batadv_socket_init();
        batadv_debugfs_init();

        register_netdevice_notifier(&batadv_hard_if_notifier);

        pr_info("B.A.T.M.A.N. advanced %s (compatibility version %i) loaded\n",
                BATADV_SOURCE_VERSION, BATADV_COMPAT_VERSION);

        return 0;
}

static void __exit batadv_exit(void)
{
        batadv_debugfs_destroy();
        unregister_netdevice_notifier(&batadv_hard_if_notifier);
        batadv_hardif_remove_interfaces();

        flush_workqueue(batadv_event_workqueue);
        destroy_workqueue(batadv_event_workqueue);
        batadv_event_workqueue = NULL;

        rcu_barrier();
}

int batadv_mesh_init(struct net_device *soft_iface)
{
        struct bat_priv *bat_priv = netdev_priv(soft_iface);
        int ret;

        spin_lock_init(&bat_priv->forw_bat_list_lock);
        spin_lock_init(&bat_priv->forw_bcast_list_lock);
        spin_lock_init(&bat_priv->tt_changes_list_lock);
        spin_lock_init(&bat_priv->tt_req_list_lock);
        spin_lock_init(&bat_priv->tt_roam_list_lock);
        spin_lock_init(&bat_priv->tt_buff_lock);
        spin_lock_init(&bat_priv->gw_list_lock);
        spin_lock_init(&bat_priv->vis_hash_lock);
        spin_lock_init(&bat_priv->vis_list_lock);

        INIT_HLIST_HEAD(&bat_priv->forw_bat_list);
        INIT_HLIST_HEAD(&bat_priv->forw_bcast_list);
        INIT_HLIST_HEAD(&bat_priv->gw_list);
        INIT_LIST_HEAD(&bat_priv->tt_changes_list);
        INIT_LIST_HEAD(&bat_priv->tt_req_list);
        INIT_LIST_HEAD(&bat_priv->tt_roam_list);

        ret = batadv_originator_init(bat_priv);
        if (ret < 0)
                goto err;

        ret = batadv_tt_init(bat_priv);
        if (ret < 0)
                goto err;

        batadv_tt_local_add(soft_iface, soft_iface->dev_addr,
                            BATADV_NULL_IFINDEX);

        ret = batadv_vis_init(bat_priv);
        if (ret < 0)
                goto err;

        ret = batadv_bla_init(bat_priv);
        if (ret < 0)
                goto err;

        atomic_set(&bat_priv->gw_reselect, 0);
        atomic_set(&bat_priv->mesh_state, BATADV_MESH_ACTIVE);

        return 0;

err:
        batadv_mesh_free(soft_iface);
        return ret;
}

void batadv_mesh_free(struct net_device *soft_iface)
{
        struct bat_priv *bat_priv = netdev_priv(soft_iface);

        atomic_set(&bat_priv->mesh_state, BATADV_MESH_DEACTIVATING);

        batadv_purge_outstanding_packets(bat_priv, NULL);

        batadv_vis_quit(bat_priv);

        batadv_gw_node_purge(bat_priv);
        batadv_originator_free(bat_priv);

        batadv_tt_free(bat_priv);

        batadv_bla_free(bat_priv);

        free_percpu(bat_priv->bat_counters);

        atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
}
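
/* Illustrative sketch (compiled out via #if 0): batadv_mesh_init() and
 * batadv_mesh_free() are meant to be called in pairs around the lifetime
 * of a soft interface; on a partial init failure batadv_mesh_init() already
 * cleans up after itself through the err: path above. The real callers are
 * in soft-interface.c; the helper below is a hypothetical example.
 */
#if 0
static int batadv_example_mesh_lifecycle(struct net_device *soft_iface)
{
        int ret;

        ret = batadv_mesh_init(soft_iface);
        if (ret < 0)
                return ret;     /* nothing left to undo here */

        /* ... mesh is usable while mesh_state == BATADV_MESH_ACTIVE ... */

        batadv_mesh_free(soft_iface);
        return 0;
}
#endif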

void batadv_inc_module_count(void)
{
        try_module_get(THIS_MODULE);
}

void batadv_dec_module_count(void)
{
        module_put(THIS_MODULE);
}

int batadv_is_my_mac(const uint8_t *addr)
{
        const struct hard_iface *hard_iface;

        rcu_read_lock();
        list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
                if (hard_iface->if_status != BATADV_IF_ACTIVE)
                        continue;

                if (batadv_compare_eth(hard_iface->net_dev->dev_addr, addr)) {
                        rcu_read_unlock();
                        return 1;
                }
        }
        rcu_read_unlock();
        return 0;
}

static int batadv_recv_unhandled_packet(struct sk_buff *skb,
                                        struct hard_iface *recv_if)
{
        return NET_RX_DROP;
}

/* incoming packets with the batman ethertype received on any active hard
 * interface
 */
int batadv_batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
                           struct packet_type *ptype,
                           struct net_device *orig_dev)
{
        struct bat_priv *bat_priv;
        struct batadv_ogm_packet *batadv_ogm_packet;
        struct hard_iface *hard_iface;
        uint8_t idx;
        int ret;

        hard_iface = container_of(ptype, struct hard_iface, batman_adv_ptype);
        skb = skb_share_check(skb, GFP_ATOMIC);

        /* skb was released by skb_share_check() */
        if (!skb)
                goto err_out;

        /* packet should hold at least type and version */
        if (unlikely(!pskb_may_pull(skb, 2)))
                goto err_free;

        /* expect a valid ethernet header here. */
        if (unlikely(skb->mac_len != ETH_HLEN || !skb_mac_header(skb)))
                goto err_free;

        if (!hard_iface->soft_iface)
                goto err_free;

        bat_priv = netdev_priv(hard_iface->soft_iface);

        if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
                goto err_free;

        /* discard frames received on interfaces that are not active */
        if (hard_iface->if_status != BATADV_IF_ACTIVE)
                goto err_free;

        batadv_ogm_packet = (struct batadv_ogm_packet *)skb->data;

        if (batadv_ogm_packet->header.version != BATADV_COMPAT_VERSION) {
                batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
                           "Drop packet: incompatible batman version (%i)\n",
                           batadv_ogm_packet->header.version);
                goto err_free;
        }

        /* all receive handlers return whether they received or reused the
         * supplied skb; if not, we have to free the skb here.
         */
        idx = batadv_ogm_packet->header.packet_type;
        ret = (*batadv_rx_handler[idx])(skb, hard_iface);

        if (ret == NET_RX_DROP)
                kfree_skb(skb);

        /* return NET_RX_SUCCESS in any case as we
         * most probably dropped the packet for
         * routing-logical reasons.
         */
        return NET_RX_SUCCESS;

err_free:
        kfree_skb(skb);
err_out:
        return NET_RX_DROP;
}
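
/* Illustrative sketch (compiled out via #if 0): batadv_batman_skb_recv() is
 * not called directly - it is registered per hard interface as a
 * packet_type handler (the real registration lives in hard-interface.c).
 * A minimal wiring would look roughly like this; 0x4305 is the batman-adv
 * ethertype.
 */
#if 0
static void batadv_example_hook_recv(struct hard_iface *hard_iface)
{
        hard_iface->batman_adv_ptype.type = __constant_htons(0x4305);
        hard_iface->batman_adv_ptype.func = batadv_batman_skb_recv;
        hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
        dev_add_pack(&hard_iface->batman_adv_ptype);

        /* the matching teardown would call dev_remove_pack() */
}
#endif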

static void batadv_recv_handler_init(void)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(batadv_rx_handler); i++)
                batadv_rx_handler[i] = batadv_recv_unhandled_packet;

        /* batman icmp packet */
        batadv_rx_handler[BATADV_ICMP] = batadv_recv_icmp_packet;
        /* unicast packet */
        batadv_rx_handler[BATADV_UNICAST] = batadv_recv_unicast_packet;
        /* fragmented unicast packet */
        batadv_rx_handler[BATADV_UNICAST_FRAG] = batadv_recv_ucast_frag_packet;
        /* broadcast packet */
        batadv_rx_handler[BATADV_BCAST] = batadv_recv_bcast_packet;
        /* vis packet */
        batadv_rx_handler[BATADV_VIS] = batadv_recv_vis_packet;
        /* Translation table query (request or response) */
        batadv_rx_handler[BATADV_TT_QUERY] = batadv_recv_tt_query;
        /* Roaming advertisement */
        batadv_rx_handler[BATADV_ROAM_ADV] = batadv_recv_roam_adv;
}

int batadv_recv_handler_register(uint8_t packet_type,
                                 int (*recv_handler)(struct sk_buff *,
                                                     struct hard_iface *))
{
        if (batadv_rx_handler[packet_type] != &batadv_recv_unhandled_packet)
                return -EBUSY;

        batadv_rx_handler[packet_type] = recv_handler;
        return 0;
}

void batadv_recv_handler_unregister(uint8_t packet_type)
{
        batadv_rx_handler[packet_type] = batadv_recv_unhandled_packet;
}
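
/* Illustrative sketch (compiled out via #if 0): how a component claims one
 * of the 256 packet-type slots dispatched by batadv_batman_skb_recv(). The
 * BATMAN_IV implementation does this for its OGM packets; the packet type
 * value and the handler below are hypothetical.
 */
#if 0
static int batadv_example_recv_foo(struct sk_buff *skb,
                                   struct hard_iface *recv_if)
{
        /* either consume/reuse the skb and return NET_RX_SUCCESS, or
         * return NET_RX_DROP and let the caller free it
         */
        return NET_RX_DROP;
}

static int batadv_example_register_foo(void)
{
        /* fails with -EBUSY if the slot is already taken */
        return batadv_recv_handler_register(0xf0, batadv_example_recv_foo);
}
#endif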

static struct bat_algo_ops *batadv_algo_get(char *name)
{
        struct bat_algo_ops *bat_algo_ops = NULL, *bat_algo_ops_tmp;
        struct hlist_node *node;

        hlist_for_each_entry(bat_algo_ops_tmp, node, &batadv_algo_list, list) {
                if (strcmp(bat_algo_ops_tmp->name, name) != 0)
                        continue;

                bat_algo_ops = bat_algo_ops_tmp;
                break;
        }

        return bat_algo_ops;
}

int batadv_algo_register(struct bat_algo_ops *bat_algo_ops)
{
        struct bat_algo_ops *bat_algo_ops_tmp;
        int ret;

        bat_algo_ops_tmp = batadv_algo_get(bat_algo_ops->name);
        if (bat_algo_ops_tmp) {
                pr_info("Trying to register already registered routing algorithm: %s\n",
                        bat_algo_ops->name);
                ret = -EEXIST;
                goto out;
        }

        /* all algorithms must implement all ops (for now) */
        if (!bat_algo_ops->bat_iface_enable ||
            !bat_algo_ops->bat_iface_disable ||
            !bat_algo_ops->bat_iface_update_mac ||
            !bat_algo_ops->bat_primary_iface_set ||
            !bat_algo_ops->bat_ogm_schedule ||
            !bat_algo_ops->bat_ogm_emit) {
                pr_info("Routing algo '%s' does not implement required ops\n",
                        bat_algo_ops->name);
                ret = -EINVAL;
                goto out;
        }

        INIT_HLIST_NODE(&bat_algo_ops->list);
        hlist_add_head(&bat_algo_ops->list, &batadv_algo_list);
        ret = 0;

out:
        return ret;
}
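
/* Illustrative sketch (compiled out via #if 0): a routing algorithm fills
 * in every op checked above plus a unique name and registers itself once
 * at init time (BATMAN_IV, selected by default, is the in-tree example).
 * The ops structure and callback names below are hypothetical.
 */
#if 0
static struct bat_algo_ops batadv_example_algo_ops = {
        .name                   = "BATMAN_EXAMPLE",
        .bat_iface_enable       = batadv_example_iface_enable,
        .bat_iface_disable      = batadv_example_iface_disable,
        .bat_iface_update_mac   = batadv_example_iface_update_mac,
        .bat_primary_iface_set  = batadv_example_primary_iface_set,
        .bat_ogm_schedule       = batadv_example_ogm_schedule,
        .bat_ogm_emit           = batadv_example_ogm_emit,
};

static int __init batadv_example_algo_init(void)
{
        /* -EEXIST for a duplicate name, -EINVAL for missing ops */
        return batadv_algo_register(&batadv_example_algo_ops);
}
#endif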

int batadv_algo_select(struct bat_priv *bat_priv, char *name)
{
        struct bat_algo_ops *bat_algo_ops;
        int ret = -EINVAL;

        bat_algo_ops = batadv_algo_get(name);
        if (!bat_algo_ops)
                goto out;

        bat_priv->bat_algo_ops = bat_algo_ops;
        ret = 0;

out:
        return ret;
}

int batadv_algo_seq_print_text(struct seq_file *seq, void *offset)
{
        struct bat_algo_ops *bat_algo_ops;
        struct hlist_node *node;

        seq_printf(seq, "Available routing algorithms:\n");

        hlist_for_each_entry(bat_algo_ops, node, &batadv_algo_list, list) {
                seq_printf(seq, "%s\n", bat_algo_ops->name);
        }

        return 0;
}

static int batadv_param_set_ra(const char *val, const struct kernel_param *kp)
{
        struct bat_algo_ops *bat_algo_ops;
        char *algo_name = (char *)val;
        size_t name_len = strlen(algo_name);

        /* strip a trailing newline, guarding against an empty string */
        if (name_len > 0 && algo_name[name_len - 1] == '\n')
                algo_name[name_len - 1] = '\0';

        bat_algo_ops = batadv_algo_get(algo_name);
        if (!bat_algo_ops) {
                pr_err("Routing algorithm '%s' is not supported\n", algo_name);
                return -EINVAL;
        }

        return param_set_copystring(algo_name, kp);
}

static const struct kernel_param_ops batadv_param_ops_ra = {
        .set = batadv_param_set_ra,
        .get = param_get_string,
};

static struct kparam_string batadv_param_string_ra = {
        .maxlen = sizeof(batadv_routing_algo),
        .string = batadv_routing_algo,
};

module_param_cb(routing_algo, &batadv_param_ops_ra, &batadv_param_string_ra,
                0644);
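
/* Usage note: since the parameter is registered with mode 0644 it can
 * typically be set at load time ("modprobe batman-adv routing_algo=BATMAN_IV")
 * or written later through /sys/module/batman_adv/parameters/routing_algo;
 * either way batadv_param_set_ra() rejects names that were never registered
 * with batadv_algo_register().
 */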
module_init(batadv_init);
module_exit(batadv_exit);

MODULE_LICENSE("GPL");

MODULE_AUTHOR(BATADV_DRIVER_AUTHOR);
MODULE_DESCRIPTION(BATADV_DRIVER_DESC);
MODULE_SUPPORTED_DEVICE(BATADV_DRIVER_DEVICE);
MODULE_VERSION(BATADV_SOURCE_VERSION);