Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jikos/hid
[deliverable/linux.git] / net / batman-adv / main.c
1 /* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
2 *
3 * Marek Lindner, Simon Wunderlich
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of version 2 of the GNU General Public
7 * License as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
17 * 02110-1301, USA
18 */
19
20 #include "main.h"
21 #include "sysfs.h"
22 #include "debugfs.h"
23 #include "routing.h"
24 #include "send.h"
25 #include "originator.h"
26 #include "soft-interface.h"
27 #include "icmp_socket.h"
28 #include "translation-table.h"
29 #include "hard-interface.h"
30 #include "gateway_client.h"
31 #include "bridge_loop_avoidance.h"
32 #include "vis.h"
33 #include "hash.h"
34 #include "bat_algo.h"
35
36
37 /* List manipulations on hardif_list have to be rtnl_lock()'ed,
38 * list traversals just rcu-locked
39 */
/* list of all hard interfaces known to batman-adv; writers hold
 * rtnl_lock, readers traverse under rcu_read_lock (see comment above)
 */
struct list_head batadv_hardif_list;
/* dispatch table, indexed by the batman packet type byte; slots without
 * a registered handler point at batadv_recv_unhandled_packet
 */
static int (*batadv_rx_handler[256])(struct sk_buff *,
				     struct batadv_hard_iface *);
/* name of the routing algorithm selected via the routing_algo module
 * parameter; defaults to B.A.T.M.A.N. IV
 */
char batadv_routing_algo[20] = "BATMAN_IV";
/* list of all registered struct batadv_algo_ops (routing algorithms) */
static struct hlist_head batadv_algo_list;

/* ethernet broadcast address used as destination for broadcast frames */
unsigned char batadv_broadcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};

/* single-threaded workqueue shared by all batman-adv deferred work */
struct workqueue_struct *batadv_event_workqueue;

static void batadv_recv_handler_init(void);
/* Module entry point: initialize global lists, the receive-handler
 * dispatch table and the default routing algorithm, then create the
 * event workqueue and hook up the netdevice notifier.
 *
 * Return: 0 on success, -ENOMEM if the workqueue cannot be created.
 */
static int __init batadv_init(void)
{
	INIT_LIST_HEAD(&batadv_hardif_list);
	INIT_HLIST_HEAD(&batadv_algo_list);

	/* fill the dispatch table before any packet can arrive */
	batadv_recv_handler_init();

	/* registers B.A.T.M.A.N. IV so the default in
	 * batadv_routing_algo is always selectable
	 */
	batadv_iv_init();

	batadv_event_workqueue = create_singlethread_workqueue("bat_events");

	if (!batadv_event_workqueue)
		return -ENOMEM;

	batadv_socket_init();
	batadv_debugfs_init();

	/* NOTE(review): return value of register_netdevice_notifier() is
	 * ignored here - confirm failure is impossible/acceptable
	 */
	register_netdevice_notifier(&batadv_hard_if_notifier);

	pr_info("B.A.T.M.A.N. advanced %s (compatibility version %i) loaded\n",
		BATADV_SOURCE_VERSION, BATADV_COMPAT_VERSION);

	return 0;
}
76
/* Module exit: tear down in reverse dependency order - debugfs first,
 * then stop reacting to netdevice events, detach all hard interfaces,
 * and finally drain and destroy the workqueue.
 */
static void __exit batadv_exit(void)
{
	batadv_debugfs_destroy();
	unregister_netdevice_notifier(&batadv_hard_if_notifier);
	batadv_hardif_remove_interfaces();

	/* flush before destroy so queued work finishes cleanly */
	flush_workqueue(batadv_event_workqueue);
	destroy_workqueue(batadv_event_workqueue);
	batadv_event_workqueue = NULL;

	/* wait for all outstanding RCU callbacks before the module
	 * text/data can go away
	 */
	rcu_barrier();
}
89
/* Initialize the per-softif mesh state: locks, lists and the
 * originator/TT/vis/bla subsystems, then mark the mesh active.
 *
 * @soft_iface: the batman-adv soft interface being brought up
 *
 * Return: 0 on success, a negative errno from the failing subsystem
 * otherwise (partially initialized state is torn down via
 * batadv_mesh_free()).
 */
int batadv_mesh_init(struct net_device *soft_iface)
{
	struct batadv_priv *bat_priv = netdev_priv(soft_iface);
	int ret;

	/* locks must exist before any subsystem below can take them */
	spin_lock_init(&bat_priv->forw_bat_list_lock);
	spin_lock_init(&bat_priv->forw_bcast_list_lock);
	spin_lock_init(&bat_priv->tt.changes_list_lock);
	spin_lock_init(&bat_priv->tt.req_list_lock);
	spin_lock_init(&bat_priv->tt.roam_list_lock);
	spin_lock_init(&bat_priv->tt.last_changeset_lock);
	spin_lock_init(&bat_priv->gw.list_lock);
	spin_lock_init(&bat_priv->vis.hash_lock);
	spin_lock_init(&bat_priv->vis.list_lock);

	INIT_HLIST_HEAD(&bat_priv->forw_bat_list);
	INIT_HLIST_HEAD(&bat_priv->forw_bcast_list);
	INIT_HLIST_HEAD(&bat_priv->gw.list);
	INIT_LIST_HEAD(&bat_priv->tt.changes_list);
	INIT_LIST_HEAD(&bat_priv->tt.req_list);
	INIT_LIST_HEAD(&bat_priv->tt.roam_list);

	ret = batadv_originator_init(bat_priv);
	if (ret < 0)
		goto err;

	ret = batadv_tt_init(bat_priv);
	if (ret < 0)
		goto err;

	/* announce the soft interface's own MAC in the local
	 * translation table
	 */
	batadv_tt_local_add(soft_iface, soft_iface->dev_addr,
			    BATADV_NULL_IFINDEX);

	ret = batadv_vis_init(bat_priv);
	if (ret < 0)
		goto err;

	ret = batadv_bla_init(bat_priv);
	if (ret < 0)
		goto err;

	atomic_set(&bat_priv->gw.reselect, 0);
	atomic_set(&bat_priv->mesh_state, BATADV_MESH_ACTIVE);

	return 0;

err:
	/* unwinds whatever was initialized so far */
	batadv_mesh_free(soft_iface);
	return ret;
}
140
/* Tear down the per-softif mesh state set up by batadv_mesh_init().
 * The mesh is flagged DEACTIVATING first so concurrent receive paths
 * stop accepting packets, then the subsystems are freed in order.
 *
 * @soft_iface: the batman-adv soft interface being shut down
 */
void batadv_mesh_free(struct net_device *soft_iface)
{
	struct batadv_priv *bat_priv = netdev_priv(soft_iface);

	atomic_set(&bat_priv->mesh_state, BATADV_MESH_DEACTIVATING);

	/* cancel queued OGM/broadcast packets before freeing the
	 * structures they reference
	 */
	batadv_purge_outstanding_packets(bat_priv, NULL);

	batadv_vis_quit(bat_priv);

	batadv_gw_node_purge(bat_priv);
	batadv_originator_free(bat_priv);

	batadv_tt_free(bat_priv);

	batadv_bla_free(bat_priv);

	free_percpu(bat_priv->bat_counters);

	atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
}
162
/* Take a reference on this module so it cannot be unloaded while a
 * soft interface exists.
 * NOTE(review): the return value of try_module_get() is ignored, so a
 * failed get (module already unloading) is not reported to the caller.
 */
void batadv_inc_module_count(void)
{
	try_module_get(THIS_MODULE);
}
167
/* Drop the module reference taken by batadv_inc_module_count(). */
void batadv_dec_module_count(void)
{
	module_put(THIS_MODULE);
}
172
173 int batadv_is_my_mac(const uint8_t *addr)
174 {
175 const struct batadv_hard_iface *hard_iface;
176
177 rcu_read_lock();
178 list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
179 if (hard_iface->if_status != BATADV_IF_ACTIVE)
180 continue;
181
182 if (batadv_compare_eth(hard_iface->net_dev->dev_addr, addr)) {
183 rcu_read_unlock();
184 return 1;
185 }
186 }
187 rcu_read_unlock();
188 return 0;
189 }
190
/* Default slot for the rx dispatch table: drops any packet type that
 * has no registered handler. Both parameters are intentionally unused.
 */
static int batadv_recv_unhandled_packet(struct sk_buff *skb,
					struct batadv_hard_iface *recv_if)
{
	return NET_RX_DROP;
}
196
/* Entry point for incoming packets with the batman ethertype received
 * on any active hard interface. Validates the frame, then dispatches
 * it to the handler registered for its packet type.
 *
 * @skb: the received frame
 * @dev: device it arrived on
 * @ptype: packet_type that matched (embedded in our hard_iface)
 * @orig_dev: original receiving device
 *
 * Return: NET_RX_SUCCESS when the packet was consumed (even if dropped
 * for routing reasons), NET_RX_DROP if the skb was already gone.
 */
int batadv_batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
			   struct packet_type *ptype,
			   struct net_device *orig_dev)
{
	struct batadv_priv *bat_priv;
	struct batadv_ogm_packet *batadv_ogm_packet;
	struct batadv_hard_iface *hard_iface;
	uint8_t idx;
	int ret;

	/* recover our hard_iface from the packet_type it registered */
	hard_iface = container_of(ptype, struct batadv_hard_iface,
				  batman_adv_ptype);
	skb = skb_share_check(skb, GFP_ATOMIC);

	/* skb was released by skb_share_check() */
	if (!skb)
		goto err_out;

	/* packet should hold at least type and version */
	if (unlikely(!pskb_may_pull(skb, 2)))
		goto err_free;

	/* expect a valid ethernet header here. */
	if (unlikely(skb->mac_len != ETH_HLEN || !skb_mac_header(skb)))
		goto err_free;

	/* interface may be in the process of being removed */
	if (!hard_iface->soft_iface)
		goto err_free;

	bat_priv = netdev_priv(hard_iface->soft_iface);

	if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
		goto err_free;

	/* discard frames on not active interfaces */
	if (hard_iface->if_status != BATADV_IF_ACTIVE)
		goto err_free;

	batadv_ogm_packet = (struct batadv_ogm_packet *)skb->data;

	if (batadv_ogm_packet->header.version != BATADV_COMPAT_VERSION) {
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "Drop packet: incompatible batman version (%i)\n",
			   batadv_ogm_packet->header.version);
		goto err_free;
	}

	/* all receive handlers return whether they received or reused
	 * the supplied skb. if not, we have to free the skb.
	 */
	idx = batadv_ogm_packet->header.packet_type;
	ret = (*batadv_rx_handler[idx])(skb, hard_iface);

	if (ret == NET_RX_DROP)
		kfree_skb(skb);

	/* return NET_RX_SUCCESS in any case as we
	 * most probably dropped the packet for
	 * routing-logical reasons.
	 */
	return NET_RX_SUCCESS;

err_free:
	kfree_skb(skb);
err_out:
	return NET_RX_DROP;
}
267
268 static void batadv_recv_handler_init(void)
269 {
270 int i;
271
272 for (i = 0; i < ARRAY_SIZE(batadv_rx_handler); i++)
273 batadv_rx_handler[i] = batadv_recv_unhandled_packet;
274
275 /* batman icmp packet */
276 batadv_rx_handler[BATADV_ICMP] = batadv_recv_icmp_packet;
277 /* unicast packet */
278 batadv_rx_handler[BATADV_UNICAST] = batadv_recv_unicast_packet;
279 /* fragmented unicast packet */
280 batadv_rx_handler[BATADV_UNICAST_FRAG] = batadv_recv_ucast_frag_packet;
281 /* broadcast packet */
282 batadv_rx_handler[BATADV_BCAST] = batadv_recv_bcast_packet;
283 /* vis packet */
284 batadv_rx_handler[BATADV_VIS] = batadv_recv_vis_packet;
285 /* Translation table query (request or response) */
286 batadv_rx_handler[BATADV_TT_QUERY] = batadv_recv_tt_query;
287 /* Roaming advertisement */
288 batadv_rx_handler[BATADV_ROAM_ADV] = batadv_recv_roam_adv;
289 }
290
291 int
292 batadv_recv_handler_register(uint8_t packet_type,
293 int (*recv_handler)(struct sk_buff *,
294 struct batadv_hard_iface *))
295 {
296 if (batadv_rx_handler[packet_type] != &batadv_recv_unhandled_packet)
297 return -EBUSY;
298
299 batadv_rx_handler[packet_type] = recv_handler;
300 return 0;
301 }
302
/* Release the handler slot for @packet_type by restoring the
 * drop-everything stub.
 */
void batadv_recv_handler_unregister(uint8_t packet_type)
{
	batadv_rx_handler[packet_type] = batadv_recv_unhandled_packet;
}
307
308 static struct batadv_algo_ops *batadv_algo_get(char *name)
309 {
310 struct batadv_algo_ops *bat_algo_ops = NULL, *bat_algo_ops_tmp;
311 struct hlist_node *node;
312
313 hlist_for_each_entry(bat_algo_ops_tmp, node, &batadv_algo_list, list) {
314 if (strcmp(bat_algo_ops_tmp->name, name) != 0)
315 continue;
316
317 bat_algo_ops = bat_algo_ops_tmp;
318 break;
319 }
320
321 return bat_algo_ops;
322 }
323
324 int batadv_algo_register(struct batadv_algo_ops *bat_algo_ops)
325 {
326 struct batadv_algo_ops *bat_algo_ops_tmp;
327 int ret;
328
329 bat_algo_ops_tmp = batadv_algo_get(bat_algo_ops->name);
330 if (bat_algo_ops_tmp) {
331 pr_info("Trying to register already registered routing algorithm: %s\n",
332 bat_algo_ops->name);
333 ret = -EEXIST;
334 goto out;
335 }
336
337 /* all algorithms must implement all ops (for now) */
338 if (!bat_algo_ops->bat_iface_enable ||
339 !bat_algo_ops->bat_iface_disable ||
340 !bat_algo_ops->bat_iface_update_mac ||
341 !bat_algo_ops->bat_primary_iface_set ||
342 !bat_algo_ops->bat_ogm_schedule ||
343 !bat_algo_ops->bat_ogm_emit) {
344 pr_info("Routing algo '%s' does not implement required ops\n",
345 bat_algo_ops->name);
346 ret = -EINVAL;
347 goto out;
348 }
349
350 INIT_HLIST_NODE(&bat_algo_ops->list);
351 hlist_add_head(&bat_algo_ops->list, &batadv_algo_list);
352 ret = 0;
353
354 out:
355 return ret;
356 }
357
358 int batadv_algo_select(struct batadv_priv *bat_priv, char *name)
359 {
360 struct batadv_algo_ops *bat_algo_ops;
361 int ret = -EINVAL;
362
363 bat_algo_ops = batadv_algo_get(name);
364 if (!bat_algo_ops)
365 goto out;
366
367 bat_priv->bat_algo_ops = bat_algo_ops;
368 ret = 0;
369
370 out:
371 return ret;
372 }
373
374 int batadv_algo_seq_print_text(struct seq_file *seq, void *offset)
375 {
376 struct batadv_algo_ops *bat_algo_ops;
377 struct hlist_node *node;
378
379 seq_printf(seq, "Available routing algorithms:\n");
380
381 hlist_for_each_entry(bat_algo_ops, node, &batadv_algo_list, list) {
382 seq_printf(seq, "%s\n", bat_algo_ops->name);
383 }
384
385 return 0;
386 }
387
388 static int batadv_param_set_ra(const char *val, const struct kernel_param *kp)
389 {
390 struct batadv_algo_ops *bat_algo_ops;
391 char *algo_name = (char *)val;
392 size_t name_len = strlen(algo_name);
393
394 if (algo_name[name_len - 1] == '\n')
395 algo_name[name_len - 1] = '\0';
396
397 bat_algo_ops = batadv_algo_get(algo_name);
398 if (!bat_algo_ops) {
399 pr_err("Routing algorithm '%s' is not supported\n", algo_name);
400 return -EINVAL;
401 }
402
403 return param_set_copystring(algo_name, kp);
404 }
405
/* custom get/set ops so routing_algo writes are validated against the
 * list of registered algorithms
 */
static const struct kernel_param_ops batadv_param_ops_ra = {
	.set = batadv_param_set_ra,
	.get = param_get_string,
};

/* backing storage for the routing_algo parameter string */
static struct kparam_string batadv_param_string_ra = {
	.maxlen = sizeof(batadv_routing_algo),
	.string = batadv_routing_algo,
};

/* /sys/module/batman_adv/parameters/routing_algo, world-readable,
 * root-writable
 */
module_param_cb(routing_algo, &batadv_param_ops_ra, &batadv_param_string_ra,
		0644);
module_init(batadv_init);
module_exit(batadv_exit);

MODULE_LICENSE("GPL");

MODULE_AUTHOR(BATADV_DRIVER_AUTHOR);
MODULE_DESCRIPTION(BATADV_DRIVER_DESC);
MODULE_SUPPORTED_DEVICE(BATADV_DRIVER_DEVICE);
MODULE_VERSION(BATADV_SOURCE_VERSION);
This page took 0.038566 seconds and 5 git commands to generate.