/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */
#include "main.h"
#include "bat_sysfs.h"
#include "bat_debugfs.h"
#include "routing.h"
#include "send.h"
#include "originator.h"
#include "soft-interface.h"
#include "icmp_socket.h"
#include "translation-table.h"
#include "hard-interface.h"
#include "gateway_client.h"
#include "bridge_loop_avoidance.h"
#include "vis.h"
#include "hash.h"
#include "bat_algo.h"
39 /* List manipulations on hardif_list have to be rtnl_lock()'ed,
40 * list traversals just rcu-locked */
41 struct list_head hardif_list
;
42 static int (*recv_packet_handler
[256])(struct sk_buff
*, struct hard_iface
*);
43 char bat_routing_algo
[20] = "BATMAN_IV";
44 static struct hlist_head bat_algo_list
;
46 unsigned char broadcast_addr
[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
48 struct workqueue_struct
*bat_event_workqueue
;
50 static void recv_handler_init(void);
52 static int __init
batman_init(void)
54 INIT_LIST_HEAD(&hardif_list
);
55 INIT_HLIST_HEAD(&bat_algo_list
);
61 /* the name should not be longer than 10 chars - see
62 * http://lwn.net/Articles/23634/ */
63 bat_event_workqueue
= create_singlethread_workqueue("bat_events");
65 if (!bat_event_workqueue
)
69 batadv_debugfs_init();
71 register_netdevice_notifier(&hard_if_notifier
);
73 pr_info("B.A.T.M.A.N. advanced %s (compatibility version %i) loaded\n",
74 SOURCE_VERSION
, COMPAT_VERSION
);
79 static void __exit
batman_exit(void)
81 batadv_debugfs_destroy();
82 unregister_netdevice_notifier(&hard_if_notifier
);
83 hardif_remove_interfaces();
85 flush_workqueue(bat_event_workqueue
);
86 destroy_workqueue(bat_event_workqueue
);
87 bat_event_workqueue
= NULL
;
92 int mesh_init(struct net_device
*soft_iface
)
94 struct bat_priv
*bat_priv
= netdev_priv(soft_iface
);
97 spin_lock_init(&bat_priv
->forw_bat_list_lock
);
98 spin_lock_init(&bat_priv
->forw_bcast_list_lock
);
99 spin_lock_init(&bat_priv
->tt_changes_list_lock
);
100 spin_lock_init(&bat_priv
->tt_req_list_lock
);
101 spin_lock_init(&bat_priv
->tt_roam_list_lock
);
102 spin_lock_init(&bat_priv
->tt_buff_lock
);
103 spin_lock_init(&bat_priv
->gw_list_lock
);
104 spin_lock_init(&bat_priv
->vis_hash_lock
);
105 spin_lock_init(&bat_priv
->vis_list_lock
);
107 INIT_HLIST_HEAD(&bat_priv
->forw_bat_list
);
108 INIT_HLIST_HEAD(&bat_priv
->forw_bcast_list
);
109 INIT_HLIST_HEAD(&bat_priv
->gw_list
);
110 INIT_LIST_HEAD(&bat_priv
->tt_changes_list
);
111 INIT_LIST_HEAD(&bat_priv
->tt_req_list
);
112 INIT_LIST_HEAD(&bat_priv
->tt_roam_list
);
114 ret
= originator_init(bat_priv
);
118 ret
= tt_init(bat_priv
);
122 tt_local_add(soft_iface
, soft_iface
->dev_addr
, NULL_IFINDEX
);
124 ret
= vis_init(bat_priv
);
128 ret
= batadv_bla_init(bat_priv
);
132 atomic_set(&bat_priv
->gw_reselect
, 0);
133 atomic_set(&bat_priv
->mesh_state
, MESH_ACTIVE
);
138 mesh_free(soft_iface
);
142 void mesh_free(struct net_device
*soft_iface
)
144 struct bat_priv
*bat_priv
= netdev_priv(soft_iface
);
146 atomic_set(&bat_priv
->mesh_state
, MESH_DEACTIVATING
);
148 purge_outstanding_packets(bat_priv
, NULL
);
152 gw_node_purge(bat_priv
);
153 originator_free(bat_priv
);
157 batadv_bla_free(bat_priv
);
159 free_percpu(bat_priv
->bat_counters
);
161 atomic_set(&bat_priv
->mesh_state
, MESH_INACTIVE
);
164 void inc_module_count(void)
166 try_module_get(THIS_MODULE
);
169 void dec_module_count(void)
171 module_put(THIS_MODULE
);
174 int is_my_mac(const uint8_t *addr
)
176 const struct hard_iface
*hard_iface
;
179 list_for_each_entry_rcu(hard_iface
, &hardif_list
, list
) {
180 if (hard_iface
->if_status
!= IF_ACTIVE
)
183 if (compare_eth(hard_iface
->net_dev
->dev_addr
, addr
)) {
192 static int recv_unhandled_packet(struct sk_buff
*skb
,
193 struct hard_iface
*recv_if
)
198 /* incoming packets with the batman ethertype received on any active hard
201 int batman_skb_recv(struct sk_buff
*skb
, struct net_device
*dev
,
202 struct packet_type
*ptype
, struct net_device
*orig_dev
)
204 struct bat_priv
*bat_priv
;
205 struct batman_ogm_packet
*batman_ogm_packet
;
206 struct hard_iface
*hard_iface
;
210 hard_iface
= container_of(ptype
, struct hard_iface
, batman_adv_ptype
);
211 skb
= skb_share_check(skb
, GFP_ATOMIC
);
213 /* skb was released by skb_share_check() */
217 /* packet should hold at least type and version */
218 if (unlikely(!pskb_may_pull(skb
, 2)))
221 /* expect a valid ethernet header here. */
222 if (unlikely(skb
->mac_len
!= ETH_HLEN
|| !skb_mac_header(skb
)))
225 if (!hard_iface
->soft_iface
)
228 bat_priv
= netdev_priv(hard_iface
->soft_iface
);
230 if (atomic_read(&bat_priv
->mesh_state
) != MESH_ACTIVE
)
233 /* discard frames on not active interfaces */
234 if (hard_iface
->if_status
!= IF_ACTIVE
)
237 batman_ogm_packet
= (struct batman_ogm_packet
*)skb
->data
;
239 if (batman_ogm_packet
->header
.version
!= COMPAT_VERSION
) {
240 bat_dbg(DBG_BATMAN
, bat_priv
,
241 "Drop packet: incompatible batman version (%i)\n",
242 batman_ogm_packet
->header
.version
);
246 /* all receive handlers return whether they received or reused
247 * the supplied skb. if not, we have to free the skb.
249 idx
= batman_ogm_packet
->header
.packet_type
;
250 ret
= (*recv_packet_handler
[idx
])(skb
, hard_iface
);
252 if (ret
== NET_RX_DROP
)
255 /* return NET_RX_SUCCESS in any case as we
256 * most probably dropped the packet for
257 * routing-logical reasons.
259 return NET_RX_SUCCESS
;
267 static void recv_handler_init(void)
271 for (i
= 0; i
< ARRAY_SIZE(recv_packet_handler
); i
++)
272 recv_packet_handler
[i
] = recv_unhandled_packet
;
274 /* batman icmp packet */
275 recv_packet_handler
[BAT_ICMP
] = recv_icmp_packet
;
277 recv_packet_handler
[BAT_UNICAST
] = recv_unicast_packet
;
278 /* fragmented unicast packet */
279 recv_packet_handler
[BAT_UNICAST_FRAG
] = recv_ucast_frag_packet
;
280 /* broadcast packet */
281 recv_packet_handler
[BAT_BCAST
] = recv_bcast_packet
;
283 recv_packet_handler
[BAT_VIS
] = recv_vis_packet
;
284 /* Translation table query (request or response) */
285 recv_packet_handler
[BAT_TT_QUERY
] = recv_tt_query
;
286 /* Roaming advertisement */
287 recv_packet_handler
[BAT_ROAM_ADV
] = recv_roam_adv
;
290 int recv_handler_register(uint8_t packet_type
,
291 int (*recv_handler
)(struct sk_buff
*,
292 struct hard_iface
*))
294 if (recv_packet_handler
[packet_type
] != &recv_unhandled_packet
)
297 recv_packet_handler
[packet_type
] = recv_handler
;
301 void recv_handler_unregister(uint8_t packet_type
)
303 recv_packet_handler
[packet_type
] = recv_unhandled_packet
;
306 static struct bat_algo_ops
*bat_algo_get(char *name
)
308 struct bat_algo_ops
*bat_algo_ops
= NULL
, *bat_algo_ops_tmp
;
309 struct hlist_node
*node
;
311 hlist_for_each_entry(bat_algo_ops_tmp
, node
, &bat_algo_list
, list
) {
312 if (strcmp(bat_algo_ops_tmp
->name
, name
) != 0)
315 bat_algo_ops
= bat_algo_ops_tmp
;
322 int bat_algo_register(struct bat_algo_ops
*bat_algo_ops
)
324 struct bat_algo_ops
*bat_algo_ops_tmp
;
327 bat_algo_ops_tmp
= bat_algo_get(bat_algo_ops
->name
);
328 if (bat_algo_ops_tmp
) {
329 pr_info("Trying to register already registered routing algorithm: %s\n",
335 /* all algorithms must implement all ops (for now) */
336 if (!bat_algo_ops
->bat_iface_enable
||
337 !bat_algo_ops
->bat_iface_disable
||
338 !bat_algo_ops
->bat_iface_update_mac
||
339 !bat_algo_ops
->bat_primary_iface_set
||
340 !bat_algo_ops
->bat_ogm_schedule
||
341 !bat_algo_ops
->bat_ogm_emit
) {
342 pr_info("Routing algo '%s' does not implement required ops\n",
348 INIT_HLIST_NODE(&bat_algo_ops
->list
);
349 hlist_add_head(&bat_algo_ops
->list
, &bat_algo_list
);
356 int bat_algo_select(struct bat_priv
*bat_priv
, char *name
)
358 struct bat_algo_ops
*bat_algo_ops
;
361 bat_algo_ops
= bat_algo_get(name
);
365 bat_priv
->bat_algo_ops
= bat_algo_ops
;
372 int bat_algo_seq_print_text(struct seq_file
*seq
, void *offset
)
374 struct bat_algo_ops
*bat_algo_ops
;
375 struct hlist_node
*node
;
377 seq_printf(seq
, "Available routing algorithms:\n");
379 hlist_for_each_entry(bat_algo_ops
, node
, &bat_algo_list
, list
) {
380 seq_printf(seq
, "%s\n", bat_algo_ops
->name
);
386 static int param_set_ra(const char *val
, const struct kernel_param
*kp
)
388 struct bat_algo_ops
*bat_algo_ops
;
389 char *algo_name
= (char *)val
;
390 size_t name_len
= strlen(algo_name
);
392 if (algo_name
[name_len
- 1] == '\n')
393 algo_name
[name_len
- 1] = '\0';
395 bat_algo_ops
= bat_algo_get(algo_name
);
397 pr_err("Routing algorithm '%s' is not supported\n", algo_name
);
401 return param_set_copystring(algo_name
, kp
);
404 static const struct kernel_param_ops param_ops_ra
= {
406 .get
= param_get_string
,
409 static struct kparam_string __param_string_ra
= {
410 .maxlen
= sizeof(bat_routing_algo
),
411 .string
= bat_routing_algo
,
414 module_param_cb(routing_algo
, ¶m_ops_ra
, &__param_string_ra
, 0644);
415 module_init(batman_init
);
416 module_exit(batman_exit
);
418 MODULE_LICENSE("GPL");
420 MODULE_AUTHOR(DRIVER_AUTHOR
);
421 MODULE_DESCRIPTION(DRIVER_DESC
);
422 MODULE_SUPPORTED_DEVICE(DRIVER_DEVICE
);
423 MODULE_VERSION(SOURCE_VERSION
);