net/batman-adv/main.c
/* Copyright (C) 2007-2016 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "main.h"

#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/byteorder/generic.h>
#include <linux/crc32c.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/netdevice.h>
#include <linux/pkt_sched.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/seq_file.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/workqueue.h>
#include <net/dsfield.h>
#include <net/rtnetlink.h>

#include "bat_algo.h"
#include "bridge_loop_avoidance.h"
#include "debugfs.h"
#include "distributed-arp-table.h"
#include "gateway_client.h"
#include "gateway_common.h"
#include "hard-interface.h"
#include "icmp_socket.h"
#include "multicast.h"
#include "network-coding.h"
#include "originator.h"
#include "packet.h"
#include "routing.h"
#include "send.h"
#include "soft-interface.h"
#include "translation-table.h"

/* List manipulations on hardif_list have to be rtnl_lock()'ed,
 * list traversals just rcu-locked
 */
struct list_head batadv_hardif_list;
static int (*batadv_rx_handler[256])(struct sk_buff *,
                                     struct batadv_hard_iface *);
char batadv_routing_algo[20] = "BATMAN_IV";
static struct hlist_head batadv_algo_list;

unsigned char batadv_broadcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};

struct workqueue_struct *batadv_event_workqueue;

static void batadv_recv_handler_init(void);

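/**
 * batadv_init - module entry point; set up the global state shared by all
 *  soft interfaces
 *
 * Initializes the hard interface and routing algorithm lists, the receive
 * handler table and the per-module event workqueue, then sets up the icmp
 * socket and debugfs and registers the netdevice notifier and rtnl link
 * operations.
 *
 * Return: 0 on success or -ENOMEM if the event workqueue cannot be created.
 */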
static int __init batadv_init(void)
{
        INIT_LIST_HEAD(&batadv_hardif_list);
        INIT_HLIST_HEAD(&batadv_algo_list);

        batadv_recv_handler_init();

        batadv_v_init();
        batadv_iv_init();
        batadv_nc_init();

        batadv_event_workqueue = create_singlethread_workqueue("bat_events");

        if (!batadv_event_workqueue)
                return -ENOMEM;

        batadv_socket_init();
        batadv_debugfs_init();

        register_netdevice_notifier(&batadv_hard_if_notifier);
        rtnl_link_register(&batadv_link_ops);

        pr_info("B.A.T.M.A.N. advanced %s (compatibility version %i) loaded\n",
                BATADV_SOURCE_VERSION, BATADV_COMPAT_VERSION);

        return 0;
}

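/**
 * batadv_exit - module exit point; undo the setup done in batadv_init() and
 *  wait for all pending work and RCU callbacks to complete
 */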
static void __exit batadv_exit(void)
{
        batadv_debugfs_destroy();
        rtnl_link_unregister(&batadv_link_ops);
        unregister_netdevice_notifier(&batadv_hard_if_notifier);
        batadv_hardif_remove_interfaces();

        flush_workqueue(batadv_event_workqueue);
        destroy_workqueue(batadv_event_workqueue);
        batadv_event_workqueue = NULL;

        rcu_barrier();
}

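/**
 * batadv_mesh_init - initialize the mesh private data of a soft interface
 * @soft_iface: netdev struct of the soft interface
 *
 * Sets up the locks and lists in the batadv_priv structure and initializes
 * the originator, translation table, bridge loop avoidance, DAT, network
 * coding, gateway and multicast components. On failure everything that was
 * already allocated is released again via batadv_mesh_free().
 *
 * Return: 0 on success or a negative error code otherwise.
 */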
int batadv_mesh_init(struct net_device *soft_iface)
{
        struct batadv_priv *bat_priv = netdev_priv(soft_iface);
        int ret;

        spin_lock_init(&bat_priv->forw_bat_list_lock);
        spin_lock_init(&bat_priv->forw_bcast_list_lock);
        spin_lock_init(&bat_priv->tt.changes_list_lock);
        spin_lock_init(&bat_priv->tt.req_list_lock);
        spin_lock_init(&bat_priv->tt.roam_list_lock);
        spin_lock_init(&bat_priv->tt.last_changeset_lock);
        spin_lock_init(&bat_priv->tt.commit_lock);
        spin_lock_init(&bat_priv->gw.list_lock);
#ifdef CONFIG_BATMAN_ADV_MCAST
        spin_lock_init(&bat_priv->mcast.want_lists_lock);
#endif
        spin_lock_init(&bat_priv->tvlv.container_list_lock);
        spin_lock_init(&bat_priv->tvlv.handler_list_lock);
        spin_lock_init(&bat_priv->softif_vlan_list_lock);

        INIT_HLIST_HEAD(&bat_priv->forw_bat_list);
        INIT_HLIST_HEAD(&bat_priv->forw_bcast_list);
        INIT_HLIST_HEAD(&bat_priv->gw.list);
#ifdef CONFIG_BATMAN_ADV_MCAST
        INIT_HLIST_HEAD(&bat_priv->mcast.want_all_unsnoopables_list);
        INIT_HLIST_HEAD(&bat_priv->mcast.want_all_ipv4_list);
        INIT_HLIST_HEAD(&bat_priv->mcast.want_all_ipv6_list);
#endif
        INIT_LIST_HEAD(&bat_priv->tt.changes_list);
        INIT_HLIST_HEAD(&bat_priv->tt.req_list);
        INIT_LIST_HEAD(&bat_priv->tt.roam_list);
#ifdef CONFIG_BATMAN_ADV_MCAST
        INIT_HLIST_HEAD(&bat_priv->mcast.mla_list);
#endif
        INIT_HLIST_HEAD(&bat_priv->tvlv.container_list);
        INIT_HLIST_HEAD(&bat_priv->tvlv.handler_list);
        INIT_HLIST_HEAD(&bat_priv->softif_vlan_list);

        ret = batadv_v_mesh_init(bat_priv);
        if (ret < 0)
                goto err;

        ret = batadv_originator_init(bat_priv);
        if (ret < 0)
                goto err;

        ret = batadv_tt_init(bat_priv);
        if (ret < 0)
                goto err;

        ret = batadv_bla_init(bat_priv);
        if (ret < 0)
                goto err;

        ret = batadv_dat_init(bat_priv);
        if (ret < 0)
                goto err;

        ret = batadv_nc_mesh_init(bat_priv);
        if (ret < 0)
                goto err;

        batadv_gw_init(bat_priv);
        batadv_mcast_init(bat_priv);

        atomic_set(&bat_priv->gw.reselect, 0);
        atomic_set(&bat_priv->mesh_state, BATADV_MESH_ACTIVE);

        return 0;

err:
        batadv_mesh_free(soft_iface);
        return ret;
}

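/**
 * batadv_mesh_free - release all mesh related resources of a soft interface
 * @soft_iface: netdev struct of the soft interface
 *
 * Counterpart of batadv_mesh_init(): purges outstanding packets and frees
 * the per-component state in reverse dependency order.
 */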
void batadv_mesh_free(struct net_device *soft_iface)
{
        struct batadv_priv *bat_priv = netdev_priv(soft_iface);

        atomic_set(&bat_priv->mesh_state, BATADV_MESH_DEACTIVATING);

        batadv_purge_outstanding_packets(bat_priv, NULL);

        batadv_gw_node_free(bat_priv);

        batadv_v_mesh_free(bat_priv);
        batadv_nc_mesh_free(bat_priv);
        batadv_dat_free(bat_priv);
        batadv_bla_free(bat_priv);

        batadv_mcast_free(bat_priv);

        /* Free the TT and the originator tables only after having terminated
         * all the other components that depend on them and may still use
         * these structures.
         */
        batadv_tt_free(bat_priv);

        /* Since the originator table clean up routine is accessing the TT
         * tables as well, it has to be invoked after the TT tables have been
         * freed and marked as empty. This ensures that no cleanup RCU callbacks
         * accessing the TT data are scheduled for later execution.
         */
        batadv_originator_free(bat_priv);

        batadv_gw_free(bat_priv);

        free_percpu(bat_priv->bat_counters);
        bat_priv->bat_counters = NULL;

        atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
}

/**
 * batadv_is_my_mac - check if the given mac address belongs to any of the real
 * interfaces in the current mesh
 * @bat_priv: the bat priv with all the soft interface information
 * @addr: the address to check
 *
 * Return: true if the mac address was found, false otherwise.
 */
bool batadv_is_my_mac(struct batadv_priv *bat_priv, const u8 *addr)
{
        const struct batadv_hard_iface *hard_iface;
        bool is_my_mac = false;

        rcu_read_lock();
        list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
                if (hard_iface->if_status != BATADV_IF_ACTIVE)
                        continue;

                if (hard_iface->soft_iface != bat_priv->soft_iface)
                        continue;

                if (batadv_compare_eth(hard_iface->net_dev->dev_addr, addr)) {
                        is_my_mac = true;
                        break;
                }
        }
        rcu_read_unlock();
        return is_my_mac;
}

/**
 * batadv_seq_print_text_primary_if_get - called from debugfs table printing
 * function that requires the primary interface
 * @seq: debugfs table seq_file struct
 *
 * Return: primary interface if found or NULL otherwise.
 */
struct batadv_hard_iface *
batadv_seq_print_text_primary_if_get(struct seq_file *seq)
{
        struct net_device *net_dev = (struct net_device *)seq->private;
        struct batadv_priv *bat_priv = netdev_priv(net_dev);
        struct batadv_hard_iface *primary_if;

        primary_if = batadv_primary_if_get_selected(bat_priv);

        if (!primary_if) {
                seq_printf(seq,
                           "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
                           net_dev->name);
                goto out;
        }

        if (primary_if->if_status == BATADV_IF_ACTIVE)
                goto out;

        seq_printf(seq,
                   "BATMAN mesh %s disabled - primary interface not active\n",
                   net_dev->name);
        batadv_hardif_put(primary_if);
        primary_if = NULL;

out:
        return primary_if;
}

/**
 * batadv_max_header_len - calculate maximum encapsulation overhead for a
 * payload packet
 *
 * Return: the maximum encapsulation overhead in bytes.
 */
int batadv_max_header_len(void)
{
        int header_len = 0;

        header_len = max_t(int, header_len,
                           sizeof(struct batadv_unicast_packet));
        header_len = max_t(int, header_len,
                           sizeof(struct batadv_unicast_4addr_packet));
        header_len = max_t(int, header_len,
                           sizeof(struct batadv_bcast_packet));

#ifdef CONFIG_BATMAN_ADV_NC
        header_len = max_t(int, header_len,
                           sizeof(struct batadv_coded_packet));
#endif

        return header_len + ETH_HLEN;
}

/**
 * batadv_skb_set_priority - sets skb priority according to packet content
 * @skb: the packet to be sent
 * @offset: offset to the packet content
 *
 * This function sets a value between 256 and 263 (802.1d priority), which
 * can be interpreted by cfg80211 or other drivers.
 */
void batadv_skb_set_priority(struct sk_buff *skb, int offset)
{
        struct iphdr ip_hdr_tmp, *ip_hdr;
        struct ipv6hdr ip6_hdr_tmp, *ip6_hdr;
        struct ethhdr ethhdr_tmp, *ethhdr;
        struct vlan_ethhdr *vhdr, vhdr_tmp;
        u32 prio;

        /* already set, do nothing */
        if (skb->priority >= 256 && skb->priority <= 263)
                return;

        ethhdr = skb_header_pointer(skb, offset, sizeof(*ethhdr), &ethhdr_tmp);
        if (!ethhdr)
                return;

        switch (ethhdr->h_proto) {
        case htons(ETH_P_8021Q):
                vhdr = skb_header_pointer(skb, offset + sizeof(*vhdr),
                                          sizeof(*vhdr), &vhdr_tmp);
                if (!vhdr)
                        return;
                prio = ntohs(vhdr->h_vlan_TCI) & VLAN_PRIO_MASK;
                prio = prio >> VLAN_PRIO_SHIFT;
                break;
        case htons(ETH_P_IP):
                ip_hdr = skb_header_pointer(skb, offset + sizeof(*ethhdr),
                                            sizeof(*ip_hdr), &ip_hdr_tmp);
                if (!ip_hdr)
                        return;
                prio = (ipv4_get_dsfield(ip_hdr) & 0xfc) >> 5;
                break;
        case htons(ETH_P_IPV6):
                ip6_hdr = skb_header_pointer(skb, offset + sizeof(*ethhdr),
                                             sizeof(*ip6_hdr), &ip6_hdr_tmp);
                if (!ip6_hdr)
                        return;
                prio = (ipv6_get_dsfield(ip6_hdr) & 0xfc) >> 5;
                break;
        default:
                return;
        }

        skb->priority = prio + 256;
}

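/* default receive handler for packet types without a registered handler -
 * report the frame as dropped so the caller frees it
 */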
static int batadv_recv_unhandled_packet(struct sk_buff *skb,
                                        struct batadv_hard_iface *recv_if)
{
        return NET_RX_DROP;
}

/* incoming packets with the batman ethertype received on any active hard
 * interface
 */
int batadv_batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
                           struct packet_type *ptype,
                           struct net_device *orig_dev)
{
        struct batadv_priv *bat_priv;
        struct batadv_ogm_packet *batadv_ogm_packet;
        struct batadv_hard_iface *hard_iface;
        u8 idx;
        int ret;

        hard_iface = container_of(ptype, struct batadv_hard_iface,
                                  batman_adv_ptype);

        /* Prevent processing a packet received on an interface which is
         * getting shut down; otherwise the packet may trigger de-reference
         * errors further down in the receive path.
         */
        if (!kref_get_unless_zero(&hard_iface->refcount))
                goto err_out;

        skb = skb_share_check(skb, GFP_ATOMIC);

        /* skb was released by skb_share_check() */
        if (!skb)
                goto err_put;

        /* packet should hold at least type and version */
        if (unlikely(!pskb_may_pull(skb, 2)))
                goto err_free;

        /* expect a valid ethernet header here. */
        if (unlikely(skb->mac_len != ETH_HLEN || !skb_mac_header(skb)))
                goto err_free;

        if (!hard_iface->soft_iface)
                goto err_free;

        bat_priv = netdev_priv(hard_iface->soft_iface);

        if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
                goto err_free;

        /* discard frames on not active interfaces */
        if (hard_iface->if_status != BATADV_IF_ACTIVE)
                goto err_free;

        batadv_ogm_packet = (struct batadv_ogm_packet *)skb->data;

        if (batadv_ogm_packet->version != BATADV_COMPAT_VERSION) {
                batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
                           "Drop packet: incompatible batman version (%i)\n",
                           batadv_ogm_packet->version);
                goto err_free;
        }

        /* reset control block to avoid left overs from previous users */
        memset(skb->cb, 0, sizeof(struct batadv_skb_cb));

        /* all receive handlers return whether they received or reused
         * the supplied skb. if not, we have to free the skb.
         */
        idx = batadv_ogm_packet->packet_type;
        ret = (*batadv_rx_handler[idx])(skb, hard_iface);

        if (ret == NET_RX_DROP)
                kfree_skb(skb);

        batadv_hardif_put(hard_iface);

        /* return NET_RX_SUCCESS in any case as we
         * most probably dropped the packet for
         * routing-logical reasons.
         */
        return NET_RX_SUCCESS;

err_free:
        kfree_skb(skb);
err_put:
        batadv_hardif_put(hard_iface);
err_out:
        return NET_RX_DROP;
}

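/**
 * batadv_recv_handler_init - populate the packet type dispatch table
 *
 * All entries default to batadv_recv_unhandled_packet() (or its unicast
 * variant for the unicast packet type range) before the handlers for the
 * known packet types are installed. The BUILD_BUG_ON() checks make sure the
 * on-wire packet structures keep their expected sizes.
 */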
static void batadv_recv_handler_init(void)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(batadv_rx_handler); i++)
                batadv_rx_handler[i] = batadv_recv_unhandled_packet;

        for (i = BATADV_UNICAST_MIN; i <= BATADV_UNICAST_MAX; i++)
                batadv_rx_handler[i] = batadv_recv_unhandled_unicast_packet;

        /* compile time checks for sizes */
        BUILD_BUG_ON(sizeof(struct batadv_bla_claim_dst) != 6);
        BUILD_BUG_ON(sizeof(struct batadv_ogm_packet) != 24);
        BUILD_BUG_ON(sizeof(struct batadv_icmp_header) != 20);
        BUILD_BUG_ON(sizeof(struct batadv_icmp_packet) != 20);
        BUILD_BUG_ON(sizeof(struct batadv_icmp_packet_rr) != 116);
        BUILD_BUG_ON(sizeof(struct batadv_unicast_packet) != 10);
        BUILD_BUG_ON(sizeof(struct batadv_unicast_4addr_packet) != 18);
        BUILD_BUG_ON(sizeof(struct batadv_frag_packet) != 20);
        BUILD_BUG_ON(sizeof(struct batadv_bcast_packet) != 14);
        BUILD_BUG_ON(sizeof(struct batadv_coded_packet) != 46);
        BUILD_BUG_ON(sizeof(struct batadv_unicast_tvlv_packet) != 20);
        BUILD_BUG_ON(sizeof(struct batadv_tvlv_hdr) != 4);
        BUILD_BUG_ON(sizeof(struct batadv_tvlv_gateway_data) != 8);
        BUILD_BUG_ON(sizeof(struct batadv_tvlv_tt_vlan_data) != 8);
        BUILD_BUG_ON(sizeof(struct batadv_tvlv_tt_change) != 12);
        BUILD_BUG_ON(sizeof(struct batadv_tvlv_roam_adv) != 8);

        /* broadcast packet */
        batadv_rx_handler[BATADV_BCAST] = batadv_recv_bcast_packet;

        /* unicast packets ... */
        /* unicast with 4 addresses packet */
        batadv_rx_handler[BATADV_UNICAST_4ADDR] = batadv_recv_unicast_packet;
        /* unicast packet */
        batadv_rx_handler[BATADV_UNICAST] = batadv_recv_unicast_packet;
        /* unicast tvlv packet */
        batadv_rx_handler[BATADV_UNICAST_TVLV] = batadv_recv_unicast_tvlv;
        /* batman icmp packet */
        batadv_rx_handler[BATADV_ICMP] = batadv_recv_icmp_packet;
        /* Fragmented packets */
        batadv_rx_handler[BATADV_UNICAST_FRAG] = batadv_recv_frag_packet;
}

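/**
 * batadv_recv_handler_register - attach a receive handler to a batman-adv
 *  packet type
 * @packet_type: batman-adv packet type the handler is responsible for
 * @recv_handler: receive handler callback for this packet type
 *
 * A rough usage sketch (my_type and my_recv are hypothetical, for
 * illustration only):
 *
 *      ret = batadv_recv_handler_register(my_type, my_recv);
 *      if (ret < 0)
 *              return ret;
 *      ...
 *      batadv_recv_handler_unregister(my_type);
 *
 * Return: 0 on success or -EBUSY if a handler other than the default
 * "unhandled" stubs is already registered for this packet type.
 */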
int
batadv_recv_handler_register(u8 packet_type,
                             int (*recv_handler)(struct sk_buff *,
                                                 struct batadv_hard_iface *))
{
        int (*curr)(struct sk_buff *,
                    struct batadv_hard_iface *);
        curr = batadv_rx_handler[packet_type];

        if ((curr != batadv_recv_unhandled_packet) &&
            (curr != batadv_recv_unhandled_unicast_packet))
                return -EBUSY;

        batadv_rx_handler[packet_type] = recv_handler;
        return 0;
}

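/**
 * batadv_recv_handler_unregister - reset a packet type to the default
 *  "unhandled" receive handler
 * @packet_type: batman-adv packet type to unregister
 */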
void batadv_recv_handler_unregister(u8 packet_type)
{
        batadv_rx_handler[packet_type] = batadv_recv_unhandled_packet;
}

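/**
 * batadv_algo_get - look up a routing algorithm by name
 * @name: name of the routing algorithm to retrieve
 *
 * Return: the batadv_algo_ops entry registered under @name or NULL if no
 * algorithm with that name exists.
 */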
static struct batadv_algo_ops *batadv_algo_get(char *name)
{
        struct batadv_algo_ops *bat_algo_ops = NULL, *bat_algo_ops_tmp;

        hlist_for_each_entry(bat_algo_ops_tmp, &batadv_algo_list, list) {
                if (strcmp(bat_algo_ops_tmp->name, name) != 0)
                        continue;

                bat_algo_ops = bat_algo_ops_tmp;
                break;
        }

        return bat_algo_ops;
}

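/**
 * batadv_algo_register - register a routing algorithm with the batman-adv
 *  core
 * @bat_algo_ops: routing algorithm ops to register
 *
 * Return: 0 on success, -EEXIST if an algorithm with the same name is
 * already registered or -EINVAL if one of the mandatory callbacks is
 * missing.
 */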
int batadv_algo_register(struct batadv_algo_ops *bat_algo_ops)
{
        struct batadv_algo_ops *bat_algo_ops_tmp;

        bat_algo_ops_tmp = batadv_algo_get(bat_algo_ops->name);
        if (bat_algo_ops_tmp) {
                pr_info("Trying to register already registered routing algorithm: %s\n",
                        bat_algo_ops->name);
                return -EEXIST;
        }

        /* all algorithms must implement all ops (for now) */
        if (!bat_algo_ops->bat_iface_enable ||
            !bat_algo_ops->bat_iface_disable ||
            !bat_algo_ops->bat_iface_update_mac ||
            !bat_algo_ops->bat_primary_iface_set ||
            !bat_algo_ops->bat_ogm_schedule ||
            !bat_algo_ops->bat_ogm_emit ||
            !bat_algo_ops->bat_neigh_cmp ||
            !bat_algo_ops->bat_neigh_is_similar_or_better) {
                pr_info("Routing algo '%s' does not implement required ops\n",
                        bat_algo_ops->name);
                return -EINVAL;
        }

        INIT_HLIST_NODE(&bat_algo_ops->list);
        hlist_add_head(&bat_algo_ops->list, &batadv_algo_list);

        return 0;
}

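/**
 * batadv_algo_select - select the routing algorithm used by a mesh instance
 * @bat_priv: the bat priv with all the soft interface information
 * @name: name of the routing algorithm to select
 *
 * Return: 0 on success or -EINVAL if no algorithm with that name has been
 * registered.
 */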
int batadv_algo_select(struct batadv_priv *bat_priv, char *name)
{
        struct batadv_algo_ops *bat_algo_ops;

        bat_algo_ops = batadv_algo_get(name);
        if (!bat_algo_ops)
                return -EINVAL;

        bat_priv->bat_algo_ops = bat_algo_ops;

        return 0;
}

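/**
 * batadv_algo_seq_print_text - print the list of registered routing
 *  algorithms to a debugfs seq_file
 * @seq: debugfs table seq_file struct
 * @offset: not used
 *
 * Return: always 0
 */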
int batadv_algo_seq_print_text(struct seq_file *seq, void *offset)
{
        struct batadv_algo_ops *bat_algo_ops;

        seq_puts(seq, "Available routing algorithms:\n");

        hlist_for_each_entry(bat_algo_ops, &batadv_algo_list, list) {
                seq_printf(seq, " * %s\n", bat_algo_ops->name);
        }

        return 0;
}

/**
 * batadv_skb_crc32 - calculate CRC32 of the whole packet and skip bytes in
 * the header
 * @skb: skb pointing to fragmented socket buffers
 * @payload_ptr: Pointer to position inside the head buffer of the skb
 * marking the start of the data to be CRC'ed
 *
 * payload_ptr must always point to an address in the skb head buffer and not to
 * a fragment.
 *
 * Return: big endian crc32c of the checksummed data
 */
__be32 batadv_skb_crc32(struct sk_buff *skb, u8 *payload_ptr)
{
        u32 crc = 0;
        unsigned int from;
        unsigned int to = skb->len;
        struct skb_seq_state st;
        const u8 *data;
        unsigned int len;
        unsigned int consumed = 0;

        from = (unsigned int)(payload_ptr - skb->data);

        skb_prepare_seq_read(skb, from, to, &st);
        while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
                crc = crc32c(crc, data, len);
                consumed += len;
        }

        return htonl(crc);
}

/**
 * batadv_tvlv_handler_release - release tvlv handler from lists and queue for
 * free after rcu grace period
 * @ref: kref pointer of the tvlv
 */
static void batadv_tvlv_handler_release(struct kref *ref)
{
        struct batadv_tvlv_handler *tvlv_handler;

        tvlv_handler = container_of(ref, struct batadv_tvlv_handler, refcount);
        kfree_rcu(tvlv_handler, rcu);
}

/**
 * batadv_tvlv_handler_put - decrement the tvlv container refcounter and
 * possibly release it
 * @tvlv_handler: the tvlv handler to free
 */
static void batadv_tvlv_handler_put(struct batadv_tvlv_handler *tvlv_handler)
{
        kref_put(&tvlv_handler->refcount, batadv_tvlv_handler_release);
}

/**
 * batadv_tvlv_handler_get - retrieve tvlv handler from the tvlv handler list
 * based on the provided type and version (both need to match)
 * @bat_priv: the bat priv with all the soft interface information
 * @type: tvlv handler type to look for
 * @version: tvlv handler version to look for
 *
 * Return: tvlv handler if found or NULL otherwise.
 */
static struct batadv_tvlv_handler *
batadv_tvlv_handler_get(struct batadv_priv *bat_priv, u8 type, u8 version)
{
        struct batadv_tvlv_handler *tvlv_handler_tmp, *tvlv_handler = NULL;

        rcu_read_lock();
        hlist_for_each_entry_rcu(tvlv_handler_tmp,
                                 &bat_priv->tvlv.handler_list, list) {
                if (tvlv_handler_tmp->type != type)
                        continue;

                if (tvlv_handler_tmp->version != version)
                        continue;

                if (!kref_get_unless_zero(&tvlv_handler_tmp->refcount))
                        continue;

                tvlv_handler = tvlv_handler_tmp;
                break;
        }
        rcu_read_unlock();

        return tvlv_handler;
}

/**
 * batadv_tvlv_container_release - release tvlv from lists and free
 * @ref: kref pointer of the tvlv
 */
static void batadv_tvlv_container_release(struct kref *ref)
{
        struct batadv_tvlv_container *tvlv;

        tvlv = container_of(ref, struct batadv_tvlv_container, refcount);
        kfree(tvlv);
}

/**
 * batadv_tvlv_container_put - decrement the tvlv container refcounter and
 * possibly release it
 * @tvlv: the tvlv container to free
 */
static void batadv_tvlv_container_put(struct batadv_tvlv_container *tvlv)
{
        kref_put(&tvlv->refcount, batadv_tvlv_container_release);
}

/**
 * batadv_tvlv_container_get - retrieve tvlv container from the tvlv container
 * list based on the provided type and version (both need to match)
 * @bat_priv: the bat priv with all the soft interface information
 * @type: tvlv container type to look for
 * @version: tvlv container version to look for
 *
 * Has to be called with the appropriate locks being acquired
 * (tvlv.container_list_lock).
 *
 * Return: tvlv container if found or NULL otherwise.
 */
static struct batadv_tvlv_container *
batadv_tvlv_container_get(struct batadv_priv *bat_priv, u8 type, u8 version)
{
        struct batadv_tvlv_container *tvlv_tmp, *tvlv = NULL;

        lockdep_assert_held(&bat_priv->tvlv.container_list_lock);

        hlist_for_each_entry(tvlv_tmp, &bat_priv->tvlv.container_list, list) {
                if (tvlv_tmp->tvlv_hdr.type != type)
                        continue;

                if (tvlv_tmp->tvlv_hdr.version != version)
                        continue;

                kref_get(&tvlv_tmp->refcount);
                tvlv = tvlv_tmp;
                break;
        }

        return tvlv;
}

/**
 * batadv_tvlv_container_list_size - calculate the size of the tvlv container
 * list entries
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Has to be called with the appropriate locks being acquired
 * (tvlv.container_list_lock).
 *
 * Return: size of all currently registered tvlv containers in bytes.
 */
static u16 batadv_tvlv_container_list_size(struct batadv_priv *bat_priv)
{
        struct batadv_tvlv_container *tvlv;
        u16 tvlv_len = 0;

        lockdep_assert_held(&bat_priv->tvlv.container_list_lock);

        hlist_for_each_entry(tvlv, &bat_priv->tvlv.container_list, list) {
                tvlv_len += sizeof(struct batadv_tvlv_hdr);
                tvlv_len += ntohs(tvlv->tvlv_hdr.len);
        }

        return tvlv_len;
}

/**
 * batadv_tvlv_container_remove - remove tvlv container from the tvlv container
 * list
 * @bat_priv: the bat priv with all the soft interface information
 * @tvlv: the to be removed tvlv container
 *
 * Has to be called with the appropriate locks being acquired
 * (tvlv.container_list_lock).
 */
static void batadv_tvlv_container_remove(struct batadv_priv *bat_priv,
                                         struct batadv_tvlv_container *tvlv)
{
        lockdep_assert_held(&bat_priv->tvlv.container_list_lock);

        if (!tvlv)
                return;

        hlist_del(&tvlv->list);

        /* first call to decrement the counter, second call to free */
        batadv_tvlv_container_put(tvlv);
        batadv_tvlv_container_put(tvlv);
}

/**
 * batadv_tvlv_container_unregister - unregister tvlv container based on the
 * provided type and version (both need to match)
 * @bat_priv: the bat priv with all the soft interface information
 * @type: tvlv container type to unregister
 * @version: tvlv container version to unregister
 */
void batadv_tvlv_container_unregister(struct batadv_priv *bat_priv,
                                      u8 type, u8 version)
{
        struct batadv_tvlv_container *tvlv;

        spin_lock_bh(&bat_priv->tvlv.container_list_lock);
        tvlv = batadv_tvlv_container_get(bat_priv, type, version);
        batadv_tvlv_container_remove(bat_priv, tvlv);
        spin_unlock_bh(&bat_priv->tvlv.container_list_lock);
}

/**
 * batadv_tvlv_container_register - register tvlv type, version and content
 * to be propagated with each (primary interface) OGM
 * @bat_priv: the bat priv with all the soft interface information
 * @type: tvlv container type
 * @version: tvlv container version
 * @tvlv_value: tvlv container content
 * @tvlv_value_len: tvlv container content length
 *
 * If a container of the same type and version was already registered the new
 * content is going to replace the old one.
 */
void batadv_tvlv_container_register(struct batadv_priv *bat_priv,
                                    u8 type, u8 version,
                                    void *tvlv_value, u16 tvlv_value_len)
{
        struct batadv_tvlv_container *tvlv_old, *tvlv_new;

        if (!tvlv_value)
                tvlv_value_len = 0;

        tvlv_new = kzalloc(sizeof(*tvlv_new) + tvlv_value_len, GFP_ATOMIC);
        if (!tvlv_new)
                return;

        tvlv_new->tvlv_hdr.version = version;
        tvlv_new->tvlv_hdr.type = type;
        tvlv_new->tvlv_hdr.len = htons(tvlv_value_len);

        memcpy(tvlv_new + 1, tvlv_value, ntohs(tvlv_new->tvlv_hdr.len));
        INIT_HLIST_NODE(&tvlv_new->list);
        kref_init(&tvlv_new->refcount);

        spin_lock_bh(&bat_priv->tvlv.container_list_lock);
        tvlv_old = batadv_tvlv_container_get(bat_priv, type, version);
        batadv_tvlv_container_remove(bat_priv, tvlv_old);
        hlist_add_head(&tvlv_new->list, &bat_priv->tvlv.container_list);
        spin_unlock_bh(&bat_priv->tvlv.container_list_lock);
}

/**
 * batadv_tvlv_realloc_packet_buff - reallocate packet buffer to accommodate
 * requested packet size
 * @packet_buff: packet buffer
 * @packet_buff_len: packet buffer size
 * @min_packet_len: requested packet minimum size
 * @additional_packet_len: requested additional packet size on top of minimum
 * size
 *
 * Return: true if the packet buffer could be changed to the requested size,
 * false otherwise.
 */
static bool batadv_tvlv_realloc_packet_buff(unsigned char **packet_buff,
                                            int *packet_buff_len,
                                            int min_packet_len,
                                            int additional_packet_len)
{
        unsigned char *new_buff;

        new_buff = kmalloc(min_packet_len + additional_packet_len, GFP_ATOMIC);

        /* keep old buffer if kmalloc should fail */
        if (!new_buff)
                return false;

        memcpy(new_buff, *packet_buff, min_packet_len);
        kfree(*packet_buff);
        *packet_buff = new_buff;
        *packet_buff_len = min_packet_len + additional_packet_len;

        return true;
}

/**
 * batadv_tvlv_container_ogm_append - append tvlv container content to given
 * OGM packet buffer
 * @bat_priv: the bat priv with all the soft interface information
 * @packet_buff: ogm packet buffer
 * @packet_buff_len: ogm packet buffer size including ogm header and tvlv
 * content
 * @packet_min_len: ogm header size to be preserved for the OGM itself
 *
 * The ogm packet might be enlarged or shrunk depending on the current size
 * and the size of the to-be-appended tvlv containers.
 *
 * Return: size of all appended tvlv containers in bytes.
 */
u16 batadv_tvlv_container_ogm_append(struct batadv_priv *bat_priv,
                                     unsigned char **packet_buff,
                                     int *packet_buff_len, int packet_min_len)
{
        struct batadv_tvlv_container *tvlv;
        struct batadv_tvlv_hdr *tvlv_hdr;
        u16 tvlv_value_len;
        void *tvlv_value;
        bool ret;

        spin_lock_bh(&bat_priv->tvlv.container_list_lock);
        tvlv_value_len = batadv_tvlv_container_list_size(bat_priv);

        ret = batadv_tvlv_realloc_packet_buff(packet_buff, packet_buff_len,
                                              packet_min_len, tvlv_value_len);

        if (!ret)
                goto end;

        if (!tvlv_value_len)
                goto end;

        tvlv_value = (*packet_buff) + packet_min_len;

        hlist_for_each_entry(tvlv, &bat_priv->tvlv.container_list, list) {
                tvlv_hdr = tvlv_value;
                tvlv_hdr->type = tvlv->tvlv_hdr.type;
                tvlv_hdr->version = tvlv->tvlv_hdr.version;
                tvlv_hdr->len = tvlv->tvlv_hdr.len;
                tvlv_value = tvlv_hdr + 1;
                memcpy(tvlv_value, tvlv + 1, ntohs(tvlv->tvlv_hdr.len));
                tvlv_value = (u8 *)tvlv_value + ntohs(tvlv->tvlv_hdr.len);
        }

end:
        spin_unlock_bh(&bat_priv->tvlv.container_list_lock);
        return tvlv_value_len;
}

/**
 * batadv_tvlv_call_handler - parse the given tvlv buffer to call the
 * appropriate handlers
 * @bat_priv: the bat priv with all the soft interface information
 * @tvlv_handler: tvlv callback function handling the tvlv content
 * @ogm_source: flag indicating whether the tvlv is an ogm or a unicast packet
 * @orig_node: orig node emitting the ogm packet
 * @src: source mac address of the unicast packet
 * @dst: destination mac address of the unicast packet
 * @tvlv_value: tvlv content
 * @tvlv_value_len: tvlv content length
 *
 * Return: success if handler was not found or the return value of the handler
 * callback.
 */
static int batadv_tvlv_call_handler(struct batadv_priv *bat_priv,
                                    struct batadv_tvlv_handler *tvlv_handler,
                                    bool ogm_source,
                                    struct batadv_orig_node *orig_node,
                                    u8 *src, u8 *dst,
                                    void *tvlv_value, u16 tvlv_value_len)
{
        if (!tvlv_handler)
                return NET_RX_SUCCESS;

        if (ogm_source) {
                if (!tvlv_handler->ogm_handler)
                        return NET_RX_SUCCESS;

                if (!orig_node)
                        return NET_RX_SUCCESS;

                tvlv_handler->ogm_handler(bat_priv, orig_node,
                                          BATADV_NO_FLAGS,
                                          tvlv_value, tvlv_value_len);
                tvlv_handler->flags |= BATADV_TVLV_HANDLER_OGM_CALLED;
        } else {
                if (!src)
                        return NET_RX_SUCCESS;

                if (!dst)
                        return NET_RX_SUCCESS;

                if (!tvlv_handler->unicast_handler)
                        return NET_RX_SUCCESS;

                return tvlv_handler->unicast_handler(bat_priv, src,
                                                     dst, tvlv_value,
                                                     tvlv_value_len);
        }

        return NET_RX_SUCCESS;
}

/**
 * batadv_tvlv_containers_process - parse the given tvlv buffer to call the
 * appropriate handlers
 * @bat_priv: the bat priv with all the soft interface information
 * @ogm_source: flag indicating whether the tvlv is an ogm or a unicast packet
 * @orig_node: orig node emitting the ogm packet
 * @src: source mac address of the unicast packet
 * @dst: destination mac address of the unicast packet
 * @tvlv_value: tvlv content
 * @tvlv_value_len: tvlv content length
 *
 * Return: success when processing an OGM or the return value of all called
 * handler callbacks.
 */
int batadv_tvlv_containers_process(struct batadv_priv *bat_priv,
                                   bool ogm_source,
                                   struct batadv_orig_node *orig_node,
                                   u8 *src, u8 *dst,
                                   void *tvlv_value, u16 tvlv_value_len)
{
        struct batadv_tvlv_handler *tvlv_handler;
        struct batadv_tvlv_hdr *tvlv_hdr;
        u16 tvlv_value_cont_len;
        u8 cifnotfound = BATADV_TVLV_HANDLER_OGM_CIFNOTFND;
        int ret = NET_RX_SUCCESS;

        while (tvlv_value_len >= sizeof(*tvlv_hdr)) {
                tvlv_hdr = tvlv_value;
                tvlv_value_cont_len = ntohs(tvlv_hdr->len);
                tvlv_value = tvlv_hdr + 1;
                tvlv_value_len -= sizeof(*tvlv_hdr);

                if (tvlv_value_cont_len > tvlv_value_len)
                        break;

                tvlv_handler = batadv_tvlv_handler_get(bat_priv,
                                                       tvlv_hdr->type,
                                                       tvlv_hdr->version);

                ret |= batadv_tvlv_call_handler(bat_priv, tvlv_handler,
                                                ogm_source, orig_node,
                                                src, dst, tvlv_value,
                                                tvlv_value_cont_len);
                if (tvlv_handler)
                        batadv_tvlv_handler_put(tvlv_handler);
                tvlv_value = (u8 *)tvlv_value + tvlv_value_cont_len;
                tvlv_value_len -= tvlv_value_cont_len;
        }

        if (!ogm_source)
                return ret;

        rcu_read_lock();
        hlist_for_each_entry_rcu(tvlv_handler,
                                 &bat_priv->tvlv.handler_list, list) {
                if ((tvlv_handler->flags & BATADV_TVLV_HANDLER_OGM_CIFNOTFND) &&
                    !(tvlv_handler->flags & BATADV_TVLV_HANDLER_OGM_CALLED))
                        tvlv_handler->ogm_handler(bat_priv, orig_node,
                                                  cifnotfound, NULL, 0);

                tvlv_handler->flags &= ~BATADV_TVLV_HANDLER_OGM_CALLED;
        }
        rcu_read_unlock();

        return NET_RX_SUCCESS;
}

/**
 * batadv_tvlv_ogm_receive - process an incoming ogm and call the appropriate
 * handlers
 * @bat_priv: the bat priv with all the soft interface information
 * @batadv_ogm_packet: ogm packet containing the tvlv containers
 * @orig_node: orig node emitting the ogm packet
 */
void batadv_tvlv_ogm_receive(struct batadv_priv *bat_priv,
                             struct batadv_ogm_packet *batadv_ogm_packet,
                             struct batadv_orig_node *orig_node)
{
        void *tvlv_value;
        u16 tvlv_value_len;

        if (!batadv_ogm_packet)
                return;

        tvlv_value_len = ntohs(batadv_ogm_packet->tvlv_len);
        if (!tvlv_value_len)
                return;

        tvlv_value = batadv_ogm_packet + 1;

        batadv_tvlv_containers_process(bat_priv, true, orig_node, NULL, NULL,
                                       tvlv_value, tvlv_value_len);
}

/**
 * batadv_tvlv_handler_register - register tvlv handler based on the provided
 * type and version (both need to match) for ogm tvlv payload and/or unicast
 * payload
 * @bat_priv: the bat priv with all the soft interface information
 * @optr: ogm tvlv handler callback function. This function receives the orig
 * node, flags and the tvlv content as argument to process.
 * @uptr: unicast tvlv handler callback function. This function receives the
 * source & destination of the unicast packet as well as the tvlv content
 * to process.
 * @type: tvlv handler type to be registered
 * @version: tvlv handler version to be registered
 * @flags: flags to enable or disable TVLV API behavior
 */
void batadv_tvlv_handler_register(struct batadv_priv *bat_priv,
                                  void (*optr)(struct batadv_priv *bat_priv,
                                               struct batadv_orig_node *orig,
                                               u8 flags,
                                               void *tvlv_value,
                                               u16 tvlv_value_len),
                                  int (*uptr)(struct batadv_priv *bat_priv,
                                              u8 *src, u8 *dst,
                                              void *tvlv_value,
                                              u16 tvlv_value_len),
                                  u8 type, u8 version, u8 flags)
{
        struct batadv_tvlv_handler *tvlv_handler;

        tvlv_handler = batadv_tvlv_handler_get(bat_priv, type, version);
        if (tvlv_handler) {
                batadv_tvlv_handler_put(tvlv_handler);
                return;
        }

        tvlv_handler = kzalloc(sizeof(*tvlv_handler), GFP_ATOMIC);
        if (!tvlv_handler)
                return;

        tvlv_handler->ogm_handler = optr;
        tvlv_handler->unicast_handler = uptr;
        tvlv_handler->type = type;
        tvlv_handler->version = version;
        tvlv_handler->flags = flags;
        kref_init(&tvlv_handler->refcount);
        INIT_HLIST_NODE(&tvlv_handler->list);

        spin_lock_bh(&bat_priv->tvlv.handler_list_lock);
        hlist_add_head_rcu(&tvlv_handler->list, &bat_priv->tvlv.handler_list);
        spin_unlock_bh(&bat_priv->tvlv.handler_list_lock);
}

/**
 * batadv_tvlv_handler_unregister - unregister tvlv handler based on the
 * provided type and version (both need to match)
 * @bat_priv: the bat priv with all the soft interface information
 * @type: tvlv handler type to be unregistered
 * @version: tvlv handler version to be unregistered
 */
void batadv_tvlv_handler_unregister(struct batadv_priv *bat_priv,
                                    u8 type, u8 version)
{
        struct batadv_tvlv_handler *tvlv_handler;

        tvlv_handler = batadv_tvlv_handler_get(bat_priv, type, version);
        if (!tvlv_handler)
                return;

        batadv_tvlv_handler_put(tvlv_handler);
        spin_lock_bh(&bat_priv->tvlv.handler_list_lock);
        hlist_del_rcu(&tvlv_handler->list);
        spin_unlock_bh(&bat_priv->tvlv.handler_list_lock);
        batadv_tvlv_handler_put(tvlv_handler);
}

/**
 * batadv_tvlv_unicast_send - send a unicast packet with tvlv payload to the
 * specified host
 * @bat_priv: the bat priv with all the soft interface information
 * @src: source mac address of the unicast packet
 * @dst: destination mac address of the unicast packet
 * @type: tvlv type
 * @version: tvlv version
 * @tvlv_value: tvlv content
 * @tvlv_value_len: tvlv content length
 */
void batadv_tvlv_unicast_send(struct batadv_priv *bat_priv, u8 *src,
                              u8 *dst, u8 type, u8 version,
                              void *tvlv_value, u16 tvlv_value_len)
{
        struct batadv_unicast_tvlv_packet *unicast_tvlv_packet;
        struct batadv_tvlv_hdr *tvlv_hdr;
        struct batadv_orig_node *orig_node;
        struct sk_buff *skb;
        unsigned char *tvlv_buff;
        unsigned int tvlv_len;
        ssize_t hdr_len = sizeof(*unicast_tvlv_packet);

        orig_node = batadv_orig_hash_find(bat_priv, dst);
        if (!orig_node)
                return;

        tvlv_len = sizeof(*tvlv_hdr) + tvlv_value_len;

        skb = netdev_alloc_skb_ip_align(NULL, ETH_HLEN + hdr_len + tvlv_len);
        if (!skb)
                goto out;

        skb->priority = TC_PRIO_CONTROL;
        skb_reserve(skb, ETH_HLEN);
        tvlv_buff = skb_put(skb, sizeof(*unicast_tvlv_packet) + tvlv_len);
        unicast_tvlv_packet = (struct batadv_unicast_tvlv_packet *)tvlv_buff;
        unicast_tvlv_packet->packet_type = BATADV_UNICAST_TVLV;
        unicast_tvlv_packet->version = BATADV_COMPAT_VERSION;
        unicast_tvlv_packet->ttl = BATADV_TTL;
        unicast_tvlv_packet->reserved = 0;
        unicast_tvlv_packet->tvlv_len = htons(tvlv_len);
        unicast_tvlv_packet->align = 0;
        ether_addr_copy(unicast_tvlv_packet->src, src);
        ether_addr_copy(unicast_tvlv_packet->dst, dst);

        tvlv_buff = (unsigned char *)(unicast_tvlv_packet + 1);
        tvlv_hdr = (struct batadv_tvlv_hdr *)tvlv_buff;
        tvlv_hdr->version = version;
        tvlv_hdr->type = type;
        tvlv_hdr->len = htons(tvlv_value_len);
        tvlv_buff += sizeof(*tvlv_hdr);
        memcpy(tvlv_buff, tvlv_value, tvlv_value_len);

        if (batadv_send_skb_to_orig(skb, orig_node, NULL) == NET_XMIT_DROP)
                kfree_skb(skb);
out:
        batadv_orig_node_put(orig_node);
}

/**
 * batadv_get_vid - extract the VLAN identifier from skb if any
 * @skb: the buffer containing the packet
 * @header_len: length of the batman header preceding the ethernet header
 *
 * Return: VID with the BATADV_VLAN_HAS_TAG flag when the packet embedded in the
 * skb is vlan tagged. Otherwise BATADV_NO_FLAGS.
 */
unsigned short batadv_get_vid(struct sk_buff *skb, size_t header_len)
{
        struct ethhdr *ethhdr = (struct ethhdr *)(skb->data + header_len);
        struct vlan_ethhdr *vhdr;
        unsigned short vid;

        if (ethhdr->h_proto != htons(ETH_P_8021Q))
                return BATADV_NO_FLAGS;

        if (!pskb_may_pull(skb, header_len + VLAN_ETH_HLEN))
                return BATADV_NO_FLAGS;

        vhdr = (struct vlan_ethhdr *)(skb->data + header_len);
        vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK;
        vid |= BATADV_VLAN_HAS_TAG;

        return vid;
}

/**
 * batadv_vlan_ap_isola_get - return the AP isolation status for the given vlan
 * @bat_priv: the bat priv with all the soft interface information
 * @vid: the VLAN identifier for which the AP isolation attribute has to be
 * looked up
 *
 * Return: true if AP isolation is on for the VLAN identified by vid, false
 * otherwise
 */
bool batadv_vlan_ap_isola_get(struct batadv_priv *bat_priv, unsigned short vid)
{
        bool ap_isolation_enabled = false;
        struct batadv_softif_vlan *vlan;

        /* if the AP isolation is requested on a VLAN, then check for its
         * setting in the proper VLAN private data structure
         */
        vlan = batadv_softif_vlan_get(bat_priv, vid);
        if (vlan) {
                ap_isolation_enabled = atomic_read(&vlan->ap_isolation);
                batadv_softif_vlan_put(vlan);
        }

        return ap_isolation_enabled;
}

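/**
 * batadv_param_set_ra - validate and store the routing_algo module parameter
 * @val: name of the routing algorithm as written by the user
 * @kp: kernel parameter holding the batadv_routing_algo string
 *
 * A trailing newline is stripped before the name is checked against the list
 * of registered algorithms.
 *
 * Return: -EINVAL if the algorithm is not registered, otherwise the result
 * of param_set_copystring().
 */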
static int batadv_param_set_ra(const char *val, const struct kernel_param *kp)
{
        struct batadv_algo_ops *bat_algo_ops;
        char *algo_name = (char *)val;
        size_t name_len = strlen(algo_name);

        if (name_len > 0 && algo_name[name_len - 1] == '\n')
                algo_name[name_len - 1] = '\0';

        bat_algo_ops = batadv_algo_get(algo_name);
        if (!bat_algo_ops) {
                pr_err("Routing algorithm '%s' is not supported\n", algo_name);
                return -EINVAL;
        }

        return param_set_copystring(algo_name, kp);
}

static const struct kernel_param_ops batadv_param_ops_ra = {
        .set = batadv_param_set_ra,
        .get = param_get_string,
};

static struct kparam_string batadv_param_string_ra = {
        .maxlen = sizeof(batadv_routing_algo),
        .string = batadv_routing_algo,
};

module_param_cb(routing_algo, &batadv_param_ops_ra, &batadv_param_string_ra,
                0644);
module_init(batadv_init);
module_exit(batadv_exit);

MODULE_LICENSE("GPL");

MODULE_AUTHOR(BATADV_DRIVER_AUTHOR);
MODULE_DESCRIPTION(BATADV_DRIVER_DESC);
MODULE_SUPPORTED_DEVICE(BATADV_DRIVER_DEVICE);
MODULE_VERSION(BATADV_SOURCE_VERSION);