/* Copyright 2011, Siemens AG
 * written by Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
 */

/* Based on patches from Jon Smirl <jonsmirl@gmail.com>
 * Copyright (c) 2011 Jon Smirl <jonsmirl@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

/* Jon's code is based on 6lowpan implementation for Contiki which is:
 * Copyright (c) 2008, Swedish Institute of Computer Science.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the Institute nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
47 #include <linux/bitops.h>
48 #include <linux/if_arp.h>
49 #include <linux/module.h>
50 #include <linux/moduleparam.h>
51 #include <linux/netdevice.h>
52 #include <net/af_ieee802154.h>
53 #include <net/ieee802154.h>
54 #include <net/ieee802154_netdev.h>
55 #include <net/6lowpan.h>
58 #include "reassembly.h"
/* All registered 6lowpan interfaces; traversed under RCU on the RX path
 * and mutated under each device's dev_list_mtx.
 */
static LIST_HEAD(lowpan_devices);
62 /* private device info */
63 struct lowpan_dev_info
{
64 struct net_device
*real_dev
; /* real WPAN device ptr */
65 struct mutex dev_list_mtx
; /* mutex for list ops */
69 struct lowpan_dev_record
{
70 struct net_device
*ldev
;
71 struct list_head list
;
74 /* don't save pan id, it's intra pan */
78 /* IPv6 needs big endian here */
84 struct lowpan_addr_info
{
85 struct lowpan_addr daddr
;
86 struct lowpan_addr saddr
;
/* Return the private info area of a 6lowpan net_device. */
static inline struct
lowpan_dev_info *lowpan_dev_info(const struct net_device *dev)
{
	return netdev_priv(dev);
}
96 lowpan_addr_info
*lowpan_skb_priv(const struct sk_buff
*skb
)
98 WARN_ON_ONCE(skb_headroom(skb
) < sizeof(struct lowpan_addr_info
));
99 return (struct lowpan_addr_info
*)(skb
->data
-
100 sizeof(struct lowpan_addr_info
));
103 static int lowpan_header_create(struct sk_buff
*skb
, struct net_device
*dev
,
104 unsigned short type
, const void *_daddr
,
105 const void *_saddr
, unsigned int len
)
107 const u8
*saddr
= _saddr
;
108 const u8
*daddr
= _daddr
;
109 struct lowpan_addr_info
*info
;
112 * if this package isn't ipv6 one, where should it be routed?
114 if (type
!= ETH_P_IPV6
)
118 saddr
= dev
->dev_addr
;
120 raw_dump_inline(__func__
, "saddr", (unsigned char *)saddr
, 8);
121 raw_dump_inline(__func__
, "daddr", (unsigned char *)daddr
, 8);
123 info
= lowpan_skb_priv(skb
);
125 /* TODO: Currently we only support extended_addr */
126 info
->daddr
.mode
= IEEE802154_ADDR_LONG
;
127 memcpy(&info
->daddr
.u
.extended_addr
, daddr
,
128 sizeof(info
->daddr
.u
.extended_addr
));
129 info
->saddr
.mode
= IEEE802154_ADDR_LONG
;
130 memcpy(&info
->saddr
.u
.extended_addr
, saddr
,
131 sizeof(info
->daddr
.u
.extended_addr
));
136 static int lowpan_give_skb_to_devices(struct sk_buff
*skb
,
137 struct net_device
*dev
)
139 struct lowpan_dev_record
*entry
;
140 struct sk_buff
*skb_cp
;
141 int stat
= NET_RX_SUCCESS
;
144 list_for_each_entry_rcu(entry
, &lowpan_devices
, list
)
145 if (lowpan_dev_info(entry
->ldev
)->real_dev
== skb
->dev
) {
146 skb_cp
= skb_copy(skb
, GFP_ATOMIC
);
152 skb_cp
->dev
= entry
->ldev
;
153 stat
= netif_rx(skb_cp
);
160 static int process_data(struct sk_buff
*skb
, const struct ieee802154_hdr
*hdr
)
163 struct ieee802154_addr_sa sa
, da
;
166 raw_dump_table(__func__
, "raw skb data dump", skb
->data
, skb
->len
);
167 /* at least two bytes will be used for the encoding */
171 if (lowpan_fetch_skb_u8(skb
, &iphc0
))
174 if (lowpan_fetch_skb_u8(skb
, &iphc1
))
177 ieee802154_addr_to_sa(&sa
, &hdr
->source
);
178 ieee802154_addr_to_sa(&da
, &hdr
->dest
);
180 if (sa
.addr_type
== IEEE802154_ADDR_SHORT
)
181 sap
= &sa
.short_addr
;
185 if (da
.addr_type
== IEEE802154_ADDR_SHORT
)
186 dap
= &da
.short_addr
;
190 return lowpan_process_data(skb
, skb
->dev
, sap
, sa
.addr_type
,
191 IEEE802154_ADDR_LEN
, dap
, da
.addr_type
,
192 IEEE802154_ADDR_LEN
, iphc0
, iphc1
,
193 lowpan_give_skb_to_devices
);
200 static int lowpan_set_address(struct net_device
*dev
, void *p
)
202 struct sockaddr
*sa
= p
;
204 if (netif_running(dev
))
207 /* TODO: validate addr */
208 memcpy(dev
->dev_addr
, sa
->sa_data
, dev
->addr_len
);
213 static struct sk_buff
*
214 lowpan_alloc_frag(struct sk_buff
*skb
, int size
,
215 const struct ieee802154_hdr
*master_hdr
)
217 struct net_device
*real_dev
= lowpan_dev_info(skb
->dev
)->real_dev
;
218 struct sk_buff
*frag
;
221 frag
= alloc_skb(real_dev
->hard_header_len
+
222 real_dev
->needed_tailroom
+ size
,
226 frag
->dev
= real_dev
;
227 frag
->priority
= skb
->priority
;
228 skb_reserve(frag
, real_dev
->hard_header_len
);
229 skb_reset_network_header(frag
);
230 *mac_cb(frag
) = *mac_cb(skb
);
232 rc
= dev_hard_header(frag
, real_dev
, 0, &master_hdr
->dest
,
233 &master_hdr
->source
, size
);
239 frag
= ERR_PTR(-ENOMEM
);
246 lowpan_xmit_fragment(struct sk_buff
*skb
, const struct ieee802154_hdr
*wpan_hdr
,
247 u8
*frag_hdr
, int frag_hdrlen
,
250 struct sk_buff
*frag
;
252 raw_dump_inline(__func__
, " fragment header", frag_hdr
, frag_hdrlen
);
254 frag
= lowpan_alloc_frag(skb
, frag_hdrlen
+ len
, wpan_hdr
);
256 return -PTR_ERR(frag
);
258 memcpy(skb_put(frag
, frag_hdrlen
), frag_hdr
, frag_hdrlen
);
259 memcpy(skb_put(frag
, len
), skb_network_header(skb
) + offset
, len
);
261 raw_dump_table(__func__
, " fragment dump", frag
->data
, frag
->len
);
263 return dev_queue_xmit(frag
);
267 lowpan_xmit_fragmented(struct sk_buff
*skb
, struct net_device
*dev
,
268 const struct ieee802154_hdr
*wpan_hdr
)
270 u16 dgram_size
, dgram_offset
;
273 int frag_cap
, frag_len
, payload_cap
, rc
;
274 int skb_unprocessed
, skb_offset
;
276 dgram_size
= lowpan_uncompress_size(skb
, &dgram_offset
) -
278 frag_tag
= lowpan_dev_info(dev
)->fragment_tag
++;
280 frag_hdr
[0] = LOWPAN_DISPATCH_FRAG1
| ((dgram_size
>> 8) & 0x07);
281 frag_hdr
[1] = dgram_size
& 0xff;
282 memcpy(frag_hdr
+ 2, &frag_tag
, sizeof(frag_tag
));
284 payload_cap
= ieee802154_max_payload(wpan_hdr
);
286 frag_len
= round_down(payload_cap
- LOWPAN_FRAG1_HEAD_SIZE
-
287 skb_network_header_len(skb
), 8);
289 skb_offset
= skb_network_header_len(skb
);
290 skb_unprocessed
= skb
->len
- skb
->mac_len
- skb_offset
;
292 rc
= lowpan_xmit_fragment(skb
, wpan_hdr
, frag_hdr
,
293 LOWPAN_FRAG1_HEAD_SIZE
, 0,
294 frag_len
+ skb_network_header_len(skb
));
296 pr_debug("%s unable to send FRAG1 packet (tag: %d)",
301 frag_hdr
[0] &= ~LOWPAN_DISPATCH_FRAG1
;
302 frag_hdr
[0] |= LOWPAN_DISPATCH_FRAGN
;
303 frag_cap
= round_down(payload_cap
- LOWPAN_FRAGN_HEAD_SIZE
, 8);
306 dgram_offset
+= frag_len
;
307 skb_offset
+= frag_len
;
308 skb_unprocessed
-= frag_len
;
309 frag_len
= min(frag_cap
, skb_unprocessed
);
311 frag_hdr
[4] = dgram_offset
>> 3;
313 rc
= lowpan_xmit_fragment(skb
, wpan_hdr
, frag_hdr
,
314 LOWPAN_FRAGN_HEAD_SIZE
, skb_offset
,
317 pr_debug("%s unable to send a FRAGN packet. (tag: %d, offset: %d)\n",
318 __func__
, frag_tag
, skb_offset
);
321 } while (skb_unprocessed
> frag_cap
);
324 return NET_XMIT_SUCCESS
;
331 static int lowpan_header(struct sk_buff
*skb
, struct net_device
*dev
)
333 struct ieee802154_addr sa
, da
;
334 struct ieee802154_mac_cb
*cb
= mac_cb_init(skb
);
335 struct lowpan_addr_info info
;
338 memcpy(&info
, lowpan_skb_priv(skb
), sizeof(info
));
340 /* TODO: Currently we only support extended_addr */
341 daddr
= &info
.daddr
.u
.extended_addr
;
342 saddr
= &info
.saddr
.u
.extended_addr
;
344 lowpan_header_compress(skb
, dev
, ETH_P_IPV6
, daddr
, saddr
, skb
->len
);
346 cb
->type
= IEEE802154_FC_TYPE_DATA
;
348 /* prepare wpan address data */
349 sa
.mode
= IEEE802154_ADDR_LONG
;
350 sa
.pan_id
= ieee802154_mlme_ops(dev
)->get_pan_id(dev
);
351 sa
.extended_addr
= ieee802154_devaddr_from_raw(saddr
);
353 /* intra-PAN communications */
354 da
.pan_id
= sa
.pan_id
;
356 /* if the destination address is the broadcast address, use the
357 * corresponding short address
359 if (lowpan_is_addr_broadcast((const u8
*)daddr
)) {
360 da
.mode
= IEEE802154_ADDR_SHORT
;
361 da
.short_addr
= cpu_to_le16(IEEE802154_ADDR_BROADCAST
);
364 da
.mode
= IEEE802154_ADDR_LONG
;
365 da
.extended_addr
= ieee802154_devaddr_from_raw(daddr
);
369 return dev_hard_header(skb
, lowpan_dev_info(dev
)->real_dev
,
370 ETH_P_IPV6
, (void *)&da
, (void *)&sa
, 0);
373 static netdev_tx_t
lowpan_xmit(struct sk_buff
*skb
, struct net_device
*dev
)
375 struct ieee802154_hdr wpan_hdr
;
378 pr_debug("package xmit\n");
380 /* We must take a copy of the skb before we modify/replace the ipv6
381 * header as the header could be used elsewhere
383 skb
= skb_unshare(skb
, GFP_ATOMIC
);
385 return NET_XMIT_DROP
;
387 ret
= lowpan_header(skb
, dev
);
390 return NET_XMIT_DROP
;
393 if (ieee802154_hdr_peek(skb
, &wpan_hdr
) < 0) {
395 return NET_XMIT_DROP
;
398 max_single
= ieee802154_max_payload(&wpan_hdr
);
400 if (skb_tail_pointer(skb
) - skb_network_header(skb
) <= max_single
) {
401 skb
->dev
= lowpan_dev_info(dev
)->real_dev
;
402 return dev_queue_xmit(skb
);
406 pr_debug("frame is too big, fragmentation is needed\n");
407 rc
= lowpan_xmit_fragmented(skb
, dev
, &wpan_hdr
);
409 return rc
< 0 ? NET_XMIT_DROP
: rc
;
413 static struct wpan_phy
*lowpan_get_phy(const struct net_device
*dev
)
415 struct net_device
*real_dev
= lowpan_dev_info(dev
)->real_dev
;
417 return ieee802154_mlme_ops(real_dev
)->get_phy(real_dev
);
420 static __le16
lowpan_get_pan_id(const struct net_device
*dev
)
422 struct net_device
*real_dev
= lowpan_dev_info(dev
)->real_dev
;
424 return ieee802154_mlme_ops(real_dev
)->get_pan_id(real_dev
);
427 static __le16
lowpan_get_short_addr(const struct net_device
*dev
)
429 struct net_device
*real_dev
= lowpan_dev_info(dev
)->real_dev
;
431 return ieee802154_mlme_ops(real_dev
)->get_short_addr(real_dev
);
434 static u8
lowpan_get_dsn(const struct net_device
*dev
)
436 struct net_device
*real_dev
= lowpan_dev_info(dev
)->real_dev
;
438 return ieee802154_mlme_ops(real_dev
)->get_dsn(real_dev
);
441 static struct header_ops lowpan_header_ops
= {
442 .create
= lowpan_header_create
,
445 static struct lock_class_key lowpan_tx_busylock
;
446 static struct lock_class_key lowpan_netdev_xmit_lock_key
;
448 static void lowpan_set_lockdep_class_one(struct net_device
*dev
,
449 struct netdev_queue
*txq
,
452 lockdep_set_class(&txq
->_xmit_lock
,
453 &lowpan_netdev_xmit_lock_key
);
457 static int lowpan_dev_init(struct net_device
*dev
)
459 netdev_for_each_tx_queue(dev
, lowpan_set_lockdep_class_one
, NULL
);
460 dev
->qdisc_tx_busylock
= &lowpan_tx_busylock
;
464 static const struct net_device_ops lowpan_netdev_ops
= {
465 .ndo_init
= lowpan_dev_init
,
466 .ndo_start_xmit
= lowpan_xmit
,
467 .ndo_set_mac_address
= lowpan_set_address
,
470 static struct ieee802154_mlme_ops lowpan_mlme
= {
471 .get_pan_id
= lowpan_get_pan_id
,
472 .get_phy
= lowpan_get_phy
,
473 .get_short_addr
= lowpan_get_short_addr
,
474 .get_dsn
= lowpan_get_dsn
,
477 static void lowpan_setup(struct net_device
*dev
)
479 dev
->addr_len
= IEEE802154_ADDR_LEN
;
480 memset(dev
->broadcast
, 0xff, IEEE802154_ADDR_LEN
);
481 dev
->type
= ARPHRD_IEEE802154
;
482 /* Frame Control + Sequence Number + Address fields + Security Header */
483 dev
->hard_header_len
= 2 + 1 + 20 + 14;
484 dev
->needed_tailroom
= 2; /* FCS */
485 dev
->mtu
= IPV6_MIN_MTU
;
486 dev
->tx_queue_len
= 0;
487 dev
->flags
= IFF_BROADCAST
| IFF_MULTICAST
;
488 dev
->watchdog_timeo
= 0;
490 dev
->netdev_ops
= &lowpan_netdev_ops
;
491 dev
->header_ops
= &lowpan_header_ops
;
492 dev
->ml_priv
= &lowpan_mlme
;
493 dev
->destructor
= free_netdev
;
496 static int lowpan_validate(struct nlattr
*tb
[], struct nlattr
*data
[])
498 if (tb
[IFLA_ADDRESS
]) {
499 if (nla_len(tb
[IFLA_ADDRESS
]) != IEEE802154_ADDR_LEN
)
505 static int lowpan_rcv(struct sk_buff
*skb
, struct net_device
*dev
,
506 struct packet_type
*pt
, struct net_device
*orig_dev
)
508 struct ieee802154_hdr hdr
;
511 skb
= skb_share_check(skb
, GFP_ATOMIC
);
515 if (!netif_running(dev
))
518 if (dev
->type
!= ARPHRD_IEEE802154
)
521 if (ieee802154_hdr_peek_addrs(skb
, &hdr
) < 0)
524 /* check that it's our buffer */
525 if (skb
->data
[0] == LOWPAN_DISPATCH_IPV6
) {
526 skb
->protocol
= htons(ETH_P_IPV6
);
527 skb
->pkt_type
= PACKET_HOST
;
529 /* Pull off the 1-byte of 6lowpan header. */
532 ret
= lowpan_give_skb_to_devices(skb
, NULL
);
533 if (ret
== NET_RX_DROP
)
536 switch (skb
->data
[0] & 0xe0) {
537 case LOWPAN_DISPATCH_IPHC
: /* ipv6 datagram */
538 ret
= process_data(skb
, &hdr
);
539 if (ret
== NET_RX_DROP
)
542 case LOWPAN_DISPATCH_FRAG1
: /* first fragment header */
543 ret
= lowpan_frag_rcv(skb
, LOWPAN_DISPATCH_FRAG1
);
545 ret
= process_data(skb
, &hdr
);
546 if (ret
== NET_RX_DROP
)
550 case LOWPAN_DISPATCH_FRAGN
: /* next fragments headers */
551 ret
= lowpan_frag_rcv(skb
, LOWPAN_DISPATCH_FRAGN
);
553 ret
= process_data(skb
, &hdr
);
554 if (ret
== NET_RX_DROP
)
563 return NET_RX_SUCCESS
;
570 static int lowpan_newlink(struct net
*src_net
, struct net_device
*dev
,
571 struct nlattr
*tb
[], struct nlattr
*data
[])
573 struct net_device
*real_dev
;
574 struct lowpan_dev_record
*entry
;
576 pr_debug("adding new link\n");
580 /* find and hold real wpan device */
581 real_dev
= dev_get_by_index(src_net
, nla_get_u32(tb
[IFLA_LINK
]));
584 if (real_dev
->type
!= ARPHRD_IEEE802154
) {
589 lowpan_dev_info(dev
)->real_dev
= real_dev
;
590 mutex_init(&lowpan_dev_info(dev
)->dev_list_mtx
);
592 entry
= kzalloc(sizeof(*entry
), GFP_KERNEL
);
595 lowpan_dev_info(dev
)->real_dev
= NULL
;
601 /* Set the lowpan harware address to the wpan hardware address. */
602 memcpy(dev
->dev_addr
, real_dev
->dev_addr
, IEEE802154_ADDR_LEN
);
604 mutex_lock(&lowpan_dev_info(dev
)->dev_list_mtx
);
605 INIT_LIST_HEAD(&entry
->list
);
606 list_add_tail(&entry
->list
, &lowpan_devices
);
607 mutex_unlock(&lowpan_dev_info(dev
)->dev_list_mtx
);
609 register_netdevice(dev
);
614 static void lowpan_dellink(struct net_device
*dev
, struct list_head
*head
)
616 struct lowpan_dev_info
*lowpan_dev
= lowpan_dev_info(dev
);
617 struct net_device
*real_dev
= lowpan_dev
->real_dev
;
618 struct lowpan_dev_record
*entry
, *tmp
;
622 mutex_lock(&lowpan_dev_info(dev
)->dev_list_mtx
);
623 list_for_each_entry_safe(entry
, tmp
, &lowpan_devices
, list
) {
624 if (entry
->ldev
== dev
) {
625 list_del(&entry
->list
);
629 mutex_unlock(&lowpan_dev_info(dev
)->dev_list_mtx
);
631 mutex_destroy(&lowpan_dev_info(dev
)->dev_list_mtx
);
633 unregister_netdevice_queue(dev
, head
);
638 static struct rtnl_link_ops lowpan_link_ops __read_mostly
= {
640 .priv_size
= sizeof(struct lowpan_dev_info
),
641 .setup
= lowpan_setup
,
642 .newlink
= lowpan_newlink
,
643 .dellink
= lowpan_dellink
,
644 .validate
= lowpan_validate
,
647 static inline int __init
lowpan_netlink_init(void)
649 return rtnl_link_register(&lowpan_link_ops
);
652 static inline void lowpan_netlink_fini(void)
654 rtnl_link_unregister(&lowpan_link_ops
);
657 static int lowpan_device_event(struct notifier_block
*unused
,
658 unsigned long event
, void *ptr
)
660 struct net_device
*dev
= netdev_notifier_info_to_dev(ptr
);
662 struct lowpan_dev_record
*entry
, *tmp
;
664 if (dev
->type
!= ARPHRD_IEEE802154
)
667 if (event
== NETDEV_UNREGISTER
) {
668 list_for_each_entry_safe(entry
, tmp
, &lowpan_devices
, list
) {
669 if (lowpan_dev_info(entry
->ldev
)->real_dev
== dev
)
670 lowpan_dellink(entry
->ldev
, &del_list
);
673 unregister_netdevice_many(&del_list
);
680 static struct notifier_block lowpan_dev_notifier
= {
681 .notifier_call
= lowpan_device_event
,
684 static struct packet_type lowpan_packet_type
= {
685 .type
= htons(ETH_P_IEEE802154
),
689 static int __init
lowpan_init_module(void)
693 err
= lowpan_net_frag_init();
697 err
= lowpan_netlink_init();
701 dev_add_pack(&lowpan_packet_type
);
703 err
= register_netdevice_notifier(&lowpan_dev_notifier
);
710 dev_remove_pack(&lowpan_packet_type
);
711 lowpan_netlink_fini();
713 lowpan_net_frag_exit();
718 static void __exit
lowpan_cleanup_module(void)
720 lowpan_netlink_fini();
722 dev_remove_pack(&lowpan_packet_type
);
724 lowpan_net_frag_exit();
726 unregister_netdevice_notifier(&lowpan_dev_notifier
);
729 module_init(lowpan_init_module
);
730 module_exit(lowpan_cleanup_module
);
731 MODULE_LICENSE("GPL");
732 MODULE_ALIAS_RTNL_LINK("lowpan");