/*
   Copyright (c) 2013 Intel Corp.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 and
   only version 2 as published by the Free Software Foundation.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
   GNU General Public License for more details.
*/

#include <linux/if_arp.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>

#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>

#include <net/af_ieee802154.h> /* to get the address type */

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>

#include "6lowpan.h"

#include "../ieee802154/6lowpan.h" /* for the compression support */

#define IFACE_NAME_TEMPLATE "bt%d"
#define EUI64_ADDR_LEN 8

struct skb_cb {
	struct in6_addr addr;
	struct l2cap_conn *conn;
};
#define lowpan_cb(skb) ((struct skb_cb *)((skb)->cb))

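/* header_create() runs while the IPv6 header is still readable. It saves
 * the destination address (and, for unicast, the peer's L2CAP connection)
 * in the skb control block above, so that bt_xmit() can find the right
 * peer again after lowpan_header_compress() has rewritten the packet.
 */
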
/* The devices list contains those devices for which we are acting
 * as a proxy. The BT 6LoWPAN device is a virtual device that
 * connects to the Bluetooth LE device. The real connection to the
 * BT device is done via the l2cap layer. There is one virtual
 * device per BT 6LoWPAN network (= one hciX device). The list
 * contains struct lowpan_dev elements.
 */
static LIST_HEAD(bt_6lowpan_devices);
static DEFINE_RWLOCK(devices_lock);

struct lowpan_peer {
	struct list_head list;
	struct l2cap_conn *conn;

	/* peer addresses in various formats */
	unsigned char eui64_addr[EUI64_ADDR_LEN];
	struct in6_addr peer_addr;
};

struct lowpan_dev {
	struct list_head list;

	struct hci_dev *hdev;
	struct net_device *netdev;
	struct list_head peers;
	atomic_t peer_count; /* number of items in peers list */

	struct work_struct delete_netdev;
	struct delayed_work notify_peers;
};

static inline struct lowpan_dev *lowpan_dev(const struct net_device *netdev)
{
	return netdev_priv(netdev);
}

static inline void peer_add(struct lowpan_dev *dev, struct lowpan_peer *peer)
{
	list_add(&peer->list, &dev->peers);
	atomic_inc(&dev->peer_count);
}

static inline bool peer_del(struct lowpan_dev *dev, struct lowpan_peer *peer)
{
	list_del(&peer->list);

	if (atomic_dec_and_test(&dev->peer_count)) {
		BT_DBG("last peer");
		return true;
	}

	return false;
}

static inline struct lowpan_peer *peer_lookup_ba(struct lowpan_dev *dev,
						 bdaddr_t *ba, __u8 type)
{
	struct lowpan_peer *peer, *tmp;

	BT_DBG("peers %d addr %pMR type %d", atomic_read(&dev->peer_count),
	       ba, type);

	list_for_each_entry_safe(peer, tmp, &dev->peers, list) {
		BT_DBG("addr %pMR type %d",
		       &peer->conn->hcon->dst, peer->conn->hcon->dst_type);

		if (bacmp(&peer->conn->hcon->dst, ba))
			continue;

		if (type == peer->conn->hcon->dst_type)
			return peer;
	}

	return NULL;
}

static inline struct lowpan_peer *peer_lookup_conn(struct lowpan_dev *dev,
						   struct l2cap_conn *conn)
{
	struct lowpan_peer *peer, *tmp;

	list_for_each_entry_safe(peer, tmp, &dev->peers, list) {
		if (peer->conn == conn)
			return peer;
	}

	return NULL;
}

static struct lowpan_peer *lookup_peer(struct l2cap_conn *conn)
{
	struct lowpan_dev *entry, *tmp;
	struct lowpan_peer *peer = NULL;
	unsigned long flags;

	read_lock_irqsave(&devices_lock, flags);

	list_for_each_entry_safe(entry, tmp, &bt_6lowpan_devices, list) {
		peer = peer_lookup_conn(entry, conn);
		if (peer)
			break;
	}

	read_unlock_irqrestore(&devices_lock, flags);

	return peer;
}

static struct lowpan_dev *lookup_dev(struct l2cap_conn *conn)
{
	struct lowpan_dev *entry, *tmp;
	struct lowpan_dev *dev = NULL;
	unsigned long flags;

	read_lock_irqsave(&devices_lock, flags);

	list_for_each_entry_safe(entry, tmp, &bt_6lowpan_devices, list) {
		if (conn->hcon->hdev == entry->hdev) {
			dev = entry;
			break;
		}
	}

	read_unlock_irqrestore(&devices_lock, flags);

	return dev;
}

static int give_skb_to_upper(struct sk_buff *skb, struct net_device *dev)
{
	struct sk_buff *skb_cp;
	int ret;

	skb_cp = skb_copy(skb, GFP_ATOMIC);
	if (!skb_cp)
		return -ENOMEM;

	ret = netif_rx(skb_cp);

	BT_DBG("receive skb %d", ret);
	if (ret < 0)
		return NET_RX_DROP;

	return ret;
}

static int process_data(struct sk_buff *skb, struct net_device *netdev,
			struct l2cap_conn *conn)
{
	const u8 *saddr, *daddr;
	u8 iphc0, iphc1;
	struct lowpan_dev *dev;
	struct lowpan_peer *peer;
	unsigned long flags;

	dev = lowpan_dev(netdev);

	read_lock_irqsave(&devices_lock, flags);
	peer = peer_lookup_conn(dev, conn);
	read_unlock_irqrestore(&devices_lock, flags);
	if (!peer)
		goto drop;

	saddr = peer->eui64_addr;
	daddr = dev->netdev->dev_addr;

	/* at least two bytes will be used for the encoding */
	if (skb->len < 2)
		goto drop;

	if (lowpan_fetch_skb_u8(skb, &iphc0))
		goto drop;

	if (lowpan_fetch_skb_u8(skb, &iphc1))
		goto drop;

	return lowpan_process_data(skb, netdev,
				   saddr, IEEE802154_ADDR_LONG, EUI64_ADDR_LEN,
				   daddr, IEEE802154_ADDR_LONG, EUI64_ADDR_LEN,
				   iphc0, iphc1, give_skb_to_upper);

drop:
	kfree_skb(skb);
	return -EINVAL;
}

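/* The first byte of every incoming frame is a 6LoWPAN dispatch value
 * (RFC 4944/RFC 6282): LOWPAN_DISPATCH_IPV6 (0x41) means an uncompressed
 * IPv6 header follows, while a 011xxxxx pattern (masked with 0xe0 below,
 * matching LOWPAN_DISPATCH_IPHC) means an IPHC-compressed header that
 * process_data() hands to lowpan_process_data() for decompression.
 */
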
static int recv_pkt(struct sk_buff *skb, struct net_device *dev,
		    struct l2cap_conn *conn)
{
	struct sk_buff *local_skb;
	int ret;

	if (!netif_running(dev))
		goto drop;

	if (dev->type != ARPHRD_6LOWPAN)
		goto drop;

	/* check that it's our buffer */
	if (skb->data[0] == LOWPAN_DISPATCH_IPV6) {
		/* Copy the packet so that the IPv6 header is
		 * properly aligned.
		 */
		local_skb = skb_copy_expand(skb, NET_SKB_PAD - 1,
					    skb_tailroom(skb), GFP_ATOMIC);
		if (!local_skb)
			goto drop;

		local_skb->protocol = htons(ETH_P_IPV6);
		local_skb->pkt_type = PACKET_HOST;

		skb_reset_network_header(local_skb);
		skb_set_transport_header(local_skb, sizeof(struct ipv6hdr));

		if (give_skb_to_upper(local_skb, dev) != NET_RX_SUCCESS) {
			kfree_skb(local_skb);
			goto drop;
		}

		dev->stats.rx_bytes += skb->len;
		dev->stats.rx_packets++;

		kfree_skb(local_skb);
		kfree_skb(skb);
	} else {
		switch (skb->data[0] & 0xe0) {
		case LOWPAN_DISPATCH_IPHC:	/* ipv6 datagram */
			local_skb = skb_clone(skb, GFP_ATOMIC);
			if (!local_skb)
				goto drop;

			ret = process_data(local_skb, dev, conn);
			if (ret != NET_RX_SUCCESS)
				goto drop;

			dev->stats.rx_bytes += skb->len;
			dev->stats.rx_packets++;

			kfree_skb(skb);
			break;
		default:
			break;
		}
	}

	return NET_RX_SUCCESS;

drop:
	kfree_skb(skb);
	return NET_RX_DROP;
}

/* Packet from BT LE device */
int bt_6lowpan_recv(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct lowpan_dev *dev;
	struct lowpan_peer *peer;
	int err;

	peer = lookup_peer(conn);
	if (!peer)
		return -ENOENT;

	dev = lookup_dev(conn);
	if (!dev || !dev->netdev)
		return -ENOENT;

	err = recv_pkt(skb, dev->netdev, conn);
	BT_DBG("recv pkt %d", err);

	return err;
}

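/* Fragmentation of an outgoing PDU: the head skb carries the 4-byte
 * L2CAP header (le16 len + le16 cid) plus the first payload chunk, and
 * any remaining payload is chained as bare continuation fragments of at
 * most mtu bytes each on skb_shinfo(skb)->frag_list. For example, with
 * the conn->mtu cap of L2CAP_LE_MIN_MTU + L2CAP_HDR_SIZE (23 + 4 = 27)
 * applied in create_pdu() below, a 100-byte datagram goes out as chunks
 * of 23 + 27 + 27 + 23 bytes.
 */
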
static inline int skbuff_copy(void *msg, int len, int count, int mtu,
			      struct sk_buff *skb, struct net_device *dev)
{
	struct sk_buff **frag;
	int sent = 0;

	memcpy(skb_put(skb, count), msg, count);

	sent += count;
	msg += count;
	len -= count;

	dev->stats.tx_bytes += count;
	dev->stats.tx_packets++;

	raw_dump_table(__func__, "Sending", skb->data, skb->len);

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len > 0) {
		struct sk_buff *tmp;

		count = min_t(unsigned int, mtu, len);

		tmp = bt_skb_alloc(count, GFP_ATOMIC);
		if (!tmp)
			return -ENOMEM;

		*frag = tmp;

		memcpy(skb_put(*frag, count), msg, count);

		raw_dump_table(__func__, "Sending fragment",
			       (*frag)->data, count);

		(*frag)->priority = skb->priority;

		sent += count;
		msg += count;
		len -= count;

		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;

		dev->stats.tx_bytes += count;
		dev->stats.tx_packets++;
	}

	return sent;
}

static struct sk_buff *create_pdu(struct l2cap_conn *conn, void *msg,
				  size_t len, u32 priority,
				  struct net_device *dev)
{
	struct sk_buff *skb;
	int err, count;
	struct l2cap_hdr *lh;

	/* FIXME: This mtu check should not be needed and atm is only used
	 * for testing purposes
	 */
	if (conn->mtu > (L2CAP_LE_MIN_MTU + L2CAP_HDR_SIZE))
		conn->mtu = L2CAP_LE_MIN_MTU + L2CAP_HDR_SIZE;

	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);

	BT_DBG("conn %p len %zu mtu %d count %d", conn, len, conn->mtu, count);

	skb = bt_skb_alloc(count + L2CAP_HDR_SIZE, GFP_ATOMIC);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	skb->priority = priority;

	lh = (struct l2cap_hdr *)skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(L2CAP_FC_6LOWPAN);
	lh->len = cpu_to_le16(len);

	err = skbuff_copy(msg, len, count, conn->mtu, skb, dev);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		BT_DBG("skbuff copy %d failed", err);
		return ERR_PTR(err);
	}

	return skb;
}

static int conn_send(struct l2cap_conn *conn,
		     void *msg, size_t len, u32 priority,
		     struct net_device *dev)
{
	struct sk_buff *skb;

	skb = create_pdu(conn, msg, len, priority, dev);
	if (IS_ERR(skb))
		return -EINVAL;

	BT_DBG("conn %p skb %p len %d priority %u", conn, skb, skb->len,
	       skb->priority);

	hci_send_acl(conn->hchan, skb, ACL_START);

	return 0;
}

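/* Reverse of set_addr() below: take the EUI-64 from bytes 8..15 of the
 * IPv6 destination address, undo the byte reordering, drop the 0xFF:0xFE
 * filler (eui64[3] and eui64[4]), flip the Universal/Local bit back and
 * recover the LE address type from the lowest bit of the first EUI-64
 * byte.
 */
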
static void get_dest_bdaddr(struct in6_addr *ip6_daddr,
			    bdaddr_t *addr, u8 *addr_type)
{
	u8 *eui64;

	eui64 = ip6_daddr->s6_addr + 8;

	addr->b[0] = eui64[7];
	addr->b[1] = eui64[6];
	addr->b[2] = eui64[5];
	addr->b[3] = eui64[2];
	addr->b[4] = eui64[1];
	addr->b[5] = eui64[0];

	addr->b[5] ^= 2;

	/* Set universal/local bit to 0 */
	if (addr->b[5] & 1) {
		addr->b[5] &= ~1;
		*addr_type = ADDR_LE_DEV_PUBLIC;
	} else {
		*addr_type = ADDR_LE_DEV_RANDOM;
	}
}

static int header_create(struct sk_buff *skb, struct net_device *netdev,
			 unsigned short type, const void *_daddr,
			 const void *_saddr, unsigned int len)
{
	struct ipv6hdr *hdr;
	struct lowpan_dev *dev;
	struct lowpan_peer *peer;
	bdaddr_t addr, *any = BDADDR_ANY;
	u8 *saddr, *daddr = any->b;
	u8 addr_type;

	if (type != ETH_P_IPV6)
		return -EINVAL;

	hdr = ipv6_hdr(skb);

	dev = lowpan_dev(netdev);

	if (ipv6_addr_is_multicast(&hdr->daddr)) {
		memcpy(&lowpan_cb(skb)->addr, &hdr->daddr,
		       sizeof(struct in6_addr));
		lowpan_cb(skb)->conn = NULL;
	} else {
		unsigned long flags;

		/* Get destination BT device from skb.
		 * If there is no such peer then discard the packet.
		 */
		get_dest_bdaddr(&hdr->daddr, &addr, &addr_type);

		BT_DBG("dest addr %pMR type %d", &addr, addr_type);

		read_lock_irqsave(&devices_lock, flags);
		peer = peer_lookup_ba(dev, &addr, addr_type);
		read_unlock_irqrestore(&devices_lock, flags);

		if (!peer) {
			BT_DBG("no such peer %pMR found", &addr);
			return -ENOENT;
		}

		daddr = peer->eui64_addr;

		memcpy(&lowpan_cb(skb)->addr, &hdr->daddr,
		       sizeof(struct in6_addr));
		lowpan_cb(skb)->conn = peer->conn;
	}

	saddr = dev->netdev->dev_addr;

	return lowpan_header_compress(skb, netdev, type, daddr, saddr, len);
}

/* Packet to BT LE device */
static int send_pkt(struct l2cap_conn *conn, const void *saddr,
		    const void *daddr, struct sk_buff *skb,
		    struct net_device *netdev)
{
	raw_dump_table(__func__, "raw skb data dump before fragmentation",
		       skb->data, skb->len);

	return conn_send(conn, skb->data, skb->len, 0, netdev);
}

511
512static void send_mcast_pkt(struct sk_buff *skb, struct net_device *netdev)
513{
514 struct sk_buff *local_skb;
515 struct lowpan_dev *entry, *tmp;
516 unsigned long flags;
517
518 read_lock_irqsave(&devices_lock, flags);
519
520 list_for_each_entry_safe(entry, tmp, &bt_6lowpan_devices, list) {
521 struct lowpan_peer *pentry, *ptmp;
522 struct lowpan_dev *dev;
523
524 if (entry->netdev != netdev)
525 continue;
526
527 dev = lowpan_dev(entry->netdev);
528
529 list_for_each_entry_safe(pentry, ptmp, &dev->peers, list) {
530 local_skb = skb_clone(skb, GFP_ATOMIC);
531
532 send_pkt(pentry->conn, netdev->dev_addr,
533 pentry->eui64_addr, local_skb, netdev);
534
535 kfree_skb(local_skb);
536 }
537 }
538
539 read_unlock_irqrestore(&devices_lock, flags);
540}
541
static netdev_tx_t bt_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	int err = 0;
	unsigned char *eui64_addr;
	struct lowpan_dev *dev;
	struct lowpan_peer *peer;
	bdaddr_t addr;
	u8 addr_type;

	if (ipv6_addr_is_multicast(&lowpan_cb(skb)->addr)) {
		/* We need to send the packet to every device
		 * behind this interface.
		 */
		send_mcast_pkt(skb, netdev);
	} else {
		unsigned long flags;

		get_dest_bdaddr(&lowpan_cb(skb)->addr, &addr, &addr_type);
		eui64_addr = lowpan_cb(skb)->addr.s6_addr + 8;
		dev = lowpan_dev(netdev);

		read_lock_irqsave(&devices_lock, flags);
		peer = peer_lookup_ba(dev, &addr, addr_type);
		read_unlock_irqrestore(&devices_lock, flags);

		BT_DBG("xmit from %s to %pMR (%pI6c) peer %p", netdev->name,
		       &addr, &lowpan_cb(skb)->addr, peer);

		if (peer && peer->conn)
			err = send_pkt(peer->conn, netdev->dev_addr,
				       eui64_addr, skb, netdev);
	}
	dev_kfree_skb(skb);

	if (err)
		BT_DBG("ERROR: xmit failed (%d)", err);

	return (err < 0) ? NET_XMIT_DROP : err;
}

static const struct net_device_ops netdev_ops = {
	.ndo_start_xmit		= bt_xmit,
};

static struct header_ops header_ops = {
	.create	= header_create,
};

static void netdev_setup(struct net_device *dev)
{
	dev->addr_len		= EUI64_ADDR_LEN;
	dev->type		= ARPHRD_6LOWPAN;

	dev->hard_header_len	= 0;
	dev->needed_tailroom	= 0;
	dev->mtu		= IPV6_MIN_MTU;
	dev->tx_queue_len	= 0;
	dev->flags		= IFF_RUNNING | IFF_POINTOPOINT;
	dev->watchdog_timeo	= 0;

	dev->netdev_ops		= &netdev_ops;
	dev->header_ops		= &header_ops;
	dev->destructor		= free_netdev;
}

static struct device_type bt_type = {
	.name	= "bluetooth",
};

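/* Map a little-endian Bluetooth address to a modified EUI-64 interface
 * identifier: reverse the bytes, insert 0xFF:0xFE in the middle and
 * flip the Universal/Local bit (RFC 4291), with the lowest bit then
 * encoding the LE address type. For example, a (hypothetical) public
 * address 00:1B:DC:C0:FF:EE becomes 03:1B:DC:FF:FE:C0:FF:EE.
 */
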
static void set_addr(u8 *eui, u8 *addr, u8 addr_type)
{
	/* addr is the BT address in little-endian format */
	eui[0] = addr[5];
	eui[1] = addr[4];
	eui[2] = addr[3];
	eui[3] = 0xFF;
	eui[4] = 0xFE;
	eui[5] = addr[2];
	eui[6] = addr[1];
	eui[7] = addr[0];

	eui[0] ^= 2;

	/* Universal/local bit set, RFC 4291 */
	if (addr_type == ADDR_LE_DEV_PUBLIC)
		eui[0] |= 1;
	else
		eui[0] &= ~1;
}

static void set_dev_addr(struct net_device *netdev, bdaddr_t *addr,
			 u8 addr_type)
{
	netdev->addr_assign_type = NET_ADDR_PERM;
	set_addr(netdev->dev_addr, addr->b, addr_type);
	netdev->dev_addr[0] ^= 2;
}

static void ifup(struct net_device *netdev)
{
	int err;

	rtnl_lock();
	err = dev_open(netdev);
	if (err < 0)
		BT_INFO("iface %s cannot be opened (%d)", netdev->name, err);
	rtnl_unlock();
}

static void do_notify_peers(struct work_struct *work)
{
	struct lowpan_dev *dev = container_of(work, struct lowpan_dev,
					      notify_peers.work);

	netdev_notify_peers(dev->netdev); /* send neighbour adv at startup */
}

static bool is_bt_6lowpan(struct hci_conn *hcon)
{
	if (hcon->type != LE_LINK)
		return false;

	return test_bit(HCI_CONN_6LOWPAN, &hcon->flags);
}

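/* The peer's IPv6 address is built as a link-local address (fe80::/64,
 * RFC 2464 ch. 5) with the interface identifier derived from the peer's
 * BT address by set_addr(). Note that peer->eui64_addr gets the U/L bit
 * flipped back, so it differs from the address's interface identifier in
 * that one bit only; this raw EUI-64 form is what is later handed to
 * lowpan_process_data() and lowpan_header_compress().
 */
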
static int add_peer_conn(struct l2cap_conn *conn, struct lowpan_dev *dev)
{
	struct lowpan_peer *peer;
	unsigned long flags;

	peer = kzalloc(sizeof(*peer), GFP_ATOMIC);
	if (!peer)
		return -ENOMEM;

	peer->conn = conn;
	memset(&peer->peer_addr, 0, sizeof(struct in6_addr));

	/* RFC 2464 ch. 5 */
	peer->peer_addr.s6_addr[0] = 0xFE;
	peer->peer_addr.s6_addr[1] = 0x80;
	set_addr((u8 *)&peer->peer_addr.s6_addr + 8, conn->hcon->dst.b,
		 conn->hcon->dst_type);

	memcpy(&peer->eui64_addr, (u8 *)&peer->peer_addr.s6_addr + 8,
	       EUI64_ADDR_LEN);
	peer->eui64_addr[0] ^= 2; /* second bit-flip (Universal/Local)
				   * is done according to RFC 2464
				   */

	raw_dump_inline(__func__, "peer IPv6 address",
			(unsigned char *)&peer->peer_addr, 16);
	raw_dump_inline(__func__, "peer EUI64 address", peer->eui64_addr, 8);

	write_lock_irqsave(&devices_lock, flags);
	INIT_LIST_HEAD(&peer->list);
	peer_add(dev, peer);
	write_unlock_irqrestore(&devices_lock, flags);

	/* Notifying peers about us needs to be done without locks held */
	INIT_DELAYED_WORK(&dev->notify_peers, do_notify_peers);
	schedule_delayed_work(&dev->notify_peers, msecs_to_jiffies(100));

	return 0;
}

/* This gets called when the BT LE 6LoWPAN device is connected. We then
 * create a network device that acts as a proxy between the BT LE device
 * and the kernel network stack.
 */
int bt_6lowpan_add_conn(struct l2cap_conn *conn)
{
	struct lowpan_peer *peer = NULL;
	struct lowpan_dev *dev;
	struct net_device *netdev;
	int err = 0;
	unsigned long flags;

	if (!is_bt_6lowpan(conn->hcon))
		return 0;

	peer = lookup_peer(conn);
	if (peer)
		return -EEXIST;

	dev = lookup_dev(conn);
	if (dev)
		return add_peer_conn(conn, dev);

	netdev = alloc_netdev(sizeof(*dev), IFACE_NAME_TEMPLATE, netdev_setup);
	if (!netdev)
		return -ENOMEM;

	set_dev_addr(netdev, &conn->hcon->src, conn->hcon->src_type);

	netdev->netdev_ops = &netdev_ops;
	SET_NETDEV_DEV(netdev, &conn->hcon->dev);
	SET_NETDEV_DEVTYPE(netdev, &bt_type);

	err = register_netdev(netdev);
	if (err < 0) {
		BT_INFO("register_netdev failed %d", err);
		free_netdev(netdev);
		goto out;
	}

	BT_DBG("ifindex %d peer bdaddr %pMR my addr %pMR",
	       netdev->ifindex, &conn->hcon->dst, &conn->hcon->src);
	set_bit(__LINK_STATE_PRESENT, &netdev->state);

	dev = netdev_priv(netdev);
	dev->netdev = netdev;
	dev->hdev = conn->hcon->hdev;
	INIT_LIST_HEAD(&dev->peers);

	write_lock_irqsave(&devices_lock, flags);
	INIT_LIST_HEAD(&dev->list);
	list_add(&dev->list, &bt_6lowpan_devices);
	write_unlock_irqrestore(&devices_lock, flags);

	ifup(netdev);

	return add_peer_conn(conn, dev);

out:
	return err;
}

static void delete_netdev(struct work_struct *work)
{
	struct lowpan_dev *entry = container_of(work, struct lowpan_dev,
						delete_netdev);

	unregister_netdev(entry->netdev);

	/* The entry pointer is deleted in device_event() */
}

int bt_6lowpan_del_conn(struct l2cap_conn *conn)
{
	struct lowpan_dev *entry, *tmp;
	struct lowpan_dev *dev = NULL;
	struct lowpan_peer *peer;
	int err = -ENOENT;
	unsigned long flags;
	bool last = false;

	if (!conn || !is_bt_6lowpan(conn->hcon))
		return 0;

	write_lock_irqsave(&devices_lock, flags);

	list_for_each_entry_safe(entry, tmp, &bt_6lowpan_devices, list) {
		dev = lowpan_dev(entry->netdev);
		peer = peer_lookup_conn(dev, conn);
		if (peer) {
			last = peer_del(dev, peer);
			err = 0;
			break;
		}
	}

	if (!err && last && dev && !atomic_read(&dev->peer_count)) {
		write_unlock_irqrestore(&devices_lock, flags);

		cancel_delayed_work_sync(&dev->notify_peers);

		/* bt_6lowpan_del_conn() is called with the hci dev lock
		 * held, which means that we must delete the netdevice in
		 * a worker thread.
		 */
		INIT_WORK(&entry->delete_netdev, delete_netdev);
		schedule_work(&entry->delete_netdev);
	} else {
		write_unlock_irqrestore(&devices_lock, flags);
	}

	return err;
}

static int device_event(struct notifier_block *unused,
			unsigned long event, void *ptr)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
	struct lowpan_dev *entry, *tmp;
	unsigned long flags;

	if (netdev->type != ARPHRD_6LOWPAN)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_UNREGISTER:
		write_lock_irqsave(&devices_lock, flags);
		list_for_each_entry_safe(entry, tmp, &bt_6lowpan_devices,
					 list) {
			if (entry->netdev == netdev) {
				list_del(&entry->list);
				kfree(entry);
				break;
			}
		}
		write_unlock_irqrestore(&devices_lock, flags);
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block bt_6lowpan_dev_notifier = {
	.notifier_call = device_event,
};

int bt_6lowpan_init(void)
{
	return register_netdevice_notifier(&bt_6lowpan_dev_notifier);
}

void bt_6lowpan_cleanup(void)
{
	unregister_netdevice_notifier(&bt_6lowpan_dev_notifier);
}