/* net/bluetooth/6lowpan.c */
/*
   Copyright (c) 2013 Intel Corp.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 and
   only version 2 as published by the Free Software Foundation.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
   GNU General Public License for more details.
*/

#include <linux/if_arp.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>

#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>

#include <net/af_ieee802154.h> /* to get the address type */

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>

#include "6lowpan.h"

#include <net/6lowpan.h> /* for the compression support */

#define IFACE_NAME_TEMPLATE "bt%d"
#define EUI64_ADDR_LEN 8

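/* Per-skb control data kept in skb->cb while a packet moves through this
 * driver: the IPv6 destination address and the L2CAP connection it
 * resolves to (filled in by header_create(), consumed by bt_xmit()).
 */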
struct skb_cb {
	struct in6_addr addr;
	struct l2cap_conn *conn;
};
#define lowpan_cb(skb) ((struct skb_cb *)((skb)->cb))

/* The devices list contains those devices for which we are acting
 * as a proxy. A BT 6LoWPAN device is a virtual device that
 * connects to a Bluetooth LE device. The real connection to the
 * BT device is done via the l2cap layer. There is one virtual
 * device per BT 6LoWPAN network (= one per hciX device).
 * The list contains struct lowpan_dev elements.
 */
static LIST_HEAD(bt_6lowpan_devices);
static DEFINE_RWLOCK(devices_lock);

struct lowpan_peer {
	struct list_head list;
	struct l2cap_conn *conn;

	/* peer addresses in various formats */
	unsigned char eui64_addr[EUI64_ADDR_LEN];
	struct in6_addr peer_addr;
};

struct lowpan_dev {
	struct list_head list;

	struct hci_dev *hdev;
	struct net_device *netdev;
	struct list_head peers;
	atomic_t peer_count; /* number of items in peers list */

	struct work_struct delete_netdev;
	struct delayed_work notify_peers;
};

static inline struct lowpan_dev *lowpan_dev(const struct net_device *netdev)
{
	return netdev_priv(netdev);
}

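/* Peer list mutators: the callers (add_peer_conn() and
 * bt_6lowpan_del_conn()) hold devices_lock for writing around these.
 * peer_del() reports whether the removed peer was the last one on the
 * device.
 */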
static inline void peer_add(struct lowpan_dev *dev, struct lowpan_peer *peer)
{
	list_add(&peer->list, &dev->peers);
	atomic_inc(&dev->peer_count);
}

static inline bool peer_del(struct lowpan_dev *dev, struct lowpan_peer *peer)
{
	list_del(&peer->list);

	if (atomic_dec_and_test(&dev->peer_count)) {
		BT_DBG("last peer");
		return true;
	}

	return false;
}

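/* Peer lookup helpers: these walk dev->peers and are called with
 * devices_lock held for reading. They return the matching peer or NULL.
 */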
static inline struct lowpan_peer *peer_lookup_ba(struct lowpan_dev *dev,
						 bdaddr_t *ba, __u8 type)
{
	struct lowpan_peer *peer, *tmp;

	BT_DBG("peers %d addr %pMR type %d", atomic_read(&dev->peer_count),
	       ba, type);

	list_for_each_entry_safe(peer, tmp, &dev->peers, list) {
		BT_DBG("addr %pMR type %d",
		       &peer->conn->hcon->dst, peer->conn->hcon->dst_type);

		if (bacmp(&peer->conn->hcon->dst, ba))
			continue;

		if (type == peer->conn->hcon->dst_type)
			return peer;
	}

	return NULL;
}

static inline struct lowpan_peer *peer_lookup_conn(struct lowpan_dev *dev,
						   struct l2cap_conn *conn)
{
	struct lowpan_peer *peer, *tmp;

	list_for_each_entry_safe(peer, tmp, &dev->peers, list) {
		if (peer->conn == conn)
			return peer;
	}

	return NULL;
}

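/* Unlike the helpers above, the two lookups below take the read lock
 * themselves and search across all registered 6LoWPAN devices.
 */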
static struct lowpan_peer *lookup_peer(struct l2cap_conn *conn)
{
	struct lowpan_dev *entry, *tmp;
	struct lowpan_peer *peer = NULL;
	unsigned long flags;

	read_lock_irqsave(&devices_lock, flags);

	list_for_each_entry_safe(entry, tmp, &bt_6lowpan_devices, list) {
		peer = peer_lookup_conn(entry, conn);
		if (peer)
			break;
	}

	read_unlock_irqrestore(&devices_lock, flags);

	return peer;
}

static struct lowpan_dev *lookup_dev(struct l2cap_conn *conn)
{
	struct lowpan_dev *entry, *tmp;
	struct lowpan_dev *dev = NULL;
	unsigned long flags;

	read_lock_irqsave(&devices_lock, flags);

	list_for_each_entry_safe(entry, tmp, &bt_6lowpan_devices, list) {
		if (conn->hcon->hdev == entry->hdev) {
			dev = entry;
			break;
		}
	}

	read_unlock_irqrestore(&devices_lock, flags);

	return dev;
}

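/* Deliver a copy of the skb to the network stack; the caller keeps
 * ownership of (and must free) the original buffer.
 */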
static int give_skb_to_upper(struct sk_buff *skb, struct net_device *dev)
{
	struct sk_buff *skb_cp;
	int ret;

	skb_cp = skb_copy(skb, GFP_ATOMIC);
	if (!skb_cp)
		return -ENOMEM;

	ret = netif_rx(skb_cp);

	BT_DBG("receive skb %d", ret);
	if (ret < 0)
		return NET_RX_DROP;

	return ret;
}

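/* Decompress an IPHC-encoded packet and pass it up. The source address
 * is reconstructed from the peer's EUI-64 and the destination from our
 * own device address; both ends are known on this point-to-point link.
 */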
static int process_data(struct sk_buff *skb, struct net_device *netdev,
			struct l2cap_conn *conn)
{
	const u8 *saddr, *daddr;
	u8 iphc0, iphc1;
	struct lowpan_dev *dev;
	struct lowpan_peer *peer;
	unsigned long flags;

	dev = lowpan_dev(netdev);

	read_lock_irqsave(&devices_lock, flags);
	peer = peer_lookup_conn(dev, conn);
	read_unlock_irqrestore(&devices_lock, flags);
	if (!peer)
		goto drop;

	saddr = peer->eui64_addr;
	daddr = dev->netdev->dev_addr;

	/* at least two bytes will be used for the encoding */
	if (skb->len < 2)
		goto drop;

	if (lowpan_fetch_skb_u8(skb, &iphc0))
		goto drop;

	if (lowpan_fetch_skb_u8(skb, &iphc1))
		goto drop;

	return lowpan_process_data(skb, netdev,
				   saddr, IEEE802154_ADDR_LONG, EUI64_ADDR_LEN,
				   daddr, IEEE802154_ADDR_LONG, EUI64_ADDR_LEN,
				   iphc0, iphc1, give_skb_to_upper);

drop:
	kfree_skb(skb);
	return -EINVAL;
}

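/* Receive path for one L2CAP connection: dispatch on the first byte,
 * which is either an uncompressed IPv6 header (LOWPAN_DISPATCH_IPV6)
 * or an IPHC-compressed one; other dispatch values are ignored.
 */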
static int recv_pkt(struct sk_buff *skb, struct net_device *dev,
		    struct l2cap_conn *conn)
{
	struct sk_buff *local_skb;
	int ret;

	if (!netif_running(dev))
		goto drop;

	if (dev->type != ARPHRD_6LOWPAN)
		goto drop;

	/* check that it's our buffer */
	if (skb->data[0] == LOWPAN_DISPATCH_IPV6) {
		/* Copy the packet so that the IPv6 header is
		 * properly aligned.
		 */
		local_skb = skb_copy_expand(skb, NET_SKB_PAD - 1,
					    skb_tailroom(skb), GFP_ATOMIC);
		if (!local_skb)
			goto drop;

		local_skb->protocol = htons(ETH_P_IPV6);
		local_skb->pkt_type = PACKET_HOST;

		skb_reset_network_header(local_skb);
		skb_set_transport_header(local_skb, sizeof(struct ipv6hdr));

		if (give_skb_to_upper(local_skb, dev) != NET_RX_SUCCESS) {
			kfree_skb(local_skb);
			goto drop;
		}

		dev->stats.rx_bytes += skb->len;
		dev->stats.rx_packets++;

		kfree_skb(local_skb);
		kfree_skb(skb);
	} else {
		switch (skb->data[0] & 0xe0) {
		case LOWPAN_DISPATCH_IPHC:	/* ipv6 datagram */
			local_skb = skb_clone(skb, GFP_ATOMIC);
			if (!local_skb)
				goto drop;

			ret = process_data(local_skb, dev, conn);
			if (ret != NET_RX_SUCCESS)
				goto drop;

			dev->stats.rx_bytes += skb->len;
			dev->stats.rx_packets++;

			kfree_skb(skb);
			break;
		default:
			break;
		}
	}

	return NET_RX_SUCCESS;

drop:
	kfree_skb(skb);
	return NET_RX_DROP;
}

/* Packet from BT LE device */
int bt_6lowpan_recv(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct lowpan_dev *dev;
	struct lowpan_peer *peer;
	int err;

	peer = lookup_peer(conn);
	if (!peer)
		return -ENOENT;

	dev = lookup_dev(conn);
	if (!dev || !dev->netdev)
		return -ENOENT;

	err = recv_pkt(skb, dev->netdev, conn);
	BT_DBG("recv pkt %d", err);

	return err;
}

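/* Copy up to mtu bytes of msg into skb and chain the remainder as
 * continuation fragments (no L2CAP header) on skb's frag_list.
 * Returns the total number of bytes copied, or -ENOMEM.
 */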
static inline int skbuff_copy(void *msg, int len, int count, int mtu,
			      struct sk_buff *skb, struct net_device *dev)
{
	struct sk_buff **frag;
	int sent = 0;

	memcpy(skb_put(skb, count), msg, count);

	sent += count;
	msg += count;
	len -= count;

	dev->stats.tx_bytes += count;
	dev->stats.tx_packets++;

	raw_dump_table(__func__, "Sending", skb->data, skb->len);

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len > 0) {
		struct sk_buff *tmp;

		count = min_t(unsigned int, mtu, len);

		tmp = bt_skb_alloc(count, GFP_ATOMIC);
		if (!tmp)
			return -ENOMEM;

		*frag = tmp;

		memcpy(skb_put(*frag, count), msg, count);

		raw_dump_table(__func__, "Sending fragment",
			       (*frag)->data, count);

		(*frag)->priority = skb->priority;

		sent += count;
		msg += count;
		len -= count;

		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;

		dev->stats.tx_bytes += count;
		dev->stats.tx_packets++;
	}

	return sent;
}

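/* Build an L2CAP PDU for the 6LoWPAN fixed channel: a bt_skb carrying
 * an l2cap_hdr (cid L2CAP_FC_6LOWPAN) followed by the payload,
 * fragmented via skbuff_copy() if it exceeds the connection MTU.
 */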
static struct sk_buff *create_pdu(struct l2cap_conn *conn, void *msg,
				  size_t len, u32 priority,
				  struct net_device *dev)
{
	struct sk_buff *skb;
	int err, count;
	struct l2cap_hdr *lh;

	/* FIXME: This MTU check should not be needed and is currently
	 * only used for testing purposes.
	 */
	if (conn->mtu > (L2CAP_LE_MIN_MTU + L2CAP_HDR_SIZE))
		conn->mtu = L2CAP_LE_MIN_MTU + L2CAP_HDR_SIZE;

	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);

	BT_DBG("conn %p len %zu mtu %d count %d", conn, len, conn->mtu, count);

	skb = bt_skb_alloc(count + L2CAP_HDR_SIZE, GFP_ATOMIC);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	skb->priority = priority;

	lh = (struct l2cap_hdr *)skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(L2CAP_FC_6LOWPAN);
	lh->len = cpu_to_le16(len);

	err = skbuff_copy(msg, len, count, conn->mtu, skb, dev);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		BT_DBG("skbuff copy %d failed", err);
		return ERR_PTR(err);
	}

	return skb;
}

static int conn_send(struct l2cap_conn *conn,
		     void *msg, size_t len, u32 priority,
		     struct net_device *dev)
{
	struct sk_buff *skb;

	skb = create_pdu(conn, msg, len, priority, dev);
	if (IS_ERR(skb))
		return -EINVAL;

	BT_DBG("conn %p skb %p len %d priority %u", conn, skb, skb->len,
	       skb->priority);

	hci_send_acl(conn->hchan, skb, ACL_START);

	return 0;
}

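/* Reverse mapping: recover the BD address and its type from an IPv6
 * destination whose interface identifier was built by set_addr() below.
 * copy_to_bdaddr() skips the 0xff/0xfe filler bytes (eui64[3] and
 * eui64[4]) that carry no address information.
 */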
static u8 get_addr_type_from_eui64(u8 byte)
{
	/* The universal(0) or local(1) bit decides the address type */
	if (byte & 0x02)
		return ADDR_LE_DEV_RANDOM;

	return ADDR_LE_DEV_PUBLIC;
}

static void copy_to_bdaddr(struct in6_addr *ip6_daddr, bdaddr_t *addr)
{
	u8 *eui64 = ip6_daddr->s6_addr + 8;

	addr->b[0] = eui64[7];
	addr->b[1] = eui64[6];
	addr->b[2] = eui64[5];
	addr->b[3] = eui64[2];
	addr->b[4] = eui64[1];
	addr->b[5] = eui64[0];
}

static void convert_dest_bdaddr(struct in6_addr *ip6_daddr,
				bdaddr_t *addr, u8 *addr_type)
{
	copy_to_bdaddr(ip6_daddr, addr);

	/* We need to toggle the U/L bit that we got from the IPv6
	 * address so that we get the proper address and type of the
	 * BD address.
	 */
	addr->b[5] ^= 0x02;

	*addr_type = get_addr_type_from_eui64(addr->b[5]);
}

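/* header_ops->create callback. For unicast destinations the IPv6
 * destination address is resolved to a known peer (or the packet is
 * rejected with -ENOENT); the chosen peer connection is stashed in
 * lowpan_cb(skb) for bt_xmit() and the header is IPHC-compressed.
 */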
static int header_create(struct sk_buff *skb, struct net_device *netdev,
			 unsigned short type, const void *_daddr,
			 const void *_saddr, unsigned int len)
{
	struct ipv6hdr *hdr;
	struct lowpan_dev *dev;
	struct lowpan_peer *peer;
	bdaddr_t addr, *any = BDADDR_ANY;
	u8 *saddr, *daddr = any->b;
	u8 addr_type;

	if (type != ETH_P_IPV6)
		return -EINVAL;

	hdr = ipv6_hdr(skb);

	dev = lowpan_dev(netdev);

	if (ipv6_addr_is_multicast(&hdr->daddr)) {
		memcpy(&lowpan_cb(skb)->addr, &hdr->daddr,
		       sizeof(struct in6_addr));
		lowpan_cb(skb)->conn = NULL;
	} else {
		unsigned long flags;

		/* Get destination BT device from skb.
		 * If there is no such peer then discard the packet.
		 */
		convert_dest_bdaddr(&hdr->daddr, &addr, &addr_type);

		BT_DBG("dest addr %pMR type %s IP %pI6c", &addr,
		       addr_type == ADDR_LE_DEV_PUBLIC ? "PUBLIC" : "RANDOM",
		       &hdr->daddr);

		read_lock_irqsave(&devices_lock, flags);
		peer = peer_lookup_ba(dev, &addr, addr_type);
		read_unlock_irqrestore(&devices_lock, flags);

		if (!peer) {
			BT_DBG("no such peer %pMR found", &addr);
			return -ENOENT;
		}

		daddr = peer->eui64_addr;

		memcpy(&lowpan_cb(skb)->addr, &hdr->daddr,
		       sizeof(struct in6_addr));
		lowpan_cb(skb)->conn = peer->conn;
	}

	saddr = dev->netdev->dev_addr;

	return lowpan_header_compress(skb, netdev, type, daddr, saddr, len);
}

/* Packet to BT LE device */
static int send_pkt(struct l2cap_conn *conn, const void *saddr,
		    const void *daddr, struct sk_buff *skb,
		    struct net_device *netdev)
{
	raw_dump_table(__func__, "raw skb data dump before fragmentation",
		       skb->data, skb->len);

	return conn_send(conn, skb->data, skb->len, 0, netdev);
}

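/* Multicast is emulated on this point-to-point medium by cloning the
 * skb and unicasting a copy to every connected peer on the interface.
 */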
static void send_mcast_pkt(struct sk_buff *skb, struct net_device *netdev)
{
	struct sk_buff *local_skb;
	struct lowpan_dev *entry, *tmp;
	unsigned long flags;

	read_lock_irqsave(&devices_lock, flags);

	list_for_each_entry_safe(entry, tmp, &bt_6lowpan_devices, list) {
		struct lowpan_peer *pentry, *ptmp;
		struct lowpan_dev *dev;

		if (entry->netdev != netdev)
			continue;

		dev = lowpan_dev(entry->netdev);

		list_for_each_entry_safe(pentry, ptmp, &dev->peers, list) {
			local_skb = skb_clone(skb, GFP_ATOMIC);

			send_pkt(pentry->conn, netdev->dev_addr,
				 pentry->eui64_addr, local_skb, netdev);

			kfree_skb(local_skb);
		}
	}

	read_unlock_irqrestore(&devices_lock, flags);
}

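/* ndo_start_xmit callback: route multicast through send_mcast_pkt(),
 * otherwise look up the peer for the destination that header_create()
 * stored in skb->cb and send over its L2CAP connection.
 */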
static netdev_tx_t bt_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	int err = 0;
	unsigned char *eui64_addr;
	struct lowpan_dev *dev;
	struct lowpan_peer *peer;
	bdaddr_t addr;
	u8 addr_type;

	if (ipv6_addr_is_multicast(&lowpan_cb(skb)->addr)) {
		/* We need to send the packet to every device
		 * behind this interface.
		 */
		send_mcast_pkt(skb, netdev);
	} else {
		unsigned long flags;

		convert_dest_bdaddr(&lowpan_cb(skb)->addr, &addr, &addr_type);
		eui64_addr = lowpan_cb(skb)->addr.s6_addr + 8;
		dev = lowpan_dev(netdev);

		read_lock_irqsave(&devices_lock, flags);
		peer = peer_lookup_ba(dev, &addr, addr_type);
		read_unlock_irqrestore(&devices_lock, flags);

		BT_DBG("xmit %s to %pMR type %s IP %pI6c peer %p",
		       netdev->name, &addr,
		       addr_type == ADDR_LE_DEV_PUBLIC ? "PUBLIC" : "RANDOM",
		       &lowpan_cb(skb)->addr, peer);

		if (peer && peer->conn)
			err = send_pkt(peer->conn, netdev->dev_addr,
				       eui64_addr, skb, netdev);
	}
	dev_kfree_skb(skb);

	if (err)
		BT_DBG("ERROR: xmit failed (%d)", err);

	return (err < 0) ? NET_XMIT_DROP : err;
}

static const struct net_device_ops netdev_ops = {
	.ndo_start_xmit		= bt_xmit,
};

static struct header_ops header_ops = {
	.create	= header_create,
};

static void netdev_setup(struct net_device *dev)
{
	dev->addr_len		= EUI64_ADDR_LEN;
	dev->type		= ARPHRD_6LOWPAN;

	dev->hard_header_len	= 0;
	dev->needed_tailroom	= 0;
	dev->mtu		= IPV6_MIN_MTU;
	dev->tx_queue_len	= 0;
	dev->flags		= IFF_RUNNING | IFF_POINTOPOINT;
	dev->watchdog_timeo	= 0;

	dev->netdev_ops		= &netdev_ops;
	dev->header_ops		= &header_ops;
	dev->destructor		= free_netdev;
}

static struct device_type bt_type = {
	.name	= "bluetooth",
};

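/* Derive the EUI-64 interface identifier from a BD address, with 0xff
 * and 0xfe inserted in the middle per the BT 6lowpan draft. As an
 * illustration (hypothetical address): public AA:BB:CC:DD:EE:FF yields
 * a8:bb:cc:ff:fe:dd:ee:ff, the leading 0xaa becoming 0xa8 once the
 * universal/local bit is cleared (0xaa & ~0x02).
 */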
static void set_addr(u8 *eui, u8 *addr, u8 addr_type)
{
	/* addr is the BT address in little-endian format */
	eui[0] = addr[5];
	eui[1] = addr[4];
	eui[2] = addr[3];
	eui[3] = 0xFF;
	eui[4] = 0xFE;
	eui[5] = addr[2];
	eui[6] = addr[1];
	eui[7] = addr[0];

	/* Universal/local bit set, BT 6lowpan draft ch. 3.2.1 */
	if (addr_type == ADDR_LE_DEV_PUBLIC)
		eui[0] &= ~0x02;
	else
		eui[0] |= 0x02;

	BT_DBG("type %d addr %*phC", addr_type, 8, eui);
}

static void set_dev_addr(struct net_device *netdev, bdaddr_t *addr,
			 u8 addr_type)
{
	netdev->addr_assign_type = NET_ADDR_PERM;
	set_addr(netdev->dev_addr, addr->b, addr_type);
}

static void ifup(struct net_device *netdev)
{
	int err;

	rtnl_lock();
	err = dev_open(netdev);
	if (err < 0)
		BT_INFO("iface %s cannot be opened (%d)", netdev->name, err);
	rtnl_unlock();
}

static void do_notify_peers(struct work_struct *work)
{
	struct lowpan_dev *dev = container_of(work, struct lowpan_dev,
					      notify_peers.work);

	netdev_notify_peers(dev->netdev); /* send neighbour adv at startup */
}

static bool is_bt_6lowpan(struct hci_conn *hcon)
{
	if (hcon->type != LE_LINK)
		return false;

	return test_bit(HCI_CONN_6LOWPAN, &hcon->flags);
}

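/* Create a lowpan_peer for a new L2CAP connection: derive its
 * link-local address (fe80::/64 plus the EUI-64 built from the peer's
 * BD address), add it to the device's peer list, and schedule a delayed
 * neighbour advertisement so peers learn about us.
 */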
static int add_peer_conn(struct l2cap_conn *conn, struct lowpan_dev *dev)
{
	struct lowpan_peer *peer;
	unsigned long flags;

	peer = kzalloc(sizeof(*peer), GFP_ATOMIC);
	if (!peer)
		return -ENOMEM;

	peer->conn = conn;
	memset(&peer->peer_addr, 0, sizeof(struct in6_addr));

	/* RFC 2464 ch. 5 */
	peer->peer_addr.s6_addr[0] = 0xFE;
	peer->peer_addr.s6_addr[1] = 0x80;
	set_addr((u8 *)&peer->peer_addr.s6_addr + 8, conn->hcon->dst.b,
		 conn->hcon->dst_type);

	memcpy(&peer->eui64_addr, (u8 *)&peer->peer_addr.s6_addr + 8,
	       EUI64_ADDR_LEN);

	write_lock_irqsave(&devices_lock, flags);
	INIT_LIST_HEAD(&peer->list);
	peer_add(dev, peer);
	write_unlock_irqrestore(&devices_lock, flags);

	/* Notifying peers about us needs to be done without locks held */
	INIT_DELAYED_WORK(&dev->notify_peers, do_notify_peers);
	schedule_delayed_work(&dev->notify_peers, msecs_to_jiffies(100));

	return 0;
}

/* This gets called when a BT LE 6LoWPAN device is connected. We then
 * create a network device that acts as a proxy between the BT LE device
 * and the kernel network stack.
 */
int bt_6lowpan_add_conn(struct l2cap_conn *conn)
{
	struct lowpan_peer *peer = NULL;
	struct lowpan_dev *dev;
	struct net_device *netdev;
	int err = 0;
	unsigned long flags;

	if (!is_bt_6lowpan(conn->hcon))
		return 0;

	peer = lookup_peer(conn);
	if (peer)
		return -EEXIST;

	dev = lookup_dev(conn);
	if (dev)
		return add_peer_conn(conn, dev);

	netdev = alloc_netdev(sizeof(*dev), IFACE_NAME_TEMPLATE, netdev_setup);
	if (!netdev)
		return -ENOMEM;

	set_dev_addr(netdev, &conn->hcon->src, conn->hcon->src_type);

	netdev->netdev_ops = &netdev_ops;
	SET_NETDEV_DEV(netdev, &conn->hcon->dev);
	SET_NETDEV_DEVTYPE(netdev, &bt_type);

	err = register_netdev(netdev);
	if (err < 0) {
		BT_INFO("register_netdev failed %d", err);
		free_netdev(netdev);
		goto out;
	}

	BT_DBG("ifindex %d peer bdaddr %pMR my addr %pMR",
	       netdev->ifindex, &conn->hcon->dst, &conn->hcon->src);
	set_bit(__LINK_STATE_PRESENT, &netdev->state);

	dev = netdev_priv(netdev);
	dev->netdev = netdev;
	dev->hdev = conn->hcon->hdev;
	INIT_LIST_HEAD(&dev->peers);

	write_lock_irqsave(&devices_lock, flags);
	INIT_LIST_HEAD(&dev->list);
	list_add(&dev->list, &bt_6lowpan_devices);
	write_unlock_irqrestore(&devices_lock, flags);

	ifup(netdev);

	return add_peer_conn(conn, dev);

out:
	return err;
}

static void delete_netdev(struct work_struct *work)
{
	struct lowpan_dev *entry = container_of(work, struct lowpan_dev,
						delete_netdev);

	unregister_netdev(entry->netdev);

	/* The entry pointer is freed in device_event() */
}

int bt_6lowpan_del_conn(struct l2cap_conn *conn)
{
	struct lowpan_dev *entry, *tmp;
	struct lowpan_dev *dev = NULL;
	struct lowpan_peer *peer;
	int err = -ENOENT;
	unsigned long flags;
	bool last = false;

	if (!conn || !is_bt_6lowpan(conn->hcon))
		return 0;

	write_lock_irqsave(&devices_lock, flags);

	list_for_each_entry_safe(entry, tmp, &bt_6lowpan_devices, list) {
		dev = lowpan_dev(entry->netdev);
		peer = peer_lookup_conn(dev, conn);
		if (peer) {
			last = peer_del(dev, peer);
			err = 0;
			break;
		}
	}

	if (!err && last && dev && !atomic_read(&dev->peer_count)) {
		write_unlock_irqrestore(&devices_lock, flags);

		cancel_delayed_work_sync(&dev->notify_peers);

		/* bt_6lowpan_del_conn() is called with the hci dev lock
		 * held, which means that we must delete the netdevice in
		 * a worker thread.
		 */
		INIT_WORK(&entry->delete_netdev, delete_netdev);
		schedule_work(&entry->delete_netdev);
	} else {
		write_unlock_irqrestore(&devices_lock, flags);
	}

	return err;
}

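/* Netdevice notifier: when our netdev is unregistered (e.g. by
 * delete_netdev() above), drop the matching entry from
 * bt_6lowpan_devices and free it.
 */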
static int device_event(struct notifier_block *unused,
			unsigned long event, void *ptr)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
	struct lowpan_dev *entry, *tmp;
	unsigned long flags;

	if (netdev->type != ARPHRD_6LOWPAN)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_UNREGISTER:
		write_lock_irqsave(&devices_lock, flags);
		list_for_each_entry_safe(entry, tmp, &bt_6lowpan_devices,
					 list) {
			if (entry->netdev == netdev) {
				list_del(&entry->list);
				kfree(entry);
				break;
			}
		}
		write_unlock_irqrestore(&devices_lock, flags);
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block bt_6lowpan_dev_notifier = {
	.notifier_call = device_event,
};

int bt_6lowpan_init(void)
{
	return register_netdevice_notifier(&bt_6lowpan_dev_notifier);
}

void bt_6lowpan_cleanup(void)
{
	unregister_netdevice_notifier(&bt_6lowpan_dev_notifier);
}