Bluetooth: Add mgmt events for blacklisting
net/bluetooth/hci_sock.c
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI sockets. */

#include <linux/module.h>

#include <linux/types.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/compat.h>
#include <linux/socket.h>
#include <linux/ioctl.h>
#include <net/sock.h>

#include <asm/system.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

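/* Module parameter (registered with module_param() at the bottom of this
 * file): when zero, binding HCI_CHANNEL_CONTROL sockets is rejected, so
 * the management interface stays disabled by default.
 */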
static int enable_mgmt;

/* ----- HCI socket interface ----- */

static inline int hci_test_bit(int nr, void *addr)
{
	return *((__u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
}

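/* Bitmaps of packet types, events and per-OGF command OCFs that sockets
 * without CAP_NET_RAW are allowed to send or receive; consulted by
 * hci_sock_sendmsg() and hci_sock_setsockopt().
 */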
/* Security filter */
static struct hci_sec_filter hci_sec_filter = {
	/* Packet types */
	0x10,
	/* Events */
	{ 0x1000d9fe, 0x0000b00c },
	/* Commands */
	{
		{ 0x0 },
		/* OGF_LINK_CTL */
		{ 0xbe000006, 0x00000001, 0x00000000, 0x00 },
		/* OGF_LINK_POLICY */
		{ 0x00005200, 0x00000000, 0x00000000, 0x00 },
		/* OGF_HOST_CTL */
		{ 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
		/* OGF_INFO_PARAM */
		{ 0x000002be, 0x00000000, 0x00000000, 0x00 },
		/* OGF_STATUS_PARAM */
		{ 0x000000ea, 0x00000000, 0x00000000, 0x00 }
	}
};

static struct bt_sock_list hci_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
};

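/* Deliver a copy of a frame to every bound socket on the same channel and
 * device, except skip_sk and the socket the frame originated from.  Raw
 * channel sockets additionally get the per-socket packet type and event
 * filter applied and the packet type byte pushed back in front of the
 * payload; control channel frames are cloned unfiltered.
 */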
/* Send frame to RAW socket */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb,
							struct sock *skip_sk)
{
	struct sock *sk;
	struct hlist_node *node;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	read_lock(&hci_sk_list.lock);
	sk_for_each(sk, node, &hci_sk_list.head) {
		struct hci_filter *flt;
		struct sk_buff *nskb;

		if (sk == skip_sk)
			continue;

		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;

		if (bt_cb(skb)->channel != hci_pi(sk)->channel)
			continue;

		if (bt_cb(skb)->channel == HCI_CHANNEL_CONTROL)
			goto clone;

		/* Apply filter */
		flt = &hci_pi(sk)->filter;

		if (!test_bit((bt_cb(skb)->pkt_type == HCI_VENDOR_PKT) ?
				0 : (bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS),
				&flt->type_mask))
			continue;

		if (bt_cb(skb)->pkt_type == HCI_EVENT_PKT) {
			register int evt = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);

			if (!hci_test_bit(evt, &flt->event_mask))
				continue;

			if (flt->opcode &&
			    ((evt == HCI_EV_CMD_COMPLETE &&
			      flt->opcode !=
			      get_unaligned((__le16 *)(skb->data + 3))) ||
			     (evt == HCI_EV_CMD_STATUS &&
			      flt->opcode !=
			      get_unaligned((__le16 *)(skb->data + 4)))))
				continue;
		}

clone:
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			continue;

		/* Put type byte before the data */
		if (bt_cb(skb)->channel == HCI_CHANNEL_RAW)
			memcpy(skb_push(nskb, 1), &bt_cb(nskb)->pkt_type, 1);

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}
	read_unlock(&hci_sk_list.lock);
}

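/* Release callback: unlink the socket from hci_sk_list, drop the promisc
 * reference taken on the device at bind time and free any queued skbs.
 */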
static int hci_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!sk)
		return 0;

	hdev = hci_pi(sk)->hdev;

	bt_sock_unlink(&hci_sk_list, sk);

	if (hdev) {
		atomic_dec(&hdev->promisc);
		hci_dev_put(hdev);
	}

	sock_orphan(sk);

	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);

	sock_put(sk);
	return 0;
}

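/* Helpers for the HCIBLOCKADDR/HCIUNBLOCKADDR ioctls: copy a bdaddr_t
 * from user space and add it to (or remove it from) the device blacklist
 * while holding the device lock.
 */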
static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock_bh(hdev);

	err = hci_blacklist_add(hdev, &bdaddr);

	hci_dev_unlock_bh(hdev);

	return err;
}

static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock_bh(hdev);

	err = hci_blacklist_del(hdev, &bdaddr);

	hci_dev_unlock_bh(hdev);

	return err;
}

/* Ioctls that require bound socket */
static inline int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd, unsigned long arg)
{
	struct hci_dev *hdev = hci_pi(sk)->hdev;

	if (!hdev)
		return -EBADFD;

	switch (cmd) {
	case HCISETRAW:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;

		if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
			return -EPERM;

		if (arg)
			set_bit(HCI_RAW, &hdev->flags);
		else
			clear_bit(HCI_RAW, &hdev->flags);

		return 0;

	case HCIGETCONNINFO:
		return hci_get_conn_info(hdev, (void __user *) arg);

	case HCIGETAUTHINFO:
		return hci_get_auth_info(hdev, (void __user *) arg);

	case HCIBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_sock_blacklist_add(hdev, (void __user *) arg);

	case HCIUNBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_sock_blacklist_del(hdev, (void __user *) arg);

	default:
		if (hdev->ioctl)
			return hdev->ioctl(hdev, cmd, arg);
		return -EINVAL;
	}
}

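/* Device-independent ioctls are handled here directly; anything else is
 * passed to hci_sock_bound_ioctl() under the socket lock and therefore
 * requires the socket to be bound to a device.
 */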
static int hci_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	struct sock *sk = sock->sk;
	void __user *argp = (void __user *) arg;
	int err;

	BT_DBG("cmd %x arg %lx", cmd, arg);

	switch (cmd) {
	case HCIGETDEVLIST:
		return hci_get_dev_list(argp);

	case HCIGETDEVINFO:
		return hci_get_dev_info(argp);

	case HCIGETCONNLIST:
		return hci_get_conn_list(argp);

	case HCIDEVUP:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_dev_open(arg);

	case HCIDEVDOWN:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_dev_close(arg);

	case HCIDEVRESET:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_dev_reset(arg);

	case HCIDEVRESTAT:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_dev_reset_stat(arg);

	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_dev_cmd(cmd, argp);

	case HCIINQUIRY:
		return hci_inquiry(argp);

	default:
		lock_sock(sk);
		err = hci_sock_bound_ioctl(sk, cmd, arg);
		release_sock(sk);
		return err;
	}
}

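/* Bind the socket to a channel (raw or control) and optionally to a
 * specific device.  Control channel binds are only allowed when the
 * enable_mgmt module parameter is set, and binding to a device bumps its
 * promiscuous count so HCI traffic gets copied to this socket.
 */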
static int hci_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
	struct sockaddr_hci haddr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = NULL;
	int len, err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!addr)
		return -EINVAL;

	memset(&haddr, 0, sizeof(haddr));
	len = min_t(unsigned int, sizeof(haddr), addr_len);
	memcpy(&haddr, addr, len);

	if (haddr.hci_family != AF_BLUETOOTH)
		return -EINVAL;

	if (haddr.hci_channel > HCI_CHANNEL_CONTROL)
		return -EINVAL;

	if (haddr.hci_channel == HCI_CHANNEL_CONTROL && !enable_mgmt)
		return -EINVAL;

	lock_sock(sk);

	if (sk->sk_state == BT_BOUND || hci_pi(sk)->hdev) {
		err = -EALREADY;
		goto done;
	}

	if (haddr.hci_dev != HCI_DEV_NONE) {
		hdev = hci_dev_get(haddr.hci_dev);
		if (!hdev) {
			err = -ENODEV;
			goto done;
		}

		atomic_inc(&hdev->promisc);
	}

	hci_pi(sk)->channel = haddr.hci_channel;
	hci_pi(sk)->hdev = hdev;
	sk->sk_state = BT_BOUND;

done:
	release_sock(sk);
	return err;
}

static int hci_sock_getname(struct socket *sock, struct sockaddr *addr, int *addr_len, int peer)
{
	struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = hci_pi(sk)->hdev;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!hdev)
		return -EBADFD;

	lock_sock(sk);

	*addr_len = sizeof(*haddr);
	haddr->hci_family = AF_BLUETOOTH;
	haddr->hci_dev = hdev->id;

	release_sock(sk);
	return 0;
}

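/* Attach ancillary data (packet direction and/or receive timestamp) to a
 * received message, depending on the HCI_DATA_DIR and HCI_TIME_STAMP
 * socket options recorded in cmsg_mask.
 */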
static inline void hci_sock_cmsg(struct sock *sk, struct msghdr *msg, struct sk_buff *skb)
{
	__u32 mask = hci_pi(sk)->cmsg_mask;

	if (mask & HCI_CMSG_DIR) {
		int incoming = bt_cb(skb)->incoming;
		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming), &incoming);
	}

	if (mask & HCI_CMSG_TSTAMP) {
#ifdef CONFIG_COMPAT
		struct compat_timeval ctv;
#endif
		struct timeval tv;
		void *data;
		int len;

		skb_get_timestamp(skb, &tv);

		data = &tv;
		len = sizeof(tv);
#ifdef CONFIG_COMPAT
		if (msg->msg_flags & MSG_CMSG_COMPAT) {
			ctv.tv_sec = tv.tv_sec;
			ctv.tv_usec = tv.tv_usec;
			data = &ctv;
			len = sizeof(ctv);
		}
#endif

		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
	}
}

static int hci_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
				struct msghdr *msg, size_t len, int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, err;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (flags & (MSG_OOB))
		return -EOPNOTSUPP;

	if (sk->sk_state == BT_CLOSED)
		return 0;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		return err;

	msg->msg_namelen = 0;

	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);

	hci_sock_cmsg(sk, msg, skb);

	skb_free_datagram(sk, skb);

	return err ? : copied;
}

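/* Transmit path: the first byte of the message selects the packet type.
 * On the control channel the message is handed to mgmt_control().  On the
 * raw channel, HCI commands from senders without CAP_NET_RAW are checked
 * against hci_sec_filter; vendor commands (OGF 0x3f) and commands sent to
 * devices in raw mode bypass the command queue and go straight to raw_q.
 */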
static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
			    struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	struct sk_buff *skb;
	int err;

	BT_DBG("sock %p sk %p", sock, sk);

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
		return -EINVAL;

	if (len < 4 || len > HCI_MAX_FRAME_SIZE)
		return -EINVAL;

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		break;
	case HCI_CHANNEL_CONTROL:
		err = mgmt_control(sk, msg, len);
		goto done;
	default:
		err = -EINVAL;
		goto done;
	}

	hdev = hci_pi(sk)->hdev;
	if (!hdev) {
		err = -EBADFD;
		goto done;
	}

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		goto done;

	if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
		err = -EFAULT;
		goto drop;
	}

	bt_cb(skb)->pkt_type = *((unsigned char *) skb->data);
	skb_pull(skb, 1);
	skb->dev = (void *) hdev;

	if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
		u16 opcode = get_unaligned_le16(skb->data);
		u16 ogf = hci_opcode_ogf(opcode);
		u16 ocf = hci_opcode_ocf(opcode);

		if (((ogf > HCI_SFLT_MAX_OGF) ||
				!hci_test_bit(ocf & HCI_FLT_OCF_BITS, &hci_sec_filter.ocf_mask[ogf])) &&
					!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		if (test_bit(HCI_RAW, &hdev->flags) || (ogf == 0x3f)) {
			skb_queue_tail(&hdev->raw_q, skb);
			tasklet_schedule(&hdev->tx_task);
		} else {
			skb_queue_tail(&hdev->cmd_q, skb);
			tasklet_schedule(&hdev->cmd_task);
		}
	} else {
		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		tasklet_schedule(&hdev->tx_task);
	}

	err = len;

done:
	release_sock(sk);
	return err;

drop:
	kfree_skb(skb);
	goto done;
}

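/* Socket option handling.  For HCI_FILTER the user filter is seeded from
 * the current one, so a short copy_from_user() leaves the remaining
 * fields unchanged, and callers without CAP_NET_RAW have the requested
 * filter masked down to hci_sec_filter.
 */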
static int hci_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int len)
{
	struct hci_ufilter uf = { .opcode = 0 };
	struct sock *sk = sock->sk;
	int err = 0, opt = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	lock_sock(sk);

	switch (optname) {
	case HCI_DATA_DIR:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
		break;

	case HCI_TIME_STAMP:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			uf.type_mask = f->type_mask;
			uf.opcode = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_from_user(&uf, optval, len)) {
			err = -EFAULT;
			break;
		}

		if (!capable(CAP_NET_RAW)) {
			uf.type_mask &= hci_sec_filter.type_mask;
			uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
			uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
		}

		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			f->type_mask = uf.type_mask;
			f->opcode = uf.opcode;
			*((u32 *) f->event_mask + 0) = uf.event_mask[0];
			*((u32 *) f->event_mask + 1) = uf.event_mask[1];
		}
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}

static int hci_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
{
	struct hci_ufilter uf;
	struct sock *sk = sock->sk;
	int len, opt;

	if (get_user(len, optlen))
		return -EFAULT;

	switch (optname) {
	case HCI_DATA_DIR:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			return -EFAULT;
		break;

	case HCI_TIME_STAMP:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			return -EFAULT;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			uf.type_mask = f->type_mask;
			uf.opcode = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_to_user(optval, &uf, len))
			return -EFAULT;
		break;

	default:
		return -ENOPROTOOPT;
		break;
	}

	return 0;
}

static const struct proto_ops hci_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= hci_sock_release,
	.bind		= hci_sock_bind,
	.getname	= hci_sock_getname,
	.sendmsg	= hci_sock_sendmsg,
	.recvmsg	= hci_sock_recvmsg,
	.ioctl		= hci_sock_ioctl,
	.poll		= datagram_poll,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= hci_sock_setsockopt,
	.getsockopt	= hci_sock_getsockopt,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.mmap		= sock_no_mmap
};

static struct proto hci_sk_proto = {
	.name		= "HCI",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct hci_pinfo)
};

static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
			   int kern)
{
	struct sock *sk;

	BT_DBG("sock %p", sock);

	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	sock->ops = &hci_sock_ops;

	sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = protocol;

	sock->state = SS_UNCONNECTED;
	sk->sk_state = BT_OPEN;

	bt_sock_link(&hci_sk_list, sk);
	return 0;
}

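/* Notifier callback for device state changes: broadcast a stack-internal
 * HCI_EV_SI_DEVICE event to listening sockets and, on HCI_DEV_UNREG,
 * detach every socket still bound to the disappearing device.
 */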
static int hci_sock_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct hci_dev *hdev = (struct hci_dev *) ptr;
	struct hci_ev_si_device ev;

	BT_DBG("hdev %s event %ld", hdev->name, event);

	/* Send event to sockets */
	ev.event = event;
	ev.dev_id = hdev->id;
	hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);

	if (event == HCI_DEV_UNREG) {
		struct sock *sk;
		struct hlist_node *node;

		/* Detach sockets from device */
		read_lock(&hci_sk_list.lock);
		sk_for_each(sk, node, &hci_sk_list.head) {
			local_bh_disable();
			bh_lock_sock_nested(sk);
			if (hci_pi(sk)->hdev == hdev) {
				hci_pi(sk)->hdev = NULL;
				sk->sk_err = EPIPE;
				sk->sk_state = BT_OPEN;
				sk->sk_state_change(sk);

				hci_dev_put(hdev);
			}
			bh_unlock_sock(sk);
			local_bh_enable();
		}
		read_unlock(&hci_sk_list.lock);
	}

	return NOTIFY_DONE;
}

static const struct net_proto_family hci_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= hci_sock_create,
};

static struct notifier_block hci_sock_nblock = {
	.notifier_call = hci_sock_dev_event
};

int __init hci_sock_init(void)
{
	int err;

	err = proto_register(&hci_sk_proto, 0);
	if (err < 0)
		return err;

	err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
	if (err < 0)
		goto error;

	hci_register_notifier(&hci_sock_nblock);

	BT_INFO("HCI socket layer initialized");

	return 0;

error:
	BT_ERR("HCI socket registration failed");
	proto_unregister(&hci_sk_proto);
	return err;
}

void hci_sock_cleanup(void)
{
	if (bt_sock_unregister(BTPROTO_HCI) < 0)
		BT_ERR("HCI socket unregistration failed");

	hci_unregister_notifier(&hci_sock_nblock);

	proto_unregister(&hci_sk_proto);
}

module_param(enable_mgmt, bool, 0644);
MODULE_PARM_DESC(enable_mgmt, "Enable Management interface");