Bluetooth: Fix issue with shared SKB between HCI raw socket and driver
net/bluetooth/hci_sock.c
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI sockets. */

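/*
 * A minimal userspace sketch of how this interface is typically driven,
 * assuming BlueZ's <bluetooth/bluetooth.h>, <bluetooth/hci.h> and
 * <bluetooth/hci_lib.h> headers (the hci_filter_* helpers below come from
 * there) and that device hci0 exists:
 *
 *	int fd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *	struct sockaddr_hci a = { 0 };
 *	struct hci_filter flt;
 *
 *	a.hci_family  = AF_BLUETOOTH;
 *	a.hci_dev     = 0;			/* hci0 (assumed) */
 *	a.hci_channel = HCI_CHANNEL_RAW;	/* 0, the default channel */
 *	bind(fd, (struct sockaddr *) &a, sizeof(a));
 *
 *	hci_filter_clear(&flt);			/* BlueZ helper: zero the filter */
 *	hci_filter_all_ptypes(&flt);		/* BlueZ helper: all packet types */
 *	hci_filter_all_events(&flt);		/* BlueZ helper: all event codes */
 *	setsockopt(fd, SOL_HCI, HCI_FILTER, &flt, sizeof(flt));
 *
 * After that, read() returns raw HCI frames with the packet-type byte first
 * (queued by hci_send_to_sock() below) and write() submits frames through
 * hci_sock_sendmsg(); without CAP_NET_RAW both directions are clipped by the
 * security filter defined further down.
 */
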
#include <linux/module.h>

#include <linux/types.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/compat.h>
#include <linux/socket.h>
#include <linux/ioctl.h>
#include <net/sock.h>

#include <asm/system.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static bool enable_mgmt;

/* ----- HCI socket interface ----- */

static inline int hci_test_bit(int nr, void *addr)
{
	return *((__u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
}

/* Security filter */
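/*
 * This table is the ceiling applied to sockets without CAP_NET_RAW:
 * type_mask restricts which packet types such a socket may select in its
 * receive filter, event_mask is a bitmap of permitted HCI event codes, and
 * ocf_mask[] holds one OCF bitmap per OGF row (see the OGF_* comments
 * below).  hci_sock_setsockopt() ANDs unprivileged HCI_FILTER requests
 * against these masks, and hci_sock_sendmsg() rejects commands whose OCF
 * bit is not set in the corresponding row.
 */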
static struct hci_sec_filter hci_sec_filter = {
	/* Packet types */
	0x10,
	/* Events */
	{ 0x1000d9fe, 0x0000b00c },
	/* Commands */
	{
		{ 0x0 },
		/* OGF_LINK_CTL */
		{ 0xbe000006, 0x00000001, 0x00000000, 0x00 },
		/* OGF_LINK_POLICY */
		{ 0x00005200, 0x00000000, 0x00000000, 0x00 },
		/* OGF_HOST_CTL */
		{ 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
		/* OGF_INFO_PARAM */
		{ 0x000002be, 0x00000000, 0x00000000, 0x00 },
		/* OGF_STATUS_PARAM */
		{ 0x000000ea, 0x00000000, 0x00000000, 0x00 }
	}
};

static struct bt_sock_list hci_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
};

/* Send frame to RAW socket */
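/*
 * The driver (or another receiver) may still hold a reference to @skb, so
 * the packet-type byte is never pushed onto the shared buffer itself: the
 * first matching socket triggers one private copy with a single byte of
 * headroom, the type byte is prepended to that copy, and every listener is
 * then fed an skb_clone() of it.
 */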
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	struct hlist_node *node;
	struct sk_buff *skb_copy = NULL;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, node, &hci_sk_list.head) {
		struct hci_filter *flt;
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;

		if (hci_pi(sk)->channel != HCI_CHANNEL_RAW)
			continue;

		/* Apply filter */
		flt = &hci_pi(sk)->filter;

		if (!test_bit((bt_cb(skb)->pkt_type == HCI_VENDOR_PKT) ?
				0 : (bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS), &flt->type_mask))
			continue;

		if (bt_cb(skb)->pkt_type == HCI_EVENT_PKT) {
			register int evt = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);

			if (!hci_test_bit(evt, &flt->event_mask))
				continue;

			if (flt->opcode &&
			    ((evt == HCI_EV_CMD_COMPLETE &&
			      flt->opcode !=
			      get_unaligned((__le16 *)(skb->data + 3))) ||
			     (evt == HCI_EV_CMD_STATUS &&
			      flt->opcode !=
			      get_unaligned((__le16 *)(skb->data + 4)))))
				continue;
		}

		if (!skb_copy) {
			/* Create a private copy with headroom */
			skb_copy = __pskb_copy(skb, 1, GFP_ATOMIC);
			if (!skb_copy)
				continue;

			/* Put type byte before the data */
			memcpy(skb_push(skb_copy, 1), &bt_cb(skb)->pkt_type, 1);
		}

		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	kfree_skb(skb_copy);
}

/* Send frame to control socket */
void hci_send_to_control(struct sk_buff *skb, struct sock *skip_sk)
{
	struct sock *sk;
	struct hlist_node *node;

	BT_DBG("len %d", skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, node, &hci_sk_list.head) {
		struct sk_buff *nskb;

		/* Skip the original socket */
		if (sk == skip_sk)
			continue;

		if (sk->sk_state != BT_BOUND)
			continue;

		if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)
			continue;

		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);
}

static int hci_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!sk)
		return 0;

	hdev = hci_pi(sk)->hdev;

	bt_sock_unlink(&hci_sk_list, sk);

	if (hdev) {
		atomic_dec(&hdev->promisc);
		hci_dev_put(hdev);
	}

	sock_orphan(sk);

	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);

	sock_put(sk);
	return 0;
}

static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_blacklist_add(hdev, &bdaddr, 0);

	hci_dev_unlock(hdev);

	return err;
}

static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_blacklist_del(hdev, &bdaddr, 0);

	hci_dev_unlock(hdev);

	return err;
}

/* Ioctls that require bound socket */
static inline int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd, unsigned long arg)
{
	struct hci_dev *hdev = hci_pi(sk)->hdev;

	if (!hdev)
		return -EBADFD;

	switch (cmd) {
	case HCISETRAW:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;

		if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
			return -EPERM;

		if (arg)
			set_bit(HCI_RAW, &hdev->flags);
		else
			clear_bit(HCI_RAW, &hdev->flags);

		return 0;

	case HCIGETCONNINFO:
		return hci_get_conn_info(hdev, (void __user *) arg);

	case HCIGETAUTHINFO:
		return hci_get_auth_info(hdev, (void __user *) arg);

	case HCIBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_sock_blacklist_add(hdev, (void __user *) arg);

	case HCIUNBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_sock_blacklist_del(hdev, (void __user *) arg);

	default:
		if (hdev->ioctl)
			return hdev->ioctl(hdev, cmd, arg);
		return -EINVAL;
	}
}

static int hci_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	struct sock *sk = sock->sk;
	void __user *argp = (void __user *) arg;
	int err;

	BT_DBG("cmd %x arg %lx", cmd, arg);

	switch (cmd) {
	case HCIGETDEVLIST:
		return hci_get_dev_list(argp);

	case HCIGETDEVINFO:
		return hci_get_dev_info(argp);

	case HCIGETCONNLIST:
		return hci_get_conn_list(argp);

	case HCIDEVUP:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_dev_open(arg);

	case HCIDEVDOWN:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_dev_close(arg);

	case HCIDEVRESET:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_dev_reset(arg);

	case HCIDEVRESTAT:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_dev_reset_stat(arg);

	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_dev_cmd(cmd, argp);

	case HCIINQUIRY:
		return hci_inquiry(argp);

	default:
		lock_sock(sk);
		err = hci_sock_bound_ioctl(sk, cmd, arg);
		release_sock(sk);
		return err;
	}
}

static int hci_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
	struct sockaddr_hci haddr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = NULL;
	int len, err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!addr)
		return -EINVAL;

	memset(&haddr, 0, sizeof(haddr));
	len = min_t(unsigned int, sizeof(haddr), addr_len);
	memcpy(&haddr, addr, len);

	if (haddr.hci_family != AF_BLUETOOTH)
		return -EINVAL;

	lock_sock(sk);

	if (sk->sk_state == BT_BOUND) {
		err = -EALREADY;
		goto done;
	}

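	/*
	 * HCI_CHANNEL_RAW may bind to one device (or to HCI_DEV_NONE for all
	 * of them) and marks the device promiscuous; HCI_CHANNEL_CONTROL is
	 * the management interface, takes no device and is only available
	 * when the enable_mgmt module parameter is set.
	 */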
	switch (haddr.hci_channel) {
	case HCI_CHANNEL_RAW:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			hdev = hci_dev_get(haddr.hci_dev);
			if (!hdev) {
				err = -ENODEV;
				goto done;
			}

			atomic_inc(&hdev->promisc);
		}

		hci_pi(sk)->hdev = hdev;
		break;

	case HCI_CHANNEL_CONTROL:
		if (haddr.hci_dev != HCI_DEV_NONE || !enable_mgmt) {
			err = -EINVAL;
			goto done;
		}

		set_bit(HCI_PI_MGMT_INIT, &hci_pi(sk)->flags);
		break;

	default:
		err = -EINVAL;
		goto done;
	}

	hci_pi(sk)->channel = haddr.hci_channel;
	sk->sk_state = BT_BOUND;

done:
	release_sock(sk);
	return err;
}

static int hci_sock_getname(struct socket *sock, struct sockaddr *addr, int *addr_len, int peer)
{
	struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = hci_pi(sk)->hdev;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!hdev)
		return -EBADFD;

	lock_sock(sk);

	*addr_len = sizeof(*haddr);
	haddr->hci_family = AF_BLUETOOTH;
	haddr->hci_dev = hdev->id;

	release_sock(sk);
	return 0;
}

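/*
 * Attach the ancillary data selected through HCI_DATA_DIR and
 * HCI_TIME_STAMP: HCI_CMSG_DIR carries the frame's incoming/outgoing flag,
 * HCI_CMSG_TSTAMP its timestamp (converted to a compat_timeval for
 * MSG_CMSG_COMPAT callers).
 */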
static inline void hci_sock_cmsg(struct sock *sk, struct msghdr *msg, struct sk_buff *skb)
{
	__u32 mask = hci_pi(sk)->cmsg_mask;

	if (mask & HCI_CMSG_DIR) {
		int incoming = bt_cb(skb)->incoming;
		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming), &incoming);
	}

	if (mask & HCI_CMSG_TSTAMP) {
#ifdef CONFIG_COMPAT
		struct compat_timeval ctv;
#endif
		struct timeval tv;
		void *data;
		int len;

		skb_get_timestamp(skb, &tv);

		data = &tv;
		len = sizeof(tv);
#ifdef CONFIG_COMPAT
		if (msg->msg_flags & MSG_CMSG_COMPAT) {
			ctv.tv_sec = tv.tv_sec;
			ctv.tv_usec = tv.tv_usec;
			data = &ctv;
			len = sizeof(ctv);
		}
#endif

		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
	}
}

static int hci_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
				struct msghdr *msg, size_t len, int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, err;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (flags & (MSG_OOB))
		return -EOPNOTSUPP;

	if (sk->sk_state == BT_CLOSED)
		return 0;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		return err;

	msg->msg_namelen = 0;

	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		hci_sock_cmsg(sk, msg, skb);
		break;
	}

	skb_free_datagram(sk, skb);

	return err ? : copied;
}

static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
				struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	struct sk_buff *skb;
	int err;

	BT_DBG("sock %p sk %p", sock, sk);

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
		return -EINVAL;

	if (len < 4 || len > HCI_MAX_FRAME_SIZE)
		return -EINVAL;

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		break;
	case HCI_CHANNEL_CONTROL:
		err = mgmt_control(sk, msg, len);
		goto done;
	default:
		err = -EINVAL;
		goto done;
	}

	hdev = hci_pi(sk)->hdev;
	if (!hdev) {
		err = -EBADFD;
		goto done;
	}

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		goto done;

	if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
		err = -EFAULT;
		goto drop;
	}

	bt_cb(skb)->pkt_type = *((unsigned char *) skb->data);
	skb_pull(skb, 1);
	skb->dev = (void *) hdev;

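	/*
	 * Commands from senders without CAP_NET_RAW must be allowed by
	 * hci_sec_filter.ocf_mask[].  Vendor-specific commands (OGF 0x3f)
	 * and commands for a device in HCI_RAW mode bypass the command
	 * queue and go straight to raw_q; everything else is serialized
	 * through cmd_q and the command work.
	 */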
	if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
		u16 opcode = get_unaligned_le16(skb->data);
		u16 ogf = hci_opcode_ogf(opcode);
		u16 ocf = hci_opcode_ocf(opcode);

		if (((ogf > HCI_SFLT_MAX_OGF) ||
		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS, &hci_sec_filter.ocf_mask[ogf])) &&
		    !capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		if (test_bit(HCI_RAW, &hdev->flags) || (ogf == 0x3f)) {
			skb_queue_tail(&hdev->raw_q, skb);
			queue_work(hdev->workqueue, &hdev->tx_work);
		} else {
			skb_queue_tail(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	} else {
		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	}

	err = len;

done:
	release_sock(sk);
	return err;

drop:
	kfree_skb(skb);
	goto done;
}

static int hci_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int len)
{
	struct hci_ufilter uf = { .opcode = 0 };
	struct sock *sk = sock->sk;
	int err = 0, opt = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EINVAL;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
		break;

	case HCI_TIME_STAMP:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			uf.type_mask = f->type_mask;
			uf.opcode = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_from_user(&uf, optval, len)) {
			err = -EFAULT;
			break;
		}

		if (!capable(CAP_NET_RAW)) {
			uf.type_mask &= hci_sec_filter.type_mask;
			uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
			uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
		}

		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			f->type_mask = uf.type_mask;
			f->opcode = uf.opcode;
			*((u32 *) f->event_mask + 0) = uf.event_mask[0];
			*((u32 *) f->event_mask + 1) = uf.event_mask[1];
		}
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}

static int hci_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
{
	struct hci_ufilter uf;
	struct sock *sk = sock->sk;
	int len, opt, err = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	if (get_user(len, optlen))
		return -EFAULT;

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EINVAL;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_TIME_STAMP:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			uf.type_mask = f->type_mask;
			uf.opcode = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_to_user(optval, &uf, len))
			err = -EFAULT;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}

static const struct proto_ops hci_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= hci_sock_release,
	.bind		= hci_sock_bind,
	.getname	= hci_sock_getname,
	.sendmsg	= hci_sock_sendmsg,
	.recvmsg	= hci_sock_recvmsg,
	.ioctl		= hci_sock_ioctl,
	.poll		= datagram_poll,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= hci_sock_setsockopt,
	.getsockopt	= hci_sock_getsockopt,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.mmap		= sock_no_mmap
};

static struct proto hci_sk_proto = {
	.name		= "HCI",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct hci_pinfo)
};

static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
			   int kern)
{
	struct sock *sk;

	BT_DBG("sock %p", sock);

	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	sock->ops = &hci_sock_ops;

	sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = protocol;

	sock->state = SS_UNCONNECTED;
	sk->sk_state = BT_OPEN;

	bt_sock_link(&hci_sk_list, sk);
	return 0;
}

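/*
 * Device notifier: forward HCI_DEV_* events to listeners as
 * HCI_EV_SI_DEVICE stack-internal events and, on HCI_DEV_UNREG, detach
 * every socket still bound to the vanishing device.
 */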
static int hci_sock_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct hci_dev *hdev = (struct hci_dev *) ptr;
	struct hci_ev_si_device ev;

	BT_DBG("hdev %s event %ld", hdev->name, event);

	/* Send event to sockets */
	ev.event = event;
	ev.dev_id = hdev->id;
	hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);

	if (event == HCI_DEV_UNREG) {
		struct sock *sk;
		struct hlist_node *node;

		/* Detach sockets from device */
		read_lock(&hci_sk_list.lock);
		sk_for_each(sk, node, &hci_sk_list.head) {
			bh_lock_sock_nested(sk);
			if (hci_pi(sk)->hdev == hdev) {
				hci_pi(sk)->hdev = NULL;
				sk->sk_err = EPIPE;
				sk->sk_state = BT_OPEN;
				sk->sk_state_change(sk);

				hci_dev_put(hdev);
			}
			bh_unlock_sock(sk);
		}
		read_unlock(&hci_sk_list.lock);
	}

	return NOTIFY_DONE;
}

static const struct net_proto_family hci_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= hci_sock_create,
};

static struct notifier_block hci_sock_nblock = {
	.notifier_call = hci_sock_dev_event
};

int __init hci_sock_init(void)
{
	int err;

	err = proto_register(&hci_sk_proto, 0);
	if (err < 0)
		return err;

	err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
	if (err < 0)
		goto error;

	hci_register_notifier(&hci_sock_nblock);

	BT_INFO("HCI socket layer initialized");

	return 0;

error:
	BT_ERR("HCI socket registration failed");
	proto_unregister(&hci_sk_proto);
	return err;
}

void hci_sock_cleanup(void)
{
	if (bt_sock_unregister(BTPROTO_HCI) < 0)
		BT_ERR("HCI socket unregistration failed");

	hci_unregister_notifier(&hci_sock_nblock);

	proto_unregister(&hci_sk_proto);
}

module_param(enable_mgmt, bool, 0644);
MODULE_PARM_DESC(enable_mgmt, "Enable Management interface");